repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
ariev7x/S7270_kernel_cm | drivers/spi/spi-imx.c | 4796 | 24291 | /*
* Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2008 Juergen Beisert
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02110-1301, USA.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <mach/spi.h>
#define DRIVER_NAME "spi_imx"
#define MXC_CSPIRXDATA 0x00
#define MXC_CSPITXDATA 0x04
#define MXC_CSPICTRL 0x08
#define MXC_CSPIINT 0x0c
#define MXC_RESET 0x1c
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
/* Snapshot of the settings to apply for one transfer, consumed by the
 * per-devtype ->config() op. */
struct spi_imx_config {
	unsigned int speed_hz;	/* requested SCLK rate in Hz */
	unsigned int bpw;	/* bits per word */
	unsigned int mode;	/* SPI_CPOL / SPI_CPHA / SPI_CS_HIGH flags */
	u8 cs;			/* logical chipselect index */
};
/* Identifies which register-layout flavour of the controller is present. */
enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI, /* CSPI on all i.mx except above */
	IMX51_ECSPI, /* ECSPI on i.mx51 and later */
};
struct spi_imx_data;

/* Per-SoC hardware access ops plus the devtype tag they serve. */
struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);	/* enable/mask irq sources */
	int (*config)(struct spi_imx_data *, struct spi_imx_config *); /* apply transfer settings */
	void (*trigger)(struct spi_imx_data *);		/* start the SPI burst */
	int (*rx_available)(struct spi_imx_data *);	/* non-zero if RX FIFO has data */
	void (*reset)(struct spi_imx_data *);		/* quiesce/drain the controller */
	enum spi_imx_devtype devtype;
};
/* Per-controller driver state; allocated as spi_master devdata. */
struct spi_imx_data {
	struct spi_bitbang bitbang;	/* must be first: bitbang core gets devdata */
	struct completion xfer_done;	/* signalled by the irq handler at end of xfer */
	void __iomem *base;		/* mapped register window */
	int irq;
	struct clk *clk;
	unsigned long spi_clk;		/* input clock rate, cached from clk_get_rate() */
	unsigned int count;		/* bytes still to be pushed for current xfer */
	void (*tx)(struct spi_imx_data *);	/* FIFO writer for current word size */
	void (*rx)(struct spi_imx_data *);	/* FIFO reader for current word size */
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	struct spi_imx_devtype_data *devtype_data;
	/* Zero-length tail, sized at alloc time to num_chipselect entries;
	 * negative values denote the controller's native chipselect. */
	int chipselect[0];
};
static inline int is_imx27_cspi(struct spi_imx_data *d)
{
return d->devtype_data->devtype == IMX27_CSPI;
}
static inline int is_imx35_cspi(struct spi_imx_data *d)
{
return d->devtype_data->devtype == IMX35_CSPI;
}
/* FIFO depth in words: the eCSPI has a 64-word FIFO, all CSPI cores 8. */
static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
	if (d->devtype_data->devtype == IMX51_ECSPI)
		return 64;

	return 8;
}
/*
 * Generate spi_imx_buf_rx_<type>(): pop one word from the RX data register
 * and, when a receive buffer is present, store it and advance the buffer
 * pointer by sizeof(type).  The read is unconditional so the FIFO drains
 * even for tx-only transfers.
 */
#define MXC_SPI_BUF_RX(type) \
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
{ \
unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
\
if (spi_imx->rx_buf) { \
*(type *)spi_imx->rx_buf = val; \
spi_imx->rx_buf += sizeof(type); \
} \
}
/*
 * Generate spi_imx_buf_tx_<type>(): fetch the next word from the transmit
 * buffer (0 for rx-only transfers), push it into the TX data register and
 * decrement the remaining byte count by sizeof(type).
 */
#define MXC_SPI_BUF_TX(type) \
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
{ \
type val = 0; \
\
if (spi_imx->tx_buf) { \
val = *(type *)spi_imx->tx_buf; \
spi_imx->tx_buf += sizeof(type); \
} \
\
spi_imx->count -= sizeof(type); \
\
writel(val, spi_imx->base + MXC_CSPITXDATA); \
}
/* Instantiate the FIFO accessors for 8-, 16- and 32-bit word sizes;
 * spi_imx_setupxfer() picks the matching pair per transfer. */
MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
* (which is currently not the case in this driver)
*/
/* Divider table for i.MX21/27; spi_imx_clkdiv_1() returns the index of
 * the first entry that divides fin down to at most fspi. */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};
/* MX21, MX27 */
/*
 * Find the divider-table index for i.MX21/27 that brings fin down to at
 * most fspi.  Entries 0 and 1 of mxc_clkdivs are not usable here, so the
 * scan starts at index 2; when nothing fits, the largest permitted index
 * (max) is returned.
 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max)
{
	int i = 2;

	while (i < max) {
		if (fspi * mxc_clkdivs[i] >= fin)
			return i;
		i++;
	}

	return max;
}
/* MX1, MX31, MX35, MX51 CSPI */
/*
 * Compute the 3-bit data-rate field for MX1/MX31/MX35/MX51 CSPI.  The
 * hardware divider is 4 * 2^n with n in 0..7; return the smallest n whose
 * divisor reaches fspi or below, clamping to 7 (divide by 512) when even
 * the maximum division is not enough.
 */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi)
{
	unsigned int shift;

	for (shift = 0; shift < 7; shift++) {
		if (fspi * (4U << shift) >= fin)
			return shift;
	}

	return 7;
}
#define MX51_ECSPI_CTRL 0x08
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
#define MX51_ECSPI_CTRL_XCH (1 << 2)
#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET 20
#define MX51_ECSPI_CONFIG 0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)
#define MX51_ECSPI_STAT 0x18
#define MX51_ECSPI_STAT_RR (1 << 3)
/* MX51 eCSPI */
/*
 * Compute the eCSPI CTRL register divider field for the requested rate.
 *
 * Returns the pre/post divider bits already shifted into their register
 * positions, 0 when fspi exceeds fin (no division needed/possible), or
 * 0xff when fspi is too low to reach even with maximum division.
 */
static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;

	if (unlikely(fspi > fin))
		return 0;

	/* Smallest power-of-two shift with fin <= fspi << post. */
	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	/* The linear pre-divider handles factors up to 16 (2^4), so fold
	 * that much out of the power-of-two post-divider. */
	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
				__func__, fspi, fin);
		return 0xff;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
/* Map the generic MXC_INT_* flags onto the eCSPI interrupt-enable bits;
 * passing 0 masks both sources. */
static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned val;

	val = ((enable & MXC_INT_TE) ? MX51_ECSPI_INT_TEEN : 0) |
	      ((enable & MXC_INT_RR) ? MX51_ECSPI_INT_RREN : 0);

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}
/* Start the SPI burst: read-modify-write sets XCH, keeping other bits. */
static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	void __iomem *ctrl_reg = spi_imx->base + MX51_ECSPI_CTRL;

	writel(readl(ctrl_reg) | MX51_ECSPI_CTRL_XCH, ctrl_reg);
}
/*
 * Program the eCSPI CTRL and CONFIG registers for one transfer: master
 * mode, clock dividers, chipselect, burst length and SPI mode flags.
 * Always returns 0.
 */
static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;

	/*
	 * The hardware seems to have a race condition when changing modes. The
	 * current assumption is that the selection of the channel arrives
	 * earlier in the hardware than the mode bits when they are written at
	 * the same time.
	 * So set master mode for all channels as we do not support slave mode.
	 */
	ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/* set clock speed */
	ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(config->cs);

	/* burst length is encoded as bits-per-word minus one */
	ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

	/* SS is controlled per burst for this channel */
	cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);

	if (config->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
	if (config->mode & SPI_CPOL)
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
	if (config->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	return 0;
}
/* Non-zero while at least one word is waiting in the RX FIFO. */
static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	u32 stat = readl(spi_imx->base + MX51_ECSPI_STAT);

	return stat & MX51_ECSPI_STAT_RR;
}
/* Throw away any stale words left in the receive FIFO. */
static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	for (;;) {
		if (!mx51_ecspi_rx_available(spi_imx))
			break;
		readl(spi_imx->base + MXC_CSPIRXDATA);
	}
}
#define MX31_INTREG_TEEN (1 << 0)
#define MX31_INTREG_RREN (1 << 3)
#define MX31_CSPICTRL_ENABLE (1 << 0)
#define MX31_CSPICTRL_MASTER (1 << 1)
#define MX31_CSPICTRL_XCH (1 << 2)
#define MX31_CSPICTRL_POL (1 << 4)
#define MX31_CSPICTRL_PHA (1 << 5)
#define MX31_CSPICTRL_SSCTL (1 << 6)
#define MX31_CSPICTRL_SSPOL (1 << 7)
#define MX31_CSPICTRL_BC_SHIFT 8
#define MX35_CSPICTRL_BL_SHIFT 20
#define MX31_CSPICTRL_CS_SHIFT 24
#define MX35_CSPICTRL_CS_SHIFT 12
#define MX31_CSPICTRL_DR_SHIFT 16
#define MX31_CSPISTATUS 0x14
#define MX31_STATUS_RR (1 << 3)
/* These functions also work for the i.MX35, but be aware that
* the i.MX35 has a slightly different register layout for bits
* we do not use here.
*/
/* Map generic MXC_INT_* flags onto the MX31 interrupt-enable bits;
 * passing 0 masks both sources. */
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int mask;

	mask = ((enable & MXC_INT_TE) ? MX31_INTREG_TEEN : 0) |
	       ((enable & MXC_INT_RR) ? MX31_INTREG_RREN : 0);

	writel(mask, spi_imx->base + MXC_CSPIINT);
}
/* Start the SPI burst: read-modify-write sets XCH, keeping other bits. */
static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
{
	void __iomem *ctrl_reg = spi_imx->base + MXC_CSPICTRL;

	writel(readl(ctrl_reg) | MX31_CSPICTRL_XCH, ctrl_reg);
}
/*
 * Program the MX31/MX35 control register for one transfer: master mode,
 * clock divider, burst length and SPI mode flags.  The i.MX35 puts the
 * burst-length and chipselect fields at different offsets, hence the
 * is_imx35_cspi() branches.  Always returns 0.
 */
static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
		MX31_CSPICTRL_DR_SHIFT;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (config->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;
	/* Negative chipselect entries encode the controller's native CS
	 * line as (cs + 32); non-negative entries are GPIOs handled in
	 * spi_imx_chipselect() instead. */
	if (cs < 0)
		reg |= (cs + 32) <<
			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
						  MX31_CSPICTRL_CS_SHIFT);

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
/* Non-zero while at least one word is waiting in the RX FIFO. */
static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
{
	unsigned int status = readl(spi_imx->base + MX31_CSPISTATUS);

	return status & MX31_STATUS_RR;
}
/* Throw away any stale words left in the receive FIFO. */
static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
	for (;;) {
		if (!(readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR))
			break;
		readl(spi_imx->base + MXC_CSPIRXDATA);
	}
}
#define MX21_INTREG_RR (1 << 4)
#define MX21_INTREG_TEEN (1 << 9)
#define MX21_INTREG_RREN (1 << 13)
#define MX21_CSPICTRL_POL (1 << 5)
#define MX21_CSPICTRL_PHA (1 << 6)
#define MX21_CSPICTRL_SSPOL (1 << 8)
#define MX21_CSPICTRL_XCH (1 << 9)
#define MX21_CSPICTRL_ENABLE (1 << 10)
#define MX21_CSPICTRL_MASTER (1 << 11)
#define MX21_CSPICTRL_DR_SHIFT 14
#define MX21_CSPICTRL_CS_SHIFT 19
/* Map generic MXC_INT_* flags onto the MX21/MX27 interrupt-enable bits;
 * passing 0 masks both sources. */
static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int mask;

	mask = ((enable & MXC_INT_TE) ? MX21_INTREG_TEEN : 0) |
	       ((enable & MXC_INT_RR) ? MX21_INTREG_RREN : 0);

	writel(mask, spi_imx->base + MXC_CSPIINT);
}
/* Start the SPI burst: read-modify-write sets XCH, keeping other bits. */
static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
	void __iomem *ctrl_reg = spi_imx->base + MXC_CSPICTRL;

	writel(readl(ctrl_reg) | MX21_CSPICTRL_XCH, ctrl_reg);
}
/*
 * Program the MX21/MX27 control register for one transfer: master mode,
 * clock divider (the i.MX27 has a shorter divider table, hence max 16 vs
 * 18), burst length and SPI mode flags.  Always returns 0.
 */
static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	int cs = spi_imx->chipselect[config->cs];
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
		MX21_CSPICTRL_DR_SHIFT;
	/* burst length is encoded as bits-per-word minus one */
	reg |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (config->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;
	/* Negative chipselect entries encode the native CS line as (cs + 32);
	 * non-negative entries are GPIOs handled in spi_imx_chipselect(). */
	if (cs < 0)
		reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
/* Non-zero while at least one word is waiting in the RX FIFO. */
static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
	unsigned int intreg = readl(spi_imx->base + MXC_CSPIINT);

	return intreg & MX21_INTREG_RR;
}
/* Writing 1 to the reset register triggers a controller reset. */
static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
	void __iomem *reset_reg = spi_imx->base + MXC_RESET;

	writel(1, reset_reg);
}
#define MX1_INTREG_RR (1 << 3)
#define MX1_INTREG_TEEN (1 << 8)
#define MX1_INTREG_RREN (1 << 11)
#define MX1_CSPICTRL_POL (1 << 4)
#define MX1_CSPICTRL_PHA (1 << 5)
#define MX1_CSPICTRL_XCH (1 << 8)
#define MX1_CSPICTRL_ENABLE (1 << 9)
#define MX1_CSPICTRL_MASTER (1 << 10)
#define MX1_CSPICTRL_DR_SHIFT 13
/* Map generic MXC_INT_* flags onto the MX1 interrupt-enable bits;
 * passing 0 masks both sources. */
static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int mask;

	mask = ((enable & MXC_INT_TE) ? MX1_INTREG_TEEN : 0) |
	       ((enable & MXC_INT_RR) ? MX1_INTREG_RREN : 0);

	writel(mask, spi_imx->base + MXC_CSPIINT);
}
/* Start the SPI burst: read-modify-write sets XCH, keeping other bits. */
static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
{
	void __iomem *ctrl_reg = spi_imx->base + MXC_CSPICTRL;

	writel(readl(ctrl_reg) | MX1_CSPICTRL_XCH, ctrl_reg);
}
/*
 * Program the MX1 control register for one transfer: master mode, clock
 * divider, burst length and clock phase/polarity.  The MX1 has no SSPOL
 * bit, so SPI_CS_HIGH is not handled here.  Always returns 0.
 */
static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	unsigned int ctrl;

	ctrl = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	ctrl |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz)
			<< MX1_CSPICTRL_DR_SHIFT;
	/* burst length is encoded as bits-per-word minus one */
	ctrl |= config->bpw - 1;

	if (config->mode & SPI_CPHA)
		ctrl |= MX1_CSPICTRL_PHA;
	if (config->mode & SPI_CPOL)
		ctrl |= MX1_CSPICTRL_POL;

	writel(ctrl, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
/* Non-zero while at least one word is waiting in the RX FIFO. */
static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
{
	unsigned int intreg = readl(spi_imx->base + MXC_CSPIINT);

	return intreg & MX1_INTREG_RR;
}
/* Writing 1 to the reset register triggers a controller reset. */
static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
	void __iomem *reset_reg = spi_imx->base + MXC_RESET;

	writel(1, reset_reg);
}
/* Controller ops for the i.MX1 CSPI core. */
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.config = mx1_config,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.devtype = IMX1_CSPI,
};
/* Controller ops for the i.MX21 CSPI core. */
static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX21_CSPI,
};
static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares the functions with i.mx21 one */
	/* only the devtype tag differs, which shortens the divider table
	 * in mx21_config() */
	.intctrl = mx21_intctrl,
	.config = mx21_config,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.devtype = IMX27_CSPI,
};
/* Controller ops for the i.MX31 CSPI core. */
static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX31_CSPI,
};
static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares the functions with i.mx31 one */
	/* the devtype tag selects the i.MX35 register field offsets in
	 * mx31_config() */
	.intctrl = mx31_intctrl,
	.config = mx31_config,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.devtype = IMX35_CSPI,
};
/* Controller ops for the i.MX51-and-later eCSPI core. */
static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.config = mx51_ecspi_config,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.devtype = IMX51_ECSPI,
};
/* Platform-bus device names mapped to the per-SoC ops tables (non-DT probe). */
static struct platform_device_id spi_imx_devtype[] = {
	{
		.name = "imx1-cspi",
		.driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
	}, {
		.name = "imx21-cspi",
		.driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
	}, {
		.name = "imx27-cspi",
		.driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
	}, {
		.name = "imx31-cspi",
		.driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
	}, {
		.name = "imx35-cspi",
		.driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
	}, {
		.name = "imx51-ecspi",
		.driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
	}, {
		/* sentinel */
	}
};
/* Device-tree compatibles resolved to the same per-SoC ops tables. */
static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ /* sentinel */ }
};
/*
 * Drive a GPIO chipselect to the requested state.  Negative entries in
 * chipselect[] denote the controller's native CS line, which is handled
 * by the devtype config op instead, so nothing is done here.
 */
static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];

	if (gpio < 0)
		return;

	if (spi->mode & SPI_CS_HIGH)
		gpio_set_value(gpio, is_active != BITBANG_CS_INACTIVE);
	else
		gpio_set_value(gpio, is_active == BITBANG_CS_INACTIVE);
}
/* Fill the TX FIFO as far as remaining data and FIFO depth allow, then
 * kick off the transfer of whatever was queued. */
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int fifosize = spi_imx_get_fifosize(spi_imx);

	while (spi_imx->count && spi_imx->txfifo < fifosize) {
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	spi_imx->devtype_data->trigger(spi_imx);
}
/*
 * Interrupt handler: drain the RX FIFO, refill the TX FIFO while data
 * remains, and complete the transfer once everything has been shifted
 * out and back in.
 */
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	/* Every received word balances one previously pushed TX word. */
	while (spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		/* More bytes to send: top up the FIFO and re-trigger. */
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	/* Transfer finished: mask all interrupts and wake the waiter. */
	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);
	return IRQ_HANDLED;
}
/*
 * Prepare the controller for one transfer: pick per-transfer speed and
 * word size (falling back to the device defaults when the transfer
 * leaves them zero), select the FIFO accessors matching the word size,
 * and hand the settings to the devtype config op.  Always returns 0.
 *
 * Fix vs. original: the "if (!config.speed_hz)" fallback was duplicated
 * verbatim; the second copy was dead and has been removed.
 */
static int spi_imx_setupxfer(struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	struct spi_imx_config config;

	/* Per-transfer parameters override the device defaults. */
	config.bpw = t ? t->bits_per_word : spi->bits_per_word;
	config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
	config.mode = spi->mode;
	config.cs = spi->chip_select;

	/* Zero means "use the device default". */
	if (!config.speed_hz)
		config.speed_hz = spi->max_speed_hz;
	if (!config.bpw)
		config.bpw = spi->bits_per_word;

	/* Initialize the functions for transfer */
	if (config.bpw <= 8) {
		spi_imx->rx = spi_imx_buf_rx_u8;
		spi_imx->tx = spi_imx_buf_tx_u8;
	} else if (config.bpw <= 16) {
		spi_imx->rx = spi_imx_buf_rx_u16;
		spi_imx->tx = spi_imx_buf_tx_u16;
	} else if (config.bpw <= 32) {
		spi_imx->rx = spi_imx_buf_rx_u32;
		spi_imx->tx = spi_imx_buf_tx_u32;
	} else
		BUG();

	spi_imx->devtype_data->config(spi_imx, &config);

	return 0;
}
/*
 * Execute one transfer synchronously: stash the buffers where the irq
 * handler's tx/rx hooks consume them, prime the TX FIFO, unmask the
 * TX-empty interrupt and sleep until spi_imx_isr() signals completion.
 * Returns the number of bytes transferred.
 */
static int spi_imx_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;

	/* Fresh completion for this transfer, signalled by the irq handler. */
	init_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	wait_for_completion(&spi_imx->xfer_done);

	return transfer->len;
}
/*
 * Per-device setup: park a GPIO chipselect at its inactive level and
 * deassert the chipselect.  Always returns 0.
 */
static int spi_imx_setup(struct spi_device *spi)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int gpio = spi_imx->chipselect[spi->chip_select];

	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		spi->mode, spi->bits_per_word, spi->max_speed_hz);

	if (gpio >= 0) {
		int inactive = (spi->mode & SPI_CS_HIGH) ? 0 : 1;

		gpio_direction_output(gpio, inactive);
	}

	spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

	return 0;
}
/* Per-device cleanup hook: setup allocates nothing, so nothing to free. */
static void spi_imx_cleanup(struct spi_device *spi)
{
}
/*
 * Probe one CSPI/eCSPI instance: resolve chipselect GPIOs from DT or
 * platform data, map the register window, claim irq and clock, quiesce
 * the controller and register the bitbang master.
 *
 * Fix vs. original: the error path called kfree(master) right after
 * spi_master_put(master).  spi_master_put() drops the last reference and
 * the device core frees the spi_master allocation, so the extra kfree()
 * was a double free.
 */
static int __devinit spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_imx_master *mxc_platform_info =
			dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int i, ret, num_cs;

	if (!np && !mxc_platform_info) {
		dev_err(&pdev->dev, "can't get the platform data\n");
		return -EINVAL;
	}

	/* DT is authoritative; fall back to platform data for the CS count. */
	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		if (mxc_platform_info)
			num_cs = mxc_platform_info->num_chipselect;
		else
			return ret;
	}

	/* The trailing chipselect[] array is sized by num_cs. */
	master = spi_alloc_master(&pdev->dev,
			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	master->bus_num = pdev->id;
	master->num_chipselect = num_cs;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = spi_master_get(master);

	for (i = 0; i < master->num_chipselect; i++) {
		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
		if (cs_gpio < 0 && mxc_platform_info)
			cs_gpio = mxc_platform_info->chipselect[i];

		/* Negative entries select the controller's native CS line. */
		spi_imx->chipselect[i] = cs_gpio;
		if (cs_gpio < 0)
			continue;

		ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
		if (ret) {
			dev_err(&pdev->dev, "can't get cs gpios\n");
			goto out_gpio_free;
		}
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	init_completion(&spi_imx->xfer_done);

	spi_imx->devtype_data = of_id ? of_id->data :
		(struct spi_imx_devtype_data *) pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "can't get platform resource\n");
		ret = -ENOMEM;
		goto out_gpio_free;
	}

	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		ret = -EBUSY;
		goto out_gpio_free;
	}

	spi_imx->base = ioremap(res->start, resource_size(res));
	if (!spi_imx->base) {
		ret = -EINVAL;
		goto out_release_mem;
	}

	spi_imx->irq = platform_get_irq(pdev, 0);
	if (spi_imx->irq < 0) {
		ret = -EINVAL;
		goto out_iounmap;
	}

	ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
		goto out_iounmap;
	}

	spi_imx->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(spi_imx->clk)) {
		dev_err(&pdev->dev, "unable to get clock\n");
		ret = PTR_ERR(spi_imx->clk);
		goto out_free_irq;
	}

	clk_enable(spi_imx->clk);
	spi_imx->spi_clk = clk_get_rate(spi_imx->clk);

	/* Quiesce the controller before the first transfer. */
	spi_imx->devtype_data->reset(spi_imx);
	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	dev_info(&pdev->dev, "probed\n");

	return ret;

out_clk_put:
	clk_disable(spi_imx->clk);
	clk_put(spi_imx->clk);
out_free_irq:
	free_irq(spi_imx->irq, spi_imx);
out_iounmap:
	iounmap(spi_imx->base);
out_release_mem:
	release_mem_region(res->start, resource_size(res));
out_gpio_free:
	while (--i >= 0) {
		if (spi_imx->chipselect[i] >= 0)
			gpio_free(spi_imx->chipselect[i]);
	}
	/* spi_master_put() releases the master allocation; no kfree() here. */
	spi_master_put(master);
	platform_set_drvdata(pdev, NULL);
	return ret;
}
/*
 * Tear down one controller instance: stop the bitbang worker, disable
 * the hardware, and release clock, irq, mapping, GPIOs, master reference
 * and memory region in reverse-probe order.
 */
static int __devexit spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int i;

	spi_bitbang_stop(&spi_imx->bitbang);

	/* Disable the controller before unmapping its registers. */
	writel(0, spi_imx->base + MXC_CSPICTRL);
	clk_disable(spi_imx->clk);
	clk_put(spi_imx->clk);
	free_irq(spi_imx->irq, spi_imx);
	iounmap(spi_imx->base);

	/* Only non-negative entries are GPIOs we requested in probe. */
	for (i = 0; i < master->num_chipselect; i++)
		if (spi_imx->chipselect[i] >= 0)
			gpio_free(spi_imx->chipselect[i]);

	spi_master_put(master);

	release_mem_region(res->start, resource_size(res));

	platform_set_drvdata(pdev, NULL);

	return 0;
}
/* Platform driver glue: matches by DT compatible or platform device id. */
static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .owner = THIS_MODULE,
		   .of_match_table = spi_imx_dt_ids,
		   },
	.id_table = spi_imx_devtype,
	.probe = spi_imx_probe,
	.remove = __devexit_p(spi_imx_remove),
};
module_platform_driver(spi_imx_driver);
MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Zaaap/android_kernel_lge_d620 | drivers/block/xsysace.c | 5052 | 33494 | /*
* Xilinx SystemACE device driver
*
* Copyright 2007 Secret Lab Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
/*
* The SystemACE chip is designed to configure FPGAs by loading an FPGA
* bitstream from a file on a CF card and squirting it into FPGAs connected
* to the SystemACE JTAG chain. It also has the advantage of providing an
* MPU interface which can be used to control the FPGA configuration process
* and to use the attached CF card for general purpose storage.
*
* This driver is a block device driver for the SystemACE.
*
* Initialization:
* The driver registers itself as a platform_device driver at module
* load time. The platform bus will take care of calling the
* ace_probe() method for all SystemACE instances in the system. Any
* number of SystemACE instances are supported. ace_probe() calls
* ace_setup() which initialized all data structures, reads the CF
* id structure and registers the device.
*
* Processing:
* Just about all of the heavy lifting in this driver is performed by
* a Finite State Machine (FSM). The driver needs to wait on a number
* of events; some raised by interrupts, some which need to be polled
* for. Describing all of the behaviour in a FSM seems to be the
* easiest way to keep the complexity low and make it easy to
* understand what the driver is doing. If the block ops or the
* request function need to interact with the hardware, then they
* simply need to flag the request and kick of FSM processing.
*
* The FSM itself is atomic-safe code which can be run from any
* context. The general process flow is:
* 1. obtain the ace->lock spinlock.
* 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
* cleared.
* 3. release the lock.
*
* Individual states do not sleep in any way. If a condition needs to
* be waited for then the state much clear the fsm_continue flag and
* either schedule the FSM to be run again at a later time, or expect
* an interrupt to call the FSM when the desired condition is met.
*
* In normal operation, the FSM is processed at interrupt context
* either when the driver's tasklet is scheduled, or when an irq is
* raised by the hardware. The tasklet can be scheduled at any time.
* The request method in particular schedules the tasklet when a new
* request has been indicated by the block layer. Once started, the
* FSM proceeds as far as it can processing the request until it
* needs on a hardware event. At this point, it must yield execution.
*
* A state has two options when yielding execution:
* 1. ace_fsm_yield()
* - Call if need to poll for event.
* - clears the fsm_continue flag to exit the processing loop
* - reschedules the tasklet to run again as soon as possible
* 2. ace_fsm_yieldirq()
* - Call if an irq is expected from the HW
* - clears the fsm_continue flag to exit the processing loop
* - does not reschedule the tasklet so the FSM will not be processed
* again until an irq is received.
* After calling a yield function, the state must return control back
* to the FSM main loop.
*
* Additionally, the driver maintains a kernel timer which can process
* the FSM. If the FSM gets stalled, typically due to a missed
* interrupt, then the kernel timer will expire and the driver can
* continue where it left off.
*
* To Do:
* - Add FPGA configuration control interface.
* - Request major number from lanana
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/platform_device.h>
#if defined(CONFIG_OF)
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("Xilinx SystemACE device driver");
MODULE_LICENSE("GPL");
/* SystemACE register definitions */
#define ACE_BUSMODE (0x00)
#define ACE_STATUS (0x04)
#define ACE_STATUS_CFGLOCK (0x00000001)
#define ACE_STATUS_MPULOCK (0x00000002)
#define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */
#define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */
#define ACE_STATUS_CFDETECT (0x00000010)
#define ACE_STATUS_DATABUFRDY (0x00000020)
#define ACE_STATUS_DATABUFMODE (0x00000040)
#define ACE_STATUS_CFGDONE (0x00000080)
#define ACE_STATUS_RDYFORCFCMD (0x00000100)
#define ACE_STATUS_CFGMODEPIN (0x00000200)
#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
#define ACE_STATUS_CFBSY (0x00020000)
#define ACE_STATUS_CFRDY (0x00040000)
#define ACE_STATUS_CFDWF (0x00080000)
#define ACE_STATUS_CFDSC (0x00100000)
#define ACE_STATUS_CFDRQ (0x00200000)
#define ACE_STATUS_CFCORR (0x00400000)
#define ACE_STATUS_CFERR (0x00800000)
#define ACE_ERROR (0x08)
#define ACE_CFGLBA (0x0c)
#define ACE_MPULBA (0x10)
#define ACE_SECCNTCMD (0x14)
#define ACE_SECCNTCMD_RESET (0x0100)
#define ACE_SECCNTCMD_IDENTIFY (0x0200)
#define ACE_SECCNTCMD_READ_DATA (0x0300)
#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
#define ACE_SECCNTCMD_ABORT (0x0600)
#define ACE_VERSION (0x16)
#define ACE_VERSION_REVISION_MASK (0x00FF)
#define ACE_VERSION_MINOR_MASK (0x0F00)
#define ACE_VERSION_MAJOR_MASK (0xF000)
#define ACE_CTRL (0x18)
#define ACE_CTRL_FORCELOCKREQ (0x0001)
#define ACE_CTRL_LOCKREQ (0x0002)
#define ACE_CTRL_FORCECFGADDR (0x0004)
#define ACE_CTRL_FORCECFGMODE (0x0008)
#define ACE_CTRL_CFGMODE (0x0010)
#define ACE_CTRL_CFGSTART (0x0020)
#define ACE_CTRL_CFGSEL (0x0040)
#define ACE_CTRL_CFGRESET (0x0080)
#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
#define ACE_CTRL_ERRORIRQ (0x0200)
#define ACE_CTRL_CFGDONEIRQ (0x0400)
#define ACE_CTRL_RESETIRQ (0x0800)
#define ACE_CTRL_CFGPROG (0x1000)
#define ACE_CTRL_CFGADDR_MASK (0xe000)
#define ACE_FATSTAT (0x1c)
#define ACE_NUM_MINORS 16
#define ACE_SECTOR_SIZE (512)
#define ACE_FIFO_SIZE (32)
#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
#define ACE_BUS_WIDTH_8 0
#define ACE_BUS_WIDTH_16 1
struct ace_reg_ops;

/*
 * Per-instance state for one SystemACE controller: FSM bookkeeping, the
 * in-flight request, hardware resources and the block-layer objects.
 */
struct ace_device {
	/* driver state data */
	int id;
	int media_change;
	int users;
	struct list_head list;

	/* finite state machine data */
	struct tasklet_struct fsm_tasklet;
	uint fsm_task;		/* Current activity (ACE_TASK_*) */
	uint fsm_state;		/* Current state (ACE_FSM_STATE_*) */
	uint fsm_continue_flag;	/* cleared to exit FSM mainloop */
	uint fsm_iter_num;
	struct timer_list stall_timer;

	/* Transfer state/result, use for both id and block request */
	struct request *req;	/* request being processed */
	void *data_ptr;		/* pointer to I/O buffer */
	int data_count;		/* number of buffers remaining */
	int data_result;	/* Result of transfer; 0 := success */

	int id_req_count;	/* count of id requests */
	int id_result;
	struct completion id_completion;	/* used when id req finishes */
	int in_irq;

	/* Details of hardware device */
	resource_size_t physaddr;
	void __iomem *baseaddr;
	int irq;
	int bus_width;		/* 0 := 8 bit; 1 := 16 bit */
	struct ace_reg_ops *reg_ops;	/* accessors for the active bus width */
	int lock_count;

	/* Block device data structures */
	spinlock_t lock;
	struct device *dev;
	struct request_queue *queue;
	struct gendisk *gd;

	/* Inserted CF card parameters */
	u16 cf_id[ATA_ID_WORDS];
};
/* Driver-wide mutex — its users are outside this chunk; presumably it
 * serializes block-device open/release.  TODO confirm against full file. */
static DEFINE_MUTEX(xsysace_mutex);
/* Block major number; assignment happens outside this chunk. */
static int ace_major;
/* ---------------------------------------------------------------------
* Low level register access
*/
/* Register/FIFO accessors for one bus attachment (8-bit, 16-bit BE/LE). */
struct ace_reg_ops {
	u16(*in) (struct ace_device * ace, int reg);		/* 16-bit register read */
	void (*out) (struct ace_device * ace, int reg, u16 val);/* 16-bit register write */
	void (*datain) (struct ace_device * ace);		/* read one FIFO chunk */
	void (*dataout) (struct ace_device * ace);		/* write one FIFO chunk */
};
/* 8 Bit bus width */
/* 16-bit register read over the 8-bit bus: two byte reads, low byte first. */
static u16 ace_in_8(struct ace_device *ace, int reg)
{
	void __iomem *r = ace->baseaddr + reg;
	u16 lo = in_8(r);
	u16 hi = in_8(r + 1);

	return lo | (hi << 8);
}
/* 16-bit register write over the 8-bit bus: low byte first, then high. */
static void ace_out_8(struct ace_device *ace, int reg, u16 val)
{
	void __iomem *r = ace->baseaddr + reg;

	out_8(r, val & 0xff);
	out_8(r + 1, (val >> 8) & 0xff);
}
static void ace_datain_8(struct ace_device *ace)
{
void __iomem *r = ace->baseaddr + 0x40;
u8 *dst = ace->data_ptr;
int i = ACE_FIFO_SIZE;
while (i--)
*dst++ = in_8(r++);
ace->data_ptr = dst;
}
static void ace_dataout_8(struct ace_device *ace)
{
void __iomem *r = ace->baseaddr + 0x40;
u8 *src = ace->data_ptr;
int i = ACE_FIFO_SIZE;
while (i--)
out_8(r++, *src++);
ace->data_ptr = src;
}
/* Register accessors for an 8-bit bus attachment */
static struct ace_reg_ops ace_reg_8_ops = {
	.in = ace_in_8,
	.out = ace_out_8,
	.datain = ace_datain_8,
	.dataout = ace_dataout_8,
};
/* 16 bit big endian bus attachment */
static u16 ace_in_be16(struct ace_device *ace, int reg)
{
return in_be16(ace->baseaddr + reg);
}
/* Write a 16-bit register on a big-endian 16-bit bus attachment. */
static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
{
	void __iomem *addr = ace->baseaddr + reg;

	out_be16(addr, val);
}
static void ace_datain_be16(struct ace_device *ace)
{
int i = ACE_FIFO_SIZE / 2;
u16 *dst = ace->data_ptr;
while (i--)
*dst++ = in_le16(ace->baseaddr + 0x40);
ace->data_ptr = dst;
}
static void ace_dataout_be16(struct ace_device *ace)
{
int i = ACE_FIFO_SIZE / 2;
u16 *src = ace->data_ptr;
while (i--)
out_le16(ace->baseaddr + 0x40, *src++);
ace->data_ptr = src;
}
/* 16 bit little endian bus attachment */
static u16 ace_in_le16(struct ace_device *ace, int reg)
{
return in_le16(ace->baseaddr + reg);
}
/* Write a 16-bit register on a little-endian 16-bit bus attachment. */
static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
{
	void __iomem *addr = ace->baseaddr + reg;

	out_le16(addr, val);
}
static void ace_datain_le16(struct ace_device *ace)
{
int i = ACE_FIFO_SIZE / 2;
u16 *dst = ace->data_ptr;
while (i--)
*dst++ = in_be16(ace->baseaddr + 0x40);
ace->data_ptr = dst;
}
static void ace_dataout_le16(struct ace_device *ace)
{
int i = ACE_FIFO_SIZE / 2;
u16 *src = ace->data_ptr;
while (i--)
out_be16(ace->baseaddr + 0x40, *src++);
ace->data_ptr = src;
}
/* Register accessors for a big-endian 16-bit bus attachment */
static struct ace_reg_ops ace_reg_be16_ops = {
	.in = ace_in_be16,
	.out = ace_out_be16,
	.datain = ace_datain_be16,
	.dataout = ace_dataout_be16,
};
/* Register accessors for a little-endian 16-bit bus attachment */
static struct ace_reg_ops ace_reg_le16_ops = {
	.in = ace_in_le16,
	.out = ace_out_le16,
	.datain = ace_datain_le16,
	.dataout = ace_dataout_le16,
};
/* Read a 16-bit register through the bus-specific accessor. */
static inline u16 ace_in(struct ace_device *ace, int reg)
{
	return ace->reg_ops->in(ace, reg);
}
/* Read a 32-bit register as two 16-bit accesses, low half first. */
static inline u32 ace_in32(struct ace_device *ace, int reg)
{
	u32 lo = ace_in(ace, reg);
	u32 hi = ace_in(ace, reg + 2);

	return lo | (hi << 16);
}
/* Write a 16-bit register through the bus-specific accessor. */
static inline void ace_out(struct ace_device *ace, int reg, u16 val)
{
	ace->reg_ops->out(ace, reg, val);
}
/* Write a 32-bit register as two 16-bit accesses, low half first. */
static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
{
	ace_out(ace, reg, val & 0xffff);
	ace_out(ace, reg + 2, (val >> 16) & 0xffff);
}
/* ---------------------------------------------------------------------
* Debug support functions
*/
#if defined(DEBUG)
/* Hex+ASCII dump of a memory region, 16 bytes per line (debug builds only). */
static void ace_dump_mem(void *base, int len)
{
	const char *ptr = base;
	int i, j;

	for (i = 0; i < len; i += 16) {
		printk(KERN_INFO "%.8x:", i);
		for (j = 0; j < 16; j++) {
			if (!(j % 4))
				printk(" ");
			printk("%.2x", ptr[i + j]);
		}
		printk(" ");
		for (j = 0; j < 16; j++)
			printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
		printk("\n");
	}
}
#else
/* No-op stub when DEBUG is disabled */
static inline void ace_dump_mem(void *base, int len)
{
}
#endif
/* Log a snapshot of the main SystemACE registers (used on error paths). */
static void ace_dump_regs(struct ace_device *ace)
{
	dev_info(ace->dev,
		 "    ctrl:  %.8x  seccnt/cmd: %.4x      ver:%.4x\n"
		 "    status:%.8x  mpu_lba:%.8x  busmode:%4x\n"
		 "    error: %.8x  cfg_lba:%.8x  fatstat:%.4x\n",
		 ace_in32(ace, ACE_CTRL),
		 ace_in(ace, ACE_SECCNTCMD),
		 ace_in(ace, ACE_VERSION),
		 ace_in32(ace, ACE_STATUS),
		 ace_in32(ace, ACE_MPULBA),
		 ace_in(ace, ACE_BUSMODE),
		 ace_in32(ace, ACE_ERROR),
		 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
}
/* Byte-swap the raw ATA IDENTIFY data on big-endian hosts; the data is
 * read as little-endian half-words. No-op on little-endian builds. */
void ace_fix_driveid(u16 *id)
{
#if defined(__BIG_ENDIAN)
	int i;

	/* All half words have wrong byte order; swap the bytes */
	for (i = 0; i < ATA_ID_WORDS; i++, id++)
		*id = le16_to_cpu(*id);
#endif
}
/* ---------------------------------------------------------------------
* Finite State Machine (FSM) implementation
*/
/* FSM tasks; used to direct state transitions */
#define ACE_TASK_IDLE 0
#define ACE_TASK_IDENTIFY 1
#define ACE_TASK_READ 2
#define ACE_TASK_WRITE 3
#define ACE_FSM_NUM_TASKS 4
/* FSM state definitions */
#define ACE_FSM_STATE_IDLE 0
#define ACE_FSM_STATE_REQ_LOCK 1
#define ACE_FSM_STATE_WAIT_LOCK 2
#define ACE_FSM_STATE_WAIT_CFREADY 3
#define ACE_FSM_STATE_IDENTIFY_PREPARE 4
#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5
#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6
#define ACE_FSM_STATE_REQ_PREPARE 7
#define ACE_FSM_STATE_REQ_TRANSFER 8
#define ACE_FSM_STATE_REQ_COMPLETE 9
#define ACE_FSM_STATE_ERROR 10
#define ACE_FSM_NUM_STATES 11
/* Set flag to exit FSM loop and reschedule tasklet */
/* Set flag to exit FSM loop and reschedule tasklet; used when the state
 * must be polled again soon (no interrupt will arrive for it). */
static inline void ace_fsm_yield(struct ace_device *ace)
{
	dev_dbg(ace->dev, "ace_fsm_yield()\n");
	tasklet_schedule(&ace->fsm_tasklet);
	ace->fsm_continue_flag = 0;
}
/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet.
 * In polled mode (no IRQ assigned) the tasklet is rescheduled directly. */
static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
	dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");

	if (!ace->irq)
		/* No IRQ assigned, so need to poll */
		tasklet_schedule(&ace->fsm_tasklet);
	ace->fsm_continue_flag = 0;
}
/* Get the next read/write request; ending requests that we don't handle */
/* Return the next filesystem read/write request without dequeueing it;
 * any non-filesystem request at the head of the queue is failed with -EIO
 * and skipped. Returns NULL when the queue is empty. */
struct request *ace_get_next_request(struct request_queue *q)
{
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);
		if (!req || req->cmd_type == REQ_TYPE_FS)
			return req;
		/* Not a request we handle; fail it and keep scanning. */
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
}
/* Execute one step of the driver state machine. Must be called with
 * ace->lock held; the yield helpers clear ace->fsm_continue_flag to tell
 * the caller's dispatch loop to stop. */
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		/* Nothing pending; stop the watchdog and exit the loop */
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transfers; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd,
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state. The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		/* Note: a write falls straight through to the transfer
		 * state; only a read waits here for the data-ready irq. */
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transfers; jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
/* Tasklet entry point: run the state machine under the device lock until
 * it yields. */
static void ace_fsm_tasklet(unsigned long data)
{
	struct ace_device *ace = (void *)data;
	unsigned long flags;

	spin_lock_irqsave(&ace->lock, flags);

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	do {
		ace_fsm_dostate(ace);
	} while (ace->fsm_continue_flag);

	spin_unlock_irqrestore(&ace->lock, flags);
}
/* Watchdog timer: kicks a stalled state machine by running it directly. */
static void ace_stall_timer(unsigned long data)
{
	struct ace_device *ace = (void *)data;
	unsigned long flags;

	dev_warn(ace->dev,
		 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
		 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
		 ace->data_count);
	spin_lock_irqsave(&ace->lock, flags);

	/* Rearm the stall timer *before* entering FSM (which may then
	 * delete the timer) */
	mod_timer(&ace->stall_timer, jiffies + HZ);

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	do {
		ace_fsm_dostate(ace);
	} while (ace->fsm_continue_flag);

	spin_unlock_irqrestore(&ace->lock, flags);
}
/* ---------------------------------------------------------------------
* Interrupt handling routines
*/
/* Inspect status/control registers for a flagged transfer error.
 * Returns -EIO (after dumping registers) on error, 0 otherwise. */
static int ace_interrupt_checkstate(struct ace_device *ace)
{
	u32 sreg = ace_in32(ace, ACE_STATUS);
	u16 creg = ace_in(ace, ACE_CTRL);
	int errors_flagged = sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR);
	int error_irq_armed = creg & ACE_CTRL_ERRORIRQ;

	/* Check for error occurrence */
	if (errors_flagged && error_irq_armed) {
		dev_err(ace->dev, "transfer failure\n");
		ace_dump_regs(ace);
		return -EIO;
	}

	return 0;
}
/* Primary interrupt handler: acknowledge the interrupt, record any I/O
 * error, then run the state machine until it yields. The ack sequence
 * (set RESETIRQ, then restore CTRL) must precede the FSM run. */
static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	u16 creg;
	struct ace_device *ace = dev_id;

	/* be safe and get the lock */
	spin_lock(&ace->lock);
	ace->in_irq = 1;

	/* clear the interrupt */
	creg = ace_in(ace, ACE_CTRL);
	ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
	ace_out(ace, ACE_CTRL, creg);

	/* check for IO failures */
	if (ace_interrupt_checkstate(ace))
		ace->data_result = -EIO;

	if (ace->fsm_task == 0) {
		dev_err(ace->dev,
			"spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
			ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
			ace_in(ace, ACE_SECCNTCMD));
		dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
			ace->fsm_task, ace->fsm_state, ace->data_count);
	}

	/* Loop over state machine until told to stop */
	ace->fsm_continue_flag = 1;
	while (ace->fsm_continue_flag)
		ace_fsm_dostate(ace);

	/* done with interrupt; drop the lock */
	ace->in_irq = 0;
	spin_unlock(&ace->lock);

	return IRQ_HANDLED;
}
/* ---------------------------------------------------------------------
* Block ops
*/
/* Block layer request callback: if a usable request is queued, kick the
 * state machine tasklet to process it. */
static void ace_request(struct request_queue *q)
{
	struct request *req = ace_get_next_request(q);

	if (req) {
		struct ace_device *ace = req->rq_disk->private_data;

		tasklet_schedule(&ace->fsm_tasklet);
	}
}
/* Report a media-change event when the driver has flagged one. */
static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
{
	struct ace_device *ace = gd->private_data;

	dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change);

	if (ace->media_change)
		return DISK_EVENT_MEDIA_CHANGE;
	return 0;
}
/* Block-layer revalidate hook: when the media has changed, request an
 * IDENTIFY from the state machine and sleep until it completes.
 * Returns the result of the last identify (0 on success, -errno). */
static int ace_revalidate_disk(struct gendisk *gd)
{
	struct ace_device *ace = gd->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_revalidate_disk()\n");

	if (ace->media_change) {
		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");

		/* id_req_count is consumed by the FSM under ace->lock */
		spin_lock_irqsave(&ace->lock, flags);
		ace->id_req_count++;
		spin_unlock_irqrestore(&ace->lock, flags);

		tasklet_schedule(&ace->fsm_tasklet);
		wait_for_completion(&ace->id_completion);
	}
	dev_dbg(ace->dev, "revalidate complete\n");
	return ace->id_result;
}
/* Block device open(): bump the user count and re-check for media change. */
static int ace_open(struct block_device *bdev, fmode_t mode)
{
	struct ace_device *ace = bdev->bd_disk->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users++;
	spin_unlock_irqrestore(&ace->lock, flags);

	check_disk_change(bdev);
	mutex_unlock(&xsysace_mutex);

	return 0;
}
/* Block device release(): drop the user count; when the last user goes
 * away, release the MPU lock request on the CF controller. */
static int ace_release(struct gendisk *disk, fmode_t mode)
{
	struct ace_device *ace = disk->private_data;
	unsigned long flags;
	u16 val;

	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);

	mutex_lock(&xsysace_mutex);
	spin_lock_irqsave(&ace->lock, flags);
	ace->users--;
	if (ace->users == 0) {
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
	}
	spin_unlock_irqrestore(&ace->lock, flags);
	mutex_unlock(&xsysace_mutex);
	return 0;
}
/* Report CHS geometry taken from the cached ATA IDENTIFY words. */
static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ace_device *ace = bdev->bd_disk->private_data;

	dev_dbg(ace->dev, "ace_getgeo()\n");

	geo->heads = ace->cf_id[ATA_ID_HEADS];
	geo->sectors = ace->cf_id[ATA_ID_SECTORS];
	geo->cylinders = ace->cf_id[ATA_ID_CYLS];

	return 0;
}
/* Block device operations table */
static const struct block_device_operations ace_fops = {
	.owner = THIS_MODULE,
	.open = ace_open,
	.release = ace_release,
	.check_events = ace_check_events,
	.revalidate_disk = ace_revalidate_disk,
	.getgeo = ace_getgeo,
};
/* --------------------------------------------------------------------
* SystemACE device setup/teardown code
*/
/* Map, probe and register one SystemACE device: ioremap the registers,
 * set up the tasklet/timer/queue/gendisk, detect bus endianness, sanity
 * check the version register, hook the irq (falling back to polled mode
 * on failure) and publish the disk. Returns 0 on success or -ENOMEM on
 * any failure, releasing everything acquired so far. */
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;
	int rc;

	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
		(unsigned long long)ace->physaddr, ace->irq);

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_logical_block_size(ace->queue, 512);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == ACE_BUS_WIDTH_16) {
		/* 0x0101 should work regardless of endianess */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianess */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Now we can hook up the irq handler */
	if (ace->irq) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = 0;
		}
	}

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);

	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);

	return 0;

err_read:
	put_disk(ace->gd);
err_alloc_disk:
	blk_cleanup_queue(ace->queue);
err_blk_initq:
	iounmap(ace->baseaddr);
err_ioremap:
	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
		 (unsigned long long) ace->physaddr);
	return -ENOMEM;
}
/* Undo ace_setup(): unregister the disk, stop deferred work and release
 * all hardware resources. Must be called with no I/O in flight. */
static void __devexit ace_teardown(struct ace_device *ace)
{
	if (ace->gd) {
		del_gendisk(ace->gd);
		put_disk(ace->gd);
	}

	if (ace->queue)
		blk_cleanup_queue(ace->queue);

	tasklet_kill(&ace->fsm_tasklet);

	/* Fix: the stall timer is armed whenever the FSM leaves the idle
	 * state and re-arms itself in its handler; without this it could
	 * fire after the ace_device is freed (use-after-free). */
	del_timer_sync(&ace->stall_timer);

	if (ace->irq)
		free_irq(ace->irq, ace);

	iounmap(ace->baseaddr);
}
/* Bus-independent allocation: create an ace_device for the given resources
 * and run ace_setup() on it. Returns 0 on success or a negative errno. */
static int __devinit
ace_alloc(struct device *dev, int id, resource_size_t physaddr,
	  int irq, int bus_width)
{
	struct ace_device *ace;
	int rc;
	dev_dbg(dev, "ace_alloc(%p)\n", dev);

	if (!physaddr) {
		rc = -ENODEV;
		goto err_noreg;
	}

	/* Allocate and initialize the ace device structure */
	ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
	if (!ace) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	ace->dev = dev;
	ace->id = id;
	ace->physaddr = physaddr;
	ace->irq = irq;
	ace->bus_width = bus_width;

	/* Call the setup code */
	rc = ace_setup(ace);
	if (rc)
		goto err_setup;

	dev_set_drvdata(dev, ace);
	return 0;

err_setup:
	dev_set_drvdata(dev, NULL);
	kfree(ace);
err_alloc:
err_noreg:
	dev_err(dev, "could not initialize device, err=%i\n", rc);
	return rc;
}
/* Counterpart of ace_alloc(): tear down and free the device, if any. */
static void __devexit ace_free(struct device *dev)
{
	struct ace_device *ace = dev_get_drvdata(dev);

	dev_dbg(dev, "ace_free(%p)\n", dev);

	if (!ace)
		return;

	ace_teardown(ace);
	dev_set_drvdata(dev, NULL);
	kfree(ace);
}
/* ---------------------------------------------------------------------
* Platform Bus Support
*/
/* Platform bus probe: collect the device id, bus width and MEM/IRQ
 * resources (from DT properties when present), then hand off to the
 * bus-independent setup code. */
static int __devinit ace_probe(struct platform_device *dev)
{
	resource_size_t physaddr = 0;
	int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
	u32 id = dev->id;
	int irq = 0;
	int i;

	dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);

	/* device id and bus width */
	of_property_read_u32(dev->dev.of_node, "port-number", &id);
	/* Fix: dev->id is -1 when no platform id was assigned; with an
	 * unsigned u32 the old "id < 0" test was always false, so the
	 * fallback to 0 never triggered. Compare as signed instead. */
	if ((int)id < 0)
		id = 0;
	if (of_find_property(dev->dev.of_node, "8-bit", NULL))
		bus_width = ACE_BUS_WIDTH_8;

	for (i = 0; i < dev->num_resources; i++) {
		if (dev->resource[i].flags & IORESOURCE_MEM)
			physaddr = dev->resource[i].start;
		if (dev->resource[i].flags & IORESOURCE_IRQ)
			irq = dev->resource[i].start;
	}

	/* Call the bus-independent setup code */
	return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
}
/*
* Platform bus remove() method
*/
/* Platform bus remove(): release everything allocated by ace_probe(). */
static int __devexit ace_remove(struct platform_device *dev)
{
	ace_free(&dev->dev);
	return 0;
}
#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id ace_of_match[] __devinitconst = {
	{ .compatible = "xlnx,opb-sysace-1.00.b", },
	{ .compatible = "xlnx,opb-sysace-1.00.c", },
	{ .compatible = "xlnx,xps-sysace-1.00.a", },
	{ .compatible = "xlnx,sysace", },
	{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
#else /* CONFIG_OF */
/* Without CONFIG_OF the driver binds by platform device name only */
#define ace_of_match NULL
#endif /* CONFIG_OF */
/* Platform driver glue; matches either "xsysace" platform devices or the
 * device-tree compatibles listed in ace_of_match. */
static struct platform_driver ace_platform_driver = {
	.probe = ace_probe,
	.remove = __devexit_p(ace_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "xsysace",
		.of_match_table = ace_of_match,
	},
};
/* ---------------------------------------------------------------------
* Module init/exit routines
*/
/* Module init: register the block major (dynamically, since ace_major
 * starts at 0) and the platform driver. */
static int __init ace_init(void)
{
	int rc;

	ace_major = register_blkdev(ace_major, "xsysace");
	if (ace_major <= 0) {
		rc = -ENOMEM;
		goto err_blk;
	}

	rc = platform_driver_register(&ace_platform_driver);
	if (rc)
		goto err_plat;

	pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
	return 0;

err_plat:
	unregister_blkdev(ace_major, "xsysace");
err_blk:
	printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
	return rc;
}
module_init(ace_init);
/* Module exit: unregister the platform driver and the block major. */
static void __exit ace_exit(void)
{
	pr_debug("Unregistering Xilinx SystemACE driver\n");
	platform_driver_unregister(&ace_platform_driver);
	unregister_blkdev(ace_major, "xsysace");
}
module_exit(ace_exit);
| gpl-2.0 |
lloydchang/ubuntu-precise | drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c | 5052 | 9561 | /*
* linux/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include "regs-mfc.h"
#include "s5p_mfc_cmd.h"
#include "s5p_mfc_common.h"
#include "s5p_mfc_debug.h"
#include "s5p_mfc_intr.h"
#include "s5p_mfc_pm.h"
/* Module-wide firmware (bitprocessor) buffer state: the vb2 buffer handle,
 * its physical/DMA address and its kernel virtual mapping. */
static void *s5p_mfc_bitproc_buf;
static size_t s5p_mfc_bitproc_phys;
static unsigned char *s5p_mfc_bitproc_virt;
/* Allocate and load firmware */
int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
{
struct firmware *fw_blob;
size_t bank2_base_phys;
void *b_base;
int err;
/* Firmare has to be present as a separate file or compiled
* into kernel. */
mfc_debug_enter();
err = request_firmware((const struct firmware **)&fw_blob,
"s5p-mfc.fw", dev->v4l2_dev.dev);
if (err != 0) {
mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
return -EINVAL;
}
dev->fw_size = ALIGN(fw_blob->size, FIRMWARE_ALIGN);
if (s5p_mfc_bitproc_buf) {
mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
release_firmware(fw_blob);
return -ENOMEM;
}
s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
if (IS_ERR(s5p_mfc_bitproc_buf)) {
s5p_mfc_bitproc_buf = 0;
mfc_err("Allocating bitprocessor buffer failed\n");
release_firmware(fw_blob);
return -ENOMEM;
}
s5p_mfc_bitproc_phys = s5p_mfc_mem_cookie(
dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], s5p_mfc_bitproc_buf);
if (s5p_mfc_bitproc_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
s5p_mfc_bitproc_buf = 0;
release_firmware(fw_blob);
return -EIO;
}
s5p_mfc_bitproc_virt = vb2_dma_contig_memops.vaddr(s5p_mfc_bitproc_buf);
if (!s5p_mfc_bitproc_virt) {
mfc_err("Bitprocessor memory remap failed\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
s5p_mfc_bitproc_buf = 0;
release_firmware(fw_blob);
return -EIO;
}
dev->bank1 = s5p_mfc_bitproc_phys;
b_base = vb2_dma_contig_memops.alloc(
dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], 1 << MFC_BANK2_ALIGN_ORDER);
if (IS_ERR(b_base)) {
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
s5p_mfc_bitproc_buf = 0;
mfc_err("Allocating bank2 base failed\n");
release_firmware(fw_blob);
return -ENOMEM;
}
bank2_base_phys = s5p_mfc_mem_cookie(
dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
vb2_dma_contig_memops.put(b_base);
if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_phys = 0;
s5p_mfc_bitproc_buf = 0;
release_firmware(fw_blob);
return -EIO;
}
dev->bank2 = bank2_base_phys;
memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
wmb();
release_firmware(fw_blob);
mfc_debug_leave();
return 0;
}
/* Reload the firmware image into the already-allocated bitprocessor
 * buffer (e.g. after resume). Returns 0 on success, -EINVAL if the
 * firmware is missing/unallocated, -ENOMEM if the image grew too large. */
int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
{
	struct firmware *fw_blob;
	int err;

	/* Firmware has to be present as a separate file or compiled
	 * into kernel. */
	mfc_debug_enter();
	err = request_firmware((const struct firmware **)&fw_blob,
			       "s5p-mfc.fw", dev->v4l2_dev.dev);
	if (err != 0) {
		mfc_err("Firmware is not present in the /lib/firmware directory nor compiled in kernel\n");
		return -EINVAL;
	}
	if (fw_blob->size > dev->fw_size) {
		mfc_err("MFC firmware is too big to be loaded\n");
		err = -ENOMEM;
		goto out_release;
	}
	if (!s5p_mfc_bitproc_buf || !s5p_mfc_bitproc_phys) {
		mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
		err = -EINVAL;
		goto out_release;
	}
	memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
	wmb();
	release_firmware(fw_blob);
	mfc_debug_leave();
	return 0;

out_release:
	release_firmware(fw_blob);
	return err;
}
/* Release firmware memory */
int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
{
/* Before calling this function one has to make sure
* that MFC is no longer processing */
if (!s5p_mfc_bitproc_buf)
return -EINVAL;
vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
s5p_mfc_bitproc_virt = 0;
s5p_mfc_bitproc_phys = 0;
s5p_mfc_bitproc_buf = 0;
return 0;
}
/* Reset the device: put the RISC core and peripherals into reset and wait
 * (up to MFC_BW_TIMEOUT ms) for the memory controller to go idle.
 * Returns 0 on success, -EIO on timeout. */
int s5p_mfc_reset(struct s5p_mfc_dev *dev)
{
	unsigned int mc_status;
	unsigned long timeout;

	mfc_debug_enter();
	/* Stop procedure */
	/* reset RISC */
	mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
	/* All reset except for MC */
	mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
	mdelay(10);

	timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
	/* Check MC status: poll until both MC busy bits clear */
	do {
		if (time_after(jiffies, timeout)) {
			mfc_err("Timeout while resetting MFC\n");
			return -EIO;
		}

		mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS);

	} while (mc_status & 0x3);

	mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
	mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
	mfc_debug_leave();
	return 0;
}
/* Program the DRAM base addresses of the two MFC memory banks. */
static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
	mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
	mfc_debug(2, "Bank1: %08x, Bank2: %08x\n", dev->bank1, dev->bank2);
}
/* Clear the host<->RISC command channels and invalidate both channel
 * instance ids. */
static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID);
	mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD);
}
/* Initialize hardware: reset the MFC, program the memory banks, release
 * the RISC from reset, wait for the firmware to boot and issue the system
 * init command. The MFC clock is held for the duration of the sequence
 * and released on every exit path. Returns 0 or a negative errno. */
int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
{
	unsigned int ver;
	int ret;

	mfc_debug_enter();
	if (!s5p_mfc_bitproc_buf)
		return -EINVAL;

	/* 0. MFC reset */
	mfc_debug(2, "MFC reset..\n");
	s5p_mfc_clock_on();
	ret = s5p_mfc_reset(dev);
	if (ret) {
		mfc_err("Failed to reset MFC - timeout\n");
		/* Fix: do not leak the clock reference taken above on the
		 * reset-failure path. */
		s5p_mfc_clock_off();
		return ret;
	}
	mfc_debug(2, "Done MFC reset..\n");
	/* 1. Set DRAM base Addr */
	s5p_mfc_init_memctrl(dev);
	/* 2. Initialize registers of channel I/F */
	s5p_mfc_clear_cmds(dev);
	/* 3. Release reset signal to the RISC */
	s5p_mfc_clean_dev_int_flags(dev);
	mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
	mfc_debug(2, "Will now wait for completion of firmware transfer\n");
	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_FW_STATUS_RET)) {
		mfc_err("Failed to load firmware\n");
		s5p_mfc_reset(dev);
		s5p_mfc_clock_off();
		return -EIO;
	}
	s5p_mfc_clean_dev_int_flags(dev);
	/* 4. Initialize firmware */
	ret = s5p_mfc_sys_init_cmd(dev);
	if (ret) {
		mfc_err("Failed to send command to MFC - timeout\n");
		s5p_mfc_reset(dev);
		s5p_mfc_clock_off();
		return ret;
	}
	mfc_debug(2, "Ok, now will write a command to init the system\n");
	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SYS_INIT_RET)) {
		mfc_err("Failed to load firmware\n");
		s5p_mfc_reset(dev);
		s5p_mfc_clock_off();
		return -EIO;
	}
	dev->int_cond = 0;
	if (dev->int_err != 0 || dev->int_type !=
					S5P_FIMV_R2H_CMD_SYS_INIT_RET) {
		/* Failure. */
		mfc_err("Failed to init firmware - error: %d int: %d\n",
			dev->int_err, dev->int_type);
		s5p_mfc_reset(dev);
		s5p_mfc_clock_off();
		return -EIO;
	}
	ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
	mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
		  (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
	s5p_mfc_clock_off();
	mfc_debug_leave();
	return 0;
}
/* Put the MFC firmware to sleep (suspend). The clock reference taken at
 * entry is released on every exit path. Returns 0 or a negative errno. */
int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
{
	int ret;

	mfc_debug_enter();
	s5p_mfc_clock_on();
	s5p_mfc_clean_dev_int_flags(dev);
	ret = s5p_mfc_sleep_cmd(dev);
	if (ret) {
		mfc_err("Failed to send command to MFC - timeout\n");
		/* Fix: release the clock reference on the failure path. */
		s5p_mfc_clock_off();
		return ret;
	}
	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SLEEP_RET)) {
		mfc_err("Failed to sleep\n");
		/* Fix: release the clock reference on the failure path. */
		s5p_mfc_clock_off();
		return -EIO;
	}
	s5p_mfc_clock_off();
	dev->int_cond = 0;
	if (dev->int_err != 0 || dev->int_type !=
						S5P_FIMV_R2H_CMD_SLEEP_RET) {
		/* Failure. */
		mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err,
			dev->int_type);
		return -EIO;
	}
	mfc_debug_leave();
	return ret;
}
/* Wake the MFC firmware up (resume): reset, reprogram the memory banks,
 * send the wakeup command and release the RISC from reset. The clock
 * reference taken at entry is released on every exit path.
 * Returns 0 or a negative errno. */
int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
{
	int ret;

	mfc_debug_enter();
	/* 0. MFC reset */
	mfc_debug(2, "MFC reset..\n");
	s5p_mfc_clock_on();
	ret = s5p_mfc_reset(dev);
	if (ret) {
		mfc_err("Failed to reset MFC - timeout\n");
		/* Fix: release the clock reference on the failure path. */
		s5p_mfc_clock_off();
		return ret;
	}
	mfc_debug(2, "Done MFC reset..\n");
	/* 1. Set DRAM base Addr */
	s5p_mfc_init_memctrl(dev);
	/* 2. Initialize registers of channel I/F */
	s5p_mfc_clear_cmds(dev);
	s5p_mfc_clean_dev_int_flags(dev);
	/* 3. Initialize firmware */
	ret = s5p_mfc_wakeup_cmd(dev);
	if (ret) {
		mfc_err("Failed to send command to MFC - timeout\n");
		/* Fix: release the clock reference on the failure path. */
		s5p_mfc_clock_off();
		return ret;
	}
	/* 4. Release reset signal to the RISC */
	mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
	mfc_debug(2, "Ok, now will write a command to wakeup the system\n");
	if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_WAKEUP_RET)) {
		mfc_err("Failed to load firmware\n");
		/* Fix: release the clock reference on the failure path. */
		s5p_mfc_clock_off();
		return -EIO;
	}
	s5p_mfc_clock_off();
	dev->int_cond = 0;
	if (dev->int_err != 0 || dev->int_type !=
						S5P_FIMV_R2H_CMD_WAKEUP_RET) {
		/* Failure. */
		mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err,
			dev->int_type);
		return -EIO;
	}
	mfc_debug_leave();
	return 0;
}
| gpl-2.0 |
aosp-samsung-msm7x30/android_kernel_samsung_msm7x30-common | arch/x86/kernel/x8664_ksyms_64.c | 7868 | 1436 | /* Exports for assembly files.
All C exports should go in the respective C files. */
#include <linux/module.h>
#include <linux/smp.h>
#include <net/checksum.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
/* mcount is defined in assembly */
EXPORT_SYMBOL(mcount);
#endif
/* Fixed-size uaccess fast paths, implemented in assembly. */
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
/* Bulk user-space copy variants selected at runtime via alternatives. */
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
EXPORT_SYMBOL(__copy_user_nocache);
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);
/* Page-sized copy/clear and checksum helpers, implemented in assembly. */
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(csum_partial);
/*
 * Export string functions. We normally rely on gcc builtin for most of these,
 * but gcc sometimes decides not to inline them.
 */
#undef memcpy
#undef memset
#undef memmove
/* Re-declare after the #undefs so the exports refer to the real symbols. */
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *__memcpy(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT
/* Paravirt provides its own GS-index loader; export the native one otherwise. */
EXPORT_SYMBOL(native_load_gs_index);
#endif
| gpl-2.0 |
rebelde-/kyubi | net/ipv6/netfilter/ip6table_raw.c | 8892 | 2121 | /*
* IPv6 raw table, a port of the IPv4 raw table to IPv6
*
* Copyright (C) 2003 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*/
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
/*
 * Table template for the IPv6 "raw" table; hooked at PRE_ROUTING and
 * LOCAL_OUT with NF_IP6_PRI_RAW priority so it runs before conntrack.
 */
static const struct xt_table packet_raw = {
	.name = "raw",
	.valid_hooks = RAW_VALID_HOOKS,
	.me = THIS_MODULE,
	.af = NFPROTO_IPV6,
	.priority = NF_IP6_PRI_RAW,
};
/* The work comes in here from netfilter.c. */
/*
 * Netfilter hook entry point: dispatch the packet through this
 * network namespace's raw table.  Either @in or @out may be NULL
 * depending on the hook point, so derive the namespace from
 * whichever device is present.
 */
static unsigned int
ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
		  const struct net_device *in, const struct net_device *out,
		  int (*okfn)(struct sk_buff *))
{
	const struct net_device *dev = in ? in : out;
	const struct net *net = dev_net(dev);

	return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
}
static struct nf_hook_ops *rawtable_ops __read_mostly;
/*
 * Per-namespace setup: build the initial (empty) ruleset from the
 * template and register the raw table for this namespace.
 *
 * Returns 0 on success or a negative errno.
 */
static int __net_init ip6table_raw_net_init(struct net *net)
{
	struct ip6t_replace *repl;

	repl = ip6t_alloc_initial_table(&packet_raw);
	if (!repl)
		return -ENOMEM;
	/* The registered table keeps its own copy; free the template. */
	net->ipv6.ip6table_raw = ip6t_register_table(net, &packet_raw, repl);
	kfree(repl);
	if (IS_ERR(net->ipv6.ip6table_raw))
		return PTR_ERR(net->ipv6.ip6table_raw);

	return 0;
}
/* Per-namespace teardown: unregister and free this namespace's raw table. */
static void __net_exit ip6table_raw_net_exit(struct net *net)
{
	ip6t_unregister_table(net, net->ipv6.ip6table_raw);
}
/* Per-network-namespace lifecycle callbacks for the raw table. */
static struct pernet_operations ip6table_raw_net_ops = {
	.init = ip6table_raw_net_init,
	.exit = ip6table_raw_net_exit,
};
/*
 * Module init: register the per-namespace table first, then attach the
 * netfilter hooks.  The ordering matters - the hook callback looks up
 * net->ipv6.ip6table_raw, which must exist before packets can arrive.
 */
static int __init ip6table_raw_init(void)
{
	int ret;
	ret = register_pernet_subsys(&ip6table_raw_net_ops);
	if (ret < 0)
		return ret;
	/* Register hooks */
	rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
	if (IS_ERR(rawtable_ops)) {
		ret = PTR_ERR(rawtable_ops);
		goto cleanup_table;
	}
	return ret;
cleanup_table:
	/* Hook registration failed; undo the pernet registration. */
	unregister_pernet_subsys(&ip6table_raw_net_ops);
	return ret;
}
/* Module exit: detach hooks first so no packets reach the table, then
 * tear down the per-namespace state (reverse of init order). */
static void __exit ip6table_raw_fini(void)
{
	xt_hook_unlink(&packet_raw, rawtable_ops);
	unregister_pernet_subsys(&ip6table_raw_net_ops);
}
module_init(ip6table_raw_init);
module_exit(ip6table_raw_fini);
MODULE_LICENSE("GPL");
| gpl-2.0 |
smksyj/linux_modified_mlock | drivers/net/wireless/b43/tables_lpphy.c | 9916 | 153834 | /*
Broadcom B43 wireless driver
IEEE 802.11a/g LP-PHY and radio device data tables
Copyright (c) 2009 Michael Buesch <m@bues.ch>
Copyright (c) 2009 Gábor Stefanik <netrolller.3d@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43.h"
#include "tables_lpphy.h"
#include "phy_common.h"
#include "phy_lp.h"
/* Entry of the 2062/2063 radio init table */
struct b206x_init_tab_entry {
	u16 offset;	/* radio register offset to program */
	u16 value_a;	/* value written when initializing in A (5 GHz) mode */
	u16 value_g;	/* value written when initializing in G (2.4 GHz) mode */
	u8 flags;	/* B206X_FLAG_* mask: which modes this entry applies to */
};
#define B206X_FLAG_A 0x01 /* Flag: Init in A mode */
#define B206X_FLAG_G 0x02 /* Flag: Init in G mode */
static const struct b206x_init_tab_entry b2062_init_tab[] = {
/* { .offset = B2062_N_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = 0x0001, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_N_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_PDN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_N_PDN_CTL1, .value_a = 0x0000, .value_g = 0x00CA, .flags = B206X_FLAG_G, },
/* { .offset = B2062_N_PDN_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */
{ .offset = B2062_N_PDN_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_N_PDN_CTL4, .value_a = 0x0015, .value_g = 0x002A, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_GEN_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_IQ_CALIB, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
{ .offset = B2062_N_LGENC, .value_a = 0x00DB, .value_g = 0x00FF, .flags = B206X_FLAG_A, },
/* { .offset = B2062_N_LGENA_LPF, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2062_N_LGENA_BIAS0, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */
/* { .offset = B2062_N_LGNEA_BIAS1, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
/* { .offset = B2062_N_LGENA_CTL0, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
/* { .offset = B2062_N_LGENA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_LGENA_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_N_LGENA_TUNE0, .value_a = 0x00DD, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_LGENA_TUNE1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_N_LGENA_TUNE2, .value_a = 0x00DD, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_N_LGENA_TUNE3, .value_a = 0x0077, .value_g = 0x00B5, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_N_LGENA_CTL3, .value_a = 0x0000, .value_g = 0x00FF, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_LGENA_CTL4, .value_a = 0x001F, .value_g = 0x001F, .flags = 0, }, */
/* { .offset = B2062_N_LGENA_CTL5, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
/* { .offset = B2062_N_LGENA_CTL6, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
{ .offset = B2062_N_LGENA_CTL7, .value_a = 0x0033, .value_g = 0x0033, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_RXA_CTL0, .value_a = 0x0009, .value_g = 0x0009, .flags = 0, }, */
{ .offset = B2062_N_RXA_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
/* { .offset = B2062_N_RXA_CTL2, .value_a = 0x0018, .value_g = 0x0018, .flags = 0, }, */
/* { .offset = B2062_N_RXA_CTL3, .value_a = 0x0027, .value_g = 0x0027, .flags = 0, }, */
/* { .offset = B2062_N_RXA_CTL4, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */
/* { .offset = B2062_N_RXA_CTL5, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */
/* { .offset = B2062_N_RXA_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_RXA_CTL7, .value_a = 0x0008, .value_g = 0x0008, .flags = 0, }, */
{ .offset = B2062_N_RXBB_CTL0, .value_a = 0x0082, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_RXBB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_GAIN0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_N_RXBB_GAIN1, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_N_RXBB_GAIN2, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_RXBB_GAIN3, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_RSSI0, .value_a = 0x0043, .value_g = 0x0043, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_RSSI1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_CALIB0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_CALIB1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_CALIB2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_BIAS0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_BIAS1, .value_a = 0x002A, .value_g = 0x002A, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_BIAS2, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_BIAS3, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_BIAS4, .value_a = 0x00AA, .value_g = 0x00AA, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_BIAS5, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_RSSI2, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_RSSI3, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_RSSI4, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2062_N_RXBB_RSSI5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL0, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL2, .value_a = 0x0084, .value_g = 0x0084, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_N_TX_CTL4, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_N_TX_CTL5, .value_a = 0x0002, .value_g = 0x0002, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_TX_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL7, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL8, .value_a = 0x0082, .value_g = 0x0082, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_TX_CTL_A, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_TX_GC2G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */
/* { .offset = B2062_N_TX_GC5G, .value_a = 0x00FF, .value_g = 0x00FF, .flags = 0, }, */
{ .offset = B2062_N_TX_TUNE, .value_a = 0x0088, .value_g = 0x001B, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_N_TX_PAD, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2062_N_TX_PGA, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2062_N_TX_PADAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2062_N_TX_PGAAUX, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2062_N_TSSI_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_TSSI_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_TSSI_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_IQ_CALIB_CTL0, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2062_N_IQ_CALIB_CTL1, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2062_N_IQ_CALIB_CTL2, .value_a = 0x0032, .value_g = 0x0032, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_TS, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_CTL1, .value_a = 0x0015, .value_g = 0x0015, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_CTL2, .value_a = 0x000F, .value_g = 0x000F, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_DBG0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_DBG1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_DBG2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_CALIB_DBG3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_PSENSE_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_PSENSE_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_PSENSE_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_N_TEST_BUF0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RADIO_ID_CODE, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_COMM4, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_COMM15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_PDS_CTL0, .value_a = 0x00FF, .value_g = 0x00FF, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_PDS_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_PDS_CTL2, .value_a = 0x008E, .value_g = 0x008E, .flags = 0, }, */
/* { .offset = B2062_S_PDS_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_BG_CTL0, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */
/* { .offset = B2062_S_BG_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_BG_CTL2, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */
{ .offset = B2062_S_LGENG_CTL0, .value_a = 0x00F8, .value_g = 0x00D8, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_LGENG_CTL1, .value_a = 0x003C, .value_g = 0x0024, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_LGENG_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_LGENG_CTL3, .value_a = 0x0041, .value_g = 0x0041, .flags = 0, }, */
/* { .offset = B2062_S_LGENG_CTL4, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
/* { .offset = B2062_S_LGENG_CTL5, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2062_S_LGENG_CTL6, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */
/* { .offset = B2062_S_LGENG_CTL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_LGENG_CTL8, .value_a = 0x0088, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_LGENG_CTL9, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
{ .offset = B2062_S_LGENG_CTL10, .value_a = 0x0088, .value_g = 0x0080, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_LGENG_CTL11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL0, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL1, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL2, .value_a = 0x00AF, .value_g = 0x00AF, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL3, .value_a = 0x0012, .value_g = 0x0012, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL4, .value_a = 0x000B, .value_g = 0x000B, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL5, .value_a = 0x005F, .value_g = 0x005F, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL7, .value_a = 0x0040, .value_g = 0x0040, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL8, .value_a = 0x0052, .value_g = 0x0052, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL9, .value_a = 0x0026, .value_g = 0x0026, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL10, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL11, .value_a = 0x0036, .value_g = 0x0036, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL12, .value_a = 0x0057, .value_g = 0x0057, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL13, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL14, .value_a = 0x0075, .value_g = 0x0075, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL15, .value_a = 0x00B4, .value_g = 0x00B4, .flags = 0, }, */
/* { .offset = B2062_S_REFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_RFPLL_CTL0, .value_a = 0x0098, .value_g = 0x0098, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL1, .value_a = 0x0010, .value_g = 0x0010, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_RFPLL_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RFPLL_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RFPLL_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_RFPLL_CTL5, .value_a = 0x0043, .value_g = 0x0043, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL6, .value_a = 0x0047, .value_g = 0x0047, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL7, .value_a = 0x000C, .value_g = 0x000C, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL8, .value_a = 0x0011, .value_g = 0x0011, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL9, .value_a = 0x0011, .value_g = 0x0011, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL10, .value_a = 0x000E, .value_g = 0x000E, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL11, .value_a = 0x0008, .value_g = 0x0008, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL12, .value_a = 0x0033, .value_g = 0x0033, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL13, .value_a = 0x000A, .value_g = 0x000A, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL14, .value_a = 0x0006, .value_g = 0x0006, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_RFPLL_CTL15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RFPLL_CTL16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RFPLL_CTL17, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_RFPLL_CTL18, .value_a = 0x003E, .value_g = 0x003E, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL19, .value_a = 0x0013, .value_g = 0x0013, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_RFPLL_CTL20, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_RFPLL_CTL21, .value_a = 0x0062, .value_g = 0x0062, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL22, .value_a = 0x0007, .value_g = 0x0007, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL23, .value_a = 0x0016, .value_g = 0x0016, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL24, .value_a = 0x005C, .value_g = 0x005C, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL25, .value_a = 0x0095, .value_g = 0x0095, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_RFPLL_CTL26, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RFPLL_CTL27, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RFPLL_CTL28, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RFPLL_CTL29, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_RFPLL_CTL30, .value_a = 0x00A0, .value_g = 0x00A0, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL31, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_RFPLL_CTL32, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2062_S_RFPLL_CTL33, .value_a = 0x00CC, .value_g = 0x00CC, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2062_S_RFPLL_CTL34, .value_a = 0x0007, .value_g = 0x0007, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2062_S_RXG_CNT0, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT5, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT6, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT7, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
{ .offset = B2062_S_RXG_CNT8, .value_a = 0x000F, .value_g = 0x000F, .flags = B206X_FLAG_A, },
/* { .offset = B2062_S_RXG_CNT9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT10, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT11, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT12, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT13, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT14, .value_a = 0x00A0, .value_g = 0x00A0, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT15, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT16, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2062_S_RXG_CNT17, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
};
/*
 * Initial register values for the b2063 radio.
 * Entries flagged B206X_FLAG_G are uploaded when operating on the
 * 2.4GHz (G) band, entries flagged B206X_FLAG_A on the 5GHz (A) band
 * (see b2063_upload_init_table()).  The commented-out entries carry no
 * flags and are therefore never written — presumably the hardware
 * defaults already match; kept here for reference against the vendor
 * init sequence.
 */
static const struct b206x_init_tab_entry b2063_init_tab[] = {
{ .offset = B2063_COMM1, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
/* { .offset = B2063_COMM2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_COMM10, .value_a = 0x0001, .value_g = 0x0000, .flags = B206X_FLAG_A, },
/* { .offset = B2063_COMM11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_COMM14, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */
/* { .offset = B2063_COMM15, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */
{ .offset = B2063_COMM16, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM17, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM18, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM19, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM20, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM21, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM22, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM23, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
{ .offset = B2063_COMM24, .value_a = 0x0000, .value_g = 0x0000, .flags = B206X_FLAG_G, },
/* { .offset = B2063_PWR_SWITCH_CTL, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */
/* { .offset = B2063_PLL_SP1, .value_a = 0x003f, .value_g = 0x003f, .flags = 0, }, */
/* { .offset = B2063_PLL_SP2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_LOGEN_SP1, .value_a = 0x00e8, .value_g = 0x00d4, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2063_LOGEN_SP2, .value_a = 0x00a7, .value_g = 0x0053, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_LOGEN_SP3, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
{ .offset = B2063_LOGEN_SP4, .value_a = 0x00f0, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_LOGEN_SP5, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
{ .offset = B2063_G_RX_SP1, .value_a = 0x001f, .value_g = 0x005e, .flags = B206X_FLAG_G, },
{ .offset = B2063_G_RX_SP2, .value_a = 0x007f, .value_g = 0x007e, .flags = B206X_FLAG_G, },
{ .offset = B2063_G_RX_SP3, .value_a = 0x0030, .value_g = 0x00f0, .flags = B206X_FLAG_G, },
/* { .offset = B2063_G_RX_SP4, .value_a = 0x0035, .value_g = 0x0035, .flags = 0, }, */
/* { .offset = B2063_G_RX_SP5, .value_a = 0x003f, .value_g = 0x003f, .flags = 0, }, */
/* { .offset = B2063_G_RX_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_G_RX_SP7, .value_a = 0x007f, .value_g = 0x007f, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_G_RX_SP8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_SP9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_G_RX_SP10, .value_a = 0x000c, .value_g = 0x000c, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_G_RX_SP11, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_A_RX_SP1, .value_a = 0x003c, .value_g = 0x003f, .flags = B206X_FLAG_A, },
{ .offset = B2063_A_RX_SP2, .value_a = 0x00fc, .value_g = 0x00fe, .flags = B206X_FLAG_A, },
/* { .offset = B2063_A_RX_SP3, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
/* { .offset = B2063_A_RX_SP4, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
/* { .offset = B2063_A_RX_SP5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_A_RX_SP7, .value_a = 0x0008, .value_g = 0x0008, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_RX_BB_SP1, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */
/* { .offset = B2063_RX_BB_SP2, .value_a = 0x0022, .value_g = 0x0022, .flags = 0, }, */
/* { .offset = B2063_RX_BB_SP3, .value_a = 0x00a8, .value_g = 0x00a8, .flags = 0, }, */
{ .offset = B2063_RX_BB_SP4, .value_a = 0x0060, .value_g = 0x0060, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_RX_BB_SP5, .value_a = 0x0011, .value_g = 0x0011, .flags = 0, }, */
/* { .offset = B2063_RX_BB_SP6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RX_BB_SP7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_RX_BB_SP8, .value_a = 0x0030, .value_g = 0x0030, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_TX_RF_SP1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP2, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */
{ .offset = B2063_TX_RF_SP3, .value_a = 0x000c, .value_g = 0x000b, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2063_TX_RF_SP4, .value_a = 0x0010, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_TX_RF_SP5, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP6, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP7, .value_a = 0x0068, .value_g = 0x0068, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP8, .value_a = 0x0068, .value_g = 0x0068, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP9, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP10, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP11, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP12, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP13, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP14, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP15, .value_a = 0x00c0, .value_g = 0x00c0, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP16, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
/* { .offset = B2063_TX_RF_SP17, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
{ .offset = B2063_PA_SP1, .value_a = 0x003d, .value_g = 0x00fd, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_PA_SP2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */
/* { .offset = B2063_PA_SP3, .value_a = 0x0096, .value_g = 0x0096, .flags = 0, }, */
/* { .offset = B2063_PA_SP4, .value_a = 0x005a, .value_g = 0x005a, .flags = 0, }, */
/* { .offset = B2063_PA_SP5, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */
/* { .offset = B2063_PA_SP6, .value_a = 0x007f, .value_g = 0x007f, .flags = 0, }, */
/* { .offset = B2063_PA_SP7, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
{ .offset = B2063_TX_BB_SP1, .value_a = 0x0002, .value_g = 0x0002, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_TX_BB_SP2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_TX_BB_SP3, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */
/* { .offset = B2063_REG_SP1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_BANDGAP_CTL1, .value_a = 0x0056, .value_g = 0x0056, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_BANDGAP_CTL2, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */
/* { .offset = B2063_LPO_CTL1, .value_a = 0x000e, .value_g = 0x000e, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL1, .value_a = 0x007e, .value_g = 0x007e, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL2, .value_a = 0x0015, .value_g = 0x0015, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL3, .value_a = 0x000f, .value_g = 0x000f, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RC_CALIB_CTL10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_CALNRST, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_IN_PLL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_IN_PLL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_CP1, .value_a = 0x00cf, .value_g = 0x00cf, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_CP2, .value_a = 0x0059, .value_g = 0x0059, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_CP3, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_CP4, .value_a = 0x0042, .value_g = 0x0042, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_LF1, .value_a = 0x00db, .value_g = 0x00db, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_LF2, .value_a = 0x0094, .value_g = 0x0094, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_LF3, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_LF4, .value_a = 0x0063, .value_g = 0x0063, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_SG1, .value_a = 0x0007, .value_g = 0x0007, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_SG2, .value_a = 0x00d3, .value_g = 0x00d3, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_SG3, .value_a = 0x00b1, .value_g = 0x00b1, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_SG4, .value_a = 0x003b, .value_g = 0x003b, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_SG5, .value_a = 0x0006, .value_g = 0x0006, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO1, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */
{ .offset = B2063_PLL_JTAG_PLL_VCO2, .value_a = 0x00f7, .value_g = 0x00f7, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB3, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB5, .value_a = 0x0009, .value_g = 0x0009, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB6, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB7, .value_a = 0x0016, .value_g = 0x0016, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB8, .value_a = 0x006b, .value_g = 0x006b, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_VCO_CALIB10, .value_a = 0x00b3, .value_g = 0x00b3, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_XTAL_12, .value_a = 0x0004, .value_g = 0x0004, .flags = 0, }, */
/* { .offset = B2063_PLL_JTAG_PLL_XTAL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_ACL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_ACL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_ACL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_ACL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_ACL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_INPUTS, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_WAITCNT, .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVR1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVR2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVAL1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVAL2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVAL3, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVAL4, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVAL5, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVAL6, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_OVAL7, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CALVLD1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CALVLD2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CVAL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CVAL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CVAL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CVAL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CVAL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CVAL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LO_CALIB_CVAL7, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_CALIB_EN, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_PEAKDET1, .value_a = 0x00ff, .value_g = 0x00ff, .flags = 0, }, */
/* { .offset = B2063_LOGEN_RCCR1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_VCOBUF1, .value_a = 0x0060, .value_g = 0x0060, .flags = 0, }, */
/* { .offset = B2063_LOGEN_MIXER1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_MIXER2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */
/* { .offset = B2063_LOGEN_BUF1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_BUF2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */
/* { .offset = B2063_LOGEN_DIV1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2063_LOGEN_DIV2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_DIV3, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_CBUFRX1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_CBUFRX2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_CBUFTX1, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_CBUFTX2, .value_a = 0x0066, .value_g = 0x0066, .flags = 0, }, */
/* { .offset = B2063_LOGEN_IDAC1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_SPARE1, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2063_LOGEN_SPARE2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_LOGEN_SPARE3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_1ST1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_G_RX_1ST2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_1ST3, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND1, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND2, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND3, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND5, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND7, .value_a = 0x0035, .value_g = 0x0035, .flags = 0, }, */
/* { .offset = B2063_G_RX_2ND8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_PS1, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_G_RX_PS2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_PS3, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_G_RX_PS4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_PS5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_MIX1, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */
/* { .offset = B2063_G_RX_MIX2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_G_RX_MIX3, .value_a = 0x0071, .value_g = 0x0071, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2063_G_RX_MIX4, .value_a = 0x0071, .value_g = 0x0071, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_G_RX_MIX5, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */
/* { .offset = B2063_G_RX_MIX6, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2063_G_RX_MIX7, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */
/* { .offset = B2063_G_RX_MIX8, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2063_G_RX_PDET1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_SPARES1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_SPARES2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_G_RX_SPARES3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_1ST1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_A_RX_1ST2, .value_a = 0x00f0, .value_g = 0x0030, .flags = B206X_FLAG_A, },
/* { .offset = B2063_A_RX_1ST3, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
/* { .offset = B2063_A_RX_1ST4, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_A_RX_1ST5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_2ND1, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
/* { .offset = B2063_A_RX_2ND2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_2ND3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_2ND4, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
/* { .offset = B2063_A_RX_2ND5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_2ND6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_2ND7, .value_a = 0x0005, .value_g = 0x0005, .flags = 0, }, */
/* { .offset = B2063_A_RX_PS1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_PS2, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_A_RX_PS3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_PS4, .value_a = 0x0033, .value_g = 0x0033, .flags = 0, }, */
/* { .offset = B2063_A_RX_PS5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_A_RX_PS6, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_A_RX_MIX1, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2063_A_RX_MIX2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_MIX3, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */
{ .offset = B2063_A_RX_MIX4, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2063_A_RX_MIX5, .value_a = 0x000f, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, },
{ .offset = B2063_A_RX_MIX6, .value_a = 0x000f, .value_g = 0x000f, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_A_RX_MIX7, .value_a = 0x0044, .value_g = 0x0044, .flags = 0, }, */
/* { .offset = B2063_A_RX_MIX8, .value_a = 0x0001, .value_g = 0x0001, .flags = 0, }, */
/* { .offset = B2063_A_RX_PWRDET1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_SPARE1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_SPARE2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_A_RX_SPARE3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_RX_TIA_CTL1, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_RX_TIA_CTL2, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */
{ .offset = B2063_RX_TIA_CTL3, .value_a = 0x0077, .value_g = 0x0077, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_RX_TIA_CTL4, .value_a = 0x0058, .value_g = 0x0058, .flags = 0, }, */
/* { .offset = B2063_RX_TIA_CTL5, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RX_TIA_CTL6, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_RX_BB_CTL1, .value_a = 0x0074, .value_g = 0x0074, .flags = 0, }, */
{ .offset = B2063_RX_BB_CTL2, .value_a = 0x0004, .value_g = 0x0004, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_RX_BB_CTL3, .value_a = 0x00a2, .value_g = 0x00a2, .flags = 0, }, */
/* { .offset = B2063_RX_BB_CTL4, .value_a = 0x00aa, .value_g = 0x00aa, .flags = 0, }, */
/* { .offset = B2063_RX_BB_CTL5, .value_a = 0x0024, .value_g = 0x0024, .flags = 0, }, */
/* { .offset = B2063_RX_BB_CTL6, .value_a = 0x00a9, .value_g = 0x00a9, .flags = 0, }, */
/* { .offset = B2063_RX_BB_CTL7, .value_a = 0x0028, .value_g = 0x0028, .flags = 0, }, */
/* { .offset = B2063_RX_BB_CTL8, .value_a = 0x0010, .value_g = 0x0010, .flags = 0, }, */
/* { .offset = B2063_RX_BB_CTL9, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL1, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */
/* { .offset = B2063_TX_RF_IDAC_LO_RF_I, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2063_TX_RF_IDAC_LO_RF_Q, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2063_TX_RF_IDAC_LO_BB_I, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2063_TX_RF_IDAC_LO_BB_Q, .value_a = 0x0088, .value_g = 0x0088, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL2, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL3, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL4, .value_a = 0x00b8, .value_g = 0x00b8, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL5, .value_a = 0x0080, .value_g = 0x0080, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL6, .value_a = 0x0038, .value_g = 0x0038, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL7, .value_a = 0x0078, .value_g = 0x0078, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL8, .value_a = 0x00c0, .value_g = 0x00c0, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL9, .value_a = 0x0003, .value_g = 0x0003, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL10, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL14, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_TX_RF_CTL15, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_PA_CTL1, .value_a = 0x0000, .value_g = 0x0004, .flags = B206X_FLAG_A, },
/* { .offset = B2063_PA_CTL2, .value_a = 0x000c, .value_g = 0x000c, .flags = 0, }, */
/* { .offset = B2063_PA_CTL3, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PA_CTL4, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PA_CTL5, .value_a = 0x0096, .value_g = 0x0096, .flags = 0, }, */
/* { .offset = B2063_PA_CTL6, .value_a = 0x0077, .value_g = 0x0077, .flags = 0, }, */
/* { .offset = B2063_PA_CTL7, .value_a = 0x005a, .value_g = 0x005a, .flags = 0, }, */
/* { .offset = B2063_PA_CTL8, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PA_CTL9, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PA_CTL10, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */
/* { .offset = B2063_PA_CTL11, .value_a = 0x0070, .value_g = 0x0070, .flags = 0, }, */
/* { .offset = B2063_PA_CTL12, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_PA_CTL13, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_TX_BB_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_TX_BB_CTL2, .value_a = 0x00b3, .value_g = 0x00b3, .flags = 0, }, */
/* { .offset = B2063_TX_BB_CTL3, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2063_TX_BB_CTL4, .value_a = 0x000b, .value_g = 0x000b, .flags = 0, }, */
/* { .offset = B2063_GPIO_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
{ .offset = B2063_VREG_CTL1, .value_a = 0x0003, .value_g = 0x0003, .flags = B206X_FLAG_A | B206X_FLAG_G, },
/* { .offset = B2063_AMUX_CTL1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_IQ_CALIB_GVAR, .value_a = 0x00b3, .value_g = 0x00b3, .flags = 0, }, */
/* { .offset = B2063_IQ_CALIB_CTL1, .value_a = 0x0055, .value_g = 0x0055, .flags = 0, }, */
/* { .offset = B2063_IQ_CALIB_CTL2, .value_a = 0x0030, .value_g = 0x0030, .flags = 0, }, */
/* { .offset = B2063_TEMPSENSE_CTL1, .value_a = 0x0046, .value_g = 0x0046, .flags = 0, }, */
/* { .offset = B2063_TEMPSENSE_CTL2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_TX_RX_LOOPBACK1, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_TX_RX_LOOPBACK2, .value_a = 0x0000, .value_g = 0x0000, .flags = 0, }, */
/* { .offset = B2063_EXT_TSSI_CTL1, .value_a = 0x0021, .value_g = 0x0021, .flags = 0, }, */
/* { .offset = B2063_EXT_TSSI_CTL2, .value_a = 0x0023, .value_g = 0x0023, .flags = 0, }, */
/* { .offset = B2063_AFE_CTL , .value_a = 0x0002, .value_g = 0x0002, .flags = 0, }, */
};
/*
 * b2062_upload_init_table - program the b2062 radio init values.
 * @dev: device whose radio registers are written.
 *
 * Walks b2062_init_tab and writes each entry's value for the current
 * band: the G value on 2.4GHz, the A value otherwise.  Entries not
 * flagged for the active band are skipped.
 */
void b2062_upload_init_table(struct b43_wldev *dev)
{
	const struct b206x_init_tab_entry *entry;
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(b2062_init_tab); idx++) {
		entry = &b2062_init_tab[idx];
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			if (entry->flags & B206X_FLAG_G)
				b43_radio_write(dev, entry->offset,
						entry->value_g);
		} else {
			if (entry->flags & B206X_FLAG_A)
				b43_radio_write(dev, entry->offset,
						entry->value_a);
		}
	}
}
/*
 * b2063_upload_init_table - program the b2063 radio init values.
 * @dev: device whose radio registers are written.
 *
 * Same scheme as the b2062 variant: for every table entry, write the
 * G-band value when operating on 2.4GHz, the A-band value otherwise,
 * skipping entries that are not flagged for the active band.
 */
void b2063_upload_init_table(struct b43_wldev *dev)
{
	const struct b206x_init_tab_entry *entry;
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(b2063_init_tab); idx++) {
		entry = &b2063_init_tab[idx];
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			if (entry->flags & B206X_FLAG_G)
				b43_radio_write(dev, entry->offset,
						entry->value_g);
		} else {
			if (entry->flags & B206X_FLAG_A)
				b43_radio_write(dev, entry->offset,
						entry->value_a);
		}
	}
}
/*
 * b43_lptab_read - read a single LP-PHY table element.
 * @dev: device to read from.
 * @offset: table offset; the element width is encoded in the
 *          B43_LPTAB_TYPEMASK bits and stripped before use.
 *
 * Returns the element value (zero-extended to u32), or 0 if the
 * encoded width is unknown (triggers a WARN).
 */
u32 b43_lptab_read(struct b43_wldev *dev, u32 offset)
{
	u32 type = offset & B43_LPTAB_TYPEMASK;
	u32 ret;

	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	if (type == B43_LPTAB_8BIT) {
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		ret = b43_phy_read(dev, B43_LPPHY_TABLEDATALO) & 0xFF;
	} else if (type == B43_LPTAB_16BIT) {
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		ret = b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
	} else if (type == B43_LPTAB_32BIT) {
		/* High halfword is read first, then the low one. */
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		ret = b43_phy_read(dev, B43_LPPHY_TABLEDATAHI);
		ret <<= 16;
		ret |= b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
	} else {
		B43_WARN_ON(1);
		ret = 0;
	}
	return ret;
}
/*
 * b43_lptab_read_bulk - read consecutive LP-PHY table elements.
 * @dev: device to read from.
 * @offset: table offset of the first element; element width is
 *          encoded in the B43_LPTAB_TYPEMASK bits.
 * @nr_elements: number of elements to read.
 * @_data: destination buffer; elements are stored packed as
 *         u8/u16/u32 according to the encoded width.
 *
 * The table address register is written only once, before the loop;
 * the hardware presumably advances it after each data access — this
 * matches the single ADDR write here (TODO: confirm against specs).
 */
void b43_lptab_read_bulk(struct b43_wldev *dev, u32 offset,
		unsigned int nr_elements, void *_data)
{
	u32 type = offset & B43_LPTAB_TYPEMASK;
	u8 *out = _data;
	unsigned int i;
	u32 tmp;

	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
	for (i = 0; i < nr_elements; i++) {
		switch (type) {
		case B43_LPTAB_8BIT:
			*out = b43_phy_read(dev, B43_LPPHY_TABLEDATALO) & 0xFF;
			out++;
			break;
		case B43_LPTAB_16BIT:
			*((u16 *)out) = b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
			out += 2;
			break;
		case B43_LPTAB_32BIT:
			/* High halfword first, then low, as in
			 * b43_lptab_read(). */
			tmp = b43_phy_read(dev, B43_LPPHY_TABLEDATAHI);
			tmp <<= 16;
			tmp |= b43_phy_read(dev, B43_LPPHY_TABLEDATALO);
			*((u32 *)out) = tmp;
			out += 4;
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}
/*
 * b43_lptab_write - write a single LP-PHY table element.
 * @dev: device to write to.
 * @offset: table offset; the element width is encoded in the
 *          B43_LPTAB_TYPEMASK bits and stripped before use.
 * @value: value to store; WARNs if it exceeds the encoded width
 *         (8- and 16-bit cases).
 */
void b43_lptab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
	u32 type = offset & B43_LPTAB_TYPEMASK;

	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	if (type == B43_LPTAB_8BIT) {
		B43_WARN_ON(value & ~0xFF);
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
	} else if (type == B43_LPTAB_16BIT) {
		B43_WARN_ON(value & ~0xFFFF);
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
	} else if (type == B43_LPTAB_32BIT) {
		/* High halfword is written first, then the low one. */
		b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_LPPHY_TABLEDATAHI, value >> 16);
		b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
	} else {
		B43_WARN_ON(1);
	}
}
/*
 * b43_lptab_write_bulk - write consecutive LP-PHY table elements.
 * @dev: device to write to.
 * @offset: table offset of the first element; element width is
 *          encoded in the B43_LPTAB_TYPEMASK bits.
 * @nr_elements: number of elements to copy from @_data.
 * @_data: source buffer holding packed u8/u16/u32 elements.
 *
 * The table address register is written only once, before the loop;
 * the hardware presumably advances it after each data access — this
 * matches the single ADDR write here (TODO: confirm against specs).
 *
 * Fix: the 16- and 32-bit loads previously cast the const source
 * pointer to non-const (u16 *)/(u32 *), silently casting away the
 * qualifier.  Use const-qualified casts instead; no behavior change.
 */
void b43_lptab_write_bulk(struct b43_wldev *dev, u32 offset,
		unsigned int nr_elements, const void *_data)
{
	u32 type, value;
	const u8 *data = _data;
	unsigned int i;

	type = offset & B43_LPTAB_TYPEMASK;
	offset &= ~B43_LPTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_LPPHY_TABLE_ADDR, offset);
	for (i = 0; i < nr_elements; i++) {
		switch (type) {
		case B43_LPTAB_8BIT:
			value = *data;
			data++;
			B43_WARN_ON(value & ~0xFF);
			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
			break;
		case B43_LPTAB_16BIT:
			value = *((const u16 *)data);
			data += 2;
			B43_WARN_ON(value & ~0xFFFF);
			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
			break;
		case B43_LPTAB_32BIT:
			value = *((const u32 *)data);
			data += 4;
			/* High halfword first, then low. */
			b43_phy_write(dev, B43_LPPHY_TABLEDATAHI, value >> 16);
			b43_phy_write(dev, B43_LPPHY_TABLEDATALO, value);
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}
/*
 * LP-PHY "min sig sq" table data (53 bytes), uploaded via the
 * b43_lptab_* helpers.  The values are symmetric around the middle
 * entry, which is 0x00.
 */
static const u8 lpphy_min_sig_sq_table[] = {
0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd,
0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00,
0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
};
/* LP-PHY noise-scale table data for PHY revisions 0 and 1. */
static const u16 lpphy_rev01_noise_scale_table[] = {
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0x00a4,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4c00, 0x2d36,
0x0000, 0x0000, 0x4c00, 0x2d36,
};
/* Noise-scale table for LP-PHY revision 2 and later (per table name). */
static const u16 lpphy_rev2plus_noise_scale_table[] = {
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x0000,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4,
};
/* CRS gain/NFT table (name per vendor driver; semantics not visible here). */
static const u16 lpphy_crs_gain_nft_table[] = {
0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f, 0x036f,
0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381, 0x0374, 0x0381,
0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f, 0x0040, 0x005e, 0x007f,
0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d, 0x013d,
};
/* Filter-control table for LP-PHY revisions 0/1 (16-bit entries). */
static const u16 lpphy_rev01_filter_control_table[] = {
0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077, 0xff53,
0x0127,
};
/* Filter-control table for LP-PHY revision 2+ (wider, 32-bit entries). */
static const u32 lpphy_rev2plus_filter_control_table[] = {
0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27, 0x0000217f,
0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f,
};
/* Power-save control table for LP-PHY revisions 0/1 (per table name). */
static const u32 lpphy_rev01_ps_control_table[] = {
0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101, 0x00000080,
0x08080101, 0x00000040, 0x08080101, 0x000000c0, 0x08a81501, 0x000000c0,
0x0fe8fd01, 0x000000c0, 0x08300105, 0x000000c0, 0x08080201, 0x000000c0,
0x08280205, 0x000000c0, 0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0,
0x08080202, 0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0,
0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106, 0x000000c0,
0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0,
};
/* Power-save control table for LP-PHY revision 2+ (per table name). */
static const u32 lpphy_rev2plus_ps_control_table[] = {
0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 0x00000000, 0x00002080,
0x00006180, 0x00003002, 0x00000040, 0x00002042, 0x00180047, 0x00080043,
0x00000041, 0x000020c1, 0x00046006, 0x00042002, 0x00040000, 0x00002003,
0x00180006, 0x00080002,
};
/* PLL fractional divider table (8-bit entries, name per vendor driver). */
static const u8 lpphy_pll_fraction_table[] = {
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
};
/* I/Q and local-oscillator calibration table (per table name). */
static const u16 lpphy_iqlo_cal_table[] = {
0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002,
0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600,
0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006,
0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
/* OFDM/CCK gain table for LP-PHY revision 0 (per table name). */
static const u16 lpphy_rev0_ofdm_cck_gain_table[] = {
0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001,
0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075,
0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d,
0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d,
};
/* OFDM/CCK gain table for LP-PHY revision 1; differs from the rev0
 * table only in the first three entries. */
static const u16 lpphy_rev1_ofdm_cck_gain_table[] = {
0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001,
0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075,
0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d,
0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d,
};
/* Gain-delta table; all entries are zero in this driver version. */
static const u16 lpphy_gain_delta_table[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
/* TX power-control table. Several distinct value regions are visible
 * (a descending 0x50..0x11 ramp, zero padding, gain words, repeating
 * 0x04200000/0x04000000 pairs, and small signed-looking deltas); the
 * region boundaries/meanings are not documented in this file. */
static const u32 lpphy_tx_power_control_table[] = {
0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c, 0x0000004b,
0x0000004a, 0x00000049, 0x00000048, 0x00000047, 0x00000046, 0x00000045,
0x00000044, 0x00000043, 0x00000042, 0x00000041, 0x00000040, 0x0000003f,
0x0000003e, 0x0000003d, 0x0000003c, 0x0000003b, 0x0000003a, 0x00000039,
0x00000038, 0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033,
0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e, 0x0000002d,
0x0000002c, 0x0000002b, 0x0000002a, 0x00000029, 0x00000028, 0x00000027,
0x00000026, 0x00000025, 0x00000024, 0x00000023, 0x00000022, 0x00000021,
0x00000020, 0x0000001f, 0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b,
0x0000001a, 0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015,
0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x000075a0, 0x000075a0, 0x000075a1, 0x000075a1, 0x000075a2, 0x000075a2,
0x000075a3, 0x000075a3, 0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1,
0x000074b2, 0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20,
0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23, 0x00006d23,
0x00004660, 0x00004660, 0x00004661, 0x00004661, 0x00004662, 0x00004662,
0x00004663, 0x00004663, 0x00003e60, 0x00003e60, 0x00003e61, 0x00003e61,
0x00003e62, 0x00003e62, 0x00003e63, 0x00003e63, 0x00003660, 0x00003660,
0x00003661, 0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663,
0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62, 0x00002e62,
0x00002e63, 0x00002e63, 0x00002660, 0x00002660, 0x00002661, 0x00002661,
0x00002662, 0x00002662, 0x00002663, 0x00002663, 0x000025e0, 0x000025e0,
0x000025e1, 0x000025e1, 0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3,
0x00001de0, 0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2,
0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61, 0x00001d61,
0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63, 0x00001560, 0x00001560,
0x00001561, 0x00001561, 0x00001562, 0x00001562, 0x00001563, 0x00001563,
0x00000d60, 0x00000d60, 0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62,
0x00000d63, 0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1,
0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10, 0x00000e10,
0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12, 0x00000e13, 0x00000e13,
0x00000bf0, 0x00000bf0, 0x00000bf1, 0x00000bf1, 0x00000bf2, 0x00000bf2,
0x00000bf3, 0x00000bf3, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc,
0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04, 0x0000fcff,
0x000005fb, 0x0000fd01, 0x00000401, 0x00000006, 0x0000ff03, 0x000007fc,
0x0000fc08, 0x00000203, 0x0000fffb, 0x00000600, 0x0000fa01, 0x0000fc03,
0x0000fe06, 0x0000fe00, 0x00000102, 0x000007fd, 0x000004fb, 0x000006ff,
0x000004fd, 0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500,
0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa, 0x00000700,
0x00000305, 0x000004ff, 0x00000801, 0x00000503, 0x000005f9, 0x00000404,
0x0000fb08, 0x000005fd, 0x00000501, 0x00000405, 0x0000fb03, 0x000007fc,
0x00000403, 0x00000303, 0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd,
0x0000fe01, 0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe,
0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa, 0x000003fe,
0x00000304, 0x000004f9, 0x00000100, 0x0000fd06, 0x000008fc, 0x00000701,
0x00000504, 0x0000fdfe, 0x0000fdfc, 0x000003fe, 0x00000704, 0x000002fc,
0x000004f9, 0x0000fdfd, 0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb,
0x000004f9, 0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05,
0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa, 0x00000305,
0x000008fc, 0x00000102, 0x000001fd, 0x000004fc, 0x0000fe03, 0x00000701,
0x000001fb, 0x000001f9, 0x00000206, 0x000006fd, 0x00000508, 0x00000700,
0x00000304, 0x000005fe, 0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb,
0x000007f9, 0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08,
0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb, 0x00000702,
};
/* Gain-index table; the visible data is the same 37-pair sequence
 * repeated twice (presumably one copy per band or mode — not confirmed
 * by this file). */
static const u32 lpphy_gain_idx_table[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000,
0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001,
0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000,
0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001,
0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011,
0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018,
0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019,
0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a,
0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a,
0x64ca55ad, 0x0000001a, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000,
0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001,
0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000,
0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000,
0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010,
0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018,
0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019,
0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019,
0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a,
0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a,
};
/* Auxiliary gain-index table (13-entry sequence, repeated twice). */
static const u16 lpphy_aux_gain_idx_table[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016,
};
/* Gain-value table; differs from lpphy_a0_gain_value_table only in
 * entries 64/65 (0x09/0xf1 here vs. 0x0f/0xf7 there). */
static const u32 lpphy_gain_value_table[] = {
0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004,
0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a,
0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006,
0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000,
0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f,
0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009, 0x000000f1,
0x00000000, 0x00000000,
};
/* Gain table: 46 non-zero codes followed by zero padding to 96 entries. */
static const u16 lpphy_gain_table[] = {
0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808, 0x080a,
0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813, 0x0814, 0x0816,
0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824, 0x0830, 0x0834, 0x0837,
0x083b, 0x083f, 0x0840, 0x0844, 0x0857, 0x085b, 0x085f, 0x08d7, 0x08db,
0x08df, 0x0957, 0x095b, 0x095f, 0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f,
0x175f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
/* Gain-index table for the A0 chip revision; the data is identical to
 * lpphy_papd_mult_table below (duplicated intentionally in the vendor
 * tables, presumably — not confirmed here). */
static const u32 lpphy_a0_gain_idx_table[] = {
0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065,
0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f,
0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca,
0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e,
0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193,
0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a,
0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325,
0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471,
0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646,
0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd,
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28,
};
/* Auxiliary gain-index table for the A0 chip revision. */
static const u16 lpphy_a0_aux_gain_idx_table[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0002, 0x0014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014,
};
/* Gain-value table for the A0 chip revision; differs from
 * lpphy_gain_value_table only in entries 64/65. */
static const u32 lpphy_a0_gain_value_table[] = {
0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004,
0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a,
0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006,
0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000,
0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f,
0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0x000000f7,
0x00000000, 0x00000000,
};
/* Gain table for the A0 chip revision: 44 non-zero codes, zero-padded. */
static const u16 lpphy_a0_gain_table[] = {
0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b, 0x000c,
0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016, 0x0017, 0x001a,
0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034, 0x0037, 0x003b, 0x003f,
0x0040, 0x0044, 0x0057, 0x005b, 0x005f, 0x00d7, 0x00db, 0x00df, 0x0157,
0x015b, 0x015f, 0x0357, 0x035b, 0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
/* Switch-control table: a 32-entry pattern repeated twice (64 entries). */
static const u16 lpphy_sw_control_table[] = {
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128,
0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009,
0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018,
0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0128, 0x0128, 0x0009, 0x0009,
0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028,
0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009,
0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018,
0x0018,
};
/* "HF" table (16 bytes; name per vendor driver, semantics not visible
 * in this file). */
static const u8 lpphy_hf_table[] = {
0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48,
0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17,
};
/* PAPD (power-amplifier pre-distortion) epsilon table (per table name). */
static const u32 lpphy_papd_eps_table[] = {
0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9, 0x00021fdf,
0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7, 0x0004efc2, 0x00055fb5,
0x0005cfb0, 0x00063fa8, 0x00068fa3, 0x00071f98, 0x0007ef92, 0x00084f8b,
0x0008df82, 0x00097f77, 0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c,
0x000bff41, 0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16,
0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15, 0x00143f1c,
0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f, 0x001e6f7e, 0x0021cfa4,
0x0025bfd2, 0x002a2008, 0x002fb047, 0x00360090, 0x003d40e0, 0x0045c135,
0x004fb189, 0x005ae1d7, 0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf,
0x007ff2e3, 0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356,
0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506,
};
/* PAPD multiplier table; the data is identical to
 * lpphy_a0_gain_idx_table above. */
static const u32 lpphy_papd_mult_table[] = {
0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065,
0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f,
0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca,
0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e,
0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193,
0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a,
0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325,
0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471,
0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646,
0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd,
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28,
};
/* TX gain table for LP-PHY rev0 boards without an external PA.
 * Entries run from highest to lowest output power: the baseband
 * multiplier (bb_mult) ramps down within each pga/pad combination,
 * then pga/pad step down and the ramp repeats. */
static struct lpphy_tx_gain_table_entry lpphy_rev0_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 139, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 135, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 131, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 128, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 124, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 121, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 117, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 114, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 111, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 107, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 104, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 101, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 93, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 90, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 88, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 85, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 83, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 81, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 78, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 76, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 74, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
};
/* TX gain table for LP-PHY rev0 on 2.4 GHz (gm fixed at 4 here vs. 7 in
 * the no-PA table). Same ordering scheme: highest to lowest power. */
static struct lpphy_tx_gain_table_entry lpphy_rev0_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 10, .pad = 5, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 9, .pad = 5, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 71, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 69, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 67, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 9, .pad = 4, .dac = 0, .bb_mult = 58, },
{ .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 8, .pad = 4, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 7, .pad = 4, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 67, },
{ .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 7, .pad = 3, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 6, .pad = 3, .dac = 0, .bb_mult = 58, },
{ .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 5, .pad = 3, .dac = 0, .bb_mult = 57, },
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 83, },
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 81, },
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 78, },
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 76, },
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 74, },
{ .gm = 4, .pga = 4, .pad = 2, .dac = 0, .bb_mult = 72, },
};
/*
 * TX gain table for LP-PHY revision 0, 5 GHz band.
 *
 * Each entry selects one total TX gain step, ordered from highest gain
 * (index 0) to lowest. Fields are raw radio/baseband register values:
 * .gm/.pga/.pad are RF amplifier stage gains and .bb_mult is the
 * baseband multiplier that provides fine-grained steps between RF
 * settings. These values are reverse-engineered hardware calibration
 * data — do not edit individual entries by hand.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev0_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 55, },
/* Each time the RF pad/pga gain steps down, bb_mult restarts high
 * and ramps down again to keep the overall gain curve monotonic. */
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 55, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 73, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
};
/*
 * TX gain table for LP-PHY revision 1 with no external power amplifier
 * ("nopa"). Entries run from highest total TX gain (index 0) to lowest;
 * .gm/.pga/.pad are RF amplifier stage gains and .bb_mult is the
 * baseband multiplier giving fine steps between RF settings. These are
 * reverse-engineered hardware calibration values — do not hand-edit.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev1_nopa_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 152, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 147, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 143, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 139, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 135, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 131, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 128, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 124, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 121, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 117, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 114, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 111, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 107, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 104, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 101, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 93, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 90, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 88, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 85, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 83, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 81, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 78, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 76, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 74, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, },
/*
 * NOTE(review): the next nine entries oscillate pad 13 -> 14 -> 13
 * instead of stepping down monotonically like the rest of the table.
 * This looks like a copy/paste slip, but it matches the table as
 * shipped in the driver — leave as-is unless verified against the
 * vendor calibration data.
 */
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
};
/*
 * TX gain table for LP-PHY revision 1, 2.4 GHz band.
 *
 * Entries run from highest total TX gain (index 0) to lowest. The 2 GHz
 * tables use a fixed .gm of 4 (vs. 7 in the 5 GHz tables); .pga/.pad
 * step the RF gain down while .bb_mult provides the fine baseband steps
 * within each RF setting. Reverse-engineered hardware calibration data
 * — do not edit individual entries by hand.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev1_2ghz_tx_gain_table[] = {
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, },
/* bb_mult restarts high and ramps down each time pad/pga steps down,
 * keeping the overall gain curve monotonic. */
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 73, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 71, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 69, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 67, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 73, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 71, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 61, },
{ .gm = 4, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 72, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 70, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 68, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 66, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 64, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 62, },
{ .gm = 4, .pga = 10, .pad = 6, .dac = 0, .bb_mult = 60, },
};
/*
 * TX gain table for LP-PHY revision 1, 5 GHz band.
 *
 * Entries run from highest total TX gain (index 0) to lowest;
 * .gm/.pga/.pad are RF amplifier stage gains and .bb_mult the baseband
 * multiplier giving fine steps between RF settings. The visible entries
 * are identical to lpphy_rev0_5ghz_tx_gain_table above; they are kept
 * as separate tables, presumably so per-revision tuning can diverge —
 * confirm before attempting to merge them. Reverse-engineered hardware
 * calibration data — do not edit individual entries by hand.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev1_5ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 99, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 96, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 93, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 90, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 88, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 85, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 83, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 81, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 78, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 76, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 74, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 15, .dac = 0, .bb_mult = 55, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 15, .pad = 14, .dac = 0, .bb_mult = 55, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 13, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 72, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 12, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 73, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 11, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 71, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 15, .pad = 10, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 15, .pad = 9, .dac = 0, .bb_mult = 56, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 14, .pad = 9, .dac = 0, .bb_mult = 58, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 9, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 60, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 13, .pad = 8, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 8, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 12, .pad = 7, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 70, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 68, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 66, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 61, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 59, },
{ .gm = 7, .pga = 11, .pad = 7, .dac = 0, .bb_mult = 57, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 69, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 67, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 65, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 63, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 62, },
{ .gm = 7, .pga = 11, .pad = 6, .dac = 0, .bb_mult = 60, },
};
/*
 * TX gain table for LP-PHY rev 2+ boards without an external power amplifier
 * (B43_BFH_NOPA). Entries run from highest to lowest TX power; first the
 * baseband multiplier is stepped down, then the pad and pga attenuators.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev2_nopa_tx_gain_table[] = {
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 152, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 147, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 143, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 139, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 135, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 131, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 128, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 124, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 121, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 117, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 114, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 111, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 107, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 104, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 101, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 99, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 96, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 93, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 90, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 88, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 85, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 83, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 81, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 78, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 76, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 74, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 72, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 70, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 68, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 66, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 197, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 192, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 186, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 181, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 176, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 171, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 166, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 161, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 157, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 152, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 148, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 144, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 140, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 136, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 132, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 128, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 124, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 121, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 117, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 114, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 111, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 108, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 105, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 102, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 99, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 96, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 93, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 91, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 88, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 86, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 83, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 81, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 79, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 76, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 74, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 72, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 70, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 68, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 66, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 248, .pad = 64, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 248, .pad = 62, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 241, .pad = 62, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 241, .pad = 60, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 234, .pad = 60, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 234, .pad = 59, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 227, .pad = 59, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 227, .pad = 57, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 221, .pad = 57, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 221, .pad = 55, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 215, .pad = 55, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 215, .pad = 54, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 208, .pad = 54, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 208, .pad = 52, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 203, .pad = 52, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 203, .pad = 51, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 197, .pad = 51, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 197, .pad = 49, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 191, .pad = 49, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 191, .pad = 48, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 186, .pad = 48, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 186, .pad = 47, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 181, .pad = 47, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 181, .pad = 45, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 175, .pad = 45, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 175, .pad = 44, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 170, .pad = 44, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 170, .pad = 43, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 166, .pad = 43, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 166, .pad = 42, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 161, .pad = 42, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 161, .pad = 40, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 156, .pad = 40, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 156, .pad = 39, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 152, .pad = 39, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 152, .pad = 38, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 148, .pad = 38, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 148, .pad = 37, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 143, .pad = 37, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 143, .pad = 36, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 139, .pad = 36, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 139, .pad = 35, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 135, .pad = 35, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 135, .pad = 34, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 132, .pad = 34, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 132, .pad = 33, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 128, .pad = 33, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 128, .pad = 32, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 124, .pad = 32, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 124, .pad = 31, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 121, .pad = 31, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 121, .pad = 30, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 117, .pad = 30, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 117, .pad = 29, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 114, .pad = 29, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 114, .pad = 29, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 111, .pad = 29, .dac = 0, .bb_mult = 64, },
};
/*
 * TX gain table for LP-PHY rev 2+ boards operating in the 2.4 GHz band.
 * Entries run from highest to lowest TX power, stepping the pga and pad
 * attenuators down in alternation; bb_mult stays fixed at 64.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev2_2ghz_tx_gain_table[] = {
{ .gm = 7, .pga = 99, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 96, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 93, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 90, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 88, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 85, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 83, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 81, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 78, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 76, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 74, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 72, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 70, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 68, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 66, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 64, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 64, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 62, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 62, .pad = 248, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 60, .pad = 248, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 60, .pad = 241, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 59, .pad = 241, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 59, .pad = 234, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 57, .pad = 234, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 57, .pad = 227, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 55, .pad = 227, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 55, .pad = 221, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 54, .pad = 221, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 54, .pad = 215, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 52, .pad = 215, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 52, .pad = 208, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 51, .pad = 208, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 51, .pad = 203, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 49, .pad = 203, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 49, .pad = 197, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 48, .pad = 197, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 48, .pad = 191, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 47, .pad = 191, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 47, .pad = 186, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 45, .pad = 186, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 45, .pad = 181, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 44, .pad = 181, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 44, .pad = 175, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 43, .pad = 175, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 43, .pad = 170, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 42, .pad = 170, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 42, .pad = 166, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 40, .pad = 166, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 40, .pad = 161, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 39, .pad = 161, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 39, .pad = 156, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 38, .pad = 156, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 38, .pad = 152, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 37, .pad = 152, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 37, .pad = 148, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 36, .pad = 148, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 36, .pad = 143, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 35, .pad = 143, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 35, .pad = 139, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 34, .pad = 139, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 34, .pad = 135, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 33, .pad = 135, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 33, .pad = 132, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 32, .pad = 132, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 32, .pad = 128, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 31, .pad = 128, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 31, .pad = 124, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 30, .pad = 124, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 30, .pad = 121, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 29, .pad = 121, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 29, .pad = 117, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 29, .pad = 117, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 29, .pad = 114, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 28, .pad = 114, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 28, .pad = 111, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 27, .pad = 111, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 27, .pad = 108, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 26, .pad = 108, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 26, .pad = 104, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 25, .pad = 104, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 25, .pad = 102, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 25, .pad = 102, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 25, .pad = 99, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 24, .pad = 99, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 24, .pad = 96, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 23, .pad = 96, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 23, .pad = 93, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 23, .pad = 93, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 23, .pad = 90, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 22, .pad = 90, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 22, .pad = 88, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 21, .pad = 88, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 21, .pad = 85, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 21, .pad = 85, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 21, .pad = 83, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 20, .pad = 83, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 20, .pad = 81, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 20, .pad = 81, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 20, .pad = 78, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 19, .pad = 78, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 19, .pad = 76, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 19, .pad = 76, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 19, .pad = 74, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 18, .pad = 74, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 18, .pad = 72, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 18, .pad = 72, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 18, .pad = 70, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 17, .pad = 70, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 17, .pad = 68, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 17, .pad = 68, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 17, .pad = 66, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 16, .pad = 66, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 16, .pad = 64, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 16, .pad = 64, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 16, .pad = 62, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 62, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 60, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 60, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 15, .pad = 59, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 59, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 57, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 57, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 55, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 55, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 14, .pad = 54, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 54, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, },
{ .gm = 7, .pga = 13, .pad = 52, .dac = 0, .bb_mult = 64, },
};
/*
 * TX gain table for LP-PHY rev 2+ boards operating in the 5 GHz band.
 * Entries run from highest to lowest TX power; first the baseband
 * multiplier is stepped down, then the pad and pga attenuators.
 */
static struct lpphy_tx_gain_table_entry lpphy_rev2_5ghz_tx_gain_table[] = {
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 152, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 147, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 143, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 139, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 135, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 131, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 128, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 124, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 121, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 117, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 114, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 111, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 107, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 104, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 101, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 99, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 96, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 93, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 90, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 88, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 85, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 83, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 81, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 78, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 76, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 74, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 72, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 70, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 68, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 66, },
{ .gm = 255, .pga = 255, .pad = 255, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 248, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 241, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 234, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 227, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 221, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 215, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 208, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 203, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 197, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 191, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 186, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 181, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 175, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 170, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 166, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 161, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 156, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 152, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 148, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 143, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 139, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 135, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 132, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 128, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 124, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 121, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 117, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 114, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 111, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 108, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 104, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 102, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 99, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 96, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 93, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 90, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 88, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 85, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 83, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 81, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 78, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 76, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 74, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 72, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 70, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 68, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 66, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 64, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 255, .pad = 62, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 248, .pad = 62, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 248, .pad = 60, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 241, .pad = 60, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 241, .pad = 59, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 234, .pad = 59, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 234, .pad = 57, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 227, .pad = 57, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 227, .pad = 55, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 221, .pad = 55, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 221, .pad = 54, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 215, .pad = 54, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 215, .pad = 52, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 208, .pad = 52, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 208, .pad = 51, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 203, .pad = 51, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 203, .pad = 49, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 197, .pad = 49, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 197, .pad = 48, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 191, .pad = 48, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 191, .pad = 47, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 186, .pad = 47, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 186, .pad = 45, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 181, .pad = 45, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 181, .pad = 44, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 175, .pad = 44, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 175, .pad = 43, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 170, .pad = 43, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 170, .pad = 42, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 166, .pad = 42, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 166, .pad = 40, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 161, .pad = 40, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 161, .pad = 39, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 156, .pad = 39, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 156, .pad = 38, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 152, .pad = 38, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 152, .pad = 37, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 148, .pad = 37, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 148, .pad = 36, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 143, .pad = 36, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 143, .pad = 35, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 139, .pad = 35, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 139, .pad = 34, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 135, .pad = 34, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 135, .pad = 33, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 132, .pad = 33, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 132, .pad = 32, .dac = 0, .bb_mult = 64, },
{ .gm = 255, .pga = 128, .pad = 32, .dac = 0, .bb_mult = 64, },
};
/*
 * Upload the static PHY tables for LP-PHY revisions 0 and 1.
 *
 * Writes each constant table into its hardware table slot via
 * b43_lptab_write_bulk(); rev 0 and rev 1 differ only in which
 * OFDM/CCK gain table is loaded (into slots 13 and 12).
 */
void lpphy_rev0_1_table_init(struct b43_wldev *dev)
{
	/* This path must only run on rev 0/1 hardware. */
	B43_WARN_ON(dev->phy.rev >= 2);

	b43_lptab_write_bulk(dev, B43_LPTAB8(2, 0),
		ARRAY_SIZE(lpphy_min_sig_sq_table), lpphy_min_sig_sq_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(1, 0),
		ARRAY_SIZE(lpphy_rev01_noise_scale_table), lpphy_rev01_noise_scale_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
		ARRAY_SIZE(lpphy_crs_gain_nft_table), lpphy_crs_gain_nft_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(8, 0),
		ARRAY_SIZE(lpphy_rev01_filter_control_table), lpphy_rev01_filter_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(9, 0),
		ARRAY_SIZE(lpphy_rev01_ps_control_table), lpphy_rev01_ps_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB8(6, 0),
		ARRAY_SIZE(lpphy_pll_fraction_table), lpphy_pll_fraction_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 0),
		ARRAY_SIZE(lpphy_iqlo_cal_table), lpphy_iqlo_cal_table);
	/* The same gain table is loaded into both slot 13 and slot 12. */
	if (dev->phy.rev == 0) {
		b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0),
			ARRAY_SIZE(lpphy_rev0_ofdm_cck_gain_table), lpphy_rev0_ofdm_cck_gain_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0),
			ARRAY_SIZE(lpphy_rev0_ofdm_cck_gain_table), lpphy_rev0_ofdm_cck_gain_table);
	} else {
		b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0),
			ARRAY_SIZE(lpphy_rev1_ofdm_cck_gain_table), lpphy_rev1_ofdm_cck_gain_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0),
			ARRAY_SIZE(lpphy_rev1_ofdm_cck_gain_table), lpphy_rev1_ofdm_cck_gain_table);
	}
	b43_lptab_write_bulk(dev, B43_LPTAB16(15, 0),
		ARRAY_SIZE(lpphy_gain_delta_table), lpphy_gain_delta_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
		ARRAY_SIZE(lpphy_tx_power_control_table), lpphy_tx_power_control_table);
}
/*
 * Upload the static PHY tables for LP-PHY revision 2 and later.
 *
 * Clears table 7 (which also holds the TX gain entries written later
 * by lpphy_write_gain_table()), then loads the rev2+ constant tables.
 * For the 4325 A0 chip (chip_rev 0) some gain tables are overridden
 * with A0-specific variants.
 */
void lpphy_rev2plus_table_init(struct b43_wldev *dev)
{
	int i;

	/* This path must only run on rev >= 2 hardware. */
	B43_WARN_ON(dev->phy.rev < 2);

	/* Zero out all 704 entries of 32-bit table 7. */
	for (i = 0; i < 704; i++)
		b43_lptab_write(dev, B43_LPTAB32(7, i), 0);

	b43_lptab_write_bulk(dev, B43_LPTAB8(2, 0),
		ARRAY_SIZE(lpphy_min_sig_sq_table), lpphy_min_sig_sq_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(1, 0),
		ARRAY_SIZE(lpphy_rev2plus_noise_scale_table), lpphy_rev2plus_noise_scale_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(11, 0),
		ARRAY_SIZE(lpphy_rev2plus_filter_control_table), lpphy_rev2plus_filter_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(12, 0),
		ARRAY_SIZE(lpphy_rev2plus_ps_control_table), lpphy_rev2plus_ps_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
		ARRAY_SIZE(lpphy_gain_idx_table), lpphy_gain_idx_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
		ARRAY_SIZE(lpphy_aux_gain_idx_table), lpphy_aux_gain_idx_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(15, 0),
		ARRAY_SIZE(lpphy_sw_control_table), lpphy_sw_control_table);
	b43_lptab_write_bulk(dev, B43_LPTAB8(16, 0),
		ARRAY_SIZE(lpphy_hf_table), lpphy_hf_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(17, 0),
		ARRAY_SIZE(lpphy_gain_value_table), lpphy_gain_value_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(18, 0),
		ARRAY_SIZE(lpphy_gain_table), lpphy_gain_table);
	b43_lptab_write_bulk(dev, B43_LPTAB8(6, 0),
		ARRAY_SIZE(lpphy_pll_fraction_table), lpphy_pll_fraction_table);
	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 0),
		ARRAY_SIZE(lpphy_iqlo_cal_table), lpphy_iqlo_cal_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(9, 0),
		ARRAY_SIZE(lpphy_papd_eps_table), lpphy_papd_eps_table);
	b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
		ARRAY_SIZE(lpphy_papd_mult_table), lpphy_papd_mult_table);

	/* BCM4325 A0: replace gain tables 13/14/17/18 with A0 variants. */
	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
		b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
			ARRAY_SIZE(lpphy_a0_gain_idx_table), lpphy_a0_gain_idx_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
			ARRAY_SIZE(lpphy_a0_aux_gain_idx_table), lpphy_a0_aux_gain_idx_table);
		b43_lptab_write_bulk(dev, B43_LPTAB32(17, 0),
			ARRAY_SIZE(lpphy_a0_gain_value_table), lpphy_a0_gain_value_table);
		b43_lptab_write_bulk(dev, B43_LPTAB16(18, 0),
			ARRAY_SIZE(lpphy_a0_gain_table), lpphy_a0_gain_table);
	}
}
/*
 * Write a single TX gain entry for LP-PHY rev 0/1 hardware.
 *
 * The RF gain fields are packed into one 32-bit word (pad at bit 11,
 * pga at bit 7, gm at bit 4, dac in the low bits) and stored in table
 * 10 at 0xC0 + offset; the baseband multiplier goes into a second word
 * (bits 20+) at 0x140 + offset.
 */
static void lpphy_rev0_1_write_gain_table(struct b43_wldev *dev, int offset,
					  struct lpphy_tx_gain_table_entry data)
{
	u32 rf_word, bb_word;

	B43_WARN_ON(dev->phy.rev >= 2);

	rf_word = (data.pad << 11) | (data.pga << 7) | (data.gm << 4) |
		  data.dac;
	b43_lptab_write(dev, B43_LPTAB32(10, 0xC0 + offset), rf_word);

	bb_word = data.bb_mult << 20;
	b43_lptab_write(dev, B43_LPTAB32(10, 0x140 + offset), bb_word);
}
static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset,
struct lpphy_tx_gain_table_entry data)
{
u32 tmp;
B43_WARN_ON(dev->phy.rev < 2);
tmp = data.pad << 16;
tmp |= data.pga << 8;
tmp |= data.gm;
if (dev->phy.rev >= 3) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
tmp |= 0x10 << 24;
else
tmp |= 0x70 << 24;
} else {
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
tmp |= 0x14 << 24;
else
tmp |= 0x7F << 24;
}
b43_lptab_write(dev, B43_LPTAB32(7, 0xC0 + offset), tmp);
tmp = data.bb_mult << 20;
tmp |= data.dac << 28;
b43_lptab_write(dev, B43_LPTAB32(7, 0x140 + offset), tmp);
}
/*
 * Write one TX gain table entry, dispatching on the PHY revision to
 * the rev 0/1 or rev 2+ register layout.
 */
void lpphy_write_gain_table(struct b43_wldev *dev, int offset,
			    struct lpphy_tx_gain_table_entry data)
{
	if (dev->phy.rev < 2)
		lpphy_rev0_1_write_gain_table(dev, offset, data);
	else
		lpphy_rev2plus_write_gain_table(dev, offset, data);
}
/*
 * Write a range of TX gain table entries.
 *
 * NOTE(review): for offset != 0 this writes only (count - offset)
 * entries and indexes `table` with the absolute gain-table index
 * rather than a table-relative one — likely only correct because all
 * visible callers pass offset == 0. Verify the intended semantics of
 * `offset` before using a non-zero value.
 */
void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
				 struct lpphy_tx_gain_table_entry *table)
{
	int i;

	for (i = offset; i < count; i++)
		lpphy_write_gain_table(dev, i, table[i]);
}
/*
 * Select and upload the TX gain table matching the PHY revision,
 * board PA configuration (SPROM board flags) and current band.
 * All tables carry 128 entries, written starting at offset 0.
 */
void lpphy_init_tx_gain_table(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct lpphy_tx_gain_table_entry *table;

	switch (dev->phy.rev) {
	case 0:
		if ((sprom->boardflags_hi & B43_BFH_NOPA) ||
		    (sprom->boardflags_lo & B43_BFL_HGPA))
			table = lpphy_rev0_nopa_tx_gain_table;
		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			table = lpphy_rev0_2ghz_tx_gain_table;
		else
			table = lpphy_rev0_5ghz_tx_gain_table;
		break;
	case 1:
		if ((sprom->boardflags_hi & B43_BFH_NOPA) ||
		    (sprom->boardflags_lo & B43_BFL_HGPA))
			table = lpphy_rev1_nopa_tx_gain_table;
		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			table = lpphy_rev1_2ghz_tx_gain_table;
		else
			table = lpphy_rev1_5ghz_tx_gain_table;
		break;
	default:
		if (sprom->boardflags_hi & B43_BFH_NOPA)
			table = lpphy_rev2_nopa_tx_gain_table;
		else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
			table = lpphy_rev2_2ghz_tx_gain_table;
		else
			table = lpphy_rev2_5ghz_tx_gain_table;
		break;
	}

	lpphy_write_gain_table_bulk(dev, 0, 128, table);
}
| gpl-2.0 |
bq-dev/android_kernel_bq_common | drivers/ide/ide-4drives.c | 9916 | 1516 |
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
#define DRV_NAME "ide-4drives"
/*
 * Probing is opt-in: the driver does nothing unless the user passes
 * the "probe" module parameter (ide-4drives.probe=1).
 */
static bool probe_4drives;

module_param_named(probe, probe_4drives, bool, 0);
MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
/*
 * Per-drive init hook: drives on the second channel share the primary
 * port, so toggle bit 5 of the select register to address them.
 */
static void ide_4drives_init_dev(ide_drive_t *drive)
{
	if (drive->hwif->channel == 0)
		return;

	drive->select ^= 0x20;
}
/* Port operations: only the per-drive init hook is needed. */
static const struct ide_port_ops ide_4drives_port_ops = {
	.init_dev = ide_4drives_init_dev,
};
/*
 * Port description: 4 drives on one port, serialized access, no DMA.
 */
static const struct ide_port_info ide_4drives_port_info = {
	.port_ops = &ide_4drives_port_ops,
	.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA |
		      IDE_HFLAG_4DRIVES,
	.chipset = ide_4drives,
};
/*
 * Module init: claim the legacy primary-port I/O resources (0x1f0-0x1f7
 * and the 0x3f6 control register), set up the port and register the
 * host with the same hw structure used for both channels.
 *
 * Returns 0 on success, -ENODEV when probing was not requested, -EBUSY
 * when the I/O regions are taken, or the error from ide_host_add().
 *
 * Fix: the original returned ide_host_add()'s result directly, leaking
 * both requested I/O regions when host registration failed; they are
 * now released on that error path.
 */
static int __init ide_4drives_init(void)
{
	unsigned long base = 0x1f0, ctl = 0x3f6;
	struct ide_hw hw, *hws[] = { &hw, &hw };
	int rc;

	if (probe_4drives == 0)
		return -ENODEV;

	if (!request_region(base, 8, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
			DRV_NAME, base, base + 7);
		return -EBUSY;
	}

	if (!request_region(ctl, 1, DRV_NAME)) {
		printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
			DRV_NAME, ctl);
		release_region(base, 8);
		return -EBUSY;
	}

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);
	hw.irq = 14;	/* legacy primary-port IRQ */

	rc = ide_host_add(&ide_4drives_port_info, hws, 2, NULL);
	if (rc) {
		/* Don't leak the I/O regions if registration fails. */
		release_region(ctl, 1);
		release_region(base, 8);
	}
	return rc;
}
/* No module_exit: once registered the host sticks around (legacy driver). */
module_init(ide_4drives_init);
MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("generic IDE chipset with 4 drives/port support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
aaloanm/linux | drivers/reset/reset-pistachio.c | 189 | 3717 | /*
* Pistachio SoC Reset Controller driver
*
* Copyright (C) 2015 Imagination Technologies Ltd.
*
* Author: Damien Horsley <Damien.Horsley@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/reset/pistachio-resets.h>
/* Offset of the soft-reset register inside the parent syscon block. */
#define	PISTACHIO_SOFT_RESET		0
/* Per-instance state: the registered reset controller plus the syscon
 * regmap that holds the soft-reset register. */
struct pistachio_reset_data {
	struct reset_controller_dev	rcdev;
	struct regmap			*periph_regs;
};
/*
 * Map a DT-binding reset index onto its bit position in the
 * PISTACHIO_SOFT_RESET register.  The register has gaps, so later
 * groups of IDs need fixed offsets (+6, +7) added.
 * Returns the bit shift, or -EINVAL for an unknown id.
 */
static inline int pistachio_reset_shift(unsigned long id)
{
	switch (id) {
	case PISTACHIO_RESET_I2C0:
	case PISTACHIO_RESET_I2C1:
	case PISTACHIO_RESET_I2C2:
	case PISTACHIO_RESET_I2C3:
	case PISTACHIO_RESET_I2S_IN:
	case PISTACHIO_RESET_PRL_OUT:
	case PISTACHIO_RESET_SPDIF_OUT:
	case PISTACHIO_RESET_SPI:
	case PISTACHIO_RESET_PWM_PDM:
	case PISTACHIO_RESET_UART0:
	case PISTACHIO_RESET_UART1:
	case PISTACHIO_RESET_QSPI:
	case PISTACHIO_RESET_MDC:
	case PISTACHIO_RESET_SDHOST:
	case PISTACHIO_RESET_ETHERNET:
	case PISTACHIO_RESET_IR:
	case PISTACHIO_RESET_HASH:
	case PISTACHIO_RESET_TIMER:
		/* First block: id maps directly onto its bit. */
		return id;
	case PISTACHIO_RESET_I2S_OUT:
	case PISTACHIO_RESET_SPDIF_IN:
	case PISTACHIO_RESET_EVT:
		return id + 6;
	case PISTACHIO_RESET_USB_H:
	case PISTACHIO_RESET_USB_PR:
	case PISTACHIO_RESET_USB_PHY_PR:
	case PISTACHIO_RESET_USB_PHY_PON:
		return id + 7;
	default:
		return -EINVAL;
	}
}
/* Assert (set) the soft-reset bit for the given reset line. */
static int pistachio_reset_assert(struct reset_controller_dev *rcdev,
				  unsigned long id)
{
	struct pistachio_reset_data *rd =
		container_of(rcdev, struct pistachio_reset_data, rcdev);
	int shift = pistachio_reset_shift(id);

	if (shift < 0)
		return shift;

	return regmap_update_bits(rd->periph_regs, PISTACHIO_SOFT_RESET,
				  BIT(shift), BIT(shift));
}
/* Deassert (clear) the soft-reset bit for the given reset line. */
static int pistachio_reset_deassert(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct pistachio_reset_data *rd =
		container_of(rcdev, struct pistachio_reset_data, rcdev);
	int shift = pistachio_reset_shift(id);

	if (shift < 0)
		return shift;

	return regmap_update_bits(rd->periph_regs, PISTACHIO_SOFT_RESET,
				  BIT(shift), 0);
}
/* Only level assert/deassert are supported; no pulsed reset op. */
static const struct reset_control_ops pistachio_reset_ops = {
	.assert = pistachio_reset_assert,
	.deassert = pistachio_reset_deassert,
};
/*
 * Bind the reset controller: look up the parent syscon's regmap (the
 * soft-reset register lives there) and register the rcdev.  All
 * resources are devm-managed, so there is no remove handler.
 */
static int pistachio_reset_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;
	struct pistachio_reset_data *rd;

	rd = devm_kzalloc(dev, sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	rd->periph_regs = syscon_node_to_regmap(np->parent);
	if (IS_ERR(rd->periph_regs))
		return PTR_ERR(rd->periph_regs);

	rd->rcdev.owner = THIS_MODULE;
	rd->rcdev.nr_resets = PISTACHIO_RESET_MAX + 1;
	rd->rcdev.ops = &pistachio_reset_ops;
	rd->rcdev.of_node = np;

	return devm_reset_controller_register(dev, &rd->rcdev);
}
/* DT match table for the reset node (child of the periph syscon). */
static const struct of_device_id pistachio_reset_dt_ids[] = {
	{ .compatible = "img,pistachio-reset", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, pistachio_reset_dt_ids);
static struct platform_driver pistachio_reset_driver = {
	.probe	= pistachio_reset_probe,
	.driver = {
		.name		= "pistachio-reset",
		.of_match_table	= pistachio_reset_dt_ids,
	},
};
module_platform_driver(pistachio_reset_driver);
MODULE_AUTHOR("Damien Horsley <Damien.Horsley@imgtec.com>");
/* Fixed typo in the original description ("Pistacho"). */
MODULE_DESCRIPTION("Pistachio Reset Controller Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
cleaton/acer_kernel_koud | drivers/pnp/pnpbios/bioscalls.c | 189 | 13235 | /*
* bioscalls.c - the lowlevel layer of the PnPBIOS driver
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include "pnpbios.h"
/* 16-bit far-call entry point of the PnP BIOS; filled in by
 * pnpbios_calls_init() from the installation structure. */
static struct {
	u16 offset;
	u16 segment;
} pnp_bios_callpoint;
/*
* These are some opcodes for a "static asmlinkage"
* As this code is *not* executed inside the linux kernel segment, but in a
* alias at offset 0, we need a far return that can not be compiled by
* default (please, prove me wrong! this is *really* ugly!)
* This is the only way to get the bios to return into the kernel code,
* because the bios code runs in 16 bit protected mode and therefore can only
* return to the caller if the call is within the first 64kB, and the linux
* kernel begins at offset 3GB...
*/
/* Hand-written thunk: pushes the four 32-bit argument words, far-calls
 * the BIOS through pnp_bios_callpoint, then cleans the stack and does a
 * far return so the BIOS can get back to 32-bit kernel code. */
asmlinkage void pnp_bios_callfunc(void);

__asm__(".text			\n"
	__ALIGN_STR "\n"
	"pnp_bios_callfunc:\n"
	"	pushl %edx	\n"
	"	pushl %ecx	\n"
	"	pushl %ebx	\n"
	"	pushl %eax	\n"
	"	lcallw *pnp_bios_callpoint\n"
	"	addl $16, %esp	\n"
	"	lret		\n"
	".previous		\n");
/* Point a per-CPU GDT selector at an arbitrary buffer so the 16-bit
 * BIOS code can address it through a segment register. */
#define Q2_SET_SEL(cpu, selname, address, size) \
do { \
struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
set_base(gdt[(selname) >> 3], (u32)(address)); \
set_limit(gdt[(selname) >> 3], size); \
} while(0)
/* Descriptor used to trap BIOSes that wrongly touch segment 0x40
 * (the legacy BIOS data area); initialised in pnpbios_calls_init(). */
static struct desc_struct bad_bios_desc;

/*
 * At some point we want to use this stack frame pointer to unwind
 * after PnP BIOS oopses.
 */

u32 pnp_bios_fault_esp;
u32 pnp_bios_fault_eip;
/* Set by the fault handler when a BIOS call crashed; disables further calls. */
u32 pnp_bios_is_utter_crap = 0;

/* Serializes all BIOS calls — PnP BIOSes are not re-entrant. */
static spinlock_t pnp_bios_lock;
/*
* Support Functions
*/
/*
 * Perform a PnP BIOS call: marshal up to seven 16-bit arguments plus up
 * to two transfer buffers (mapped via the PNP_TS1/PNP_TS2 selectors) and
 * far-call into the 16-bit BIOS with IRQs off on a single CPU.
 * Returns the BIOS status word (PNP_SUCCESS etc.).
 */
static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
				u16 arg4, u16 arg5, u16 arg6, u16 arg7,
				void *ts1_base, u32 ts1_size,
				void *ts2_base, u32 ts2_size)
{
	unsigned long flags;
	u16 status;
	struct desc_struct save_desc_40;
	int cpu;

	/*
	 * PnP BIOSes are generally not terribly re-entrant.
	 * Also, don't rely on them to save everything correctly.
	 */
	if (pnp_bios_is_utter_crap)
		return PNP_FUNCTION_NOT_SUPPORTED;

	cpu = get_cpu();
	/* Temporarily replace GDT slot 0x40 to catch stray BIOS accesses. */
	save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
	get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;

	/* On some boxes IRQ's during PnP BIOS calls are deadly. */
	spin_lock_irqsave(&pnp_bios_lock, flags);

	/* The lock prevents us bouncing CPU here */
	if (ts1_size)
		Q2_SET_SEL(smp_processor_id(), PNP_TS1, ts1_base, ts1_size);
	if (ts2_size)
		Q2_SET_SEL(smp_processor_id(), PNP_TS2, ts2_base, ts2_size);

	/* Save all callee-clobbered state, record a recovery ESP/EIP for
	 * the fault handler, then far-call through the thunk. */
	__asm__ __volatile__("pushl %%ebp\n\t"
			     "pushl %%edi\n\t"
			     "pushl %%esi\n\t"
			     "pushl %%ds\n\t"
			     "pushl %%es\n\t"
			     "pushl %%fs\n\t"
			     "pushl %%gs\n\t"
			     "pushfl\n\t"
			     "movl %%esp, pnp_bios_fault_esp\n\t"
			     "movl $1f, pnp_bios_fault_eip\n\t"
			     "lcall %5,%6\n\t"
			     "1:popfl\n\t"
			     "popl %%gs\n\t"
			     "popl %%fs\n\t"
			     "popl %%es\n\t"
			     "popl %%ds\n\t"
			     "popl %%esi\n\t"
			     "popl %%edi\n\t"
			     "popl %%ebp\n\t":"=a"(status)
			     :"0"((func) | (((u32) arg1) << 16)),
			     "b"((arg2) | (((u32) arg3) << 16)),
			     "c"((arg4) | (((u32) arg5) << 16)),
			     "d"((arg6) | (((u32) arg7) << 16)),
			     "i"(PNP_CS32), "i"(0)
			     :"memory");
	spin_unlock_irqrestore(&pnp_bios_lock, flags);

	/* Restore the 0x40 descriptor we clobbered above. */
	get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
	put_cpu();

	/* If we get here and this is set then the PnP BIOS faulted on us. */
	if (pnp_bios_is_utter_crap) {
		printk(KERN_ERR
		       "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue\n");
		printk(KERN_ERR
		       "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably\n");
		printk(KERN_ERR
		       "PnPBIOS: Check with your vendor for an updated BIOS\n");
	}

	return status;
}
/*
 * Translate a PnP BIOS status code into a human-readable kernel log
 * message, prefixed with the calling module's name.
 *
 * Fix vs. original: corrected the misspelled "occured" in the
 * PNP_HARDWARE_ERROR message (now "occurred", as upstream).
 */
void pnpbios_print_status(const char *module, u16 status)
{
	switch (status) {
	case PNP_SUCCESS:
		printk(KERN_ERR "PnPBIOS: %s: function successful\n", module);
		break;
	case PNP_NOT_SET_STATICALLY:
		printk(KERN_ERR "PnPBIOS: %s: unable to set static resources\n",
		       module);
		break;
	case PNP_UNKNOWN_FUNCTION:
		printk(KERN_ERR "PnPBIOS: %s: invalid function number passed\n",
		       module);
		break;
	case PNP_FUNCTION_NOT_SUPPORTED:
		printk(KERN_ERR
		       "PnPBIOS: %s: function not supported on this system\n",
		       module);
		break;
	case PNP_INVALID_HANDLE:
		printk(KERN_ERR "PnPBIOS: %s: invalid handle\n", module);
		break;
	case PNP_BAD_PARAMETER:
		printk(KERN_ERR "PnPBIOS: %s: invalid parameters were passed\n",
		       module);
		break;
	case PNP_SET_FAILED:
		printk(KERN_ERR "PnPBIOS: %s: unable to set resources\n",
		       module);
		break;
	case PNP_EVENTS_NOT_PENDING:
		printk(KERN_ERR "PnPBIOS: %s: no events are pending\n", module);
		break;
	case PNP_SYSTEM_NOT_DOCKED:
		printk(KERN_ERR "PnPBIOS: %s: the system is not docked\n",
		       module);
		break;
	case PNP_NO_ISA_PNP_CARDS:
		printk(KERN_ERR
		       "PnPBIOS: %s: no isapnp cards are installed on this system\n",
		       module);
		break;
	case PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES:
		printk(KERN_ERR
		       "PnPBIOS: %s: cannot determine the capabilities of the docking station\n",
		       module);
		break;
	case PNP_CONFIG_CHANGE_FAILED_NO_BATTERY:
		printk(KERN_ERR
		       "PnPBIOS: %s: unable to undock, the system does not have a battery\n",
		       module);
		break;
	case PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT:
		printk(KERN_ERR
		       "PnPBIOS: %s: could not dock due to resource conflicts\n",
		       module);
		break;
	case PNP_BUFFER_TOO_SMALL:
		printk(KERN_ERR "PnPBIOS: %s: the buffer passed is too small\n",
		       module);
		break;
	case PNP_USE_ESCD_SUPPORT:
		printk(KERN_ERR "PnPBIOS: %s: use ESCD instead\n", module);
		break;
	case PNP_MESSAGE_NOT_SUPPORTED:
		printk(KERN_ERR "PnPBIOS: %s: the message is unsupported\n",
		       module);
		break;
	case PNP_HARDWARE_ERROR:
		printk(KERN_ERR "PnPBIOS: %s: a hardware failure has occurred\n",
		       module);
		break;
	default:
		printk(KERN_ERR "PnPBIOS: %s: unexpected status 0x%x\n", module,
		       status);
		break;
	}
}
/*
* PnP BIOS Low Level Calls
*/
/* PnP BIOS function numbers (per the Plug and Play BIOS specification). */
#define PNP_GET_NUM_SYS_DEV_NODES		0x00
#define PNP_GET_SYS_DEV_NODE			0x01
#define PNP_SET_SYS_DEV_NODE			0x02
#define PNP_GET_EVENT				0x03
#define PNP_SEND_MESSAGE			0x04
#define PNP_GET_DOCKING_STATION_INFORMATION	0x05
#define PNP_SET_STATIC_ALLOCED_RES_INFO		0x09
#define PNP_GET_STATIC_ALLOCED_RES_INFO		0x0a
#define PNP_GET_APM_ID_TABLE			0x0b
#define PNP_GET_PNP_ISA_CONFIG_STRUC		0x40
#define PNP_GET_ESCD_INFO			0x41
#define PNP_READ_ESCD				0x42
#define PNP_WRITE_ESCD				0x43
/*
* Call PnP BIOS with function 0x00, "get number of system device nodes"
*/
/* Raw BIOS call for function 0x00; fills *data with the node count and
 * per-node size.  Returns a PNP_* status code. */
static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
	u16 status;
	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	/* data is passed via the PNP_TS1 transfer selector. */
	status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2,
			       PNP_TS1, PNP_DS, 0, 0, data,
			       sizeof(struct pnp_dev_node_info), NULL, 0);
	/* Only the low byte of the node count is meaningful. */
	data->no_nodes &= 0xff;
	return status;
}
/* Public wrapper: logs any non-success status from the raw call. */
int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
	int ret = __pnp_bios_dev_node_info(data);

	if (ret)
		pnpbios_print_status("dev_node_info", ret);

	return ret;
}
/*
* Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible
* death if they are asked to access the "current" configuration.
* Therefore, if it's a matter of indifference, it's better to call
* get_dev_node() and set_dev_node() with boot=1 rather than with boot=0.
*/
/*
* Call PnP BIOS with function 0x01, "get system device node"
* Input: *nodenum = desired node,
* boot = whether to get nonvolatile boot (!=0)
* or volatile current (0) config
* Output: *nodenum=next node or 0xff if no more nodes
*/
/* Raw BIOS call for function 0x01 ("get system device node").
 * The node number is passed through a 16-bit temporary because the
 * BIOS reads/writes it as a word via PNP_TS1. */
static int __pnp_bios_get_dev_node(u8 *nodenum, char boot,
				   struct pnp_bios_node *data)
{
	u16 status;
	u16 tmp_nodenum;
	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	if (!boot && pnpbios_dont_use_current_config)
		return PNP_FUNCTION_NOT_SUPPORTED;
	tmp_nodenum = *nodenum;
	/* arg "boot ? 2 : 1" selects nonvolatile vs. current config. */
	status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2,
			       boot ? 2 : 1, PNP_DS, 0, &tmp_nodenum,
			       sizeof(tmp_nodenum), data, 65536);
	*nodenum = tmp_nodenum;
	return status;
}
/* Public wrapper: logs any non-success status from the raw call. */
int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
{
	int ret = __pnp_bios_get_dev_node(nodenum, boot, data);

	if (ret)
		pnpbios_print_status("get_dev_node", ret);

	return ret;
}
/*
* Call PnP BIOS with function 0x02, "set system device node"
* Input: *nodenum = desired node,
* boot = whether to set nonvolatile boot (!=0)
* or volatile current (0) config
*/
/* Raw BIOS call for function 0x02 ("set system device node").
 * boot selects nonvolatile (non-zero) vs. current (0) configuration. */
static int __pnp_bios_set_dev_node(u8 nodenum, char boot,
				   struct pnp_bios_node *data)
{
	u16 status;
	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	if (!boot && pnpbios_dont_use_current_config)
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1,
			       boot ? 2 : 1, PNP_DS, 0, 0, data, 65536, NULL,
			       0);
	return status;
}
/* Public wrapper: logs failures and, after changing the volatile
 * ("current") config, re-reads the node to refresh *data. */
int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
{
	int ret = __pnp_bios_set_dev_node(nodenum, boot, data);

	if (ret) {
		pnpbios_print_status("set_dev_node", ret);
		return ret;
	}

	if (!boot)	/* Update devlist */
		ret = pnp_bios_get_dev_node(&nodenum, boot, data);

	return ret;
}
/*
* Call PnP BIOS with function 0x05, "get docking station information"
*/
/* BIOS function 0x05: fill *data with docking station information.
 * Returns a PNP_* status code (no logging wrapper for this one). */
int pnp_bios_dock_station_info(struct pnp_docking_station_info *data)
{
	u16 status;
	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1,
			       PNP_DS, 0, 0, 0, 0, data,
			       sizeof(struct pnp_docking_station_info), NULL,
			       0);
	return status;
}
/*
* Call PnP BIOS with function 0x0a, "get statically allocated resource
* information"
*/
/* Raw BIOS call for function 0x0a ("get statically allocated resource
 * information"); info must be a 64KB buffer mapped via PNP_TS1. */
static int __pnp_bios_get_stat_res(char *info)
{
	u16 status;
	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1,
			       PNP_DS, 0, 0, 0, 0, info, 65536, NULL, 0);
	return status;
}
/* Public wrapper: logs any non-success status from the raw call. */
int pnp_bios_get_stat_res(char *info)
{
	int ret = __pnp_bios_get_stat_res(info);

	if (ret)
		pnpbios_print_status("get_stat_res", ret);

	return ret;
}
/*
* Call PnP BIOS with function 0x40, "get isa pnp configuration structure"
*/
/* Raw BIOS call for function 0x40 ("get isa pnp configuration
 * structure"); fills *data via the PNP_TS1 transfer selector. */
static int __pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
	u16 status;
	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS,
			       0, 0, 0, 0, data,
			       sizeof(struct pnp_isa_config_struc), NULL, 0);
	return status;
}
/* Public wrapper: logs any non-success status from the raw call. */
int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
	int ret = __pnp_bios_isapnp_config(data);

	if (ret)
		pnpbios_print_status("isapnp_config", ret);

	return ret;
}
/*
* Call PnP BIOS with function 0x41, "get ESCD info"
*/
/* Raw BIOS call for function 0x41 ("get ESCD info"); note the ESCD
 * calls use ESCD_FUNCTION_NOT_SUPPORTED, not the PNP_ variant. */
static int __pnp_bios_escd_info(struct escd_info_struc *data)
{
	u16 status;
	if (!pnp_bios_present())
		return ESCD_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4,
			       PNP_TS1, PNP_DS, data,
			       sizeof(struct escd_info_struc), NULL, 0);
	return status;
}
/* Public wrapper: logs any non-success status from the raw call. */
int pnp_bios_escd_info(struct escd_info_struc *data)
{
	int ret = __pnp_bios_escd_info(data);

	if (ret)
		pnpbios_print_status("escd_info", ret);

	return ret;
}
/*
* Call PnP BIOS function 0x42, "read ESCD"
* nvram_base is determined by calling escd_info
*/
/* Raw BIOS call for function 0x42 ("read ESCD").  nvram_base comes
 * from a prior escd_info call and is mapped via PNP_TS2. */
static int __pnp_bios_read_escd(char *data, u32 nvram_base)
{
	u16 status;
	if (!pnp_bios_present())
		return ESCD_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0,
			       0, data, 65536, __va(nvram_base), 65536);
	return status;
}
/* Public wrapper: logs any non-success status from the raw call. */
int pnp_bios_read_escd(char *data, u32 nvram_base)
{
	int ret = __pnp_bios_read_escd(data, nvram_base);

	if (ret)
		pnpbios_print_status("read_escd", ret);

	return ret;
}
/*
 * One-time setup: record the 16-bit BIOS entry point from the
 * installation structure and program the per-CPU GDT entries used for
 * the transition (32-bit thunk code segment, BIOS 16-bit code and data
 * segments), plus the trap descriptor for segment 0x40.
 */
void pnpbios_calls_init(union pnp_bios_install_struct *header)
{
	int i;
	spin_lock_init(&pnp_bios_lock);
	pnp_bios_callpoint.offset = header->fields.pm16offset;
	pnp_bios_callpoint.segment = PNP_CS16;
	/* 16-bit data segment covering the BIOS data area at 0x400. */
	bad_bios_desc.a = 0;
	bad_bios_desc.b = 0x00409200;
	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
	for_each_possible_cpu(i) {
		struct desc_struct *gdt = get_cpu_gdt_table(i);
		if (!gdt)
			continue;
		set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
		set_base(gdt[GDT_ENTRY_PNPBIOS_CS16],
			 __va(header->fields.pm16cseg));
		set_base(gdt[GDT_ENTRY_PNPBIOS_DS],
			 __va(header->fields.pm16dseg));
	}
}
| gpl-2.0 |
fileton/linux | sound/soc/codecs/wm0010.c | 189 | 23481 | /*
* wm0010.c -- WM0010 DSP Driver
*
* Copyright 2012 Wolfson Microelectronics PLC.
*
* Authors: Mark Brown <broonie@opensource.wolfsonmicro.com>
* Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
* Scott Ling <sl@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <sound/soc.h>
#include <sound/wm0010.h>
/* dsp_target value identifying a WM0010 .dfw image. */
#define DEVICE_ID_WM0010	10

/* We only support v1 of the .dfw INFO record */
#define INFO_VERSION		1

/* Record types found in a .dfw firmware image. */
enum dfw_cmd {
	DFW_CMD_FUSE = 0x01,
	DFW_CMD_CODE_HDR,
	DFW_CMD_CODE_DATA,
	DFW_CMD_PLL,
	DFW_CMD_INFO = 0xff
};

/* Generic .dfw record header: 1-byte command, 24-bit payload length,
 * 32-bit address, followed by the payload (flexible array). */
struct dfw_binrec {
	u8 command;
	u32 length:24;
	u32 address;
	uint8_t data[0];
} __packed;

/* Payload of a DFW_CMD_INFO record. */
struct dfw_inforec {
	u8 info_version;
	u8 tool_major_version;
	u8 tool_minor_version;
	u8 dsp_target;
};

/* Full DFW_CMD_PLL record; on wm0010 only clkctrl1 is consumed. */
struct dfw_pllrec {
	u8 command;
	u32 length:24;
	u32 address;
	u32 clkctrl1;
	u32 clkctrl2;
	u32 clkctrl3;
	u32 ldetctrl;
	u32 uart_div;
	u32 spi_div;
} __packed;
/* SYSCLK-to-PLL lookup table, ordered by descending max_sysclk; the
 * first entry whose max_sysclk is <= the actual SYSCLK is used by
 * wm0010_set_sysclk(). */
static struct pll_clock_map {
	int max_sysclk;
	int max_pll_spi_speed;
	u32 pll_clkctrl1;
} pll_clock_map[] = {			     /* Dividers */
	{ 22000000, 26000000, 0x00201f11 },  /* 2,32,2  */
	{ 18000000, 26000000, 0x00203f21 },  /* 2,64,4  */
	{ 14000000, 26000000, 0x00202620 },  /* 1,39,4  */
	{ 10000000, 22000000, 0x00203120 },  /* 1,50,4  */
	{ 6500000,  22000000, 0x00204520 },  /* 1,70,4  */
	{ 5500000,  22000000, 0x00103f10 },  /* 1,64,2  */
};
/* Boot state machine, advanced in order by wm0010_boot(). */
enum wm0010_state {
	WM0010_POWER_OFF,
	WM0010_OUT_OF_RESET,
	WM0010_BOOTROM,
	WM0010_STAGE2,
	WM0010_FIRMWARE,
};

/* Driver-private state; "lock" guards boot/halt sequencing, while
 * "irq_lock" guards the "state" field shared with the IRQ handler. */
struct wm0010_priv {
	struct snd_soc_codec *codec;

	struct mutex lock;
	struct device *dev;

	struct wm0010_pdata pdata;

	int gpio_reset;
	int gpio_reset_value;

	struct regulator_bulk_data core_supplies[2];
	struct regulator *dbvdd;

	int sysclk;

	enum wm0010_state state;
	bool boot_failed;
	bool ready;
	bool pll_running;
	int max_spi_freq;
	int board_max_spi_speed;
	u32 pll_clkctrl1;

	spinlock_t irq_lock;
	int irq;

	struct completion boot_completion;
};

/* Bundled SPI message/transfer with its DMA-safe buffers. */
struct wm0010_spi_msg {
	struct spi_message m;
	struct spi_transfer t;
	u8 *tx_buf;
	u8 *rx_buf;
	size_t len;
};
/* Single clock supply widget; all audio paths depend on CLKIN. */
static const struct snd_soc_dapm_widget wm0010_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("CLKIN",  SND_SOC_NOPM, 0, 0, NULL, 0),
};

/* The two serial interfaces are cross-connected through the DSP. */
static const struct snd_soc_dapm_route wm0010_dapm_routes[] = {
	{ "SDI2 Capture", NULL, "SDI1 Playback" },
	{ "SDI1 Capture", NULL, "SDI2 Playback" },

	{ "SDI1 Capture", NULL, "CLKIN" },
	{ "SDI2 Capture", NULL, "CLKIN" },
	{ "SDI1 Playback", NULL, "CLKIN" },
	{ "SDI2 Playback", NULL, "CLKIN" },
};
/* Return a human-readable name for a boot state ("null" if invalid). */
static const char *wm0010_state_to_str(enum wm0010_state state)
{
	static const char * const names[] = {
		"Power off",
		"Out of reset",
		"Boot ROM",
		"Stage2",
		"Firmware"
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "null";

	return names[state];
}
/* Called with wm0010->lock held */
/*
 * Power the DSP down: put it back into reset and drop the regulators,
 * then mark the state POWER_OFF.  A no-op when already powered off.
 */
static void wm0010_halt(struct snd_soc_codec *codec)
{
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
	unsigned long flags;
	enum wm0010_state state;
	/* Fetch the wm0010 state */
	spin_lock_irqsave(&wm0010->irq_lock, flags);
	state = wm0010->state;
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);
	switch (state) {
	case WM0010_POWER_OFF:
		/* If there's nothing to do, bail out */
		return;
	case WM0010_OUT_OF_RESET:
	case WM0010_BOOTROM:
	case WM0010_STAGE2:
	case WM0010_FIRMWARE:
		/* Remember to put chip back into reset */
		gpio_set_value_cansleep(wm0010->gpio_reset,
					wm0010->gpio_reset_value);
		/* Disable the regulators */
		regulator_disable(wm0010->dbvdd);
		regulator_bulk_disable(ARRAY_SIZE(wm0010->core_supplies),
				       wm0010->core_supplies);
		break;
	}
	spin_lock_irqsave(&wm0010->irq_lock, flags);
	wm0010->state = WM0010_POWER_OFF;
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);
}
/* One queued firmware-download SPI transfer; "done" is set only on the
 * final transfer so the submitter can wait for the whole chain. */
struct wm0010_boot_xfer {
	struct list_head list;
	struct snd_soc_codec *codec;
	struct completion *done;
	struct spi_message m;
	struct spi_transfer t;
};
/* Called with wm0010->lock held */
static void wm0010_mark_boot_failure(struct wm0010_priv *wm0010)
{
	unsigned long flags;
	enum wm0010_state state;

	/* Snapshot the current state under the IRQ lock for the log. */
	spin_lock_irqsave(&wm0010->irq_lock, flags);
	state = wm0010->state;
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);

	dev_err(wm0010->dev, "Failed to transition from `%s' state to `%s' state\n",
		wm0010_state_to_str(state), wm0010_state_to_str(state + 1));

	wm0010->boot_failed = true;
}
/*
 * SPI completion callback for firmware-download transfers: decode the
 * big-endian status words echoed back by the DSP, flag any error codes,
 * and signal the waiter when this was the final transfer.
 */
static void wm0010_boot_xfer_complete(void *data)
{
	struct wm0010_boot_xfer *xfer = data;
	struct snd_soc_codec *codec = xfer->codec;
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
	u32 *out32 = xfer->t.rx_buf;
	int i;
	if (xfer->m.status != 0) {
		dev_err(codec->dev, "SPI transfer failed: %d\n",
			xfer->m.status);
		wm0010_mark_boot_failure(wm0010);
		if (xfer->done)
			complete(xfer->done);
		return;
	}
	/* Scan every received 32-bit word for DSP status codes. */
	for (i = 0; i < xfer->t.len / 4; i++) {
		dev_dbg(codec->dev, "%d: %04x\n", i, out32[i]);
		switch (be32_to_cpu(out32[i])) {
		case 0xe0e0e0e0:
			dev_err(codec->dev,
				"%d: ROM error reported in stage 2\n", i);
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x55555555:
			/* Idle pattern: only an error once stage 2 runs. */
			if (wm0010->state < WM0010_STAGE2)
				break;
			dev_err(codec->dev,
				"%d: ROM bootloader running in stage 2\n", i);
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed0000:
			dev_dbg(codec->dev, "Stage2 loader running\n");
			break;
		case 0x0fed0007:
			dev_dbg(codec->dev, "CODE_HDR packet received\n");
			break;
		case 0x0fed0008:
			dev_dbg(codec->dev, "CODE_DATA packet received\n");
			break;
		case 0x0fed0009:
			dev_dbg(codec->dev, "Download complete\n");
			break;
		case 0x0fed000c:
			dev_dbg(codec->dev, "Application start\n");
			break;
		case 0x0fed000e:
			dev_dbg(codec->dev, "PLL packet received\n");
			wm0010->pll_running = true;
			break;
		case 0x0fed0025:
			dev_err(codec->dev, "Device reports image too long\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed002c:
			dev_err(codec->dev, "Device reports bad SPI packet\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed0031:
			dev_err(codec->dev, "Device reports SPI read overflow\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed0032:
			dev_err(codec->dev, "Device reports SPI underclock\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed0033:
			dev_err(codec->dev, "Device reports bad header packet\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed0034:
			dev_err(codec->dev, "Device reports invalid packet type\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed0035:
			dev_err(codec->dev, "Device reports data before header error\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		case 0x0fed0038:
			/* NOTE(review): this error deliberately does not mark
			 * a boot failure — appears recoverable; confirm. */
			dev_err(codec->dev, "Device reports invalid PLL packet\n");
			break;
		case 0x0fed003a:
			dev_err(codec->dev, "Device reports packet alignment error\n");
			wm0010_mark_boot_failure(wm0010);
			break;
		default:
			dev_err(codec->dev, "Unrecognised return 0x%x\n",
				be32_to_cpu(out32[i]));
			wm0010_mark_boot_failure(wm0010);
			break;
		}
		if (wm0010->boot_failed)
			break;
	}
	if (xfer->done)
		complete(xfer->done);
}
/* Convert "len" bytes of little-endian 64-bit words to big-endian for
 * transmission to the DSP.  len must be a multiple of 8. */
static void byte_swap_64(u64 *data_in, u64 *data_out, u32 len)
{
	u32 i, words = len / 8;

	for (i = 0; i < words; i++)
		data_out[i] = cpu_to_be64(le64_to_cpu(data_in[i]));
}
/*
 * Parse a .dfw firmware image and stream its records to the DSP as a
 * chain of async SPI transfers, byte-swapped for the wire.  The first
 * record must be a v1 INFO record targeting the WM0010.  Waits for the
 * final transfer's completion before freeing all buffers.
 * Returns 0 on success or a negative errno.
 */
static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
{
	struct spi_device *spi = to_spi_device(codec->dev);
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
	struct list_head xfer_list;
	struct wm0010_boot_xfer *xfer;
	int ret;
	struct completion done;
	const struct firmware *fw;
	const struct dfw_binrec *rec;
	const struct dfw_inforec *inforec;
	u64 *img;
	u8 *out, dsp;
	u32 len, offset;
	INIT_LIST_HEAD(&xfer_list);
	ret = request_firmware(&fw, name, codec->dev);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to request application(%s): %d\n",
			name, ret);
		return ret;
	}
	rec = (const struct dfw_binrec *)fw->data;
	inforec = (const struct dfw_inforec *)rec->data;
	offset = 0;
	dsp = inforec->dsp_target;
	wm0010->boot_failed = false;
	/* NOTE(review): this check can never fire (the list was just
	 * initialised) and would leak fw if it did — dead defensive code. */
	if (WARN_ON(!list_empty(&xfer_list)))
		return -EINVAL;
	init_completion(&done);
	/* First record should be INFO */
	if (rec->command != DFW_CMD_INFO) {
		dev_err(codec->dev, "First record not INFO\r\n");
		ret = -EINVAL;
		goto abort;
	}
	if (inforec->info_version != INFO_VERSION) {
		dev_err(codec->dev,
			"Unsupported version (%02d) of INFO record\r\n",
			inforec->info_version);
		ret = -EINVAL;
		goto abort;
	}
	dev_dbg(codec->dev, "Version v%02d INFO record found\r\n",
		inforec->info_version);
	/* Check it's a DSP file */
	if (dsp != DEVICE_ID_WM0010) {
		dev_err(codec->dev, "Not a WM0010 firmware file.\r\n");
		ret = -EINVAL;
		goto abort;
	}
	/* Skip the info record as we don't need to send it */
	offset += ((rec->length) + 8);
	rec = (void *)&rec->data[rec->length];
	/* Queue one async transfer per remaining record. */
	while (offset < fw->size) {
		dev_dbg(codec->dev,
			"Packet: command %d, data length = 0x%x\r\n",
			rec->command, rec->length);
		len = rec->length + 8;
		xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
		if (!xfer) {
			ret = -ENOMEM;
			goto abort;
		}
		xfer->codec = codec;
		list_add_tail(&xfer->list, &xfer_list);
		/* DMA-safe RX and TX buffers for this record. */
		out = kzalloc(len, GFP_KERNEL | GFP_DMA);
		if (!out) {
			ret = -ENOMEM;
			goto abort1;
		}
		xfer->t.rx_buf = out;
		img = kzalloc(len, GFP_KERNEL | GFP_DMA);
		if (!img) {
			ret = -ENOMEM;
			goto abort1;
		}
		xfer->t.tx_buf = img;
		byte_swap_64((u64 *)&rec->command, img, len);
		spi_message_init(&xfer->m);
		xfer->m.complete = wm0010_boot_xfer_complete;
		xfer->m.context = xfer;
		xfer->t.len = len;
		xfer->t.bits_per_word = 8;
		/* Before the PLL runs we must stay below SYSCLK/6. */
		if (!wm0010->pll_running) {
			xfer->t.speed_hz = wm0010->sysclk / 6;
		} else {
			xfer->t.speed_hz = wm0010->max_spi_freq;
			if (wm0010->board_max_spi_speed &&
			   (wm0010->board_max_spi_speed < wm0010->max_spi_freq))
					xfer->t.speed_hz = wm0010->board_max_spi_speed;
		}
		/* Store max usable spi frequency for later use */
		wm0010->max_spi_freq = xfer->t.speed_hz;
		spi_message_add_tail(&xfer->t, &xfer->m);
		offset += ((rec->length) + 8);
		rec = (void *)&rec->data[rec->length];
		if (offset >= fw->size) {
			dev_dbg(codec->dev, "All transfers scheduled\n");
			xfer->done = &done;
		}
		ret = spi_async(spi, &xfer->m);
		if (ret != 0) {
			dev_err(codec->dev, "Write failed: %d\n", ret);
			goto abort1;
		}
		if (wm0010->boot_failed) {
			dev_dbg(codec->dev, "Boot fail!\n");
			ret = -EINVAL;
			goto abort1;
		}
	}
	wait_for_completion(&done);
	ret = 0;
abort1:
	/* Tear down every queued transfer and its buffers. */
	while (!list_empty(&xfer_list)) {
		xfer = list_first_entry(&xfer_list, struct wm0010_boot_xfer,
					list);
		kfree(xfer->t.rx_buf);
		kfree(xfer->t.tx_buf);
		list_del(&xfer->list);
		kfree(xfer);
	}
abort:
	release_firmware(fw);
	return ret;
}
/*
 * Download the stage 2 bootloader ("wm0010_stage2.bin") in a single
 * synchronous SPI transfer at SYSCLK/10, then verify the boot ROM
 * echoed its 0x55 idle pattern for every byte.
 * Returns 0 on success or a negative errno.
 */
static int wm0010_stage2_load(struct snd_soc_codec *codec)
{
	struct spi_device *spi = to_spi_device(codec->dev);
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
	const struct firmware *fw;
	struct spi_message m;
	struct spi_transfer t;
	u32 *img;
	u8 *out;
	int i;
	int ret = 0;
	ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
			ret);
		return ret;
	}
	dev_dbg(codec->dev, "Downloading %zu byte stage 2 loader\n", fw->size);
	/* Copy to local buffer first as vmalloc causes problems for dma */
	img = kzalloc(fw->size, GFP_KERNEL | GFP_DMA);
	if (!img) {
		ret = -ENOMEM;
		goto abort2;
	}
	out = kzalloc(fw->size, GFP_KERNEL | GFP_DMA);
	if (!out) {
		ret = -ENOMEM;
		goto abort1;
	}
	memcpy(img, &fw->data[0], fw->size);
	spi_message_init(&m);
	memset(&t, 0, sizeof(t));
	t.rx_buf = out;
	t.tx_buf = img;
	t.len = fw->size;
	t.bits_per_word = 8;
	/* The ROM bootloader needs a conservative SPI clock. */
	t.speed_hz = wm0010->sysclk / 10;
	spi_message_add_tail(&t, &m);
	dev_dbg(codec->dev, "Starting initial download at %dHz\n",
		t.speed_hz);
	ret = spi_sync(spi, &m);
	if (ret != 0) {
		dev_err(codec->dev, "Initial download failed: %d\n", ret);
		goto abort;
	}
	/* Look for errors from the boot ROM */
	for (i = 0; i < fw->size; i++) {
		if (out[i] != 0x55) {
			dev_err(codec->dev, "Boot ROM error: %x in %d\n",
				out[i], i);
			wm0010_mark_boot_failure(wm0010);
			ret = -EBUSY;
			goto abort;
		}
	}
abort:
	kfree(out);
abort1:
	kfree(img);
abort2:
	release_firmware(fw);
	return ret;
}
/*
 * Power up and boot the DSP: enable supplies, release reset, download
 * the stage 2 loader, optionally send a PLL configuration record, then
 * download the application firmware ("wm0010.dfw").
 *
 * Fixes vs. the original:
 *  - the "wm0010_stage2.bin" availability request leaked its struct
 *    firmware reference (it was never used nor released here);
 *  - the PLL record buffers (out/img_swap) leaked on the allocation
 *    and SPI error paths, which jumped straight to "abort" without
 *    freeing them — new abort_swap/abort_out labels free them.
 *
 * Returns 0 on success or a negative errno; on failure the chip is
 * put back into reset via wm0010_halt().
 */
static int wm0010_boot(struct snd_soc_codec *codec)
{
	struct spi_device *spi = to_spi_device(codec->dev);
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
	unsigned long flags;
	int ret;
	const struct firmware *fw;
	struct spi_message m;
	struct spi_transfer t;
	struct dfw_pllrec pll_rec;
	u32 *p, len;
	u64 *img_swap;
	u8 *out;
	int i;

	spin_lock_irqsave(&wm0010->irq_lock, flags);
	if (wm0010->state != WM0010_POWER_OFF)
		dev_warn(wm0010->dev, "DSP already powered up!\n");
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);

	if (wm0010->sysclk > 26000000) {
		dev_err(codec->dev, "Max DSP clock frequency is 26MHz\n");
		ret = -ECANCELED;
		goto err;
	}

	mutex_lock(&wm0010->lock);
	wm0010->pll_running = false;

	dev_dbg(codec->dev, "max_spi_freq: %d\n", wm0010->max_spi_freq);

	ret = regulator_bulk_enable(ARRAY_SIZE(wm0010->core_supplies),
				    wm0010->core_supplies);
	if (ret != 0) {
		dev_err(&spi->dev, "Failed to enable core supplies: %d\n",
			ret);
		mutex_unlock(&wm0010->lock);
		goto err;
	}

	ret = regulator_enable(wm0010->dbvdd);
	if (ret != 0) {
		dev_err(&spi->dev, "Failed to enable DBVDD: %d\n", ret);
		goto err_core;
	}

	/* Release reset */
	gpio_set_value_cansleep(wm0010->gpio_reset, !wm0010->gpio_reset_value);
	spin_lock_irqsave(&wm0010->irq_lock, flags);
	wm0010->state = WM0010_OUT_OF_RESET;
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);

	/* First the bootloader: check it is available before waiting on
	 * the DSP.  wm0010_stage2_load() requests it again, so release
	 * this reference immediately (the original leaked it). */
	ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
			ret);
		goto abort;
	}
	release_firmware(fw);

	if (!wait_for_completion_timeout(&wm0010->boot_completion,
					 msecs_to_jiffies(20)))
		dev_err(codec->dev, "Failed to get interrupt from DSP\n");

	spin_lock_irqsave(&wm0010->irq_lock, flags);
	wm0010->state = WM0010_BOOTROM;
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);

	ret = wm0010_stage2_load(codec);
	if (ret)
		goto abort;

	if (!wait_for_completion_timeout(&wm0010->boot_completion,
					 msecs_to_jiffies(20)))
		dev_err(codec->dev, "Failed to get interrupt from DSP loader.\n");

	spin_lock_irqsave(&wm0010->irq_lock, flags);
	wm0010->state = WM0010_STAGE2;
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);

	/* Only initialise PLL if max_spi_freq initialised */
	if (wm0010->max_spi_freq) {

		/* Initialise a PLL record */
		memset(&pll_rec, 0, sizeof(pll_rec));
		pll_rec.command = DFW_CMD_PLL;
		pll_rec.length = (sizeof(pll_rec) - 8);

		/* On wm0010 only the CLKCTRL1 value is used */
		pll_rec.clkctrl1 = wm0010->pll_clkctrl1;

		ret = -ENOMEM;
		len = pll_rec.length + 8;
		out = kzalloc(len, GFP_KERNEL | GFP_DMA);
		if (!out) {
			dev_err(codec->dev,
				"Failed to allocate RX buffer\n");
			goto abort;
		}

		img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
		if (!img_swap)
			goto abort_out;

		/* We need to re-order for 0010 */
		byte_swap_64((u64 *)&pll_rec, img_swap, len);

		spi_message_init(&m);
		memset(&t, 0, sizeof(t));
		t.rx_buf = out;
		t.tx_buf = img_swap;
		t.len = len;
		t.bits_per_word = 8;
		t.speed_hz = wm0010->sysclk / 6;
		spi_message_add_tail(&t, &m);

		ret = spi_sync(spi, &m);
		if (ret != 0) {
			dev_err(codec->dev, "First PLL write failed: %d\n", ret);
			goto abort_swap;
		}

		/* Use a second send of the message to get the return status */
		ret = spi_sync(spi, &m);
		if (ret != 0) {
			dev_err(codec->dev, "Second PLL write failed: %d\n", ret);
			goto abort_swap;
		}

		p = (u32 *)out;

		/* Look for PLL active code from the DSP */
		for (i = 0; i < len / 4; i++) {
			if (*p == 0x0e00ed0f) {
				dev_dbg(codec->dev, "PLL packet received\n");
				wm0010->pll_running = true;
				break;
			}
			p++;
		}

		kfree(img_swap);
		kfree(out);
	} else
		dev_dbg(codec->dev, "Not enabling DSP PLL.");

	ret = wm0010_firmware_load("wm0010.dfw", codec);

	if (ret != 0)
		goto abort;

	spin_lock_irqsave(&wm0010->irq_lock, flags);
	wm0010->state = WM0010_FIRMWARE;
	spin_unlock_irqrestore(&wm0010->irq_lock, flags);

	mutex_unlock(&wm0010->lock);

	return 0;

abort_swap:
	kfree(img_swap);
abort_out:
	kfree(out);
abort:
	/* Put the chip back into reset */
	wm0010_halt(codec);
	mutex_unlock(&wm0010->lock);
	return ret;

err_core:
	mutex_unlock(&wm0010->lock);
	regulator_bulk_disable(ARRAY_SIZE(wm0010->core_supplies),
			       wm0010->core_supplies);
err:
	return ret;
}
static int wm0010_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
switch (level) {
case SND_SOC_BIAS_ON:
if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_PREPARE)
wm0010_boot(codec);
break;
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_PREPARE) {
mutex_lock(&wm0010->lock);
wm0010_halt(codec);
mutex_unlock(&wm0010->lock);
}
break;
case SND_SOC_BIAS_OFF:
break;
}
return 0;
}
/*
 * wm0010_set_sysclk - record the system clock and derive PLL/SPI limits.
 *
 * Stores @freq; when it reaches the smallest threshold in pll_clock_map
 * the first matching entry supplies the maximum SPI rate and the
 * CLKCTRL1 value for the PLL, otherwise no SPI limit is applied.
 */
static int wm0010_set_sysclk(struct snd_soc_codec *codec, int source,
			     int clk_id, unsigned int freq, int dir)
{
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
	unsigned int idx;

	wm0010->sysclk = freq;

	/* Below the smallest table entry there is no PLL constraint */
	if (freq < pll_clock_map[ARRAY_SIZE(pll_clock_map) - 1].max_sysclk) {
		wm0010->max_spi_freq = 0;
		return 0;
	}

	/* Table is scanned in order; first threshold we meet wins */
	for (idx = 0; idx < ARRAY_SIZE(pll_clock_map); idx++) {
		if (freq >= pll_clock_map[idx].max_sysclk) {
			wm0010->max_spi_freq =
				pll_clock_map[idx].max_pll_spi_speed;
			wm0010->pll_clkctrl1 =
				pll_clock_map[idx].pll_clkctrl1;
			break;
		}
	}

	return 0;
}
/* Forward declaration; the implementation follows the DAI tables below */
static int wm0010_probe(struct snd_soc_codec *codec);

/* ASoC codec driver description for the WM0010 */
static struct snd_soc_codec_driver soc_codec_dev_wm0010 = {
	.probe = wm0010_probe,
	.set_bias_level = wm0010_set_bias_level,
	.set_sysclk = wm0010_set_sysclk,
	.idle_bias_off = true,
	.dapm_widgets = wm0010_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wm0010_dapm_widgets),
	.dapm_routes = wm0010_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(wm0010_dapm_routes),
};
/* PCM rates and sample formats advertised on both SDI interfaces */
#define WM0010_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
#define WM0010_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
		SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE |\
		SNDRV_PCM_FMTBIT_S32_LE)

/* Two serial digital interfaces, each full duplex, mono or stereo */
static struct snd_soc_dai_driver wm0010_dai[] = {
	{
		.name = "wm0010-sdi1",
		.playback = {
			.stream_name = "SDI1 Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM0010_RATES,
			.formats = WM0010_FORMATS,
		},
		.capture = {
			.stream_name = "SDI1 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM0010_RATES,
			.formats = WM0010_FORMATS,
		},
	},
	{
		.name = "wm0010-sdi2",
		.playback = {
			.stream_name = "SDI2 Playback",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM0010_RATES,
			.formats = WM0010_FORMATS,
		},
		.capture = {
			.stream_name = "SDI2 Capture",
			.channels_min = 1,
			.channels_max = 2,
			.rates = WM0010_RATES,
			.formats = WM0010_FORMATS,
		},
	},
};
/**
 * wm0010_irq - DSP boot-stage interrupt handler
 * @irq: interrupt number
 * @data: struct wm0010_priv for the device
 *
 * During the boot sequence the DSP signals the end of each stage;
 * complete boot_completion so the booting thread can proceed.
 * Interrupts arriving in any other state are not ours.
 *
 * The unreachable trailing "return IRQ_NONE" after the switch (every
 * case already returns) has been removed.
 */
static irqreturn_t wm0010_irq(int irq, void *data)
{
	struct wm0010_priv *wm0010 = data;

	switch (wm0010->state) {
	case WM0010_OUT_OF_RESET:
	case WM0010_BOOTROM:
	case WM0010_STAGE2:
		/* irq_lock serialises against state updates in the boot path */
		spin_lock(&wm0010->irq_lock);
		complete(&wm0010->boot_completion);
		spin_unlock(&wm0010->irq_lock);
		return IRQ_HANDLED;
	default:
		return IRQ_NONE;
	}
}
/* ASoC codec probe: stash the codec pointer for use by the boot code */
static int wm0010_probe(struct snd_soc_codec *codec)
{
	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);

	wm0010->codec = codec;

	return 0;
}
/**
 * wm0010_spi_probe - bind the driver to a WM0010 SPI device
 * @spi: SPI device
 *
 * Allocates driver state, obtains the regulators and the mandatory
 * reset GPIO, installs the boot-ready interrupt handler and registers
 * the ASoC codec.  Returns 0 on success or a negative errno.
 *
 * Fixes over the original: IRQF_ONESHOT was ORed into the flags twice
 * (once into @trigger and again at the call site), and the requested
 * IRQ was leaked when irq_set_irq_wake() or snd_soc_register_codec()
 * failed.
 */
static int wm0010_spi_probe(struct spi_device *spi)
{
	unsigned long gpio_flags;
	int ret;
	int trigger;
	int irq;
	struct wm0010_priv *wm0010;

	wm0010 = devm_kzalloc(&spi->dev, sizeof(*wm0010),
			      GFP_KERNEL);
	if (!wm0010)
		return -ENOMEM;

	mutex_init(&wm0010->lock);
	spin_lock_init(&wm0010->irq_lock);

	spi_set_drvdata(spi, wm0010);
	wm0010->dev = &spi->dev;

	if (dev_get_platdata(&spi->dev))
		memcpy(&wm0010->pdata, dev_get_platdata(&spi->dev),
		       sizeof(wm0010->pdata));

	init_completion(&wm0010->boot_completion);

	wm0010->core_supplies[0].supply = "AVDD";
	wm0010->core_supplies[1].supply = "DCVDD";
	ret = devm_regulator_bulk_get(wm0010->dev,
				      ARRAY_SIZE(wm0010->core_supplies),
				      wm0010->core_supplies);
	if (ret != 0) {
		dev_err(wm0010->dev, "Failed to obtain core supplies: %d\n",
			ret);
		return ret;
	}

	wm0010->dbvdd = devm_regulator_get(wm0010->dev, "DBVDD");
	if (IS_ERR(wm0010->dbvdd)) {
		ret = PTR_ERR(wm0010->dbvdd);
		dev_err(wm0010->dev, "Failed to obtain DBVDD: %d\n", ret);
		return ret;
	}

	/* The reset GPIO is mandatory: without it the DSP cannot be booted */
	if (wm0010->pdata.gpio_reset) {
		wm0010->gpio_reset = wm0010->pdata.gpio_reset;

		if (wm0010->pdata.reset_active_high)
			wm0010->gpio_reset_value = 1;
		else
			wm0010->gpio_reset_value = 0;

		if (wm0010->gpio_reset_value)
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		else
			gpio_flags = GPIOF_OUT_INIT_LOW;

		ret = devm_gpio_request_one(wm0010->dev, wm0010->gpio_reset,
					    gpio_flags, "wm0010 reset");
		if (ret < 0) {
			dev_err(wm0010->dev,
				"Failed to request GPIO for DSP reset: %d\n",
				ret);
			return ret;
		}
	} else {
		dev_err(wm0010->dev, "No reset GPIO configured\n");
		return -EINVAL;
	}

	wm0010->state = WM0010_POWER_OFF;

	irq = spi->irq;
	if (wm0010->pdata.irq_flags)
		trigger = wm0010->pdata.irq_flags;
	else
		trigger = IRQF_TRIGGER_FALLING;
	trigger |= IRQF_ONESHOT;

	/* trigger already carries IRQF_ONESHOT; don't OR it in a second time */
	ret = request_threaded_irq(irq, NULL, wm0010_irq, trigger,
				   "wm0010", wm0010);
	if (ret) {
		dev_err(wm0010->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		return ret;
	}
	wm0010->irq = irq;

	ret = irq_set_irq_wake(irq, 1);
	if (ret) {
		dev_err(wm0010->dev, "Failed to set IRQ %d as wake source: %d\n",
			irq, ret);
		goto err_irq;
	}

	if (spi->max_speed_hz)
		wm0010->board_max_spi_speed = spi->max_speed_hz;
	else
		wm0010->board_max_spi_speed = 0;

	ret = snd_soc_register_codec(&spi->dev,
				     &soc_codec_dev_wm0010, wm0010_dai,
				     ARRAY_SIZE(wm0010_dai));
	if (ret < 0)
		goto err_irq;

	return 0;

err_irq:
	/* Don't leave the requested IRQ behind on a failed probe */
	free_irq(irq, wm0010);
	return ret;
}
/**
 * wm0010_spi_remove - unbind the driver from a WM0010 SPI device
 * @spi: SPI device
 *
 * Unregisters the codec, puts the DSP back into reset and releases
 * the interrupt.  Fix: irq_set_irq_wake() was previously called even
 * when no IRQ had been recorded; both wake disable and free_irq() are
 * now guarded by the same check.
 */
static int wm0010_spi_remove(struct spi_device *spi)
{
	struct wm0010_priv *wm0010 = spi_get_drvdata(spi);

	snd_soc_unregister_codec(&spi->dev);

	/* Hold the DSP in reset */
	gpio_set_value_cansleep(wm0010->gpio_reset,
				wm0010->gpio_reset_value);

	if (wm0010->irq) {
		/* Disable wake before releasing the interrupt */
		irq_set_irq_wake(wm0010->irq, 0);
		free_irq(wm0010->irq, wm0010);
	}

	return 0;
}
/*
 * SPI driver glue.
 * NOTE(review): .bus is normally assigned by the SPI core during
 * registration — confirm whether carrying it here is still needed.
 */
static struct spi_driver wm0010_spi_driver = {
	.driver = {
		.name = "wm0010",
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},
	.probe = wm0010_spi_probe,
	.remove = wm0010_spi_remove,
};

module_spi_driver(wm0010_spi_driver);

MODULE_DESCRIPTION("ASoC WM0010 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
oceanfly/linux | security/smack/smackfs.c | 189 | 68756 | /*
* Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*
* Authors:
* Casey Schaufler <casey@schaufler-ca.com>
* Ahmed S. Darwish <darwish.07@gmail.com>
*
* Special thanks to the authors of selinuxfs.
*
* Karl MacMillan <kmacmillan@tresys.com>
* James Morris <jmorris@redhat.com>
*
*/
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/cipso_ipv4.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/audit.h>
#include <linux/magic.h>
#include "smack.h"
#define BEBITS (sizeof(__be32) * 8)
/*
* smackfs pseudo filesystem.
*/
enum smk_inos {
SMK_ROOT_INO = 2,
SMK_LOAD = 3, /* load policy */
SMK_CIPSO = 4, /* load label -> CIPSO mapping */
SMK_DOI = 5, /* CIPSO DOI */
SMK_DIRECT = 6, /* CIPSO level indicating direct label */
SMK_AMBIENT = 7, /* internet ambient label */
SMK_NET4ADDR = 8, /* single label hosts */
SMK_ONLYCAP = 9, /* the only "capable" label */
SMK_LOGGING = 10, /* logging */
SMK_LOAD_SELF = 11, /* task specific rules */
SMK_ACCESSES = 12, /* access policy */
SMK_MAPPED = 13, /* CIPSO level indicating mapped label */
SMK_LOAD2 = 14, /* load policy with long labels */
SMK_LOAD_SELF2 = 15, /* load task specific rules with long labels */
SMK_ACCESS2 = 16, /* make an access check with long labels */
SMK_CIPSO2 = 17, /* load long label -> CIPSO mapping */
SMK_REVOKE_SUBJ = 18, /* set rules with subject label to '-' */
SMK_CHANGE_RULE = 19, /* change or add rules (long labels) */
SMK_SYSLOG = 20, /* change syslog label */
SMK_PTRACE = 21, /* set ptrace rule */
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
SMK_UNCONFINED = 22, /* define an unconfined label */
#endif
#if IS_ENABLED(CONFIG_IPV6)
SMK_NET6ADDR = 23, /* single label IPv6 hosts */
#endif /* CONFIG_IPV6 */
};
/*
* List locks
*/
static DEFINE_MUTEX(smack_cipso_lock);
static DEFINE_MUTEX(smack_ambient_lock);
static DEFINE_MUTEX(smk_net4addr_lock);
#if IS_ENABLED(CONFIG_IPV6)
static DEFINE_MUTEX(smk_net6addr_lock);
#endif /* CONFIG_IPV6 */
/*
* This is the "ambient" label for network traffic.
* If it isn't somehow marked, use this.
* It can be reset via smackfs/ambient
*/
struct smack_known *smack_net_ambient;
/*
* This is the level in a CIPSO header that indicates a
* smack label is contained directly in the category set.
* It can be reset via smackfs/direct
*/
int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
/*
* This is the level in a CIPSO header that indicates a
* secid is contained directly in the category set.
* It can be reset via smackfs/mapped
*/
int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
/*
* Allow one label to be unconfined. This is for
* debugging and application bring-up purposes only.
* It is bad and wrong, but everyone seems to expect
* to have it.
*/
struct smack_known *smack_unconfined;
#endif
/*
* If this value is set restrict syslog use to the label specified.
* It can be reset via smackfs/syslog
*/
struct smack_known *smack_syslog_label;
/*
* Ptrace current rule
* SMACK_PTRACE_DEFAULT regular smack ptrace rules (/proc based)
* SMACK_PTRACE_EXACT labels must match, but can be overridden with
* CAP_SYS_PTRACE
* SMACK_PTRACE_DRACONIAN labels must match, CAP_SYS_PTRACE has no effect
*/
int smack_ptrace_rule = SMACK_PTRACE_DEFAULT;
/*
* Certain IP addresses may be designated as single label hosts.
* Packets are sent there unlabeled, but only from tasks that
* can write to the specified label.
*/
LIST_HEAD(smk_net4addr_list);
#if IS_ENABLED(CONFIG_IPV6)
LIST_HEAD(smk_net6addr_list);
#endif /* CONFIG_IPV6 */
/*
* Rule lists are maintained for each label.
* This master list is just for reading /smack/load and /smack/load2.
*/
struct smack_master_list {
struct list_head list;
struct smack_rule *smk_rule;
};
static LIST_HEAD(smack_rule_list);
struct smack_parsed_rule {
struct smack_known *smk_subject;
struct smack_known *smk_object;
int smk_access1;
int smk_access2;
};
static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
/*
* Values for parsing cipso rules
* SMK_DIGITLEN: Length of a digit field in a rule.
* SMK_CIPSOMIN: Minimum possible cipso rule length.
* SMK_CIPSOMAX: Maximum possible cipso rule length.
*/
#define SMK_DIGITLEN 4
#define SMK_CIPSOMIN (SMK_LABELLEN + 2 * SMK_DIGITLEN)
#define SMK_CIPSOMAX (SMK_CIPSOMIN + SMACK_CIPSO_MAXCATNUM * SMK_DIGITLEN)
/*
* Values for parsing MAC rules
* SMK_ACCESS: Maximum possible combination of access permissions
* SMK_ACCESSLEN: Maximum length for a rule access field
* SMK_LOADLEN: Smack rule length
*/
#define SMK_OACCESS "rwxa"
#define SMK_ACCESS "rwxatl"
#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1)
#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
/**
 * smack_catset_bit - set one category bit in a CIPSO category set
 * @cat: category number, 1 based
 * @catsetp: pointer to an SMK_CIPSOLEN byte category bitmap
 *
 * Strictly for CIPSO level manipulation.  Bits count from the most
 * significant bit of the first byte; out-of-range categories are
 * silently ignored.
 */
static inline void smack_catset_bit(unsigned int cat, char *catsetp)
{
	unsigned int bit;

	if (cat == 0 || cat > (SMK_CIPSOLEN * 8))
		return;

	bit = cat - 1;
	catsetp[bit / 8] |= 0x80 >> (bit % 8);
}
/**
 * smk_netlabel_audit_set - fill a netlbl_audit struct
 * @nap: structure to fill
 *
 * Populates @nap from the current task: login uid, audit session id
 * and the secid of the current task's Smack label.
 */
static void smk_netlabel_audit_set(struct netlbl_audit *nap)
{
	struct smack_known *skp = smk_of_current();

	nap->loginuid = audit_get_loginuid(current);
	nap->sessionid = audit_get_sessionid(current);
	nap->secid = skp->smk_secid;
}
/*
* Value for parsing single label host rules
* "1.2.3.4 X"
*/
#define SMK_NETLBLADDRMIN 9
/**
 * smk_set_access - add a rule to the rule list or replace an old rule
 * @srp: the rule to add or replace
 * @rule_list: the list of rules
 * @rule_lock: the rule list lock
 * @global: if non-zero, indicates a global rule
 *
 * Looks through the current subject/object/access list for
 * the subject/object pair and replaces the access that was
 * there. If the pair isn't found add it with the specified
 * access.
 *
 * Returns 0 if nothing goes wrong or -ENOMEM if it fails
 * during the allocation of the new pair to add.
 */
static int smk_set_access(struct smack_parsed_rule *srp,
				struct list_head *rule_list,
				struct mutex *rule_lock, int global)
{
	struct smack_rule *sp;
	struct smack_master_list *smlp;
	int found = 0;
	int rc = 0;

	/* Writers are serialised by the mutex; readers walk under RCU */
	mutex_lock(rule_lock);

	/*
	 * Because the object label is less likely to match
	 * than the subject label check it first
	 */
	list_for_each_entry_rcu(sp, rule_list, list) {
		if (sp->smk_object == srp->smk_object &&
		    sp->smk_subject == srp->smk_subject) {
			found = 1;
			/* access1 enables bits, access2 disables them */
			sp->smk_access |= srp->smk_access1;
			sp->smk_access &= ~srp->smk_access2;
			break;
		}
	}

	if (found == 0) {
		sp = kzalloc(sizeof(*sp), GFP_KERNEL);
		if (sp == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		sp->smk_subject = srp->smk_subject;
		sp->smk_object = srp->smk_object;
		sp->smk_access = srp->smk_access1 & ~srp->smk_access2;

		list_add_rcu(&sp->list, rule_list);
		/*
		 * If this is a global as opposed to self and a new rule
		 * it needs to get added for reporting.
		 */
		if (global) {
			smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
			if (smlp != NULL) {
				smlp->smk_rule = sp;
				list_add_rcu(&smlp->list, &smack_rule_list);
			} else
				rc = -ENOMEM;
		}
	}

out:
	mutex_unlock(rule_lock);
	return rc;
}
/**
 * smk_perm_from_str - parse smack accesses from a text string
 * @string: a text string that contains a Smack accesses code
 *
 * Scans @string one character at a time, accumulating access bits.
 * Both cases are accepted; '-' is a "no access" placeholder.  The
 * first character that is not a recognized access code ends the scan.
 *
 * Returns an integer with respective bits set for specified accesses.
 */
static int smk_perm_from_str(const char *string)
{
	int perm = 0;
	const char *cp = string;

	for (;;) {
		/* fold case so each access needs only one label */
		switch (tolower(*cp++)) {
		case '-':
			break;
		case 'r':
			perm |= MAY_READ;
			break;
		case 'w':
			perm |= MAY_WRITE;
			break;
		case 'x':
			perm |= MAY_EXEC;
			break;
		case 'a':
			perm |= MAY_APPEND;
			break;
		case 't':
			perm |= MAY_TRANSMUTE;
			break;
		case 'l':
			perm |= MAY_LOCK;
			break;
		case 'b':
			perm |= MAY_BRINGUP;
			break;
		default:
			/* includes the terminating NUL */
			return perm;
		}
	}
}
/**
 * smk_fill_rule - Fill Smack rule from strings
 * @subject: subject label string
 * @object: object label string
 * @access1: access string
 * @access2: string with permissions to be removed
 * @rule: Smack rule
 * @import: if non-zero, import labels
 * @len: label length limit
 *
 * Returns 0 on success, appropriate error code on failure.
 */
static int smk_fill_rule(const char *subject, const char *object,
				const char *access1, const char *access2,
				struct smack_parsed_rule *rule, int import,
				int len)
{
	const char *cp;
	struct smack_known *skp;

	if (import) {
		/* Importing creates the labels if they don't yet exist */
		rule->smk_subject = smk_import_entry(subject, len);
		if (IS_ERR(rule->smk_subject))
			return PTR_ERR(rule->smk_subject);

		rule->smk_object = smk_import_entry(object, len);
		if (IS_ERR(rule->smk_object))
			return PTR_ERR(rule->smk_object);
	} else {
		/* Lookup-only mode: both labels must already be known */
		cp = smk_parse_smack(subject, len);
		if (IS_ERR(cp))
			return PTR_ERR(cp);
		skp = smk_find_entry(cp);
		kfree(cp);
		if (skp == NULL)
			return -ENOENT;
		rule->smk_subject = skp;

		cp = smk_parse_smack(object, len);
		if (IS_ERR(cp))
			return PTR_ERR(cp);
		skp = smk_find_entry(cp);
		kfree(cp);
		if (skp == NULL)
			return -ENOENT;
		rule->smk_object = skp;
	}

	rule->smk_access1 = smk_perm_from_str(access1);
	if (access2)
		rule->smk_access2 = smk_perm_from_str(access2);
	else
		/* No explicit disable set: clear everything not enabled */
		rule->smk_access2 = ~rule->smk_access1;

	return 0;
}
/**
 * smk_parse_rule - parse Smack rule from load string
 * @data: string to be parsed whose size is SMK_LOADLEN
 * @rule: Smack rule
 * @import: if non-zero, import labels
 *
 * Returns 0 on success, -1 on errors.
 */
static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule,
				int import)
{
	int rc;

	/* Fixed format: subject, object and access at SMK_LABELLEN strides */
	rc = smk_fill_rule(data, data + SMK_LABELLEN,
			   data + SMK_LABELLEN + SMK_LABELLEN, NULL, rule,
			   import, SMK_LABELLEN);
	return rc;
}
/**
 * smk_parse_long_rule - parse Smack rule from rule string
 * @data: string to be parsed, null terminated
 * @rule: Will be filled with Smack parsed rule
 * @import: if non-zero, import labels
 * @tokens: number of substrings expected in data
 *
 * Returns number of processed bytes on success, -ERRNO on failure.
 */
static ssize_t smk_parse_long_rule(char *data, struct smack_parsed_rule *rule,
				int import, int tokens)
{
	char *tok[4] = { NULL, NULL, NULL, NULL };
	ssize_t off = 0;
	int rc;
	int i;

	/*
	 * Tokenize in place: every run of whitespace is overwritten
	 * with NUL bytes so each token becomes its own C string.
	 */
	for (i = 0; i < tokens; ++i) {
		while (isspace(data[off]))
			data[off++] = '\0';

		if (data[off] == '\0')
			/* Fewer tokens present than expected */
			return -EINVAL;

		tok[i] = data + off;
		while (data[off] != '\0' && !isspace(data[off]))
			++off;
	}

	/* Consume (and NUL out) any whitespace after the last token */
	while (isspace(data[off]))
		data[off++] = '\0';

	/* Unused token slots stay NULL from the initializer */
	rc = smk_fill_rule(tok[0], tok[1], tok[2], tok[3], rule, import, 0);
	return rc == 0 ? off : rc;
}
#define SMK_FIXED24_FMT	0	/* Fixed 24byte label format */
#define SMK_LONG_FMT	1	/* Variable long label format */
#define SMK_CHANGE_FMT	2	/* Rule modification format */

/**
 * smk_write_rules_list - write() for any /smack rule file
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start - must be 0
 * @rule_list: the list of rules to write to
 * @rule_lock: lock for the rule list
 * @format: /smack/load or /smack/load2 or /smack/change-rule format.
 *
 * Get one smack access rule from above.
 * The format for SMK_LONG_FMT is:
 *	"subject<whitespace>object<whitespace>access[<whitespace>...]"
 * The format for SMK_FIXED24_FMT is exactly:
 *	"subject object rwxat"
 * The format for SMK_CHANGE_FMT is:
 *	"subject<whitespace>object<whitespace>
 *	 acc_enable<whitespace>acc_disable[<whitespace>...]"
 */
static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
					size_t count, loff_t *ppos,
					struct list_head *rule_list,
					struct mutex *rule_lock, int format)
{
	struct smack_parsed_rule rule;
	char *data;
	int rc;
	int trunc = 0;
	int tokens;
	ssize_t cnt = 0;

	/*
	 * No partial writes.
	 * Enough data must be present.
	 */
	if (*ppos != 0)
		return -EINVAL;

	if (format == SMK_FIXED24_FMT) {
		/*
		 * Minor hack for backward compatibility
		 */
		if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
			return -EINVAL;
	} else {
		/* Long-format input is processed at most a page at a time */
		if (count >= PAGE_SIZE) {
			count = PAGE_SIZE - 1;
			trunc = 1;
		}
	}

	data = kmalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		rc = -EFAULT;
		goto out;
	}

	/*
	 * In case of parsing only part of user buf,
	 * avoid having partial rule at the data buffer
	 */
	if (trunc) {
		/* back up to the last complete (newline-terminated) rule */
		while (count > 0 && (data[count - 1] != '\n'))
			--count;
		if (count == 0) {
			rc = -EINVAL;
			goto out;
		}
	}

	data[count] = '\0';
	tokens = (format == SMK_CHANGE_FMT ? 4 : 3);
	while (cnt < count) {
		if (format == SMK_FIXED24_FMT) {
			/* Exactly one fixed-size rule per write */
			rc = smk_parse_rule(data, &rule, 1);
			if (rc < 0)
				goto out;
			cnt = count;
		} else {
			rc = smk_parse_long_rule(data + cnt, &rule, 1, tokens);
			if (rc < 0)
				goto out;
			if (rc == 0) {
				rc = -EINVAL;
				goto out;
			}
			cnt += rc;
		}

		/* A NULL rule_list means "global": hang off the subject label */
		if (rule_list == NULL)
			rc = smk_set_access(&rule, &rule.smk_subject->smk_rules,
				&rule.smk_subject->smk_rules_lock, 1);
		else
			rc = smk_set_access(&rule, rule_list, rule_lock, 0);

		if (rc)
			goto out;
	}

	rc = cnt;
out:
	kfree(data);
	return rc;
}
/*
 * Core logic for smackfs seq list operations.
 * smk_seq_start() takes the RCU read lock, which smk_seq_stop()
 * releases; seq_file guarantees the stop call.
 */
static void *smk_seq_start(struct seq_file *s, loff_t *pos,
				struct list_head *head)
{
	struct list_head *list;
	int i = *pos;

	rcu_read_lock();
	/* Walk forward to the *pos'th element of the list */
	for (list = rcu_dereference(list_next_rcu(head));
		list != head;
		list = rcu_dereference(list_next_rcu(list))) {
		if (i-- == 0)
			return list;
	}

	return NULL;
}

static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
				struct list_head *head)
{
	struct list_head *list = v;

	++*pos;
	list = rcu_dereference(list_next_rcu(list));

	/* NULL terminates the iteration when we wrap back to the head */
	return (list == head) ? NULL : list;
}

static void smk_seq_stop(struct seq_file *s, void *v)
{
	/* Pairs with the rcu_read_lock() taken in smk_seq_start() */
	rcu_read_unlock();
}
/*
 * smk_rule_show - emit one rule in "subject object access" form.
 *
 * Rules whose label names don't fit the interface file (/smack/load
 * vs /smack/load2) are suppressed, because anything read back should
 * be writeable to the same file.  Rules with no access bits are
 * suppressed as well.
 */
static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
{
	static const struct {
		int may;
		char code;
	} accesses[] = {
		{ MAY_READ,	 'r' },
		{ MAY_WRITE,	 'w' },
		{ MAY_EXEC,	 'x' },
		{ MAY_APPEND,	 'a' },
		{ MAY_TRANSMUTE, 't' },
		{ MAY_LOCK,	 'l' },
		{ MAY_BRINGUP,	 'b' },
	};
	int i;

	if (strlen(srp->smk_subject->smk_known) >= max ||
	    strlen(srp->smk_object->smk_known) >= max)
		return;

	if (srp->smk_access == 0)
		return;

	seq_printf(s, "%s %s ",
		   srp->smk_subject->smk_known,
		   srp->smk_object->smk_known);

	/* Emit the access codes in the canonical r w x a t l b order */
	for (i = 0; i < ARRAY_SIZE(accesses); i++)
		if (srp->smk_access & accesses[i].may)
			seq_putc(s, accesses[i].code);

	seq_putc(s, '\n');
}
/*
 * Seq_file read operations for /smack/load
 */
static void *load2_seq_start(struct seq_file *s, loff_t *pos)
{
	return smk_seq_start(s, pos, &smack_rule_list);
}

static void *load2_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	return smk_seq_next(s, v, pos, &smack_rule_list);
}

static int load_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smack_master_list *smlp =
		list_entry_rcu(list, struct smack_master_list, list);

	/* Short-format file: only labels shorter than SMK_LABELLEN shown */
	smk_rule_show(s, smlp->smk_rule, SMK_LABELLEN);

	return 0;
}

static const struct seq_operations load_seq_ops = {
	.start = load2_seq_start,
	.next  = load2_seq_next,
	.show  = load_seq_show,
	.stop  = smk_seq_stop,
};
/**
 * smk_open_load - open() for /smack/load
 * @inode: inode structure representing file
 * @file: "load" file pointer
 *
 * For reading, use load_seq_* seq_file reading operations.
 */
static int smk_open_load(struct inode *inode, struct file *file)
{
	return seq_open(file, &load_seq_ops);
}

/**
 * smk_write_load - write() for /smack/load
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start - must be 0
 *
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t smk_write_load(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	/*
	 * Must have privilege.
	 * No partial writes.
	 * Enough data must be present.
	 */
	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
				    SMK_FIXED24_FMT);
}

/* File operations for /smack/load */
static const struct file_operations smk_load_ops = {
	.open           = smk_open_load,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_load,
	.release        = seq_release,
};
/**
 * smk_cipso_doi - initialize the CIPSO domain
 *
 * Removes any existing IPv4 mapping, allocates and registers a
 * pass-through CIPSO DOI definition and maps it for all addresses.
 * Panics if the DOI structure cannot be allocated; logs and backs
 * out on NetLabel registration failures.
 *
 * Fix: on netlbl_cfg_cipsov4_map_add() failure the DOI definition
 * registered by the earlier successful netlbl_cfg_cipsov4_add() must
 * be removed before @doip is freed, otherwise NetLabel is left holding
 * a dangling pointer (use-after-free).
 */
static void smk_cipso_doi(void)
{
	int rc;
	struct cipso_v4_doi *doip;
	struct netlbl_audit nai;

	smk_netlabel_audit_set(&nai);

	rc = netlbl_cfg_map_del(NULL, PF_INET, NULL, NULL, &nai);
	if (rc != 0)
		printk(KERN_WARNING "%s:%d remove rc = %d\n",
		       __func__, __LINE__, rc);

	doip = kmalloc(sizeof(struct cipso_v4_doi), GFP_KERNEL);
	if (doip == NULL)
		panic("smack: Failed to initialize cipso DOI.\n");
	doip->map.std = NULL;
	doip->doi = smk_cipso_doi_value;
	doip->type = CIPSO_V4_MAP_PASS;
	doip->tags[0] = CIPSO_V4_TAG_RBITMAP;
	for (rc = 1; rc < CIPSO_V4_TAG_MAXCNT; rc++)
		doip->tags[rc] = CIPSO_V4_TAG_INVALID;

	rc = netlbl_cfg_cipsov4_add(doip, &nai);
	if (rc != 0) {
		printk(KERN_WARNING "%s:%d cipso add rc = %d\n",
		       __func__, __LINE__, rc);
		kfree(doip);
		return;
	}
	rc = netlbl_cfg_cipsov4_map_add(doip->doi, NULL, NULL, NULL, &nai);
	if (rc != 0) {
		printk(KERN_WARNING "%s:%d map add rc = %d\n",
		       __func__, __LINE__, rc);
		/*
		 * NetLabel now references doip from the successful
		 * cipsov4_add above; unregister before freeing to
		 * avoid a use-after-free.
		 */
		netlbl_cfg_cipsov4_del(doip->doi, &nai);
		kfree(doip);
		return;
	}
}
/**
 * smk_unlbl_ambient - initialize the unlabeled domain
 * @oldambient: previous domain string
 */
static void smk_unlbl_ambient(char *oldambient)
{
	int rc;
	struct netlbl_audit nai;

	smk_netlabel_audit_set(&nai);

	if (oldambient != NULL) {
		rc = netlbl_cfg_map_del(oldambient, PF_INET, NULL, NULL, &nai);
		if (rc != 0)
			printk(KERN_WARNING "%s:%d remove rc = %d\n",
			       __func__, __LINE__, rc);
	}
	/* Default to the floor label when no ambient label is configured */
	if (smack_net_ambient == NULL)
		smack_net_ambient = &smack_known_floor;

	rc = netlbl_cfg_unlbl_map_add(smack_net_ambient->smk_known, PF_INET,
				      NULL, NULL, &nai);
	if (rc != 0)
		printk(KERN_WARNING "%s:%d add rc = %d\n",
		       __func__, __LINE__, rc);
}
/*
 * Seq_file read operations for /smack/cipso
 * (iterates the global list of known labels)
 */
static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
{
	return smk_seq_start(s, pos, &smack_known_list);
}

static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	return smk_seq_next(s, v, pos, &smack_known_list);
}
/*
 * Print cipso labels in format:
 * label level[/cat[,cat]]
 */
static int cipso_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smack_known *skp =
		list_entry_rcu(list, struct smack_known, list);
	struct netlbl_lsm_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
	char sep = '/';
	int i;

	/*
	 * Don't show a label that could not have been set using
	 * /smack/cipso. This is in support of the notion that
	 * anything read from /smack/cipso ought to be writeable
	 * to /smack/cipso.
	 *
	 * /smack/cipso2 should be used instead.
	 */
	if (strlen(skp->smk_known) >= SMK_LABELLEN)
		return 0;

	seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);

	/* Categories: '/' before the first, ',' between the rest */
	for (i = netlbl_catmap_walk(cmp, 0); i >= 0;
	     i = netlbl_catmap_walk(cmp, i + 1)) {
		seq_printf(s, "%c%d", sep, i);
		sep = ',';
	}

	seq_putc(s, '\n');

	return 0;
}

static const struct seq_operations cipso_seq_ops = {
	.start = cipso_seq_start,
	.next  = cipso_seq_next,
	.show  = cipso_seq_show,
	.stop  = smk_seq_stop,
};
/**
 * smk_open_cipso - open() for /smack/cipso
 * @inode: inode structure representing file
 * @file: "cipso" file pointer
 *
 * Connect our cipso_seq_* operations with /smack/cipso
 * file_operations
 */
static int smk_open_cipso(struct inode *inode, struct file *file)
{
	return seq_open(file, &cipso_seq_ops);
}
/**
 * smk_set_cipso - do the work for write() for cipso and cipso2
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 * @format: /smack/cipso or /smack/cipso2
 *
 * Accepts only one cipso rule per write call.
 * Returns number of bytes written or error code, as appropriate
 *
 * Hardening: sscanf("%d") happily parses negative numbers, so the
 * level and category-count checks now also reject values below zero;
 * the total write size is capped at PAGE_SIZE so the fixed-stride
 * category walk cannot run far past the allocation.
 */
static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos, int format)
{
	struct smack_known *skp;
	struct netlbl_lsm_secattr ncats;
	char mapcatset[SMK_CIPSOLEN];
	int maplevel;
	unsigned int cat;
	int catlen;
	ssize_t rc = -EINVAL;
	char *data = NULL;
	char *rule;
	int ret;
	int i;

	/*
	 * Must have privilege.
	 * No partial writes.
	 * Enough data must be present.
	 */
	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;
	if (*ppos != 0)
		return -EINVAL;
	if (format == SMK_FIXED24_FMT &&
	    (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
		return -EINVAL;
	/* Bound the allocation and the fixed-stride parsing below */
	if (count > PAGE_SIZE)
		return -EINVAL;

	data = kzalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		rc = -EFAULT;
		goto unlockedout;
	}

	data[count] = '\0';
	rule = data;
	/*
	 * Only allow one writer at a time. Writes should be
	 * quite rare and small in any case.
	 */
	mutex_lock(&smack_cipso_lock);

	skp = smk_import_entry(rule, 0);
	if (IS_ERR(skp)) {
		rc = PTR_ERR(skp);
		goto out;
	}

	if (format == SMK_FIXED24_FMT)
		rule += SMK_LABELLEN;
	else
		rule += strlen(skp->smk_known) + 1;

	ret = sscanf(rule, "%d", &maplevel);
	if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL)
		goto out;

	rule += SMK_DIGITLEN;
	ret = sscanf(rule, "%d", &catlen);
	if (ret != 1 || catlen < 0 || catlen > SMACK_CIPSO_MAXCATNUM)
		goto out;

	if (format == SMK_FIXED24_FMT &&
	    count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
		goto out;

	memset(mapcatset, 0, sizeof(mapcatset));

	for (i = 0; i < catlen; i++) {
		rule += SMK_DIGITLEN;
		ret = sscanf(rule, "%u", &cat);
		if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
			goto out;

		smack_catset_bit(cat, mapcatset);
	}

	rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
	if (rc >= 0) {
		/* Swap in the new NetLabel category map for the label */
		netlbl_catmap_free(skp->smk_netlabel.attr.mls.cat);
		skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat;
		skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
		rc = count;
	}

out:
	mutex_unlock(&smack_cipso_lock);
unlockedout:
	kfree(data);
	return rc;
}
/**
 * smk_write_cipso - write() for /smack/cipso
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Accepts only one cipso rule per write call.
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	/* Fixed 24-byte label format; the work is done by smk_set_cipso() */
	return smk_set_cipso(file, buf, count, ppos, SMK_FIXED24_FMT);
}

/* File operations for /smack/cipso */
static const struct file_operations smk_cipso_ops = {
	.open           = smk_open_cipso,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_cipso,
	.release        = seq_release,
};
/*
 * Seq_file read operations for /smack/cipso2
 */

/*
 * Print cipso labels in format:
 * label level[/cat[,cat]]
 * Unlike cipso_seq_show(), labels of any length are shown.
 */
static int cipso2_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smack_known *skp =
		list_entry_rcu(list, struct smack_known, list);
	struct netlbl_lsm_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
	char sep = '/';
	int i;

	seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);

	/* Categories: '/' before the first, ',' between the rest */
	for (i = netlbl_catmap_walk(cmp, 0); i >= 0;
	     i = netlbl_catmap_walk(cmp, i + 1)) {
		seq_printf(s, "%c%d", sep, i);
		sep = ',';
	}

	seq_putc(s, '\n');

	return 0;
}

static const struct seq_operations cipso2_seq_ops = {
	.start = cipso_seq_start,
	.next  = cipso_seq_next,
	.show  = cipso2_seq_show,
	.stop  = smk_seq_stop,
};

/**
 * smk_open_cipso2 - open() for /smack/cipso2
 * @inode: inode structure representing file
 * @file: "cipso2" file pointer
 *
 * Connect our cipso_seq_* operations with /smack/cipso2
 * file_operations
 */
static int smk_open_cipso2(struct inode *inode, struct file *file)
{
	return seq_open(file, &cipso2_seq_ops);
}

/**
 * smk_write_cipso2 - write() for /smack/cipso2
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Accepts only one cipso rule per write call.
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_cipso2(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	/* Long (variable-length) label format */
	return smk_set_cipso(file, buf, count, ppos, SMK_LONG_FMT);
}

/* File operations for /smack/cipso2 */
static const struct file_operations smk_cipso2_ops = {
	.open           = smk_open_cipso2,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_cipso2,
	.release        = seq_release,
};
/*
 * Seq_file read operations for /smack/netlabel
 * (single-label IPv4 host list)
 */
static void *net4addr_seq_start(struct seq_file *s, loff_t *pos)
{
	return smk_seq_start(s, pos, &smk_net4addr_list);
}

static void *net4addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	return smk_seq_next(s, v, pos, &smk_net4addr_list);
}

/*
 * Print host/label pairs
 */
static int net4addr_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smk_net4addr *skp =
		list_entry_rcu(list, struct smk_net4addr, list);
	char *kp = SMACK_CIPSO_OPTION;

	/* A NULL label means the -CIPSO option was configured */
	if (skp->smk_label != NULL)
		kp = skp->smk_label->smk_known;
	seq_printf(s, "%pI4/%d %s\n", &skp->smk_host.s_addr,
		   skp->smk_masks, kp);

	return 0;
}

static const struct seq_operations net4addr_seq_ops = {
	.start = net4addr_seq_start,
	.next  = net4addr_seq_next,
	.show  = net4addr_seq_show,
	.stop  = smk_seq_stop,
};
/**
 * smk_open_net4addr - open() for /smack/netlabel
 * @inode: inode structure representing file
 * @file: "netlabel" file pointer
 *
 * Connect our net4addr_seq_* operations with /smack/netlabel
 * file_operations
 */
static int smk_open_net4addr(struct inode *inode, struct file *file)
{
	return seq_open(file, &net4addr_seq_ops);
}
/**
 * smk_net4addr_insert
 * @new : netlabel to insert
 *
 * This helper insert netlabel in the smack_net4addrs list
 * sorted by netmask length (longest to smallest)
 * locked by &smk_net4addr_lock in smk_write_net4addr
 *
 */
static void smk_net4addr_insert(struct smk_net4addr *new)
{
	struct smk_net4addr *m;
	struct smk_net4addr *m_next;

	if (list_empty(&smk_net4addr_list)) {
		list_add_rcu(&new->list, &smk_net4addr_list);
		return;
	}

	m = list_entry_rcu(smk_net4addr_list.next,
			   struct smk_net4addr, list);

	/* the comparison '>' is a bit hacky, but works */
	if (new->smk_masks > m->smk_masks) {
		/* Longer mask than the current head: insert first */
		list_add_rcu(&new->list, &smk_net4addr_list);
		return;
	}

	list_for_each_entry_rcu(m, &smk_net4addr_list, list) {
		/* Reached the tail: append */
		if (list_is_last(&m->list, &smk_net4addr_list)) {
			list_add_rcu(&new->list, &m->list);
			return;
		}
		m_next = list_entry_rcu(m->list.next,
					struct smk_net4addr, list);
		/* Insert before the first entry with a shorter mask */
		if (new->smk_masks > m_next->smk_masks) {
			list_add_rcu(&new->list, &m->list);
			return;
		}
	}
}
/**
 * smk_write_net4addr - write() for /smack/netlabel
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Accepts only one net4addr per write call.
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_net4addr(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct smk_net4addr *snp;
	struct sockaddr_in newname;
	char *smack;
	struct smack_known *skp = NULL;
	char *data;
	char *host = (char *)&newname.sin_addr.s_addr;
	int rc;
	struct netlbl_audit audit_info;
	struct in_addr mask;
	unsigned int m;
	unsigned int masks;
	int found;
	/*
	 * Must be 1U: shifting a signed 1 into bit 31 is undefined
	 * behavior (signed overflow).
	 */
	u32 mask_bits = (1U << 31);
	__be32 nsa;
	u32 temp_mask;

	/*
	 * Must have privilege.
	 * No partial writes.
	 * Enough data must be present.
	 * "<addr/mask, as a.b.c.d/e><space><label>"
	 * "<addr, as a.b.c.d><space><label>"
	 */
	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;
	if (*ppos != 0)
		return -EINVAL;
	if (count < SMK_NETLBLADDRMIN)
		return -EINVAL;

	data = kzalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		rc = -EFAULT;
		goto free_data_out;
	}

	/* smack is at least as long as any label token in data. */
	smack = kzalloc(count + 1, GFP_KERNEL);
	if (smack == NULL) {
		rc = -ENOMEM;
		goto free_data_out;
	}

	data[count] = '\0';

	/*
	 * NOTE(review): %hhd accepts negative octets; the bytes are used
	 * raw so this looks harmless, but %hhu would be stricter — confirm.
	 */
	rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%u %s",
		&host[0], &host[1], &host[2], &host[3], &masks, smack);
	if (rc != 6) {
		rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s",
			&host[0], &host[1], &host[2], &host[3], smack);
		if (rc != 5) {
			rc = -EINVAL;
			goto free_out;
		}
		/*
		 * No mask given: a single-host entry covers all bits.
		 * (The old "m = BEBITS" here was a dead store — m is
		 * unconditionally reinitialized in the mask loop below.)
		 */
		masks = 32;
	}
	if (masks > BEBITS) {
		rc = -EINVAL;
		goto free_out;
	}

	/*
	 * If smack begins with '-', it is an option, don't import it
	 */
	if (smack[0] != '-') {
		skp = smk_import_entry(smack, 0);
		if (IS_ERR(skp)) {
			rc = PTR_ERR(skp);
			goto free_out;
		}
	} else {
		/*
		 * Only the -CIPSO option is supported for IPv4
		 */
		if (strcmp(smack, SMACK_CIPSO_OPTION) != 0) {
			rc = -EINVAL;
			goto free_out;
		}
	}

	/* Build a left-aligned netmask of "masks" one-bits. */
	for (m = masks, temp_mask = 0; m > 0; m--) {
		temp_mask |= mask_bits;
		mask_bits >>= 1;
	}
	mask.s_addr = cpu_to_be32(temp_mask);

	/* Canonicalize: zero the host bits of the address. */
	newname.sin_addr.s_addr &= mask.s_addr;
	/*
	 * Only allow one writer at a time. Writes should be
	 * quite rare and small in any case.
	 */
	mutex_lock(&smk_net4addr_lock);

	nsa = newname.sin_addr.s_addr;
	/* try to find if the prefix is already in the list */
	found = 0;
	list_for_each_entry_rcu(snp, &smk_net4addr_list, list) {
		if (snp->smk_host.s_addr == nsa && snp->smk_masks == masks) {
			found = 1;
			break;
		}
	}
	smk_netlabel_audit_set(&audit_info);

	if (found == 0) {
		/* New prefix: allocate and insert sorted by mask length. */
		snp = kzalloc(sizeof(*snp), GFP_KERNEL);
		if (snp == NULL)
			rc = -ENOMEM;
		else {
			rc = 0;
			snp->smk_host.s_addr = newname.sin_addr.s_addr;
			snp->smk_mask.s_addr = mask.s_addr;
			snp->smk_label = skp;
			snp->smk_masks = masks;
			smk_net4addr_insert(snp);
		}
	} else {
		/*
		 * Delete the unlabeled entry, only if the previous label
		 * wasn't the special CIPSO option
		 */
		if (snp->smk_label != NULL)
			rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
					&snp->smk_host, &snp->smk_mask,
					PF_INET, &audit_info);
		else
			rc = 0;
		snp->smk_label = skp;
	}

	/*
	 * Now tell netlabel about the single label nature of
	 * this host so that incoming packets get labeled.
	 * but only if we didn't get the special CIPSO option
	 */
	if (rc == 0 && skp != NULL)
		rc = netlbl_cfg_unlbl_static_add(&init_net, NULL,
			&snp->smk_host, &snp->smk_mask, PF_INET,
			snp->smk_label->smk_secid, &audit_info);

	if (rc == 0)
		rc = count;

	mutex_unlock(&smk_net4addr_lock);

free_out:
	kfree(smack);
free_data_out:
	kfree(data);

	return rc;
}
/* /smack/netlabel: seq_file reads, one host/label pair per write. */
static const struct file_operations smk_net4addr_ops = {
	.open           = smk_open_net4addr,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_net4addr,
	.release        = seq_release,
};
#if IS_ENABLED(CONFIG_IPV6)
/*
 * Seq_file read operations for /smack/netlabel6
 */
static void *net6addr_seq_start(struct seq_file *s, loff_t *pos)
{
	return smk_seq_start(s, pos, &smk_net6addr_list);
}

static void *net6addr_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	return smk_seq_next(s, v, pos, &smk_net6addr_list);
}
/*
 * Print host/label pairs
 */
static int net6addr_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	/*
	 * Use list_entry_rcu for consistency with net4addr_seq_show:
	 * the list is walked under RCU protection from smk_seq_start.
	 */
	struct smk_net6addr *skp =
		list_entry_rcu(list, struct smk_net6addr, list);

	/* Entries with a NULL label have been deleted; print nothing. */
	if (skp->smk_label != NULL)
		seq_printf(s, "%pI6/%d %s\n", &skp->smk_host, skp->smk_masks,
				skp->smk_label->smk_known);

	return 0;
}
static const struct seq_operations net6addr_seq_ops = {
	.start = net6addr_seq_start,
	.next  = net6addr_seq_next,
	.show  = net6addr_seq_show,
	.stop  = smk_seq_stop,
};

/**
 * smk_open_net6addr - open() for /smack/netlabel
 * @inode: inode structure representing file
 * @file: "netlabel" file pointer
 *
 * Connect our net6addr_seq_* operations with /smack/netlabel
 * file_operations
 */
static int smk_open_net6addr(struct inode *inode, struct file *file)
{
	return seq_open(file, &net6addr_seq_ops);
}
/**
 * smk_net6addr_insert
 * @new : entry to insert
 *
 * This inserts an entry in the smack_net6addrs list
 * sorted by netmask length (longest to smallest)
 * locked by &smk_net6addr_lock in smk_write_net6addr
 *
 */
static void smk_net6addr_insert(struct smk_net6addr *new)
{
	struct smk_net6addr *m_next;
	struct smk_net6addr *m;

	/* First entry: nothing to sort against. */
	if (list_empty(&smk_net6addr_list)) {
		list_add_rcu(&new->list, &smk_net6addr_list);
		return;
	}

	m = list_entry_rcu(smk_net6addr_list.next,
			   struct smk_net6addr, list);

	/* Longest prefix so far: goes to the head. */
	if (new->smk_masks > m->smk_masks) {
		list_add_rcu(&new->list, &smk_net6addr_list);
		return;
	}

	list_for_each_entry_rcu(m, &smk_net6addr_list, list) {
		/* Reached the tail: append after the last entry. */
		if (list_is_last(&m->list, &smk_net6addr_list)) {
			list_add_rcu(&new->list, &m->list);
			return;
		}
		m_next = list_entry_rcu(m->list.next,
					struct smk_net6addr, list);
		/* Insert before the first shorter prefix. */
		if (new->smk_masks > m_next->smk_masks) {
			list_add_rcu(&new->list, &m->list);
			return;
		}
	}
}
/**
 * smk_write_net6addr - write() for /smack/netlabel
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Accepts only one net6addr per write call.
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_net6addr(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct smk_net6addr *snp;
	struct in6_addr newname;
	struct in6_addr fullmask;
	struct smack_known *skp = NULL;
	char *smack;
	char *data;
	int rc = 0;
	int found = 0;
	int i;
	unsigned int scanned[8];
	unsigned int m;
	unsigned int mask = 128;

	/*
	 * Must have privilege.
	 * No partial writes.
	 * Enough data must be present.
	 * "<addr/mask, as a:b:c:d:e:f:g:h/e><space><label>"
	 * "<addr, as a:b:c:d:e:f:g:h><space><label>"
	 */
	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;
	if (*ppos != 0)
		return -EINVAL;
	if (count < SMK_NETLBLADDRMIN)
		return -EINVAL;

	data = kzalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		rc = -EFAULT;
		goto free_data_out;
	}

	/* smack is at least as long as any label token in data. */
	smack = kzalloc(count + 1, GFP_KERNEL);
	if (smack == NULL) {
		rc = -ENOMEM;
		goto free_data_out;
	}

	data[count] = '\0';

	/* Only the fully expanded a:b:c:d:e:f:g:h form is accepted. */
	i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x/%u %s",
			&scanned[0], &scanned[1], &scanned[2], &scanned[3],
			&scanned[4], &scanned[5], &scanned[6], &scanned[7],
			&mask, smack);
	if (i != 10) {
		i = sscanf(data, "%x:%x:%x:%x:%x:%x:%x:%x %s",
				&scanned[0], &scanned[1], &scanned[2],
				&scanned[3], &scanned[4], &scanned[5],
				&scanned[6], &scanned[7], smack);
		if (i != 9) {
			rc = -EINVAL;
			goto free_out;
		}
	}
	if (mask > 128) {
		rc = -EINVAL;
		goto free_out;
	}
	for (i = 0; i < 8; i++) {
		if (scanned[i] > 0xffff) {
			rc = -EINVAL;
			goto free_out;
		}
		newname.s6_addr16[i] = htons(scanned[i]);
	}

	/*
	 * If smack begins with '-', it is an option, don't import it
	 */
	if (smack[0] != '-') {
		skp = smk_import_entry(smack, 0);
		/*
		 * smk_import_entry() reports failure with ERR_PTR, never
		 * NULL — check IS_ERR like the IPv4 path does, otherwise
		 * an error pointer could be stored as the entry's label.
		 */
		if (IS_ERR(skp)) {
			rc = PTR_ERR(skp);
			goto free_out;
		}
	} else {
		/*
		 * Only -DELETE is supported for IPv6
		 */
		if (strcmp(smack, SMACK_DELETE_OPTION) != 0) {
			rc = -EINVAL;
			goto free_out;
		}
	}

	/* Expand the prefix length into a full 128-bit netmask. */
	for (i = 0, m = mask; i < 8; i++) {
		if (m >= 16) {
			fullmask.s6_addr16[i] = 0xffff;
			m -= 16;
		} else if (m > 0) {
			fullmask.s6_addr16[i] = (1 << m) - 1;
			m = 0;
		} else
			fullmask.s6_addr16[i] = 0;
		/* Canonicalize: zero the host bits of the address. */
		newname.s6_addr16[i] &= fullmask.s6_addr16[i];
	}

	/*
	 * Only allow one writer at a time. Writes should be
	 * quite rare and small in any case.
	 */
	mutex_lock(&smk_net6addr_lock);
	/*
	 * Try to find the prefix in the list
	 */
	list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
		if (mask != snp->smk_masks)
			continue;
		for (found = 1, i = 0; i < 8; i++) {
			if (newname.s6_addr16[i] !=
			    snp->smk_host.s6_addr16[i]) {
				found = 0;
				break;
			}
		}
		if (found == 1)
			break;
	}
	if (found == 0) {
		/* New prefix: allocate and insert sorted by mask length. */
		snp = kzalloc(sizeof(*snp), GFP_KERNEL);
		if (snp == NULL)
			rc = -ENOMEM;
		else {
			snp->smk_host = newname;
			snp->smk_mask = fullmask;
			snp->smk_masks = mask;
			snp->smk_label = skp;
			smk_net6addr_insert(snp);
		}
	} else {
		/* Existing prefix: relabel (skp is NULL for -DELETE). */
		snp->smk_label = skp;
	}

	if (rc == 0)
		rc = count;

	mutex_unlock(&smk_net6addr_lock);

free_out:
	kfree(smack);
free_data_out:
	kfree(data);

	return rc;
}
/* /smack/netlabel6: seq_file reads, one host/label pair per write. */
static const struct file_operations smk_net6addr_ops = {
	.open           = smk_open_net6addr,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_net6addr,
	.release        = seq_release,
};
#endif /* CONFIG_IPV6 */
/**
 * smk_read_doi - read() for /smack/doi
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @count: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_doi(struct file *filp, char __user *buf,
			size_t count, loff_t *ppos)
{
	char temp[80];

	/* The whole value is delivered on the first read. */
	if (*ppos != 0)
		return 0;

	sprintf(temp, "%d", smk_cipso_doi_value);
	return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
}

/**
 * smk_write_doi - write() for /smack/doi
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_doi(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	char temp[80];
	int doi;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	if (count >= sizeof(temp) || count == 0)
		return -EINVAL;

	if (copy_from_user(temp, buf, count) != 0)
		return -EFAULT;

	temp[count] = '\0';

	if (sscanf(temp, "%d", &doi) != 1)
		return -EINVAL;

	/* Record the new DOI and push it to netlabel. */
	smk_cipso_doi_value = doi;
	smk_cipso_doi();

	return count;
}

static const struct file_operations smk_doi_ops = {
	.read		= smk_read_doi,
	.write		= smk_write_doi,
	.llseek		= default_llseek,
};
/**
 * smk_read_direct - read() for /smack/direct
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @count: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_direct(struct file *filp, char __user *buf,
			size_t count, loff_t *ppos)
{
	char temp[80];

	/* The whole value is delivered on the first read. */
	if (*ppos != 0)
		return 0;

	sprintf(temp, "%d", smack_cipso_direct);
	return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
}
/**
 * smk_write_direct - write() for /smack/direct
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_direct(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	struct smack_known *skp;
	char temp[80];
	int i;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	if (count >= sizeof(temp) || count == 0)
		return -EINVAL;

	if (copy_from_user(temp, buf, count) != 0)
		return -EFAULT;

	temp[count] = '\0';

	if (sscanf(temp, "%d", &i) != 1)
		return -EINVAL;

	/*
	 * Don't do anything if the value hasn't actually changed.
	 * If it is changing reset the level on entries that were
	 * set up to be direct when they were created.
	 */
	if (smack_cipso_direct != i) {
		mutex_lock(&smack_known_lock);
		/* Migrate every label currently at the old direct level. */
		list_for_each_entry_rcu(skp, &smack_known_list, list)
			if (skp->smk_netlabel.attr.mls.lvl ==
			    smack_cipso_direct)
				skp->smk_netlabel.attr.mls.lvl = i;
		smack_cipso_direct = i;
		mutex_unlock(&smack_known_lock);
	}

	return count;
}

static const struct file_operations smk_direct_ops = {
	.read		= smk_read_direct,
	.write		= smk_write_direct,
	.llseek		= default_llseek,
};
/**
 * smk_read_mapped - read() for /smack/mapped
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @count: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_mapped(struct file *filp, char __user *buf,
			size_t count, loff_t *ppos)
{
	char temp[80];

	/* The whole value is delivered on the first read. */
	if (*ppos != 0)
		return 0;

	sprintf(temp, "%d", smack_cipso_mapped);
	return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
}
/**
 * smk_write_mapped - write() for /smack/mapped
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_mapped(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	struct smack_known *skp;
	char temp[80];
	int i;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	if (count >= sizeof(temp) || count == 0)
		return -EINVAL;

	if (copy_from_user(temp, buf, count) != 0)
		return -EFAULT;

	temp[count] = '\0';

	if (sscanf(temp, "%d", &i) != 1)
		return -EINVAL;

	/*
	 * Don't do anything if the value hasn't actually changed.
	 * If it is changing reset the level on entries that were
	 * set up to be mapped when they were created.
	 */
	if (smack_cipso_mapped != i) {
		mutex_lock(&smack_known_lock);
		/* Migrate every label currently at the old mapped level. */
		list_for_each_entry_rcu(skp, &smack_known_list, list)
			if (skp->smk_netlabel.attr.mls.lvl ==
			    smack_cipso_mapped)
				skp->smk_netlabel.attr.mls.lvl = i;
		smack_cipso_mapped = i;
		mutex_unlock(&smack_known_lock);
	}

	return count;
}

static const struct file_operations smk_mapped_ops = {
	.read		= smk_read_mapped,
	.write		= smk_write_mapped,
	.llseek		= default_llseek,
};
/**
 * smk_read_ambient - read() for /smack/ambient
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @cn: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
				size_t cn, loff_t *ppos)
{
	ssize_t rc;
	int asize;

	if (*ppos != 0)
		return 0;
	/*
	 * Being careful to avoid a problem in the case where
	 * smack_net_ambient gets changed in midstream.
	 */
	mutex_lock(&smack_ambient_lock);

	/* Include the trailing NUL in what goes to userspace. */
	asize = strlen(smack_net_ambient->smk_known) + 1;

	if (cn >= asize)
		rc = simple_read_from_buffer(buf, cn, ppos,
					     smack_net_ambient->smk_known,
					     asize);
	else
		rc = -EINVAL;

	mutex_unlock(&smack_ambient_lock);

	return rc;
}
/**
 * smk_write_ambient - write() for /smack/ambient
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct smack_known *skp;
	char *oldambient;
	char *data;
	int rc = count;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	data = kzalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		rc = -EFAULT;
		goto out;
	}

	skp = smk_import_entry(data, count);
	if (IS_ERR(skp)) {
		rc = PTR_ERR(skp);
		goto out;
	}

	/* Swap the ambient label and unlabel the old one under the lock. */
	mutex_lock(&smack_ambient_lock);

	oldambient = smack_net_ambient->smk_known;
	smack_net_ambient = skp;
	smk_unlbl_ambient(oldambient);

	mutex_unlock(&smack_ambient_lock);

out:
	kfree(data);
	return rc;
}

static const struct file_operations smk_ambient_ops = {
	.read		= smk_read_ambient,
	.write		= smk_write_ambient,
	.llseek		= default_llseek,
};
/*
 * Seq_file operations for /smack/onlycap
 */
static void *onlycap_seq_start(struct seq_file *s, loff_t *pos)
{
	return smk_seq_start(s, pos, &smack_onlycap_list);
}

static void *onlycap_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	return smk_seq_next(s, v, pos, &smack_onlycap_list);
}

/* Print each onlycap label followed by a space. */
static int onlycap_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smack_onlycap *sop =
		list_entry_rcu(list, struct smack_onlycap, list);

	seq_puts(s, sop->smk_label->smk_known);
	seq_putc(s, ' ');

	return 0;
}

static const struct seq_operations onlycap_seq_ops = {
	.start = onlycap_seq_start,
	.next  = onlycap_seq_next,
	.show  = onlycap_seq_show,
	.stop  = smk_seq_stop,
};

/* open() for /smack/onlycap: hook up the seq_file operations. */
static int smk_open_onlycap(struct inode *inode, struct file *file)
{
	return seq_open(file, &onlycap_seq_ops);
}
/**
 * smk_list_swap_rcu - swap public list with a private one in RCU-safe way
 * The caller must hold appropriate mutex to prevent concurrent modifications
 * to the public list.
 * Private list is assumed to be not accessible to other threads yet.
 *
 * @public: public list
 * @private: private list
 */
static void smk_list_swap_rcu(struct list_head *public,
			      struct list_head *private)
{
	struct list_head *first, *last;

	if (list_empty(public)) {
		/* Nothing to preserve: splice the new list in directly. */
		list_splice_init_rcu(private, public, synchronize_rcu);
	} else {
		/* Remember public list before replacing it */
		first = public->next;
		last = public->prev;

		/* Publish private list in place of public in RCU-safe way */
		private->prev->next = public;
		private->next->prev = public;
		/*
		 * rcu_assign_pointer publishes the new first element;
		 * readers see either the complete old or complete new list.
		 */
		rcu_assign_pointer(public->next, private->next);
		public->prev = private->prev;

		synchronize_rcu();

		/* When all readers are done with the old public list,
		 * attach it in place of private */
		private->next = first;
		private->prev = last;
		first->prev = private;
		last->next = private;
	}
}
/**
 * smk_write_onlycap - write() for smackfs/onlycap
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	char *data;
	char *data_parse;
	char *tok;
	struct smack_known *skp;
	struct smack_onlycap *sop;
	struct smack_onlycap *sop2;
	LIST_HEAD(list_tmp);
	int rc = count;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	data = kzalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		kfree(data);
		return -EFAULT;
	}

	/* Build the replacement list privately before publishing it. */
	data_parse = data;
	while ((tok = strsep(&data_parse, " ")) != NULL) {
		if (!*tok)
			continue;

		skp = smk_import_entry(tok, 0);
		if (IS_ERR(skp)) {
			rc = PTR_ERR(skp);
			break;
		}

		sop = kzalloc(sizeof(*sop), GFP_KERNEL);
		if (sop == NULL) {
			rc = -ENOMEM;
			break;
		}

		sop->smk_label = skp;
		list_add_rcu(&sop->list, &list_tmp);
	}
	kfree(data);

	/*
	 * Clear the smack_onlycap on invalid label errors. This means
	 * that we can pass a null string to unset the onlycap value.
	 *
	 * Importing will also reject a label beginning with '-',
	 * so "-usecapabilities" will also work.
	 *
	 * But do so only on invalid label, not on system errors.
	 * The invalid label must be first to count as clearing attempt.
	 */
	if (rc == -EINVAL && list_empty(&list_tmp))
		rc = count;

	if (rc >= 0) {
		mutex_lock(&smack_onlycap_lock);
		smk_list_swap_rcu(&smack_onlycap_list, &list_tmp);
		mutex_unlock(&smack_onlycap_lock);
	}

	/*
	 * After a successful swap, list_tmp holds the retired entries;
	 * on failure it holds the partially built new list. Free either.
	 */
	list_for_each_entry_safe(sop, sop2, &list_tmp, list)
		kfree(sop);

	return rc;
}

static const struct file_operations smk_onlycap_ops = {
	.open		= smk_open_onlycap,
	.read		= seq_read,
	.write		= smk_write_onlycap,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
/**
 * smk_read_unconfined - read() for smackfs/unconfined
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @cn: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_unconfined(struct file *filp, char __user *buf,
				   size_t cn, loff_t *ppos)
{
	char *smack = "";
	ssize_t rc = -EINVAL;
	int asize;

	if (*ppos != 0)
		return 0;

	/* An unset unconfined label reads back as the empty string. */
	if (smack_unconfined != NULL)
		smack = smack_unconfined->smk_known;

	/* Include the trailing NUL in what goes to userspace. */
	asize = strlen(smack) + 1;

	if (cn >= asize)
		rc = simple_read_from_buffer(buf, cn, ppos, smack, asize);

	return rc;
}
/**
 * smk_write_unconfined - write() for smackfs/unconfined
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_unconfined(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	char *data;
	struct smack_known *skp;
	int rc = count;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	data = kzalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		rc = -EFAULT;
		goto freeout;
	}

	/*
	 * Clear the smack_unconfined on invalid label errors. This means
	 * that we can pass a null string to unset the unconfined value.
	 *
	 * Importing will also reject a label beginning with '-',
	 * so "-confine" will also work.
	 *
	 * But do so only on invalid label, not on system errors.
	 */
	skp = smk_import_entry(data, count);
	if (PTR_ERR(skp) == -EINVAL)
		skp = NULL;	/* invalid label: treat as "unset" */
	else if (IS_ERR(skp)) {
		rc = PTR_ERR(skp);
		goto freeout;
	}

	smack_unconfined = skp;

freeout:
	kfree(data);
	return rc;
}

static const struct file_operations smk_unconfined_ops = {
	.read		= smk_read_unconfined,
	.write		= smk_write_unconfined,
	.llseek		= default_llseek,
};
#endif /* CONFIG_SECURITY_SMACK_BRINGUP */
/**
 * smk_read_logging - read() for /smack/logging
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @count: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_logging(struct file *filp, char __user *buf,
				size_t count, loff_t *ppos)
{
	char temp[32];
	ssize_t rc;

	if (*ppos != 0)
		return 0;

	sprintf(temp, "%d\n", log_policy);
	rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
	return rc;
}
/**
 * smk_write_logging - write() for /smack/logging
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_logging(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	char temp[32];
	int new_policy;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	if (count >= sizeof(temp) || count == 0)
		return -EINVAL;

	if (copy_from_user(temp, buf, count) != 0)
		return -EFAULT;

	temp[count] = '\0';

	if (sscanf(temp, "%d", &new_policy) != 1)
		return -EINVAL;

	/* Only policies 0 through 3 are defined. */
	if (new_policy < 0 || new_policy > 3)
		return -EINVAL;

	log_policy = new_policy;
	return count;
}

static const struct file_operations smk_logging_ops = {
	.read		= smk_read_logging,
	.write		= smk_write_logging,
	.llseek		= default_llseek,
};
/*
 * Seq_file read operations for /smack/load-self
 */
static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
{
	struct task_smack *tsp = current_security();

	return smk_seq_start(s, pos, &tsp->smk_rules);
}

static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct task_smack *tsp = current_security();

	return smk_seq_next(s, v, pos, &tsp->smk_rules);
}

/* Show one of the current task's rules in the short (24 char) format. */
static int load_self_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smack_rule *srp =
		list_entry_rcu(list, struct smack_rule, list);

	smk_rule_show(s, srp, SMK_LABELLEN);

	return 0;
}

static const struct seq_operations load_self_seq_ops = {
	.start = load_self_seq_start,
	.next  = load_self_seq_next,
	.show  = load_self_seq_show,
	.stop  = smk_seq_stop,
};

/**
 * smk_open_load_self - open() for /smack/load-self2
 * @inode: inode structure representing file
 * @file: "load" file pointer
 *
 * For reading, use load_seq_* seq_file reading operations.
 */
static int smk_open_load_self(struct inode *inode, struct file *file)
{
	return seq_open(file, &load_self_seq_ops);
}

/**
 * smk_write_load_self - write() for /smack/load-self
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start - must be 0
 *
 */
static ssize_t smk_write_load_self(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct task_smack *tsp = current_security();

	return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
				    &tsp->smk_rules_lock, SMK_FIXED24_FMT);
}

static const struct file_operations smk_load_self_ops = {
	.open           = smk_open_load_self,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_load_self,
	.release        = seq_release,
};
/**
 * smk_user_access - handle access check transaction
 * @file: file pointer
 * @buf: data from user space
 * @count: bytes sent
 * @ppos: where to start - must be 0
 */
static ssize_t smk_user_access(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos, int format)
{
	struct smack_parsed_rule rule;
	char *data;
	int res;

	data = simple_transaction_get(file, buf, count);
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (format == SMK_FIXED24_FMT) {
		if (count < SMK_LOADLEN)
			return -EINVAL;
		res = smk_parse_rule(data, &rule, 0);
	} else {
		/*
		 * simple_transaction_get() returns null-terminated data
		 */
		res = smk_parse_long_rule(data, &rule, 0, 3);
	}

	if (res >= 0)
		res = smk_access(rule.smk_subject, rule.smk_object,
				 rule.smk_access1, NULL);
	else if (res != -ENOENT)
		/* Real parse errors propagate; -ENOENT falls through
		 * and is reported as a denied ("0") answer below. */
		return res;

	/*
	 * smk_access() can return a value > 0 in the "bringup" case.
	 */
	data[0] = res >= 0 ? '1' : '0';
	data[1] = '\0';

	simple_transaction_set(file, 2);

	if (format == SMK_FIXED24_FMT)
		return SMK_LOADLEN;
	return count;
}
/**
 * smk_write_access - handle access check transaction
 * @file: file pointer
 * @buf: data from user space
 * @count: bytes sent
 * @ppos: where to start - must be 0
 */
static ssize_t smk_write_access(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	/* Fixed 24-character label variant of the access transaction. */
	return smk_user_access(file, buf, count, ppos, SMK_FIXED24_FMT);
}

static const struct file_operations smk_access_ops = {
	.write		= smk_write_access,
	.read		= simple_transaction_read,
	.release	= simple_transaction_release,
	.llseek		= generic_file_llseek,
};
/*
 * Seq_file read operations for /smack/load2
 */
static int load2_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smack_master_list *smlp =
		list_entry_rcu(list, struct smack_master_list, list);

	/* Long-label format for the load2 interface. */
	smk_rule_show(s, smlp->smk_rule, SMK_LONGLABEL);

	return 0;
}

static const struct seq_operations load2_seq_ops = {
	.start = load2_seq_start,
	.next  = load2_seq_next,
	.show  = load2_seq_show,
	.stop  = smk_seq_stop,
};

/**
 * smk_open_load2 - open() for /smack/load2
 * @inode: inode structure representing file
 * @file: "load2" file pointer
 *
 * For reading, use load2_seq_* seq_file reading operations.
 */
static int smk_open_load2(struct inode *inode, struct file *file)
{
	return seq_open(file, &load2_seq_ops);
}
/**
 * smk_write_load2 - write() for /smack/load2
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start - must be 0
 *
 */
static ssize_t smk_write_load2(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	/*
	 * Must have privilege.
	 */
	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	/* NULL list/lock: rules go to the global (master) rule list. */
	return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
				    SMK_LONG_FMT);
}

static const struct file_operations smk_load2_ops = {
	.open           = smk_open_load2,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_load2,
	.release        = seq_release,
};
/*
 * Seq_file read operations for /smack/load-self2
 */
static void *load_self2_seq_start(struct seq_file *s, loff_t *pos)
{
	struct task_smack *tsp = current_security();

	return smk_seq_start(s, pos, &tsp->smk_rules);
}

static void *load_self2_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct task_smack *tsp = current_security();

	return smk_seq_next(s, v, pos, &tsp->smk_rules);
}

/* Show one of the current task's rules in the long-label format. */
static int load_self2_seq_show(struct seq_file *s, void *v)
{
	struct list_head *list = v;
	struct smack_rule *srp =
		list_entry_rcu(list, struct smack_rule, list);

	smk_rule_show(s, srp, SMK_LONGLABEL);

	return 0;
}

static const struct seq_operations load_self2_seq_ops = {
	.start = load_self2_seq_start,
	.next  = load_self2_seq_next,
	.show  = load_self2_seq_show,
	.stop  = smk_seq_stop,
};

/**
 * smk_open_load_self2 - open() for /smack/load-self2
 * @inode: inode structure representing file
 * @file: "load" file pointer
 *
 * For reading, use load_seq_* seq_file reading operations.
 */
static int smk_open_load_self2(struct inode *inode, struct file *file)
{
	return seq_open(file, &load_self2_seq_ops);
}

/**
 * smk_write_load_self2 - write() for /smack/load-self2
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start - must be 0
 *
 */
static ssize_t smk_write_load_self2(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct task_smack *tsp = current_security();

	return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
				    &tsp->smk_rules_lock, SMK_LONG_FMT);
}

static const struct file_operations smk_load_self2_ops = {
	.open           = smk_open_load_self2,
	.read		= seq_read,
	.llseek         = seq_lseek,
	.write		= smk_write_load_self2,
	.release        = seq_release,
};
/**
 * smk_write_access2 - handle access check transaction
 * @file: file pointer
 * @buf: data from user space
 * @count: bytes sent
 * @ppos: where to start - must be 0
 */
static ssize_t smk_write_access2(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	/* Long-label variant of the access transaction. */
	return smk_user_access(file, buf, count, ppos, SMK_LONG_FMT);
}

static const struct file_operations smk_access2_ops = {
	.write		= smk_write_access2,
	.read		= simple_transaction_read,
	.release	= simple_transaction_release,
	.llseek		= generic_file_llseek,
};
/**
 * smk_write_revoke_subj - write() for /smack/revoke-subject
 * @file: file pointer
 * @buf: data from user space
 * @count: bytes sent
 * @ppos: where to start - must be 0
 */
static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	char *data;
	const char *cp;
	struct smack_known *skp;
	struct smack_rule *sp;
	struct list_head *rule_list;
	struct mutex *rule_lock;
	int rc = count;

	if (*ppos != 0)
		return -EINVAL;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	if (count == 0 || count > SMK_LONGLABEL)
		return -EINVAL;

	data = kzalloc(count, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0) {
		rc = -EFAULT;
		goto out_data;
	}

	cp = smk_parse_smack(data, count);
	if (IS_ERR(cp)) {
		rc = PTR_ERR(cp);
		goto out_data;
	}

	/* An unknown label has no rules: silently succeed. */
	skp = smk_find_entry(cp);
	if (skp == NULL)
		goto out_cp;

	rule_list = &skp->smk_rules;
	rule_lock = &skp->smk_rules_lock;

	/* Zero the access in every rule with this label as subject. */
	mutex_lock(rule_lock);

	list_for_each_entry_rcu(sp, rule_list, list)
		sp->smk_access = 0;

	mutex_unlock(rule_lock);

out_cp:
	kfree(cp);
out_data:
	kfree(data);

	return rc;
}

static const struct file_operations smk_revoke_subj_ops = {
	.write		= smk_write_revoke_subj,
	.read		= simple_transaction_read,
	.release	= simple_transaction_release,
	.llseek		= generic_file_llseek,
};
/**
 * smk_init_sysfs - initialize /sys/fs/smackfs
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smk_init_sysfs(void)
{
	return sysfs_create_mount_point(fs_kobj, "smackfs");
}
/**
 * smk_write_change_rule - write() for /smack/change-rule
 * @file: file pointer
 * @buf: data from user space
 * @count: bytes sent
 * @ppos: where to start - must be 0
 */
static ssize_t smk_write_change_rule(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	/*
	 * Must have privilege.
	 */
	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	/* NULL list/lock: modifications apply to the global rule list. */
	return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
				    SMK_CHANGE_FMT);
}

static const struct file_operations smk_change_rule_ops = {
	.write		= smk_write_change_rule,
	.read		= simple_transaction_read,
	.release	= simple_transaction_release,
	.llseek		= generic_file_llseek,
};
/**
 * smk_read_syslog - read() for smackfs/syslog
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @cn: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_syslog(struct file *filp, char __user *buf,
				size_t cn, loff_t *ppos)
{
	struct smack_known *skp;
	ssize_t rc = -EINVAL;
	int asize;

	if (*ppos != 0)
		return 0;

	/* The star label is the default when none has been set. */
	if (smack_syslog_label == NULL)
		skp = &smack_known_star;
	else
		skp = smack_syslog_label;

	/* Include the trailing NUL in what goes to userspace. */
	asize = strlen(skp->smk_known) + 1;

	if (cn >= asize)
		rc = simple_read_from_buffer(buf, cn, ppos, skp->smk_known,
						asize);

	return rc;
}
/**
 * smk_write_syslog - write() for smackfs/syslog
 * @file: file pointer, not actually used
 * @buf: where to get the data from
 * @count: bytes sent
 * @ppos: where to start
 *
 * Returns number of bytes written or error code, as appropriate
 */
static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	char *data;
	struct smack_known *skp;
	int rc = count;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	/*
	 * Bound the user-controlled allocation the same way the other
	 * label writers (e.g. smk_write_revoke_subj) do: reject empty
	 * writes and anything longer than a Smack label can be.  This
	 * also guarantees "count + 1" below cannot overflow.
	 */
	if (count == 0 || count > SMK_LONGLABEL)
		return -EINVAL;

	/* One extra byte so the buffer is always NUL terminated. */
	data = kzalloc(count + 1, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	if (copy_from_user(data, buf, count) != 0)
		rc = -EFAULT;
	else {
		skp = smk_import_entry(data, count);
		if (IS_ERR(skp))
			rc = PTR_ERR(skp);
		else
			smack_syslog_label = skp;
	}
	kfree(data);
	return rc;
}
/* File operations for /smack/syslog. */
static const struct file_operations smk_syslog_ops = {
	.read		= smk_read_syslog,
	.write		= smk_write_syslog,
	.llseek		= default_llseek,
};
/**
 * smk_read_ptrace - read() for /smack/ptrace
 * @filp: file pointer, not actually used
 * @buf: where to put the result
 * @count: maximum to send along
 * @ppos: where to start
 *
 * Returns number of bytes read or error code, as appropriate
 */
static ssize_t smk_read_ptrace(struct file *filp, char __user *buf,
			       size_t count, loff_t *ppos)
{
	char temp[32];

	if (*ppos != 0)
		return 0;

	/* Format the current ptrace rule as a decimal line. */
	sprintf(temp, "%d\n", smack_ptrace_rule);
	return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
}
/**
 * smk_write_ptrace - write() for /smack/ptrace
 * @file: file pointer
 * @buf: data from user space
 * @count: bytes sent
 * @ppos: where to start - must be 0
 */
static ssize_t smk_write_ptrace(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	char kbuf[32];
	int rule;

	if (!smack_privileged(CAP_MAC_ADMIN))
		return -EPERM;

	/* Whole value in one write, small enough to NUL terminate. */
	if (*ppos != 0 || count == 0 || count >= sizeof(kbuf))
		return -EINVAL;

	if (copy_from_user(kbuf, buf, count) != 0)
		return -EFAULT;
	kbuf[count] = '\0';

	if (sscanf(kbuf, "%d", &rule) != 1)
		return -EINVAL;
	if (rule < SMACK_PTRACE_DEFAULT || rule > SMACK_PTRACE_MAX)
		return -EINVAL;

	smack_ptrace_rule = rule;
	return count;
}
/* File operations for /smack/ptrace. */
static const struct file_operations smk_ptrace_ops = {
	.write		= smk_write_ptrace,
	.read		= smk_read_ptrace,
	.llseek		= default_llseek,
};
/**
 * smk_fill_super - fill the smackfs superblock
 * @sb: the empty superblock
 * @data: unused
 * @silent: unused
 *
 * Fill in the well known entries for the smack filesystem
 *
 * Returns 0 on success, an error code on failure
 */
static int smk_fill_super(struct super_block *sb, void *data, int silent)
{
	int rc;
	static struct tree_descr smack_files[] = {
		[SMK_LOAD] = {
			"load", &smk_load_ops, S_IRUGO|S_IWUSR},
		[SMK_CIPSO] = {
			"cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
		[SMK_DOI] = {
			"doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
		[SMK_DIRECT] = {
			"direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
		[SMK_AMBIENT] = {
			"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
		[SMK_NET4ADDR] = {
			"netlabel", &smk_net4addr_ops, S_IRUGO|S_IWUSR},
		[SMK_ONLYCAP] = {
			"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
		[SMK_LOGGING] = {
			"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
		[SMK_LOAD_SELF] = {
			"load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
		[SMK_ACCESSES] = {
			"access", &smk_access_ops, S_IRUGO|S_IWUGO},
		[SMK_MAPPED] = {
			"mapped", &smk_mapped_ops, S_IRUGO|S_IWUSR},
		[SMK_LOAD2] = {
			"load2", &smk_load2_ops, S_IRUGO|S_IWUSR},
		[SMK_LOAD_SELF2] = {
			"load-self2", &smk_load_self2_ops, S_IRUGO|S_IWUGO},
		[SMK_ACCESS2] = {
			"access2", &smk_access2_ops, S_IRUGO|S_IWUGO},
		[SMK_CIPSO2] = {
			"cipso2", &smk_cipso2_ops, S_IRUGO|S_IWUSR},
		[SMK_REVOKE_SUBJ] = {
			"revoke-subject", &smk_revoke_subj_ops,
			S_IRUGO|S_IWUSR},
		[SMK_CHANGE_RULE] = {
			"change-rule", &smk_change_rule_ops, S_IRUGO|S_IWUSR},
		[SMK_SYSLOG] = {
			"syslog", &smk_syslog_ops, S_IRUGO|S_IWUSR},
		[SMK_PTRACE] = {
			"ptrace", &smk_ptrace_ops, S_IRUGO|S_IWUSR},
#ifdef CONFIG_SECURITY_SMACK_BRINGUP
		[SMK_UNCONFINED] = {
			"unconfined", &smk_unconfined_ops, S_IRUGO|S_IWUSR},
#endif
#if IS_ENABLED(CONFIG_IPV6)
		[SMK_NET6ADDR] = {
			"ipv6host", &smk_net6addr_ops, S_IRUGO|S_IWUSR},
#endif /* CONFIG_IPV6 */
		/* last one */
		{""}
	};

	rc = simple_fill_super(sb, SMACK_MAGIC, smack_files);
	if (rc != 0) {
		printk(KERN_ERR "%s failed %d while creating inodes\n",
			__func__, rc);
		return rc;
	}

	/*
	 * The previous version fetched d_inode(sb->s_root) into a local
	 * that was never used; the dead variable has been removed.
	 */
	return 0;
}
/**
 * smk_mount - get the smackfs superblock
 * @fs_type: passed along without comment
 * @flags: passed along without comment
 * @dev_name: passed along without comment
 * @data: passed along without comment
 *
 * Just passes everything along.
 *
 * Returns what the lower level code does.
 */
static struct dentry *smk_mount(struct file_system_type *fs_type,
		      int flags, const char *dev_name, void *data)
{
	return mount_single(fs_type, flags, data, smk_fill_super);
}

/* Filesystem type describing smackfs. */
static struct file_system_type smk_fs_type = {
	.name		= "smackfs",
	.mount		= smk_mount,
	.kill_sb	= kill_litter_super,
};

/* Kernel-internal mount of smackfs, set up in init_smk_fs(). */
static struct vfsmount *smackfs_mount;

/*
 * Preset the netlabel MLS attributes for a well known label so the
 * networking code can use it without a lookup at packet time.
 */
static int __init smk_preset_netlabel(struct smack_known *skp)
{
	skp->smk_netlabel.domain = skp->smk_known;
	skp->smk_netlabel.flags =
		NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
	return smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
				&skp->smk_netlabel, strlen(skp->smk_known));
}
/**
 * init_smk_fs - get the smackfs superblock
 *
 * register the smackfs
 *
 * Do not register smackfs if Smack wasn't enabled
 * on boot. We can not put this method normally under the
 * smack_init() code path since the security subsystem get
 * initialized before the vfs caches.
 *
 * Returns true if we were not chosen on boot or if
 * we were chosen and filesystem registration succeeded.
 */
static int __init init_smk_fs(void)
{
	/*
	 * Well known labels whose netlabel attributes are preset here.
	 * Keeping them in a table removes six copies of the same
	 * rc/err sequence; the order matches the previous open-coded
	 * calls.
	 */
	static struct smack_known *const presets[] = {
		&smack_known_floor,
		&smack_known_hat,
		&smack_known_huh,
		&smack_known_invalid,
		&smack_known_star,
		&smack_known_web,
	};
	int err;
	int rc;
	int i;

	if (!security_module_enable("smack"))
		return 0;

	err = smk_init_sysfs();
	if (err)
		printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n");

	err = register_filesystem(&smk_fs_type);
	if (!err) {
		smackfs_mount = kern_mount(&smk_fs_type);
		if (IS_ERR(smackfs_mount)) {
			printk(KERN_ERR "smackfs: could not mount!\n");
			err = PTR_ERR(smackfs_mount);
			smackfs_mount = NULL;
		}
	}

	smk_cipso_doi();
	smk_unlbl_ambient(NULL);

	/* Try every preset; remember only the first failure. */
	for (i = 0; i < ARRAY_SIZE(presets); i++) {
		rc = smk_preset_netlabel(presets[i]);
		if (err == 0 && rc < 0)
			err = rc;
	}

	return err;
}
__initcall(init_smk_fs);
| gpl-2.0 |
nazunamoe/Oxygen_united_kernel-gproj | drivers/base/dd.c | 445 | 16636 | /*
* drivers/base/dd.c - The core device/driver interactions.
*
* This file contains the (sometimes tricky) code that controls the
* interactions between devices and drivers, which primarily includes
* driver binding and unbinding.
*
* All of this code used to exist in drivers/base/bus.c, but was
* relocated to here in the name of compartmentalization (since it wasn't
* strictly code just for the 'struct bus_type'.
*
* Copyright (c) 2002-5 Patrick Mochel
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007-2009 Novell Inc.
*
* This file is released under the GPLv2
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include "base.h"
#include "power/power.h"
/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information which means some drivers will get probed before a
 * resource it depends on is available.  For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized.  If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
 *
 * Deferred probe maintains two lists of devices, a pending list and an active
 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
 * pending list.  A successful driver probe will trigger moving all devices
 * from the pending to the active list so that the workqueue will eventually
 * retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * of the (struct device*)->p->deferred_probe pointers are manipulated
 */
static DEFINE_MUTEX(deferred_probe_mutex);
/* Devices that deferred their probe and are waiting for a trigger. */
static LIST_HEAD(deferred_probe_pending_list);
/* Devices moved here by a trigger; the workqueue reprobes these. */
static LIST_HEAD(deferred_probe_active_list);
/* Single-threaded workqueue running deferred_probe_work_func(). */
static struct workqueue_struct *deferred_wq;
/* Bumped on every trigger; used to detect a trigger racing a probe. */
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/**
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device get freed by another thread
	 * and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);
		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);
		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
/* Work item driving the retry loop above; queued on deferred_wq. */
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
/* Put @dev on the pending list unless it is already on a deferred list. */
static void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Added to deferred list\n");
		list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
	}
	mutex_unlock(&deferred_probe_mutex);
}

/* Remove @dev from whichever deferred list it is on, if any. */
void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (!list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Removed from deferred list\n");
		list_del_init(&dev->p->deferred_probe);
	}
	mutex_unlock(&deferred_probe_mutex);
}
/* Triggers are ignored until deferred_probe_initcall() flips this on. */
static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This functions moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe.  In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer.  If the second
 * depends on the first, then it will get put on the pending list after the
 * trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver.  If the trigger count
 * changes in the midst of a probe, then deferred processing should be triggered
 * again.
 */
static void driver_deferred_probe_trigger(void)
{
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	queue_work(deferred_wq, &deferred_probe_work);
}

/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	deferred_wq = create_singlethread_workqueue("deferwq");
	if (WARN_ON(!deferred_wq))
		return -ENOMEM;

	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_workqueue(deferred_wq);
	return 0;
}
late_initcall(deferred_probe_initcall);
/*
 * Record that @dev is now bound to dev->driver: add it to the driver's
 * device klist, clear any deferred-probe bookkeeping, retrigger deferred
 * probes that may depend on this device, and notify the bus.
 */
static void driver_bound(struct device *dev)
{
	if (klist_node_attached(&dev->p->knode_driver)) {
		printk(KERN_WARNING "%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
		 __func__, dev->driver->name);

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);
}
/*
 * Create the sysfs cross-links between @dev and its driver after
 * notifying the bus of the impending bind.  On failure of the second
 * link the first is torn down again so no half-linked state remains.
 */
static int driver_sysfs_add(struct device *dev)
{
	int ret;

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

	/* Link the device from under the driver's kobject. */
	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
				kobject_name(&dev->kobj));
	if (ret)
		return ret;

	/* And the driver from under the device. */
	ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj, "driver");
	if (ret)
		sysfs_remove_link(&dev->driver->p->kobj,
				  kobject_name(&dev->kobj));

	return ret;
}
/* Tear down the device<->driver sysfs links; no-op if no driver bound. */
static void driver_sysfs_remove(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (!drv)
		return;

	sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
	sysfs_remove_link(&dev->kobj, "driver");
}
/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count
 * nor take the bus's rwsem. Please verify those are accounted
 * for before calling this. (It is ok to call with no other effort
 * from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 *
 * Returns 0 on success or the sysfs link-creation error.
 */
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
/* Number of probes currently in flight; see driver_probe_done(). */
static atomic_t probe_count = ATOMIC_INIT(0);
/* Woken whenever a probe completes so waiters can recheck probe_count. */
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

/*
 * Do the actual probe of @dev by @drv: set dev->driver, create sysfs
 * links, call the bus or driver probe hook, and either complete the
 * bind or unwind on failure.  Returns 1 on successful bind, 0 on any
 * probe failure (so the caller tries the next driver).  -EPROBE_DEFER
 * from the probe hook puts the device on the deferred list instead.
 */
static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = 0;
	/* Snapshot so a trigger racing this probe can be detected below. */
	int local_trigger_count = atomic_read(&deferred_trigger_count);

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	WARN_ON(!list_empty(&dev->devres_head));

	dev->driver = drv;
	if (driver_sysfs_add(dev)) {
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	/* The bus probe hook, when present, wraps the driver's. */
	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	/* Unwind in reverse order of the setup above. */
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;

	if (ret == -EPROBE_DEFER) {
		/* Driver requested deferred probing */
		dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		/* Did a trigger occur while probing? Need to re-trigger if yes */
		if (local_trigger_count != atomic_read(&deferred_trigger_count))
			driver_deferred_probe_trigger();
	} else if (ret != -ENODEV && ret != -ENXIO) {
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	} else {
		pr_debug("%s: probe of %s rejects match %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up(&probe_waitqueue);
	return ret;
}
/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Returns 0 when no probe is in flight, -EBUSY otherwise.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
	pr_debug("%s: probe_count = %d\n", __func__,
		 atomic_read(&probe_count));
	if (atomic_read(&probe_count))
		return -EBUSY;
	return 0;
}

/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	/* Also flush probes running asynchronously. */
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	/* Keep the device powered and quiescent across the probe. */
	pm_runtime_get_noresume(dev);
	pm_runtime_barrier(dev);
	ret = really_probe(dev, drv);
	pm_runtime_put_sync(dev);

	return ret;
}
/*
 * bus_for_each_drv() callback: probe @data (a struct device *) with
 * @drv if they match; returning non-zero stops the iteration.
 */
static int __device_attach(struct device_driver *drv, void *data)
{
	struct device *dev = data;

	return driver_match_device(drv, dev) ?
		driver_probe_device(drv, dev) : 0;
}
/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		/* A driver was pre-set (e.g. by device_bind_driver()). */
		if (klist_node_attached(&dev->p->knode_driver)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			/* Binding failed; drop the stale driver pointer. */
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		/* Keep the device from runtime-suspending while probing. */
		pm_runtime_get_noresume(dev);
		ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
		pm_runtime_put_sync(dev);
	}
out_unlock:
	device_unlock(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_attach);
/* bus_for_each_dev() callback used by driver_attach(). */
static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;

	/*
	 * Lock device and try to bind to it. We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if it didn't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	if (!driver_match_device(drv, dev))
		return 0;

	if (dev->parent)	/* Needed for USB */
		device_lock(dev->parent);
	device_lock(dev);
	if (!dev->driver)
		driver_probe_device(drv, dev);
	device_unlock(dev);
	if (dev->parent)
		device_unlock(dev->parent);

	return 0;
}

/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		/* Resume the device so the remove path sees it powered. */
		pm_runtime_get_sync(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		/* The bus remove hook, when present, wraps the driver's. */
		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);
		devres_release_all(dev);
		dev->driver = NULL;
		klist_remove(&dev->p->knode_driver);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

	}
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_lock(dev);
	__device_release_driver(dev);
	device_unlock(dev);
}
EXPORT_SYMBOL_GPL(device_release_driver);
/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 *
 * Repeatedly takes the last device off the driver's klist under the
 * klist spinlock, pins it with get_device(), then releases it with the
 * proper device (and parent, for USB) locks held.
 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		/* Hold a reference so the device survives the unlock. */
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);

		if (dev->parent)	/* Needed for USB */
			device_lock(dev->parent);
		device_lock(dev);
		/* Recheck: the device may have been unbound meanwhile. */
		if (dev->driver == drv)
			__device_release_driver(dev);
		device_unlock(dev);
		if (dev->parent)
			device_unlock(dev->parent);
		put_device(dev);
	}
}
/*
 * These exports can't be _GPL due to .h files using this within them, and it
 * might break something that was previously working...
 */
/* Return the driver-private data for @dev, or NULL if none is set. */
void *dev_get_drvdata(const struct device *dev)
{
	return (dev && dev->p) ? dev->p->driver_data : NULL;
}
EXPORT_SYMBOL(dev_get_drvdata);
/*
 * Set the driver-private data for @dev, allocating dev->p on first use.
 * Returns 0 on success or the error from device_private_init().
 */
int dev_set_drvdata(struct device *dev, void *data)
{
	int error;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			return error;
	}
	dev->p->driver_data = data;
	return 0;
}
EXPORT_SYMBOL(dev_set_drvdata);
| gpl-2.0 |
kissge/linuxkernel | arch/arm/plat-samsung/irq-uart.c | 957 | 3764 | /* arch/arm/plat-samsung/irq-uart.c
* originally part of arch/arm/plat-s3c64xx/irq.c
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* Samsung- UART Interrupt handling
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/map.h>
#include <plat/irq-uart.h>
#include <plat/regs-serial.h>
#include <plat/cpu.h>
/* Note, we make use of the fact that the parent IRQs, IRQ_UART[0..3]
 * are consecutive when looking up the interrupt in the demux routines.
 */
/* Register base for the UART owning @irq, from the per-IRQ chip data. */
static inline void __iomem *s3c_irq_uart_base(unsigned int irq)
{
	struct s3c_uart_irq *uirq = get_irq_chip_data(irq);
	return uirq->regs;
}
/*
 * Bit index of @irq within its UART's interrupt registers.  Each UART's
 * four sources occupy four consecutive virtual IRQ numbers, so the
 * position is the IRQ number modulo four.
 */
static inline unsigned int s3c_irq_uart_bit(unsigned int irq)
{
	return irq % 4;
}
/* Mask @irq by setting its bit in the UART interrupt mask register. */
static void s3c_irq_uart_mask(unsigned int irq)
{
	void __iomem *regs = s3c_irq_uart_base(irq);
	unsigned int bit = s3c_irq_uart_bit(irq);
	u32 reg;

	reg = __raw_readl(regs + S3C64XX_UINTM);
	reg |= (1 << bit);
	__raw_writel(reg, regs + S3C64XX_UINTM);
}

/* Mask @irq and acknowledge it by writing its pending bit. */
static void s3c_irq_uart_maskack(unsigned int irq)
{
	void __iomem *regs = s3c_irq_uart_base(irq);
	unsigned int bit = s3c_irq_uart_bit(irq);
	u32 reg;

	reg = __raw_readl(regs + S3C64XX_UINTM);
	reg |= (1 << bit);
	__raw_writel(reg, regs + S3C64XX_UINTM);
	/* UINTP is write-one-to-clear for the pending source. */
	__raw_writel(1 << bit, regs + S3C64XX_UINTP);
}

/* Unmask @irq by clearing its bit in the UART interrupt mask register. */
static void s3c_irq_uart_unmask(unsigned int irq)
{
	void __iomem *regs = s3c_irq_uart_base(irq);
	unsigned int bit = s3c_irq_uart_bit(irq);
	u32 reg;

	reg = __raw_readl(regs + S3C64XX_UINTM);
	reg &= ~(1 << bit);
	__raw_writel(reg, regs + S3C64XX_UINTM);
}

/* Acknowledge @irq by clearing its pending bit. */
static void s3c_irq_uart_ack(unsigned int irq)
{
	void __iomem *regs = s3c_irq_uart_base(irq);
	unsigned int bit = s3c_irq_uart_bit(irq);

	__raw_writel(1 << bit, regs + S3C64XX_UINTP);
}
/*
 * Chained handler for a UART's parent interrupt: read the pending
 * register and dispatch each of the four sub-sources that is set.
 */
static void s3c_irq_demux_uart(unsigned int irq, struct irq_desc *desc)
{
	struct s3c_uart_irq *uirq = desc->handler_data;
	u32 pend = __raw_readl(uirq->regs + S3C64XX_UINTP);
	int base = uirq->base_irq;
	int bit;

	for (bit = 0; bit < 4; bit++)
		if (pend & (1 << bit))
			generic_handle_irq(base + bit);
}
/* irq_chip for the per-UART sub-interrupts demuxed above. */
static struct irq_chip s3c_irq_uart = {
	.name		= "s3c-uart",
	.mask		= s3c_irq_uart_mask,
	.unmask		= s3c_irq_uart_unmask,
	.mask_ack	= s3c_irq_uart_maskack,
	.ack		= s3c_irq_uart_ack,
};
/*
 * Set up the virtual IRQs and the chained demux handler for one UART.
 */
static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
{
	struct irq_desc *desc = irq_to_desc(uirq->parent_irq);
	void __iomem *reg_base = uirq->regs;
	unsigned int irq;
	int offs;

	/* mask all interrupts at the start. */
	__raw_writel(0xf, reg_base + S3C64XX_UINTM);

	/*
	 * Each UART has four interrupt sources (bits 0..3 of UINTM/UINTP)
	 * and s3c_irq_demux_uart() dispatches all four, so all four
	 * virtual IRQs must be initialized.  The previous bound of 3 left
	 * the fourth source without a chip or handler.
	 */
	for (offs = 0; offs < 4; offs++) {
		irq = uirq->base_irq + offs;

		set_irq_chip(irq, &s3c_irq_uart);
		set_irq_chip_data(irq, uirq);
		set_irq_handler(irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}

	desc->handler_data = uirq;
	set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
}
/**
 * s3c_init_uart_irqs() - initialise UART IRQs and the necessary demuxing
 * @irq: The interrupt data for registering
 * @nr_irqs: The number of interrupt descriptions in @irq.
 *
 * Register the UART interrupts specified by @irq including the demuxing
 * routines. This supports the S3C6400 and newer style of devices.
 */
void __init s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs)
{
	/* Set up each UART's sub-controller in turn. */
	for (; nr_irqs > 0; nr_irqs--, irq++)
		s3c_init_uart_irq(irq);
}
| gpl-2.0 |
cdesjardins/DTS-Eagle-Integration_CAF-Android-kernel | arch/mips/sgi-ip27/ip27-smp.c | 1981 | 6109 | /*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
* Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/launch.h>
#include <asm/sn/mapped_kernel.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/types.h>
#include <asm/sn/sn0/hubpi.h>
#include <asm/sn/sn0/hubio.h>
#include <asm/sn/sn0/ip27.h>
/*
 * Takes as first input the PROM assigned cpu id, and the kernel
 * assigned cpu id as the second.
 *
 * Records the nasid, node id and slice for the kernel cpu number in the
 * per-cpu lookup tables.
 */
static void alloc_cpupda(cpuid_t cpu, int cpunum)
{
	cnodeid_t node = get_cpu_cnode(cpu);
	nasid_t nasid = COMPACT_TO_NASID_NODEID(node);

	cputonasid(cpunum) = nasid;
	sn_cpu_info[cpunum].p_nodeid = node;
	cputoslice(cpunum) = get_cpu_slice(cpu);
}
/*
 * Return the nasid a board actually answers at.  A fully disabled node
 * board reports its hub's physical id rather than the board nasid;
 * INVALID_NASID is returned when no board or hub is present.
 */
static nasid_t get_actual_nasid(lboard_t *brd)
{
	klhub_t *hub;

	if (brd == NULL)
		return INVALID_NASID;

	/* find out if we are a completely disabled brd. */
	hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
	if (hub == NULL)
		return INVALID_NASID;

	if (hub->hub_info.flags & KLINFO_ENABLE)
		return brd->brd_nasid;

	return hub->hub_info.physid;	/* disabled node brd */
}
/*
 * Walk the klconfig board list of @nasid, register every enabled CPU
 * found there (up to NR_CPUS total) and record its compact node id.
 * Returns the highest PROM cpu id seen so far, starting from @highest.
 */
static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest)
{
	/* Running total across calls; bounds the NR_CPUS check below. */
	static int tot_cpus_found = 0;
	lboard_t *brd;
	klcpu_t *acpu;
	int cpus_found = 0;
	cpuid_t cpuid;

	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);

	do {
		acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
		while (acpu) {
			cpuid = acpu->cpu_info.virtid;
			/* cnode is not valid for completely disabled brds */
			if (get_actual_nasid(brd) == brd->brd_nasid)
				cpuid_to_compact_node[cpuid] = cnode;
			if (cpuid > highest)
				highest = cpuid;
			/* Only let it join in if it's marked enabled */
			if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
			    (tot_cpus_found != NR_CPUS)) {
				set_cpu_possible(cpuid, true);
				alloc_cpupda(cpuid, tot_cpus_found);
				cpus_found++;
				tot_cpus_found++;
			}
			acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
								KLSTRUCT_CPU);
		}
		brd = KLCF_NEXT(brd);
		if (!brd)
			break;

		brd = find_lboard(brd, KLTYPE_IP27);
	} while (brd);

	return highest;
}
/*
 * Discover all nodes listed in the GDA nasid table, build the
 * compact-node <-> nasid mappings, and enumerate the CPUs on each node.
 */
void cpu_node_probe(void)
{
	int i, highest = 0;
	gda_t *gdap = GDA;

	/*
	 * Initialize the arrays to invalid nodeid (-1)
	 */
	for (i = 0; i < MAX_COMPACT_NODES; i++)
		compact_to_nasid_node[i] = INVALID_NASID;
	for (i = 0; i < MAX_NASIDS; i++)
		nasid_to_compact_node[i] = INVALID_CNODEID;
	for (i = 0; i < MAXCPUS; i++)
		cpuid_to_compact_node[i] = INVALID_CNODEID;

	/*
	 * MCD - this whole "compact node" stuff can probably be dropped,
	 * as we can handle sparse numbering now
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_COMPACT_NODES; i++) {
		nasid_t nasid = gdap->g_nasidtable[i];
		/* The table is packed; the first invalid entry ends it. */
		if (nasid == INVALID_NASID)
			break;
		compact_to_nasid_node[i] = nasid;
		nasid_to_compact_node[nasid] = i;
		node_set_online(num_online_nodes());
		highest = do_cpumask(i, nasid, highest);
	}

	printk("Discovered %d cpus on %d nodes\n", highest + 1, num_online_nodes());
}
/* Mask both hub PI interrupt mask pairs and clear all 128 sources. */
static __init void intr_clear_all(nasid_t nasid)
{
	int i;

	REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);

	for (i = 0; i < 128; i++)
		REMOTE_HUB_CLR_INTR(nasid, i);
}
/* Raise the hub interrupt that implements IPI @action on CPU @destid. */
static void ip27_send_ipi_single(int destid, unsigned int action)
{
	int irq;

	switch (action) {
	case SMP_RESCHEDULE_YOURSELF:
		irq = CPU_RESCHED_A_IRQ;
		break;
	case SMP_CALL_FUNCTION:
		irq = CPU_CALL_A_IRQ;
		break;
	default:
		panic("sendintr");
	}

	/* The _A/_B IRQ pair is selected by the target's slice. */
	irq += cputoslice(destid);

	/*
	 * Convert the compact hub number to the NASID to get the correct
	 * part of the address space.  Then set the interrupt bit associated
	 * with the CPU we want to send the interrupt to.
	 */
	REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
}

/* Send IPI @action to every CPU in @mask, one at a time. */
static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ip27_send_ipi_single(i, action);
}
/* Early per-CPU setup on a freshly started secondary. */
static void __cpuinit ip27_init_secondary(void)
{
	per_cpu_init();
}

/* Final secondary bring-up: start the RT clock events, enable IRQs. */
static void __cpuinit ip27_smp_finish(void)
{
	extern void hub_rt_clock_event_init(void);

	hub_rt_clock_event_init();
	local_irq_enable();
}

/* Nothing to do once all CPUs are up. */
static void __init ip27_cpus_done(void)
{
}
/*
 * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
 * set sp to the kernel stack of the newly created idle process, gp to the proc
 * struct so that current_thread_info() will work.
 */
static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long gp = (unsigned long)task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);

	/* Hand the entry point and initial sp/gp to the PROM launcher. */
	LAUNCH_SLAVE(cputonasid(cpu), cputoslice(cpu),
		(launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
		0, (void *) sp, (void *) gp);
}
static void __init ip27_smp_setup(void)
{
cnodeid_t cnode;
for_each_online_node(cnode) {
if (cnode == 0)
continue;
intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
}
replicate_kernel_text();
/*
* Assumption to be fixed: we're always booted on logical / physical
* processor 0. While we're always running on logical processor 0
* this still means this is physical processor zero; it might for
* example be disabled in the firmware.
*/
alloc_cpupda(0, 0);
}
static void __init ip27_prepare_cpus(unsigned int max_cpus)
{
/* We already did everything necessary earlier */
}
/* MIPS platform SMP operations table for SGI IP27. */
struct plat_smp_ops ip27_smp_ops = {
    .send_ipi_single	= ip27_send_ipi_single,
    .send_ipi_mask		= ip27_send_ipi_mask,
    .init_secondary		= ip27_init_secondary,
    .smp_finish		= ip27_smp_finish,
    .cpus_done		= ip27_cpus_done,
    .boot_secondary		= ip27_boot_secondary,
    .smp_setup		= ip27_smp_setup,
    .prepare_cpus		= ip27_prepare_cpus,
};
| gpl-2.0 |
AndreiLux/ref-3.10 | sound/aoa/fabrics/layout.c | 2237 | 27988 | /*
* Apple Onboard Audio driver -- layout/machine id fabric
*
* Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net>
*
* GPL v2, can be found in COPYING.
*
*
* This fabric module looks for sound codecs based on the
* layout-id or device-id property in the device tree.
*/
#include <asm/prom.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "../aoa.h"
#include "../soundbus/soundbus.h"
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Layout-ID fabric for snd-aoa");
#define MAX_CODECS_PER_BUS 2
/* These are the connections the layout fabric
* knows about. It doesn't really care about the
* input ones, but I thought I'd separate them
* to give them proper names. The thing is that
* Apple usually will distinguish the active output
* by GPIOs, while the active input is set directly
* on the codec. Hence we here tell the codec what
* we think is connected. This information is hard-
* coded below ... */
#define CC_SPEAKERS (1<<0)
#define CC_HEADPHONE (1<<1)
#define CC_LINEOUT (1<<2)
#define CC_DIGITALOUT (1<<3)
#define CC_LINEIN (1<<4)
#define CC_MICROPHONE (1<<5)
#define CC_DIGITALIN (1<<6)
/* pretty bogus but users complain...
* This is a flag saying that the LINEOUT
* should be renamed to HEADPHONE.
* be careful with input detection! */
#define CC_LINEOUT_LABELLED_HEADPHONE (1<<7)
struct codec_connection {
/* CC_ flags from above */
int connected;
/* codec dependent bit to be set in the aoa_codec.connected field.
* This intentionally doesn't have any generic flags because the
* fabric has to know the codec anyway and all codecs might have
* different connectors */
int codec_bit;
};
struct codec_connect_info {
char *name;
struct codec_connection *connections;
};
#define LAYOUT_FLAG_COMBO_LINEOUT_SPDIF (1<<0)
struct layout {
unsigned int layout_id, device_id;
struct codec_connect_info codecs[MAX_CODECS_PER_BUS];
int flags;
/* if busname is not assigned, we use 'Master' below,
* so that our layout table doesn't need to be filled
* too much.
* We only assign these two if we expect to find more
* than one soundbus, i.e. on those machines with
* multiple layout-ids */
char *busname;
int pcmid;
};
/* Module aliases so udev/modprobe auto-loads this fabric for every
 * layout-id / device-id the table below knows about. */
MODULE_ALIAS("sound-layout-36");
MODULE_ALIAS("sound-layout-41");
MODULE_ALIAS("sound-layout-45");
MODULE_ALIAS("sound-layout-47");
MODULE_ALIAS("sound-layout-48");
MODULE_ALIAS("sound-layout-49");
MODULE_ALIAS("sound-layout-50");
MODULE_ALIAS("sound-layout-51");
MODULE_ALIAS("sound-layout-56");
MODULE_ALIAS("sound-layout-57");
MODULE_ALIAS("sound-layout-58");
MODULE_ALIAS("sound-layout-60");
MODULE_ALIAS("sound-layout-61");
MODULE_ALIAS("sound-layout-62");
MODULE_ALIAS("sound-layout-64");
MODULE_ALIAS("sound-layout-65");
MODULE_ALIAS("sound-layout-66");
MODULE_ALIAS("sound-layout-67");
MODULE_ALIAS("sound-layout-68");
MODULE_ALIAS("sound-layout-69");
MODULE_ALIAS("sound-layout-70");
MODULE_ALIAS("sound-layout-72");
MODULE_ALIAS("sound-layout-76");
MODULE_ALIAS("sound-layout-80");
MODULE_ALIAS("sound-layout-82");
MODULE_ALIAS("sound-layout-84");
MODULE_ALIAS("sound-layout-86");
MODULE_ALIAS("sound-layout-90");
MODULE_ALIAS("sound-layout-92");
MODULE_ALIAS("sound-layout-94");
MODULE_ALIAS("sound-layout-96");
MODULE_ALIAS("sound-layout-98");
MODULE_ALIAS("sound-layout-100");
MODULE_ALIAS("aoa-device-id-14");
MODULE_ALIAS("aoa-device-id-22");
MODULE_ALIAS("aoa-device-id-35");
MODULE_ALIAS("aoa-device-id-44");
/* Connection tables: one per codec/machine combination.  Each array is
 * terminated by an entry with .connected == 0; .codec_bit values are
 * codec-specific (see struct codec_connection above). */

/* onyx with all but microphone connected */
static struct codec_connection onyx_connections_nomic[] = {
    {
        .connected = CC_SPEAKERS | CC_HEADPHONE | CC_LINEOUT,
        .codec_bit = 0,
    },
    {
        .connected = CC_DIGITALOUT,
        .codec_bit = 1,
    },
    {
        .connected = CC_LINEIN,
        .codec_bit = 2,
    },
    {} /* terminate array by .connected == 0 */
};
/* onyx on machines without headphone */
static struct codec_connection onyx_connections_noheadphones[] = {
    {
        .connected = CC_SPEAKERS | CC_LINEOUT |
                     CC_LINEOUT_LABELLED_HEADPHONE,
        .codec_bit = 0,
    },
    {
        .connected = CC_DIGITALOUT,
        .codec_bit = 1,
    },
    /* FIXME: are these correct? probably not for all the machines
     * below ... If not this will need separating. */
    {
        .connected = CC_LINEIN,
        .codec_bit = 2,
    },
    {
        .connected = CC_MICROPHONE,
        .codec_bit = 3,
    },
    {} /* terminate array by .connected == 0 */
};
/* onyx on machines with real line-out */
static struct codec_connection onyx_connections_reallineout[] = {
    {
        .connected = CC_SPEAKERS | CC_LINEOUT | CC_HEADPHONE,
        .codec_bit = 0,
    },
    {
        .connected = CC_DIGITALOUT,
        .codec_bit = 1,
    },
    {
        .connected = CC_LINEIN,
        .codec_bit = 2,
    },
    {} /* terminate array by .connected == 0 */
};
/* tas on machines without line out */
static struct codec_connection tas_connections_nolineout[] = {
    {
        .connected = CC_SPEAKERS | CC_HEADPHONE,
        .codec_bit = 0,
    },
    {
        .connected = CC_LINEIN,
        .codec_bit = 2,
    },
    {
        .connected = CC_MICROPHONE,
        .codec_bit = 3,
    },
    {} /* terminate array by .connected == 0 */
};
/* tas on machines with neither line out nor line in */
static struct codec_connection tas_connections_noline[] = {
    {
        .connected = CC_SPEAKERS | CC_HEADPHONE,
        .codec_bit = 0,
    },
    {
        .connected = CC_MICROPHONE,
        .codec_bit = 3,
    },
    {} /* terminate array by .connected == 0 */
};
/* tas on machines without microphone */
static struct codec_connection tas_connections_nomic[] = {
    {
        .connected = CC_SPEAKERS | CC_HEADPHONE | CC_LINEOUT,
        .codec_bit = 0,
    },
    {
        .connected = CC_LINEIN,
        .codec_bit = 2,
    },
    {} /* terminate array by .connected == 0 */
};
/* tas on machines with everything connected */
static struct codec_connection tas_connections_all[] = {
    {
        .connected = CC_SPEAKERS | CC_HEADPHONE | CC_LINEOUT,
        .codec_bit = 0,
    },
    {
        .connected = CC_LINEIN,
        .codec_bit = 2,
    },
    {
        .connected = CC_MICROPHONE,
        .codec_bit = 3,
    },
    {} /* terminate array by .connected == 0 */
};
/* toonie: speakers/headphone only */
static struct codec_connection toonie_connections[] = {
    {
        .connected = CC_SPEAKERS | CC_HEADPHONE,
        .codec_bit = 0,
    },
    {} /* terminate array by .connected == 0 */
};
/* topaz: digital input only */
static struct codec_connection topaz_input[] = {
    {
        .connected = CC_DIGITALIN,
        .codec_bit = 0,
    },
    {} /* terminate array by .connected == 0 */
};
/* topaz: digital output only */
static struct codec_connection topaz_output[] = {
    {
        .connected = CC_DIGITALOUT,
        .codec_bit = 1,
    },
    {} /* terminate array by .connected == 0 */
};
/* topaz: digital input and output */
static struct codec_connection topaz_inout[] = {
    {
        .connected = CC_DIGITALIN,
        .codec_bit = 0,
    },
    {
        .connected = CC_DIGITALOUT,
        .codec_bit = 1,
    },
    {} /* terminate array by .connected == 0 */
};
/* Machine table, matched by either layout-id or device-id from the
 * device tree.  Terminated by an all-zero entry (codecs[0].name == NULL,
 * which is what the lookup loops test for). */
static struct layout layouts[] =  {
    /* last PowerBooks (15" Oct 2005) */
    { .layout_id = 82,
      .flags = LAYOUT_FLAG_COMBO_LINEOUT_SPDIF,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
      .codecs[1] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    /* PowerMac9,1 */
    { .layout_id = 60,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_reallineout,
      },
    },
    /* PowerMac9,1 */
    { .layout_id = 61,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    /* PowerBook5,7 */
    { .layout_id = 64,
      .flags = LAYOUT_FLAG_COMBO_LINEOUT_SPDIF,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
    },
    /* PowerBook5,7 */
    { .layout_id = 65,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    /* PowerBook5,9 [17" Oct 2005] */
    { .layout_id = 84,
      .flags = LAYOUT_FLAG_COMBO_LINEOUT_SPDIF,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
      .codecs[1] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    /* PowerMac8,1 */
    { .layout_id = 45,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
      .codecs[1] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    /* Quad PowerMac (analog in, analog/digital out) */
    { .layout_id = 68,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_nomic,
      },
    },
    /* Quad PowerMac (digital in) */
    { .layout_id = 69,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
      /* second soundbus on this machine: give it its own name/pcm id */
      .busname = "digital in", .pcmid = 1 },
    /* Early 2005 PowerBook (PowerBook 5,6) */
    { .layout_id = 70,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_nolineout,
      },
    },
    /* PowerBook 5,4 */
    { .layout_id = 51,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_nolineout,
      },
    },
    /* PowerBook6,5 */
    { .device_id = 44,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_all,
      },
    },
    /* PowerBook6,7 */
    { .layout_id = 80,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_noline,
      },
    },
    /* PowerBook6,8 */
    { .layout_id = 72,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_nolineout,
      },
    },
    /* PowerMac8,2 */
    { .layout_id = 86,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_nomic,
      },
      .codecs[1] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    /* PowerBook6,7 */
    { .layout_id = 92,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_nolineout,
      },
    },
    /* PowerMac10,1 (Mac Mini) */
    { .layout_id = 58,
      .codecs[0] = {
        .name = "toonie",
        .connections = toonie_connections,
      },
    },
    {
      .layout_id = 96,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
    },
    /* unknown, untested, but this comes from Apple */
    { .layout_id = 41,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_all,
      },
    },
    { .layout_id = 36,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_nomic,
      },
      .codecs[1] = {
        .name = "topaz",
        .connections = topaz_inout,
      },
    },
    { .layout_id = 47,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
    },
    { .layout_id = 48,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    { .layout_id = 49,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_nomic,
      },
    },
    { .layout_id = 50,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    { .layout_id = 56,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
    },
    { .layout_id = 57,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    { .layout_id = 62,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
      .codecs[1] = {
        .name = "topaz",
        .connections = topaz_output,
      },
    },
    { .layout_id = 66,
      .codecs[0] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
    },
    { .layout_id = 67,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
    },
    { .layout_id = 76,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_nomic,
      },
      .codecs[1] = {
        .name = "topaz",
        .connections = topaz_inout,
      },
    },
    { .layout_id = 90,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_noline,
      },
    },
    { .layout_id = 94,
      .codecs[0] = {
        .name = "onyx",
        /* but it has an external mic?? how to select? */
        .connections = onyx_connections_noheadphones,
      },
    },
    { .layout_id = 98,
      .codecs[0] = {
        .name = "toonie",
        .connections = toonie_connections,
      },
    },
    { .layout_id = 100,
      .codecs[0] = {
        .name = "topaz",
        .connections = topaz_input,
      },
      .codecs[1] = {
        .name = "onyx",
        .connections = onyx_connections_noheadphones,
      },
    },
    /* PowerMac3,4 */
    { .device_id = 14,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_noline,
      },
    },
    /* PowerMac3,6 */
    { .device_id = 22,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_all,
      },
    },
    /* PowerBook5,2 */
    { .device_id = 35,
      .codecs[0] = {
        .name = "tas",
        .connections = tas_connections_all,
      },
    },
    {}
};
/*
 * Look up the layout table entry whose layout-id matches @id.
 * Returns NULL when the id is not in the table.
 */
static struct layout *find_layout_by_id(unsigned int id)
{
    struct layout *l;

    for (l = layouts; l->codecs[0].name; l++)
        if (l->layout_id == id)
            return l;
    return NULL;
}
/*
 * Look up the layout table entry whose device-id matches @id.
 * Returns NULL when the id is not in the table.
 */
static struct layout *find_layout_by_device(unsigned int id)
{
    struct layout *l;

    for (l = layouts; l->codecs[0].name; l++)
        if (l->device_id == id)
            return l;
    return NULL;
}
/* Request the codec driver module for each codec this layout names. */
static void use_layout(struct layout *l)
{
    int i;

    for (i = 0; i < MAX_CODECS_PER_BUS; i++)
        if (l->codecs[i].name)
            request_module("snd-aoa-codec-%s", l->codecs[i].name);
    /* now we wait for the codecs to call us back */
}
struct layout_dev;

/* Thin wrapper so the gpio notify callbacks can tell which detect
 * (headphone vs. lineout) fired: we hand out the address of one of the
 * two selfptr_* members below and compare it in layout_notify(). */
struct layout_dev_ptr {
    struct layout_dev *ptr;
};

/* Per-soundbus-device state for this fabric. */
struct layout_dev {
    struct list_head list;
    struct soundbus_dev *sdev;
    struct device_node *sound;
    struct aoa_codec *codecs[MAX_CODECS_PER_BUS];
    struct layout *layout;
    struct gpio_runtime gpio;

    /* we need these for headphone/lineout detection */
    struct snd_kcontrol *headphone_ctrl;
    struct snd_kcontrol *lineout_ctrl;
    struct snd_kcontrol *speaker_ctrl;
    struct snd_kcontrol *master_ctrl;
    struct snd_kcontrol *headphone_detected_ctrl;
    struct snd_kcontrol *lineout_detected_ctrl;

    struct layout_dev_ptr selfptr_headphone;
    struct layout_dev_ptr selfptr_lineout;

    u32 have_lineout_detect:1,
        have_headphone_detect:1,
        switch_on_headphone:1,
        switch_on_lineout:1;
};

/* All layout_dev instances; count kept in layouts_list_items. */
static LIST_HEAD(layouts_list);
static int layouts_list_items;
/* this can go away but only if we allow multiple cards,
 * make the fabric handle all the card stuff, etc... */
static struct layout_dev *layout_device;
#define control_info	snd_ctl_boolean_mono_info

/*
 * Generate get/put handlers plus a snd_kcontrol_new template for a
 * simple on/off amplifier switch backed by the gpio_runtime methods.
 *
 * Fix: the put handler previously tested gpio->methods->get_##n before
 * calling set_##n, which would NULL-dereference on a methods table that
 * implements get but not set.  Test the callback actually invoked.
 */
#define AMP_CONTROL(n, description) \
static int n##_control_get(struct snd_kcontrol *kcontrol, \
                           struct snd_ctl_elem_value *ucontrol) \
{ \
    struct gpio_runtime *gpio = snd_kcontrol_chip(kcontrol); \
    if (gpio->methods && gpio->methods->get_##n) \
        ucontrol->value.integer.value[0] = \
            gpio->methods->get_##n(gpio); \
    return 0; \
} \
static int n##_control_put(struct snd_kcontrol *kcontrol, \
                           struct snd_ctl_elem_value *ucontrol) \
{ \
    struct gpio_runtime *gpio = snd_kcontrol_chip(kcontrol); \
    if (gpio->methods && gpio->methods->set_##n) \
        gpio->methods->set_##n(gpio, \
            !!ucontrol->value.integer.value[0]); \
    return 1; \
} \
static struct snd_kcontrol_new n##_ctl = { \
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
    .name = description, \
    .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
    .info = control_info, \
    .get = n##_control_get, \
    .put = n##_control_put, \
}

AMP_CONTROL(headphone, "Headphone Switch");
AMP_CONTROL(speakers, "Speakers Switch");
AMP_CONTROL(lineout, "Line-Out Switch");
AMP_CONTROL(master, "Master Switch");
/*
 * Read the autoswitch flag selected by the control's private_value:
 * 0 = headphone autoswitch, 1 = line-out autoswitch.
 */
static int detect_choice_get(struct snd_kcontrol *kcontrol,
                             struct snd_ctl_elem_value *ucontrol)
{
    struct layout_dev *ldev = snd_kcontrol_chip(kcontrol);

    if (kcontrol->private_value == 0)
        ucontrol->value.integer.value[0] = ldev->switch_on_headphone;
    else if (kcontrol->private_value == 1)
        ucontrol->value.integer.value[0] = ldev->switch_on_lineout;
    else
        return -ENODEV;
    return 0;
}
/*
 * Write the autoswitch flag selected by the control's private_value:
 * 0 = headphone autoswitch, 1 = line-out autoswitch.
 * Returns 1 to signal a value change to the ctl layer.
 */
static int detect_choice_put(struct snd_kcontrol *kcontrol,
                             struct snd_ctl_elem_value *ucontrol)
{
    struct layout_dev *ldev = snd_kcontrol_chip(kcontrol);
    int on = !!ucontrol->value.integer.value[0];

    if (kcontrol->private_value == 0)
        ldev->switch_on_headphone = on;
    else if (kcontrol->private_value == 1)
        ldev->switch_on_lineout = on;
    else
        return -ENODEV;
    return 1;
}
/* Mixer switches enabling/disabling jack-detect autoswitching; the
 * private_value selects headphone (0) vs. line-out (1) in the shared
 * detect_choice_get/put handlers. */
static struct snd_kcontrol_new headphone_detect_choice = {
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Headphone Detect Autoswitch",
    .info = control_info,
    .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
    .get = detect_choice_get,
    .put = detect_choice_put,
    .private_value = 0,
};

static struct snd_kcontrol_new lineout_detect_choice = {
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Line-Out Detect Autoswitch",
    .info = control_info,
    .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
    .get = detect_choice_get,
    .put = detect_choice_put,
    .private_value = 1,
};
/*
 * Report the current jack-detect state via the gpio get_detect method;
 * private_value selects headphone (0) vs. line-out (1).
 */
static int detected_get(struct snd_kcontrol *kcontrol,
                        struct snd_ctl_elem_value *ucontrol)
{
    struct layout_dev *ldev = snd_kcontrol_chip(kcontrol);
    int notify;

    if (kcontrol->private_value == 0)
        notify = AOA_NOTIFY_HEADPHONE;
    else if (kcontrol->private_value == 1)
        notify = AOA_NOTIFY_LINE_OUT;
    else
        return -ENODEV;

    ucontrol->value.integer.value[0] =
        ldev->gpio.methods->get_detect(&ldev->gpio, notify);
    return 0;
}
/* Read-only mixer controls exposing the raw jack-detect state. */
static struct snd_kcontrol_new headphone_detected = {
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Headphone Detected",
    .info = control_info,
    .access = SNDRV_CTL_ELEM_ACCESS_READ,
    .get = detected_get,
    .private_value = 0,
};

static struct snd_kcontrol_new lineout_detected = {
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "Line-Out Detected",
    .info = control_info,
    .access = SNDRV_CTL_ELEM_ACCESS_READ,
    .get = detected_get,
    .private_value = 1,
};
/*
 * Decide whether @codec belongs to @ldev and, if so, wire it up.
 *
 * If the codec sits on a 'codec' device-tree node, the sound node must
 * carry a platform-<name>-codec-ref property whose phandle matches it;
 * without such a reference we only accept the codec when there is
 * exactly one soundbus (no ambiguity possible).
 *
 * On success the codec's soundbus_dev/gpio/fabric_data links are set
 * and codec->connected is filled from the connection table.
 * Returns 0 on success, -ENODEV / -EINVAL otherwise.
 */
static int check_codec(struct aoa_codec *codec,
                       struct layout_dev *ldev,
                       struct codec_connect_info *cci)
{
    const u32 *ref;
    char propname[32];
    struct codec_connection *cc;

    /* if the codec has a 'codec' node, we require a reference */
    if (codec->node && (strcmp(codec->node->name, "codec") == 0)) {
        snprintf(propname, sizeof(propname),
                 "platform-%s-codec-ref", codec->name);
        ref = of_get_property(ldev->sound, propname, NULL);
        if (!ref) {
            printk(KERN_INFO "snd-aoa-fabric-layout: "
                   "required property %s not present\n", propname);
            return -ENODEV;
        }
        if (*ref != codec->node->phandle) {
            printk(KERN_INFO "snd-aoa-fabric-layout: "
                   "%s doesn't match!\n", propname);
            return -ENODEV;
        }
    } else {
        /* no reference available: only safe with a single soundbus */
        if (layouts_list_items != 1) {
            printk(KERN_INFO "snd-aoa-fabric-layout: "
                   "more than one soundbus, but no references.\n");
            return -ENODEV;
        }
    }
    codec->soundbus_dev = ldev->sdev;
    codec->gpio = &ldev->gpio;

    cc = cci->connections;
    if (!cc)
        return -EINVAL;

    printk(KERN_INFO "snd-aoa-fabric-layout: can use this codec\n");

    /* collapse the connection table into the codec's connected bitmask */
    codec->connected = 0;
    codec->fabric_data = cc;
    while (cc->connected) {
        codec->connected |= 1<<cc->codec_bit;
        cc++;
    }
    return 0;
}
/*
 * Fabric callback: a codec driver registered.  Scan every known
 * layout_dev for a codec slot with a matching name and try to attach.
 * Returns 0 once a layout accepts the codec, -ENODEV otherwise.
 */
static int layout_found_codec(struct aoa_codec *codec)
{
    struct layout_dev *ldev;
    int idx;

    list_for_each_entry(ldev, &layouts_list, list) {
        for (idx = 0; idx < MAX_CODECS_PER_BUS; idx++) {
            struct codec_connect_info *cci =
                &ldev->layout->codecs[idx];

            if (!cci->name)
                continue;
            if (strcmp(cci->name, codec->name) != 0)
                continue;
            if (check_codec(codec, ldev, cci) == 0)
                return 0;
        }
    }
    return -ENODEV;
}
/*
 * Fabric callback: a codec is going away.  Only the codec's links back
 * to us need clearing -- the visible code never stored the codec in a
 * layout_dev slot (layout_attached_codec still carries a TODO for
 * that), so there is nothing to remove on our side.
 *
 * The original contained an empty for-loop over MAX_CODECS_PER_BUS and
 * an unused index variable; both were dead code and are removed.
 */
static void layout_remove_codec(struct aoa_codec *codec)
{
    codec->soundbus_dev = NULL;
    codec->gpio = NULL;
}
/*
 * GPIO jack-detect notification callback.  @data is the address of one
 * of the layout_dev's selfptr_* members, which tells us whether the
 * headphone or the line-out detect fired.  If autoswitching is enabled
 * for that jack, reroute the amps (speakers vs. the detected output)
 * and notify userspace of every control whose value changed.
 */
static void layout_notify(void *data)
{
    struct layout_dev_ptr *dptr = data;
    struct layout_dev *ldev;
    int v, update;
    struct snd_kcontrol *detected, *c;
    struct snd_card *card = aoa_get_card();

    ldev = dptr->ptr;
    /* identify which detect fired by which selfptr was registered */
    if (data == &ldev->selfptr_headphone) {
        v = ldev->gpio.methods->get_detect(&ldev->gpio, AOA_NOTIFY_HEADPHONE);
        detected = ldev->headphone_detected_ctrl;
        update = ldev->switch_on_headphone;
        if (update) {
            /* headphone plugged: mute speakers, route to headphone */
            ldev->gpio.methods->set_speakers(&ldev->gpio, !v);
            ldev->gpio.methods->set_headphone(&ldev->gpio, v);
            ldev->gpio.methods->set_lineout(&ldev->gpio, 0);
        }
    } else if (data == &ldev->selfptr_lineout) {
        v = ldev->gpio.methods->get_detect(&ldev->gpio, AOA_NOTIFY_LINE_OUT);
        detected = ldev->lineout_detected_ctrl;
        update = ldev->switch_on_lineout;
        if (update) {
            /* line-out plugged: mute speakers, route to line-out */
            ldev->gpio.methods->set_speakers(&ldev->gpio, !v);
            ldev->gpio.methods->set_headphone(&ldev->gpio, 0);
            ldev->gpio.methods->set_lineout(&ldev->gpio, v);
        }
    } else
        return;

    /* tell userspace the detect state changed */
    if (detected)
        snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &detected->id);
    /* ... and, if we rerouted, that the switch controls changed too */
    if (update) {
        c = ldev->headphone_ctrl;
        if (c)
            snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &c->id);
        c = ldev->speaker_ctrl;
        if (c)
            snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &c->id);
        c = ldev->lineout_ctrl;
        if (c)
            snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &c->id);
    }
}
/*
 * Fabric callback: a codec was accepted (check_codec succeeded) and is
 * now attached.  Walk its connection table and create the matching
 * mixer controls, set the initial amp routing based on the current
 * jack-detect state, and register detect-notify callbacks where the
 * GPIO layer supports them.  Entries flagged with
 * CC_LINEOUT_LABELLED_HEADPHONE get their controls renamed so users
 * see "Headphone ..." instead of "Line-Out ...".
 */
static void layout_attached_codec(struct aoa_codec *codec)
{
    struct codec_connection *cc;
    struct snd_kcontrol *ctl;
    int headphones, lineout;
    struct layout_dev *ldev = layout_device;

    /* need to add this codec to our codec array! */

    cc = codec->fabric_data;

    /* current jack state decides the initial routing below */
    headphones = codec->gpio->methods->get_detect(codec->gpio,
                                                  AOA_NOTIFY_HEADPHONE);
    lineout = codec->gpio->methods->get_detect(codec->gpio,
                                               AOA_NOTIFY_LINE_OUT);

    if (codec->gpio->methods->set_master) {
        ctl = snd_ctl_new1(&master_ctl, codec->gpio);
        ldev->master_ctrl = ctl;
        aoa_snd_ctl_add(ctl);
    }
    while (cc->connected) {
        if (cc->connected & CC_SPEAKERS) {
            /* only default to speakers when nothing is plugged in */
            if (headphones <= 0 && lineout <= 0)
                ldev->gpio.methods->set_speakers(codec->gpio, 1);
            ctl = snd_ctl_new1(&speakers_ctl, codec->gpio);
            ldev->speaker_ctrl = ctl;
            aoa_snd_ctl_add(ctl);
        }
        if (cc->connected & CC_HEADPHONE) {
            if (headphones == 1)
                ldev->gpio.methods->set_headphone(codec->gpio, 1);
            ctl = snd_ctl_new1(&headphone_ctl, codec->gpio);
            ldev->headphone_ctrl = ctl;
            aoa_snd_ctl_add(ctl);
            /* set_notify returns 0 on success, hence the negation */
            ldev->have_headphone_detect =
                !ldev->gpio.methods
                     ->set_notify(&ldev->gpio,
                                  AOA_NOTIFY_HEADPHONE,
                                  layout_notify,
                                  &ldev->selfptr_headphone);
            if (ldev->have_headphone_detect) {
                ctl = snd_ctl_new1(&headphone_detect_choice,
                                   ldev);
                aoa_snd_ctl_add(ctl);
                ctl = snd_ctl_new1(&headphone_detected,
                                   ldev);
                ldev->headphone_detected_ctrl = ctl;
                aoa_snd_ctl_add(ctl);
            }
        }
        if (cc->connected & CC_LINEOUT) {
            if (lineout == 1)
                ldev->gpio.methods->set_lineout(codec->gpio, 1);
            ctl = snd_ctl_new1(&lineout_ctl, codec->gpio);
            if (cc->connected & CC_LINEOUT_LABELLED_HEADPHONE)
                strlcpy(ctl->id.name,
                        "Headphone Switch", sizeof(ctl->id.name));
            ldev->lineout_ctrl = ctl;
            aoa_snd_ctl_add(ctl);
            ldev->have_lineout_detect =
                !ldev->gpio.methods
                     ->set_notify(&ldev->gpio,
                                  AOA_NOTIFY_LINE_OUT,
                                  layout_notify,
                                  &ldev->selfptr_lineout);
            if (ldev->have_lineout_detect) {
                ctl = snd_ctl_new1(&lineout_detect_choice,
                                   ldev);
                if (cc->connected & CC_LINEOUT_LABELLED_HEADPHONE)
                    strlcpy(ctl->id.name,
                            "Headphone Detect Autoswitch",
                            sizeof(ctl->id.name));
                aoa_snd_ctl_add(ctl);
                ctl = snd_ctl_new1(&lineout_detected,
                                   ldev);
                if (cc->connected & CC_LINEOUT_LABELLED_HEADPHONE)
                    strlcpy(ctl->id.name,
                            "Headphone Detected",
                            sizeof(ctl->id.name));
                ldev->lineout_detected_ctrl = ctl;
                aoa_snd_ctl_add(ctl);
            }
        }
        cc++;
    }
    /* now update initial state */
    if (ldev->have_headphone_detect)
        layout_notify(&ldev->selfptr_headphone);
    if (ldev->have_lineout_detect)
        layout_notify(&ldev->selfptr_lineout);
}
/* Fabric registered with the AOA core; its callbacks drive codec
 * attach/detach for this driver. */
static struct aoa_fabric layout_fabric = {
    .name = "SoundByLayout",
    .owner = THIS_MODULE,
    .found_codec = layout_found_codec,
    .remove_codec = layout_remove_codec,
    .attached_codec = layout_attached_codec,
};
/*
 * Soundbus probe: locate the 'soundchip' child node, resolve its
 * layout-id (or device-id) against the layouts table, allocate and
 * populate a layout_dev, pick the GPIO backend, register the fabric
 * and request the codec modules.  Only a single device is supported
 * (guarded by the layout_device singleton).
 * Returns 0 on success, -ENODEV on any failure.
 */
static int aoa_fabric_layout_probe(struct soundbus_dev *sdev)
{
    struct device_node *sound = NULL;
    const unsigned int *id;
    struct layout *layout = NULL;
    struct layout_dev *ldev = NULL;
    int err;

    /* hm, currently we can only have one ... */
    if (layout_device)
        return -ENODEV;

    /* by breaking out we keep a reference */
    while ((sound = of_get_next_child(sdev->ofdev.dev.of_node, sound))) {
        if (sound->type && strcasecmp(sound->type, "soundchip") == 0)
            break;
    }
    if (!sound)
        return -ENODEV;

    /* prefer layout-id; fall back to device-id */
    id = of_get_property(sound, "layout-id", NULL);
    if (id) {
        layout = find_layout_by_id(*id);
    } else {
        id = of_get_property(sound, "device-id", NULL);
        if (id)
            layout = find_layout_by_device(*id);
    }

    if (!layout) {
        printk(KERN_ERR "snd-aoa-fabric-layout: unknown layout\n");
        goto outnodev;
    }

    ldev = kzalloc(sizeof(struct layout_dev), GFP_KERNEL);
    if (!ldev)
        goto outnodev;

    layout_device = ldev;
    ldev->sdev = sdev;
    ldev->sound = sound;
    ldev->layout = layout;
    ldev->gpio.node = sound->parent;
    /* a few machines need direct feature-call GPIOs instead of PMF */
    switch (layout->layout_id) {
    case 0: /* anything with device_id, not layout_id */
    case 41: /* that unknown machine no one seems to have */
    case 51: /* PowerBook5,4 */
    case 58: /* Mac Mini */
        ldev->gpio.methods = ftr_gpio_methods;
        printk(KERN_DEBUG
               "snd-aoa-fabric-layout: Using direct GPIOs\n");
        break;
    default:
        ldev->gpio.methods = pmf_gpio_methods;
        printk(KERN_DEBUG
               "snd-aoa-fabric-layout: Using PMF GPIOs\n");
    }
    ldev->selfptr_headphone.ptr = ldev;
    ldev->selfptr_lineout.ptr = ldev;
    dev_set_drvdata(&sdev->ofdev.dev, ldev);
    list_add(&ldev->list, &layouts_list);
    layouts_list_items++;

    /* assign these before registering ourselves, so
     * callbacks that are done during registration
     * already have the values */
    sdev->pcmid = ldev->layout->pcmid;
    if (ldev->layout->busname) {
        sdev->pcmname = ldev->layout->busname;
    } else {
        sdev->pcmname = "Master";
    }

    ldev->gpio.methods->init(&ldev->gpio);

    err = aoa_fabric_register(&layout_fabric, &sdev->ofdev.dev);
    if (err && err != -EALREADY) {
        printk(KERN_INFO "snd-aoa-fabric-layout: can't use,"
               " another fabric is active!\n");
        goto outlistdel;
    }

    use_layout(layout);
    ldev->switch_on_headphone = 1;
    ldev->switch_on_lineout = 1;
    return 0;
 outlistdel:
    /* we won't be using these then... */
    ldev->gpio.methods->exit(&ldev->gpio);
    /* reset if we didn't use it */
    sdev->pcmname = NULL;
    sdev->pcmid = -1;
    list_del(&ldev->list);
    layouts_list_items--;
    kfree(ldev);
 outnodev:
    /* drop the child-node reference we kept by breaking out above */
    of_node_put(sound);
    layout_device = NULL;
    return -ENODEV;
}
/*
 * Soundbus remove: unlink any attached codecs, drop our list entry and
 * device-node reference, tear down detect notifications and the GPIO
 * backend, and clear the singleton so a new device can probe.
 */
static int aoa_fabric_layout_remove(struct soundbus_dev *sdev)
{
    struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
    int i;

    for (i = 0; i < MAX_CODECS_PER_BUS; i++) {
        if (ldev->codecs[i]) {
            aoa_fabric_unlink_codec(ldev->codecs[i]);
        }
        ldev->codecs[i] = NULL;
    }
    list_del(&ldev->list);
    layouts_list_items--;
    of_node_put(ldev->sound);

    /* unregister both detect callbacks before freeing ldev, since the
     * notify argument points into it */
    ldev->gpio.methods->set_notify(&ldev->gpio,
                                   AOA_NOTIFY_HEADPHONE,
                                   NULL,
                                   NULL);
    ldev->gpio.methods->set_notify(&ldev->gpio,
                                   AOA_NOTIFY_LINE_OUT,
                                   NULL,
                                   NULL);

    ldev->gpio.methods->exit(&ldev->gpio);
    layout_device = NULL;
    kfree(ldev);
    sdev->pcmid = -1;
    sdev->pcmname = NULL;
    return 0;
}
#ifdef CONFIG_PM
/* Suspend: power down all amplifiers if the GPIO backend supports it. */
static int aoa_fabric_layout_suspend(struct soundbus_dev *sdev, pm_message_t state)
{
    struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);

    if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off)
        ldev->gpio.methods->all_amps_off(&ldev->gpio);

    return 0;
}
/*
 * Resume: restore the amplifier state saved at suspend.
 *
 * Fix: the original tested ldev->gpio.methods->all_amps_off but then
 * invoked all_amps_restore, so a methods table implementing only one
 * of the two callbacks would NULL-dereference here.  Test the callback
 * we actually call.
 */
static int aoa_fabric_layout_resume(struct soundbus_dev *sdev)
{
    struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);

    if (ldev->gpio.methods && ldev->gpio.methods->all_amps_restore)
        ldev->gpio.methods->all_amps_restore(&ldev->gpio);

    return 0;
}
#endif
/* Soundbus driver glue; suspend/resume only built with CONFIG_PM. */
static struct soundbus_driver aoa_soundbus_driver = {
    .name = "snd_aoa_soundbus_drv",
    .owner = THIS_MODULE,
    .probe = aoa_fabric_layout_probe,
    .remove = aoa_fabric_layout_remove,
#ifdef CONFIG_PM
    .suspend = aoa_fabric_layout_suspend,
    .resume = aoa_fabric_layout_resume,
#endif
    .driver = {
        .owner = THIS_MODULE,
    }
};
/* Module init: just register the soundbus driver; probe does the rest. */
static int __init aoa_fabric_layout_init(void)
{
    return soundbus_register_driver(&aoa_soundbus_driver);
}
/* Module exit: unregister the bus driver first (tears down devices),
 * then drop our fabric registration. */
static void __exit aoa_fabric_layout_exit(void)
{
    soundbus_unregister_driver(&aoa_soundbus_driver);
    aoa_fabric_unregister(&layout_fabric);
}
module_init(aoa_fabric_layout_init);
module_exit(aoa_fabric_layout_exit);
| gpl-2.0 |
jthatch12/SKJT | arch/powerpc/platforms/85xx/p3041_ds.c | 2493 | 1596 | /*
* P3041 DS Setup
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* Copyright 2009-2010 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "corenet_ds.h"
/*
* Called very early, device-tree isn't unflattened
*/
/* Board match: flat device tree root must be compatible "fsl,P3041DS". */
static int __init p3041_ds_probe(void)
{
    return of_flat_dt_is_compatible(of_get_flat_dt_root(), "fsl,P3041DS");
}
/* Machine description for the Freescale P3041 DS board; setup and PIC
 * init are shared with the other CoreNet DS boards (corenet_ds.h). */
define_machine(p3041_ds) {
    .name			= "P3041 DS",
    .probe			= p3041_ds_probe,
    .setup_arch		= corenet_ds_setup_arch,
    .init_IRQ		= corenet_ds_pic_init,
#ifdef CONFIG_PCI
    .pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
#endif
    .get_irq		= mpic_get_coreint_irq,
    .restart		= fsl_rstcr_restart,
    .calibrate_decr		= generic_calibrate_decr,
    .progress		= udbg_progress,
};
machine_device_initcall(p3041_ds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier);
#endif
| gpl-2.0 |
victormlourenco/kernel_msm | drivers/net/macvlan.c | 3261 | 22719 | /*
* Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* The code this is based on carried the following copyright notice:
* ---
* (C) Copyright 2001-2006
* Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
* Re-worked by Ben Greear <greearb@candelatech.com>
* ---
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/rculist.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
/* 256 buckets: the hash key is the last octet of the MAC address. */
#define MACVLAN_HASH_SIZE	(1 << BITS_PER_BYTE)

/* Per-lowerdev state shared by all macvlans stacked on that device. */
struct macvlan_port {
    struct net_device	*dev;
    struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
    struct list_head	vlans;
    struct rcu_head		rcu;
    bool			passthru;	/* single passthru macvlan owns the port */
    int			count;
};

static void macvlan_port_destroy(struct net_device *dev);

/* The port hangs off the lowerdev's rx_handler_data; the _rcu variant
 * is for use on the RX path under rcu_read_lock(). */
#define macvlan_port_get_rcu(dev) \
    ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
/* Find the macvlan on @port whose MAC equals @addr, or NULL.  Buckets
 * are keyed by addr[5]; traversal uses the RCU-safe list walker. */
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
                                               const unsigned char *addr)
{
    struct macvlan_dev *vlan;
    struct hlist_node *n;

    hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) {
        if (!compare_ether_addr_64bits(vlan->dev->dev_addr, addr))
            return vlan;
    }
    return NULL;
}
/* Insert @vlan into its port's hash, bucketed by the MAC's last octet. */
static void macvlan_hash_add(struct macvlan_dev *vlan)
{
    struct macvlan_port *port = vlan->port;
    const unsigned char *addr = vlan->dev->dev_addr;

    hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]);
}
/* Unhash @vlan; when @sync, wait for RCU readers before returning so
 * the caller may immediately reuse/modify the entry. */
static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync)
{
    hlist_del_rcu(&vlan->hlist);
    if (sync)
        synchronize_rcu();
}
/* Re-key @vlan under a new MAC: unhash (synchronously, so no reader
 * still sees it), swap the address, then rehash into the new bucket. */
static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
                                     const unsigned char *addr)
{
    macvlan_hash_del(vlan, true);
    /* Now that we are unhashed it is safe to change the device
     * address without confusing packet delivery.
     */
    memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
    macvlan_hash_add(vlan);
}
static int macvlan_addr_busy(const struct macvlan_port *port,
const unsigned char *addr)
{
/* Test to see if the specified multicast address is
* currently in use by the underlying device or
* another macvlan.
*/
if (!compare_ether_addr_64bits(port->dev->dev_addr, addr))
return 1;
if (macvlan_hash_lookup(port, addr))
return 1;
return 0;
}
/*
 * Deliver one (possibly cloned) skb to a single macvlan.  @local means
 * the frame came from another macvlan on the same port and goes through
 * the vlan's forward hook; otherwise it is received normally with the
 * pkt_type set from the destination address.  A NULL @skb (e.g. failed
 * clone) counts as a drop.
 */
static int macvlan_broadcast_one(struct sk_buff *skb,
                                 const struct macvlan_dev *vlan,
                                 const struct ethhdr *eth, bool local)
{
    struct net_device *dev = vlan->dev;

    if (!skb)
        return NET_RX_DROP;

    if (local)
        return vlan->forward(dev, skb);

    skb->dev = dev;
    if (!compare_ether_addr_64bits(eth->h_dest,
                                   dev->broadcast))
        skb->pkt_type = PACKET_BROADCAST;
    else
        skb->pkt_type = PACKET_MULTICAST;

    return vlan->receive(skb);
}
/*
 * Clone @skb to every macvlan on @port whose mode is in @mode, skipping
 * @src (the originating device, may be NULL).  PAUSE frames are never
 * forwarded.  Each delivery is accounted via macvlan_count_rx().
 */
static void macvlan_broadcast(struct sk_buff *skb,
                              const struct macvlan_port *port,
                              struct net_device *src,
                              enum macvlan_mode mode)
{
    const struct ethhdr *eth = eth_hdr(skb);
    const struct macvlan_dev *vlan;
    struct hlist_node *n;
    struct sk_buff *nskb;
    unsigned int i;
    int err;

    if (skb->protocol == htons(ETH_P_PAUSE))
        return;

    /* walk every hash bucket; each macvlan gets its own clone */
    for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
        hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
            if (vlan->dev == src || !(vlan->mode & mode))
                continue;

            nskb = skb_clone(skb, GFP_ATOMIC);
            /* bridge-mode deliveries count as local forwards */
            err = macvlan_broadcast_one(nskb, vlan, eth,
                                        mode == MACVLAN_MODE_BRIDGE);
            macvlan_count_rx(vlan, skb->len + ETH_HLEN,
                             err == NET_RX_SUCCESS, 1);
        }
    }
}
/* called under rcu_read_lock() from netif_receive_skb */
/* RX entry point installed via netdev_rx_handler_register() on the
 * lower device.  Multicast frames are flooded according to the source
 * macvlan's mode; unicast frames are steered to the macvlan owning the
 * destination MAC (or the single passthru device).  Returns an
 * RX_HANDLER_* verdict telling the stack whether it still owns the skb.
 */
static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
{
	struct macvlan_port *port;
	struct sk_buff *skb = *pskb;
	const struct ethhdr *eth = eth_hdr(skb);
	const struct macvlan_dev *vlan;
	const struct macvlan_dev *src;
	struct net_device *dev;
	unsigned int len = 0;
	int ret = NET_RX_DROP;

	port = macvlan_port_get_rcu(skb->dev);
	if (is_multicast_ether_addr(eth->h_dest)) {
		/* Defragment first so DHCP snooping et al. see whole
		 * datagrams; ip_check_defrag() may consume the skb. */
		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
		if (!skb)
			return RX_HANDLER_CONSUMED;
		eth = eth_hdr(skb);
		src = macvlan_hash_lookup(port, eth->h_source);
		if (!src)
			/* frame comes from an external address */
			macvlan_broadcast(skb, port, NULL,
					  MACVLAN_MODE_PRIVATE |
					  MACVLAN_MODE_VEPA    |
					  MACVLAN_MODE_PASSTHRU|
					  MACVLAN_MODE_BRIDGE);
		else if (src->mode == MACVLAN_MODE_VEPA)
			/* flood to everyone except source */
			macvlan_broadcast(skb, port, src->dev,
					  MACVLAN_MODE_VEPA |
					  MACVLAN_MODE_BRIDGE);
		else if (src->mode == MACVLAN_MODE_BRIDGE)
			/*
			 * flood only to VEPA ports, bridge ports
			 * already saw the frame on the way out.
			 */
			macvlan_broadcast(skb, port, src->dev,
					  MACVLAN_MODE_VEPA);
		else {
			/* forward to original port. */
			vlan = src;
			ret = macvlan_broadcast_one(skb, vlan, eth, 0);
			goto out;
		}

		/* Let the lower device's own stack see the multicast too. */
		return RX_HANDLER_PASS;
	}

	if (port->passthru)
		/* passthru mode has exactly one macvlan; it gets everything */
		vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
	else
		vlan = macvlan_hash_lookup(port, eth->h_dest);
	if (vlan == NULL)
		return RX_HANDLER_PASS;

	dev = vlan->dev;
	if (unlikely(!(dev->flags & IFF_UP))) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}
	/* remember length before receive() may free the skb */
	len = skb->len + ETH_HLEN;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	skb->dev = dev;
	skb->pkt_type = PACKET_HOST;

	ret = vlan->receive(skb);

out:
	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0);
	return RX_HANDLER_CONSUMED;
}
/* TX fast path.  In bridge mode, frames destined for a sibling bridge
 * macvlan are short-circuited through the lower device without hitting
 * the wire; everything else goes out via the real device's qdisc.
 * Returns a NET_XMIT_* code.
 */
static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);
	const struct macvlan_port *port = vlan->port;
	const struct macvlan_dev *dest;
	/* Save ip_summed: for in-host bridging the checksum was never
	 * damaged, so it is temporarily forced to CHECKSUM_UNNECESSARY
	 * and restored before a real transmit. */
	__u8 ip_summed = skb->ip_summed;

	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
		const struct ethhdr *eth = (void *)skb->data;
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* send to other bridge ports directly */
		if (is_multicast_ether_addr(eth->h_dest)) {
			macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
			goto xmit_world;
		}

		dest = macvlan_hash_lookup(port, eth->h_dest);
		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
			/* send to lowerdev first for its network taps */
			dev_forward_skb(vlan->lowerdev, skb);

			return NET_XMIT_SUCCESS;
		}
	}

xmit_world:
	skb->ip_summed = ip_summed;
	skb->dev = vlan->lowerdev;
	return dev_queue_xmit(skb);
}
/* ndo_start_xmit for macvlan (also exported for macvtap).  Queues the
 * skb via macvlan_queue_xmit() and accounts the result in the per-cpu
 * TX stats; skb->len is sampled before the call since the skb may be
 * freed by the time it returns.
 */
netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	unsigned int len = skb->len;
	int ret;
	const struct macvlan_dev *vlan = netdev_priv(dev);

	ret = macvlan_queue_xmit(skb, dev);
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct macvlan_pcpu_stats *pcpu_stats;

		/* 64-bit counters need the seqcount bracket on 32-bit SMP */
		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		/* tx_dropped is a plain u32; no syncp needed */
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(macvlan_start_xmit);
/* header_ops->create: build the L2 header using the lower device's
 * header ops, defaulting the source address to the macvlan's own MAC
 * when the caller did not supply one.
 */
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
			       unsigned short type, const void *daddr,
			       const void *saddr, unsigned len)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);
	const void *src = saddr ? saddr : dev->dev_addr;

	return dev_hard_header(skb, vlan->lowerdev, type, daddr, src, len);
}
/* Header ops: only .create is macvlan-specific (to default the source
 * MAC); the rest reuse the generic Ethernet implementations. */
static const struct header_ops macvlan_hard_header_ops = {
	.create  	= macvlan_hard_header,
	.rebuild	= eth_rebuild_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};
/* ndo_open: program the lower device so traffic for this macvlan's MAC
 * reaches us.  Passthru mode simply makes the lower device promiscuous;
 * otherwise the MAC is added to the lower device's unicast filter.
 * Error unwinding follows the usual goto-cleanup pattern.
 */
static int macvlan_open(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	int err;

	if (vlan->port->passthru) {
		dev_set_promiscuity(lowerdev, 1);
		goto hash_add;
	}

	err = -EBUSY;
	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
		goto out;

	err = dev_uc_add(lowerdev, dev->dev_addr);
	if (err < 0)
		goto out;
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(lowerdev, 1);
		if (err < 0)
			goto del_unicast;
	}

hash_add:
	/* visible to RX lookups from this point on */
	macvlan_hash_add(vlan);
	return 0;

del_unicast:
	dev_uc_del(lowerdev, dev->dev_addr);
out:
	return err;
}
/* ndo_stop: undo everything macvlan_open() set up on the lower device,
 * then unhash.  The RCU sync on unhash is skipped when the device is
 * being dismantled (dev->dismantle), since unregister handles that.
 */
static int macvlan_stop(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	if (vlan->port->passthru) {
		dev_set_promiscuity(lowerdev, -1);
		goto hash_del;
	}

	dev_mc_unsync(lowerdev, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(lowerdev, -1);

	dev_uc_del(lowerdev, dev->dev_addr);

hash_del:
	macvlan_hash_del(vlan, !dev->dismantle);
	return 0;
}
/* ndo_set_mac_address: validate and install a new MAC.  When the
 * device is down only the address field changes; when up, the lower
 * device's unicast filter is updated (new address added before the old
 * one is removed, so a failure leaves the old state intact) and the
 * hash entry is migrated.
 */
static int macvlan_set_mac_address(struct net_device *dev, void *p)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP)) {
		/* Just copy in the new address */
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	} else {
		/* Rehash and update the device filters */
		if (macvlan_addr_busy(vlan->port, addr->sa_data))
			return -EBUSY;

		err = dev_uc_add(lowerdev, addr->sa_data);
		if (err)
			return err;

		dev_uc_del(lowerdev, dev->dev_addr);

		macvlan_hash_change_addr(vlan, addr->sa_data);
	}
	return 0;
}
/* ndo_change_rx_flags: propagate IFF_ALLMULTI toggles to the lower
 * device's allmulti reference count. */
static void macvlan_change_rx_flags(struct net_device *dev, int change)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (change & IFF_ALLMULTI) {
		int inc = (dev->flags & IFF_ALLMULTI) ? 1 : -1;

		dev_set_allmulti(vlan->lowerdev, inc);
	}
}
/* ndo_set_rx_mode: mirror this device's multicast list onto the
 * lower device's filter. */
static void macvlan_set_multicast_list(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	dev_mc_sync(vlan->lowerdev, dev);
}
/* ndo_change_mtu: accept an MTU between the historical minimum (68,
 * the smallest IPv4 datagram a host must handle) and whatever the
 * lower device can carry. */
static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (new_mtu >= 68 && new_mtu <= vlan->lowerdev->mtu) {
		dev->mtu = new_mtu;
		return 0;
	}

	return -EINVAL;
}
/*
 * macvlan network devices have devices nesting below it and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;

/* Feature bits a macvlan may inherit from its lower device. */
#define MACVLAN_FEATURES \
	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
	 NETIF_F_HW_VLAN_FILTER)

/* Link-state bits copied from the lower device at init time. */
#define MACVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
/* Per-queue callback: put one TX queue's lock into the macvlan class. */
static void macvlan_set_lockdep_class_one(struct net_device *dev,
					  struct netdev_queue *txq,
					  void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &macvlan_netdev_xmit_lock_key);
}
/* Move the device's address-list lock and every TX queue lock into the
 * macvlan-specific lockdep classes, so nesting under the lower device's
 * locks does not trigger false lockdep reports. */
static void macvlan_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock,
			  &macvlan_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
/* ndo_init: inherit state, offload features, GSO limits and header
 * length from the lower device, set up lockdep classes and allocate
 * the per-cpu statistics.  Returns -ENOMEM on allocation failure.
 */
static int macvlan_init(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	const struct net_device *lowerdev = vlan->lowerdev;

	dev->state		= (dev->state & ~MACVLAN_STATE_MASK) |
				  (lowerdev->state & MACVLAN_STATE_MASK);
	dev->features 		= lowerdev->features & MACVLAN_FEATURES;
	/* no real queueing below us: lockless TX */
	dev->features		|= NETIF_F_LLTX;
	dev->gso_max_size	= lowerdev->gso_max_size;
	dev->iflink		= lowerdev->ifindex;
	dev->hard_header_len	= lowerdev->hard_header_len;

	macvlan_set_lockdep_class(dev);

	vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
	if (!vlan->pcpu_stats)
		return -ENOMEM;

	return 0;
}
/* ndo_uninit: release per-cpu stats and drop this macvlan's reference
 * on the port; the last reference tears the port down. */
static void macvlan_uninit(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port = vlan->port;

	free_percpu(vlan->pcpu_stats);

	port->count -= 1;
	if (!port->count)
		macvlan_port_destroy(port->dev);
}
/* ndo_get_stats64: fold the per-cpu counters into @stats.  The 64-bit
 * counters are read under the u64_stats seqcount retry loop so 32-bit
 * readers never see torn values; rx_errors/tx_dropped are plain u32s
 * and are summed without that protection.  Note rx_dropped is reported
 * as the rx_errors total (no separate drop counter is kept).
 */
static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
							 struct rtnl_link_stats64 *stats)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->pcpu_stats) {
		struct macvlan_pcpu_stats *p;
		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
		u32 rx_errors = 0, tx_dropped = 0;
		unsigned int start;
		int i;

		for_each_possible_cpu(i) {
			p = per_cpu_ptr(vlan->pcpu_stats, i);
			do {
				start = u64_stats_fetch_begin_bh(&p->syncp);
				rx_packets	= p->rx_packets;
				rx_bytes	= p->rx_bytes;
				rx_multicast	= p->rx_multicast;
				tx_packets	= p->tx_packets;
				tx_bytes	= p->tx_bytes;
			} while (u64_stats_fetch_retry_bh(&p->syncp, start));

			stats->rx_packets	+= rx_packets;
			stats->rx_bytes		+= rx_bytes;
			stats->multicast	+= rx_multicast;
			stats->tx_packets	+= tx_packets;
			stats->tx_bytes		+= tx_bytes;
			/* rx_errors & tx_dropped are u32, updated
			 * without syncp protection.
			 */
			rx_errors	+= p->rx_errors;
			tx_dropped	+= p->tx_dropped;
		}
		stats->rx_errors	= rx_errors;
		stats->rx_dropped	= rx_errors;
		stats->tx_dropped	= tx_dropped;
	}
	return stats;
}
/* ndo_vlan_rx_add_vid: delegate VLAN filter additions to the lower
 * device, which owns the hardware filter. */
static int macvlan_vlan_rx_add_vid(struct net_device *dev,
				   unsigned short vid)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	return vlan_vid_add(lowerdev, vid);
}
/* ndo_vlan_rx_kill_vid: remove a VLAN id from the lower device's
 * filter; vlan_vid_del() has no failure mode to report. */
static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
				    unsigned short vid)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct net_device *lowerdev = vlan->lowerdev;

	vlan_vid_del(lowerdev, vid);
	return 0;
}
/* ethtool -i: report driver name and version.  Use the destination
 * buffer sizes rather than a hard-coded 32 so the copies stay within
 * bounds even if the ethtool_drvinfo field sizes ever change. */
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *drvinfo)
{
	snprintf(drvinfo->driver, sizeof(drvinfo->driver), "macvlan");
	snprintf(drvinfo->version, sizeof(drvinfo->version), "0.1");
}
/* ethtool get_settings: a macvlan's link parameters are those of the
 * lower device, so query it directly. */
static int macvlan_ethtool_get_settings(struct net_device *dev,
					struct ethtool_cmd *cmd)
{
	const struct macvlan_dev *vlan = netdev_priv(dev);

	return __ethtool_get_settings(vlan->lowerdev, cmd);
}
/* Minimal ethtool support: link state, settings and driver identity. */
static const struct ethtool_ops macvlan_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_settings		= macvlan_ethtool_get_settings,
	.get_drvinfo		= macvlan_ethtool_get_drvinfo,
};
/* Netdevice operations shared by all macvlan-style devices. */
static const struct net_device_ops macvlan_netdev_ops = {
	.ndo_init		= macvlan_init,
	.ndo_uninit		= macvlan_uninit,
	.ndo_open		= macvlan_open,
	.ndo_stop		= macvlan_stop,
	.ndo_start_xmit		= macvlan_start_xmit,
	.ndo_change_mtu		= macvlan_change_mtu,
	.ndo_change_rx_flags	= macvlan_change_rx_flags,
	.ndo_set_mac_address	= macvlan_set_mac_address,
	.ndo_set_rx_mode	= macvlan_set_multicast_list,
	.ndo_get_stats64	= macvlan_dev_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
};
/* Common netdev setup for macvlan-style devices (shared with macvtap):
 * Ethernet defaults plus macvlan ops, destructor and header handling.
 * TX needs no dst reference and tolerates shared skbs.
 */
void macvlan_common_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags	       &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->netdev_ops		= &macvlan_netdev_ops;
	dev->destructor		= free_netdev;
	/* was terminated with ',' (comma operator) — harmless but a typo */
	dev->header_ops		= &macvlan_hard_header_ops;
	dev->ethtool_ops	= &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);
/* rtnl_link_ops->setup for plain macvlan: common setup, and no TX
 * queue — packets go straight to the lower device. */
static void macvlan_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len	= 0;
}
/* Allocate the shared per-lower-device port structure and claim the
 * device's RX handler slot.  Only plain, non-loopback Ethernet devices
 * may host macvlans.  Returns 0 or a negative errno.
 */
static int macvlan_port_create(struct net_device *dev)
{
	struct macvlan_port *port;
	unsigned int i;
	int err;

	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
		return -EINVAL;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->passthru = false;
	port->dev = dev;
	INIT_LIST_HEAD(&port->vlans);
	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&port->vlan_hash[i]);

	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
	if (err) {
		kfree(port);
		return err;
	}

	/* mark the lower device as hosting a macvlan port */
	dev->priv_flags |= IFF_MACVLAN_PORT;
	return 0;
}
/* Tear down the per-lower-device port: release the RX handler slot and
 * free the port after an RCU grace period (RX paths dereference it
 * under rcu_read_lock). */
static void macvlan_port_destroy(struct net_device *dev)
{
	struct macvlan_port *port = macvlan_port_get(dev);

	dev->priv_flags &= ~IFF_MACVLAN_PORT;
	netdev_rx_handler_unregister(dev);
	kfree_rcu(port, rcu);
}
static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (data && data[IFLA_MACVLAN_MODE]) {
switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
case MACVLAN_MODE_PRIVATE:
case MACVLAN_MODE_VEPA:
case MACVLAN_MODE_BRIDGE:
case MACVLAN_MODE_PASSTHRU:
break;
default:
return -EINVAL;
}
}
return 0;
}
/* Shared link-creation path for macvlan and macvtap.  Resolves the
 * lower device (flattening macvlan-on-macvlan), creates the shared
 * port on first use, wires up the private data and registers the
 * netdevice.  @receive/@forward let macvtap substitute its own
 * delivery functions.  Returns 0 or a negative errno; on failure the
 * port is destroyed again if this link was its only user.
 */
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   int (*receive)(struct sk_buff *skb),
			   int (*forward)(struct net_device *dev,
					  struct sk_buff *skb))
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvlan_port *port;
	struct net_device *lowerdev;
	int err;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (lowerdev == NULL)
		return -ENODEV;

	/* When creating macvlans on top of other macvlans - use
	 * the real device as the lowerdev.
	 */
	if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) {
		struct macvlan_dev *lowervlan = netdev_priv(lowerdev);
		lowerdev = lowervlan->lowerdev;
	}

	if (!tb[IFLA_MTU])
		dev->mtu = lowerdev->mtu;
	else if (dev->mtu > lowerdev->mtu)
		return -EINVAL;

	if (!tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	/* first macvlan on this lower device creates the shared port */
	if (!macvlan_port_exists(lowerdev)) {
		err = macvlan_port_create(lowerdev);
		if (err < 0)
			return err;
	}
	port = macvlan_port_get(lowerdev);

	/* Only 1 macvlan device can be created in passthru mode */
	if (port->passthru)
		return -EINVAL;

	vlan->lowerdev = lowerdev;
	vlan->dev      = dev;
	vlan->port     = port;
	vlan->receive  = receive;
	vlan->forward  = forward;

	vlan->mode     = MACVLAN_MODE_VEPA;
	if (data && data[IFLA_MACVLAN_MODE])
		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);

	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
		/* passthru requires being the only macvlan on this port */
		if (port->count)
			return -EINVAL;
		port->passthru = true;
		memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
	}

	port->count += 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto destroy_port;

	list_add_tail(&vlan->list, &port->vlans);
	netif_stacked_transfer_operstate(lowerdev, dev);

	return 0;

destroy_port:
	port->count -= 1;
	if (!port->count)
		macvlan_port_destroy(lowerdev);

	return err;
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
/* rtnl_link_ops->newlink for plain macvlan: use the common path with
 * the stock netif_rx/dev_forward_skb delivery functions. */
static int macvlan_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	return macvlan_common_newlink(src_net, dev, tb, data,
				      netif_rx,
				      dev_forward_skb);
}
/* rtnl_link_ops->dellink (also used by macvtap): unlink from the port's
 * list and queue the device for unregistration.  Called under RTNL. */
void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	list_del(&vlan->list);
	unregister_netdevice_queue(dev, head);
}
EXPORT_SYMBOL_GPL(macvlan_dellink);
/* rtnl_link_ops->changelink: only the mode attribute is mutable after
 * creation (the value was already range-checked by macvlan_validate). */
static int macvlan_changelink(struct net_device *dev,
		struct nlattr *tb[], struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (data && data[IFLA_MACVLAN_MODE])
		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
	return 0;
}
/* Netlink fill size: one u32 attribute (IFLA_MACVLAN_MODE). */
static size_t macvlan_get_size(const struct net_device *dev)
{
	return nla_total_size(4);
}
/* rtnl_link_ops->fill_info: dump the current mode.  NLA_PUT_U32 jumps
 * to nla_put_failure when the skb runs out of tailroom. */
static int macvlan_fill_info(struct sk_buff *skb,
				const struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/* Netlink attribute policy: the mode is a u32. */
static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
	[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
};
/* Fill in the rtnl_link_ops fields common to all macvlan-style link
 * types (macvlan, macvtap) and register with rtnetlink.  The caller
 * supplies the type-specific fields (.kind, .setup, .newlink, ...).
 * Returns rtnl_link_register()'s result.
 */
int macvlan_link_register(struct rtnl_link_ops *ops)
{
	/* common fields */
	ops->priv_size		= sizeof(struct macvlan_dev);
	ops->validate		= macvlan_validate;
	ops->maxtype		= IFLA_MACVLAN_MAX;
	ops->policy		= macvlan_policy;
	ops->changelink		= macvlan_changelink;
	ops->get_size		= macvlan_get_size;
	ops->fill_info		= macvlan_fill_info;

	return rtnl_link_register(ops);
}	/* dropped stray ';' after the function body */
EXPORT_SYMBOL_GPL(macvlan_link_register);
/* Type-specific rtnl ops for "macvlan"; the common fields are filled
 * in by macvlan_link_register(). */
static struct rtnl_link_ops macvlan_link_ops = {
	.kind		= "macvlan",
	.setup		= macvlan_setup,
	.newlink	= macvlan_newlink,
	.dellink	= macvlan_dellink,
};
/* Netdevice notifier: track the lower device.  Propagates carrier and
 * feature changes to every macvlan stacked on it, destroys all stacked
 * macvlans when the lower device unregisters, and forbids the lower
 * device from changing its hardware type underneath us.
 */
static int macvlan_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct macvlan_dev *vlan, *next;
	struct macvlan_port *port;
	LIST_HEAD(list_kill);

	if (!macvlan_port_exists(dev))
		return NOTIFY_DONE;

	port = macvlan_port_get(dev);

	switch (event) {
	case NETDEV_CHANGE:
		list_for_each_entry(vlan, &port->vlans, list)
			netif_stacked_transfer_operstate(vlan->lowerdev,
							 vlan->dev);
		break;
	case NETDEV_FEAT_CHANGE:
		list_for_each_entry(vlan, &port->vlans, list) {
			vlan->dev->features = dev->features & MACVLAN_FEATURES;
			vlan->dev->gso_max_size = dev->gso_max_size;
			netdev_features_change(vlan->dev);
		}
		break;
	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		/* batch the unregisters so one rcu barrier covers all */
		list_for_each_entry_safe(vlan, next, &port->vlans, list)
			vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
		unregister_netdevice_many(&list_kill);
		list_del(&list_kill);
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlaying device to change its type. */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}
/* Notifier hook registered at module init to watch lower devices. */
static struct notifier_block macvlan_notifier_block __read_mostly = {
	.notifier_call	= macvlan_device_event,
};
/* Module init: the notifier must be in place before any macvlan can be
 * created, so register it first and unwind it if the link-type
 * registration fails.  The original ignored the notifier's return
 * value; check it so a registration failure is not silently dropped.
 */
static int __init macvlan_init_module(void)
{
	int err;

	err = register_netdevice_notifier(&macvlan_notifier_block);
	if (err < 0)
		return err;

	err = macvlan_link_register(&macvlan_link_ops);
	if (err < 0)
		goto err1;
	return 0;
err1:
	unregister_netdevice_notifier(&macvlan_notifier_block);
	return err;
}
/* Module exit: unregister in the reverse order of init. */
static void __exit macvlan_cleanup_module(void)
{
	rtnl_link_unregister(&macvlan_link_ops);
	unregister_netdevice_notifier(&macvlan_notifier_block);
}
/* Module entry/exit points and metadata. */
module_init(macvlan_init_module);
module_exit(macvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Driver for MAC address based VLANs");
MODULE_ALIAS_RTNL_LINK("macvlan");
| gpl-2.0 |
SOKP/kernel_oneplus_msm8974 | drivers/mmc/host/davinci_mmc.c | 4797 | 42925 | /*
* davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
*
* Copyright (C) 2006 Texas Instruments.
* Original author: Purushotam Kumar
* Copyright (C) 2009 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <mach/mmc.h>
#include <mach/edma.h>
/*
* Register Definitions
*/
#define DAVINCI_MMCCTL 0x00 /* Control Register */
#define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */
#define DAVINCI_MMCST0 0x08 /* Status Register 0 */
#define DAVINCI_MMCST1 0x0C /* Status Register 1 */
#define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */
#define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */
#define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */
#define DAVINCI_MMCBLEN 0x1C /* Block Length Register */
#define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */
#define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR 0x28 /* Data Receive Register */
#define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */
#define DAVINCI_MMCCMD 0x30 /* Command Register */
#define DAVINCI_MMCARGHL 0x34 /* Argument Register */
#define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */
#define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */
#define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */
#define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */
#define DAVINCI_MMCDRSP 0x48 /* Data Response Register */
#define DAVINCI_MMCETOK 0x4C
#define DAVINCI_MMCCIDX 0x50 /* Command Index Register */
#define DAVINCI_MMCCKC 0x54
#define DAVINCI_MMCTORC 0x58
#define DAVINCI_MMCTODC 0x5C
#define DAVINCI_MMCBLNC 0x60
#define DAVINCI_SDIOCTL 0x64
#define DAVINCI_SDIOST0 0x68
#define DAVINCI_SDIOIEN 0x6C
#define DAVINCI_SDIOIST 0x70
#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST (1 << 0)
#define MMCCTL_CMDRST (1 << 1)
#define MMCCTL_WIDTH_8_BIT (1 << 8)
#define MMCCTL_WIDTH_4_BIT (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING (1 << 6)
#define MMCCTL_DATEG_FALLING (2 << 6)
#define MMCCTL_DATEG_BOTH (3 << 6)
#define MMCCTL_PERMDR_LE (0 << 9)
#define MMCCTL_PERMDR_BE (1 << 9)
#define MMCCTL_PERMDX_LE (0 << 10)
#define MMCCTL_PERMDX_BE (1 << 10)
/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN (1 << 8)
#define MMCCLK_CLKRT_MASK (0xFF << 0)
/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE BIT(0) /* data done */
#define MMCST0_BSYDNE BIT(1) /* busy done */
#define MMCST0_RSPDNE BIT(2) /* command done */
#define MMCST0_TOUTRD BIT(3) /* data read timeout */
#define MMCST0_TOUTRS BIT(4) /* command response timeout */
#define MMCST0_CRCWR BIT(5) /* data write CRC error */
#define MMCST0_CRCRD BIT(6) /* data read CRC error */
#define MMCST0_CRCRS BIT(7) /* command response CRC error */
#define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */
#define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/
#define MMCST0_DATED BIT(11) /* DAT3 edge detect */
#define MMCST0_TRNDNE BIT(12) /* transfer done */
/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY (1 << 0)
/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK (0x3F << 0)
#define MMCCMD_PPLEN (1 << 7)
#define MMCCMD_BSYEXP (1 << 8)
#define MMCCMD_RSPFMT_MASK (3 << 9)
#define MMCCMD_RSPFMT_NONE (0 << 9)
#define MMCCMD_RSPFMT_R1456 (1 << 9)
#define MMCCMD_RSPFMT_R2 (2 << 9)
#define MMCCMD_RSPFMT_R3 (3 << 9)
#define MMCCMD_DTRW (1 << 11)
#define MMCCMD_STRMTP (1 << 12)
#define MMCCMD_WDATX (1 << 13)
#define MMCCMD_INITCK (1 << 14)
#define MMCCMD_DCLR (1 << 15)
#define MMCCMD_DMATRIG (1 << 16)
/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST (1 << 0)
#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
#define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */
#define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */
#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI BIT(0)
/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN BIT(0)
/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT BIT(0)
/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK 200000
/*
* One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
* and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
* for drivers with max_segs == 1, making the segments bigger (64KB)
* than the page or two that's otherwise typical. nr_sg (passed from
* platform data) == 16 gives at least the same throughput boost, using
* EDMA transfer linkage instead of spending CPU time copying pages.
*/
#define MAX_CCNT ((1 << 16) - 1)
#define MAX_NR_SG 16
/* FIFO fill level per DMA/PIO burst, in bytes (matches MMCFIFOCTL). */
static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
		"Read/Write threshold. Default = 32");

/* Transfers up to this many bytes are attempted by polling first. */
static unsigned poll_threshold = 128;
module_param(poll_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(poll_threshold,
		"Polling transaction size threshold. Default = 128");

/* Upper bound on polling iterations before falling back to the IRQ. */
static unsigned poll_loopcount = 32;
module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
		"Maximum polling loop count. Default = 32");

/* Boot-time only (__initdata): whether to enable EDMA transfers. */
static unsigned __initdata use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
/* Per-controller state for one DaVinci MMC/SD/SDIO instance. */
struct mmc_davinci_host {
	struct mmc_command *cmd;	/* command currently in flight */
	struct mmc_data *data;		/* data phase of current request */
	struct mmc_host *mmc;
	struct clk *clk;
	unsigned int mmc_input_clk;	/* controller input clock, Hz */
	void __iomem *base;		/* mapped register window */
	struct resource *mem_res;
	int mmc_irq, sdio_irq;
	unsigned char bus_mode;		/* open-drain vs push-pull */

#define DAVINCI_MMC_DATADIR_NONE	0
#define DAVINCI_MMC_DATADIR_READ	1
#define DAVINCI_MMC_DATADIR_WRITE	2
	unsigned char data_dir;
	unsigned char suspended;

	/* buffer is used during PIO of one scatterlist segment, and
	 * is updated along with buffer_bytes_left.  bytes_left applies
	 * to all N blocks of the PIO transfer.
	 */
	u8 *buffer;
	u32 buffer_bytes_left;
	u32 bytes_left;

	u32 rxdma, txdma;		/* EDMA channel numbers */
	bool use_dma;			/* DMA enabled for this host */
	bool do_dma;			/* DMA chosen for current request */
	bool sdio_int;
	bool active_request;		/* request in flight (polled path) */

	/* Scatterlist DMA uses one or more parameter RAM entries:
	 * the main one (associated with rxdma or txdma) plus zero or
	 * more links.  The entries for a given transfer differ only
	 * by memory buffer (address, length) and link field.
	 */
	struct edmacc_param	tx_template;
	struct edmacc_param	rx_template;
	unsigned		n_link;
	u32			links[MAX_NR_SG - 1];

	/* For PIO we walk scatterlists one segment at a time. */
	unsigned int		sg_len;
	struct scatterlist *sg;

	/* Version of the MMC/SD controller */
	u8 version;
	/* for ns in one cycle calculation */
	unsigned ns_in_one_cycle;
	/* Number of sg segments */
	u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block	freq_transition;
#endif
};
/* forward declaration: also called directly from the polling loop */
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);

/* PIO only */
/* Point the PIO cursor at the current scatterlist segment, clamping
 * the segment length to the bytes remaining in the whole transfer. */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
	host->buffer_bytes_left = sg_dma_len(host->sg);
	host->buffer = sg_virt(host->sg);
	if (host->buffer_bytes_left > host->bytes_left)
		host->buffer_bytes_left = host->bytes_left;
}
/* Move up to @n bytes between the PIO buffer and the data FIFO in the
 * direction of the current transfer.  Advances to the next scatterlist
 * segment when the current one is exhausted.  Word accesses are used
 * for the bulk; a trailing 1-3 bytes go through byte-wide accesses.
 */
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
					unsigned int n)
{
	u8 *p;
	unsigned int i;

	if (host->buffer_bytes_left == 0) {
		host->sg = sg_next(host->data->sg);
		mmc_davinci_sg_to_buf(host);
	}

	p = host->buffer;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->bytes_left -= n;

	/* NOTE:  we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that access width( i.e. ACCWD) is 4 bytes
	 */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		for (i = 0; i < (n >> 2); i++) {
			writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
			p = p + 4;
		}
		if (n & 3) {
			iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
			p = p + (n & 3);
		}
	} else {
		for (i = 0; i < (n >> 2); i++) {
			*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
			p = p + 4;
		}
		if (n & 3) {
			ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
			p = p + (n & 3);
		}
	}
	host->buffer = p;
}
/* Program and launch one MMC command: build MMCCMD from the response
 * type, data direction and DMA choice, preload the FIFO for PIO
 * writes, then write ARG and CMD.  Small PIO transfers are polled for
 * completion (poll_threshold/poll_loopcount) before interrupts are
 * enabled — register write order here matters to the controller.
 */
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
		struct mmc_command *cmd)
{
	u32 cmd_reg = 0;
	u32 im_val;

	dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
		cmd->opcode, cmd->arg,
		({ char *s;
		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_R1:
			s = ", R1/R5/R6/R7 response";
			break;
		case MMC_RSP_R1B:
			s = ", R1b response";
			break;
		case MMC_RSP_R2:
			s = ", R2 response";
			break;
		case MMC_RSP_R3:
			s = ", R3/R4 response";
			break;
		default:
			s = ", (R? response)";
			break;
		}; s; }));
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
		/* There's some spec confusion about when R1B is
		 * allowed, but if the card doesn't issue a BUSY
		 * then it's harmless for us to allow it.
		 */
		cmd_reg |= MMCCMD_BSYEXP;
		/* FALLTHROUGH */
	case MMC_RSP_R1:		/* 48 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R1456;
		break;
	case MMC_RSP_R2:		/* 136 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R2;
		break;
	case MMC_RSP_R3:		/* 48 bits, no CRC */
		cmd_reg |= MMCCMD_RSPFMT_R3;
		break;
	default:
		cmd_reg |= MMCCMD_RSPFMT_NONE;
		dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	/* Set command index */
	cmd_reg |= cmd->opcode;

	/* Enable EDMA transfer triggers */
	if (host->do_dma)
		cmd_reg |= MMCCMD_DMATRIG;

	/* version 2 controllers also want the trigger for PIO reads */
	if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
			host->data_dir == DAVINCI_MMC_DATADIR_READ)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Setting whether command involves data transfer or not */
	if (cmd->data)
		cmd_reg |= MMCCMD_WDATX;

	/* Setting whether stream or block transfer */
	if (cmd->flags & MMC_DATA_STREAM)
		cmd_reg |= MMCCMD_STRMTP;

	/* Setting whether data read or write */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
		cmd_reg |= MMCCMD_DTRW;

	if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
		cmd_reg |= MMCCMD_PPLEN;

	/* set Command timeout */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);

	/* Enable interrupt (calculate here, defer until FIFO is stuffed). */
	im_val =  MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

		if (!host->do_dma)
			im_val |= MMCST0_DXRDY;
	} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

		if (!host->do_dma)
			im_val |= MMCST0_DRRDY;
	}

	/*
	 * Before non-DMA WRITE commands the controller needs priming:
	 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg,  host->base + DAVINCI_MMCCMD);

	host->active_request = true;

	/* try to finish short PIO transfers without taking an interrupt */
	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
			cpu_relax();
		}
	}

	/* still pending: let the interrupt handler take over */
	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}
/*----------------------------------------------------------------------*/
/* DMA infrastructure */
static void davinci_abort_dma(struct mmc_davinci_host *host)
{
int sync_dev;
if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
sync_dev = host->rxdma;
else
sync_dev = host->txdma;
edma_stop(sync_dev);
edma_clean_channel(sync_dev);
}
static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);

/* EDMA completion callback.  Successful completions are ignored here —
 * the MMC controller's own "transfer done" interrupt finishes the
 * request; only DMA errors are reported and fail the data phase. */
static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
{
	if (DMA_COMPLETE != ch_status) {
		struct mmc_davinci_host *host = data;

		/* Currently means:  DMA Event Missed, or "null" transfer
		 * request was seen.  In the future, TC errors (like bad
		 * addresses) might be presented too.
		 */
		dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
			(host->data->flags & MMC_DATA_WRITE)
				? "write" : "read");
		host->data->error = -EIO;
		mmc_davinci_xfer_done(host, host->data);
	}
}
/* Set up tx or rx template, to be modified and updated later */
/* Build the EDMA PARAM template for one direction: fixed FIFO port on
 * one side, incrementing memory on the other, in rw_threshold-sized
 * A-B synced frames.  Per-segment fields (address, ccnt, link) are
 * filled in later by mmc_davinci_send_dma_request().
 */
static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
		bool tx, struct edmacc_param *template)
{
	unsigned	sync_dev;
	const u16	acnt = 4;			/* FIFO word size */
	const u16	bcnt = rw_threshold >> 2;	/* words per frame */
	const u16	ccnt = 0;			/* set per segment */
	u32		src_port = 0;
	u32		dst_port = 0;
	s16		src_bidx, dst_bidx;
	s16		src_cidx, dst_cidx;

	/*
	 * A-B Sync transfer:  each DMA request is for one "frame" of
	 * rw_threshold bytes, broken into "acnt"-size chunks repeated
	 * "bcnt" times.  Each segment needs "ccnt" such frames; since
	 * we tell the block layer our mmc->max_seg_size limit, we can
	 * trust (later) that it's within bounds.
	 *
	 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and
	 * EDMA will optimize memory operations to use larger bursts.
	 */
	if (tx) {
		sync_dev = host->txdma;

		/* src_prt, ccnt, and link to be set up later */
		src_bidx = acnt;
		src_cidx = acnt * bcnt;

		dst_port = host->mem_res->start + DAVINCI_MMCDXR;
		dst_bidx = 0;
		dst_cidx = 0;
	} else {
		sync_dev = host->rxdma;

		src_port = host->mem_res->start + DAVINCI_MMCDRR;
		src_bidx = 0;
		src_cidx = 0;

		/* dst_prt, ccnt, and link to be set up later */
		dst_bidx = acnt;
		dst_cidx = acnt * bcnt;
	}

	/*
	 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses
	 * are not 256-bit (32-byte) aligned.  So we use INCR, and the W8BIT
	 * parameter is ignored.
	 */
	edma_set_src(sync_dev, src_port, INCR, W8BIT);
	edma_set_dest(sync_dev, dst_port, INCR, W8BIT);

	edma_set_src_index(sync_dev, src_bidx, src_cidx);
	edma_set_dest_index(sync_dev, dst_bidx, dst_cidx);

	edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC);

	edma_read_slot(sync_dev, template);

	/* don't bother with irqs or chaining */
	template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12;
}
/*
 * Program one PaRAM entry (master channel plus any linked slots) per
 * scatterlist segment from the pre-built tx/rx template, then start the
 * EDMA channel for the current request.
 */
static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	struct edmacc_param *template;
	int channel, slot;
	unsigned link;
	struct scatterlist *sg;
	unsigned sg_len;
	unsigned bytes_left = host->bytes_left;
	/* rw_threshold is a power of two; shift converts bytes to frames */
	const unsigned shift = ffs(rw_threshold) - 1;

	/* pick the template and master channel for this direction */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		template = &host->tx_template;
		channel = host->txdma;
	} else {
		template = &host->rx_template;
		channel = host->rxdma;
	}

	/* We know sg_len and ccnt will never be out of range because
	 * we told the mmc layer which in turn tells the block layer
	 * to ensure that it only hands us one scatterlist segment
	 * per EDMA PARAM entry. Update the PARAM
	 * entries needed for each segment of this scatterlist.
	 */
	/* First iteration writes the master channel's own PaRAM set
	 * (slot == channel); subsequent iterations fill the linked slots.
	 */
	for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len;
			sg_len-- != 0 && bytes_left;
			sg = sg_next(sg), slot = host->links[link++]) {
		u32 buf = sg_dma_address(sg);
		unsigned count = sg_dma_len(sg);

		/* link to the next slot, or terminate (0xffff) on the last */
		template->link_bcntrld = sg_len
				? (EDMA_CHAN_SLOT(host->links[link]) << 5)
				: 0xffff;

		/* last segment may be shorter than its sg mapping */
		if (count > bytes_left)
			count = bytes_left;
		bytes_left -= count;

		/* only the memory-side address varies per segment */
		if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
			template->src = buf;
		else
			template->dst = buf;
		template->ccnt = count >> shift;

		edma_write_slot(slot, template);
	}

	if (host->version == MMC_CTLR_VERSION_2)
		edma_clear_event(channel);

	edma_start(channel);
}
/*
 * Map the request's scatterlist for DMA and kick off the transfer.
 * Returns 0 on success; -1 (after unmapping) if any segment would need a
 * partial FIFO, in which case the caller falls back to PIO.
 */
static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	int mask = rw_threshold - 1;
	int seg;

	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE));

	/* no individual DMA segment should need a partial FIFO */
	for (seg = 0; seg < host->sg_len; seg++) {
		if (!(sg_dma_len(data->sg + seg) & mask))
			continue;

		dma_unmap_sg(mmc_dev(host->mmc),
				data->sg, data->sg_len,
				(data->flags & MMC_DATA_WRITE)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		return -1;
	}

	host->do_dma = 1;
	mmc_davinci_send_dma_request(host, data);

	return 0;
}
/* Release the linked PaRAM slots and both master EDMA channels. */
static void __init_or_module
davinci_release_dma_channels(struct mmc_davinci_host *host)
{
	unsigned idx;

	/* nothing was acquired when running in PIO mode */
	if (!host->use_dma)
		return;

	for (idx = 0; idx < host->n_link; idx++)
		edma_free_slot(host->links[idx]);

	edma_free_channel(host->txdma);
	edma_free_channel(host->rxdma);
}
/*
 * Grab the tx and rx master EDMA channels, build their templates, and
 * allocate as many linked PaRAM slots as we can (up to nr_sg). Failing to
 * get link slots is not fatal; host->n_link records how many we got.
 */
static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
	u32 nslots;
	int ret, idx;

	/* Acquire master DMA write channel */
	ret = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host,
			EVENTQ_DEFAULT);
	if (ret < 0) {
		dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
				"tx", ret);
		return ret;
	}
	mmc_davinci_dma_setup(host, true, &host->tx_template);

	/* Acquire master DMA read channel */
	ret = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host,
			EVENTQ_DEFAULT);
	if (ret < 0) {
		dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n",
				"rx", ret);
		goto free_master_write;
	}
	mmc_davinci_dma_setup(host, false, &host->rx_template);

	/* Allocate parameter RAM slots, which will later be bound to a
	 * channel as needed to handle a scatterlist.
	 */
	nslots = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
	for (idx = 0; idx < nslots; idx++) {
		ret = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
		if (ret < 0) {
			dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
				ret);
			break;
		}
		host->links[idx] = ret;
	}
	host->n_link = idx;

	return 0;

free_master_write:
	edma_free_channel(host->txdma);

	return ret;
}
/*----------------------------------------------------------------------*/
/*
 * Program block count/length, data timeout, and the FIFO for the request's
 * data phase, then arm either DMA or PIO. With no data phase, just clear
 * the block registers.
 */
static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
	/* FIFO threshold differs per controller version: 32 bytes on v1,
	 * 64 bytes on v2 (selected below).
	 */
	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
	int timeout;
	struct mmc_data *data = req->data;

	if (host->version == MMC_CTLR_VERSION_2)
		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

	host->data = data;
	if (data == NULL) {
		/* command-only request: no data direction, zero block regs */
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		writel(0, host->base + DAVINCI_MMCBLEN);
		writel(0, host->base + DAVINCI_MMCNBLK);
		return;
	}

	dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
		(data->flags & MMC_DATA_STREAM) ? "stream" : "block",
		(data->flags & MMC_DATA_WRITE) ? "write" : "read",
		data->blocks, data->blksz);
	dev_dbg(mmc_dev(host->mmc), "  DTO %d cycles + %d ns\n",
		data->timeout_clks, data->timeout_ns);

	/* data timeout in controller clock cycles, clamped to the
	 * 16-bit MMCTOD register
	 */
	timeout = data->timeout_clks +
		(data->timeout_ns / host->ns_in_one_cycle);
	if (timeout > 0xffff)
		timeout = 0xffff;

	writel(timeout, host->base + DAVINCI_MMCTOD);
	writel(data->blocks, host->base + DAVINCI_MMCNBLK);
	writel(data->blksz, host->base + DAVINCI_MMCBLEN);

	/* Configure the FIFO: pulse FIFORST with the chosen direction,
	 * then write the same value without the reset bit.
	 * (switch on a single masked flag == if/else on MMC_DATA_WRITE)
	 */
	switch (data->flags & MMC_DATA_WRITE) {
	case MMC_DATA_WRITE:
		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
			host->base + DAVINCI_MMCFIFOCTL);
		break;

	default:
		host->data_dir = DAVINCI_MMC_DATADIR_READ;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
			host->base + DAVINCI_MMCFIFOCTL);
		break;
	}

	host->buffer = NULL;
	host->bytes_left = data->blocks * data->blksz;

	/* For now we try to use DMA whenever we won't need partial FIFO
	 * reads or writes, either for the whole transfer (as tested here)
	 * or for any individual scatterlist segment (tested when we call
	 * start_dma_transfer).
	 *
	 * While we *could* change that, unusual block sizes are rarely
	 * used.  The occasional fallback to PIO should't hurt.
	 */
	if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
			&& mmc_davinci_start_dma_transfer(host, data) == 0) {
		/* zero this to ensure we take no PIO paths */
		host->bytes_left = 0;
	} else {
		/* Revert to CPU Copy */
		host->sg_len = data->sg_len;
		host->sg = host->data->sg;
		mmc_davinci_sg_to_buf(host);
	}
}
/*
 * mmc_host_ops.request: wait (bounded) for the card to deassert BUSY from
 * a previous operation, then prepare the data phase and issue the command.
 */
static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	unsigned long deadline = jiffies + msecs_to_jiffies(900);
	u32 status = 0;

	/* Card may still be sending BUSY after a previous operation,
	 * typically some kind of write.  If so, we can't proceed yet.
	 */
	while (time_before(jiffies, deadline)) {
		status = readl(host->base + DAVINCI_MMCST1);
		if (!(status & MMCST1_BUSY))
			break;
		cpu_relax();
	}

	/* still busy after ~900ms: fail the request with a timeout */
	if (status & MMCST1_BUSY) {
		dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
		req->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, req);
		return;
	}

	host->do_dma = 0;
	mmc_davinci_prepare_data(host, req);
	mmc_davinci_start_command(host, req->cmd);
}
/*
 * Compute the push-pull clock divisor (CLKRT value) for the requested card
 * frequency, and refresh host->ns_in_one_cycle to match. The divider is
 * bumped by one when the achievable frequency would exceed the request.
 */
static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
	unsigned int mmc_req_freq)
{
	unsigned int divisor = 0;
	unsigned int pclk = host->mmc_input_clk;
	unsigned int actual_freq;

	/* MMC clock = pclk / (2 * (divisor + 1)); pick the largest
	 * divisor that keeps us at or below the requested rate
	 */
	if (mmc_req_freq && pclk > (2 * mmc_req_freq))
		divisor = (pclk / (2 * mmc_req_freq)) - 1;

	actual_freq = pclk / (2 * (divisor + 1));
	if (actual_freq > mmc_req_freq)
		divisor = divisor + 1;

	/* Convert ns to clock cycles; use kHz math for identification-rate
	 * clocks (<= 400 kHz) and MHz math otherwise, to avoid overflow
	 */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000)
			/ (((pclk / (2 * (divisor + 1))) / 1000));
	else
		host->ns_in_one_cycle = (1000000)
			/ (((pclk / (2 * (divisor + 1))) / 1000000));

	return divisor;
}
/*
 * Program the MMC clock divider (CLKRT) for the current bus mode.
 * Open-drain mode always runs at MMCSD_INIT_CLOCK (card identification);
 * push-pull mode uses the divisor computed from ios->clock.
 */
static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
	unsigned int open_drain_freq = 0, mmc_pclk = 0;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);

	/* BUGFIX: mmc_pclk was never assigned, so the open-drain divisor
	 * below evaluated to (0 / x) - 1 == UINT_MAX and was always
	 * clamped to 0xFF regardless of the actual input clock. Use the
	 * cached controller input clock, as calculate_freq_for_card() does.
	 */
	mmc_pclk = host->mmc_input_clk;

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		u32 temp;

		/* Ignoring the init clock value passed for fixing the inter
		 * operability with different cards.
		 */
		open_drain_freq = ((unsigned int)mmc_pclk
				/ (2 * MMCSD_INIT_CLOCK)) - 1;

		/* CLKRT is an 8-bit field */
		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= open_drain_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Convert ns to clock cycles */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
	} else {
		u32 temp;

		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		/* CLKRT is an 8-bit field */
		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;

		/* gate the clock off while changing the divider, then
		 * re-enable it with the new rate
		 */
		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
		writel(temp, host->base + DAVINCI_MMCCLK);

		udelay(10);

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

		udelay(10);
	}
}
/*
 * mmc_host_ops.set_ios: apply power state (via board callback), bus width,
 * and clock settings; on MMC_POWER_UP also send the 80-cycle init stream.
 */
static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	dev_dbg(mmc_dev(host->mmc),
		"clock %dHz busmode %d powermode %d Vdd %04x\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd);

	/* board-level power switching, if the platform provides it */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (config && config->set_power)
			config->set_power(pdev->id, false);
		break;
	case MMC_POWER_UP:
		if (config && config->set_power)
			config->set_power(pdev->id, true);
		break;
	}

	/* Program MMCCTL width bits; only v2 controllers have the
	 * 8-bit width bit, so the v1 paths leave it untouched.
	 */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
		writel((readl(host->base + DAVINCI_MMCCTL) &
			~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
			host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_4:
		dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel((readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) |
				MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_1:
		dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		unsigned long timeout = jiffies + msecs_to_jiffies(50);
		bool lose = true;

		/* Send clock cycles, poll completion */
		writel(0, host->base + DAVINCI_MMCARGHL);
		writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
		while (time_before(jiffies, timeout)) {
			u32 tmp = readl(host->base + DAVINCI_MMCST0);

			if (tmp & MMCST0_RSPDNE) {
				lose = false;
				break;
			}
			cpu_relax();
		}
		if (lose)
			dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
	}

	/* FIXME on power OFF, reset things ... */
}
/*
 * Data phase completion: handle the SDIO-interrupt errata workaround,
 * tear down any DMA mapping, and either finish the request or issue the
 * stop command.
 */
static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
	host->data = NULL;

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
		/*
		 * SDIO Interrupt Detection work-around as suggested by
		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
		 */
		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
					SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		}
	}

	if (host->do_dma) {
		/* stop the channel and release the scatterlist mapping */
		davinci_abort_dma(host);

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE)
			     ? DMA_TO_DEVICE
			     : DMA_FROM_DEVICE);
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	/* finish now unless a stop command is pending (and the command
	 * phase didn't already fail)
	 */
	if (!data->stop || (host->cmd && host->cmd->error)) {
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}
/*
 * Command phase completion: latch the response registers and, if there is
 * no data phase (or the command failed), finish the whole request.
 */
static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		/* a timed-out command is not worth retrying */
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}
/*
 * Assert (val != 0) or release (val == 0) the CMD and DAT line resets in
 * MMCCTL, with a short settle delay afterwards.
 */
static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
					  int val)
{
	u32 ctl = readl(host->base + DAVINCI_MMCCTL);

	if (val)		/* reset */
		ctl |= MMCCTL_CMDRST | MMCCTL_DATRST;
	else			/* enable */
		ctl &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

	writel(ctl, host->base + DAVINCI_MMCCTL);
	udelay(10);
}
/*
 * Recover from a data error by pulsing the CMD/DAT line resets.
 * 'data' is unused; kept for call-site symmetry with the error paths.
 */
static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}
/* SDIO interrupt line handler: ack the controller and notify the core. */
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int ist = readl(host->base + DAVINCI_SDIOIST);

	if (ist & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", ist);
		/* write-1-to-clear the IOINT status bit */
		writel(ist | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}
/*
 * Main MMC controller interrupt handler. MMCST0 bits are clear-on-read,
 * so every read is accumulated into 'qstatus' before being decoded into
 * command-done / transfer-done / error dispositions.
 *
 * NOTE(review): the TOUTRD and CRCWR/CRCRD branches dereference 'data'
 * without a NULL check (only DATDNE checks); presumably those status bits
 * can only fire while a data transfer is in flight — confirm against the
 * controller documentation.
 */
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
	unsigned int status, qstatus;
	int end_command = 0;
	int end_transfer = 0;
	struct mmc_data *data = host->data;

	if (host->cmd == NULL && host->data == NULL) {
		/* read (and thereby clear) the stale status */
		status = readl(host->base + DAVINCI_MMCST0);
		dev_dbg(mmc_dev(host->mmc),
			"Spurious interrupt 0x%04x\n", status);
		/* Disable the interrupt from mmcsd */
		writel(0, host->base + DAVINCI_MMCIM);
		return IRQ_NONE;
	}

	status = readl(host->base + DAVINCI_MMCST0);
	qstatus = status;

	/* handle FIFO first when using PIO for data.
	 * bytes_left will decrease to zero as I/O progress and status will
	 * read zero over iteration because this controller status
	 * register(MMCST0) reports any status only once and it is cleared
	 * by read. So, it is not unbouned loop even in the case of
	 * non-dma.
	 */
	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
		unsigned long im_val;

		/*
		 * If interrupts fire during the following loop, they will be
		 * handled by the handler, but the PIC will still buffer these.
		 * As a result, the handler will be called again to serve these
		 * needlessly. In order to avoid these spurious interrupts,
		 * keep interrupts masked during the loop.
		 */
		im_val = readl(host->base + DAVINCI_MMCIM);
		writel(0, host->base + DAVINCI_MMCIM);

		do {
			davinci_fifo_data_trans(host, rw_threshold);
			status = readl(host->base + DAVINCI_MMCST0);
			qstatus |= status;
		} while (host->bytes_left &&
			 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

		/*
		 * If an interrupt is pending, it is assumed it will fire when
		 * it is unmasked. This assumption is also taken when the MMCIM
		 * is first set. Otherwise, writing to MMCIM after reading the
		 * status is race-prone.
		 */
		writel(im_val, host->base + DAVINCI_MMCIM);
	}

	if (qstatus & MMCST0_DATDNE) {
		/* All blocks sent/received, and CRC checks passed */
		if (data != NULL) {
			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
				/* if datasize < rw_threshold
				 * no RX ints are generated
				 */
				davinci_fifo_data_trans(host, host->bytes_left);
			}
			end_transfer = 1;
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_err(mmc_dev(host->mmc),
					"DATDNE with no host->data\n");
		}
	}

	if (qstatus & MMCST0_TOUTRD) {
		/* Read data timeout */
		data->error = -ETIMEDOUT;
		end_transfer = 1;

		dev_dbg(mmc_dev(host->mmc),
			"read data timeout, status %x\n",
			qstatus);

		davinci_abort_data(host, data);
	}

	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
		/* Data CRC error */
		data->error = -EILSEQ;
		end_transfer = 1;

		/* NOTE:  this controller uses CRCWR to report both CRC
		 * errors and timeouts (on writes).  MMCDRSP values are
		 * only weakly documented, but 0x9f was clearly a timeout
		 * case and the two three-bit patterns in various SD specs
		 * (101, 010) aren't part of it ...
		 */
		if (qstatus & MMCST0_CRCWR) {
			u32 temp = readb(host->base + DAVINCI_MMCDRSP);

			if (temp == 0x9f)
				data->error = -ETIMEDOUT;
		}
		dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
			(qstatus & MMCST0_CRCWR) ? "write" : "read",
			(data->error == -ETIMEDOUT) ? "timeout" : "CRC");

		davinci_abort_data(host, data);
	}

	if (qstatus & MMCST0_TOUTRS) {
		/* Command timeout */
		if (host->cmd) {
			dev_dbg(mmc_dev(host->mmc),
				"CMD%d timeout, status %x\n",
				host->cmd->opcode, qstatus);
			host->cmd->error = -ETIMEDOUT;
			if (data) {
				end_transfer = 1;
				davinci_abort_data(host, data);
			} else
				end_command = 1;
		}
	}

	if (qstatus & MMCST0_CRCRS) {
		/* Command CRC error */
		dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
		if (host->cmd) {
			host->cmd->error = -EILSEQ;
			end_command = 1;
		}
	}

	if (qstatus & MMCST0_RSPDNE) {
		/* End of command phase */
		/* (non-zero pointer cast to int doubles as the flag) */
		end_command = (int) host->cmd;
	}

	if (end_command)
		mmc_davinci_cmd_done(host, host->cmd);
	if (end_transfer)
		mmc_davinci_xfer_done(host, data);
	return IRQ_HANDLED;
}
/* mmc_host_ops.get_cd: delegate card detect to the board callback. */
static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (config && config->get_cd)
		return config->get_cd(pdev->id);

	/* no board hook: the core falls back to polling */
	return -ENOSYS;
}
/* mmc_host_ops.get_ro: delegate write-protect sensing to the board. */
static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (config && config->get_ro)
		return config->get_ro(pdev->id);

	/* no board hook: report "unknown" */
	return -ENOSYS;
}
/*
 * mmc_host_ops.enable_sdio_irq: gate the controller's SDIO interrupt.
 * When enabling, if DAT1 is already low an interrupt is pending, so signal
 * it immediately instead of arming the hardware (part of the DM355 errata
 * workaround used in mmc_davinci_xfer_done()).
 */
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			/* interrupt already asserted: ack and report it now */
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}
/* Host operations handed to the MMC core via mmc->ops in probe(). */
static struct mmc_host_ops mmc_davinci_ops = {
	.request	= mmc_davinci_request,
	.set_ios	= mmc_davinci_set_ios,
	.get_cd		= mmc_davinci_get_cd,
	.get_ro		= mmc_davinci_get_ro,
	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};
/*----------------------------------------------------------------------*/
#ifdef CONFIG_CPU_FREQ
/*
 * CPU frequency transitions can change the MMC input clock; after each
 * change, re-read the clock rate and reprogram the divider under the
 * host lock.
 */
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
				     unsigned long val, void *data)
{
	struct mmc_davinci_host *host;
	unsigned int mmc_pclk;
	struct mmc_host *mmc;
	unsigned long flags;

	host = container_of(nb, struct mmc_davinci_host, freq_transition);
	mmc = host->mmc;
	mmc_pclk = clk_get_rate(host->clk);

	if (val == CPUFREQ_POSTCHANGE) {
		spin_lock_irqsave(&mmc->lock, flags);
		host->mmc_input_clk = mmc_pclk;
		calculate_clk_divider(mmc, &mmc->ios);
		spin_unlock_irqrestore(&mmc->lock, flags);
	}

	return 0;
}

/* Register the clock-rate notifier; paired with deregister below. */
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
/* no CPUFREQ: register/deregister are no-ops */
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	return 0;
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
/*
 * Bring the controller to a known state: hold CMD/DAT in reset while
 * enabling the module clock and programming maximum response/data
 * timeouts, then release the resets.
 */
static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{
	mmc_davinci_reset_ctrl(host, 1);

	writel(0, host->base + DAVINCI_MMCCLK);
	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

	/* max time-out register values (TOR: 13 bits, TOD: 16 bits) */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
	writel(0xFFFF, host->base + DAVINCI_MMCTOD);

	mmc_davinci_reset_ctrl(host, 0);
}
/*
 * Platform probe: map resources, acquire clock and (optionally) EDMA
 * channels, configure mmc_host limits, and register with the MMC core.
 *
 * NOTE(review): 'pdata' is dereferenced unconditionally below (nr_sg,
 * version) although other sites guard with "if (pdata && ...)" and the
 * REVISIT comment implies pdata may be NULL — confirm every board using
 * this driver supplies platform data.
 * NOTE(review): early failure gotos reach 'out:', which calls
 * mmc_davinci_cpufreq_deregister() before the notifier was registered
 * (and possibly with host == NULL) — verify the error paths.
 */
static int __init davinci_mmcsd_probe(struct platform_device *pdev)
{
	struct davinci_mmc_config *pdata = pdev->dev.platform_data;
	struct mmc_davinci_host *host = NULL;
	struct mmc_host *mmc = NULL;
	struct resource *r, *mem = NULL;
	int ret = 0, irq = 0;
	size_t mem_size;

	/* REVISIT:  when we're fully converted, fail if pdata is NULL */

	ret = -ENODEV;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq == NO_IRQ)
		goto out;

	ret = -EBUSY;
	mem_size = resource_size(r);
	mem = request_mem_region(r->start, mem_size, pdev->name);
	if (!mem)
		goto out;

	ret = -ENOMEM;
	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;	/* Important */

	/* DMA resources: rx event first, then tx */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r)
		goto out;
	host->rxdma = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r)
		goto out;
	host->txdma = r->start;

	host->mem_res = mem;
	host->base = ioremap(mem->start, mem_size);
	if (!host->base)
		goto out;

	ret = -ENXIO;
	host->clk = clk_get(&pdev->dev, "MMCSDCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto out;
	}
	clk_enable(host->clk);
	host->mmc_input_clk = clk_get_rate(host->clk);

	init_mmcsd_host(host);

	/* NOTE(review): nr_sg - 1 looks like it reserves the master
	 * channel's own PaRAM set; confirm against max_segs below */
	if (pdata->nr_sg)
		host->nr_sg = pdata->nr_sg - 1;

	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
		host->nr_sg = MAX_NR_SG;

	host->use_dma = use_dma;
	host->mmc_irq = irq;
	host->sdio_irq = platform_get_irq(pdev, 1);

	/* fall back to PIO if the EDMA resources can't be acquired */
	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
		host->use_dma = 0;

	/* REVISIT:  someday, support IRQ-driven card detection.  */
	mmc->caps |= MMC_CAP_NEEDS_POLL;
	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

	if (pdata && (pdata->wires == 4 || pdata->wires == 0))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (pdata && (pdata->wires == 8))
		mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);

	host->version = pdata->version;

	mmc->ops = &mmc_davinci_ops;
	mmc->f_min = 312500;
	mmc->f_max = 25000000;
	if (pdata && pdata->max_freq)
		mmc->f_max = pdata->max_freq;
	if (pdata && pdata->caps)
		mmc->caps |= pdata->caps;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* With no iommu coalescing pages, each phys_seg is a hw_seg.
	 * Each hw_seg uses one EDMA parameter RAM slot, always one
	 * channel and then usually some linked slots.
	 */
	mmc->max_segs		= 1 + host->n_link;

	/* EDMA limit per hw segment (one or two MBytes) */
	mmc->max_seg_size	= MAX_CCNT * rw_threshold;

	/* MMC/SD controller limits for multiblock requests */
	mmc->max_blk_size	= 4095;  /* BLEN is 12 bits */
	mmc->max_blk_count	= 65535; /* NBLK is 16 bits */
	mmc->max_req_size	= mmc->max_blk_size * mmc->max_blk_count;

	dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
	dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
	dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
	dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);

	platform_set_drvdata(pdev, host);

	ret = mmc_davinci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto cpu_freq_fail;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto out;

	ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
	if (ret)
		goto out;

	/* SDIO IRQ support is optional; only advertise the cap when the
	 * dedicated line is present and successfully claimed
	 */
	if (host->sdio_irq >= 0) {
		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
				  mmc_hostname(mmc), host);
		if (!ret)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	rename_region(mem, mmc_hostname(mmc));

	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
		host->use_dma ? "DMA" : "PIO",
		(mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;

out:
	mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
	/* undo whatever succeeded, in reverse order */
	if (host) {
		davinci_release_dma_channels(host);

		if (host->clk) {
			clk_disable(host->clk);
			clk_put(host->clk);
		}

		if (host->base)
			iounmap(host->base);
	}

	if (mmc)
		mmc_free_host(mmc);

	if (mem)
		release_resource(mem);

	dev_dbg(&pdev->dev, "probe err %d\n", ret);

	return ret;
}
/* Platform remove: unwind everything probe() set up, newest first. */
static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);
	if (!host)
		return 0;

	mmc_davinci_cpufreq_deregister(host);

	mmc_remove_host(host->mmc);
	free_irq(host->mmc_irq, host);
	if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
		free_irq(host->sdio_irq, host);

	davinci_release_dma_channels(host);

	clk_disable(host->clk);
	clk_put(host->clk);

	iounmap(host->base);

	release_resource(host->mem_res);

	mmc_free_host(host->mmc);

	return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend: let the core quiesce the card first; only then mask interrupts,
 * hold the controller in reset, and gate its clock.
 */
static int davinci_mmcsd_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
	int ret = mmc_suspend_host(host->mmc);

	host->suspended = 0;
	if (!ret) {
		writel(0, host->base + DAVINCI_MMCIM);
		mmc_davinci_reset_ctrl(host, 1);
		clk_disable(host->clk);
		host->suspended = 1;
	}

	return ret;
}
/* Resume: reverse of suspend — clock on, reset released, core resumed. */
static int davinci_mmcsd_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);
	int ret = 0;

	/* nothing to do unless suspend actually powered us down */
	if (!host->suspended)
		return 0;

	clk_enable(host->clk);
	mmc_davinci_reset_ctrl(host, 0);

	ret = mmc_resume_host(host->mmc);
	if (!ret)
		host->suspended = 0;

	return ret;
}
/* dev_pm_ops wired into the platform driver; NULL when PM is disabled. */
static const struct dev_pm_ops davinci_mmcsd_pm = {
	.suspend        = davinci_mmcsd_suspend,
	.resume         = davinci_mmcsd_resume,
};

#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif
/* No .probe here: probe is passed to platform_driver_probe() below so the
 * __init probe code can be discarded after boot.
 */
static struct platform_driver davinci_mmcsd_driver = {
	.driver		= {
		.name	= "davinci_mmc",
		.owner	= THIS_MODULE,
		.pm	= davinci_mmcsd_pm_ops,
	},
	.remove		= __exit_p(davinci_mmcsd_remove),
};
/* Module entry: register the driver, binding the one-shot probe. */
static int __init davinci_mmcsd_init(void)
{
	return platform_driver_probe(&davinci_mmcsd_driver,
				     davinci_mmcsd_probe);
}
module_init(davinci_mmcsd_init);
/* Module exit: unregister the platform driver. */
static void __exit davinci_mmcsd_exit(void)
{
	platform_driver_unregister(&davinci_mmcsd_driver);
}
module_exit(davinci_mmcsd_exit);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
| gpl-2.0 |
h2o64/android_kernel_motorola_msm8226 | arch/arm/mach-omap1/board-h2-mmc.c | 5053 | 1823 | /*
* linux/arch/arm/mach-omap1/board-h2-mmc.c
*
* Copyright (C) 2007 Instituto Nokia de Tecnologia - INdT
* Author: Felipe Balbi <felipe.lima@indt.org.br>
*
* This code is based on linux/arch/arm/mach-omap2/board-n800-mmc.c, which is:
* Copyright (C) 2006 Nokia Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/i2c/tps65010.h>
#include <plat/mmc.h>
#include "board-h2.h"
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
/* Slot power switch: drive the TPS65010 GPIO that gates MMC power. */
static int mmc_set_power(struct device *dev, int slot, int power_on,
				int vdd)
{
	gpio_set_value(H2_TPS_GPIO_MMC_PWR_EN, power_on);
	return 0;
}
/* Claim the MMC power-enable GPIO and start with power off. */
static int mmc_late_init(struct device *dev)
{
	int status = gpio_request(H2_TPS_GPIO_MMC_PWR_EN, "MMC power");

	if (status < 0)
		return status;

	gpio_direction_output(H2_TPS_GPIO_MMC_PWR_EN, 0);

	return status;
}
/* Release the power-enable GPIO claimed by mmc_late_init(). */
static void mmc_cleanup(struct device *dev)
{
	gpio_free(H2_TPS_GPIO_MMC_PWR_EN);
}
/*
* H2 could use the following functions tested:
* - mmc_get_cover_state that uses OMAP_MPUIO(1)
* - mmc_get_wp that uses OMAP_MPUIO(3)
*/
/* Platform data for the single H2 MMC slot (power via TPS65010 GPIO). */
static struct omap_mmc_platform_data mmc1_data = {
	.nr_slots                       = 1,
	.init				= mmc_late_init,
	.cleanup			= mmc_cleanup,
	.dma_mask			= 0xffffffff,
	.slots[0]       = {
		.set_power              = mmc_set_power,
		.ocr_mask               = MMC_VDD_32_33 | MMC_VDD_33_34,
		.name                   = "mmcblk",
	},
};
static struct omap_mmc_platform_data *mmc_data[OMAP16XX_NR_MMC];
/* Board init hook: register slot 1's platform data with the OMAP MMC core. */
void __init h2_mmc_init(void)
{
	mmc_data[0] = &mmc1_data;
	omap1_init_mmc(mmc_data, OMAP16XX_NR_MMC);
}
#else
/* MMC driver not configured: empty stub keeps board init linking. */
void __init h2_mmc_init(void)
{
}
#endif
| gpl-2.0 |
omega-roms/G900F_Omega_Kernel_LL_5.0 | fs/nls/nls_cp437.c | 12221 | 17508 | /*
* linux/fs/nls/nls_cp437.c
*
* Charset cp437 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
/* Auto-generated forward table: CP437 byte value -> Unicode code point.
 * Indexed directly by the 8-bit character; do not hand-edit.
 */
static const wchar_t charset2uni[256] = {
	/* 0x00*/
	0x0000, 0x0001, 0x0002, 0x0003,
	0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b,
	0x000c, 0x000d, 0x000e, 0x000f,
	/* 0x10*/
	0x0010, 0x0011, 0x0012, 0x0013,
	0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b,
	0x001c, 0x001d, 0x001e, 0x001f,
	/* 0x20*/
	0x0020, 0x0021, 0x0022, 0x0023,
	0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b,
	0x002c, 0x002d, 0x002e, 0x002f,
	/* 0x30*/
	0x0030, 0x0031, 0x0032, 0x0033,
	0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b,
	0x003c, 0x003d, 0x003e, 0x003f,
	/* 0x40*/
	0x0040, 0x0041, 0x0042, 0x0043,
	0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b,
	0x004c, 0x004d, 0x004e, 0x004f,
	/* 0x50*/
	0x0050, 0x0051, 0x0052, 0x0053,
	0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b,
	0x005c, 0x005d, 0x005e, 0x005f,
	/* 0x60*/
	0x0060, 0x0061, 0x0062, 0x0063,
	0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b,
	0x006c, 0x006d, 0x006e, 0x006f,
	/* 0x70*/
	0x0070, 0x0071, 0x0072, 0x0073,
	0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b,
	0x007c, 0x007d, 0x007e, 0x007f,
	/* 0x80*/
	0x00c7, 0x00fc, 0x00e9, 0x00e2,
	0x00e4, 0x00e0, 0x00e5, 0x00e7,
	0x00ea, 0x00eb, 0x00e8, 0x00ef,
	0x00ee, 0x00ec, 0x00c4, 0x00c5,
	/* 0x90*/
	0x00c9, 0x00e6, 0x00c6, 0x00f4,
	0x00f6, 0x00f2, 0x00fb, 0x00f9,
	0x00ff, 0x00d6, 0x00dc, 0x00a2,
	0x00a3, 0x00a5, 0x20a7, 0x0192,
	/* 0xa0*/
	0x00e1, 0x00ed, 0x00f3, 0x00fa,
	0x00f1, 0x00d1, 0x00aa, 0x00ba,
	0x00bf, 0x2310, 0x00ac, 0x00bd,
	0x00bc, 0x00a1, 0x00ab, 0x00bb,
	/* 0xb0*/
	0x2591, 0x2592, 0x2593, 0x2502,
	0x2524, 0x2561, 0x2562, 0x2556,
	0x2555, 0x2563, 0x2551, 0x2557,
	0x255d, 0x255c, 0x255b, 0x2510,
	/* 0xc0*/
	0x2514, 0x2534, 0x252c, 0x251c,
	0x2500, 0x253c, 0x255e, 0x255f,
	0x255a, 0x2554, 0x2569, 0x2566,
	0x2560, 0x2550, 0x256c, 0x2567,
	/* 0xd0*/
	0x2568, 0x2564, 0x2565, 0x2559,
	0x2558, 0x2552, 0x2553, 0x256b,
	0x256a, 0x2518, 0x250c, 0x2588,
	0x2584, 0x258c, 0x2590, 0x2580,
	/* 0xe0*/
	0x03b1, 0x00df, 0x0393, 0x03c0,
	0x03a3, 0x03c3, 0x00b5, 0x03c4,
	0x03a6, 0x0398, 0x03a9, 0x03b4,
	0x221e, 0x03c6, 0x03b5, 0x2229,
	/* 0xf0*/
	0x2261, 0x00b1, 0x2265, 0x2264,
	0x2320, 0x2321, 0x00f7, 0x2248,
	0x00b0, 0x2219, 0x00b7, 0x221a,
	0x207f, 0x00b2, 0x25a0, 0x00a0,
};
/* Auto-generated reverse sub-table for Unicode page 0x00 (U+0000..U+00FF):
 * low byte of the code point -> CP437 byte, 0x00 = no exact mapping
 * (except U+0000 itself). Do not hand-edit.
 */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xff, 0xad, 0x9b, 0x9c, 0x00, 0x9d, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
	0x00, 0x00, 0xa7, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */
	0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */
	0x00, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
	0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */
	0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */
	0x00, 0xa4, 0x95, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */
	0x00, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x98, /* 0xf8-0xff */
};
/* Unicode page 0x01 (Latin Extended-A) -> cp437; 0x00 means "no mapping". */
static const unsigned char page01[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
};
/* Unicode page 0x03 (Greek) -> cp437; only the symbols cp437 carries map. */
static const unsigned char page03[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
};
/* Unicode page 0x20 (General Punctuation) -> cp437. */
static const unsigned char page20[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
};
/* Unicode page 0x22 (Mathematical Operators) -> cp437. */
static const unsigned char page22[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
};
/* Unicode page 0x23 (Miscellaneous Technical) -> cp437. */
static const unsigned char page23[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
};
/* Unicode page 0x25 (Box Drawing / Block Elements) -> cp437. */
static const unsigned char page25[256] = {
0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
};
/*
 * Top-level dispatch: indexed by the high byte of the Unicode code point.
 * A NULL entry means no character in that page maps to cp437.
 */
static const unsigned char *const page_uni2charset[256] = {
page00, page01, NULL, page03, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
page20, NULL, page22, page23, NULL, page25, NULL, NULL,
};
/*
 * cp437 byte -> lowercase cp437 byte. Identity for characters with no
 * lowercase form; 0x00 marks bytes with no valid case mapping.
 */
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */
0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
/*
 * cp437 byte -> uppercase cp437 byte. Identity for characters with no
 * uppercase form; 0x00 marks bytes with no valid case mapping.
 */
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x92, 0x92, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * Translate one cp437 byte to its Unicode code point.
 * Stores the code point through *uni and returns 1 on success,
 * or -EINVAL when the byte has no Unicode mapping (table entry 0).
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[rawstring[0]];
	return (*uni == 0x0000) ? -EINVAL : 1;
}
/* NLS descriptor registered with the kernel for codepage 437. */
static struct nls_table table = {
.charset = "cp437",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
.owner = THIS_MODULE,
};
/* Register the cp437 table on module load. */
static int __init init_nls_cp437(void)
{
return register_nls(&table);
}
/* Unregister the cp437 table on module unload. */
static void __exit exit_nls_cp437(void)
{
unregister_nls(&table);
}
module_init(init_nls_cp437)
module_exit(exit_nls_cp437)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
DutchDanny/samsung-gt-P7510-ics-3.1.10 | arch/avr32/boards/mimc200/flash.c | 13757 | 3150 | /*
* MIMC200 board-specific flash initialization
*
* Copyright (C) 2008 Mercury IMC Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <mach/smc.h>
/* SMC bus timings for the NOR flash chips; values are in nanoseconds. */
static struct smc_timing flash_timing __initdata = {
.ncs_read_setup = 0,
.nrd_setup = 15,
.ncs_write_setup = 0,
.nwe_setup = 0,
.ncs_read_pulse = 115,
.nrd_pulse = 110,
.ncs_write_pulse = 60,
.nwe_pulse = 60,
.read_cycle = 115,
.write_cycle = 100,
};
/* SMC mode: 16-bit bus, read/write strobes on NRD/NWE, byte-write capable. */
static struct smc_config flash_config __initdata = {
.bus_width = 2,
.nrd_controlled = 1,
.nwe_controlled = 1,
.byte_write = 1,
};
/* system flash definition */
/* MTD partition layout for the 8 MiB 'System' NOR flash on chip select 0. */
static struct mtd_partition flash_parts_system[] = {
{
.name = "u-boot",
.offset = 0x00000000,
.size = 0x00020000, /* 128 KiB */
.mask_flags = MTD_WRITEABLE, /* bootloader kept read-only */
},
{
.name = "root",
.offset = 0x00020000,
.size = 0x007c0000,
},
{
.name = "splash",
.offset = 0x007e0000,
.size = 0x00010000, /* 64KiB */
},
{
.name = "env",
.offset = 0x007f0000,
.size = 0x00010000,
.mask_flags = MTD_WRITEABLE, /* u-boot environment kept read-only */
},
};
/* physmap platform data tying the 16-bit bus width to the partitions above. */
static struct physmap_flash_data flash_system = {
.width = 2,
.nr_parts = ARRAY_SIZE(flash_parts_system),
.parts = flash_parts_system,
};
/* 8 MiB memory window for the system flash (EBI chip select 0). */
static struct resource flash_resource_system = {
.start = 0x00000000,
.end = 0x007fffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device flash_device_system = {
.name = "physmap-flash",
.id = 0,
.resource = &flash_resource_system,
.num_resources = 1,
.dev = {
.platform_data = &flash_system,
},
};
/* data flash definition */
/* Single full-chip partition for the 8 MiB 'Data' NOR flash. */
static struct mtd_partition flash_parts_data[] = {
{
.name = "data",
.offset = 0x00000000,
.size = 0x00800000,
},
};
static struct physmap_flash_data flash_data = {
.width = 2,
.nr_parts = ARRAY_SIZE(flash_parts_data),
.parts = flash_parts_data,
};
/* 8 MiB memory window for the data flash (EBI chip select 1). */
static struct resource flash_resource_data = {
.start = 0x08000000,
.end = 0x087fffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device flash_device_data = {
.name = "physmap-flash",
.id = 1,
.resource = &flash_resource_data,
.num_resources = 1,
.dev = {
.platform_data = &flash_data,
},
};
/* This needs to be called after the SMC has been initialized */
/*
 * Program the SMC timings for both NOR chip selects and register the
 * two physmap flash devices. Returns 0 on success or a negative errno.
 *
 * Fix: the original ignored the return values of
 * platform_device_register(); failures are now propagated, and the
 * first device is unregistered if the second registration fails.
 */
static int __init mimc200_flash_init(void)
{
	int ret;

	smc_set_timing(&flash_config, &flash_timing);

	ret = smc_set_configuration(0, &flash_config);
	if (ret < 0) {
		printk(KERN_ERR "mimc200: failed to set 'System' NOR flash timing\n");
		return ret;
	}

	ret = smc_set_configuration(1, &flash_config);
	if (ret < 0) {
		printk(KERN_ERR "mimc200: failed to set 'Data' NOR flash timing\n");
		return ret;
	}

	ret = platform_device_register(&flash_device_system);
	if (ret) {
		printk(KERN_ERR "mimc200: failed to register 'System' flash device\n");
		return ret;
	}

	ret = platform_device_register(&flash_device_data);
	if (ret) {
		printk(KERN_ERR "mimc200: failed to register 'Data' flash device\n");
		/* undo the first registration so we don't leave a half-set-up board */
		platform_device_unregister(&flash_device_system);
		return ret;
	}

	return 0;
}
device_initcall(mimc200_flash_init);
| gpl-2.0 |
jforge/linux | sound/soc/codecs/lm49453.c | 190 | 53003 | /*
* lm49453.c - LM49453 ALSA Soc Audio driver
*
* Copyright (c) 2012 Texas Instruments, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* Initially based on sound/soc/codecs/wm8350.c
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
#include <sound/jack.h>
#include <sound/initval.h>
#include <asm/div64.h>
#include "lm49453.h"
/*
 * Power-on default values for the LM49453 page-0 registers, used as the
 * regmap register-cache defaults. Constified since regmap only reads it.
 */
static const struct reg_default lm49453_reg_defs[] = {
{ 0, 0x00 },
{ 1, 0x00 },
{ 2, 0x00 },
{ 3, 0x00 },
{ 4, 0x00 },
{ 5, 0x00 },
{ 6, 0x00 },
{ 7, 0x00 },
{ 8, 0x00 },
{ 9, 0x00 },
{ 10, 0x00 },
{ 11, 0x00 },
{ 12, 0x00 },
{ 13, 0x00 },
{ 14, 0x00 },
{ 15, 0x00 },
{ 16, 0x00 },
{ 17, 0x00 },
{ 18, 0x00 },
{ 19, 0x00 },
{ 20, 0x00 },
{ 21, 0x00 },
{ 22, 0x00 },
{ 23, 0x00 },
{ 32, 0x00 },
{ 33, 0x00 },
{ 35, 0x00 },
{ 36, 0x00 },
{ 37, 0x00 },
{ 46, 0x00 },
{ 48, 0x00 },
{ 49, 0x00 },
{ 51, 0x00 },
{ 56, 0x00 },
{ 58, 0x00 },
{ 59, 0x00 },
{ 60, 0x00 },
{ 61, 0x00 },
{ 62, 0x00 },
{ 63, 0x00 },
{ 64, 0x00 },
{ 65, 0x00 },
{ 66, 0x00 },
{ 67, 0x00 },
{ 68, 0x00 },
{ 69, 0x00 },
{ 70, 0x00 },
{ 71, 0x00 },
{ 72, 0x00 },
{ 73, 0x00 },
{ 74, 0x00 },
{ 75, 0x00 },
{ 76, 0x00 },
{ 77, 0x00 },
{ 78, 0x00 },
{ 79, 0x00 },
{ 80, 0x00 },
{ 81, 0x00 },
{ 82, 0x00 },
{ 83, 0x00 },
/* Fix: register 85 was listed twice and register 84 was missing. */
{ 84, 0x00 },
{ 85, 0x00 },
{ 86, 0x00 },
{ 87, 0x00 },
{ 88, 0x00 },
{ 89, 0x00 },
{ 90, 0x00 },
{ 91, 0x00 },
{ 92, 0x00 },
{ 93, 0x00 },
{ 94, 0x00 },
{ 95, 0x00 },
{ 96, 0x01 },
{ 97, 0x00 },
{ 98, 0x00 },
{ 99, 0x00 },
{ 100, 0x00 },
{ 101, 0x00 },
{ 102, 0x00 },
{ 103, 0x01 },
{ 104, 0x01 },
{ 105, 0x00 },
{ 106, 0x01 },
{ 107, 0x00 },
{ 108, 0x00 },
{ 109, 0x00 },
{ 110, 0x00 },
{ 111, 0x02 },
{ 112, 0x02 },
{ 113, 0x00 },
{ 121, 0x80 },
{ 122, 0xBB },
{ 123, 0x80 },
{ 124, 0xBB },
{ 128, 0x00 },
{ 130, 0x00 },
{ 131, 0x00 },
{ 132, 0x00 },
{ 133, 0x0A },
{ 134, 0x0A },
{ 135, 0x0A },
{ 136, 0x0F },
{ 137, 0x00 },
{ 138, 0x73 },
{ 139, 0x33 },
{ 140, 0x73 },
{ 141, 0x33 },
{ 142, 0x73 },
{ 143, 0x33 },
{ 144, 0x73 },
{ 145, 0x33 },
{ 146, 0x73 },
{ 147, 0x33 },
{ 148, 0x73 },
{ 149, 0x33 },
{ 150, 0x73 },
{ 151, 0x33 },
{ 152, 0x00 },
{ 153, 0x00 },
{ 154, 0x00 },
{ 155, 0x00 },
{ 176, 0x00 },
{ 177, 0x00 },
{ 178, 0x00 },
{ 179, 0x00 },
{ 180, 0x00 },
{ 181, 0x00 },
{ 182, 0x00 },
{ 183, 0x00 },
{ 184, 0x00 },
{ 185, 0x00 },
{ 186, 0x00 },
{ 187, 0x00 },
{ 188, 0x00 },
{ 189, 0x00 },
{ 208, 0x06 },
{ 209, 0x00 },
{ 210, 0x08 },
{ 211, 0x54 },
{ 212, 0x14 },
{ 213, 0x0d },
{ 214, 0x0d },
{ 215, 0x14 },
{ 216, 0x60 },
{ 221, 0x00 },
{ 222, 0x00 },
{ 223, 0x00 },
{ 224, 0x00 },
{ 248, 0x00 },
{ 249, 0x00 },
{ 250, 0x00 },
{ 255, 0x00 },
};
/* codec private data */
struct lm49453_priv {
struct regmap *regmap;	/* I2C register map handle */
int fs_rate;		/* current sample rate in Hz, set at hw_params time */
};
/* capture path controls */
/* MIC2 input topology: single-ended or differential. */
static const char *lm49453_mic2mode_text[] = {"Single Ended", "Differential"};
static SOC_ENUM_SINGLE_DECL(lm49453_mic2mode_enum, LM49453_P0_MICR_REG, 5,
lm49453_mic2mode_text);
/* Digital-mic data pin selection, shared by both DMIC pairs. */
static const char *lm49453_dmic_cfg_text[] = {"DMICDAT1", "DMICDAT2"};
static SOC_ENUM_SINGLE_DECL(lm49453_dmic12_cfg_enum,
LM49453_P0_DIGITAL_MIC1_CONFIG_REG, 7,
lm49453_dmic_cfg_text);
static SOC_ENUM_SINGLE_DECL(lm49453_dmic34_cfg_enum,
LM49453_P0_DIGITAL_MIC2_CONFIG_REG, 7,
lm49453_dmic_cfg_text);
/* MUX Controls */
/* ADC input source selectors: analog mic or auxiliary line input. */
static const char *lm49453_adcl_mux_text[] = { "MIC1", "Aux_L" };
static const char *lm49453_adcr_mux_text[] = { "MIC2", "Aux_R" };
static SOC_ENUM_SINGLE_DECL(lm49453_adcl_enum,
LM49453_P0_ANALOG_MIXER_ADC_REG, 0,
lm49453_adcl_mux_text);
static SOC_ENUM_SINGLE_DECL(lm49453_adcr_enum,
LM49453_P0_ANALOG_MIXER_ADC_REG, 1,
lm49453_adcr_mux_text);
static const struct snd_kcontrol_new lm49453_adcl_mux_control =
SOC_DAPM_ENUM("ADC Left Mux", lm49453_adcl_enum);
static const struct snd_kcontrol_new lm49453_adcr_mux_control =
SOC_DAPM_ENUM("ADC Right Mux", lm49453_adcr_enum);
/* Headphone left DAC input mixer: port1/port2 RX slots, DMICs, ADCs, sidetone. */
static const struct snd_kcontrol_new lm49453_headset_left_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACHPL1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACHPL1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACHPL1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACHPL1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACHPL1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACHPL1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACHPL1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACHPL1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACHPL2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACHPL2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACHPL2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACHPL2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACHPL2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACHPL2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACHPL2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACHPL2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 0, 0, 0),
};
/* Headphone right DAC input mixer; same sources, right-channel registers. */
static const struct snd_kcontrol_new lm49453_headset_right_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACHPR1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACHPR1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACHPR1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACHPR1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACHPR1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACHPR1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACHPR1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACHPR1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACHPR2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACHPR2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACHPR2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACHPR2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACHPR2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACHPR2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACHPR2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACHPR2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 1, 0, 0),
};
/* Loudspeaker left DAC input mixer. */
static const struct snd_kcontrol_new lm49453_speaker_left_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACLSL1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACLSL1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACLSL1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACLSL1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACLSL1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACLSL1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACLSL1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACLSL1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACLSL2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACLSL2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACLSL2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACLSL2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACLSL2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACLSL2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACLSL2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACLSL2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 2, 0, 0),
};
/* Loudspeaker right DAC input mixer. */
static const struct snd_kcontrol_new lm49453_speaker_right_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACLSR1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACLSR1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACLSR1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACLSR1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACLSR1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACLSR1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACLSR1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACLSR1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACLSR2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACLSR2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACLSR2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACLSR2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACLSR2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACLSR2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACLSR2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACLSR2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 3, 0, 0),
};
/* Haptic left DAC input mixer. */
static const struct snd_kcontrol_new lm49453_haptic_left_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACHAL1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACHAL1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACHAL1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACHAL1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACHAL1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACHAL1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACHAL1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACHAL1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACHAL2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACHAL2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACHAL2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACHAL2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACHAL2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACHAL2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACHAL2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACHAL2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 4, 0, 0),
};
/* Haptic right DAC input mixer. */
static const struct snd_kcontrol_new lm49453_haptic_right_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACHAR1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACHAR1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACHAR1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACHAR1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACHAR1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACHAR1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACHAR1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACHAR1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACHAR2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACHAR2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACHAR2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACHAR2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACHAR2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACHAR2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACHAR2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACHAR2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 5, 0, 0),
};
/* Line-out left DAC input mixer. */
static const struct snd_kcontrol_new lm49453_lineout_left_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACLOL1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACLOL1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACLOL1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACLOL1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACLOL1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACLOL1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACLOL1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACLOL1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACLOL2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACLOL2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACLOL2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACLOL2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACLOL2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACLOL2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACLOL2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACLOL2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 6, 0, 0),
};
/* Line-out right DAC input mixer. */
static const struct snd_kcontrol_new lm49453_lineout_right_mixer[] = {
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_DACLOR1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_DACLOR1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_DACLOR1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_DACLOR1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_DACLOR1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_DACLOR1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_DACLOR1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_DACLOR1_REG, 7, 1, 0),
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_DACLOR2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_DACLOR2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_DACLOR2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_DACLOR2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_DACLOR2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_DACLOR2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_DACLOR2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_DACLOR2_REG, 7, 1, 0),
SOC_DAPM_SINGLE("Sidetone Switch", LM49453_P0_STN_SEL_REG, 7, 0, 0),
};
static const struct snd_kcontrol_new lm49453_port1_tx1_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_PORT1_TX1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_PORT1_TX1_REG, 7, 1, 0),
};
static const struct snd_kcontrol_new lm49453_port1_tx2_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_PORT1_TX2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_PORT1_TX2_REG, 7, 1, 0),
};
static const struct snd_kcontrol_new lm49453_port1_tx3_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX3_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX3_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX3_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX3_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX3_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX3_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_3 Switch", LM49453_P0_PORT1_TX3_REG, 6, 1, 0),
};
static const struct snd_kcontrol_new lm49453_port1_tx4_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX4_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX4_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX4_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX4_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX4_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX4_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_4 Switch", LM49453_P0_PORT1_TX4_REG, 6, 1, 0),
};
/* Port1 TX5 capture mixer: one enable bit per source in PORT1_TX5_REG. */
static const struct snd_kcontrol_new lm49453_port1_tx5_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX5_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX5_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX5_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX5_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX5_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX5_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_5 Switch", LM49453_P0_PORT1_TX5_REG, 6, 1, 0),
};
/* Port1 TX6 capture mixer: one enable bit per source in PORT1_TX6_REG. */
static const struct snd_kcontrol_new lm49453_port1_tx6_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX6_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX6_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX6_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX6_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX6_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX6_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_6 Switch", LM49453_P0_PORT1_TX6_REG, 6, 1, 0),
};
/* Port1 TX7 capture mixer: one enable bit per source in PORT1_TX7_REG. */
static const struct snd_kcontrol_new lm49453_port1_tx7_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX7_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX7_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX7_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX7_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX7_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX7_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_7 Switch", LM49453_P0_PORT1_TX7_REG, 6, 1, 0),
};
/* Port1 TX8 capture mixer: one enable bit per source in PORT1_TX8_REG. */
static const struct snd_kcontrol_new lm49453_port1_tx8_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT1_TX8_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT1_TX8_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT1_TX8_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT1_TX8_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT1_TX8_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT1_TX8_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_8 Switch", LM49453_P0_PORT1_TX8_REG, 6, 1, 0),
};
/* Port2 TX1 capture mixer: one enable bit per source in PORT2_TX1_REG. */
static const struct snd_kcontrol_new lm49453_port2_tx1_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT2_TX1_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT2_TX1_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT2_TX1_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT2_TX1_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT2_TX1_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT2_TX1_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_1 Switch", LM49453_P0_PORT2_TX1_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_1 Switch", LM49453_P0_PORT2_TX1_REG, 7, 1, 0),
};
/* Port2 TX2 capture mixer: one enable bit per source in PORT2_TX2_REG. */
static const struct snd_kcontrol_new lm49453_port2_tx2_mixer[] = {
SOC_DAPM_SINGLE("DMIC1L Switch", LM49453_P0_PORT2_TX2_REG, 0, 1, 0),
SOC_DAPM_SINGLE("DMIC1R Switch", LM49453_P0_PORT2_TX2_REG, 1, 1, 0),
SOC_DAPM_SINGLE("DMIC2L Switch", LM49453_P0_PORT2_TX2_REG, 2, 1, 0),
SOC_DAPM_SINGLE("DMIC2R Switch", LM49453_P0_PORT2_TX2_REG, 3, 1, 0),
SOC_DAPM_SINGLE("ADCL Switch", LM49453_P0_PORT2_TX2_REG, 4, 1, 0),
SOC_DAPM_SINGLE("ADCR Switch", LM49453_P0_PORT2_TX2_REG, 5, 1, 0),
SOC_DAPM_SINGLE("Port1_2 Switch", LM49453_P0_PORT2_TX2_REG, 6, 1, 0),
SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_PORT2_TX2_REG, 7, 1, 0),
};
/* TLV Declarations (min in 0.01 dB, step in 0.01 dB, mute-at-min flag) */
/* ADC/DAC levels: -76.5 dB minimum, 1.5 dB steps, lowest value mutes. */
static const DECLARE_TLV_DB_SCALE(adc_dac_tlv, -7650, 150, 1);
/* Analog mic gain: 0 dB minimum, 2 dB steps, lowest value mutes. */
static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 200, 1);
/* Port RX attenuation: -18 dB minimum, 6 dB steps, no mute. */
static const DECLARE_TLV_DB_SCALE(port_tlv, -1800, 600, 0);
/* Sidetone levels: -72 dB minimum, 1.5 dB steps, no mute. */
static const DECLARE_TLV_DB_SCALE(stn_tlv, -7200, 150, 0);
/* Per-source sidetone volumes attached to the "Sidetone Mixer" widget. */
static const struct snd_kcontrol_new lm49453_sidetone_mixer_controls[] = {
/* Sidetone supports mono only */
SOC_DAPM_SINGLE_TLV("Sidetone ADCL Volume", LM49453_P0_STN_VOL_ADCL_REG,
0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone ADCR Volume", LM49453_P0_STN_VOL_ADCR_REG,
0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC1L Volume", LM49453_P0_STN_VOL_DMIC1L_REG,
0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC1R Volume", LM49453_P0_STN_VOL_DMIC1R_REG,
0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC2L Volume", LM49453_P0_STN_VOL_DMIC2L_REG,
0, 0x3F, 0, stn_tlv),
SOC_DAPM_SINGLE_TLV("Sidetone DMIC2R Volume", LM49453_P0_STN_VOL_DMIC2R_REG,
0, 0x3F, 0, stn_tlv),
};
/* Non-DAPM ALSA controls: volumes, filter switches and port enables. */
static const struct snd_kcontrol_new lm49453_snd_controls[] = {
/* mic1 and mic2 supports mono only */
SOC_SINGLE_TLV("Mic1 Volume", LM49453_P0_MICL_REG, 0, 15, 0, mic_tlv),
SOC_SINGLE_TLV("Mic2 Volume", LM49453_P0_MICR_REG, 0, 15, 0, mic_tlv),
SOC_SINGLE_TLV("ADCL Volume", LM49453_P0_ADC_LEVELL_REG, 0, 63,
0, adc_dac_tlv),
SOC_SINGLE_TLV("ADCR Volume", LM49453_P0_ADC_LEVELR_REG, 0, 63,
0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DMIC1 Volume", LM49453_P0_DMIC1_LEVELL_REG,
LM49453_P0_DMIC1_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DMIC2 Volume", LM49453_P0_DMIC2_LEVELL_REG,
LM49453_P0_DMIC2_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
/*
 * These enums live in the ordinary control list, so they must use
 * SOC_ENUM: the SOC_DAPM_ENUM get/put handlers expect the kcontrol
 * to hang off a DAPM widget and would dereference the wrong private
 * data when registered here.
 */
SOC_ENUM("Mic2Mode", lm49453_mic2mode_enum),
SOC_ENUM("DMIC12 SRC", lm49453_dmic12_cfg_enum),
SOC_ENUM("DMIC34 SRC", lm49453_dmic34_cfg_enum),
/* Capture path filter enable */
SOC_SINGLE("DMIC1 HPFilter Switch", LM49453_P0_ADC_FX_ENABLES_REG,
0, 1, 0),
SOC_SINGLE("DMIC2 HPFilter Switch", LM49453_P0_ADC_FX_ENABLES_REG,
1, 1, 0),
SOC_SINGLE("ADC HPFilter Switch", LM49453_P0_ADC_FX_ENABLES_REG,
2, 1, 0),
SOC_DOUBLE_R_TLV("DAC HP Volume", LM49453_P0_DAC_HP_LEVELL_REG,
LM49453_P0_DAC_HP_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DAC LO Volume", LM49453_P0_DAC_LO_LEVELL_REG,
LM49453_P0_DAC_LO_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DAC LS Volume", LM49453_P0_DAC_LS_LEVELL_REG,
LM49453_P0_DAC_LS_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
SOC_DOUBLE_R_TLV("DAC HA Volume", LM49453_P0_DAC_HA_LEVELL_REG,
LM49453_P0_DAC_HA_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
/*
 * NOTE(review): "EP Volume" reuses LM49453_P0_DAC_LS_LEVELL_REG (the
 * loudspeaker-left level), so it aliases "DAC LS Volume" — confirm
 * against the datasheet whether the earpiece shares that register.
 */
SOC_SINGLE_TLV("EP Volume", LM49453_P0_DAC_LS_LEVELL_REG,
0, 63, 0, adc_dac_tlv),
/* Per-channel RX attenuators, 2 bits each (see port_tlv). */
SOC_SINGLE_TLV("PORT1_1_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL1_REG,
0, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT1_2_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL1_REG,
2, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT1_3_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL1_REG,
4, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT1_4_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL1_REG,
6, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT1_5_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL2_REG,
0, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT1_6_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL2_REG,
2, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT1_7_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL2_REG,
4, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT1_8_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL2_REG,
6, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT2_1_RX_LVL Volume", LM49453_P0_PORT2_RX_LVL_REG,
0, 3, 0, port_tlv),
SOC_SINGLE_TLV("PORT2_2_RX_LVL Volume", LM49453_P0_PORT2_RX_LVL_REG,
2, 3, 0, port_tlv),
SOC_SINGLE("Port1 Playback Switch", LM49453_P0_AUDIO_PORT1_BASIC_REG,
1, 1, 0),
SOC_SINGLE("Port2 Playback Switch", LM49453_P0_AUDIO_PORT2_BASIC_REG,
1, 1, 0),
SOC_SINGLE("Port1 Capture Switch", LM49453_P0_AUDIO_PORT1_BASIC_REG,
2, 1, 0),
SOC_SINGLE("Port2 Capture Switch", LM49453_P0_AUDIO_PORT2_BASIC_REG,
2, 1, 0)
};
/* DAPM widgets: pins, supplies, drivers, DACs/ADCs, muxes and mixers. */
static const struct snd_soc_dapm_widget lm49453_dapm_widgets[] = {
/* All end points HP,EP, LS, Lineout and Haptic */
SND_SOC_DAPM_OUTPUT("HPOUTL"),
SND_SOC_DAPM_OUTPUT("HPOUTR"),
SND_SOC_DAPM_OUTPUT("EPOUT"),
SND_SOC_DAPM_OUTPUT("LSOUTL"),
SND_SOC_DAPM_OUTPUT("LSOUTR"),
SND_SOC_DAPM_OUTPUT("LOOUTR"),
SND_SOC_DAPM_OUTPUT("LOOUTL"),
SND_SOC_DAPM_OUTPUT("HAOUTL"),
SND_SOC_DAPM_OUTPUT("HAOUTR"),
/* Analog/digital microphone and aux line inputs. */
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_INPUT("AMIC2"),
SND_SOC_DAPM_INPUT("DMIC1DAT"),
SND_SOC_DAPM_INPUT("DMIC2DAT"),
SND_SOC_DAPM_INPUT("AUXL"),
SND_SOC_DAPM_INPUT("AUXR"),
/* Virtual RX-channel endpoints (no register backing). */
SND_SOC_DAPM_PGA("PORT1_1_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT1_2_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT1_3_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT1_4_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT1_5_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT1_6_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT1_7_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT1_8_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT2_1_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("PORT2_2_RX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("AMIC1Bias", LM49453_P0_MICL_REG, 6, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("AMIC2Bias", LM49453_P0_MICR_REG, 6, 0, NULL, 0),
/* playback path driver enables */
SND_SOC_DAPM_OUT_DRV("Headset Switch",
LM49453_P0_PMC_SETUP_REG, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("Earpiece Switch",
LM49453_P0_EP_REG, 0, 0, NULL, 0),
/* Speaker/haptic enables are active-low (invert = 1). */
SND_SOC_DAPM_OUT_DRV("Speaker Left Switch",
LM49453_P0_DIS_PKVL_FB_REG, 0, 1, NULL, 0),
SND_SOC_DAPM_OUT_DRV("Speaker Right Switch",
LM49453_P0_DIS_PKVL_FB_REG, 1, 1, NULL, 0),
SND_SOC_DAPM_OUT_DRV("Haptic Left Switch",
LM49453_P0_DIS_PKVL_FB_REG, 2, 1, NULL, 0),
SND_SOC_DAPM_OUT_DRV("Haptic Right Switch",
LM49453_P0_DIS_PKVL_FB_REG, 3, 1, NULL, 0),
/* DAC */
SND_SOC_DAPM_DAC("HPL DAC", "Headset", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("HPR DAC", "Headset", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("LSL DAC", "Speaker", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("LSR DAC", "Speaker", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("HAL DAC", "Haptic", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("HAR DAC", "Haptic", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("LOL DAC", "Lineout", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("LOR DAC", "Lineout", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_PGA("AUXL Input",
LM49453_P0_ANALOG_MIXER_ADC_REG, 2, 0, NULL, 0),
SND_SOC_DAPM_PGA("AUXR Input",
LM49453_P0_ANALOG_MIXER_ADC_REG, 3, 0, NULL, 0),
SND_SOC_DAPM_PGA("Sidetone", SND_SOC_NOPM, 0, 0, NULL, 0),
/* ADC */
/*
 * NOTE(review): the shift arguments below (1 vs 0) are irrelevant on
 * SND_SOC_NOPM widgets, but the asymmetry between "ADC Left" (1) and
 * "ADC Right" (0) looks accidental — harmless either way.
 */
SND_SOC_DAPM_ADC("DMIC1 Left", "Capture", SND_SOC_NOPM, 1, 0),
SND_SOC_DAPM_ADC("DMIC1 Right", "Capture", SND_SOC_NOPM, 1, 0),
SND_SOC_DAPM_ADC("DMIC2 Left", "Capture", SND_SOC_NOPM, 1, 0),
SND_SOC_DAPM_ADC("DMIC2 Right", "Capture", SND_SOC_NOPM, 1, 0),
SND_SOC_DAPM_ADC("ADC Left", "Capture", SND_SOC_NOPM, 1, 0),
SND_SOC_DAPM_ADC("ADC Right", "Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_MUX("ADCL Mux", SND_SOC_NOPM, 0, 0,
&lm49453_adcl_mux_control),
SND_SOC_DAPM_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0,
&lm49453_adcr_mux_control),
/*
 * NOTE(review): the Mic input muxes reuse the ADC mux controls —
 * presumably intentional sharing; confirm against the mux definitions.
 */
SND_SOC_DAPM_MUX("Mic1 Input",
SND_SOC_NOPM, 0, 0, &lm49453_adcl_mux_control),
SND_SOC_DAPM_MUX("Mic2 Input",
SND_SOC_NOPM, 0, 0, &lm49453_adcr_mux_control),
/* AIF */
SND_SOC_DAPM_AIF_IN("PORT1_SDI", NULL, 0,
LM49453_P0_PULL_CONFIG1_REG, 2, 0),
SND_SOC_DAPM_AIF_IN("PORT2_SDI", NULL, 0,
LM49453_P0_PULL_CONFIG1_REG, 6, 0),
SND_SOC_DAPM_AIF_OUT("PORT1_SDO", NULL, 0,
LM49453_P0_PULL_CONFIG1_REG, 3, 0),
SND_SOC_DAPM_AIF_OUT("PORT2_SDO", NULL, 0,
LM49453_P0_PULL_CONFIG1_REG, 7, 0),
/* Port1 TX controls */
SND_SOC_DAPM_OUT_DRV("P1_1_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P1_2_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P1_3_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P1_4_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P1_5_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P1_6_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P1_7_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P1_8_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
/* Port2 TX controls */
SND_SOC_DAPM_OUT_DRV("P2_1_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_OUT_DRV("P2_2_TX", SND_SOC_NOPM, 0, 0, NULL, 0),
/* Sidetone Mixer */
SND_SOC_DAPM_MIXER("Sidetone Mixer", SND_SOC_NOPM, 0, 0,
lm49453_sidetone_mixer_controls,
ARRAY_SIZE(lm49453_sidetone_mixer_controls)),
/* DAC MIXERS */
SND_SOC_DAPM_MIXER("HPL Mixer", SND_SOC_NOPM, 0, 0,
lm49453_headset_left_mixer,
ARRAY_SIZE(lm49453_headset_left_mixer)),
SND_SOC_DAPM_MIXER("HPR Mixer", SND_SOC_NOPM, 0, 0,
lm49453_headset_right_mixer,
ARRAY_SIZE(lm49453_headset_right_mixer)),
SND_SOC_DAPM_MIXER("LOL Mixer", SND_SOC_NOPM, 0, 0,
lm49453_lineout_left_mixer,
ARRAY_SIZE(lm49453_lineout_left_mixer)),
SND_SOC_DAPM_MIXER("LOR Mixer", SND_SOC_NOPM, 0, 0,
lm49453_lineout_right_mixer,
ARRAY_SIZE(lm49453_lineout_right_mixer)),
SND_SOC_DAPM_MIXER("LSL Mixer", SND_SOC_NOPM, 0, 0,
lm49453_speaker_left_mixer,
ARRAY_SIZE(lm49453_speaker_left_mixer)),
SND_SOC_DAPM_MIXER("LSR Mixer", SND_SOC_NOPM, 0, 0,
lm49453_speaker_right_mixer,
ARRAY_SIZE(lm49453_speaker_right_mixer)),
SND_SOC_DAPM_MIXER("HAL Mixer", SND_SOC_NOPM, 0, 0,
lm49453_haptic_left_mixer,
ARRAY_SIZE(lm49453_haptic_left_mixer)),
SND_SOC_DAPM_MIXER("HAR Mixer", SND_SOC_NOPM, 0, 0,
lm49453_haptic_right_mixer,
ARRAY_SIZE(lm49453_haptic_right_mixer)),
/* Capture Mixer */
SND_SOC_DAPM_MIXER("Port1_1 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx1_mixer,
ARRAY_SIZE(lm49453_port1_tx1_mixer)),
SND_SOC_DAPM_MIXER("Port1_2 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx2_mixer,
ARRAY_SIZE(lm49453_port1_tx2_mixer)),
SND_SOC_DAPM_MIXER("Port1_3 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx3_mixer,
ARRAY_SIZE(lm49453_port1_tx3_mixer)),
SND_SOC_DAPM_MIXER("Port1_4 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx4_mixer,
ARRAY_SIZE(lm49453_port1_tx4_mixer)),
SND_SOC_DAPM_MIXER("Port1_5 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx5_mixer,
ARRAY_SIZE(lm49453_port1_tx5_mixer)),
SND_SOC_DAPM_MIXER("Port1_6 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx6_mixer,
ARRAY_SIZE(lm49453_port1_tx6_mixer)),
SND_SOC_DAPM_MIXER("Port1_7 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx7_mixer,
ARRAY_SIZE(lm49453_port1_tx7_mixer)),
SND_SOC_DAPM_MIXER("Port1_8 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port1_tx8_mixer,
ARRAY_SIZE(lm49453_port1_tx8_mixer)),
SND_SOC_DAPM_MIXER("Port2_1 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port2_tx1_mixer,
ARRAY_SIZE(lm49453_port2_tx1_mixer)),
SND_SOC_DAPM_MIXER("Port2_2 Mixer", SND_SOC_NOPM, 0, 0,
lm49453_port2_tx2_mixer,
ARRAY_SIZE(lm49453_port2_tx2_mixer)),
};
/* DAPM routes: { sink, control (NULL = always on), source }. */
static const struct snd_soc_dapm_route lm49453_audio_map[] = {
/* Port SDI mapping */
{ "PORT1_1_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT1_2_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT1_3_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT1_4_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT1_5_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT1_6_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT1_7_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT1_8_RX", "Port1 Playback Switch", "PORT1_SDI" },
{ "PORT2_1_RX", "Port2 Playback Switch", "PORT2_SDI" },
{ "PORT2_2_RX", "Port2 Playback Switch", "PORT2_SDI" },
/* HP mapping */
{ "HPL Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "HPL Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "HPL Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "HPL Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "HPL Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "HPL Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "HPL Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "HPL Mixer", "Port1_8 Switch", "PORT1_8_RX" },
{ "HPL Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "HPL Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "HPL Mixer", "ADCL Switch", "ADC Left" },
{ "HPL Mixer", "ADCR Switch", "ADC Right" },
{ "HPL Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "HPL Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "HPL Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "HPL Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "HPL Mixer", "Sidetone Switch", "Sidetone" },
{ "HPL DAC", NULL, "HPL Mixer" },
{ "HPR Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "HPR Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "HPR Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "HPR Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "HPR Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "HPR Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "HPR Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "HPR Mixer", "Port1_8 Switch", "PORT1_8_RX" },
/* Port 2 */
{ "HPR Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "HPR Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "HPR Mixer", "ADCL Switch", "ADC Left" },
{ "HPR Mixer", "ADCR Switch", "ADC Right" },
{ "HPR Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "HPR Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "HPR Mixer", "DMIC2L Switch", "DMIC2 Left" },
/* Fix: "DMIC2 Right" was routed via "DMIC2L Switch" (copy-paste typo). */
{ "HPR Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "HPR Mixer", "Sidetone Switch", "Sidetone" },
{ "HPR DAC", NULL, "HPR Mixer" },
{ "HPOUTL", "Headset Switch", "HPL DAC"},
{ "HPOUTR", "Headset Switch", "HPR DAC"},
/* EP map */
{ "EPOUT", "Earpiece Switch", "HPL DAC" },
/* Speaker map */
{ "LSL Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "LSL Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "LSL Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "LSL Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "LSL Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "LSL Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "LSL Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "LSL Mixer", "Port1_8 Switch", "PORT1_8_RX" },
/* Port 2 */
{ "LSL Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "LSL Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "LSL Mixer", "ADCL Switch", "ADC Left" },
{ "LSL Mixer", "ADCR Switch", "ADC Right" },
{ "LSL Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "LSL Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "LSL Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "LSL Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "LSL Mixer", "Sidetone Switch", "Sidetone" },
{ "LSL DAC", NULL, "LSL Mixer" },
{ "LSR Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "LSR Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "LSR Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "LSR Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "LSR Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "LSR Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "LSR Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "LSR Mixer", "Port1_8 Switch", "PORT1_8_RX" },
/* Port 2 */
{ "LSR Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "LSR Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "LSR Mixer", "ADCL Switch", "ADC Left" },
{ "LSR Mixer", "ADCR Switch", "ADC Right" },
{ "LSR Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "LSR Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "LSR Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "LSR Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "LSR Mixer", "Sidetone Switch", "Sidetone" },
{ "LSR DAC", NULL, "LSR Mixer" },
{ "LSOUTL", "Speaker Left Switch", "LSL DAC"},
/* Fix: right speaker output was gated by the LEFT speaker switch. */
{ "LSOUTR", "Speaker Right Switch", "LSR DAC"},
/* Haptic map */
{ "HAL Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "HAL Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "HAL Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "HAL Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "HAL Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "HAL Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "HAL Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "HAL Mixer", "Port1_8 Switch", "PORT1_8_RX" },
/* Port 2 */
{ "HAL Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "HAL Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "HAL Mixer", "ADCL Switch", "ADC Left" },
{ "HAL Mixer", "ADCR Switch", "ADC Right" },
{ "HAL Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "HAL Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "HAL Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "HAL Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "HAL Mixer", "Sidetone Switch", "Sidetone" },
{ "HAL DAC", NULL, "HAL Mixer" },
{ "HAR Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "HAR Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "HAR Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "HAR Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "HAR Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "HAR Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "HAR Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "HAR Mixer", "Port1_8 Switch", "PORT1_8_RX" },
/* Port 2 */
{ "HAR Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "HAR Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "HAR Mixer", "ADCL Switch", "ADC Left" },
{ "HAR Mixer", "ADCR Switch", "ADC Right" },
{ "HAR Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "HAR Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "HAR Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "HAR Mixer", "DMIC2R Switch", "DMIC2 Right" },
/*
 * Fix: was "Sideton Switch" (typo) — every other mixer route uses
 * "Sidetone Switch". Confirm lm49453_haptic_right_mixer names its
 * control "Sidetone Switch" too.
 */
{ "HAR Mixer", "Sidetone Switch", "Sidetone" },
{ "HAR DAC", NULL, "HAR Mixer" },
{ "HAOUTL", "Haptic Left Switch", "HAL DAC" },
{ "HAOUTR", "Haptic Right Switch", "HAR DAC" },
/* Lineout map */
{ "LOL Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "LOL Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "LOL Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "LOL Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "LOL Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "LOL Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "LOL Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "LOL Mixer", "Port1_8 Switch", "PORT1_8_RX" },
/* Port 2 */
{ "LOL Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "LOL Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "LOL Mixer", "ADCL Switch", "ADC Left" },
{ "LOL Mixer", "ADCR Switch", "ADC Right" },
{ "LOL Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "LOL Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "LOL Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "LOL Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "LOL Mixer", "Sidetone Switch", "Sidetone" },
{ "LOL DAC", NULL, "LOL Mixer" },
{ "LOR Mixer", "Port1_1 Switch", "PORT1_1_RX" },
{ "LOR Mixer", "Port1_2 Switch", "PORT1_2_RX" },
{ "LOR Mixer", "Port1_3 Switch", "PORT1_3_RX" },
{ "LOR Mixer", "Port1_4 Switch", "PORT1_4_RX" },
{ "LOR Mixer", "Port1_5 Switch", "PORT1_5_RX" },
{ "LOR Mixer", "Port1_6 Switch", "PORT1_6_RX" },
{ "LOR Mixer", "Port1_7 Switch", "PORT1_7_RX" },
{ "LOR Mixer", "Port1_8 Switch", "PORT1_8_RX" },
/* Port 2 */
{ "LOR Mixer", "Port2_1 Switch", "PORT2_1_RX" },
{ "LOR Mixer", "Port2_2 Switch", "PORT2_2_RX" },
{ "LOR Mixer", "ADCL Switch", "ADC Left" },
{ "LOR Mixer", "ADCR Switch", "ADC Right" },
{ "LOR Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "LOR Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "LOR Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "LOR Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "LOR Mixer", "Sidetone Switch", "Sidetone" },
{ "LOR DAC", NULL, "LOR Mixer" },
{ "LOOUTL", NULL, "LOL DAC" },
{ "LOOUTR", NULL, "LOR DAC" },
/* TX map */
/* Port1 mappings */
{ "Port1_1 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_1 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_1 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_1 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_1 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_1 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port1_2 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_2 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_2 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_2 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_2 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_2 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port1_3 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_3 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_3 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_3 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_3 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_3 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port1_4 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_4 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_4 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_4 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_4 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_4 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port1_5 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_5 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_5 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_5 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_5 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_5 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port1_6 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_6 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_6 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_6 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_6 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_6 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port1_7 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_7 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_7 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_7 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_7 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_7 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port1_8 Mixer", "ADCL Switch", "ADC Left" },
{ "Port1_8 Mixer", "ADCR Switch", "ADC Right" },
{ "Port1_8 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port1_8 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port1_8 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port1_8 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port2_1 Mixer", "ADCL Switch", "ADC Left" },
{ "Port2_1 Mixer", "ADCR Switch", "ADC Right" },
{ "Port2_1 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port2_1 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port2_1 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port2_1 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "Port2_2 Mixer", "ADCL Switch", "ADC Left" },
{ "Port2_2 Mixer", "ADCR Switch", "ADC Right" },
{ "Port2_2 Mixer", "DMIC1L Switch", "DMIC1 Left" },
{ "Port2_2 Mixer", "DMIC1R Switch", "DMIC1 Right" },
{ "Port2_2 Mixer", "DMIC2L Switch", "DMIC2 Left" },
{ "Port2_2 Mixer", "DMIC2R Switch", "DMIC2 Right" },
{ "P1_1_TX", NULL, "Port1_1 Mixer" },
{ "P1_2_TX", NULL, "Port1_2 Mixer" },
{ "P1_3_TX", NULL, "Port1_3 Mixer" },
{ "P1_4_TX", NULL, "Port1_4 Mixer" },
{ "P1_5_TX", NULL, "Port1_5 Mixer" },
{ "P1_6_TX", NULL, "Port1_6 Mixer" },
{ "P1_7_TX", NULL, "Port1_7 Mixer" },
{ "P1_8_TX", NULL, "Port1_8 Mixer" },
{ "P2_1_TX", NULL, "Port2_1 Mixer" },
{ "P2_2_TX", NULL, "Port2_2 Mixer" },
{ "PORT1_SDO", "Port1 Capture Switch", "P1_1_TX"},
{ "PORT1_SDO", "Port1 Capture Switch", "P1_2_TX"},
{ "PORT1_SDO", "Port1 Capture Switch", "P1_3_TX"},
{ "PORT1_SDO", "Port1 Capture Switch", "P1_4_TX"},
{ "PORT1_SDO", "Port1 Capture Switch", "P1_5_TX"},
{ "PORT1_SDO", "Port1 Capture Switch", "P1_6_TX"},
{ "PORT1_SDO", "Port1 Capture Switch", "P1_7_TX"},
{ "PORT1_SDO", "Port1 Capture Switch", "P1_8_TX"},
{ "PORT2_SDO", "Port2 Capture Switch", "P2_1_TX"},
{ "PORT2_SDO", "Port2 Capture Switch", "P2_2_TX"},
{ "Mic1 Input", NULL, "AMIC1" },
{ "Mic2 Input", NULL, "AMIC2" },
{ "AUXL Input", NULL, "AUXL" },
{ "AUXR Input", NULL, "AUXR" },
/* AUX connections */
{ "ADCL Mux", "Aux_L", "AUXL Input" },
{ "ADCL Mux", "MIC1", "Mic1 Input" },
{ "ADCR Mux", "Aux_R", "AUXR Input" },
{ "ADCR Mux", "MIC2", "Mic2 Input" },
/* ADC connection */
{ "ADC Left", NULL, "ADCL Mux"},
{ "ADC Right", NULL, "ADCR Mux"},
{ "DMIC1 Left", NULL, "DMIC1DAT"},
{ "DMIC1 Right", NULL, "DMIC1DAT"},
{ "DMIC2 Left", NULL, "DMIC2DAT"},
{ "DMIC2 Right", NULL, "DMIC2DAT"},
/* Sidetone map */
{ "Sidetone Mixer", NULL, "ADC Left" },
{ "Sidetone Mixer", NULL, "ADC Right" },
{ "Sidetone Mixer", NULL, "DMIC1 Left" },
{ "Sidetone Mixer", NULL, "DMIC1 Right" },
{ "Sidetone Mixer", NULL, "DMIC2 Left" },
{ "Sidetone Mixer", NULL, "DMIC2 Right" },
{ "Sidetone", "Sidetone Switch", "Sidetone Mixer" },
};
/*
 * lm49453_hw_params() - program the ADC and headphone-DAC clock
 * dividers for the requested sample rate.
 *
 * Stores the rate in the driver private data and returns -EINVAL for
 * rates outside the supported set.
 */
static int lm49453_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params,
			     struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct lm49453_priv *lm49453 = snd_soc_codec_get_drvdata(codec);
	unsigned int rate = params_rate(params);
	u16 clk_div;

	lm49453->fs_rate = rate;

	/* Divider choice depends on the sample-rate family. */
	if (rate == 8000 || rate == 16000 || rate == 24000 ||
	    rate == 32000 || rate == 48000)
		clk_div = 256;
	else if (rate == 11025 || rate == 22050 || rate == 44100)
		clk_div = 216;
	else if (rate == 96000)
		clk_div = 127;
	else
		return -EINVAL;

	snd_soc_write(codec, LM49453_P0_ADC_CLK_DIV_REG, clk_div);
	snd_soc_write(codec, LM49453_P0_DAC_HP_CLK_DIV_REG, clk_div);

	return 0;
}
/*
 * lm49453_set_dai_fmt() - configure audio-port-1 master/slave role and
 * serial data format (I2S or DSP A/B).
 *
 * Writes the clock/sync master bits plus format mode and clock-phase
 * bits into AUDIO_PORT1_BASIC, and the data-shift value into
 * AUDIO_PORT1_RX_MSB. Returns -EINVAL for unsupported combinations.
 */
static int lm49453_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 master_bits = 0;
	int fmt_mode = 0;
	int phase_bit = 0;
	int msb_shift = 0;

	/* Who drives BCLK and FSYNC. */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* codec is slave for both clocks */
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
		master_bits = LM49453_AUDIO_PORT1_BASIC_SYNC_MS;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		master_bits = LM49453_AUDIO_PORT1_BASIC_CLK_MS;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		master_bits = LM49453_AUDIO_PORT1_BASIC_CLK_MS |
			      LM49453_AUDIO_PORT1_BASIC_SYNC_MS;
		break;
	default:
		return -EINVAL;
	}

	/* Serial format: I2S keeps all defaults; DSP A/B set mode/phase. */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		break;
	case SND_SOC_DAIFMT_DSP_A:
		fmt_mode = 1;
		phase_bit = BIT(5);
		msb_shift = 1;	/* data delayed one BCLK after sync */
		break;
	case SND_SOC_DAIFMT_DSP_B:
		fmt_mode = 1;
		phase_bit = BIT(5);
		msb_shift = 0;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, LM49453_P0_AUDIO_PORT1_BASIC_REG,
			    LM49453_AUDIO_PORT1_BASIC_FMT_MASK | BIT(0) |
			    BIT(5),
			    master_bits | fmt_mode | phase_bit);
	snd_soc_write(codec, LM49453_P0_AUDIO_PORT1_RX_MSB_REG, msb_shift);

	return 0;
}
/*
 * lm49453_set_dai_sysclk() - select PLL or FLL reference in PMC_SETUP
 * (bit 4) based on the supplied clock frequency.
 *
 * Fix: the FLL case previously did "return 0" immediately after
 * setting pll_clk = BIT(4), so the register was never written and the
 * FLL selection silently had no effect. Fall through to the update
 * instead.
 */
static int lm49453_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
				  unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = dai->codec;
	u16 pll_clk;

	switch (freq) {
	case 12288000:
	case 26000000:
	case 19200000:
		/* PLL clock selection */
		pll_clk = 0;
		break;
	case 48000:
	case 32576:
		/* FLL clock selection */
		pll_clk = BIT(4);
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, LM49453_P0_PMC_SETUP_REG, BIT(4), pll_clk);

	return 0;
}
/* Mute/unmute both headphone DAC channels (DAC_DSP bits 0-1). */
static int lm49453_hp_mute(struct snd_soc_dai *dai, int mute)
{
	unsigned int mask = BIT(1) | BIT(0);

	snd_soc_update_bits(dai->codec, LM49453_P0_DAC_DSP_REG, mask,
			    mute ? mask : 0);
	return 0;
}
/* Mute/unmute both line-out DAC channels (DAC_DSP bits 2-3). */
static int lm49453_lo_mute(struct snd_soc_dai *dai, int mute)
{
	unsigned int mask = BIT(3) | BIT(2);

	snd_soc_update_bits(dai->codec, LM49453_P0_DAC_DSP_REG, mask,
			    mute ? mask : 0);
	return 0;
}
/* Mute/unmute both loudspeaker DAC channels (DAC_DSP bits 4-5). */
static int lm49453_ls_mute(struct snd_soc_dai *dai, int mute)
{
	unsigned int mask = BIT(5) | BIT(4);

	snd_soc_update_bits(dai->codec, LM49453_P0_DAC_DSP_REG, mask,
			    mute ? mask : 0);
	return 0;
}
/*
 * Mute/unmute the earpiece (DAC_DSP bit 4).
 * NOTE(review): bit 4 is also part of the loudspeaker mute mask in
 * lm49453_ls_mute() — confirm against the datasheet that the earpiece
 * really shares this bit.
 */
static int lm49453_ep_mute(struct snd_soc_dai *dai, int mute)
{
	unsigned int mask = BIT(4);

	snd_soc_update_bits(dai->codec, LM49453_P0_DAC_DSP_REG, mask,
			    mute ? mask : 0);
	return 0;
}
/* Mute/unmute both haptic DAC channels (DAC_DSP bits 6-7). */
static int lm49453_ha_mute(struct snd_soc_dai *dai, int mute)
{
	unsigned int mask = BIT(7) | BIT(6);

	snd_soc_update_bits(dai->codec, LM49453_P0_DAC_DSP_REG, mask,
			    mute ? mask : 0);
	return 0;
}
/*
 * lm49453_set_bias_level() - DAPM bias management.
 *
 * STANDBY enables the chip (resyncing the register cache when coming
 * up from OFF); OFF clears the chip-enable bit. ON and PREPARE need no
 * register work.
 */
static int lm49453_set_bias_level(struct snd_soc_codec *codec,
				  enum snd_soc_bias_level level)
{
	struct lm49453_priv *lm49453 = snd_soc_codec_get_drvdata(codec);

	if (level == SND_SOC_BIAS_STANDBY) {
		/* Restore cached registers if the chip was fully off. */
		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
			regcache_sync(lm49453->regmap);
		snd_soc_update_bits(codec, LM49453_P0_PMC_SETUP_REG,
				    LM49453_PMC_SETUP_CHIP_EN,
				    LM49453_CHIP_EN);
	} else if (level == SND_SOC_BIAS_OFF) {
		snd_soc_update_bits(codec, LM49453_P0_PMC_SETUP_REG,
				    LM49453_PMC_SETUP_CHIP_EN, 0);
	}
	/* SND_SOC_BIAS_ON / SND_SOC_BIAS_PREPARE: nothing to do. */

	return 0;
}
/* Sample formats supported by the LM49453 driver. */
#define LM49453_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
/* DAI ops for the headset DAI; mute targets the HP DAC path. */
static struct snd_soc_dai_ops lm49453_headset_dai_ops = {
.hw_params = lm49453_hw_params,
.set_sysclk = lm49453_set_dai_sysclk,
.set_fmt = lm49453_set_dai_fmt,
.digital_mute = lm49453_hp_mute,
};
/* DAI ops for the speaker DAI; mute targets the LS DAC path. */
static struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
.hw_params = lm49453_hw_params,
.set_sysclk = lm49453_set_dai_sysclk,
.set_fmt = lm49453_set_dai_fmt,
.digital_mute = lm49453_ls_mute,
};
/* DAI ops for the haptic DAI; mute targets the HA DAC path. */
static struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
.hw_params = lm49453_hw_params,
.set_sysclk = lm49453_set_dai_sysclk,
.set_fmt = lm49453_set_dai_fmt,
.digital_mute = lm49453_ha_mute,
};
/* DAI ops for the earpiece DAI; mute targets the EP path. */
static struct snd_soc_dai_ops lm49453_ep_dai_ops = {
.hw_params = lm49453_hw_params,
.set_sysclk = lm49453_set_dai_sysclk,
.set_fmt = lm49453_set_dai_fmt,
.digital_mute = lm49453_ep_mute,
};
/* DAI ops for the line-out DAI; mute targets the LO DAC path. */
static struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
.hw_params = lm49453_hw_params,
.set_sysclk = lm49453_set_dai_sysclk,
.set_fmt = lm49453_set_dai_fmt,
.digital_mute = lm49453_lo_mute,
};
/*
 * LM49453 DAI structures: one DAI per output path (headset, speaker,
 * haptic, earpiece, line out). Only the headset DAI exposes capture.
 */
static struct snd_soc_dai_driver lm49453_dai[] = {
{
.name = "LM49453 Headset",
.playback = {
.stream_name = "Headset",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = LM49453_FORMATS,
},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 5,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = LM49453_FORMATS,
},
.ops = &lm49453_headset_dai_ops,
/* capture and playback must run at the same rate */
.symmetric_rates = 1,
},
{
.name = "LM49453 Speaker",
.playback = {
.stream_name = "Speaker",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = LM49453_FORMATS,
},
.ops = &lm49453_speaker_dai_ops,
},
{
.name = "LM49453 Haptic",
.playback = {
.stream_name = "Haptic",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = LM49453_FORMATS,
},
.ops = &lm49453_haptic_dai_ops,
},
{
.name = "LM49453 Earpiece",
.playback = {
.stream_name = "Earpiece",
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = LM49453_FORMATS,
},
.ops = &lm49453_ep_dai_ops,
},
{
.name = "LM49453 line out",
.playback = {
.stream_name = "Lineout",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = LM49453_FORMATS,
},
.ops = &lm49453_lineout_dai_ops,
},
};
/* ASoC codec driver descriptor: controls, DAPM topology, bias handling. */
static struct snd_soc_codec_driver soc_codec_dev_lm49453 = {
	.set_bias_level = lm49453_set_bias_level,
	.controls = lm49453_snd_controls,
	.num_controls = ARRAY_SIZE(lm49453_snd_controls),
	.dapm_widgets = lm49453_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(lm49453_dapm_widgets),
	.dapm_routes = lm49453_audio_map,
	.num_dapm_routes = ARRAY_SIZE(lm49453_audio_map),
	/* keep bias off when idle; core calls set_bias_level as needed */
	.idle_bias_off = true,
};
/* 8-bit register / 8-bit value I2C register map, cached in an rbtree. */
static const struct regmap_config lm49453_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = LM49453_MAX_REGISTER,
	.reg_defaults = lm49453_reg_defs,
	.num_reg_defaults = ARRAY_SIZE(lm49453_reg_defs),
	.cache_type = REGCACHE_RBTREE,
};
/*
 * I2C probe: allocate driver state, create the register map and register
 * the codec with ASoC.  Returns 0 on success or a negative errno.
 */
static int lm49453_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	struct lm49453_priv *priv;
	int err;

	priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	i2c_set_clientdata(i2c, priv);

	priv->regmap = devm_regmap_init_i2c(i2c, &lm49453_regmap_config);
	if (IS_ERR(priv->regmap)) {
		err = PTR_ERR(priv->regmap);
		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
			err);
		return err;
	}

	err = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_lm49453,
				     lm49453_dai, ARRAY_SIZE(lm49453_dai));
	if (err < 0)
		dev_err(&i2c->dev, "Failed to register codec: %d\n", err);

	return err;
}
/* I2C remove: unregister the codec; devm frees the rest. */
static int lm49453_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}
/* I2C device id table; also exported for module autoloading. */
static const struct i2c_device_id lm49453_i2c_id[] = {
	{ "lm49453", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, lm49453_i2c_id);
/* I2C driver glue; module_i2c_driver() generates module init/exit. */
static struct i2c_driver lm49453_i2c_driver = {
	.driver = {
		.name = "lm49453",
		.owner = THIS_MODULE,
	},
	.probe = lm49453_i2c_probe,
	.remove = lm49453_i2c_remove,
	.id_table = lm49453_i2c_id,
};

module_i2c_driver(lm49453_i2c_driver);
MODULE_DESCRIPTION("ASoC LM49453 driver");
MODULE_AUTHOR("M R Swami Reddy <MR.Swami.Reddy@ti.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
project-voodoo-vibrant/linux_sgh-t759 | fs/btrfs/super.c | 702 | 22424 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/parser.h>
#include <linux/ctype.h>
#include <linux/namei.h>
#include <linux/miscdevice.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "xattr.h"
#include "volumes.h"
#include "version.h"
#include "export.h"
#include "compression.h"
static const struct super_operations btrfs_super_ops;
/*
 * ->put_super: tear down the btrfs trees for this super block.
 *
 * close_ctree() logs its own errors and nothing useful can be done about
 * a failure this late in umount, so its return value is deliberately
 * ignored (the previous code stored it in a local that was never read).
 */
static void btrfs_put_super(struct super_block *sb)
{
	struct btrfs_root *root = btrfs_sb(sb);

	close_ctree(root);
	sb->s_fs_info = NULL;
}
/* Token ids for the mount-option parser; Opt_err must stay last. */
enum {
	Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum,
	Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd,
	Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
	Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
	Opt_discard, Opt_err,
};
/* match_token() patterns mapping mount-option strings to token ids. */
static match_table_t tokens = {
	{Opt_degraded, "degraded"},
	{Opt_subvol, "subvol=%s"},
	{Opt_subvolid, "subvolid=%d"},
	{Opt_device, "device=%s"},
	{Opt_nodatasum, "nodatasum"},
	{Opt_nodatacow, "nodatacow"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_max_inline, "max_inline=%s"},
	{Opt_alloc_start, "alloc_start=%s"},
	{Opt_thread_pool, "thread_pool=%d"},
	{Opt_compress, "compress"},
	{Opt_compress_force, "compress-force"},
	{Opt_ssd, "ssd"},
	{Opt_ssd_spread, "ssd_spread"},
	{Opt_nossd, "nossd"},
	{Opt_noacl, "noacl"},
	{Opt_notreelog, "notreelog"},
	{Opt_flushoncommit, "flushoncommit"},
	{Opt_ratio, "metadata_ratio=%d"},
	{Opt_discard, "discard"},
	{Opt_err, NULL},
};
/*
 * Regular mount options parser. Everything that is needed only when
 * reading in a new superblock is parsed here.
 *
 * @root:    fs root whose fs_info->mount_opt flags are updated in place
 * @options: comma-separated option string from mount(2); may be NULL
 *
 * Returns 0 on success, -ENOMEM if the option string cannot be
 * duplicated, or -EINVAL on an unrecognized option.
 */
int btrfs_parse_options(struct btrfs_root *root, char *options)
{
	struct btrfs_fs_info *info = root->fs_info;
	substring_t args[MAX_OPT_ARGS];
	char *p, *num, *orig;
	int intarg;
	int ret = 0;

	if (!options)
		return 0;

	/*
	 * strsep changes the string, duplicate it because parse_options
	 * gets called twice
	 */
	options = kstrdup(options, GFP_NOFS);
	if (!options)
		return -ENOMEM;

	/* keep the original pointer: strsep() advances 'options' */
	orig = options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_degraded:
			printk(KERN_INFO "btrfs: allowing degraded mounts\n");
			btrfs_set_opt(info->mount_opt, DEGRADED);
			break;
		case Opt_subvol:
		case Opt_subvolid:
		case Opt_device:
			/*
			 * These are parsed by btrfs_parse_early_options
			 * and can be happily ignored here.
			 */
			break;
		case Opt_nodatasum:
			printk(KERN_INFO "btrfs: setting nodatasum\n");
			btrfs_set_opt(info->mount_opt, NODATASUM);
			break;
		case Opt_nodatacow:
			/* nodatacow implies nodatasum */
			printk(KERN_INFO "btrfs: setting nodatacow\n");
			btrfs_set_opt(info->mount_opt, NODATACOW);
			btrfs_set_opt(info->mount_opt, NODATASUM);
			break;
		case Opt_compress:
			printk(KERN_INFO "btrfs: use compression\n");
			btrfs_set_opt(info->mount_opt, COMPRESS);
			break;
		case Opt_compress_force:
			printk(KERN_INFO "btrfs: forcing compression\n");
			btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
			btrfs_set_opt(info->mount_opt, COMPRESS);
			break;
		case Opt_ssd:
			printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
			btrfs_set_opt(info->mount_opt, SSD);
			break;
		case Opt_ssd_spread:
			printk(KERN_INFO "btrfs: use spread ssd "
			       "allocation scheme\n");
			btrfs_set_opt(info->mount_opt, SSD);
			btrfs_set_opt(info->mount_opt, SSD_SPREAD);
			break;
		case Opt_nossd:
			printk(KERN_INFO "btrfs: not using ssd allocation "
			       "scheme\n");
			btrfs_set_opt(info->mount_opt, NOSSD);
			btrfs_clear_opt(info->mount_opt, SSD);
			btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
			break;
		case Opt_nobarrier:
			printk(KERN_INFO "btrfs: turning off barriers\n");
			btrfs_set_opt(info->mount_opt, NOBARRIER);
			break;
		case Opt_thread_pool:
			/*
			 * NOTE(review): match_int() return is ignored; a
			 * malformed or zero value silently leaves the pool
			 * size unchanged.
			 */
			intarg = 0;
			match_int(&args[0], &intarg);
			if (intarg) {
				info->thread_pool_size = intarg;
				printk(KERN_INFO "btrfs: thread pool %d\n",
				       info->thread_pool_size);
			}
			break;
		case Opt_max_inline:
			num = match_strdup(&args[0]);
			if (num) {
				info->max_inline = memparse(num, NULL);
				kfree(num);

				/* never allow inlining below one sector */
				if (info->max_inline) {
					info->max_inline = max_t(u64,
						info->max_inline,
						root->sectorsize);
				}
				printk(KERN_INFO "btrfs: max_inline at %llu\n",
					(unsigned long long)info->max_inline);
			}
			break;
		case Opt_alloc_start:
			num = match_strdup(&args[0]);
			if (num) {
				info->alloc_start = memparse(num, NULL);
				kfree(num);
				printk(KERN_INFO
					"btrfs: allocations start at %llu\n",
					(unsigned long long)info->alloc_start);
			}
			break;
		case Opt_noacl:
			root->fs_info->sb->s_flags &= ~MS_POSIXACL;
			break;
		case Opt_notreelog:
			printk(KERN_INFO "btrfs: disabling tree log\n");
			btrfs_set_opt(info->mount_opt, NOTREELOG);
			break;
		case Opt_flushoncommit:
			printk(KERN_INFO "btrfs: turning on flush-on-commit\n");
			btrfs_set_opt(info->mount_opt, FLUSHONCOMMIT);
			break;
		case Opt_ratio:
			/* NOTE(review): match_int() return ignored, as above */
			intarg = 0;
			match_int(&args[0], &intarg);
			if (intarg) {
				info->metadata_ratio = intarg;
				printk(KERN_INFO "btrfs: metadata ratio %d\n",
				       info->metadata_ratio);
			}
			break;
		case Opt_discard:
			btrfs_set_opt(info->mount_opt, DISCARD);
			break;
		case Opt_err:
			printk(KERN_INFO "btrfs: unrecognized mount option "
			       "'%s'\n", p);
			ret = -EINVAL;
			goto out;
		default:
			break;
		}
	}
out:
	kfree(orig);
	return ret;
}
/*
 * Parse mount options that are required early in the mount process.
 *
 * All other options will be parsed on much later in the mount process and
 * only when we need to allocate a new super block.
 *
 * On success *subvol_name is always set (defaulting to "."), and the
 * caller owns that allocation.
 *
 * NOTE(review): the string returned by match_strdup() for Opt_device is
 * handed to btrfs_scan_one_device() and never freed here — ownership is
 * unclear from this file; verify against btrfs_scan_one_device().
 * NOTE(review): if an error occurs after Opt_subvol allocated
 * *subvol_name, the caller is still expected to free it — confirm all
 * callers do so on the error path.
 */
static int btrfs_parse_early_options(const char *options, fmode_t flags,
		void *holder, char **subvol_name, u64 *subvol_objectid,
		struct btrfs_fs_devices **fs_devices)
{
	substring_t args[MAX_OPT_ARGS];
	char *opts, *p;
	int error = 0;
	int intarg;

	if (!options)
		goto out;

	/*
	 * strsep changes the string, duplicate it because parse_options
	 * gets called twice
	 */
	opts = kstrdup(options, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	while ((p = strsep(&opts, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_subvol:
			*subvol_name = match_strdup(&args[0]);
			break;
		case Opt_subvolid:
			intarg = 0;
			error = match_int(&args[0], &intarg);
			if (!error) {
				/* we want the original fs_tree */
				if (!intarg)
					*subvol_objectid =
						BTRFS_FS_TREE_OBJECTID;
				else
					*subvol_objectid = intarg;
			}
			break;
		case Opt_device:
			error = btrfs_scan_one_device(match_strdup(&args[0]),
					flags, holder, fs_devices);
			if (error)
				goto out_free_opts;
			break;
		default:
			break;
		}
	}

out_free_opts:
	kfree(opts);
out:
	/*
	 * If no subvolume name is specified we use the default one. Allocate
	 * a copy of the string "." here so that code later in the
	 * mount path doesn't care if it's the default volume or another one.
	 */
	if (!*subvol_name) {
		*subvol_name = kstrdup(".", GFP_KERNEL);
		if (!*subvol_name)
			return -ENOMEM;
	}
	return error;
}
/*
 * Resolve the dentry of the subvolume root to mount.
 *
 * @sb:              super block being mounted
 * @subvol_objectid: explicit subvolume to mount, or 0 for the default
 *
 * When no subvolume is given, the "default" dir item in the tree root is
 * looked up to find the default subvolume; if that item is missing we
 * fall back to the root-most subvolume.  Returns a referenced dentry on
 * success or an ERR_PTR.
 *
 * Fix vs. original: the allocated btrfs_path was leaked when
 * btrfs_lookup_dir_item() returned an ERR_PTR; free it before bailing.
 */
static struct dentry *get_default_root(struct super_block *sb,
				       u64 subvol_objectid)
{
	struct btrfs_root *root = sb->s_fs_info;
	struct btrfs_root *new_root;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_key location;
	struct inode *inode;
	struct dentry *dentry;
	u64 dir_id;
	int new = 0;

	/*
	 * We have a specific subvol we want to mount, just setup location and
	 * go look up the root.
	 */
	if (subvol_objectid) {
		location.objectid = subvol_objectid;
		location.type = BTRFS_ROOT_ITEM_KEY;
		location.offset = (u64)-1;
		goto find_root;
	}

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);
	path->leave_spinning = 1;

	/*
	 * Find the "default" dir item which points to the root item that we
	 * will mount by default if we haven't been given a specific subvolume
	 * to mount.
	 */
	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
	if (IS_ERR(di)) {
		/* don't leak the path on lookup failure */
		btrfs_free_path(path);
		return ERR_CAST(di);
	}
	if (!di) {
		/*
		 * Ok the default dir item isn't there.  This is weird since
		 * it's always been there, but don't freak out, just try and
		 * mount to root most subvolume.
		 */
		btrfs_free_path(path);
		dir_id = BTRFS_FIRST_FREE_OBJECTID;
		new_root = root->fs_info->fs_root;
		goto setup_root;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
	btrfs_free_path(path);

find_root:
	new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	if (IS_ERR(new_root))
		return ERR_CAST(new_root);

	/* refs == 0 means the subvolume is being deleted */
	if (btrfs_root_refs(&new_root->root_item) == 0)
		return ERR_PTR(-ENOENT);

	dir_id = btrfs_root_dirid(&new_root->root_item);
setup_root:
	location.objectid = dir_id;
	location.type = BTRFS_INODE_ITEM_KEY;
	location.offset = 0;

	inode = btrfs_iget(sb, &location, new_root, &new);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/*
	 * If we're just mounting the root most subvol put the inode and return
	 * a reference to the dentry.  We will have already gotten a reference
	 * to the inode in btrfs_fill_super so we're good to go.
	 */
	if (!new && sb->s_root->d_inode == inode) {
		iput(inode);
		return dget(sb->s_root);
	}

	if (new) {
		const struct qstr name = { .name = "/", .len = 1 };

		/*
		 * New inode, we need to make the dentry a sibling of s_root so
		 * everything gets cleaned up properly on unmount.
		 */
		dentry = d_alloc(sb->s_root, &name);
		if (!dentry) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
		d_splice_alias(inode, dentry);
	} else {
		/*
		 * We found the inode in cache, just find a dentry for it and
		 * put the reference to the inode we just got.
		 */
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}
/*
 * Fill in a freshly-allocated super block: set VFS ops, open the ctree,
 * and instantiate the root dentry of the root-most subvolume.
 * Returns 0 on success or a negative errno (closing the ctree on error).
 */
static int btrfs_fill_super(struct super_block *sb,
			    struct btrfs_fs_devices *fs_devices,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root_dentry;
	struct btrfs_super_block *disk_super;
	struct btrfs_root *tree_root;
	struct btrfs_key key;
	int err;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_magic = BTRFS_SUPER_MAGIC;
	sb->s_op = &btrfs_super_ops;
	sb->s_export_op = &btrfs_export_ops;
	sb->s_xattr = btrfs_xattr_handlers;
	sb->s_time_gran = 1;
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	/* open_ctree() parses the mount options in 'data' itself */
	tree_root = open_ctree(sb, fs_devices, (char *)data);
	if (IS_ERR(tree_root)) {
		printk("btrfs: open_ctree failed\n");
		return PTR_ERR(tree_root);
	}
	sb->s_fs_info = tree_root;
	disk_super = &tree_root->fs_info->super_copy;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root, NULL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto fail_close;
	}

	root_dentry = d_alloc_root(inode);
	if (!root_dentry) {
		iput(inode);
		err = -ENOMEM;
		goto fail_close;
	}

	sb->s_root = root_dentry;

	/* remember options for /proc/mounts via generic_show_options */
	save_mount_options(sb, data);
	return 0;

fail_close:
	close_ctree(tree_root);
	return err;
}
/*
 * ->sync_fs: flush dirty data and, when @wait is set, commit a
 * transaction so everything is on disk.
 *
 * Fix vs. original: btrfs_start_transaction() can return an ERR_PTR
 * (e.g. -ENOMEM); the previous code passed it straight to
 * btrfs_commit_transaction(), dereferencing the error pointer.
 */
int btrfs_sync_fs(struct super_block *sb, int wait)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = btrfs_sb(sb);

	if (!wait) {
		/* async writeback of the btree is all that's asked for */
		filemap_flush(root->fs_info->btree_inode->i_mapping);
		return 0;
	}

	btrfs_start_delalloc_inodes(root, 0);
	btrfs_wait_ordered_extents(root, 0, 0);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	return btrfs_commit_transaction(trans, root);
}
/*
 * ->show_options: emit non-default mount options for /proc/mounts.
 * Only options that differ from their defaults are printed.
 */
static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct btrfs_root *root = btrfs_sb(vfs->mnt_sb);
	struct btrfs_fs_info *info = root->fs_info;

	if (btrfs_test_opt(root, DEGRADED))
		seq_puts(seq, ",degraded");
	if (btrfs_test_opt(root, NODATASUM))
		seq_puts(seq, ",nodatasum");
	if (btrfs_test_opt(root, NODATACOW))
		seq_puts(seq, ",nodatacow");
	if (btrfs_test_opt(root, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	/* 8 MiB is the built-in max_inline default */
	if (info->max_inline != 8192 * 1024)
		seq_printf(seq, ",max_inline=%llu",
			   (unsigned long long)info->max_inline);
	if (info->alloc_start != 0)
		seq_printf(seq, ",alloc_start=%llu",
			   (unsigned long long)info->alloc_start);
	/* default pool size is min(num_online_cpus() + 2, 8) */
	if (info->thread_pool_size !=  min_t(unsigned long,
					     num_online_cpus() + 2, 8))
		seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
	if (btrfs_test_opt(root, COMPRESS))
		seq_puts(seq, ",compress");
	if (btrfs_test_opt(root, NOSSD))
		seq_puts(seq, ",nossd");
	if (btrfs_test_opt(root, SSD_SPREAD))
		seq_puts(seq, ",ssd_spread");
	else if (btrfs_test_opt(root, SSD))
		seq_puts(seq, ",ssd");
	if (btrfs_test_opt(root, NOTREELOG))
		seq_puts(seq, ",notreelog");
	if (btrfs_test_opt(root, FLUSHONCOMMIT))
		seq_puts(seq, ",flushoncommit");
	if (btrfs_test_opt(root, DISCARD))
		seq_puts(seq, ",discard");
	if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
		seq_puts(seq, ",noacl");
	return 0;
}
/* sget() match callback: does @s already belong to the fs_devices in @data? */
static int btrfs_test_super(struct super_block *s, void *data)
{
	struct btrfs_root *root = btrfs_sb(s);

	return root->fs_info->fs_devices == (struct btrfs_fs_devices *)data;
}
/*
 * Find a superblock for the given device / mount point.
 *
 * Note: This is based on get_sb_bdev from fs/super.c with a few additions
 * for multiple device setup. Make sure to keep it in sync.
 *
 * Flow: parse early options (subvol/subvolid/device), scan and open the
 * device set, find or create the super block, then resolve the requested
 * subvolume dentry.  Returns 0 on success or a negative errno.
 */
static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
		const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct block_device *bdev = NULL;
	struct super_block *s;
	struct dentry *root;
	struct btrfs_fs_devices *fs_devices = NULL;
	fmode_t mode = FMODE_READ;
	char *subvol_name = NULL;
	u64 subvol_objectid = 0;
	int error = 0;
	int found = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	error = btrfs_parse_early_options(data, mode, fs_type,
					  &subvol_name, &subvol_objectid,
					  &fs_devices);
	if (error)
		return error;

	error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
	if (error)
		goto error_free_subvol_name;

	error = btrfs_open_devices(fs_devices, mode, fs_type);
	if (error)
		goto error_free_subvol_name;

	/* a writable mount needs at least one writable device */
	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
		error = -EACCES;
		goto error_close_devices;
	}

	bdev = fs_devices->latest_bdev;
	s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		/* existing super: a ro/rw mismatch cannot be reconciled here */
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_close_devices;
		}

		found = 1;
		/* we opened a second reference to the devices; drop it */
		btrfs_close_devices(fs_devices);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_flags = flags;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		error = btrfs_fill_super(s, fs_devices, data,
					 flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error_free_subvol_name;
		}

		btrfs_sb(s)->fs_info->bdev_holder = fs_type;
		s->s_flags |= MS_ACTIVE;
	}

	root = get_default_root(s, subvol_objectid);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		deactivate_locked_super(s);
		goto error;
	}
	/* if they gave us a subvolume name bind mount into that */
	if (strcmp(subvol_name, ".")) {
		struct dentry *new_root;
		mutex_lock(&root->d_inode->i_mutex);
		new_root = lookup_one_len(subvol_name, root,
				      strlen(subvol_name));
		mutex_unlock(&root->d_inode->i_mutex);

		if (IS_ERR(new_root)) {
			deactivate_locked_super(s);
			error = PTR_ERR(new_root);
			dput(root);
			goto error_close_devices;
		}
		if (!new_root->d_inode) {
			/* named subvolume does not exist */
			dput(root);
			dput(new_root);
			deactivate_locked_super(s);
			error = -ENXIO;
			goto error_close_devices;
		}
		dput(root);
		root = new_root;
	}

	mnt->mnt_sb = s;
	mnt->mnt_root = root;

	kfree(subvol_name);
	return 0;

error_s:
	error = PTR_ERR(s);
error_close_devices:
	btrfs_close_devices(fs_devices);
error_free_subvol_name:
	kfree(subvol_name);
error:
	return error;
}
/*
 * ->remount_fs: re-parse options and handle ro<->rw transitions.
 * rw->ro commits the super; ro->rw requires writable devices, no
 * unreplayed log, and re-runs orphan/relocation recovery.
 */
static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
	struct btrfs_root *root = btrfs_sb(sb);
	int ret;

	ret = btrfs_parse_options(root, data);
	if (ret)
		return -EINVAL;

	/* no ro/rw change requested: options update is all there is to do */
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
		return 0;

	if (*flags & MS_RDONLY) {
		sb->s_flags |= MS_RDONLY;

		ret =  btrfs_commit_super(root);
		WARN_ON(ret);
	} else {
		if (root->fs_info->fs_devices->rw_devices == 0)
			return -EACCES;

		/* a dirty tree log cannot be replayed on a remount */
		if (btrfs_super_log_root(&root->fs_info->super_copy) != 0)
			return -EINVAL;

		/* recover orphans and relocation before going writable */
		ret = btrfs_cleanup_fs_roots(root->fs_info);
		WARN_ON(ret);

		/* recover relocation */
		ret = btrfs_recover_relocation(root);
		WARN_ON(ret);

		sb->s_flags &= ~MS_RDONLY;
	}

	return 0;
}
/*
 * ->statfs: report filesystem usage.  Space totals are summed over the
 * RCU-protected space_info list; the fsid is folded into f_fsid together
 * with the subvolume's root objectid so subvolumes get distinct ids.
 */
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct btrfs_root *root = btrfs_sb(dentry->d_sb);
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct list_head *head = &root->fs_info->space_info;
	struct btrfs_space_info *found;
	u64 total_used = 0;
	int bits = dentry->d_sb->s_blocksize_bits;
	__be32 *fsid = (__be32 *)root->fs_info->fsid;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		total_used += found->disk_used;
	rcu_read_unlock();

	buf->f_namelen = BTRFS_NAME_LEN;
	buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
	buf->f_bfree = buf->f_blocks - (total_used >> bits);
	buf->f_bavail = buf->f_bfree;
	buf->f_bsize = dentry->d_sb->s_blocksize;
	buf->f_type = BTRFS_SUPER_MAGIC;

	/* We treat it as constant endianness (it doesn't matter _which_)
	   because we want the fsid to come out the same whether mounted
	   on a big-endian or little-endian host */
	buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
	buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
	/* Mask in the root object ID too, to disambiguate subvols */
	buf->f_fsid.val[0] ^= BTRFS_I(dentry->d_inode)->root->objectid >> 32;
	buf->f_fsid.val[1] ^= BTRFS_I(dentry->d_inode)->root->objectid;

	return 0;
}
/* Filesystem registration; anon super since btrfs manages devices itself. */
static struct file_system_type btrfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "btrfs",
	.get_sb		= btrfs_get_sb,
	.kill_sb	= kill_anon_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
/*
 * used by btrfsctl to scan devices when no FS is mounted
 *
 * Only BTRFS_IOC_SCAN_DEV is handled; anything else returns -ENOTTY.
 * Requires CAP_SYS_ADMIN.
 */
static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct btrfs_ioctl_vol_args *vol;
	struct btrfs_fs_devices *fs_devices;
	int ret = -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* copy the user's vol_args; memdup_user checks the copy itself */
	vol = memdup_user((void __user *)arg, sizeof(*vol));
	if (IS_ERR(vol))
		return PTR_ERR(vol);

	switch (cmd) {
	case BTRFS_IOC_SCAN_DEV:
		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
					    &btrfs_fs_type, &fs_devices);
		break;
	}

	kfree(vol);
	return ret;
}
/*
 * ->freeze_fs: stop background activity by holding the transaction
 * kthread and cleaner mutexes until unfreeze.  Lock order matters and
 * must mirror btrfs_unfreeze().
 */
static int btrfs_freeze(struct super_block *sb)
{
	struct btrfs_root *root = btrfs_sb(sb);
	mutex_lock(&root->fs_info->transaction_kthread_mutex);
	mutex_lock(&root->fs_info->cleaner_mutex);
	return 0;
}
/* ->unfreeze_fs: release the mutexes taken by btrfs_freeze(), in reverse. */
static int btrfs_unfreeze(struct super_block *sb)
{
	struct btrfs_root *root = btrfs_sb(sb);
	mutex_unlock(&root->fs_info->cleaner_mutex);
	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
	return 0;
}
/* VFS super_operations for btrfs. */
static const struct super_operations btrfs_super_ops = {
	.drop_inode	= btrfs_drop_inode,
	.delete_inode	= btrfs_delete_inode,
	.put_super	= btrfs_put_super,
	.sync_fs	= btrfs_sync_fs,
	.show_options	= btrfs_show_options,
	.write_inode	= btrfs_write_inode,
	.dirty_inode	= btrfs_dirty_inode,
	.alloc_inode	= btrfs_alloc_inode,
	.destroy_inode	= btrfs_destroy_inode,
	.statfs		= btrfs_statfs,
	.remount_fs	= btrfs_remount,
	.freeze_fs	= btrfs_freeze,
	.unfreeze_fs	= btrfs_unfreeze,
};
/* /dev/btrfs-control: ioctl-only interface for device scanning. */
static const struct file_operations btrfs_ctl_fops = {
	.unlocked_ioctl	 = btrfs_control_ioctl,
	.compat_ioctl = btrfs_control_ioctl,
	.owner	 = THIS_MODULE,
};

static struct miscdevice btrfs_misc = {
	.minor		= BTRFS_MINOR,
	.name		= "btrfs-control",
	.fops		= &btrfs_ctl_fops
};

MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
MODULE_ALIAS("devname:btrfs-control");
/* Register the btrfs-control misc device. */
static int btrfs_interface_init(void)
{
	return misc_register(&btrfs_misc);
}
/* Unregister the btrfs-control misc device, logging on failure. */
static void btrfs_interface_exit(void)
{
	if (misc_deregister(&btrfs_misc) < 0)
		printk(KERN_INFO "misc_deregister failed for control device");
}
/*
 * Module init: bring up subsystems in dependency order (sysfs, caches,
 * extent io/map, control device, filesystem registration), unwinding in
 * reverse via the goto ladder on failure.
 */
static int __init init_btrfs_fs(void)
{
	int err;

	err = btrfs_init_sysfs();
	if (err)
		return err;

	err = btrfs_init_cachep();
	if (err)
		goto free_sysfs;

	err = extent_io_init();
	if (err)
		goto free_cachep;

	err = extent_map_init();
	if (err)
		goto free_extent_io;

	err = btrfs_interface_init();
	if (err)
		goto free_extent_map;

	err = register_filesystem(&btrfs_fs_type);
	if (err)
		goto unregister_ioctl;

	printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
	return 0;

unregister_ioctl:
	btrfs_interface_exit();
free_extent_map:
	extent_map_exit();
free_extent_io:
	extent_io_exit();
free_cachep:
	btrfs_destroy_cachep();
free_sysfs:
	btrfs_exit_sysfs();
	return err;
}
/* Module exit: tear everything down (inverse of init_btrfs_fs). */
static void __exit exit_btrfs_fs(void)
{
	btrfs_destroy_cachep();
	extent_map_exit();
	extent_io_exit();
	btrfs_interface_exit();
	unregister_filesystem(&btrfs_fs_type);
	btrfs_exit_sysfs();
	btrfs_cleanup_fs_uuids();
	btrfs_zlib_exit();
}
module_init(init_btrfs_fs)
module_exit(exit_btrfs_fs)
MODULE_LICENSE("GPL");
| gpl-2.0 |
streamagame/streamagame_kernel | net/sunrpc/rpcb_clnt.c | 958 | 30618 | /*
* In-kernel rpcbind client supporting versions 2, 3, and 4 of the rpcbind
* protocol
*
* Based on RFC 1833: "Binding Protocols for ONC RPC Version 2" and
* RFC 3530: "Network File System (NFS) version 4 Protocol"
*
* Original: Gilles Quillard, Bull Open Source, 2005 <gilles.quillard@bull.net>
* Updated: Chuck Lever, Oracle Corporation, 2007 <chuck.lever@oracle.com>
*
* Descended from net/sunrpc/pmap_clnt.c,
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprtsock.h>
#include "netns.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_BIND
#endif
#define RPCBIND_SOCK_PATHNAME "/var/run/rpcbind.sock"
#define RPCBIND_PROGRAM (100000u)
#define RPCBIND_PORT (111u)
#define RPCBVERS_2 (2u)
#define RPCBVERS_3 (3u)
#define RPCBVERS_4 (4u)
/*
 * rpcbind procedure numbers.  GETADDR/BCAST are protocol-version aliases
 * for GETPORT/CALLIT; later entries exist only in rpcbind v3/v4.
 */
enum {
	RPCBPROC_NULL,
	RPCBPROC_SET,
	RPCBPROC_UNSET,
	RPCBPROC_GETPORT,
	RPCBPROC_GETADDR = 3,		/* alias for GETPORT */
	RPCBPROC_DUMP,
	RPCBPROC_CALLIT,
	RPCBPROC_BCAST = 5,		/* alias for CALLIT */
	RPCBPROC_GETTIME,
	RPCBPROC_UADDR2TADDR,
	RPCBPROC_TADDR2UADDR,
	RPCBPROC_GETVERSADDR,
	RPCBPROC_INDIRECT,
	RPCBPROC_GETADDRLIST,
	RPCBPROC_GETSTAT,
};
/*
* r_owner
*
* The "owner" is allowed to unset a service in the rpcbind database.
*
* For AF_LOCAL SET/UNSET requests, rpcbind treats this string as a
* UID which it maps to a local user name via a password lookup.
* In all other cases it is ignored.
*
* For SET/UNSET requests, user space provides a value, even for
* network requests, and GETADDR uses an empty string. We follow
* those precedents here.
*/
#define RPCB_OWNER_STRING "0"
#define RPCB_MAXOWNERLEN sizeof(RPCB_OWNER_STRING)
/*
* XDR data type sizes
*/
#define RPCB_program_sz (1)
#define RPCB_version_sz (1)
#define RPCB_protocol_sz (1)
#define RPCB_port_sz (1)
#define RPCB_boolean_sz (1)
#define RPCB_netid_sz (1 + XDR_QUADLEN(RPCBIND_MAXNETIDLEN))
#define RPCB_addr_sz (1 + XDR_QUADLEN(RPCBIND_MAXUADDRLEN))
#define RPCB_ownerstring_sz (1 + XDR_QUADLEN(RPCB_MAXOWNERLEN))
/*
* XDR argument and result sizes
*/
#define RPCB_mappingargs_sz (RPCB_program_sz + RPCB_version_sz + \
RPCB_protocol_sz + RPCB_port_sz)
#define RPCB_getaddrargs_sz (RPCB_program_sz + RPCB_version_sz + \
RPCB_netid_sz + RPCB_addr_sz + \
RPCB_ownerstring_sz)
#define RPCB_getportres_sz RPCB_port_sz
#define RPCB_setres_sz RPCB_boolean_sz
/*
* Note that RFC 1833 does not put any size restrictions on the
* address string returned by the remote rpcbind database.
*/
#define RPCB_getaddrres_sz RPCB_addr_sz
static void rpcb_getport_done(struct rpc_task *, void *);
static void rpcb_map_release(void *data);
static const struct rpc_program rpcb_program;
/* Arguments/results of one rpcbind query; also the RPC task's calldata. */
struct rpcbind_args {
	struct rpc_xprt *	r_xprt;		/* transport being bound */

	u32			r_prog;		/* remote RPC program number */
	u32			r_vers;		/* remote program version */
	u32			r_prot;		/* transport protocol (IPPROTO_*) */
	unsigned short		r_port;		/* result: port, 0 if unregistered */
	const char *		r_netid;	/* rpcbind v3/v4 netid string */
	const char *		r_addr;		/* rpcbind v3/v4 universal address */
	const char *		r_owner;	/* r_owner string (see above) */

	int			r_status;	/* completion status for waiters */
};
static struct rpc_procinfo rpcb_procedures2[];
static struct rpc_procinfo rpcb_procedures3[];
static struct rpc_procinfo rpcb_procedures4[];

/* One entry per rpcbind version to try, in preference order. */
struct rpcb_info {
	u32			rpc_vers;	/* rpcbind version number */
	struct rpc_procinfo *	rpc_proc;	/* GETPORT/GETADDR proc to use */
};

static const struct rpcb_info rpcb_next_version[];
static const struct rpcb_info rpcb_next_version6[];
/* Callbacks for the async GETPORT/GETADDR task. */
static const struct rpc_call_ops rpcb_getport_ops = {
	.rpc_call_done		= rpcb_getport_done,
	.rpc_release		= rpcb_map_release,
};
/* Finish a binding attempt: clear the flag and wake tasks queued on it. */
static void rpcb_wake_rpcbind_waiters(struct rpc_xprt *xprt, int status)
{
	xprt_clear_binding(xprt);
	rpc_wake_up_status(&xprt->binding, status);
}
/* rpc_release callback: wake waiters, then free the map and its address. */
static void rpcb_map_release(void *data)
{
	struct rpcbind_args *map = data;

	rpcb_wake_rpcbind_waiters(map->r_xprt, map->r_status);
	xprt_put(map->r_xprt);
	kfree(map->r_addr);
	kfree(map);
}
/*
 * Take a reference on the per-net local rpcbind clients.
 * Only bumps the count if it is already non-zero (i.e. the clients
 * exist); returns the new count, 0 meaning "not created yet".
 */
static int rpcb_get_local(struct net *net)
{
	int cnt;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpcb_clnt_lock);
	if (sn->rpcb_users)
		sn->rpcb_users++;
	cnt = sn->rpcb_users;
	spin_unlock(&sn->rpcb_clnt_lock);

	return cnt;
}
/*
 * Drop a reference on the per-net local rpcbind clients; the last user
 * clears the pointers under the lock and shuts both clients down outside
 * it (rpc_shutdown_client may sleep).
 *
 * NOTE(review): clnt/clnt4 are snapshotted before the lock is taken —
 * appears safe because only the last ref shuts them down, but confirm
 * against concurrent rpcb_set_local() callers.
 */
void rpcb_put_local(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt = sn->rpcb_local_clnt;
	struct rpc_clnt *clnt4 = sn->rpcb_local_clnt4;
	int shutdown = 0;

	spin_lock(&sn->rpcb_clnt_lock);
	if (sn->rpcb_users) {
		if (--sn->rpcb_users == 0) {
			sn->rpcb_local_clnt = NULL;
			sn->rpcb_local_clnt4 = NULL;
		}
		shutdown = !sn->rpcb_users;
	}
	spin_unlock(&sn->rpcb_clnt_lock);

	if (shutdown) {
		/*
		 * cleanup_rpcb_clnt - remove xprtsock's sysctls, unregister
		 */
		if (clnt4)
			rpc_shutdown_client(clnt4);
		if (clnt)
			rpc_shutdown_client(clnt);
	}
}
/*
 * Publish freshly-created local rpcbind clients for this net namespace.
 * The wmb orders the pointer stores before rpcb_users becomes non-zero,
 * so rpcb_get_local() readers see initialized clients.
 */
static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
			struct rpc_clnt *clnt4,
			bool is_af_local)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	/* Protected by rpcb_create_local_mutex */
	sn->rpcb_local_clnt = clnt;
	sn->rpcb_local_clnt4 = clnt4;
	sn->rpcb_is_af_local = is_af_local ? 1 : 0;
	smp_wmb();
	sn->rpcb_users = 1;
	dprintk("RPC:       created new rpcb local clients (rpcb_local_clnt: "
		"%p, rpcb_local_clnt4: %p) for net %p%s\n",
		sn->rpcb_local_clnt, sn->rpcb_local_clnt4,
		net, (net == &init_net) ? " (init_net)" : "");
}
/*
 * Create local rpcbind clients over the AF_LOCAL rpcbind socket.
 *
 * Returns zero on success, otherwise a negative errno value
 * is returned.
 *
 * A v2 client is created first (with an RPC ping, so this only works
 * when user-space rpcbind listens on the named socket); a v4 client is
 * then cloned from it, and its absence is tolerated.
 */
static int rpcb_create_local_unix(struct net *net)
{
	static const struct sockaddr_un rpcb_localaddr_rpcbind = {
		.sun_family		= AF_LOCAL,
		.sun_path		= RPCBIND_SOCK_PATHNAME,
	};
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_LOCAL,
		.address	= (struct sockaddr *)&rpcb_localaddr_rpcbind,
		.addrsize	= sizeof(rpcb_localaddr_rpcbind),
		.servername	= "localhost",
		.program	= &rpcb_program,
		.version	= RPCBVERS_2,
		.authflavor	= RPC_AUTH_NULL,
		/*
		 * We turn off the idle timeout to prevent the kernel
		 * from automatically disconnecting the socket.
		 * Otherwise, we'd have to cache the mount namespace
		 * of the caller and somehow pass that to the socket
		 * reconnect code.
		 */
		.flags		= RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
	};
	struct rpc_clnt *clnt, *clnt4;
	int result = 0;

	/*
	 * Because we requested an RPC PING at transport creation time,
	 * this works only if the user space portmapper is rpcbind, and
	 * it's listening on AF_LOCAL on the named socket.
	 */
	clnt = rpc_create(&args);
	if (IS_ERR(clnt)) {
		dprintk("RPC:       failed to create AF_LOCAL rpcbind "
				"client (errno %ld).\n", PTR_ERR(clnt));
		result = PTR_ERR(clnt);
		goto out;
	}

	clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
	if (IS_ERR(clnt4)) {
		dprintk("RPC:       failed to bind second program to "
				"rpcbind v4 client (errno %ld).\n",
				PTR_ERR(clnt4));
		clnt4 = NULL;
	}

	rpcb_set_local(net, clnt, clnt4, true);

out:
	return result;
}
/*
 * Create local rpcbind clients over TCP to 127.0.0.1:111, used when the
 * AF_LOCAL socket is unavailable (e.g. classic portmapper).
 *
 * Returns zero on success, otherwise a negative errno value
 * is returned.
 */
static int rpcb_create_local_net(struct net *net)
{
	static const struct sockaddr_in rpcb_inaddr_loopback = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
		.sin_port		= htons(RPCBIND_PORT),
	};
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= (struct sockaddr *)&rpcb_inaddr_loopback,
		.addrsize	= sizeof(rpcb_inaddr_loopback),
		.servername	= "localhost",
		.program	= &rpcb_program,
		.version	= RPCBVERS_2,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};
	struct rpc_clnt *clnt, *clnt4;
	int result = 0;

	clnt = rpc_create(&args);
	if (IS_ERR(clnt)) {
		dprintk("RPC:       failed to create local rpcbind "
				"client (errno %ld).\n", PTR_ERR(clnt));
		result = PTR_ERR(clnt);
		goto out;
	}

	/*
	 * This results in an RPC ping.  On systems running portmapper,
	 * the v4 ping will fail.  Proceed anyway, but disallow rpcb
	 * v4 upcalls.
	 */
	clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
	if (IS_ERR(clnt4)) {
		dprintk("RPC:       failed to bind second program to "
				"rpcbind v4 client (errno %ld).\n",
				PTR_ERR(clnt4));
		clnt4 = NULL;
	}

	rpcb_set_local(net, clnt, clnt4, false);

out:
	return result;
}
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
int rpcb_create_local(struct net *net)
{
	static DEFINE_MUTEX(rpcb_create_local_mutex);
	int result = 0;

	/* Fast path: clients already cached for this namespace. */
	if (rpcb_get_local(net))
		return 0;

	mutex_lock(&rpcb_create_local_mutex);
	/* Re-check under the lock: another task may have raced us here. */
	if (rpcb_get_local(net))
		goto out;

	/* Prefer the AF_LOCAL transport; fall back to loopback TCP. */
	if (rpcb_create_local_unix(net) != 0)
		result = rpcb_create_local_net(net);
out:
	mutex_unlock(&rpcb_create_local_mutex);
	return result;
}
/*
 * Create an RPC client for querying a remote host's rpcbind service.
 * The port in @srvaddr is overwritten with the well-known rpcbind
 * port.  Only AF_INET and AF_INET6 destinations are supported;
 * anything else yields ERR_PTR(-EAFNOSUPPORT).
 */
static struct rpc_clnt *rpcb_create(struct net *net, const char *nodename,
				    const char *hostname,
				    struct sockaddr *srvaddr, size_t salen,
				    int proto, u32 version)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= proto,
		.address	= srvaddr,
		.addrsize	= salen,
		.servername	= hostname,
		.nodename	= nodename,
		.program	= &rpcb_program,
		.version	= version,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= (RPC_CLNT_CREATE_NOPING |
				   RPC_CLNT_CREATE_NONPRIVPORT),
	};

	/* Point the request at the standard rpcbind port. */
	if (srvaddr->sa_family == AF_INET)
		((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT);
	else if (srvaddr->sa_family == AF_INET6)
		((struct sockaddr_in6 *)srvaddr)->sin6_port = htons(RPCBIND_PORT);
	else
		return ERR_PTR(-EAFNOSUPPORT);

	return rpc_create(&args);
}
/*
 * Issue a synchronous call to the local rpcbind daemon and translate
 * its boolean reply into an errno: 0 when rpcbind granted the request,
 * -EACCES when it refused, or a negative errno if the RPC itself
 * failed.
 */
static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
{
	int result = 0;
	int flags;
	int error;

	/*
	 * Unregistrations over an AF_LOCAL rpcbind client are
	 * best-effort: don't force a connection attempt for them.
	 */
	if (is_set || !sn->rpcb_is_af_local)
		flags = RPC_TASK_SOFTCONN;
	else
		flags = RPC_TASK_NOCONNECT;

	msg->rpc_resp = &result;
	error = rpc_call_sync(clnt, msg, flags);
	if (error < 0) {
		dprintk("RPC: failed to contact local rpcbind "
			"server (errno %d).\n", -error);
		return error;
	}

	return result ? 0 : -EACCES;
}
/**
* rpcb_register - set or unset a port registration with the local rpcbind svc
* @net: target network namespace
* @prog: RPC program number to bind
* @vers: RPC version number to bind
* @prot: transport protocol to register
* @port: port value to register
*
* Returns zero if the registration request was dispatched successfully
* and the rpcbind daemon returned success. Otherwise, returns an errno
* value that reflects the nature of the error (request could not be
* dispatched, timed out, or rpcbind returned an error).
*
* RPC services invoke this function to advertise their contact
* information via the system's rpcbind daemon. RPC services
* invoke this function once for each [program, version, transport]
* tuple they wish to advertise.
*
* Callers may also unregister RPC services that are no longer
* available by setting the passed-in port to zero. This removes
* all registered transports for [program, version] from the local
* rpcbind database.
*
* This function uses rpcbind protocol version 2 to contact the
* local rpcbind daemon.
*
* Registration works over both AF_INET and AF_INET6, and services
* registered via this function are advertised as available for any
* address. If the local rpcbind daemon is listening on AF_INET6,
* services registered via this function will be advertised on
* IN6ADDR_ANY (ie available for all AF_INET and AF_INET6
* addresses).
*/
int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short port)
{
	struct rpcbind_args map = {
		.r_prog		= prog,
		.r_vers		= vers,
		.r_prot		= prot,
		.r_port		= port,
	};
	struct rpc_message msg = {
		.rpc_argp	= &map,
	};
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	bool is_set = (port != 0);

	dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
		"rpcbind\n", (port ? "" : "un"),
		prog, vers, prot, port);

	/* A zero port means "unregister this [program, version]". */
	if (is_set)
		msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
	else
		msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];

	return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
}
/*
* Fill in AF_INET family-specific arguments to register
*/
/*
 * Fill in AF_INET family-specific arguments to register
 */
static int rpcb_register_inet4(struct sunrpc_net *sn,
			       const struct sockaddr *sap,
			       struct rpc_message *msg)
{
	const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
	struct rpcbind_args *map = msg->rpc_argp;
	unsigned short port = ntohs(sin->sin_port);
	bool is_set = (port != 0);
	int result;

	/*
	 * Universal-address form of @sap; freed again below.
	 * NOTE(review): rpc_sockaddr2uaddr() can return NULL on
	 * allocation failure and that is not checked here -- confirm
	 * downstream tolerance before changing.
	 */
	map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);

	dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
		"local rpcbind\n", (port ? "" : "un"),
		map->r_prog, map->r_vers,
		map->r_addr, map->r_netid);

	/* Zero port means unregister. */
	msg->rpc_proc = is_set ? &rpcb_procedures4[RPCBPROC_SET] :
				 &rpcb_procedures4[RPCBPROC_UNSET];

	result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
	kfree(map->r_addr);
	return result;
}
/*
* Fill in AF_INET6 family-specific arguments to register
*/
/*
 * Fill in AF_INET6 family-specific arguments to register
 */
static int rpcb_register_inet6(struct sunrpc_net *sn,
			       const struct sockaddr *sap,
			       struct rpc_message *msg)
{
	const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
	struct rpcbind_args *map = msg->rpc_argp;
	unsigned short port = ntohs(sin6->sin6_port);
	bool is_set = (port != 0);
	int result;

	/*
	 * Universal-address form of @sap; freed again below.
	 * NOTE(review): rpc_sockaddr2uaddr() can return NULL on
	 * allocation failure and that is not checked here -- confirm
	 * downstream tolerance before changing.
	 */
	map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);

	dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
		"local rpcbind\n", (port ? "" : "un"),
		map->r_prog, map->r_vers,
		map->r_addr, map->r_netid);

	/* Zero port means unregister. */
	msg->rpc_proc = is_set ? &rpcb_procedures4[RPCBPROC_SET] :
				 &rpcb_procedures4[RPCBPROC_UNSET];

	result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
	kfree(map->r_addr);
	return result;
}
/*
 * Unregister every protocol family for the [program, version] in
 * @msg at once, by issuing an rpcbind v4 UNSET with an empty uaddr.
 */
static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
					     struct rpc_message *msg)
{
	struct rpcbind_args *map = msg->rpc_argp;

	/* An empty universal address matches all registrations. */
	map->r_addr = "";
	msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];

	dprintk("RPC: unregistering [%u, %u, '%s'] with "
		"local rpcbind\n",
		map->r_prog, map->r_vers, map->r_netid);

	return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
}
/**
* rpcb_v4_register - set or unset a port registration with the local rpcbind
* @net: target network namespace
* @program: RPC program number of service to (un)register
* @version: RPC version number of service to (un)register
* @address: address family, IP address, and port to (un)register
* @netid: netid of transport protocol to (un)register
*
* Returns zero if the registration request was dispatched successfully
* and the rpcbind daemon returned success. Otherwise, returns an errno
* value that reflects the nature of the error (request could not be
* dispatched, timed out, or rpcbind returned an error).
*
* RPC services invoke this function to advertise their contact
* information via the system's rpcbind daemon. RPC services
* invoke this function once for each [program, version, address,
* netid] tuple they wish to advertise.
*
* Callers may also unregister RPC services that are registered at a
* specific address by setting the port number in @address to zero.
* They may unregister all registered protocol families at once for
* a service by passing a NULL @address argument. If @netid is ""
* then all netids for [program, version, address] are unregistered.
*
* This function uses rpcbind protocol version 4 to contact the
* local rpcbind daemon. The local rpcbind daemon must support
* version 4 of the rpcbind protocol in order for these functions
* to register a service successfully.
*
* Supported netids include "udp" and "tcp" for UDP and TCP over
* IPv4, and "udp6" and "tcp6" for UDP and TCP over IPv6,
* respectively.
*
* The contents of @address determine the address family and the
* port to be registered. The usual practice is to pass INADDR_ANY
* as the raw address, but specifying a non-zero address is also
* supported by this API if the caller wishes to advertise an RPC
* service on a specific network interface.
*
* Note that passing in INADDR_ANY does not create the same service
* registration as IN6ADDR_ANY. The former advertises an RPC
* service on any IPv4 address, but not on IPv6. The latter
* advertises the service on all IPv4 and IPv6 addresses.
*/
int rpcb_v4_register(struct net *net, const u32 program, const u32 version,
		     const struct sockaddr *address, const char *netid)
{
	struct rpcbind_args map = {
		.r_prog		= program,
		.r_vers		= version,
		.r_netid	= netid,
		.r_owner	= RPCB_OWNER_STRING,
	};
	struct rpc_message msg = {
		.rpc_argp	= &map,
	};
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	/* v4 upcalls require the cached v4-capable local client. */
	if (sn->rpcb_local_clnt4 == NULL)
		return -EPROTONOSUPPORT;

	/* A NULL address drops every protocol family at once. */
	if (address == NULL)
		return rpcb_unregister_all_protofamilies(sn, &msg);

	if (address->sa_family == AF_INET)
		return rpcb_register_inet4(sn, address, &msg);
	if (address->sa_family == AF_INET6)
		return rpcb_register_inet6(sn, address, &msg);

	return -EAFNOSUPPORT;
}
static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbind_args *map, struct rpc_procinfo *proc)
{
struct rpc_message msg = {
.rpc_proc = proc,
.rpc_argp = map,
.rpc_resp = map,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = rpcb_clnt,
.rpc_message = &msg,
.callback_ops = &rpcb_getport_ops,
.callback_data = map,
.flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
};
return rpc_run_task(&task_setup_data);
}
/*
* In the case where rpc clients have been cloned, we want to make
* sure that we use the program number/version etc of the actual
* owner of the xprt. To do so, we walk back up the tree of parents
* to find whoever created the transport and/or whoever has the
* autobind flag set.
*/
static struct rpc_clnt *rpcb_find_transport_owner(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = clnt->cl_parent;
	/* cl_xprt is RCU-protected; caller holds rcu_read_lock() --
	 * see rpcb_getport_async(), the only caller visible here. */
	struct rpc_xprt *xprt = rcu_dereference(clnt->cl_xprt);

	/* Climb the clone chain while the parent shares our transport. */
	while (parent != clnt) {
		if (rcu_dereference(parent->cl_xprt) != xprt)
			break;
		/* Stop at the first client that has autobind set. */
		if (clnt->cl_autobind)
			break;
		clnt = parent;
		parent = parent->cl_parent;
	}
	return clnt;
}
/**
* rpcb_getport_async - obtain the port for a given RPC service on a given host
* @task: task that is waiting for portmapper request
*
* This one can be called for an ongoing RPC request, and can be used in
* an async (rpciod) context.
*/
void rpcb_getport_async(struct rpc_task *task)
{
	struct rpc_clnt *clnt;
	struct rpc_procinfo *proc;
	u32 bind_version;
	struct rpc_xprt *xprt;
	struct rpc_clnt *rpcb_clnt;
	struct rpcbind_args *map;
	struct rpc_task *child;
	struct sockaddr_storage addr;
	struct sockaddr *sap = (struct sockaddr *)&addr;
	size_t salen;
	int status;

	/*
	 * Find the transport's owner and pin the xprt.  Loop because
	 * xprt_get() can return NULL while the xprt is being replaced.
	 */
	rcu_read_lock();
	do {
		clnt = rpcb_find_transport_owner(task->tk_client);
		xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	} while (xprt == NULL);
	rcu_read_unlock();

	dprintk("RPC: %5u %s(%s, %u, %u, %d)\n",
		task->tk_pid, __func__,
		xprt->servername, clnt->cl_prog, clnt->cl_vers, xprt->prot);

	/* Put self on the wait queue to ensure we get notified if
	 * some other task is already attempting to bind the port */
	rpc_sleep_on(&xprt->binding, task, NULL);

	/* Lost the race: another task is already binding this xprt. */
	if (xprt_test_and_set_binding(xprt)) {
		dprintk("RPC: %5u %s: waiting for another binder\n",
			task->tk_pid, __func__);
		xprt_put(xprt);
		return;
	}

	/* Someone else may have bound if we slept */
	if (xprt_bound(xprt)) {
		status = 0;
		dprintk("RPC: %5u %s: already bound\n",
			task->tk_pid, __func__);
		goto bailout_nofree;
	}

	/* Parent transport's destination address */
	salen = rpc_peeraddr(clnt, sap, sizeof(addr));

	/* Don't ever use rpcbind v2 for AF_INET6 requests */
	switch (sap->sa_family) {
	case AF_INET:
		proc = rpcb_next_version[xprt->bind_index].rpc_proc;
		bind_version = rpcb_next_version[xprt->bind_index].rpc_vers;
		break;
	case AF_INET6:
		proc = rpcb_next_version6[xprt->bind_index].rpc_proc;
		bind_version = rpcb_next_version6[xprt->bind_index].rpc_vers;
		break;
	default:
		status = -EAFNOSUPPORT;
		dprintk("RPC: %5u %s: bad address family\n",
			task->tk_pid, __func__);
		goto bailout_nofree;
	}
	/* A NULL rpc_proc terminates the version fallback list. */
	if (proc == NULL) {
		xprt->bind_index = 0;
		status = -EPFNOSUPPORT;
		dprintk("RPC: %5u %s: no more getport versions available\n",
			task->tk_pid, __func__);
		goto bailout_nofree;
	}

	dprintk("RPC: %5u %s: trying rpcbind version %u\n",
		task->tk_pid, __func__, bind_version);

	rpcb_clnt = rpcb_create(xprt->xprt_net,
				clnt->cl_nodename,
				xprt->servername, sap, salen,
				xprt->prot, bind_version);
	if (IS_ERR(rpcb_clnt)) {
		status = PTR_ERR(rpcb_clnt);
		dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n",
			task->tk_pid, __func__, PTR_ERR(rpcb_clnt));
		goto bailout_nofree;
	}

	/* GFP_ATOMIC: may run in rpciod (async) context */
	map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC);
	if (!map) {
		status = -ENOMEM;
		dprintk("RPC: %5u %s: no memory available\n",
			task->tk_pid, __func__);
		goto bailout_release_client;
	}
	map->r_prog = clnt->cl_prog;
	map->r_vers = clnt->cl_vers;
	map->r_prot = xprt->prot;
	map->r_port = 0;
	map->r_xprt = xprt;
	map->r_status = -EIO;

	/* v3/v4 use [netid, uaddr, owner]; v2 uses the numeric fields only */
	switch (bind_version) {
	case RPCBVERS_4:
	case RPCBVERS_3:
		map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
		map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
		map->r_owner = "";
		break;
	case RPCBVERS_2:
		map->r_addr = NULL;
		break;
	default:
		BUG();
	}

	child = rpcb_call_async(rpcb_clnt, map, proc);
	/* the child task holds its own reference on rpcb_clnt */
	rpc_release_client(rpcb_clnt);
	if (IS_ERR(child)) {
		/* rpcb_map_release() has freed the arguments */
		dprintk("RPC: %5u %s: rpc_run_task failed\n",
			task->tk_pid, __func__);
		return;
	}

	xprt->stat.bind_count++;
	rpc_put_task(child);
	return;

bailout_release_client:
	rpc_release_client(rpcb_clnt);
bailout_nofree:
	rpcb_wake_rpcbind_waiters(xprt, status);
	task->tk_status = status;
	xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(rpcb_getport_async);
/*
* Rpcbind child task calls this callback via tk_exit.
*/
/*
 * Completion callback for the async rpcbind child task: translate the
 * reply into a port on the waiting transport, or record why binding
 * failed in map->r_status.
 */
static void rpcb_getport_done(struct rpc_task *child, void *data)
{
	struct rpcbind_args *map = data;
	struct rpc_xprt *xprt = map->r_xprt;
	int status = child->tk_status;

	/* Garbage reply: retry with a lesser rpcbind version */
	if (status == -EIO)
		status = -EPROTONOSUPPORT;

	/* rpcbind server doesn't support this rpcbind protocol version */
	if (status == -EPROTONOSUPPORT)
		xprt->bind_index++;

	if (status >= 0 && map->r_port != 0) {
		/* Succeeded */
		xprt->ops->set_port(xprt, map->r_port);
		xprt_set_bound(xprt);
		status = 0;
	} else {
		/* Either the rpcbind server was unreachable, or the
		 * requested service wasn't registered on the remote host. */
		xprt->ops->set_port(xprt, 0);
		if (status >= 0)
			status = -EACCES;
	}

	dprintk("RPC: %5u rpcb_getport_done(status %d, port %u)\n",
		child->tk_pid, status, map->r_port);

	map->r_status = status;
}
/*
* XDR functions for rpcbind
*/
/*
 * Encode an rpcbind v2 mapping: four 32-bit words in the order
 * program, version, protocol, port.
 */
static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
			     const struct rpcbind_args *rpcb)
{
	__be32 *q;

	dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
		req->rq_task->tk_pid,
		req->rq_task->tk_msg.rpc_proc->p_name,
		rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);

	/* size is in words; << 2 converts to bytes */
	q = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
	q[0] = cpu_to_be32(rpcb->r_prog);
	q[1] = cpu_to_be32(rpcb->r_vers);
	q[2] = cpu_to_be32(rpcb->r_prot);
	q[3] = cpu_to_be32(rpcb->r_port);
}
/*
 * Decode a PMAP GETPORT reply: one 32-bit word.  Values above
 * USHRT_MAX are malformed; a zero r_port is left in place until a
 * valid port is decoded.
 */
static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
			    struct rpcbind_args *rpcb)
{
	__be32 *p;
	unsigned long raw_port;

	rpcb->r_port = 0;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;
	raw_port = be32_to_cpup(p);

	dprintk("RPC: %5u PMAP_%s result: %lu\n", req->rq_task->tk_pid,
		req->rq_task->tk_msg.rpc_proc->p_name, raw_port);

	if (unlikely(raw_port > USHRT_MAX))
		return -EIO;

	rpcb->r_port = raw_port;
	return 0;
}
/*
 * Decode the boolean reply of an rpcbind SET/UNSET call into
 * *boolp (1 = rpcbind granted the request, 0 = refused).
 */
static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
			unsigned int *boolp)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;

	*boolp = (*p != xdr_zero) ? 1 : 0;

	dprintk("RPC: %5u RPCB_%s call %s\n",
		req->rq_task->tk_pid,
		req->rq_task->tk_msg.rpc_proc->p_name,
		(*boolp ? "succeeded" : "failed"));
	return 0;
}
/*
 * Encode a counted opaque string, clamping it to @maxstrlen.  An
 * oversized string indicates a caller bug, so warn once and truncate
 * rather than overrun the reserved buffer.
 */
static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
			       const u32 maxstrlen)
{
	u32 len = strlen(string);
	__be32 *p;

	WARN_ON_ONCE(len > maxstrlen);
	if (len > maxstrlen)
		/* truncate and hope for the best */
		len = maxstrlen;

	/* 4 bytes of length prefix plus the payload */
	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, string, len);
}
/*
 * Encode rpcbind v3/v4 arguments: program and version as 32-bit
 * words, followed by the netid, universal address, and owner strings.
 */
static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
			     const struct rpcbind_args *rpcb)
{
	__be32 *q;

	dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
		req->rq_task->tk_pid,
		req->rq_task->tk_msg.rpc_proc->p_name,
		rpcb->r_prog, rpcb->r_vers,
		rpcb->r_netid, rpcb->r_addr);

	q = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
	q[0] = cpu_to_be32(rpcb->r_prog);
	q[1] = cpu_to_be32(rpcb->r_vers);

	encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
	encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
	encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
}
/*
 * Decode an rpcbind v3/v4 GETADDR reply: a counted universal-address
 * string.  An empty string means the service is not registered; a
 * non-empty one is converted back to a sockaddr and the embedded port
 * is stored in rpcb->r_port.  Returns 0 on success, -EIO on any
 * malformed reply.
 */
static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
			    struct rpcbind_args *rpcb)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	__be32 *p;
	u32 len;

	rpcb->r_port = 0;

	/* opaque string length prefix */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		goto out_fail;
	len = be32_to_cpup(p);

	/*
	 * If the returned universal address is a null string,
	 * the requested RPC service was not registered.
	 */
	if (len == 0) {
		dprintk("RPC: %5u RPCB reply: program not registered\n",
			req->rq_task->tk_pid);
		return 0;
	}

	if (unlikely(len > RPCBIND_MAXUADDRLEN))
		goto out_fail;
	p = xdr_inline_decode(xdr, len);
	if (unlikely(p == NULL))
		goto out_fail;
	dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid,
		req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);

	/* a zero return from rpc_uaddr2sockaddr() marks a bad uaddr */
	if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
			       sap, sizeof(address)) == 0)
		goto out_fail;
	rpcb->r_port = rpc_get_port(sap);

	return 0;

out_fail:
	dprintk("RPC: %5u malformed RPCB_%s reply\n",
		req->rq_task->tk_pid,
		req->rq_task->tk_msg.rpc_proc->p_name);
	return -EIO;
}
/*
* Not all rpcbind procedures described in RFC 1833 are implemented
* since the Linux kernel RPC code requires only these.
*/
/* rpcbind version 2 (PMAP-style) procedures */
static struct rpc_procinfo rpcb_procedures2[] = {
	[RPCBPROC_SET] = {
		.p_proc = RPCBPROC_SET,
		.p_encode = (kxdreproc_t)rpcb_enc_mapping,
		.p_decode = (kxdrdproc_t)rpcb_dec_set,
		.p_arglen = RPCB_mappingargs_sz,
		.p_replen = RPCB_setres_sz,
		.p_statidx = RPCBPROC_SET,
		.p_timer = 0,
		.p_name = "SET",
	},
	[RPCBPROC_UNSET] = {
		.p_proc = RPCBPROC_UNSET,
		.p_encode = (kxdreproc_t)rpcb_enc_mapping,
		.p_decode = (kxdrdproc_t)rpcb_dec_set,
		.p_arglen = RPCB_mappingargs_sz,
		.p_replen = RPCB_setres_sz,
		.p_statidx = RPCBPROC_UNSET,
		.p_timer = 0,
		.p_name = "UNSET",
	},
	[RPCBPROC_GETPORT] = {
		.p_proc = RPCBPROC_GETPORT,
		.p_encode = (kxdreproc_t)rpcb_enc_mapping,
		.p_decode = (kxdrdproc_t)rpcb_dec_getport,
		.p_arglen = RPCB_mappingargs_sz,
		.p_replen = RPCB_getportres_sz,
		.p_statidx = RPCBPROC_GETPORT,
		.p_timer = 0,
		.p_name = "GETPORT",
	},
};

/* rpcbind version 3 procedures (string-based getaddr arguments) */
static struct rpc_procinfo rpcb_procedures3[] = {
	[RPCBPROC_SET] = {
		.p_proc = RPCBPROC_SET,
		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode = (kxdrdproc_t)rpcb_dec_set,
		.p_arglen = RPCB_getaddrargs_sz,
		.p_replen = RPCB_setres_sz,
		.p_statidx = RPCBPROC_SET,
		.p_timer = 0,
		.p_name = "SET",
	},
	[RPCBPROC_UNSET] = {
		.p_proc = RPCBPROC_UNSET,
		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode = (kxdrdproc_t)rpcb_dec_set,
		.p_arglen = RPCB_getaddrargs_sz,
		.p_replen = RPCB_setres_sz,
		.p_statidx = RPCBPROC_UNSET,
		.p_timer = 0,
		.p_name = "UNSET",
	},
	[RPCBPROC_GETADDR] = {
		.p_proc = RPCBPROC_GETADDR,
		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
		.p_arglen = RPCB_getaddrargs_sz,
		.p_replen = RPCB_getaddrres_sz,
		.p_statidx = RPCBPROC_GETADDR,
		.p_timer = 0,
		.p_name = "GETADDR",
	},
};

/* rpcbind version 4 procedures (same wire format as v3 for these) */
static struct rpc_procinfo rpcb_procedures4[] = {
	[RPCBPROC_SET] = {
		.p_proc = RPCBPROC_SET,
		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode = (kxdrdproc_t)rpcb_dec_set,
		.p_arglen = RPCB_getaddrargs_sz,
		.p_replen = RPCB_setres_sz,
		.p_statidx = RPCBPROC_SET,
		.p_timer = 0,
		.p_name = "SET",
	},
	[RPCBPROC_UNSET] = {
		.p_proc = RPCBPROC_UNSET,
		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode = (kxdrdproc_t)rpcb_dec_set,
		.p_arglen = RPCB_getaddrargs_sz,
		.p_replen = RPCB_setres_sz,
		.p_statidx = RPCBPROC_UNSET,
		.p_timer = 0,
		.p_name = "UNSET",
	},
	[RPCBPROC_GETADDR] = {
		.p_proc = RPCBPROC_GETADDR,
		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
		.p_arglen = RPCB_getaddrargs_sz,
		.p_replen = RPCB_getaddrres_sz,
		.p_statidx = RPCBPROC_GETADDR,
		.p_timer = 0,
		.p_name = "GETADDR",
	},
};
/*
 * Version fallback list for AF_INET destinations: only rpcbind v2
 * (PMAP GETPORT) is attempted.  A NULL rpc_proc terminates the list
 * (see the proc == NULL check in rpcb_getport_async()).
 */
static const struct rpcb_info rpcb_next_version[] = {
	{
		.rpc_vers = RPCBVERS_2,
		.rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT],
	},
	{
		.rpc_proc = NULL,
	},
};

/*
 * Version fallback list for AF_INET6 destinations: try rpcbind v4
 * first, then v3.  v2 carries no address family information and is
 * never used for IPv6.
 */
static const struct rpcb_info rpcb_next_version6[] = {
	{
		.rpc_vers = RPCBVERS_4,
		.rpc_proc = &rpcb_procedures4[RPCBPROC_GETADDR],
	},
	{
		.rpc_vers = RPCBVERS_3,
		.rpc_proc = &rpcb_procedures3[RPCBPROC_GETADDR],
	},
	{
		.rpc_proc = NULL,
	},
};

static const struct rpc_version rpcb_version2 = {
	.number		= RPCBVERS_2,
	.nrprocs	= ARRAY_SIZE(rpcb_procedures2),
	.procs		= rpcb_procedures2
};

static const struct rpc_version rpcb_version3 = {
	.number		= RPCBVERS_3,
	.nrprocs	= ARRAY_SIZE(rpcb_procedures3),
	.procs		= rpcb_procedures3
};

static const struct rpc_version rpcb_version4 = {
	.number		= RPCBVERS_4,
	.nrprocs	= ARRAY_SIZE(rpcb_procedures4),
	.procs		= rpcb_procedures4
};

/* Indexed by version number; versions 0 and 1 do not exist. */
static const struct rpc_version *rpcb_version[] = {
	NULL,
	NULL,
	&rpcb_version2,
	&rpcb_version3,
	&rpcb_version4
};

static struct rpc_stat rpcb_stats;

/* The rpcbind RPC program, referenced by all the clients above. */
static const struct rpc_program rpcb_program = {
	.name		= "rpcbind",
	.number		= RPCBIND_PROGRAM,
	.nrvers		= ARRAY_SIZE(rpcb_version),
	.version	= rpcb_version,
	.stats		= &rpcb_stats,
};
| gpl-2.0 |
Odin-Kernel/kernel_huawei_angler | drivers/dma/ioat/dca.c | 2750 | 18204 | /*
* Intel I/OAT DMA Linux driver
* Copyright(c) 2007 - 2009 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>
/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif
#include "dma.h"
#include "registers.h"
/*
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
* contain the bit number of the APIC ID to map into the DCA tag. If the valid
* bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
*/
#define DCA_TAG_MAP_VALID 0x80
#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1
#define DCA_TAG_MAP_MASK 0xDF
/* expected tag map bytes for I/OAT ver.2 */
#define DCA2_TAG_MAP_BYTE0 0x80
#define DCA2_TAG_MAP_BYTE1 0x0
#define DCA2_TAG_MAP_BYTE2 0x81
#define DCA2_TAG_MAP_BYTE3 0x82
#define DCA2_TAG_MAP_BYTE4 0x82
/* verify if tag map matches expected values */
/*
 * Verify that the BIOS-programmed tag map matches the values expected
 * for I/OAT ver.2 hardware.  Returns 1 when all five bytes match.
 */
static inline int dca2_tag_map_valid(u8 *tag_map)
{
	static const u8 expected[] = {
		DCA2_TAG_MAP_BYTE0,
		DCA2_TAG_MAP_BYTE1,
		DCA2_TAG_MAP_BYTE2,
		DCA2_TAG_MAP_BYTE3,
		DCA2_TAG_MAP_BYTE4,
	};
	int i;

	for (i = 0; i < 5; i++)
		if (tag_map[i] != expected[i])
			return 0;
	return 1;
}
/*
* "Legacy" DCA systems do not implement the DCA register set in the
* I/OAT device. Software needs direct support for their tag mappings.
*/
#define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN 8
/* Tag map for PCI_DEVICE_ID_INTEL_IOAT devices (see ioat_dca_init()) */
static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
/* Tag map for PCI_DEVICE_ID_INTEL_IOAT_SCNB devices */
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
/* Tag map for PCI_DEVICE_ID_INTEL_IOAT_CNB devices */
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
/* All-zero map for the Unisys DMA director */
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
return (pci->bus->number << 8) | pci->devfn;
}
/*
 * CPUID leaf 9 reports the DCA configuration; bit 0 of EAX indicates
 * that the BIOS has enabled DCA.  Returns non-zero when enabled.
 */
static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	unsigned long dca_config = cpuid_eax(9);
	int enabled = test_bit(0, &dca_config);

	if (!enabled)
		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
	return enabled;
}
/*
 * DCA is usable only when the boot CPU advertises the feature AND the
 * BIOS has turned it on.  Returns non-zero when both hold.
 */
int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (!boot_cpu_has(X86_FEATURE_DCA)) {
		dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
		return 0;
	}
	return dca_enabled_in_bios(pdev);
}
/* One registered DCA requester (a PCI function doing DMA). */
struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

/*
 * Per-provider private state, allocated by alloc_dca_provider() with
 * a trailing array of max_requesters req_slots entries.
 */
struct ioat_dca_priv {
	void __iomem		*iobase;	/* device register base */
	void __iomem		*dca_base;	/* DCA register block */
	int			 max_requesters;
	int			 requester_count;
	u8			 tag_map[IOAT_TAG_MAP_LEN];	/* APIC-ID-to-tag map */
	struct ioat_dca_slot 	 req_slots[0];	/* variable-length slot array */
};
/* 5000 series chipset DCA Port Requester ID Table Entry Format
* [15:8] PCI-Express Bus Number
* [7:3] PCI-Express Device Number
* [2:0] PCI-Express Function Number
*
* 5000 series chipset DCA control register format
* [7:1] Reserved (0)
* [0] Ignore Function Number
*/
/*
 * Register @dev as a DCA requester: record it in the first free
 * software slot and program its PCI requester ID into the chipset's
 * requester table.  Returns the slot index used, or a negative errno.
 */
static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			/* each hardware table entry is 4 bytes wide */
			writew(id, ioatdca->dca_base + (i * 4));
			/* make sure the ignore function bit is off */
			writeb(0, ioatdca->dca_base + (i * 4) + 2);
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}
/*
 * Unregister @dev: clear its hardware table entry, then release its
 * software slot.  Returns the freed slot index, or -ENODEV when @dev
 * was never registered.
 */
static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			/* disable the hardware entry before dropping it */
			writew(0, ioatdca->dca_base + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
/*
 * Build the DCA tag for @cpu from the provider's tag map.  Each map
 * entry either selects one bit of the CPU's APIC ID (when the VALID
 * flag is set) or contributes a literal 0/1 bit.
 */
static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int apic_id = cpu_physical_id(cpu);
	u8 tag = 0;
	int i;

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		u8 entry = ioatdca->tag_map[i];
		int bitval;

		if (entry & DCA_TAG_MAP_VALID) {
			/* entry names a bit position within the APIC ID */
			int bit = entry & ~DCA_TAG_MAP_VALID;

			bitval = (apic_id & (1 << bit)) ? 1 : 0;
		} else {
			/* literal 0/1 entry */
			bitval = entry ? 1 : 0;
		}
		tag |= bitval << i;
	}
	return tag;
}
/* Return 1 if @dev currently occupies one of this provider's slots. */
static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev = to_pci_dev(dev);
	int slot;

	for (slot = 0; slot < ioatdca->max_requesters; slot++)
		if (ioatdca->req_slots[slot].pdev == pdev)
			return 1;
	return 0;
}
/* DCA callbacks for I/OAT v1 devices (legacy fixed tag maps). */
static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
/*
 * Set up a DCA provider for an I/OAT v1-style device.  These devices
 * do not expose the DCA register set, so DCA works only for devices
 * with a known tag map; anything else gets no provider.  Returns the
 * registered provider, or NULL on any failure.
 */
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	/* v3.0 hardware has a smaller requester table */
	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	/* private area includes the variable-length req_slots[] array */
	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	/* NOTE(review): 0x54 appears to be the fixed DCA register offset
	 * on these devices -- confirm against the hardware spec. */
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
/*
 * Register @dev as a DCA requester on v2 hardware.  Unlike v1, the
 * requester table lives at a device-reported offset (read from
 * IOAT_DCA_GREQID_OFFSET) and entries are 32 bits carrying a VALID
 * flag.  Returns the slot index used, or a negative errno.
 */
static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}
/*
 * Unregister @dev on v2 hardware: zero its entry in the global
 * requester table (clearing the VALID flag) and free the software
 * slot.  Returns the freed slot index, or -ENODEV.
 */
static int ioat2_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
/*
 * v2 tag format: the low five bits of the v1 tag, inverted.
 */
static u8 ioat2_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	return (~ioat_dca_get_tag(dca, dev, cpu)) & 0x1F;
}
/* DCA callbacks for I/OAT v2 devices (hardware-reported register set). */
static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
/*
 * Count the entries in the hardware's global requester table by
 * walking it until an entry with the LASTID flag is found.  Returns 0
 * when the device reports no table.
 */
static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}
/*
 * Set up a DCA provider for an I/OAT v2 device.  The DCA register
 * block offset and requester table size are read from the hardware;
 * the tag map is read from the device and validated against the
 * expected v2 layout.  Returns the registered provider, or NULL on
 * any failure (including a BIOS-misprogrammed tag map).
 */
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u32 tag_map;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* a zero offset means the device exposes no DCA registers */
	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	/* private area includes the variable-length req_slots[] array */
	dca = alloc_dca_provider(&ioat2_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map: one 4-bit nibble per entry,
	 * values below 8 are APIC-ID bit selectors, others map to 0 */
	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
	for (i = 0; i < 5; i++) {
		bit = (tag_map >> (4 * i)) & 0x0f;
		if (bit < 8)
			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
		else
			ioatdca->tag_map[i] = 0;
	}

	/* refuse to offer DCA if the BIOS programmed a bogus tag map */
	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
/*
 * ioat3_dca_add_requester - claim a DCA slot for @dev on v3 hardware
 *
 * Records the device's requester id in a free software slot and programs
 * the matching hardware global-requester-table entry.  Returns the slot
 * index, -ENODEV for non-PCIe devices or a full table, or -EFAULT when
 * the bookkeeping count disagrees with the actual slot contents.
 */
static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *priv = dca_priv(dca);
	struct pci_dev *pdev;
	u16 table, id;
	int slot;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (priv->requester_count == priv->max_requesters)
		return -ENODEV;

	for (slot = 0; slot < priv->max_requesters; slot++) {
		if (priv->req_slots[slot].pdev)
			continue;
		/* found an empty slot */
		priv->requester_count++;
		priv->req_slots[slot].pdev = pdev;
		priv->req_slots[slot].rid = id;
		table = readw(priv->dca_base + IOAT3_DCA_GREQID_OFFSET);
		writel(id | IOAT_DCA_GREQID_VALID,
		       priv->iobase + table + (slot * 4));
		return slot;
	}
	/* Error, ioatdma->requester_count is out of whack */
	return -EFAULT;
}
/*
 * ioat3_dca_remove_requester - release the DCA slot held by @dev
 *
 * Invalidates the device's global-requester-table entry and frees its
 * software slot.  Returns the freed slot index, or -ENODEV if @dev is
 * not PCI-Express or holds no slot.
 */
static int ioat3_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *priv = dca_priv(dca);
	struct pci_dev *pdev;
	u16 table;
	int slot;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (slot = 0; slot < priv->max_requesters; slot++) {
		if (priv->req_slots[slot].pdev != pdev)
			continue;
		/* clear the hardware entry first, then release the slot */
		table = readw(priv->dca_base + IOAT3_DCA_GREQID_OFFSET);
		writel(0, priv->iobase + table + (slot * 4));
		priv->req_slots[slot].pdev = NULL;
		priv->req_slots[slot].rid = 0;
		priv->requester_count--;
		return slot;
	}
	return -ENODEV;
}
/*
 * ioat3_dca_get_tag - build the DCA tag for @cpu from the v3 tag map
 *
 * Each tag_map[] byte decides one bit of the tag: it either selects an
 * APIC-ID bit (optionally inverted) or supplies a literal 0/1 value.
 */
static u8 ioat3_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			/* low bits name the APIC-ID bit to copy */
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			/* low bits name the APIC-ID bit to copy, inverted */
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			/* neither select flag set: the entry is a literal */
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}
/* DCA provider callbacks for IOAT v3 hardware. */
static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
{
int slots = 0;
u32 req;
u16 global_req_table;
global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
if (global_req_table == 0)
return 0;
do {
req = readl(iobase + global_req_table + (slots * sizeof(u32)));
slots++;
} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
return slots;
}
static inline int dca3_tag_map_invalid(u8 *tag_map)
{
	/*
	 * If the tag map is not programmed by the BIOS the default is:
	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
	 *
	 * This an invalid map and will result in only 2 possible tags
	 * 0x1F and 0x00.  0x00 is an invalid DCA tag so we know that
	 * this entire definition is invalid.
	 *
	 * The map is invalid exactly when the first five entries all
	 * carry only the VALID flag.
	 */
	int i;

	for (i = 0; i < 5; i++) {
		if (tag_map[i] != DCA_TAG_MAP_VALID)
			return 0;
	}
	return 1;
}
/*
 * ioat3_dca_init - probe v3 DCA support and register a DCA provider
 *
 * Returns the registered provider, or NULL if DCA is unavailable,
 * the BIOS left the default (unusable) tag map, or registration fails.
 */
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	/* the 64-bit tag map is read as two 32-bit register halves */
	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* a zero offset means the device exposes no DCA register block */
	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	/* provider private area is followed by one slot record per requester */
	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}

	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map; one byte per tag bit */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	/* the BIOS-default tag map cannot produce valid tags - bail out */
	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
				dev_driver_string(&pdev->dev),
				dev_name(&pdev->dev));
		free_dca_provider(dca);
		return NULL;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
| gpl-2.0 |
curbthepain/revkernel_ubers5 | drivers/gpu/drm/radeon/evergreen.c | 2750 | 111356 | /*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
/*
 * evergreen_tiling_fields - unpack tiling flags into surface parameters
 *
 * Extracts bank width/height, macro-tile aspect and tile-split from the
 * packed RADEON_TILING_EG_* fields of @tiling_flags, then converts the
 * raw 1/2/4/8 values to the EVERGREEN_ADDR_SURF_* register encodings.
 * Unrecognized raw values fall through to the "1" encoding (default
 * case shared with case 1 in each switch).
 */
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
			     unsigned *bankh, unsigned *mtaspect,
			     unsigned *tile_split)
{
	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
	switch (*bankw) {
	default:
	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
	}
	switch (*bankh) {
	default:
	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
	}
	switch (*mtaspect) {
	default:
	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
	}
}
/*
 * evergreen_fix_pci_max_read_req_size - sanitize the PCIe MRRS field
 *
 * Reads the Max_Read_Request_Size field (bits 14:12 of DEVCTL) and,
 * when the BIOS/OS left an invalid encoding, rewrites it to 2
 * (presumably the 512-byte setting per the PCIe DEVCTL encoding —
 * confirm against the PCIe base spec).  Silently does nothing if the
 * device has no PCIe capability or the config read fails.
 */
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
	u16 ctl, v;
	int cap, err;

	cap = pci_pcie_cap(rdev->pdev);
	if (!cap)
		return;

	err = pci_read_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		return;

	v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
	 * to avoid hangs or perfomance issues
	 */
	if ((v == 0) || (v == 6) || (v == 7)) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= (2 << 12);
		pci_write_config_word(rdev->pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
}
/*
 * dce4_wait_for_vblank - busy-wait until the next vblank on @crtc begins
 *
 * No-op when the CRTC is not enabled.  Each wait phase is bounded by
 * rdev->usec_timeout microseconds.
 */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	int i;

	if (!(RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) &
	      EVERGREEN_CRTC_MASTER_EN))
		return;

	/* first leave any vblank already in progress ... */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) &
		      EVERGREEN_CRTC_V_BLANK))
			break;
		udelay(1);
	}
	/* ... then wait for the next one to start */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) &
		    EVERGREEN_CRTC_V_BLANK)
			break;
		udelay(1);
	}
}
/* Take a reference on the pflip interrupt for @crtc before flipping. */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
/* Drop the pflip interrupt reference taken by evergreen_pre_page_flip(). */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
/*
 * evergreen_page_flip - program a new scanout base address for @crtc_id
 *
 * Latches @crtc_base into both the primary and secondary surface address
 * registers under the GRPH update lock, then releases the lock so the
 * hardware can apply the change at the next vblank.  Returns the
 * (nonzero) SURFACE_UPDATE_PENDING status so the caller can tell whether
 * the flip is still outstanding.
 */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
	int i;

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp, toffset;
	int actual_temp = 0;

	if (rdev->family == CHIP_JUNIPER) {
		/* Juniper: raw sensor reading corrected by a signed
		 * (9-bit, sign in 0x100) calibration offset */
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;
	} else {
		/* other families: ASIC_T is a signed value; 0x400/0x200
		 * flag out-of-range readings which are clamped */
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			/* negative: sign-extend the 9-bit two's-complement value */
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}
/* Read the Sumo thermal sensor; returns temperature in millidegrees. */
int sumo_get_temp(struct radeon_device *rdev)
{
	int raw = RREG32(CG_THERMAL_STATUS) & 0xff;

	/* the sensor reports with a fixed +49 degree bias */
	return (raw - 49) * 1000;
}
/*
 * sumo_pm_init_profile - populate the power-management profile table
 *
 * Default tracks the default power state; low/mid profiles use the
 * battery state on mobility parts (performance otherwise); high
 * profiles use the performance state with the highest clock mode
 * selected while the display is on.
 */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
	static const int low_mid[] = {
		PM_PROFILE_LOW_SH_IDX, PM_PROFILE_LOW_MH_IDX,
		PM_PROFILE_MID_SH_IDX, PM_PROFILE_MID_MH_IDX,
	};
	static const int high[] = {
		PM_PROFILE_HIGH_SH_IDX, PM_PROFILE_HIGH_MH_IDX,
	};
	int idx, i, p;

	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx =
		rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx =
		rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;

	/* low,mid sh/mh */
	if (rdev->flags & RADEON_IS_MOBILITY)
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
	else
		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	for (i = 0; i < (int)(sizeof(low_mid) / sizeof(low_mid[0])); i++) {
		p = low_mid[i];
		rdev->pm.profiles[p].dpms_off_ps_idx = idx;
		rdev->pm.profiles[p].dpms_on_ps_idx = idx;
		rdev->pm.profiles[p].dpms_off_cm_idx = 0;
		rdev->pm.profiles[p].dpms_on_cm_idx = 0;
	}

	/* high sh/mh */
	idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
	for (i = 0; i < (int)(sizeof(high) / sizeof(high[0])); i++) {
		p = high[i];
		rdev->pm.profiles[p].dpms_off_ps_idx = idx;
		rdev->pm.profiles[p].dpms_on_ps_idx = idx;
		rdev->pm.profiles[p].dpms_off_cm_idx = 0;
		rdev->pm.profiles[p].dpms_on_cm_idx =
			rdev->pm.power_state[idx].num_clock_modes - 1;
	}
}
/*
 * evergreen_pm_misc - apply the voltage of the requested power state
 *
 * Programs VDDC and VDDCI for the currently requested power state /
 * clock mode when they differ from the current values.  The magic
 * value 0xff01 is a "use default" flag, not a real voltage, and is
 * skipped.
 */
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if (voltage->type == VOLTAGE_SW) {
		/* 0xff01 is a flag rather then an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
		}
		/* 0xff01 is a flag rather then an actual voltage */
		if (voltage->vddci == 0xff01)
			return;
		if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
			radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
			rdev->pm.current_vddci = voltage->vddci;
			DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
		}
	}
}
/*
 * evergreen_pm_prepare - stall display memory requests before reclocking
 *
 * Sets DISP_READ_REQUEST_DISABLE on every enabled CRTC.
 */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	u32 ctl;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct radeon_crtc *rcrtc = to_radeon_crtc(crtc);

		if (!rcrtc->enabled)
			continue;
		ctl = RREG32(EVERGREEN_CRTC_CONTROL + rcrtc->crtc_offset);
		ctl |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
		WREG32(EVERGREEN_CRTC_CONTROL + rcrtc->crtc_offset, ctl);
	}
}
/*
 * evergreen_pm_finish - resume display memory requests after reclocking
 *
 * Clears DISP_READ_REQUEST_DISABLE on every enabled CRTC, undoing
 * evergreen_pm_prepare().
 */
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	u32 ctl;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		struct radeon_crtc *rcrtc = to_radeon_crtc(crtc);

		if (!rcrtc->enabled)
			continue;
		ctl = RREG32(EVERGREEN_CRTC_CONTROL + rcrtc->crtc_offset);
		ctl &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
		WREG32(EVERGREEN_CRTC_CONTROL + rcrtc->crtc_offset, ctl);
	}
}
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
switch (hpd) {
case RADEON_HPD_1:
if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_2:
if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_3:
if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_4:
if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_5:
if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
break;
default:
break;
}
return connected;
}
/*
 * evergreen_hpd_set_polarity - arm the HPD interrupt for the next event
 *
 * When a connector is present the interrupt polarity is set to fire on
 * unplug, and vice versa.  Unknown pins are ignored.
 */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	bool connected = evergreen_hpd_sense(rdev, hpd);
	u32 reg, tmp;

	switch (hpd) {
	case RADEON_HPD_1: reg = DC_HPD1_INT_CONTROL; break;
	case RADEON_HPD_2: reg = DC_HPD2_INT_CONTROL; break;
	case RADEON_HPD_3: reg = DC_HPD3_INT_CONTROL; break;
	case RADEON_HPD_4: reg = DC_HPD4_INT_CONTROL; break;
	case RADEON_HPD_5: reg = DC_HPD5_INT_CONTROL; break;
	case RADEON_HPD_6: reg = DC_HPD6_INT_CONTROL; break;
	default:
		return;
	}

	tmp = RREG32(reg);
	if (connected)
		tmp &= ~DC_HPDx_INT_POLARITY;
	else
		tmp |= DC_HPDx_INT_POLARITY;
	WREG32(reg, tmp);
}
/*
 * evergreen_hpd_init - enable hot-plug detection for all connectors
 *
 * Programs each connector's HPD control register with the connection
 * and RX interrupt timers, marks its interrupt as wanted, sets the
 * interrupt polarity, and finally commits the interrupt state.
 */
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		u32 reg = 0;
		int idx = -1;

		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1: reg = DC_HPD1_CONTROL; idx = 0; break;
		case RADEON_HPD_2: reg = DC_HPD2_CONTROL; idx = 1; break;
		case RADEON_HPD_3: reg = DC_HPD3_CONTROL; idx = 2; break;
		case RADEON_HPD_4: reg = DC_HPD4_CONTROL; idx = 3; break;
		case RADEON_HPD_5: reg = DC_HPD5_CONTROL; idx = 4; break;
		case RADEON_HPD_6: reg = DC_HPD6_CONTROL; idx = 5; break;
		default:
			break;
		}
		if (idx >= 0) {
			WREG32(reg, tmp);
			rdev->irq.hpd[idx] = true;
		}
		/* polarity is set even for pins without an HPD register */
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}
/*
 * evergreen_hpd_fini - disable hot-plug detection for all connectors
 *
 * Clears each connector's HPD control register and drops its interrupt
 * request flag; the inverse of evergreen_hpd_init().
 */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		u32 reg = 0;
		int idx = -1;

		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1: reg = DC_HPD1_CONTROL; idx = 0; break;
		case RADEON_HPD_2: reg = DC_HPD2_CONTROL; idx = 1; break;
		case RADEON_HPD_3: reg = DC_HPD3_CONTROL; idx = 2; break;
		case RADEON_HPD_4: reg = DC_HPD4_CONTROL; idx = 3; break;
		case RADEON_HPD_5: reg = DC_HPD5_CONTROL; idx = 4; break;
		case RADEON_HPD_6: reg = DC_HPD6_CONTROL; idx = 5; break;
		default:
			break;
		}
		if (idx >= 0) {
			WREG32(reg, 0);
			rdev->irq.hpd[idx] = false;
		}
	}
}
/* watermark setup */
/*
 * evergreen_line_buffer_adjust - pick a line buffer split for a crtc
 *
 * Programs DC_LB_MEMORY_SPLIT for @radeon_crtc and returns the number
 * of line-buffer entries granted to it (0 when the crtc is disabled).
 * DCE5 parts have larger line buffers than DCE4, hence the split sizes
 * differ.
 */
static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The paritioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2), other crtc must be disabled
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2), other crtc must be disabled
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs.  Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (radeon_crtc->base.enabled && mode) {
		if (other_mode)
			tmp = 0; /* 1/2 */
		else
			tmp = 2; /* whole */
	} else
		tmp = 0;

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (radeon_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			if (ASIC_IS_DCE5(rdev))
				return 4096 * 2;
			else
				return 3840 * 2;
		case 1:
		case 5:
			if (ASIC_IS_DCE5(rdev))
				return 6144 * 2;
			else
				return 5760 * 2;
		case 2:
		case 6:
			if (ASIC_IS_DCE5(rdev))
				return 8192 * 2;
			else
				return 7680 * 2;
		case 3:
		case 7:
			if (ASIC_IS_DCE5(rdev))
				return 2048 * 2;
			else
				return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
/*
 * evergreen_get_number_of_dram_channels - read the DRAM channel count
 *
 * The NOOFCHAN field encodes the channel count as a power of two:
 * 0 -> 1, 1 -> 2, 2 -> 4, 3 -> 8.  Unexpected values default to 1.
 */
u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 cfg = (RREG32(MC_SHARED_CHMAP) & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT;

	if (cfg > 3)
		return 1;
	return 1u << cfg;
}
/* Inputs for the display watermark calculations below. */
struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
/* Raw DRAM bandwidth derated by a 0.7 efficiency factor (MB/s). */
static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	fixed20_12 mclk, chans, eff, bw, div;

	div.full = dfixed_const(1000);
	mclk.full = dfixed_const(wm->yclk);
	mclk.full = dfixed_div(mclk, div);
	chans.full = dfixed_const(wm->dram_channels * 4);
	div.full = dfixed_const(10);
	eff.full = dfixed_const(7);	/* 0.7 dram efficiency */
	eff.full = dfixed_div(eff, div);
	bw.full = dfixed_mul(chans, mclk);
	bw.full = dfixed_mul(bw, eff);

	return dfixed_trunc(bw);
}
/* Share of the DRAM bandwidth the display may consume on average. */
static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	fixed20_12 mclk, chans, alloc, bw, div;

	div.full = dfixed_const(1000);
	mclk.full = dfixed_const(wm->yclk);
	mclk.full = dfixed_div(mclk, div);
	chans.full = dfixed_const(wm->dram_channels * 4);
	div.full = dfixed_const(10);
	alloc.full = dfixed_const(3); /* XXX worse case value 0.3 */
	alloc.full = dfixed_div(alloc, div);
	bw.full = dfixed_mul(chans, mclk);
	bw.full = dfixed_mul(bw, alloc);

	return dfixed_trunc(bw);
}
/* Display data-return bandwidth: 32 bytes/clk * sclk * 0.8 efficiency. */
static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	fixed20_12 sclk_mhz, eff, bw, tmp;

	tmp.full = dfixed_const(1000);
	sclk_mhz.full = dfixed_const(wm->sclk);
	sclk_mhz.full = dfixed_div(sclk_mhz, tmp);
	tmp.full = dfixed_const(10);
	eff.full = dfixed_const(8);	/* 0.8 return efficiency */
	eff.full = dfixed_div(eff, tmp);
	tmp.full = dfixed_const(32);
	bw.full = dfixed_mul(tmp, sclk_mhz);
	bw.full = dfixed_mul(bw, eff);

	return dfixed_trunc(bw);
}
/* DMIF request bandwidth: 32 bytes/clk * disp_clk * 0.8 efficiency. */
static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	fixed20_12 dclk_mhz, eff, bw, tmp;

	tmp.full = dfixed_const(1000);
	dclk_mhz.full = dfixed_const(wm->disp_clk);
	dclk_mhz.full = dfixed_div(dclk_mhz, tmp);
	tmp.full = dfixed_const(10);
	eff.full = dfixed_const(8);	/* 0.8 request efficiency */
	eff.full = dfixed_div(eff, tmp);
	tmp.full = dfixed_const(32);
	bw.full = dfixed_mul(tmp, dclk_mhz);
	bw.full = dfixed_mul(bw, eff);

	return dfixed_trunc(bw);
}
/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	u32 dram_bw = evergreen_dram_bandwidth(wm);
	u32 return_bw = evergreen_data_return_bandwidth(wm);
	u32 dmif_bw = evergreen_dmif_request_bandwidth(wm);
	u32 limit = (return_bw < dmif_bw) ? return_bw : dmif_bw;

	return (dram_bw < limit) ? dram_bw : limit;
}
/*
 * Average bandwidth a display mode consumes:
 * src_width * bytes_per_pixel * vsc / line_time.
 */
static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	fixed20_12 bytes, line_us, width, bw, thousand;

	thousand.full = dfixed_const(1000);
	line_us.full = dfixed_const(wm->active_time + wm->blank_time);
	line_us.full = dfixed_div(line_us, thousand);
	bytes.full = dfixed_const(wm->bytes_per_pixel);
	width.full = dfixed_const(wm->src_width);
	bw.full = dfixed_mul(width, bytes);
	bw.full = dfixed_mul(bw, wm->vsc);
	bw.full = dfixed_div(bw, line_us);

	return dfixed_trunc(bw);
}
/*
 * evergreen_latency_watermark - worst-case data latency for a head (ns)
 *
 * Sums the memory-controller latency, the time other heads can hold the
 * return path, and the display pipe latency; then, if the line buffer
 * cannot be refilled within the active display time, the shortfall is
 * added on top.
 */
static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calcualte the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	/* time for one 512-byte chunk (x8) to come back, in ns */
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	/* downscaling and/or many-tap filtering needs more source lines */
	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	/* line buffer fill rate: limited by this head's bandwidth share
	 * and by what the display clock can push */
	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	/* time to refill the needed source lines at that rate */
	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
/*
 * Check that the mode's average bandwidth demand fits within this
 * head's share of the DRAM bandwidth reserved for the display.
 */
static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	return evergreen_average_bandwidth(wm) <=
		(evergreen_dram_bandwidth_for_display(wm) / wm->num_heads);
}
/*
 * Check that the mode's average bandwidth demand fits within this
 * head's share of the total available bandwidth.
 */
static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	return evergreen_average_bandwidth(wm) <=
		(evergreen_available_bandwidth(wm) / wm->num_heads);
}
/*
 * evergreen_check_latency_hiding - can the line buffer hide latency?
 *
 * True when the worst-case latency watermark fits inside the time the
 * line buffer can keep the display fed.
 */
static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_lines = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 tolerant_lines;
	u32 hiding;
	fixed20_12 one;

	one.full = dfixed_const(1);
	if (wm->vsc.full > one.full)
		tolerant_lines = 1;
	else
		tolerant_lines = (lb_lines <= (wm->vtaps + 1)) ? 1 : 2;

	hiding = tolerant_lines * line_time + wm->blank_time;
	return evergreen_latency_watermark(wm) <= hiding;
}
/*
 * evergreen_program_watermarks - program display watermarks for one crtc
 *
 * Derives latency watermarks and priority marks from the current mode,
 * memory/engine clocks and the line buffer split, then writes them to
 * the per-pipe arbitration and per-crtc priority registers.
 *
 * @rdev: radeon device
 * @radeon_crtc: the crtc to program
 * @lb_size: line buffer size allotted to this crtc
 * @num_heads: number of currently enabled display heads
 */
static void evergreen_program_watermarks(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct evergreen_wm_params wm;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
u32 priority_a_cnt = PRIORITY_OFF;
u32 priority_b_cnt = PRIORITY_OFF;
u32 pipe_offset = radeon_crtc->crtc_id * 16;
u32 tmp, arb_control3;
fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) {
/* pixel clock period; line time is capped to the 16-bit field */
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
/* fill the watermark parameters from the current clocks and mode */
wm.yclk = rdev->pm.current_mclk * 10;
wm.sclk = rdev->pm.current_sclk * 10;
wm.disp_clk = mode->clock;
wm.src_width = mode->crtc_hdisplay;
wm.active_time = mode->crtc_hdisplay * pixel_period;
wm.blank_time = line_time - wm.active_time;
wm.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm.interlaced = true;
wm.vsc = radeon_crtc->vsc;
wm.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm.vtaps = 2;
wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm.lb_size = lb_size;
wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
wm.num_heads = num_heads;
/* set for high clocks */
latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
/* set for low clocks */
/* wm.yclk = low clk; wm.sclk = low clk */
latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
!evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
/* priority mark A: watermark_a scaled by pixel clock (MHz) and
 * horizontal scale, expressed in units of 16 */
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_a);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, radeon_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_a_mark = dfixed_trunc(c);
priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
/* priority mark B: same computation using watermark_b */
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_b);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, radeon_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
}
/* select wm A */
arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
tmp = arb_control3;
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(1);
WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
(LATENCY_LOW_WATERMARK(latency_watermark_a) |
LATENCY_HIGH_WATERMARK(line_time)));
/* select wm B */
tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(2);
WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
(LATENCY_LOW_WATERMARK(latency_watermark_b) |
LATENCY_HIGH_WATERMARK(line_time)));
/* restore original selection */
WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
/* write the priority marks */
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}
/*
 * evergreen_bandwidth_update - program display watermarks for all crtcs
 *
 * Updates the display priority, counts the enabled heads, then for each
 * pair of crtcs splits the shared line buffer and programs the latency
 * watermarks.  The i+1 access assumes num_crtc is even — true for the
 * evergreen crtc configurations handled here (NOTE(review): verify for
 * any new asic with an odd crtc count).
 */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
u32 num_heads = 0, lb_size;
int i;
radeon_update_display_priority(rdev);
/* count active display heads */
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i]->base.enabled)
num_heads++;
}
/* crtcs share line buffers in pairs; adjust the split for each pair
 * and program watermarks for both members */
for (i = 0; i < rdev->num_crtc; i += 2) {
mode0 = &rdev->mode_info.crtcs[i]->base.mode;
mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
}
}
/*
 * evergreen_mc_wait_for_idle - wait for the memory controller to go idle
 *
 * Polls the busy bits (0x1F00) in SRBM_STATUS with a 1us delay between
 * reads, for up to rdev->usec_timeout iterations.
 *
 * Returns 0 once the controller is idle, -1 on timeout.
 */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned elapsed = 0;

	while (elapsed < rdev->usec_timeout) {
		if (!(RREG32(SRBM_STATUS) & 0x1F00))
			return 0;
		udelay(1);
		elapsed++;
	}
	return -1; /* timed out */
}
/*
* GART
*/
/*
 * evergreen_pcie_gart_tlb_flush - flush the VM context0 TLB
 *
 * Flushes the HDP cache, requests a context0 TLB flush, then polls the
 * request/response register until the flush completes or the timeout
 * expires.  A response type of 2 indicates the flush request failed and
 * a warning is printed.
 */
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
/* flush the HDP cache before touching the TLB */
WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
for (i = 0; i < rdev->usec_timeout; i++) {
/* poll the flush request's response type */
tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
if (tmp == 2) {
printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
return;
}
/* any other non-zero response means the flush is done */
if (tmp) {
return;
}
udelay(1);
}
}
/*
 * evergreen_pcie_gart_enable - set up and enable the PCIE GART
 *
 * Pins the page table in VRAM, configures the L2 cache and L1 TLBs,
 * programs the VM context0 page table range/base, then flushes the TLB.
 * Fusion (IGP) parts use the FUS_* variants of the MD TLB registers.
 *
 * Returns 0 on success, negative error code on failure.
 */
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r;
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7));
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
/* Setup TLB control */
tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
/* IGP parts use the fusion register offsets for the MD TLBs */
if (rdev->flags & RADEON_IS_IGP) {
WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
} else {
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
/* program context0 to cover the GTT aperture with our page table */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
/* faults fall through to the dummy page */
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL, 0);
evergreen_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
/*
 * evergreen_pcie_gart_disable - disable the PCIE GART
 *
 * Disables both VM contexts, drops the L2 cache and L1 TLBs back to a
 * minimal (non-caching) configuration, and unpins the page table.
 */
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
EFFECTIVE_L2_QUEUE_SIZE(7));
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
/* Setup TLB control — note ENABLE_L1_TLB is deliberately absent */
tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
radeon_gart_table_vram_unpin(rdev);
}
/*
 * evergreen_pcie_gart_fini - tear down the PCIE GART
 *
 * Disables the GART hardware first, then frees the page table VRAM
 * object and the gart bookkeeping structures — order matters.
 */
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
evergreen_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
/*
 * evergreen_agp_enable - configure the MC for AGP (no page table)
 *
 * Sets up the L2 cache and L1 TLBs the same way as GART enable, but
 * leaves both VM contexts disabled so all accesses pass through
 * untranslated.
 */
void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7));
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
/* Setup TLB control */
tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
/* no translation: both VM contexts stay disabled */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
}
/*
 * evergreen_mc_stop - save display state and blank all displays
 *
 * Saves the VGA/CRTC controller registers into @save, then disables the
 * VGA renderer, all CRTC controllers and all VGA controllers so that
 * the MC can be reprogrammed safely.  Registers for crtcs 2-3 and 4-5
 * are only touched when the asic actually has that many crtcs.
 */
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
/* save current VGA/CRTC state for evergreen_mc_resume() */
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (rdev->num_crtc >= 4) {
save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
}
if (rdev->num_crtc >= 6) {
save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
/* take the crtc update locks... */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
/* ...disable the crtc controllers... */
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
/* ...and release the locks again */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
/* disable the VGA controllers */
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_D3VGA_CONTROL, 0);
WREG32(EVERGREEN_D4VGA_CONTROL, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_D5VGA_CONTROL, 0);
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
}
/*
 * evergreen_mc_resume - restore display state after MC reprogramming
 *
 * Repoints every crtc's primary/secondary surface base addresses (and
 * the VGA memory base) at the new VRAM start, then restores the
 * VGA/CRTC controller state previously saved by evergreen_mc_stop().
 */
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
/* point crtc0/1 surfaces at the (possibly relocated) VRAM start */
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
/* same for crtc2/3 when present */
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
}
/* same for crtc4/5 when present */
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
}
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
/* Unlock host access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
WREG32(D1VGA_CONTROL, save->vga_control[0]);
WREG32(D2VGA_CONTROL, save->vga_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
}
/* restore crtc controls under the update locks */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
/* finally re-enable the VGA renderer */
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
/*
 * evergreen_mc_program - program the memory controller address ranges
 *
 * Stops the displays, waits for the MC to idle, then programs the
 * system aperture, VRAM location (FB_LOCATION) and AGP aperture,
 * before resuming the displays.  On llano/ontario fusion parts the
 * additional MC_FUS_VM_FB_OFFSET register is updated as well.
 */
void evergreen_mc_program(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp;
int i, j;
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
WREG32((0x2c14 + j), 0x00000000);
WREG32((0x2c18 + j), 0x00000000);
WREG32((0x2c1c + j), 0x00000000);
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Lockout access through VGA aperture*/
WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
/* Update configuration */
if (rdev->flags & RADEON_IS_AGP) {
/* the system aperture must cover both VRAM and the AGP window */
if (rdev->mc.vram_start < rdev->mc.gtt_start) {
/* VRAM before AGP */
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.gtt_end >> 12);
} else {
/* VRAM after AGP */
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.gtt_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
} else {
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
/* llano/ontario only */
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2)) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
WREG32(MC_FUS_VM_FB_OFFSET, tmp);
}
/* FB_LOCATION packs start and end addresses in 16MB units */
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
} else {
/* no AGP: program an empty aperture (BOT above TOP) */
WREG32(MC_VM_AGP_BASE, 0);
WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
}
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
evergreen_mc_resume(rdev, &save);
/* we need to own VRAM, so turn off the VGA renderer here
 * to stop it overwriting our objects */
rv515_vga_render_disable(rdev);
}
/*
* CP.
*/
/*
 * evergreen_ring_ib_execute - schedule an indirect buffer on the ring
 *
 * Emits a MODE_CONTROL packet selecting DX10/11 mode followed by an
 * INDIRECT_BUFFER packet pointing at the IB's GPU address.  On
 * big-endian hosts the swap mode bits are set in the address dword.
 */
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
/* FIXME: implement */
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
/* low 32 bits of the IB address, 4-byte aligned (endian-swap bits
 * in the low nibble on big-endian) */
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw);
}
/*
 * evergreen_cp_load_microcode - upload the PFP and ME microcode
 *
 * Stops the CP, then writes the prefetch parser (PFP) and micro engine
 * (ME) firmware images word-by-word into their ucode RAMs, resetting
 * the ucode address registers before and after each upload.
 *
 * Returns 0 on success, -EINVAL if the firmware has not been loaded.
 */
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
int i;
if (!rdev->me_fw || !rdev->pfp_fw)
return -EINVAL;
r700_cp_stop(rdev);
WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
BUF_SWAP_32BIT |
#endif
RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
/* upload PFP microcode (firmware words are big-endian) */
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
/* upload ME microcode */
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_ME_RAM_WADDR, 0);
WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
/*
 * evergreen_cp_start - initialize the micro engine and clear state
 *
 * Sends the ME_INITIALIZE packet, un-halts the CP, then emits the
 * default clear-state preamble plus a few initial register writes
 * (vertex base, constants, vertex reuse) on the GFX ring.
 *
 * Returns 0 on success, negative error code if the ring cannot be
 * locked.
 */
static int evergreen_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
/* un-halt the micro engine */
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < evergreen_default_size; i++)
radeon_ring_write(ring, evergreen_default_state[i]);
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(ring, 0xc0026f00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
/* Clear consts */
radeon_ring_write(ring, 0xc0036f00);
radeon_ring_write(ring, 0x00000bc4);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xc0026900);
radeon_ring_write(ring, 0x00000316);
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
SOFT_RESET_PA |
SOFT_RESET_SH |
SOFT_RESET_VGT |
SOFT_RESET_SPI |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);
/* set the wb address wether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
ring->rptr = RREG32(CP_RB_RPTR);
evergreen_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
ring->ready = false;
return r;
}
return 0;
}
/*
* Core functions
*/
/*
 * evergreen_get_tile_pipe_to_backend_map - build the pipe->backend map
 *
 * Computes the packed backend-map register value: one 4-bit backend id
 * per tile pipe.  On asics where it helps, pipes are "swizzled"
 * (interleaved) across the enabled backends.
 *
 * @rdev: radeon device (used to select per-family swizzle policy)
 * @num_tile_pipes: number of tile pipes, clamped to 1..EVERGREEN_MAX_PIPES
 * @num_backends: number of render backends, clamped to
 *                1..EVERGREEN_MAX_BACKENDS and to the enabled count
 * @backend_disable_mask: bitmask of backends that are disabled
 *
 * Returns the packed backend map value.
 *
 * Fix: the swizzle_pipe clear now uses the idiomatic
 * memset(arr, 0, sizeof(arr)) instead of a cast plus a re-derived
 * element-size multiplication (same byte count, less fragile).
 */
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	/* clamp the requested counts to the hardware limits */
	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	/* collect up to num_backends backends that are not disabled */
	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}
	/* everything disabled: fall back to backend 0 alone */
	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}
	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset(swizzle_pipe, 0, sizeof(swizzle_pipe));

	/* per-family default swizzle policy */
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_BARTS:
	default:
		force_no_swizzle = true;
		break;
	}
	/* on chips that default to no-swizzle, only actually disable it
	 * when at least two enabled backends are adjacent */
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	/* choose the pipe visit order (identity or interleaved) */
	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	/* assign each pipe the next enabled backend, round-robin */
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}
/*
 * evergreen_gpu_init - program the static 3D-engine configuration.
 *
 * Fills rdev->config.evergreen with per-ASIC shader/backend limits,
 * derives the tiling layout (GB_ADDR_CONFIG, the RB backend map and the
 * userspace-visible tile_config word) and writes hardware defaults for
 * the SQ/SX/PA/VGT blocks.  The register write ordering below follows
 * the hardware bring-up sequence and must not be rearranged.
 */
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 grbm_gfx_index;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl, tmp;
	int i, j, num_shader_engines, ps_thread_count;

	/* Per-family hard limits: shader engines, pipes, SIMDs, RBs,
	 * GPR/thread/stack pools and the SX/SC fifo sizing.  Unknown
	 * families fall back to the CEDAR (smallest) configuration. */
	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		/* SUMO SKUs differ in SIMD count; key off the PCI device id */
		if (rdev->pdev->device == 0x9648)
			rdev->config.evergreen.max_simds = 3;
		else if ((rdev->pdev->device == 0x9647) ||
			 (rdev->pdev->device == 0x964a))
			rdev->config.evergreen.max_simds = 4;
		else
			rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_SUMO2:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_BARTS:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 7;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_TURKS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 6;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;
		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CAICOS:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;
		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	/* Build the inactive pipe/SIMD/backend masks from the limits above.
	 * Each mask marks the units *above* the supported count as disabled. */
	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);
	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	/* fusion APUs expose the MC arbiter config at a different register */
	if ((rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2))
		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
	else
		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	/* GB_ADDR_CONFIG: NUM_PIPES is log2 of the tile pipe count */
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	/* row size is capped at 2 (4KB), otherwise derived from column count */
	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);

	/* Some harvested boards encode their working-backend layout in efuses:
	 * 0x689e (Cypress LE) carries it in efuse bits 131:124. */
	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));

		switch(efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x56:
			gb_backend_map = 0x77553300;
			break;
		case 0x59:
			gb_backend_map = 0x77552211;
			break;
		case 0x66:
			gb_backend_map = 0x77443300;
			break;
		case 0x99:
			gb_backend_map = 0x66552211;
			break;
		case 0x5a:
			gb_backend_map = 0x77552200;
			break;
		case 0xaa:
			gb_backend_map = 0x66442200;
			break;
		case 0x95:
			gb_backend_map = 0x66553311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
								       rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		/* 0x68b9 (Redwood variant): layout lives in efuse bits 127:124 */
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch(efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
								       rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		/* no efuse data: use known-good maps for the fully-enabled
		 * parts, otherwise compute one from the pipe/backend counts */
		switch (rdev->family) {
		case CHIP_CYPRESS:
		case CHIP_HEMLOCK:
		case CHIP_BARTS:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00002200;
			break;
		default:
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
								       rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
		}
	}

	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.evergreen.tile_config |= 1 << 4;
	else
		rdev->config.evergreen.tile_config |=
			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;

	rdev->config.evergreen.backend_map = gb_backend_map;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;

	/* Per-SE backend/SIMD disable: engines beyond the hardware count
	 * get everything masked off; real engines get the computed masks. */
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}

	/* back to broadcast so subsequent writes hit every engine */
	grbm_gfx_index |= SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);

	/* set HW defaults for 3D engine */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
				     ROQ_IB2_START(0x2b)));

	WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
			     SYNC_GRADIENT |
			     SYNC_WALKER |
			     SYNC_ALIGNER));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	/* export buffer sizes are programmed in units of 4, minus one */
	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

	WREG32(VGT_NUM_INSTANCES, 1);
	WREG32(SPI_CONFIG_CNTL, 0);
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	/* SQ_CONFIG: reset shader-type priorities, then give PS the highest */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (VC_ENABLE |
		      EXPORT_SRC_C |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		break;
	default:
		break;
	}

	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

	/* Split the GPR pool (minus 4*2 clause temps) across shader types
	 * in 32-register granules: PS 12/32, VS 6/32, GS/ES 4/32, HS/LS 3/32 */
	sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
	sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
	sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);

	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
		ps_thread_count = 96;
		break;
	default:
		ps_thread_count = 128;
		break;
	}

	/* remaining threads are split evenly (rounded to 8) among VS/GS/ES/HS/LS */
	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
	sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);

	/* stack entries: one sixth of the pool to each shader type */
	sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
	sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
	WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
	WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	/* parts without a vertex cache only invalidate the texture cache */
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_PALM:
	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_CAICOS:
		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
		break;
	default:
		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
		break;
	}
	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
	WREG32(VGT_OUT_DEALLOC_CNTL, 16);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	/* clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR8_BASE, 0);
	WREG32(CB_COLOR9_BASE, 0);
	WREG32(CB_COLOR10_BASE, 0);
	WREG32(CB_COLOR11_BASE, 0);

	/* set the shader const cache sizes to 0 */
	for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
		WREG32(i, 0);
	for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
		WREG32(i, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	/* read-modify-write keeps the reserved HDP host path bits intact */
	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}
int evergreen_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2))
tmp = RREG32(FUS_MC_ARB_RAMCFG);
else
tmp = RREG32(MC_ARB_RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
chansize = 16;
} else if (tmp & CHANSIZE_MASK) {
chansize = 64;
} else {
chansize = 32;
}
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2)) {
/* size in bytes on fusion */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen/cayman/tn */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
return 0;
}
/*
 * evergreen_gpu_is_lockup - decide whether the graphics engine is hung.
 *
 * Samples the GRBM status; if the GUI block is idle the lockup tracker
 * is refreshed and the GPU is reported healthy.  Otherwise two PACKET2
 * NOPs are pushed to nudge the CP, the read pointer is re-sampled and
 * the generic CP lockup detector makes the final call.
 *
 * NOTE(review): only grbm_status is actually examined below; the
 * SRBM/SE0/SE1 status reads are retained but their values are unused.
 */
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status_se0, grbm_status_se1;
	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
	int r;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	/* GUI idle: engine is making progress, reset the lockup timer */
	if (!(grbm_status & GUI_ACTIVE)) {
		r100_gpu_lockup_update(lockup, ring);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, ring, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_write(ring, 0x80000000);
		radeon_ring_unlock_commit(rdev, ring);
	}
	/* refresh rptr so the detector sees the latest CP progress */
	ring->rptr = RREG32(CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
/*
 * evergreen_gpu_soft_reset - soft-reset the graphics blocks via GRBM.
 *
 * No-op when the GUI is already idle.  Otherwise: save/stop the memory
 * controller, halt the CP, pulse GRBM_SOFT_RESET over every gfx block,
 * then restore the MC.  Status registers are logged before and after
 * for diagnostics.  The RREG32 read-backs after each reset write flush
 * the posted write before the settle delays; keep them.
 */
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	/* nothing to do if the engine is idle */
	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset \n");
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VC |
		      SOFT_RESET_VGT);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
/*
 * evergreen_asic_reset - ASIC-level reset entry point for evergreen parts.
 * Thin wrapper: only a GRBM soft reset is implemented for this family.
 */
int evergreen_asic_reset(struct radeon_device *rdev)
{
	return evergreen_gpu_soft_reset(rdev);
}
/* Interrupts */
/*
 * evergreen_get_vblank_counter - read the hardware frame counter of a CRTC.
 *
 * @crtc: CRTC id, 0..5.  Any other value yields 0 without touching
 *        the hardware.
 */
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
	/* per-CRTC register block offsets, indexed by crtc id */
	static const u32 crtc_offsets[6] = {
		EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_CRTC5_REGISTER_OFFSET,
	};

	if (crtc < 0 || crtc > 5)
		return 0;

	return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
/*
 * evergreen_disable_interrupt_state - force every display/engine IRQ
 * source into its disabled state: CP ring interrupts (all three rings
 * on cayman+), GRBM, per-CRTC vblank/pageflip masks, DAC autodetect and
 * the six HPD pads.  HPD polarity bits are preserved while the enable
 * bits are cleared.
 */
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	if (rdev->family >= CHIP_CAYMAN) {
		/* ring 0 keeps only the context busy/empty sources armed */
		cayman_cp_int_cntl_setup(rdev, 0,
					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
		cayman_cp_int_cntl_setup(rdev, 1, 0);
		cayman_cp_int_cntl_setup(rdev, 2, 0);
	} else
		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	/* vblank/vline interrupt masks; CRTCs 2-5 only exist on larger parts */
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* page-flip interrupt controls, same CRTC population rules */
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	/* only one DAC on DCE6 */
	if (!ASIC_IS_DCE6(rdev))
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

	/* clear the HPD enable bits but keep the configured polarity */
	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
/*
 * evergreen_irq_set - program the interrupt enables from rdev->irq state.
 *
 * Builds the CP, CRTC vblank, GRBM and hot-plug enable masks from the
 * software flags in rdev->irq and commits them to the hardware in one
 * pass.  Returns 0 on success, -EINVAL if no IRQ handler is installed.
 * If the IH ring is disabled, everything is forced off instead.
 */
int evergreen_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		evergreen_disable_interrupt_state(rdev);
		return 0;
	}

	/* start from the current HPD state with the enable bits cleared */
	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	if (rdev->family >= CHIP_CAYMAN) {
		/* enable CP interrupts on all rings */
		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
		}
		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
		}
	} else {
		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
			cp_int_cntl |= RB_INT_ENABLE;
			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
		}
	}

	/* vblank enables: a CRTC needs its interrupt if either a vblank
	 * waiter or a pending page flip is registered for it */
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    rdev->irq.pflip[2]) {
		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    rdev->irq.pflip[3]) {
		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    rdev->irq.pflip[4]) {
		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    rdev->irq.pflip[5]) {
		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("evergreen_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("evergreen_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("evergreen_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("evergreen_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("evergreen_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("evergreen_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	/* commit all the masks built above to the hardware */
	if (rdev->family >= CHIP_CAYMAN) {
		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
	} else
		WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	/* grph1..6 stay zero here: page-flip controls are left disabled */
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	return 0;
}
static void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (rdev->num_crtc >= 4) {
rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
}
if (rdev->num_crtc >= 6) {
rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
if (rdev->num_crtc >= 4) {
if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
}
if (rdev->num_crtc >= 6) {
if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
}
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
/*
 * evergreen_irq_disable - quiesce interrupt generation
 * @rdev: radeon device pointer
 *
 * Masks interrupts at the controller, waits for in-flight interrupts to
 * land, acks anything still pending, then clears the per-source enable
 * state.  The ordering (disable -> delay -> ack -> clear state) is
 * deliberate; do not reorder.
 */
void evergreen_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	evergreen_irq_ack(rdev);
	evergreen_disable_interrupt_state(rdev);
}
/*
 * evergreen_irq_suspend - disable interrupts for suspend
 * @rdev: radeon device pointer
 *
 * Fully disables interrupt delivery and then stops the RLC, which must
 * happen after the interrupt controller is quiesced.
 */
void evergreen_irq_suspend(struct radeon_device *rdev)
{
	evergreen_irq_disable(rdev);
	r600_rlc_stop(rdev);
}
/*
 * evergreen_get_ih_wptr - fetch the IH ring write pointer
 * @rdev: radeon device pointer
 *
 * Reads the IH write pointer from the writeback buffer when enabled
 * (cheaper than an MMIO read), otherwise from the IH_RB_WPTR register.
 * On ring overflow, resynchronizes the read pointer to the oldest
 * non-overwritten vector and clears the overflow flag.
 *
 * Returns the write pointer masked to the ring size.
 */
static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		/* BUGFIX: the warning used to print (wptr + 16) + ptr_mask
		 * instead of the masked value actually assigned to rptr,
		 * producing a misleading diagnostic. */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/*
 * evergreen_irq_process - top half: drain and dispatch the IH ring
 * @rdev: radeon device pointer
 *
 * Walks the interrupt handler (IH) ring from rptr to wptr, decoding each
 * 16-byte vector (src_id + src_data) and dispatching to the vblank/vline,
 * pageflip, hotplug, CP fence and GUI-idle handlers.  Each display event
 * is cross-checked against the status latched by evergreen_irq_ack() and
 * the corresponding latched bit is cleared once handled.  The whole walk
 * runs under ih.lock with interrupts saved.
 *
 * Returns IRQ_HANDLED if any work was done, IRQ_NONE otherwise.
 */
int evergreen_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = evergreen_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);
	if (rptr == wptr) {
		/* ring empty: nothing for us */
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
restart_ih:
	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	evergreen_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! Each IH vector is 16 bytes;
		 * dword 0 holds the source id, dword 1 the source data. */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					/* clear the latched bit so a stale status
					 * is not re-handled on the next vector */
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[2])
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[3])
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[4])
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[5])
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			/* just flag the event; the heavy lifting happens in
			 * hotplug_work scheduled after the loop */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			if (rdev->family >= CHIP_CAYMAN) {
				/* Cayman has three CP rings; src_data selects one */
				switch (src_data) {
				case 0:
					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
					break;
				case 1:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
					break;
				case 2:
					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
					break;
				}
			} else
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! advance to the next 16-byte vector */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = evergreen_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
/*
 * evergreen_startup - bring the asic up to an operational state
 * @rdev: radeon device pointer
 *
 * Performs the full hardware bring-up: microcode load, memory controller
 * programming, GART/AGP setup, GPU init, blitter, writeback, fences, IRQ,
 * CP ring and IB pool.  The ordering is hardware-mandated; do not reorder.
 * Used at both init and resume time.
 *
 * Returns 0 on success, negative errno on the first failing step.
 */
static int evergreen_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	/* DCE5 (NI) parts additionally require MC microcode */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = r600_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	evergreen_mc_program(rdev);
	/* system memory access goes through AGP or the PCIe GART */
	if (rdev->flags & RADEON_IS_AGP) {
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	evergreen_gpu_init(rdev);

	/* blitter failure is non-fatal: copies fall back to memcpy */
	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	/* bring up the CP ring, then its microcode, then resume it */
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = evergreen_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_start(rdev);
	if (r)
		return r;

	/* smoke-test command submission; disable acceleration on failure */
	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		rdev->accel_working = false;
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}
/*
 * evergreen_resume - resume the asic after suspend
 * @rdev: radeon device pointer
 *
 * Resets the GPU, re-posts the card via the atom BIOS init tables, and
 * re-runs the full startup sequence.
 *
 * Returns 0 on success, negative errno on failure.
 */
int evergreen_resume(struct radeon_device *rdev)
{
	int r;

	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* assume acceleration works until startup proves otherwise */
	rdev->accel_working = true;
	r = evergreen_startup(rdev);
	if (r) {
		DRM_ERROR("evergreen startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
/*
 * evergreen_suspend - quiesce the asic for suspend
 * @rdev: radeon device pointer
 *
 * Stops audio, the IB pool, the blitter and the CP, disables interrupts,
 * writeback and the GART — in that order, since later steps tear down
 * resources earlier ones may still be using.
 *
 * Returns 0 (never fails).
 */
int evergreen_suspend(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	radeon_ib_pool_suspend(rdev);
	r600_blit_suspend(rdev);
	r700_cp_stop(rdev);
	ring->ready = false;
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);

	return 0;
}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * like vram_info.
 */
/*
 * evergreen_init - one-time driver initialization for evergreen asics
 * @rdev: radeon device pointer
 *
 * Reads the (ATOM) BIOS, resets and posts the card if needed, then sets
 * up scratch/surface registers, clocks, fences, AGP, the memory
 * controller, BO manager, IRQs, rings and the GART before running
 * evergreen_startup().  A startup failure disables acceleration but is
 * not fatal (modesetting can still work).
 *
 * Returns 0 on success, negative errno on a fatal initialization error.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	/* This don't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* reset the asic, the gfx blocks are often in a bad state
	 * after the driver is unloaded or after a resume
	 */
	if (radeon_asic_reset(rdev))
		dev_warn(rdev->dev, "GPU reset failed !\n");
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize AGP: failure is non-fatal, fall back to PCI(e) */
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	/* size the GFX ring and the IH ring before startup */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	/* IB pool failure only disables acceleration; init continues */
	r = radeon_ib_pool_init(rdev);
	rdev->accel_working = true;
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		rdev->accel_working = false;
	}

	/* startup failure likewise disables acceleration rather than failing */
	r = evergreen_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r700_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		r100_ib_fini(rdev);
		radeon_irq_kms_fini(rdev);
		evergreen_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing on BTC parts.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not suffient for advanced operations.
	 */
	if (ASIC_IS_DCE5(rdev)) {
		if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
			DRM_ERROR("radeon: MC ucode required for NI+.\n");
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * evergreen_fini - tear down everything evergreen_init() set up
 * @rdev: radeon device pointer
 *
 * Teardown mirrors the init order in reverse: engines and IRQs first,
 * then GART, memory managers and BIOS state.  Do not reorder.
 */
void evergreen_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r700_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_irq_kms_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_semaphore_driver_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/*
 * evergreen_pcie_gen2_enable - switch the PCIe link to gen2 speed
 * @rdev: radeon device pointer
 *
 * No-op when gen2 is disabled via the radeon_pcie_gen2 module parameter,
 * on IGP/non-PCIe parts, or on X2 (dual-GPU) boards which need a special
 * sequence.  If the other side of the link supports gen2, runs the
 * speed-change register sequence; otherwise only adjusts the upconfigure
 * setting on the link width register.
 */
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, speed_cntl;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
		/* gen2-capable link partner: allow upconfigure, clear the
		 * failed-speed-change counter, then strap gen2 on.  Each
		 * step is a read-modify-write of PCIE_LC_SPEED_CNTL. */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
| gpl-2.0 |
ramseydsilva/linux | arch/mips/lasat/serial.c | 4542 | 3051 | /*
* Registration of Lasat UART platform device.
*
* Copyright (C) 2007 Brian Murphy <brian@murphy.dk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <asm/lasat/lasat.h>
#include <asm/lasat/serial.h>
/* MEM + IRQ resources; filled in by lasat_uart_add() depending on the
 * board variant.  __initdata is safe: platform_device_add_resources()
 * copies them into the device. */
static struct resource lasat_serial_res[2] __initdata;

/* Single 8250 port descriptor; per-board fields (mapbase, uartclk,
 * regshift, irq) are filled in at init time.  The empty second entry
 * terminates the array for the serial8250 driver. */
static struct plat_serial8250_port lasat_serial8250_port[] = {
	{
		.iotype = UPIO_MEM,
		.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF |
			 UPF_SKIP_TEST,
	},
	{},
};
/*
 * Register the on-board UART as a serial8250 platform device.
 * The register base, register stride, IRQ and clock differ between the
 * Lasat 100 and Lasat 200 boards; pick the right set at run time.
 * Returns 0 on success or a negative errno.
 */
static __init int lasat_uart_add(void)
{
	struct platform_device *pdev;
	unsigned long regs_base;
	unsigned int regshift, irq, uartclk;
	int retval;

	pdev = platform_device_alloc("serial8250", -1);
	if (!pdev)
		return -ENOMEM;

	/* Select the per-board UART parameters. */
	if (!IS_LASAT_200()) {
		regs_base = LASAT_UART_REGS_BASE_100;
		regshift = LASAT_UART_REGS_SHIFT_100;
		irq = LASATINT_UART_100;
		uartclk = LASAT_BASE_BAUD_100 * 16;
	} else {
		regs_base = LASAT_UART_REGS_BASE_200;
		regshift = LASAT_UART_REGS_SHIFT_200;
		irq = LASATINT_UART_200;
		uartclk = LASAT_BASE_BAUD_200 * 16;
	}

	/* MEM window covers the 8 UART registers at the given stride. */
	lasat_serial_res[0].start = KSEG1ADDR(regs_base);
	lasat_serial_res[0].end = lasat_serial_res[0].start + regshift * 8 - 1;
	lasat_serial_res[0].flags = IORESOURCE_MEM;
	lasat_serial_res[1].start = irq;
	lasat_serial_res[1].end = irq;
	lasat_serial_res[1].flags = IORESOURCE_IRQ;

	lasat_serial8250_port[0].mapbase = regs_base;
	lasat_serial8250_port[0].uartclk = uartclk;
	lasat_serial8250_port[0].regshift = regshift;
	lasat_serial8250_port[0].irq = irq;

	pdev->id = PLAT8250_DEV_PLATFORM;
	pdev->dev.platform_data = lasat_serial8250_port;

	retval = platform_device_add_resources(pdev, lasat_serial_res,
					       ARRAY_SIZE(lasat_serial_res));
	if (retval)
		goto err_free_device;

	retval = platform_device_add(pdev);
	if (retval)
		goto err_free_device;

	return 0;

err_free_device:
	platform_device_put(pdev);
	return retval;
}
device_initcall(lasat_uart_add);
| gpl-2.0 |
byzvulture/android_kernel_xiaomi_armor | arch/powerpc/platforms/pseries/eeh_cache.c | 4542 | 8999 | /*
* PCI address cache; allows the lookup of PCI devices based on I/O address
*
* Copyright IBM Corporation 2004
* Copyright Linas Vepstas <linas@austin.ibm.com> 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
/**
* The pci address cache subsystem. This subsystem places
* PCI device address resources into a red-black tree, sorted
* according to the address range, so that given only an i/o
* address, the corresponding PCI device can be **quickly**
* found. It is safe to perform an address lookup in an interrupt
* context; this ability is an important feature.
*
* Currently, the only customer of this code is the EEH subsystem;
* thus, this code has been somewhat tailored to suit EEH better.
* In particular, the cache does *not* hold the addresses of devices
* for which EEH is not enabled.
*
* (Implementation Note: The RB tree seems to be better/faster
* than any hash algo I could think of for this problem, even
* with the penalty of slow pointer chases for d-cache misses).
*/
/* One node of the address cache: a [addr_lo, addr_hi] range owned by a
 * PCI device, keyed into the rb tree by the range bounds. */
struct pci_io_addr_range {
	struct rb_node rb_node;		/* linkage in pci_io_addr_cache_root */
	unsigned long addr_lo;		/* inclusive range start */
	unsigned long addr_hi;		/* inclusive range end */
	struct pci_dev *pcidev;		/* holds a reference (pci_dev_get) */
	unsigned int flags;		/* IORESOURCE_IO / IORESOURCE_MEM */
};

/* The cache itself: an rb tree of address ranges protected by a
 * spinlock so lookups are safe from interrupt context. */
static struct pci_io_addr_cache {
	struct rb_root rb_root;
	spinlock_t piar_lock;
} pci_io_addr_cache_root;
/* Core lookup: binary-search the rb tree for a range containing @addr.
 * Caller must hold piar_lock.  On a hit, takes a reference on the
 * device (caller must pci_dev_put it); returns NULL on a miss. */
static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
{
	struct rb_node *node = pci_io_addr_cache_root.rb_root.rb_node;

	while (node) {
		struct pci_io_addr_range *range;

		range = rb_entry(node, struct pci_io_addr_range, rb_node);
		if (addr < range->addr_lo) {
			node = node->rb_left;
		} else if (addr > range->addr_hi) {
			node = node->rb_right;
		} else {
			pci_dev_get(range->pcidev);
			return range->pcidev;
		}
	}
	return NULL;
}
/**
 * pci_addr_cache_get_device - Get device, given only address
 * @addr: mmio (PIO) phys address or i/o port number
 *
 * Given an mmio phys address, or a port number, find a pci device
 * that implements this address.  Be sure to pci_dev_put the device
 * when finished.  I/O port numbers are assumed to be offset
 * from zero (that is, they do *not* have pci_io_addr added in).
 * It is safe to call this function within an interrupt.
 */
struct pci_dev *pci_addr_cache_get_device(unsigned long addr)
{
	struct pci_dev *dev;
	unsigned long flags;

	/* irqsave variant so the lookup is usable from interrupt
	 * context, as promised in the comment above. */
	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	dev = __pci_addr_cache_get_device(addr);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
	return dev;
}
#ifdef DEBUG
/*
 * Handy-dandy debug print routine, does nothing more
 * than print out the contents of our addr cache.
 */
static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
{
	struct rb_node *n;
	int cnt = 0;

	/* In-order walk of the tree, one line per cached range. */
	for (n = rb_first(&cache->rb_root); n; n = rb_next(n)) {
		struct pci_io_addr_range *piar;

		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
		printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
		       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
		       piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
		cnt++;
	}
}
#endif
/* Insert the range [alo, ahi] for @dev into the rb tree.  Caller must
 * hold piar_lock (hence GFP_ATOMIC).  If an overlapping range already
 * exists it is returned unchanged (with a warning unless it is the
 * exact same entry).  Takes a reference on @dev for the new node.
 * Returns the inserted (or pre-existing) node, or NULL on allocation
 * failure. */
static struct pci_io_addr_range *
pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
		      unsigned long ahi, unsigned int flags)
{
	struct rb_node **link = &pci_io_addr_cache_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct pci_io_addr_range *piar;

	/* Walk tree, find a place to insert into tree */
	while (*link) {
		parent = *link;
		piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
		if (ahi < piar->addr_lo) {
			link = &parent->rb_left;
		} else if (alo > piar->addr_hi) {
			link = &parent->rb_right;
		} else {
			if (dev != piar->pcidev ||
			    alo != piar->addr_lo || ahi != piar->addr_hi) {
				printk(KERN_WARNING "PIAR: overlapping address range\n");
			}
			return piar;
		}
	}

	piar = kmalloc(sizeof(*piar), GFP_ATOMIC);
	if (!piar)
		return NULL;

	pci_dev_get(dev);
	piar->addr_lo = alo;
	piar->addr_hi = ahi;
	piar->pcidev = dev;
	piar->flags = flags;

#ifdef DEBUG
	printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
	       alo, ahi, pci_name(dev));
#endif

	rb_link_node(&piar->rb_node, parent, link);
	rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);

	return piar;
}
/* Add every bus-addressable resource of @dev to the cache.  Skips
 * devices without a device-tree node or EEH device, and devices for
 * which EEH is not enabled.  Caller must hold piar_lock. */
static void __pci_addr_cache_insert_device(struct pci_dev *dev)
{
	struct device_node *dn;
	struct eeh_dev *edev;
	int i;

	dn = pci_device_to_OF_node(dev);
	if (!dn) {
		printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
		return;
	}

	edev = of_node_to_eeh_dev(dn);
	if (!edev) {
		pr_warning("PCI: no EEH dev found for dn=%s\n",
			   dn->full_name);
		return;
	}

	/* Skip any devices for which EEH is not enabled. */
	if (!(edev->mode & EEH_MODE_SUPPORTED) ||
	    edev->mode & EEH_MODE_NOCHECK) {
#ifdef DEBUG
		pr_info("PCI: skip building address cache for=%s - %s\n",
			pci_name(dev), dn->full_name);
#endif
		return;
	}

	/* Walk resources on this device, poke them into the tree */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long start = pci_resource_start(dev,i);
		unsigned long end = pci_resource_end(dev,i);
		unsigned int flags = pci_resource_flags(dev,i);

		/* We are interested only bus addresses, not dma or other stuff */
		if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		/* skip empty or degenerate (all-ones) resources */
		if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
			continue;
		pci_addr_cache_insert(dev, start, end, flags);
	}
}
/**
 * pci_addr_cache_insert_device - Add a device to the address cache
 * @dev: PCI device whose I/O addresses we are interested in.
 *
 * In order to support the fast lookup of devices based on addresses,
 * we maintain a cache of devices that can be quickly searched.
 * This routine adds a device to that cache.
 */
void pci_addr_cache_insert_device(struct pci_dev *dev)
{
	unsigned long flags;

	/* Ignore PCI bridges */
	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
		return;

	/* Locked wrapper around the real insertion. */
	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	__pci_addr_cache_insert_device(dev);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
/* Remove every cached range belonging to @dev, dropping the reference
 * each node holds.  Restarts the scan from the root after each erase
 * because rb_erase() rebalances the tree and invalidates the iterator.
 * Caller must hold piar_lock. */
static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
{
	struct rb_node *n;

restart:
	n = rb_first(&pci_io_addr_cache_root.rb_root);
	while (n) {
		struct pci_io_addr_range *piar;
		piar = rb_entry(n, struct pci_io_addr_range, rb_node);

		if (piar->pcidev == dev) {
			rb_erase(n, &pci_io_addr_cache_root.rb_root);
			pci_dev_put(piar->pcidev);
			kfree(piar);
			goto restart;
		}
		n = rb_next(n);
	}
}
/**
 * pci_addr_cache_remove_device - remove pci device from addr cache
 * @dev: device to remove
 *
 * Remove a device from the addr-cache tree.
 * This is potentially expensive, since it will walk
 * the tree multiple times (once per resource).
 * But so what; device removal doesn't need to be that fast.
 */
void pci_addr_cache_remove_device(struct pci_dev *dev)
{
	unsigned long flags;

	/* Locked wrapper around the real removal. */
	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
	__pci_addr_cache_remove_device(dev);
	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
/**
 * pci_addr_cache_build - Build a cache of I/O addresses
 *
 * Build a cache of pci i/o addresses. This cache will be used to
 * find the pci device that corresponds to a given address.
 * This routine scans all pci busses to build the cache.
 * Must be run late in boot process, after the pci controllers
 * have been scanned for devices (after all device resources are known).
 */
void __init pci_addr_cache_build(void)
{
        struct device_node *dn;
        struct eeh_dev *edev;
        struct pci_dev *dev = NULL;

        spin_lock_init(&pci_io_addr_cache_root.piar_lock);

        for_each_pci_dev(dev) {
                pci_addr_cache_insert_device(dev);

                /* Link the EEH device (if any) to this pci_dev. */
                dn = pci_device_to_OF_node(dev);
                if (!dn)
                        continue;
                edev = of_node_to_eeh_dev(dn);
                if (!edev)
                        continue;
                pci_dev_get(dev);  /* matching put is in eeh_remove_device() */
                dev->dev.archdata.edev = edev;
                edev->pdev = dev;
                eeh_sysfs_add_device(dev);
        }

#ifdef DEBUG
        /* Verify tree built up above, echo back the list of addrs. */
        pci_addr_cache_print(&pci_io_addr_cache_root);
#endif
}
/*
* linux/drivers/message/fusion/mptbase.c
* This is the Fusion MPT base driver which supports multiple
* (SCSI + LAN) specialized protocol drivers.
* For use with LSI PCI chip/adapter(s)
* running LSI Fusion MPT (Message Passing Technology) firmware.
*
* Copyright (c) 1999-2008 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
NO WARRANTY
THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
solely responsible for determining the appropriateness of using and
distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to
the risks and costs of program errors, damage to or loss of data,
programs or equipment, and unavailability or interruption of operations.
DISCLAIMER OF LIABILITY
NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h> /* needed for in_interrupt() proto */
#include <linux/dma-mapping.h>
#include <asm/io.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include <linux/kthread.h>
#include <scsi/scsi_host.h>
#include "mptbase.h"
#include "lsi/mpi_log_fc.h"
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
#define my_NAME "Fusion MPT base driver"
#define my_VERSION MPT_LINUX_VERSION_COMMON
#define MYNAM "mptbase"
MODULE_AUTHOR(MODULEAUTHOR);
MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);
/*
* cmd line parameters
*/
static int mpt_msi_enable_spi;
module_param(mpt_msi_enable_spi, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_spi,
" Enable MSI Support for SPI controllers (default=0)");
static int mpt_msi_enable_fc;
module_param(mpt_msi_enable_fc, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_fc,
" Enable MSI Support for FC controllers (default=0)");
static int mpt_msi_enable_sas;
module_param(mpt_msi_enable_sas, int, 0);
MODULE_PARM_DESC(mpt_msi_enable_sas,
" Enable MSI Support for SAS controllers (default=0)");
static int mpt_channel_mapping;
module_param(mpt_channel_mapping, int, 0);
MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
static int mpt_debug_level;
static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
&mpt_debug_level, 0600);
MODULE_PARM_DESC(mpt_debug_level,
" debug level - refer to mptdebug.h - (default=0)");
int mpt_fwfault_debug;
EXPORT_SYMBOL(mpt_fwfault_debug);
module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug,
"Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
[MPT_MAX_CALLBACKNAME_LEN+1];
#ifdef MFCNT
static int mfcounter = 0;
#define PRINT_MF_COUNT 20000
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Public data...
*/
#define WHOINIT_UNKNOWN 0xAA
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Private data...
*/
/* Adapter link list */
LIST_HEAD(ioc_list);
/* Callback lookup table */
static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
/* Protocol driver class lookup table */
static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
/* Event handler lookup table */
static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
/* Reset handler lookup table */
static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *mpt_proc_root_dir;
#endif
/*
* Driver Callback Index's
*/
static u8 mpt_base_index = MPT_MAX_PROTOCOL_DRIVERS;
static u8 last_drv_idx;
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Forward protos...
*/
static irqreturn_t mpt_interrupt(int irq, void *bus_id);
static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
MPT_FRAME_HDR *reply);
static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
u32 *req, int replyBytes, u16 *u16reply, int maxwait,
int sleepFlag);
static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
static void mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
static void mpt_adapter_disable(MPT_ADAPTER *ioc);
static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
static int PrimeIocFifos(MPT_ADAPTER *ioc);
static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
static int GetLanConfigPages(MPT_ADAPTER *ioc);
static int GetIoUnitPage2(MPT_ADAPTER *ioc);
int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
int sleepFlag);
static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
#ifdef CONFIG_PROC_FS
static const struct file_operations mpt_summary_proc_fops;
static const struct file_operations mpt_version_proc_fops;
static const struct file_operations mpt_iocinfo_proc_fops;
#endif
static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
static int ProcessEventNotification(MPT_ADAPTER *ioc,
EventNotificationReply_t *evReply, int *evHandlers);
static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
/* module entry point */
static int __init fusion_init (void);
static void __exit fusion_exit (void);
#define CHIPREG_READ32(addr) readl_relaxed(addr)
#define CHIPREG_READ32_dmasync(addr) readl(addr)
#define CHIPREG_WRITE32(addr,val) writel(val, addr)
#define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr)
#define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr)
static void
pci_disable_io_access(struct pci_dev *pdev)
{
u16 command_reg;
pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
command_reg &= ~1;
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
static void
pci_enable_io_access(struct pci_dev *pdev)
{
u16 command_reg;
pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
command_reg |= 1;
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
MPT_ADAPTER *ioc;
if (ret)
return ret;
list_for_each_entry(ioc, &ioc_list, list)
ioc->debug_level = mpt_debug_level;
return 0;
}
/**
 * mpt_get_cb_idx - obtain cb_idx for registered driver
 * @dclass: class driver enum
 *
 * Scans the driver-class table from the top slot downward (slot 0 is
 * reserved) and returns the first matching handle, or zero when no
 * driver of that class is registered.
 **/
static u8
mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
{
        u8 idx = MPT_MAX_PROTOCOL_DRIVERS - 1;

        while (idx) {
                if (MptDriverClass[idx] == dclass)
                        break;
                idx--;
        }
        return idx;
}
/**
 * mpt_is_discovery_complete - determine if discovery has completed
 * @ioc: per adapter instance
 *
 * Reads SAS IO Unit Page 0 and checks the DISCOVERY_IN_PROGRESS flag
 * of the first phy.
 *
 * Returns 1 when discovery completed, else zero.
 */
static int
mpt_is_discovery_complete(MPT_ADAPTER *ioc)
{
        ConfigExtendedPageHeader_t hdr;
        CONFIGPARMS cfg;
        SasIOUnitPage0_t *buffer;
        dma_addr_t dma_handle;
        int rc = 0;

        memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
        memset(&cfg, 0, sizeof(CONFIGPARMS));
        hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
        hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
        cfg.cfghdr.ehdr = &hdr;
        cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;

        /* First pass fetches only the header, to learn the page length. */
        if ((mpt_config(ioc, &cfg)))
                goto out;
        if (!hdr.ExtPageLength)
                goto out;

        /* ExtPageLength is in 32-bit words, hence the * 4 byte count. */
        buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
            &dma_handle);
        if (!buffer)
                goto out;

        /* Second pass reads the full current page into the DMA buffer. */
        cfg.physAddr = dma_handle;
        cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        if ((mpt_config(ioc, &cfg)))
                goto out_free_consistent;

        if (!(buffer->PhyData[0].PortFlags &
            MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
                rc = 1;

 out_free_consistent:
        pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
            buffer, dma_handle);
 out:
        return rc;
}
/**
 * mpt_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return 0 if controller is removed from pci subsystem.
 * Return -1 for other case.
 */
static int mpt_remove_dead_ioc_func(void *arg)
{
        MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
        struct pci_dev *pdev;

        if (!ioc)
                return -1;

        pdev = ioc->pcidev;
        if (!pdev)
                return -1;

        pci_stop_and_remove_bus_device(pdev);
        return 0;
}
/**
 * mpt_fault_reset_work - work performed on workq after ioc fault
 * @work: input argument, used to derive ioc
 *
 * Periodic poll of the IOC doorbell state.  Handles three cases:
 * a dead (non-operational) IOC, a FAULTed IOC (hard reset), and the
 * SAS discovery-quiesce window.  Re-arms itself unless the IOC is dead.
 **/
static void
mpt_fault_reset_work(struct work_struct *work)
{
        MPT_ADAPTER *ioc =
            container_of(work, MPT_ADAPTER, fault_reset_work.work);
        u32 ioc_raw_state;
        int rc;
        unsigned long flags;
        MPT_SCSI_HOST *hd;
        struct task_struct *p;

        if (ioc->ioc_reset_in_progress || !ioc->active)
                goto out;

        ioc_raw_state = mpt_GetIocState(ioc, 0);
        /* All state bits set means the doorbell read back as garbage:
         * the IOC is gone (e.g. surprise removal). */
        if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
                printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
                    ioc->name, __func__);

                /*
                 * Call mptscsih_flush_pending_cmds callback so that we
                 * flush all pending commands back to OS.
                 * This call is required to avoid deadlock at block layer.
                 * Dead IOC will fail to do diag reset,and this call is safe
                 * since dead ioc will never return any command back from HW.
                 */
                hd = shost_priv(ioc->sh);
                ioc->schedule_dead_ioc_flush_running_cmds(hd);

                /* Remove the Dead Host; must run in a kthread because
                 * device removal can sleep. */
                p = kthread_run(mpt_remove_dead_ioc_func, ioc,
                    "mpt_dead_ioc_%d", ioc->id);
                if (IS_ERR(p)) {
                        printk(MYIOC_s_ERR_FMT
                            "%s: Running mpt_dead_ioc thread failed !\n",
                            ioc->name, __func__);
                } else {
                        printk(MYIOC_s_WARN_FMT
                            "%s: Running mpt_dead_ioc thread success !\n",
                            ioc->name, __func__);
                }
                return; /* don't rearm timer */
        }

        if ((ioc_raw_state & MPI_IOC_STATE_MASK)
            == MPI_IOC_STATE_FAULT) {
                printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
                    ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
                printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
                    ioc->name, __func__);
                rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
                printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
                    __func__, (rc == 0) ? "success" : "failed");
                ioc_raw_state = mpt_GetIocState(ioc, 0);
                if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
                        printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
                            "reset (%04xh)\n", ioc->name, ioc_raw_state &
                            MPI_DOORBELL_DATA_MASK);
        } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
                /* Lift the I/O quiesce once SAS discovery has finished. */
                if ((mpt_is_discovery_complete(ioc))) {
                        devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
                            "discovery_quiesce_io flag\n", ioc->name));
                        ioc->sas_discovery_quiesce_io = 0;
                }
        }

 out:
        /*
         * Take turns polling alternate controller
         */
        if (ioc->alt_ioc)
                ioc = ioc->alt_ioc;

        /* rearm the timer */
        spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
        if (ioc->reset_work_q)
                queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
                    msecs_to_jiffies(MPT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
}
/*
 * mpt_turbo_reply - process a turbo (context) reply from the reply FIFO
 * @ioc: adapter the reply arrived on
 * @pa:  raw 32-bit context value read from the reply FIFO
 *
 * Decodes the context type from the top bits of @pa, derives the
 * original request frame and/or reply frame, and dispatches to the
 * registered protocol callback.  Frees the request frame when the
 * callback asks for it.
 *
 * Fix: removed the unreachable `break' that followed `return' in the
 * LAN branch (dead code); control flow is otherwise unchanged.
 */
static void
mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
{
        MPT_FRAME_HDR *mf = NULL;
        MPT_FRAME_HDR *mr = NULL;
        u16 req_idx = 0;
        u8 cb_idx;

        dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n",
            ioc->name, pa));

        switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
        case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
                /* Low word is the request index, next byte the callback. */
                req_idx = pa & 0x0000FFFF;
                cb_idx = (pa & 0x00FF0000) >> 16;
                mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
                break;
        case MPI_CONTEXT_REPLY_TYPE_LAN:
                cb_idx = mpt_get_cb_idx(MPTLAN_DRIVER);
                /*
                 * Blind set of mf to NULL here was fatal
                 * after lan_reply says "freeme"
                 * Fix sort of combined with an optimization here;
                 * added explicit check for case where lan_reply
                 * was just returning 1 and doing nothing else.
                 * For this case skip the callback, but set up
                 * proper mf value first here:-)
                 */
                if ((pa & 0x58000000) == 0x58000000) {
                        req_idx = pa & 0x0000FFFF;
                        mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
                        mpt_free_msg_frame(ioc, mf);
                        mb();
                        return;
                }
                mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
                break;
        case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
                cb_idx = mpt_get_cb_idx(MPTSTM_DRIVER);
                mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
                break;
        default:
                cb_idx = 0;
                BUG();
        }

        /* Check for (valid) IO callback! */
        if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
            MptCallbacks[cb_idx] == NULL) {
                printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
                    __func__, ioc->name, cb_idx);
                goto out;
        }

        if (MptCallbacks[cb_idx](ioc, mf, mr))
                mpt_free_msg_frame(ioc, mf);
 out:
        mb();
}
/*
 * mpt_reply - process a non-turbo (full frame) reply
 * @ioc: adapter the reply arrived on
 * @pa:  reply FIFO value with MPI_ADDRESS_REPLY_A_BIT set
 *
 * Maps the DMA address back to the CPU-visible reply frame, logs any
 * IOC status / log info, dispatches to the protocol callback, and
 * returns the reply frame to the IOC by writing the FIFO.
 */
static void
mpt_reply(MPT_ADAPTER *ioc, u32 pa)
{
        MPT_FRAME_HDR *mf;
        MPT_FRAME_HDR *mr;
        u16 req_idx;
        u8 cb_idx;
        int freeme;
        u32 reply_dma_low;
        u16 ioc_stat;

        /* non-TURBO reply! Hmmm, something may be up...
         * Newest turbo reply mechanism; get address
         * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
         */

        /* Map DMA address of reply header to cpu address.
         * pa is 32 bits - but the dma address may be 32 or 64 bits
         * get offset based only on the low addresses
         */
        reply_dma_low = (pa <<= 1);
        mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
            (reply_dma_low - ioc->reply_frames_low_dma));

        /* The reply frame carries the originating request's index and
         * callback handle in its hardware header. */
        req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
        cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
        mf = MPT_INDEX_2_MFPTR(ioc, req_idx);

        dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
            ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
        DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr);

        /* Check/log IOC log info
         */
        ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
        if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
                u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
                if (ioc->bus_type == FC)
                        mpt_fc_log_info(ioc, log_info);
                else if (ioc->bus_type == SPI)
                        mpt_spi_log_info(ioc, log_info);
                else if (ioc->bus_type == SAS)
                        mpt_sas_log_info(ioc, log_info, cb_idx);
        }

        if (ioc_stat & MPI_IOCSTATUS_MASK)
                mpt_iocstatus_info(ioc, (u32)ioc_stat, mf);

        /* Check for (valid) IO callback! */
        if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
            MptCallbacks[cb_idx] == NULL) {
                printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
                    __func__, ioc->name, cb_idx);
                freeme = 0;
                goto out;
        }

        freeme = MptCallbacks[cb_idx](ioc, mf, mr);

 out:
        /* Flush (non-TURBO) reply with a WRITE! */
        CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);

        if (freeme)
                mpt_free_msg_frame(ioc, mf);
        mb();
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * This routine is registered via the request_irq() kernel API call,
 * and handles all interrupts generated from a specific MPT adapter
 * (also referred to as a IO Controller or IOC).
 * This routine must clear the interrupt from the adapter and does
 * so by reading the reply FIFO. Multiple replies may be processed
 * per single call to this routine.
 *
 * This routine handles register-level access of the adapter but
 * dispatches (calls) a protocol-specific callback routine to handle
 * the protocol-specific details of the MPT request completion.
 */
static irqreturn_t
mpt_interrupt(int irq, void *bus_id)
{
        MPT_ADAPTER *ioc = bus_id;
        u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);

        /* An empty FIFO reads back as all ones: not our interrupt. */
        if (pa == 0xFFFFFFFF)
                return IRQ_NONE;

        /* Drain the reply FIFO until it reads empty again. */
        while (pa != 0xFFFFFFFF) {
                if (pa & MPI_ADDRESS_REPLY_A_BIT)
                        mpt_reply(ioc, pa);
                else
                        mpt_turbo_reply(ioc, pa);
                pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
        }

        return IRQ_HANDLED;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mptbase_reply - MPT base driver's callback routine
 * @ioc: Pointer to MPT_ADAPTER structure
 * @req: Pointer to original MPT request frame
 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
 *
 * MPT base driver's callback routine; all base driver
 * "internal" request/reply processing is routed here.
 * Currently used for EventNotification and EventAck handling.
 *
 * Returns 1 indicating original alloc'd request frame ptr
 * should be freed, or 0 if it shouldn't.
 */
static int
mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
{
        EventNotificationReply_t *pEventReply;
        u8 event;
        int evHandlers;
        int freereq = 1;

        switch (reply->u.hdr.Function) {
        case MPI_FUNCTION_EVENT_NOTIFICATION:
                pEventReply = (EventNotificationReply_t *)reply;
                evHandlers = 0;
                ProcessEventNotification(ioc, pEventReply, &evHandlers);
                event = le32_to_cpu(pEventReply->Event) & 0xFF;
                if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
                        freereq = 0;
                if (event != MPI_EVENT_EVENT_CHANGE)
                        break;
                /* deliberate fall through for EVENT_CHANGE: treated like an
                 * internal command completion below */
        case MPI_FUNCTION_CONFIG:
        case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
                ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
                if (reply) {
                        ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
                        memcpy(ioc->mptbase_cmds.reply, reply,
                            min(MPT_DEFAULT_FRAME_SIZE,
                                4 * reply->u.reply.MsgLength));
                }
                if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
                        ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
                        complete(&ioc->mptbase_cmds.done);
                } else
                        freereq = 0;
                if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
                        freereq = 1;
                break;
        case MPI_FUNCTION_EVENT_ACK:
                devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                    "EventAck reply received\n", ioc->name));
                break;
        default:
                printk(MYIOC_s_ERR_FMT
                    "Unexpected msg function (=%02Xh) reply received!\n",
                    ioc->name, reply->u.hdr.Function);
                break;
        }

        /*
         * Conditionally tell caller to free the original
         * EventNotification/EventAck/unexpected request frame!
         */
        return freereq;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_register - Register protocol-specific main callback handler.
 * @cbfunc: callback function pointer
 * @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
 * @func_name: call function's name
 *
 * This routine is called by a protocol-specific driver (SCSI host,
 * LAN, SCSI target) to register its reply callback routine. Each
 * protocol-specific driver must do this before it will be able to
 * use any IOC resources, such as obtaining request frames.
 *
 * NOTES: The SCSI protocol driver currently calls this routine thrice
 * in order to register separate callbacks; one for "normal" SCSI IO;
 * one for MptScsiTaskMgmt requests; one for Scan/DV requests.
 *
 * Returns u8 valued "handle" in the range (and S.O.D. order)
 * {N,...,7,6,5,...,1} if successful.
 * A return value of MPT_MAX_PROTOCOL_DRIVERS (including zero!) should be
 * considered an error by the caller.
 */
u8
mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
{
        u8 idx = MPT_MAX_PROTOCOL_DRIVERS - 1;

        last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;

        /* Scan {N-1,...,1} for a free slot; slot/handle 0 is reserved. */
        while (idx) {
                if (MptCallbacks[idx] == NULL) {
                        MptCallbacks[idx] = cbfunc;
                        MptDriverClass[idx] = dclass;
                        MptEvHandlers[idx] = NULL;
                        last_drv_idx = idx;
                        strlcpy(MptCallbacksName[idx], func_name,
                            MPT_MAX_CALLBACKNAME_LEN+1);
                        break;
                }
                idx--;
        }

        return last_drv_idx;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_deregister - Deregister a protocol drivers resources.
 * @cb_idx: previously registered callback handle
 *
 * Each protocol-specific driver should call this routine when its
 * module is unloaded.
 */
void
mpt_deregister(u8 cb_idx)
{
        /* Reject handle 0 (reserved) and out-of-range handles. */
        if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
                return;

        MptCallbacks[cb_idx] = NULL;
        MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
        MptEvHandlers[cb_idx] = NULL;
        last_drv_idx++;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_event_register - Register protocol-specific event callback handler.
 * @cb_idx: previously registered (via mpt_register) callback handle
 * @ev_cbfunc: callback function
 *
 * This routine can be called by one or more protocol-specific drivers
 * if/when they choose to be notified of MPT events.
 *
 * Returns 0 for success.
 */
int
mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
{
        if (cb_idx && cb_idx < MPT_MAX_PROTOCOL_DRIVERS) {
                MptEvHandlers[cb_idx] = ev_cbfunc;
                return 0;
        }
        return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_event_deregister - Deregister protocol-specific event callback handler
 * @cb_idx: previously registered callback handle
 *
 * Each protocol-specific driver should call this routine
 * when it does not (or can no longer) handle events,
 * or when its module is unloaded.
 */
void
mpt_event_deregister(u8 cb_idx)
{
        if (cb_idx && cb_idx < MPT_MAX_PROTOCOL_DRIVERS)
                MptEvHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_reset_register - Register protocol-specific IOC reset handler.
 * @cb_idx: previously registered (via mpt_register) callback handle
 * @reset_func: reset function
 *
 * This routine can be called by one or more protocol-specific drivers
 * if/when they choose to be notified of IOC resets.
 *
 * Returns 0 for success.
 */
int
mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func)
{
        if (cb_idx && cb_idx < MPT_MAX_PROTOCOL_DRIVERS) {
                MptResetHandlers[cb_idx] = reset_func;
                return 0;
        }
        return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
 * @cb_idx: previously registered callback handle
 *
 * Each protocol-specific driver should call this routine
 * when it does not (or can no longer) handle IOC reset handling,
 * or when its module is unloaded.
 */
void
mpt_reset_deregister(u8 cb_idx)
{
        if (cb_idx && cb_idx < MPT_MAX_PROTOCOL_DRIVERS)
                MptResetHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_device_driver_register - Register device driver hooks
 * @dd_cbfunc: driver callbacks struct
 * @cb_idx: MPT protocol driver index
 *
 * Stores the hooks and immediately invokes the probe callback for
 * every adapter already discovered.
 */
int
mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
{
        MPT_ADAPTER *ioc;
        const struct pci_device_id *id;

        if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
                return -EINVAL;

        MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;

        /* call per pci device probe entry point */
        list_for_each_entry(ioc, &ioc_list, list) {
                if (dd_cbfunc->probe) {
                        id = ioc->pcidev->driver ?
                            ioc->pcidev->driver->id_table : NULL;
                        dd_cbfunc->probe(ioc->pcidev, id);
                }
        }

        return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_device_driver_deregister - DeRegister device driver hooks
 * @cb_idx: MPT protocol driver index
 *
 * Invokes the remove callback for every known adapter, then clears
 * the handler slot.
 *
 * Fix: guard against a NULL handler slot.  The original dereferenced
 * dd_cbfunc->remove unconditionally, which would oops if this routine
 * were called for an index that was never registered (or already
 * deregistered).
 */
void
mpt_device_driver_deregister(u8 cb_idx)
{
        struct mpt_pci_driver *dd_cbfunc;
        MPT_ADAPTER *ioc;

        if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
                return;

        dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
        if (!dd_cbfunc)
                return;   /* nothing registered at this index */

        list_for_each_entry(ioc, &ioc_list, list) {
                if (dd_cbfunc->remove)
                        dd_cbfunc->remove(ioc->pcidev);
        }

        MptDeviceDriverHandlers[cb_idx] = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_get_msg_frame - Obtain an MPT request frame from the pool
 * @cb_idx: Handle of registered MPT protocol driver
 * @ioc: Pointer to MPT adapter structure
 *
 * Obtain an MPT request frame from the pool (of 1024) that are
 * allocated per MPT adapter.
 *
 * Returns pointer to a MPT request frame or %NULL if none are available
 * or IOC is not active.
 */
MPT_FRAME_HDR*
mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
{
        MPT_FRAME_HDR *mf;
        unsigned long flags;
        u16 req_idx; /* Request index */

        /* validate handle and ioc identifier */

#ifdef MFCNT
        if (!ioc->active)
                printk(MYIOC_s_WARN_FMT "IOC Not Active! mpt_get_msg_frame "
                    "returning NULL!\n", ioc->name);
#endif

        /* If interrupts are not attached, do not return a request frame */
        if (!ioc->active)
                return NULL;

        /* FreeQ is shared with interrupt context; take the lock. */
        spin_lock_irqsave(&ioc->FreeQlock, flags);
        if (!list_empty(&ioc->FreeQ)) {
                int req_offset;

                mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
                    u.frame.linkage.list);
                list_del(&mf->u.frame.linkage.list);
                mf->u.frame.linkage.arg1 = 0;
                mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
                /* Index is derived from the frame's byte offset in the pool. */
                req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
                /* u16! */
                req_idx = req_offset / ioc->req_sz;
                mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
                mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
                /* Default, will be changed if necessary in SG generation */
                ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame;
#ifdef MFCNT
                ioc->mfcnt++;
#endif
        }
        else
                mf = NULL;
        spin_unlock_irqrestore(&ioc->FreeQlock, flags);

#ifdef MFCNT
        if (mf == NULL)
                printk(MYIOC_s_WARN_FMT "IOC Active. No free Msg Frames! "
                    "Count 0x%x Max 0x%x\n", ioc->name, ioc->mfcnt,
                    ioc->req_depth);
        mfcounter++;
        if (mfcounter == PRINT_MF_COUNT)
                printk(MYIOC_s_INFO_FMT "MF Count 0x%x Max 0x%x \n", ioc->name,
                    ioc->mfcnt, ioc->req_depth);
#endif

        dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_get_msg_frame(%d,%d), got mf=%p\n",
            ioc->name, cb_idx, ioc->id, mf));
        return mf;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
 * @cb_idx: Handle of registered MPT protocol driver
 * @ioc: Pointer to MPT adapter structure
 * @mf: Pointer to MPT request frame
 *
 * This routine posts an MPT request frame to the request post FIFO of a
 * specific MPT adapter.
 */
void
mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
        u32 mf_dma_addr;
        int req_offset;
        u16 req_idx; /* Request index */

        /* ensure values are reset properly! */
        mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
        req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
        /* u16! */
        req_idx = req_offset / ioc->req_sz;
        mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
        mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;

        DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);

        /* Low bits of the posted address carry the frame's NB size code. */
        mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
        dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d "
            "RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx,
            ioc->RequestNB[req_idx]));
        CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
}
/**
 * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
 * @cb_idx: Handle of registered MPT protocol driver
 * @ioc: Pointer to MPT adapter structure
 * @mf: Pointer to MPT request frame
 *
 * Send a protocol-specific MPT request frame to an IOC using
 * hi-priority request queue.
 *
 * This routine posts an MPT request frame to the request post FIFO of a
 * specific MPT adapter.  Unlike mpt_put_msg_frame(), the posted address
 * does not carry a RequestNB size code.
 **/
void
mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
        u32 mf_dma_addr;
        int req_offset;
        u16 req_idx; /* Request index */

        /* ensure values are reset properly! */
        mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
        req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
        req_idx = req_offset / ioc->req_sz;
        mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
        mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;

        DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);

        mf_dma_addr = (ioc->req_frames_low_dma + req_offset);
        dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d\n",
            ioc->name, mf_dma_addr, req_idx));
        CHIPREG_WRITE32(&ioc->chip->RequestHiPriFifo, mf_dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 * mpt_free_msg_frame - Place MPT request frame back on FreeQ.
 * @ioc: Pointer to MPT adapter structure
 * @mf: Pointer to MPT request frame
 *
 * This routine places a MPT request frame back on the MPT adapter's
 * FreeQ.  A 0xdeadbeaf signature in linkage.arg1 marks frames already
 * freed, so a double free becomes a silent no-op instead of corrupting
 * the free list.
 */
void
mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
        unsigned long flags;

        /* Put Request back on FreeQ! */
        spin_lock_irqsave(&ioc->FreeQlock, flags);
        /* Already freed?  (byte-swap compare is symmetric, so comparing
         * the swapped field against the host constant works.) */
        if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
                goto out;
        /* signature to know if this mf is freed */
        mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
        list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
#ifdef MFCNT
        ioc->mfcnt--;
#endif
 out:
        spin_unlock_irqrestore(&ioc->FreeQlock, flags);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@flagslength: SGE flags and data transfer length
 *	@dma_addr: Physical address
 *
 *	This routine writes a simple 32-bit scatter-gather element
 *	(flags/length word plus a 32-bit bus address, both little-endian)
 *	at the given location.
 *	(The previous comment here was a copy-paste of the FreeQ text.)
 */
static void
mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
	SGESimple32_t *pSge = (SGESimple32_t *) pAddr;

	pSge->FlagsLength = cpu_to_le32(flagslength);
	pSge->Address = cpu_to_le32(dma_addr);
}
/**
 *	mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@flagslength: SGE flags and data transfer length
 *	@dma_addr: Physical address
 *
 *	This routine writes a simple 64-bit scatter-gather element:
 *	the low and high halves of the bus address plus the flags/length
 *	word with the 64-bit-addressing flag OR'd in, all little-endian.
 *	(The previous comment here was a copy-paste of the FreeQ text.)
 **/
static void
mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;

	pSge->Address.Low = cpu_to_le32
			(lower_32_bits(dma_addr));
	pSge->Address.High = cpu_to_le32
			(upper_32_bits(dma_addr));
	pSge->FlagsLength = cpu_to_le32
			((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}
/**
 *	mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
 *	@pAddr: virtual address for SGE
 *	@flagslength: SGE flags and data transfer length
 *	@dma_addr: Physical address
 *
 *	Same as mpt_add_sge_64bit(), plus the SAS1078 errata workaround:
 *	when the end of the transfer falls in the 36GB-39GB range
 *	(bits 35:32 of the end address == 9), the SGE is marked as a
 *	local address and bit 31 of the high address word is set.
 *	(The previous comment here was a copy-paste of the FreeQ text.)
 **/
static void
mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
{
	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
	u32 tmp;

	pSge->Address.Low = cpu_to_le32
			(lower_32_bits(dma_addr));
	tmp = (u32)(upper_32_bits(dma_addr));

	/*
	 * 1078 errata workaround for the 36GB limitation
	 */
	if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32)  == 9) {
		flagslength |=
		    MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
		tmp |= (1<<31);	/* P0M2 addressing marker for the 1078 */
		if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
			printk(KERN_DEBUG "1078 P0M2 addressing for "
			    "addr = 0x%llx len = %d\n",
			    (unsigned long long)dma_addr,
			    MPI_SGE_LENGTH(flagslength));
	}

	pSge->Address.High = cpu_to_le32(tmp);
	pSge->FlagsLength = cpu_to_le32(
		(flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@next: nextChainOffset value (u32's)
 *	@length: length of next SGL segment
 *	@dma_addr: Physical address
 *
 *	Fills in a 32-bit chain element (little-endian on the wire) that
 *	links the current SGL segment to the next one.
 */
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
	SGEChain32_t *chain = pAddr;

	chain->Address = cpu_to_le32(dma_addr);
	chain->NextChainOffset = next;
	chain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
	chain->Length = cpu_to_le16(length);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
 *	@pAddr: virtual address for SGE
 *	@next: nextChainOffset value (u32's)
 *	@length: length of next SGL segment
 *	@dma_addr: Physical address
 *
 *	Fills in a 64-bit chain element: flags carry both the chain-element
 *	and 64-bit-addressing markers, and the bus address is split into
 *	little-endian low/high halves.
 */
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
	SGEChain64_t *chain = pAddr;

	chain->Length = cpu_to_le16(length);
	chain->NextChainOffset = next;
	chain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
			MPI_SGE_FLAGS_64_BIT_ADDRESSING);
	chain->Address.Low = cpu_to_le32((u32)(dma_addr & 0xFFFFFFFF));
	chain->Address.High = cpu_to_le32((u32)(upper_32_bits(dma_addr)));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_send_handshake_request - Send MPT request via doorbell handshake method.
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *	@reqBytes: Size of the request in bytes
 *	@req: Pointer to MPT request frame
 *	@sleepFlag: Use schedule if CAN_SLEEP else use udelay.
 *
 *	This routine is used exclusively to send MptScsiTaskMgmt
 *	requests since they are required to be sent via doorbell handshake.
 *
 *	NOTE: It is the callers responsibility to byte-swap fields in the
 *	request which are greater than 1 byte in size.
 *
 *	Returns 0 for success, non-zero for failure:
 *	-2/-3 doorbell ack timeout, -4 final doorbell int timeout,
 *	-5 doorbell did not go active, or a WaitForDoorbellInt() error.
 */
int
mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
{
	int	r = 0;
	u8	*req_as_bytes;
	int	 ii;

	/* State is known to be good upon entering
	 * this function so issue the bus reset
	 * request.
	 */

	/*
	 * Emulate what mpt_put_msg_frame() does /wrt to sanity
	 * setting cb_idx/req_idx.  But ONLY if this request
	 * is in proper (pre-alloc'd) request buffer range...
	 */
	ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
	if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
		MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
		mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
		mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
	}

	/* Make sure there are no doorbells */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	/* Announce the handshake: function code plus payload size in dwords. */
	CHIPREG_WRITE32(&ioc->chip->Doorbell,
			((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
			 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));

	/* Wait for IOC doorbell int */
	if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
		return ii;
	}

	/* Read doorbell and check for active bit */
	if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
		return -5;

	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_send_handshake_request start, WaitCnt=%d\n",
		ioc->name, ii));

	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
		return -2;
	}

	/* Send request via doorbell handshake:
	 * one 32-bit word at a time, each assembled little-endian from the
	 * request bytes, each acknowledged by the IOC before the next. */
	req_as_bytes = (u8 *) req;
	for (ii = 0; ii < reqBytes/4; ii++) {
		u32 word;

		word = ((req_as_bytes[(ii*4) + 0] <<  0) |
			(req_as_bytes[(ii*4) + 1] <<  8) |
			(req_as_bytes[(ii*4) + 2] << 16) |
			(req_as_bytes[(ii*4) + 3] << 24));
		CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
		if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
			r = -3;
			break;
		}
	}

	/* Final interrupt signals the IOC has consumed the whole request. */
	if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
		r = 0;
	else
		r = -4;

	/* Make sure there are no doorbells */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_host_page_access_control - control the IOC's Host Page Buffer access
 *	@ioc: Pointer to MPT adapter structure
 *	@access_control_value: define bits below
 *	@sleepFlag: Specifies whether the process can sleep
 *
 *	Provides mechanism for the host driver to control the IOC's
 *	Host Page Buffer access.
 *
 *	Access Control Value - bits[15:12]
 *	0h Reserved
 *	1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
 *	2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
 *	3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
 *
 *	Returns 0 for success, -1 if the doorbell is busy, -2 on ack timeout.
 */
static int
mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
{
	/* Doorbell currently owned by another request - bail out. */
	if (CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE)
		return -1;

	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
	CHIPREG_WRITE32(&ioc->chip->Doorbell,
			(MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
			 << MPI_DOORBELL_FUNCTION_SHIFT) |
			(access_control_value << 12));

	/* Wait for IOC to clear Doorbell Status bit */
	if (WaitForDoorbellAck(ioc, 5, sleepFlag) < 0)
		return -2;

	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_host_page_alloc - allocate system memory for the fw
 *	@ioc: Pointer to MPT adapter structure
 *	@ioc_init: Pointer to ioc init config page
 *
 *	If we already allocated memory in past, then resend the same pointer.
 *	Otherwise allocate the size the firmware requested (from IOCFacts),
 *	backing off in 4KB steps until an allocation succeeds, then build
 *	the SGE describing the buffer into @ioc_init.
 *
 *	Returns 0 for success (including "fw wants no buffer"),
 *	-999 if no memory could be allocated at all.
 */
static int
mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
{
	char	*psge;
	int	flags_length;
	u32	host_page_buffer_sz=0;

	if(!ioc->HostPageBuffer) {

		/* Requested size is the low 24 bits of the firmware's SGE. */
		host_page_buffer_sz =
		    le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;

		if(!host_page_buffer_sz)
			return 0; /* fw doesn't need any host buffers */

		/* spin till we get enough memory */
		while(host_page_buffer_sz > 0) {

			if((ioc->HostPageBuffer = pci_alloc_consistent(
			    ioc->pcidev,
			    host_page_buffer_sz,
			    &ioc->HostPageBuffer_dma)) != NULL) {

				dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				    "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
				    ioc->name, ioc->HostPageBuffer,
				    (u32)ioc->HostPageBuffer_dma,
				    host_page_buffer_sz));
				ioc->alloc_total += host_page_buffer_sz;
				ioc->HostPageBuffer_sz = host_page_buffer_sz;
				break;
			}

			/* Allocation failed: retry with 4KB less. */
			host_page_buffer_sz -= (4*1024);
		}
	}

	if(!ioc->HostPageBuffer) {
		printk(MYIOC_s_ERR_FMT
		    "Failed to alloc memory for host_page_buffer!\n",
		    ioc->name);
		return -999;
	}

	/* Describe the buffer to the IOC as a single host-to-IOC SGE. */
	psge = (char *)&ioc_init->HostPageBufferSGE;
	flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_SYSTEM_ADDRESS |
	    MPI_SGE_FLAGS_HOST_TO_IOC |
	    MPI_SGE_FLAGS_END_OF_BUFFER;
	flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
	flags_length |= ioc->HostPageBuffer_sz;
	ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
	/* Cache the SGE so a later re-init resends the same buffer. */
	ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;

	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
 *	@iocid: IOC unique identifier (integer)
 *	@iocpp: Pointer to pointer to IOC adapter
 *
 *	Scans the global adapter list for the IOC with the given id.
 *
 *	Returns iocid and sets iocpp if iocid is found.
 *	Returns -1 (and sets *iocpp to NULL) if iocid is not found.
 */
int
mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
{
	MPT_ADAPTER *ioc;

	list_for_each_entry(ioc, &ioc_list, list) {
		if (ioc->id != iocid)
			continue;
		*iocpp = ioc;
		return iocid;
	}

	*iocpp = NULL;
	return -1;
}
/**
 *	mpt_get_product_name - returns product string
 *	@vendor: pci vendor id
 *	@device: pci device id
 *	@revision: pci revision id
 *	@prod_name: string returned (caller-supplied buffer)
 *
 *	Returns product string displayed when driver loads,
 *	in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
 *
 *	If the device/revision combination is unknown, @prod_name is left
 *	untouched.
 *	NOTE(review): writes with sprintf(); assumes @prod_name is large
 *	enough for the longest product string -- confirm at call sites.
 **/
static void
mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
{
	char *product_str = NULL;

	/* Brocade-branded boards get their own naming for the FC949E. */
	if (vendor == PCI_VENDOR_ID_BROCADE) {
		switch (device)
		{
		case MPI_MANUFACTPAGE_DEVICEID_FC949E:
			switch (revision)
			{
			case 0x00:
				product_str = "BRE040 A0";
				break;
			case 0x01:
				product_str = "BRE040 A1";
				break;
			default:
				product_str = "BRE040";
				break;
			}
			break;
		}
		goto out;
	}

	switch (device)
	{
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
		product_str = "LSIFC909 B1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
		product_str = "LSIFC919 B0";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
		product_str = "LSIFC929 B0";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
		/* revision >= 0x80 marks the "XL" variant */
		if (revision < 0x80)
			product_str = "LSIFC919X A0";
		else
			product_str = "LSIFC919XL A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
		/* revision >= 0x80 marks the "XL" variant */
		if (revision < 0x80)
			product_str = "LSIFC929X A0";
		else
			product_str = "LSIFC929XL A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC939X:
		product_str = "LSIFC939X A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		product_str = "LSIFC949X A1";
		break;
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
		switch (revision)
		{
		case 0x00:
			product_str = "LSIFC949E A0";
			break;
		case 0x01:
			product_str = "LSIFC949E A1";
			break;
		default:
			product_str = "LSIFC949E";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_53C1030:
		switch (revision)
		{
		case 0x00:
			product_str = "LSI53C1030 A0";
			break;
		case 0x01:
			product_str = "LSI53C1030 B0";
			break;
		case 0x03:
			product_str = "LSI53C1030 B1";
			break;
		case 0x07:
			product_str = "LSI53C1030 B2";
			break;
		case 0x08:
			product_str = "LSI53C1030 C0";
			break;
		case 0x80:
			product_str = "LSI53C1030T A0";
			break;
		case 0x83:
			product_str = "LSI53C1030T A2";
			break;
		case 0x87:
			product_str = "LSI53C1030T A3";
			break;
		case 0xc1:
			product_str = "LSI53C1020A A1";
			break;
		default:
			product_str = "LSI53C1030";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
		switch (revision)
		{
		case 0x03:
			product_str = "LSI53C1035 A2";
			break;
		case 0x04:
			product_str = "LSI53C1035 B0";
			break;
		default:
			product_str = "LSI53C1035";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1064:
		switch (revision)
		{
		case 0x00:
			product_str = "LSISAS1064 A1";
			break;
		case 0x01:
			product_str = "LSISAS1064 A2";
			break;
		case 0x02:
			product_str = "LSISAS1064 A3";
			break;
		case 0x03:
			product_str = "LSISAS1064 A4";
			break;
		default:
			product_str = "LSISAS1064";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
		switch (revision)
		{
		case 0x00:
			product_str = "LSISAS1064E A0";
			break;
		case 0x01:
			product_str = "LSISAS1064E B0";
			break;
		case 0x02:
			product_str = "LSISAS1064E B1";
			break;
		case 0x04:
			product_str = "LSISAS1064E B2";
			break;
		case 0x08:
			product_str = "LSISAS1064E B3";
			break;
		default:
			product_str = "LSISAS1064E";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1068:
		switch (revision)
		{
		case 0x00:
			product_str = "LSISAS1068 A0";
			break;
		case 0x01:
			product_str = "LSISAS1068 B0";
			break;
		case 0x02:
			product_str = "LSISAS1068 B1";
			break;
		default:
			product_str = "LSISAS1068";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
		switch (revision)
		{
		case 0x00:
			product_str = "LSISAS1068E A0";
			break;
		case 0x01:
			product_str = "LSISAS1068E B0";
			break;
		case 0x02:
			product_str = "LSISAS1068E B1";
			break;
		case 0x04:
			product_str = "LSISAS1068E B2";
			break;
		case 0x08:
			product_str = "LSISAS1068E B3";
			break;
		default:
			product_str = "LSISAS1068E";
			break;
		}
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1078:
		switch (revision)
		{
		case 0x00:
			product_str = "LSISAS1078 A0";
			break;
		case 0x01:
			product_str = "LSISAS1078 B0";
			break;
		case 0x02:
			product_str = "LSISAS1078 C0";
			break;
		case 0x03:
			product_str = "LSISAS1078 C1";
			break;
		case 0x04:
			product_str = "LSISAS1078 C2";
			break;
		default:
			product_str = "LSISAS1078";
			break;
		}
		break;
	}

 out:
	if (product_str)
		sprintf(prod_name, "%s", product_str);
}
/**
 *	mpt_mapresources - map in memory mapped io
 *	@ioc: Pointer to MPT adapter structure
 *
 *	Enables the PCI device, claims its memory BARs, selects a DMA mask
 *	(64-bit if the platform requires and supports it, else 32-bit),
 *	locates the first I/O and first memory resource, and ioremap()s the
 *	memory BAR into ioc->memmap / ioc->chip.
 *
 *	Returns 0 on success, -ENODEV/-EINVAL on failure.
 **/
static int
mpt_mapresources(MPT_ADAPTER *ioc)
{
	u8		__iomem *mem;
	int		 ii;
	resource_size_t	 mem_phys;
	unsigned long	 port;
	u32		 msize;
	u32		 psize;
	u8		 revision;
	int		 r = -ENODEV;
	struct pci_dev *pdev;

	pdev = ioc->pcidev;
	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
		    "failed\n", ioc->name);
		return r;
	}
	if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
		printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
		    "MEM failed\n", ioc->name);
		return r;
	}

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);

	/* Pick the widest DMA mask the platform actually needs AND the
	 * device/platform pair supports; fall back to 32-bit, else fail. */
	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask = dma_get_required_mask
		    (&pdev->dev);
		if (required_mask > DMA_BIT_MASK(32)
			&& !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
			&& !pci_set_consistent_dma_mask(pdev,
						 DMA_BIT_MASK(64))) {
			ioc->dma_mask = DMA_BIT_MASK(64);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
				ioc->name));
		} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
			&& !pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32))) {
			ioc->dma_mask = DMA_BIT_MASK(32);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
				ioc->name));
		} else {
			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
			    ioc->name, pci_name(pdev));
			pci_release_selected_regions(pdev, ioc->bars);
			return r;
		}
	} else {
		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
			&& !pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32))) {
			ioc->dma_mask = DMA_BIT_MASK(32);
			dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
				": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
				ioc->name));
		} else {
			printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
			    ioc->name, pci_name(pdev));
			pci_release_selected_regions(pdev, ioc->bars);
			return r;
		}
	}

	/* Record only the FIRST I/O resource and FIRST memory resource. */
	mem_phys = msize = 0;
	port = psize = 0;
	for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) {
		if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
			if (psize)
				continue;
			/* Get I/O space! */
			port = pci_resource_start(pdev, ii);
			psize = pci_resource_len(pdev, ii);
		} else {
			if (msize)
				continue;
			/* Get memmap */
			mem_phys = pci_resource_start(pdev, ii);
			msize = pci_resource_len(pdev, ii);
		}
	}
	ioc->mem_size = msize;

	mem = NULL;
	/* Get logical ptr for PciMem0 space */
	mem = ioremap(mem_phys, msize);
	if (mem == NULL) {
		printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
			" memory!\n", ioc->name);
		pci_release_selected_regions(pdev, ioc->bars);
		return -EINVAL;
	}
	ioc->memmap = mem;
	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
	    ioc->name, mem, (unsigned long long)mem_phys));

	ioc->mem_phys = mem_phys;
	ioc->chip = (SYSIF_REGS __iomem *)mem;

	/* Save Port IO values in case we need to do downloadboot */
	ioc->pio_mem_phys = port;
	ioc->pio_chip = (SYSIF_REGS __iomem *)port;

	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_attach - Install a PCI intelligent MPT adapter.
 *	@pdev: Pointer to pci_dev structure
 *	@id: PCI device ID information
 *
 *	This routine performs all the steps necessary to bring the IOC of
 *	a MPT adapter to a OPERATIONAL state.  This includes registering
 *	memory regions, registering the interrupt, and allocating request
 *	and reply memory pools.
 *
 *	This routine also pre-fetches the LAN MAC address of a Fibre Channel
 *	MPT adapter.
 *
 *	Returns 0 for success, non-zero for failure.
 *
 *	TODO: Add support for polled controllers
 */
int
mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER	*ioc;
	u8		 cb_idx;
	int		 r = -ENODEV;
	u8		 revision;
	u8		 pcixcmd;
	static int	 mpt_ids = 0;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *dent;
#endif

	ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
	if (ioc == NULL) {
		printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
		return -ENOMEM;
	}

	/* Assign the next free adapter id (never reused within this boot). */
	ioc->id = mpt_ids++;
	sprintf(ioc->name, "ioc%d", ioc->id);
	dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));

	/*
	 * set initial debug level
	 * (refer to mptdebug.h)
	 *
	 */
	ioc->debug_level = mpt_debug_level;
	if (mpt_debug_level)
		printk(KERN_INFO "mpt_debug_level=%xh\n", mpt_debug_level);

	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));

	ioc->pcidev = pdev;
	if (mpt_mapresources(ioc)) {
		kfree(ioc);
		return r;
	}

	/*
	 * Setting up proper handlers for scatter gather handling
	 * (the 1078 needs its errata-workaround SGE builder).
	 */
	if (ioc->dma_mask == DMA_BIT_MASK(64)) {
		if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
			ioc->add_sge = &mpt_add_sge_64bit_1078;
		else
			ioc->add_sge = &mpt_add_sge_64bit;
		ioc->add_chain = &mpt_add_chain_64bit;
		ioc->sg_addr_size = 8;
	} else {
		ioc->add_sge = &mpt_add_sge;
		ioc->add_chain = &mpt_add_chain;
		ioc->sg_addr_size = 4;
	}
	ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;

	ioc->alloc_total = sizeof(MPT_ADAPTER);
	ioc->req_sz = MPT_DEFAULT_FRAME_SIZE;		/* avoid div by zero! */
	ioc->reply_sz = MPT_REPLY_FRAME_SIZE;

	spin_lock_init(&ioc->taskmgmt_lock);
	mutex_init(&ioc->internal_cmds.mutex);
	init_completion(&ioc->internal_cmds.done);
	mutex_init(&ioc->mptbase_cmds.mutex);
	init_completion(&ioc->mptbase_cmds.done);
	mutex_init(&ioc->taskmgmt_cmds.mutex);
	init_completion(&ioc->taskmgmt_cmds.done);

	/* Initialize the event logging.
	 */
	ioc->eventTypes = 0;	/* None */
	ioc->eventContext = 0;
	ioc->eventLogSize = 0;
	ioc->events = NULL;

#ifdef MFCNT
	ioc->mfcnt = 0;
#endif

	ioc->sh = NULL;
	ioc->cached_fw = NULL;

	/* Initialize SCSI Config Data structure
	 */
	memset(&ioc->spi_data, 0, sizeof(SpiCfgData));

	/* Initialize the fc rport list head.
	 */
	INIT_LIST_HEAD(&ioc->fc_rports);

	/* Find lookup slot. */
	INIT_LIST_HEAD(&ioc->list);

	/* Initialize workqueue */
	INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);

	snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
		 "mpt_poll_%d", ioc->id);
	ioc->reset_work_q =
		create_singlethread_workqueue(ioc->reset_work_q_name);
	if (!ioc->reset_work_q) {
		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
		    ioc->name);
		pci_release_selected_regions(pdev, ioc->bars);
		kfree(ioc);
		return -ENOMEM;
	}

	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
	    ioc->name, &ioc->facts, &ioc->pfacts[0]));

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
	mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);

	/* Device-specific PCI-X quirks and bus-type classification. */
	switch (pdev->device)
	{
	case MPI_MANUFACTPAGE_DEVICEID_FC939X:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		ioc->errata_flag_1064 = 1;
		/* fallthrough - these are FC parts too */
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
		ioc->bus_type = FC;
		break;

	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
		if (revision < XL_929) {
			/* 929X Chip Fix. Set Split transactions level
		 	* for PCIX. Set MOST bits to zero.
		 	*/
			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
			pcixcmd &= 0x8F;
			pci_write_config_byte(pdev, 0x6a, pcixcmd);
		} else {
			/* 929XL Chip Fix. Set MMRBC to 0x08.
		 	*/
			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
			pcixcmd |= 0x08;
			pci_write_config_byte(pdev, 0x6a, pcixcmd);
		}
		ioc->bus_type = FC;
		break;

	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
		/* 919X Chip Fix. Set Split transactions level
		 * for PCIX. Set MOST bits to zero.
		 */
		pci_read_config_byte(pdev, 0x6a, &pcixcmd);
		pcixcmd &= 0x8F;
		pci_write_config_byte(pdev, 0x6a, pcixcmd);
		ioc->bus_type = FC;
		break;

	case MPI_MANUFACTPAGE_DEVID_53C1030:
		/* 1030 Chip Fix. Disable Split transactions
		 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
		 */
		if (revision < C0_1030) {
			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
			pcixcmd &= 0x8F;
			pci_write_config_byte(pdev, 0x6a, pcixcmd);
		}
		/* fallthrough - both 1030 variants are SPI parts */

	case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
		ioc->bus_type = SPI;
		break;

	case MPI_MANUFACTPAGE_DEVID_SAS1064:
	case MPI_MANUFACTPAGE_DEVID_SAS1068:
		ioc->errata_flag_1064 = 1;
		ioc->bus_type = SAS;
		break;

	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
	case MPI_MANUFACTPAGE_DEVID_SAS1078:
		ioc->bus_type = SAS;
		break;
	}

	/* MSI policy is a per-bus-type module parameter. */
	switch (ioc->bus_type) {

	case SAS:
		ioc->msi_enable = mpt_msi_enable_sas;
		break;

	case SPI:
		ioc->msi_enable = mpt_msi_enable_spi;
		break;

	case FC:
		ioc->msi_enable = mpt_msi_enable_fc;
		break;

	default:
		ioc->msi_enable = 0;
		break;
	}

	ioc->fw_events_off = 1;

	if (ioc->errata_flag_1064)
		pci_disable_io_access(pdev);

	spin_lock_init(&ioc->FreeQlock);

	/* Disable all! */
	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
	ioc->active = 0;
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	/* Set IOC ptr in the pcidev's driver data. */
	pci_set_drvdata(ioc->pcidev, ioc);

	/* Set lookup ptr. */
	list_add_tail(&ioc->list, &ioc_list);

	/* Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
	 */
	mpt_detect_bound_ports(ioc, pdev);

	INIT_LIST_HEAD(&ioc->fw_event_list);
	spin_lock_init(&ioc->fw_event_lock);
	snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
	/* NOTE(review): fw_event_q creation result is not checked here;
	 * a NULL queue would only surface later when queuing fw events. */
	ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);

	if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
	    CAN_SLEEP)) != 0){
		printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
		    ioc->name, r);

		/* Unwind everything done above, in reverse order. */
		list_del(&ioc->list);
		if (ioc->alt_ioc)
			ioc->alt_ioc->alt_ioc = NULL;
		iounmap(ioc->memmap);
		if (r != -5)
			pci_release_selected_regions(pdev, ioc->bars);

		destroy_workqueue(ioc->reset_work_q);
		ioc->reset_work_q = NULL;

		kfree(ioc);
		pci_set_drvdata(pdev, NULL);
		return r;
	}

	/* call per device driver probe entry point */
	for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
		if(MptDeviceDriverHandlers[cb_idx] &&
		  MptDeviceDriverHandlers[cb_idx]->probe) {
			MptDeviceDriverHandlers[cb_idx]->probe(pdev,id);
		}
	}

#ifdef CONFIG_PROC_FS
	/*
	 *  Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
	 */
	dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
	if (dent) {
		proc_create_data("info", S_IRUGO, dent, &mpt_iocinfo_proc_fops, ioc);
		proc_create_data("summary", S_IRUGO, dent, &mpt_summary_proc_fops, ioc);
	}
#endif

	/* Only the primary of a bound-port pair runs the fault-poll work. */
	if (!ioc->alt_ioc)
		queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
		    msecs_to_jiffies(MPT_POLLING_INTERVAL));

	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_detach - Remove a PCI intelligent MPT adapter.
 *	@pdev: Pointer to pci_dev structure
 *
 *	Tears down in reverse order of mpt_attach(): stops the fault-poll
 *	and fw-event workqueues, removes /proc entries, calls each protocol
 *	driver's remove hook, quiesces the hardware interrupt, then frees
 *	the adapter via mpt_adapter_dispose().
 */
void
mpt_detach(struct pci_dev *pdev)
{
	MPT_ADAPTER 	*ioc = pci_get_drvdata(pdev);
	char pname[32];
	u8 cb_idx;
	unsigned long flags;
	struct workqueue_struct *wq;

	/*
	 * Stop polling ioc for fault condition
	 * (NULL the queue pointer under the lock so no new work is queued,
	 * then cancel and destroy outside the lock).
	 */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	wq = ioc->reset_work_q;
	ioc->reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
	cancel_delayed_work(&ioc->fault_reset_work);
	destroy_workqueue(wq);

	/* Same pattern for the firmware event queue. */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->fw_event_q;
	ioc->fw_event_q = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	destroy_workqueue(wq);

	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
	remove_proc_entry(pname, NULL);
	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
	remove_proc_entry(pname, NULL);
	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
	remove_proc_entry(pname, NULL);

	/* call per device driver remove entry point */
	for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
		if(MptDeviceDriverHandlers[cb_idx] &&
		  MptDeviceDriverHandlers[cb_idx]->remove) {
			MptDeviceDriverHandlers[cb_idx]->remove(pdev);
		}
	}

	/* Disable interrupts! */
	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);

	ioc->active = 0;
	synchronize_irq(pdev->irq);

	/* Clear any lingering interrupt */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
	CHIPREG_READ32(&ioc->chip->IntStatus);	/* read flushes the posted write */

	mpt_adapter_dispose(ioc);

}
/**************************************************************************
* Power Management
*/
#ifdef CONFIG_PM
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_suspend - Fusion MPT base driver suspend routine.
 *	@pdev: Pointer to pci_dev structure
 *	@state: new state to enter
 *
 *	Puts the IOC into READY state, masks and clears interrupts, frees
 *	the IRQ, then saves PCI state and moves the device to the target
 *	power state.  Always returns 0.
 */
int
mpt_suspend(struct pci_dev *pdev, pm_message_t state)
{
	u32 device_state;
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);

	device_state = pci_choose_state(pdev, state);
	printk(MYIOC_s_INFO_FMT "pci-suspend: pdev=0x%p, slot=%s, Entering "
	    "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
	    device_state);

	/* put ioc into READY_STATE */
	if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
		printk(MYIOC_s_ERR_FMT
		"pci-suspend:  IOC msg unit reset failed!\n", ioc->name);
	}

	/* disable interrupts */
	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
	ioc->active = 0;

	/* Clear any lingering interrupt */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	free_irq(ioc->pci_irq, ioc);
	if (ioc->msi_enable)
		pci_disable_msi(ioc->pcidev);
	ioc->pci_irq = -1;
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_release_selected_regions(pdev, ioc->bars);
	pci_set_power_state(pdev, device_state);
	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_resume - Fusion MPT base driver resume routine.
 *	@pdev: Pointer to pci_dev structure
 *
 *	Restores PCI state, re-maps resources, re-selects the SGE builders
 *	(mirroring mpt_attach()), applies the SAS PCIe stale-doorbell errata
 *	workaround, then re-runs IOC bringup.  Returns 0, or a negative
 *	errno from mpt_mapresources().
 */
int
mpt_resume(struct pci_dev *pdev)
{
	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
	u32 device_state = pdev->current_state;
	int recovery_state;
	int err;

	printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous "
	    "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
	    device_state);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	ioc->pcidev = pdev;
	err = mpt_mapresources(ioc);
	if (err)
		return err;

	/* Re-select SGE builders; the DMA mask may have been re-negotiated
	 * by mpt_mapresources() above (same logic as in mpt_attach()). */
	if (ioc->dma_mask == DMA_BIT_MASK(64)) {
		if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
			ioc->add_sge = &mpt_add_sge_64bit_1078;
		else
			ioc->add_sge = &mpt_add_sge_64bit;
		ioc->add_chain = &mpt_add_chain_64bit;
		ioc->sg_addr_size = 8;
	} else {

		ioc->add_sge = &mpt_add_sge;
		ioc->add_chain = &mpt_add_chain;
		ioc->sg_addr_size = 4;
	}
	ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;

	printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
	    ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
	    CHIPREG_READ32(&ioc->chip->Doorbell));

	/*
	 * Errata workaround for SAS pci express:
	 * Upon returning to the D0 state, the contents of the doorbell will be
	 * stale data, and this will incorrectly signal to the host driver that
	 * the firmware is ready to process mpt commands.   The workaround is
	 * to issue a diagnostic reset.
	 */
	if (ioc->bus_type == SAS && (pdev->device ==
	    MPI_MANUFACTPAGE_DEVID_SAS1068E || pdev->device ==
	    MPI_MANUFACTPAGE_DEVID_SAS1064E)) {
		if (KickStart(ioc, 1, CAN_SLEEP) < 0) {
			printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover\n",
			    ioc->name);
			goto out;
		}
	}

	/* bring ioc to operational state */
	printk(MYIOC_s_INFO_FMT "Sending mpt_do_ioc_recovery\n", ioc->name);
	recovery_state = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
						 CAN_SLEEP);
	if (recovery_state != 0)
		printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover, "
		    "error:[%x]\n", ioc->name, recovery_state);
	else
		printk(MYIOC_s_INFO_FMT
		    "pci-resume: success\n", ioc->name);
 out:
	return 0;

}
#endif
/*
 *	mpt_signal_reset - invoke a registered reset handler, but only when
 *	its driver class matches this IOC's bus type.  Handlers registered
 *	for a different bus class are skipped and treated as success (0).
 */
static int
mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
{
	switch (MptDriverClass[index]) {
	case MPTSPI_DRIVER:
		if (ioc->bus_type != SPI)
			return 0;
		break;
	case MPTFC_DRIVER:
		if (ioc->bus_type != FC)
			return 0;
		break;
	case MPTSAS_DRIVER:
		if (ioc->bus_type != SAS)
			return 0;
		break;
	default:
		break;
	}
	return MptResetHandlers[index](ioc, reset_phase);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_do_ioc_recovery - Initialize or recover MPT adapter.
* @ioc: Pointer to MPT adapter structure
* @reason: Event word / reason
* @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
*
* This routine performs all the steps necessary to bring the IOC
* to a OPERATIONAL state.
*
* This routine also pre-fetches the LAN MAC address of a Fibre Channel
* MPT adapter.
*
* Returns:
* 0 for success
* -1 if failed to get board READY
* -2 if READY but IOCFacts Failed
* -3 if READY but PrimeIOCFifos Failed
* -4 if READY but IOCInit Failed
* -5 if failed to enable_device and/or request_selected_regions
* -6 if failed to upload firmware
*/
static int
mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
{
int hard_reset_done = 0;
int alt_ioc_ready = 0;
int hard;
int rc=0;
int ii;
int ret = 0;
int reset_alt_ioc_active = 0;
int irq_allocated = 0;
u8 *a;
printk(MYIOC_s_INFO_FMT "Initiating %s\n", ioc->name,
reason == MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
/* Disable reply interrupts (also blocks FreeQ) */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
ioc->active = 0;
if (ioc->alt_ioc) {
if (ioc->alt_ioc->active ||
reason == MPT_HOSTEVENT_IOC_RECOVER) {
reset_alt_ioc_active = 1;
/* Disable alt-IOC's reply interrupts
* (and FreeQ) for a bit
**/
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
0xFFFFFFFF);
ioc->alt_ioc->active = 0;
}
}
hard = 1;
if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
hard = 0;
if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
if (hard_reset_done == -4) {
printk(MYIOC_s_WARN_FMT "Owned by PEER..skipping!\n",
ioc->name);
if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
dprintk(ioc, printk(MYIOC_s_INFO_FMT
"alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
ioc->alt_ioc->active = 1;
}
} else {
printk(MYIOC_s_WARN_FMT
"NOT READY WARNING!\n", ioc->name);
}
ret = -1;
goto out;
}
/* hard_reset_done = 0 if a soft reset was performed
* and 1 if a hard reset was performed.
*/
if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
alt_ioc_ready = 1;
else
printk(MYIOC_s_WARN_FMT
": alt-ioc Not ready WARNING!\n",
ioc->alt_ioc->name);
}
for (ii=0; ii<5; ii++) {
/* Get IOC facts! Allow 5 retries */
if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
break;
}
if (ii == 5) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Retry IocFacts failed rc=%x\n", ioc->name, rc));
ret = -2;
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc);
}
if (alt_ioc_ready) {
if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Initial Alt IocFacts failed rc=%x\n",
ioc->name, rc));
/* Retry - alt IOC was initialized once
*/
rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
}
if (rc) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc->alt_ioc);
}
}
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
(ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
pci_release_selected_regions(ioc->pcidev, ioc->bars);
ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
IORESOURCE_IO);
if (pci_enable_device(ioc->pcidev))
return -5;
if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
"mpt"))
return -5;
}
/*
* Device is reset now. It must have de-asserted the interrupt line
* (if it was asserted) and it should be safe to register for the
* interrupt now.
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
ioc->pci_irq = -1;
if (ioc->pcidev->irq) {
if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
ioc->name);
else
ioc->msi_enable = 0;
rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
IRQF_SHARED, ioc->name, ioc);
if (rc < 0) {
printk(MYIOC_s_ERR_FMT "Unable to allocate "
"interrupt %d!\n",
ioc->name, ioc->pcidev->irq);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
ret = -EBUSY;
goto out;
}
irq_allocated = 1;
ioc->pci_irq = ioc->pcidev->irq;
pci_set_master(ioc->pcidev); /* ?? */
pci_set_drvdata(ioc->pcidev, ioc);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"installed at interrupt %d\n", ioc->name,
ioc->pcidev->irq));
}
}
/* Prime reply & request queues!
* (mucho alloc's) Must be done prior to
* init as upper addresses are needed for init.
* If fails, continue with alt-ioc processing
*/
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
ioc->name));
if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
ret = -3;
/* May need to check/upload firmware & data here!
* If fails, continue with alt-ioc processing
*/
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
ioc->name));
if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
ret = -4;
// NEW!
if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
printk(MYIOC_s_WARN_FMT
": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
ioc->alt_ioc->name, rc);
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
}
if (alt_ioc_ready) {
if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
alt_ioc_ready = 0;
reset_alt_ioc_active = 0;
printk(MYIOC_s_WARN_FMT
": alt-ioc: (%d) init failure WARNING!\n",
ioc->alt_ioc->name, rc);
}
}
if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
if (ioc->upload_fw) {
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"firmware upload required!\n", ioc->name));
/* Controller is not operational, cannot do upload
*/
if (ret == 0) {
rc = mpt_do_upload(ioc, sleepFlag);
if (rc == 0) {
if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
/*
* Maintain only one pointer to FW memory
* so there will not be two attempt to
* downloadboot onboard dual function
* chips (mpt_adapter_disable,
* mpt_diag_reset)
*/
ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"mpt_upload: alt_%s has cached_fw=%p \n",
ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
ioc->cached_fw = NULL;
}
} else {
printk(MYIOC_s_WARN_FMT
"firmware upload failure!\n", ioc->name);
ret = -6;
}
}
}
}
/* Enable MPT base driver management of EventNotification
* and EventAck handling.
*/
if ((ret == 0) && (!ioc->facts.EventState)) {
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
"SendEventNotification\n",
ioc->name));
ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
}
if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
if (ret == 0) {
/* Enable! (reply interrupt) */
CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
ioc->active = 1;
}
if (rc == 0) { /* alt ioc */
if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt) */
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc"
"reply irq re-enabled\n",
ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
MPI_HIM_DIM);
ioc->alt_ioc->active = 1;
}
}
/* Add additional "reason" check before call to GetLanConfigPages
* (combined with GetIoUnitPage2 call). This prevents a somewhat
* recursive scenario; GetLanConfigPages times out, timer expired
* routine calls HardResetHandler, which calls into here again,
* and we try GetLanConfigPages again...
*/
if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
/*
* Initialize link list for inactive raid volumes.
*/
mutex_init(&ioc->raid_data.inactive_list_mutex);
INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
switch (ioc->bus_type) {
case SAS:
/* clear persistency table */
if(ioc->facts.IOCExceptions &
MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
ret = mptbase_sas_persist_operation(ioc,
MPI_SAS_OP_CLEAR_NOT_PRESENT);
if(ret != 0)
goto out;
}
/* Find IM volumes
*/
mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value
*/
mpt_read_ioc_pg_1(ioc);
break;
case FC:
if ((ioc->pfacts[0].ProtocolFlags &
MPI_PORTFACTS_PROTOCOL_LAN) &&
(ioc->lan_cnfg_page0.Header.PageLength == 0)) {
/*
* Pre-fetch the ports LAN MAC address!
* (LANPage1_t stuff)
*/
(void) GetLanConfigPages(ioc);
a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"LanAddr = %02X:%02X:%02X"
":%02X:%02X:%02X\n",
ioc->name, a[5], a[4],
a[3], a[2], a[1], a[0]));
}
break;
case SPI:
/* Get NVRAM and adapter maximums from SPP 0 and 2
*/
mpt_GetScsiPortSettings(ioc, 0);
/* Get version and length of SDP 1
*/
mpt_readScsiDevicePageHeaders(ioc, 0);
/* Find IM volumes
*/
if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value
*/
mpt_read_ioc_pg_1(ioc);
mpt_read_ioc_pg_4(ioc);
break;
}
GetIoUnitPage2(ioc);
mpt_get_manufacturing_pg_0(ioc);
}
out:
if ((ret != 0) && irq_allocated) {
free_irq(ioc->pci_irq, ioc);
if (ioc->msi_enable)
pci_disable_msi(ioc->pcidev);
}
return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_detect_bound_ports - Search for matching PCI bus/dev_function
* @ioc: Pointer to MPT adapter structure
* @pdev: Pointer to (struct pci_dev) structure
*
* Search for PCI bus/dev_function which matches
* PCI bus/dev_function (+/-1) for newly discovered 929,
* 929X, 1030 or 1035.
*
* If match on PCI dev_function +/-1 is found, bind the two MPT adapters
* using alt_ioc pointer fields in their %MPT_ADAPTER structures.
*/
static void
mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
{
	unsigned int slot = PCI_SLOT(pdev->devfn);
	unsigned int func = PCI_FUNC(pdev->devfn);
	struct pci_dev *peer;
	MPT_ADAPTER *cursor;

	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PCI device %s devfn=%x/%x,"
	    " searching for devfn match on %x or %x\n",
	    ioc->name, pci_name(pdev), pdev->bus->number,
	    pdev->devfn, func-1, func+1));

	/* Try the PCI function just below ours, then the one just above. */
	peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot, func - 1));
	if (peer == NULL)
		peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot, func + 1));
	if (peer == NULL)
		return;

	/* Scan the global adapter list for the owner of the peer device. */
	list_for_each_entry(cursor, &ioc_list, list) {
		if (cursor->pcidev != peer)
			continue;

		/* Paranoia checks: never bind an already-bound adapter. */
		if (ioc->alt_ioc != NULL) {
			printk(MYIOC_s_WARN_FMT
			    "Oops, already bound (%s <==> %s)!\n",
			    ioc->name, ioc->name, ioc->alt_ioc->name);
			break;
		} else if (cursor->alt_ioc != NULL) {
			printk(MYIOC_s_WARN_FMT
			    "Oops, already bound (%s <==> %s)!\n",
			    cursor->name, cursor->name,
			    cursor->alt_ioc->name);
			break;
		}

		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"FOUND! binding %s <==> %s\n",
			ioc->name, ioc->name, cursor->name));
		cursor->alt_ioc = ioc;
		ioc->alt_ioc = cursor;
	}

	/* Drop the reference taken by pci_get_slot(). */
	pci_dev_put(peer);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_adapter_disable - Disable misbehaving MPT adapter.
* @ioc: Pointer to MPT adapter structure
*/
static void
mpt_adapter_disable(MPT_ADAPTER *ioc)
{
	int sz;
	int ret;

	/*
	 * If a firmware image is cached, push it back onto the adapter
	 * so the IOC is not left running without valid firmware.
	 * Fix: argument order was (__func__, ioc->name) -- swapped
	 * relative to the "%s: ..." convention used by every other
	 * message in this function.
	 */
	if (ioc->cached_fw != NULL) {
		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: Pushing FW onto adapter\n", ioc->name, __func__));
		if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
		    ioc->cached_fw, CAN_SLEEP)) < 0) {
			printk(MYIOC_s_WARN_FMT
			    ": firmware downloadboot failure (%d)!\n",
			    ioc->name, ret);
		}
	}

	/*
	 * Put the controller into ready state (if its not already)
	 */
	if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
		if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
		    CAN_SLEEP)) {
			if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
				printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
				    "reset failed to put ioc in ready state!\n",
				    ioc->name, __func__);
		} else
			printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
			    "failed!\n", ioc->name, __func__);
	}

	/* Disable adapter interrupts! */
	synchronize_irq(ioc->pcidev->irq);
	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
	ioc->active = 0;

	/* Clear any lingering interrupt */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
	CHIPREG_READ32(&ioc->chip->IntStatus);

	/* Free the combined request/reply frame pool. */
	if (ioc->alloc != NULL) {
		sz = ioc->alloc_sz;
		dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
		    ioc->name, ioc->alloc, ioc->alloc_sz));
		pci_free_consistent(ioc->pcidev, sz,
		    ioc->alloc, ioc->alloc_dma);
		ioc->reply_frames = NULL;
		ioc->req_frames = NULL;
		ioc->alloc = NULL;
		ioc->alloc_total -= sz;
	}

	/* Free the sense-buffer pool. */
	if (ioc->sense_buf_pool != NULL) {
		sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
		pci_free_consistent(ioc->pcidev, sz,
		    ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
		ioc->sense_buf_pool = NULL;
		ioc->alloc_total -= sz;
	}

	/* Free the event log. */
	if (ioc->events != NULL){
		sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
		kfree(ioc->events);
		ioc->events = NULL;
		ioc->alloc_total -= sz;
	}

	mpt_free_fw_memory(ioc);

	kfree(ioc->spi_data.nvram);
	mpt_inactive_raid_list_free(ioc);
	kfree(ioc->raid_data.pIocPg2);
	kfree(ioc->raid_data.pIocPg3);
	ioc->spi_data.nvram = NULL;
	/* Fix: also clear pIocPg2 (freed just above) so no dangling
	 * pointer survives; the old code only cleared pIocPg3. */
	ioc->raid_data.pIocPg2 = NULL;
	ioc->raid_data.pIocPg3 = NULL;

	if (ioc->spi_data.pIocPg4 != NULL) {
		sz = ioc->spi_data.IocPg4Sz;
		pci_free_consistent(ioc->pcidev, sz,
		    ioc->spi_data.pIocPg4,
		    ioc->spi_data.IocPg4_dma);
		ioc->spi_data.pIocPg4 = NULL;
		ioc->alloc_total -= sz;
	}

	if (ioc->ReqToChain != NULL) {
		kfree(ioc->ReqToChain);
		kfree(ioc->RequestNB);
		ioc->ReqToChain = NULL;
	}

	kfree(ioc->ChainToChain);
	ioc->ChainToChain = NULL;

	if (ioc->HostPageBuffer != NULL) {
		if((ret = mpt_host_page_access_control(ioc,
		    MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
			printk(MYIOC_s_ERR_FMT
			    ": %s: host page buffers free failed (%d)!\n",
			    ioc->name, __func__, ret);
		}
		dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "HostPageBuffer free @ %p, sz=%d bytes\n",
		    ioc->name, ioc->HostPageBuffer,
		    ioc->HostPageBuffer_sz));
		pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
		    ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
		/* Fix: subtract the size from the accounting total BEFORE
		 * zeroing HostPageBuffer_sz.  The old order subtracted
		 * zero, permanently inflating alloc_total. */
		ioc->alloc_total -= ioc->HostPageBuffer_sz;
		ioc->HostPageBuffer = NULL;
		ioc->HostPageBuffer_sz = 0;
	}

	pci_set_drvdata(ioc->pcidev, NULL);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_adapter_dispose - Free all resources associated with an MPT adapter
* @ioc: Pointer to MPT adapter structure
*
* This routine unregisters h/w resources and frees all alloc'd memory
* associated with a MPT adapter structure.
*/
static void
mpt_adapter_dispose(MPT_ADAPTER *ioc)
{
	int bytes_before, bytes_after;

	if (ioc == NULL)
		return;

	/* Snapshot accounting before teardown so we can report how much
	 * memory mpt_adapter_disable() released. */
	bytes_before = ioc->alloc_total;
	mpt_adapter_disable(ioc);

	/* Release the interrupt line (and MSI vector, if enabled). */
	if (ioc->pci_irq != -1) {
		free_irq(ioc->pci_irq, ioc);
		if (ioc->msi_enable)
			pci_disable_msi(ioc->pcidev);
		ioc->pci_irq = -1;
	}

	/* Tear down the register mapping. */
	if (ioc->memmap != NULL) {
		iounmap(ioc->memmap);
		ioc->memmap = NULL;
	}

	pci_disable_device(ioc->pcidev);
	pci_release_selected_regions(ioc->pcidev, ioc->bars);

#if defined(CONFIG_MTRR) && 0
	if (ioc->mtrr_reg > 0) {
		mtrr_del(ioc->mtrr_reg, 0, 0);
		dprintk(ioc, printk(MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name));
	}
#endif

	/* Zap the adapter lookup ptr! */
	list_del(&ioc->list);

	bytes_after = ioc->alloc_total;
	dprintk(ioc, printk(MYIOC_s_INFO_FMT "free'd %d of %d bytes\n",
	    ioc->name, bytes_before - bytes_after + (int)sizeof(*ioc),
	    bytes_before));

	/* Unbind from the partner adapter before freeing ourselves. */
	if (ioc->alt_ioc)
		ioc->alt_ioc->alt_ioc = NULL;
	kfree(ioc);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* MptDisplayIocCapabilities - Disply IOC's capabilities.
* @ioc: Pointer to MPT adapter structure
*/
static void
MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
{
	int listed = 0;
	unsigned int pflags = ioc->pfacts[0].ProtocolFlags;

	printk(KERN_INFO "%s: ", ioc->name);
	if (ioc->prod_name)
		printk("%s: ", ioc->prod_name);
	printk("Capabilities={");

	/* Each entry after the first gets a leading comma. */
	if (pflags & MPI_PORTFACTS_PROTOCOL_INITIATOR)
		printk("%sInitiator", listed++ ? "," : "");
	if (pflags & MPI_PORTFACTS_PROTOCOL_TARGET)
		printk("%sTarget", listed++ ? "," : "");
	if (pflags & MPI_PORTFACTS_PROTOCOL_LAN)
		printk("%sLAN", listed++ ? "," : "");

#if 0
	/*
	 * This would probably evoke more questions than it's worth
	 */
	if (pflags & MPI_PORTFACTS_PROTOCOL_TARGET)
		printk("%sLogBusAddr", listed++ ? "," : "");
#endif

	printk("}\n");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* MakeIocReady - Get IOC to a READY state, using KickStart if needed.
* @ioc: Pointer to MPT_ADAPTER structure
* @force: Force hard KickStart of IOC
* @sleepFlag: Specifies whether the process can sleep
*
* Returns:
* 1 - DIAG reset and READY
* 0 - READY initially OR soft reset and READY
* -1 - Any failure on KickStart
* -2 - Msg Unit Reset Failed
* -3 - IO Unit Reset Failed
* -4 - IOC owned by a PEER
*/
static int
MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
{
	u32 ioc_state;			/* raw doorbell snapshot */
	int statefault = 0;		/* 0=clean, 1=stuck handshake, 2=FAULT, 3=left OPERATIONAL */
	int cntdn;			/* timeout countdown (jiffies or msec, by sleepFlag) */
	int hard_reset_done = 0;
	int r;
	int ii;
	int whoinit;

	/* Get current [raw] IOC state */
	ioc_state = mpt_GetIocState(ioc, 0);
	dhsprintk(ioc, printk(MYIOC_s_INFO_FMT "MakeIocReady [raw] state=%08x\n", ioc->name, ioc_state));

	/*
	 * Check to see if IOC got left/stuck in doorbell handshake
	 * grip of death. If so, hard reset the IOC.
	 */
	if (ioc_state & MPI_DOORBELL_ACTIVE) {
		statefault = 1;
		printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
		    ioc->name);
	}

	/* Is it already READY?  (fast path: nothing to do) */
	if (!statefault &&
	    ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
		    "IOC is in READY state\n", ioc->name));
		return 0;
	}

	/*
	 * Check to see if IOC is in FAULT state.  The fault code lives in
	 * the doorbell data bits.
	 */
	if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
		statefault = 2;
		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
		    ioc->name);
		printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n",
		    ioc->name, ioc_state & MPI_DOORBELL_DATA_MASK);
	}

	/*
	 * Hmmm... Did it get left operational?
	 */
	if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC operational unexpected\n",
		    ioc->name));

		/* Check WhoInit.
		 * If PCI Peer, exit.
		 * Else, if no fault conditions are present, issue a MessageUnitReset
		 * Else, fall through to KickStart case
		 */
		whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
		dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
		    "whoinit 0x%x statefault %d force %d\n",
		    ioc->name, whoinit, statefault, force));
		if (whoinit == MPI_WHOINIT_PCI_PEER)
			return -4;		/* owned by the peer; hands off */
		else {
			/* Clean and unforced: a message unit reset suffices. */
			if ((statefault == 0 ) && (force == 0)) {
				if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
					return 0;
			}
			statefault = 3;		/* fall through to KickStart */
		}
	}

	/* KickStart performs a hard reset only when a fault was seen or
	 * the caller forced it. */
	hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
	if (hard_reset_done < 0)
		return -1;

	/*
	 * Loop here waiting for IOC to come READY.
	 * NOTE(review): when sleepFlag != CAN_SLEEP, cntdn counts msec and
	 * the "(ii+5)/HZ" seconds estimate in the timeout message is only
	 * meaningful for the CAN_SLEEP case.
	 */
	ii = 0;
	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5;	/* 5 seconds */

	while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
		if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
			/*
			 * BIOS or previous driver load left IOC in OP state.
			 * Reset messaging FIFOs.
			 */
			if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
				printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
				return -2;
			}
		} else if (ioc_state == MPI_IOC_STATE_RESET) {
			/*
			 * Something is wrong. Try to get IOC back
			 * to a known state.
			 */
			if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
				printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
				return -3;
			}
		}

		ii++; cntdn--;
		if (!cntdn) {
			printk(MYIOC_s_ERR_FMT
			    "Wait IOC_READY state (0x%x) timeout(%d)!\n",
			    ioc->name, ioc_state, (int)((ii+5)/HZ));
			return -ETIME;
		}

		if (sleepFlag == CAN_SLEEP) {
			msleep(1);
		} else {
			mdelay (1);	/* 1 msec delay */
		}
	}

	/* statefault==3 (left OPERATIONAL) is not a "recovery" worth logging. */
	if (statefault < 3) {
		printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
		    statefault == 1 ? "stuck handshake" : "IOC FAULT");
	}

	return hard_reset_done;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_GetIocState - Get the current state of a MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @cooked: Request raw or cooked IOC state
*
* Returns all IOC Doorbell register bits if cooked==0, else just the
* Doorbell bits in MPI_IOC_STATE_MASK.
*/
u32
mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
{
	u32 raw = CHIPREG_READ32(&ioc->chip->Doorbell);
	u32 state = raw & MPI_IOC_STATE_MASK;

	/* Remember the most recent cooked state for later sanity checks. */
	ioc->last_state = state;

	return cooked ? state : raw;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetIocFacts - Send IOCFacts request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Specifies whether the process can sleep
* @reason: If recovery, only update facts.
*
* Returns 0 for success, non-zero for failure.
*/
static int
GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
{
	IOCFacts_t get_facts;
	IOCFactsReply_t *facts;
	int r;
	int req_sz;
	int reply_sz;
	int sz;
	u32 status, vv;
	u8 shiftFactor=1;

	/* IOC *must* NOT be in RESET state! */
	if (ioc->last_state == MPI_IOC_STATE_RESET) {
		printk(KERN_ERR MYNAM
		    ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
		    ioc->name, ioc->last_state);
		return -44;
	}

	facts = &ioc->facts;

	/* Destination (reply area)... */
	reply_sz = sizeof(*facts);
	memset(facts, 0, reply_sz);

	/* Request area (get_facts on the stack right now!) */
	req_sz = sizeof(get_facts);
	memset(&get_facts, 0, req_sz);

	get_facts.Function = MPI_FUNCTION_IOC_FACTS;
	/* Assert: All other get_facts fields are zero! */

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "Sending get IocFacts request req_sz=%d reply_sz=%d\n",
	    ioc->name, req_sz, reply_sz));

	/* No non-zero fields in the get_facts request are greater than
	 * 1 byte in size, so we can just fire it off as is.
	 */
	r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
	    reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
	if (r != 0)
		return r;

	/*
	 * Now byte swap (GRRR) the necessary fields before any further
	 * inspection of reply contents.
	 *
	 * But need to do some sanity checks on MsgLength (byte) field
	 * to make sure we don't zero IOC's req_sz!
	 */
	/* Did we get a valid reply? */
	if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
		if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
			/*
			 * If not been here, done that, save off first WhoInit value
			 */
			if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
				ioc->FirstWhoInit = facts->WhoInit;
		}

		facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
		facts->MsgContext = le32_to_cpu(facts->MsgContext);
		facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
		facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
		facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
		/* Fix: IOCStatus was converted to CPU byte order just above;
		 * running le16_to_cpu() on it a second time (as the old code
		 * did) would scramble the value on big-endian hosts. */
		status = facts->IOCStatus & MPI_IOCSTATUS_MASK;
		/* CHECKME! IOCStatus, IOCLogInfo */

		facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
		facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);

		/*
		 * FC f/w version changed between 1.1 and 1.2
		 *	Old: u16{Major(4),Minor(4),SubMinor(8)}
		 *	New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
		 */
		if (facts->MsgVersion < MPI_VERSION_01_02) {
			/*
			 * Handle old FC f/w style, convert to new...
			 */
			u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
			facts->FWVersion.Word =
			    ((oldv<<12) & 0xFF000000) |
			    ((oldv<<8) & 0x000FFF00);
		} else
			facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);

		facts->ProductID = le16_to_cpu(facts->ProductID);

		/* A product ID above the plain-SCSI range means IR
		 * (integrated RAID) capable firmware. */
		if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
		    > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
			ioc->ir_firmware = 1;

		facts->CurrentHostMfaHighAddr =
		    le32_to_cpu(facts->CurrentHostMfaHighAddr);
		facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
		facts->CurrentSenseBufferHighAddr =
		    le32_to_cpu(facts->CurrentSenseBufferHighAddr);
		facts->CurReplyFrameSize =
		    le16_to_cpu(facts->CurReplyFrameSize);
		facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);

		/*
		 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
		 * Older MPI-1.00.xx struct had 13 dwords, and enlarged
		 * to 14 in MPI-1.01.0x.
		 */
		if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
		    facts->MsgVersion > MPI_VERSION_01_00) {
			facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
		}

		/* Round the image size up to a 4-byte multiple. */
		sz = facts->FWImageSize;
		if ( sz & 0x01 )
			sz += 1;
		if ( sz & 0x02 )
			sz += 2;
		facts->FWImageSize = sz;

		if (!facts->RequestFrameSize) {
			/* Something is wrong! */
			printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
			    ioc->name);
			return -55;
		}

		/* Derive the 64-byte-frame count and the shift factor
		 * (log2 of BlockSize, plus one) used for frame sizing. */
		r = sz = facts->BlockSize;
		vv = ((63 / (sz * 4)) + 1) & 0x03;
		ioc->NB_for_64_byte_frame = vv;
		while ( sz )
		{
			shiftFactor++;
			sz = sz >> 1;
		}
		ioc->NBShiftFactor = shiftFactor;
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
		    ioc->name, vv, shiftFactor, r));

		if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
			/*
			 * Set values for this IOC's request & reply frame sizes,
			 * and request & reply queue depths...
			 */
			ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4);
			ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits);
			ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
			ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth);

			dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "reply_sz=%3d, reply_depth=%4d\n",
			    ioc->name, ioc->reply_sz, ioc->reply_depth));
			dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "req_sz =%3d, req_depth =%4d\n",
			    ioc->name, ioc->req_sz, ioc->req_depth));

			/* Get port facts! */
			if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 )
				return r;
		}
	} else {
		printk(MYIOC_s_ERR_FMT
		    "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
		    ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
		    RequestFrameSize)/sizeof(u32)));
		return -66;
	}

	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetPortFacts - Send PortFacts request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @portnum: Port number
* @sleepFlag: Specifies whether the process can sleep
*
* Returns 0 for success, non-zero for failure.
*/
static int
GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
{
	PortFacts_t get_pfacts;
	PortFactsReply_t *pfacts;
	int rc;
	int max_id;

	/* Talking to an IOC stuck in RESET state is not allowed. */
	if (ioc->last_state == MPI_IOC_STATE_RESET) {
		printk(MYIOC_s_ERR_FMT "Can't get PortFacts NOT READY! (%08x)\n",
		    ioc->name, ioc->last_state );
		return -4;
	}

	/* Reply lands directly in the per-port slot of the adapter. */
	pfacts = &ioc->pfacts[portnum];
	memset(pfacts, 0, sizeof(*pfacts));

	/* Build the request on the stack; only Function and PortNumber
	 * are non-zero, and both are single bytes. */
	memset(&get_pfacts, 0, sizeof(get_pfacts));
	get_pfacts.Function = MPI_FUNCTION_PORT_FACTS;
	get_pfacts.PortNumber = portnum;

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending get PortFacts(%d) request\n",
	    ioc->name, portnum));

	rc = mpt_handshake_req_reply_wait(ioc, sizeof(get_pfacts),
	    (u32*)&get_pfacts, sizeof(*pfacts), (u16*)pfacts,
	    5 /*seconds*/, sleepFlag);
	if (rc != 0)
		return rc;

	/* Byte swap the multi-byte reply fields into CPU order. */
	pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext);
	pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus);
	pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo);
	pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices);
	pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID);
	pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags);
	pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers);
	pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs);
	pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets);

	/* Derive bus/target topology from the per-bus-type device limit. */
	if (ioc->bus_type == SAS)
		max_id = pfacts->PortSCSIID;
	else
		max_id = pfacts->MaxDevices;

	ioc->devices_per_bus = (max_id > 255) ? 256 : max_id;
	ioc->number_of_buses = (ioc->devices_per_bus < 256) ? 1 : max_id/256;

	/*
	 * Place all the devices on channels
	 *
	 * (for debuging)
	 */
	if (mpt_channel_mapping) {
		ioc->devices_per_bus = 1;
		ioc->number_of_buses = (max_id > 255) ? 255 : max_id;
	}

	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendIocInit - Send IOCInit request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Specifies whether the process can sleep
*
* Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state.
*
* Returns 0 for success, non-zero for failure.
*/
static int
SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
{
	IOCInit_t ioc_init;
	MPIDefaultReply_t init_reply;
	u32 state;
	int r;
	int count;
	int cntdn;

	memset(&ioc_init, 0, sizeof(ioc_init));
	memset(&init_reply, 0, sizeof(init_reply));

	ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER;
	ioc_init.Function = MPI_FUNCTION_IOC_INIT;

	/* If we are in a recovery mode and we uploaded the FW image,
	 * then this pointer is not NULL. Skip the upload a second time.
	 * Set this flag if cached_fw set for either IOC.
	 */
	if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
		ioc->upload_fw = 1;
	else
		ioc->upload_fw = 0;
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "upload_fw %d facts.Flags=%x\n",
	    ioc->name, ioc->upload_fw, ioc->facts.Flags));

	ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
	ioc_init.MaxBuses = (U8)ioc->number_of_buses;

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
	    ioc->name, ioc->facts.MsgVersion));
	if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
		/* Set MsgVersion and HeaderVersion host driver was built with. */
		ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
		ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);

		if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
			ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
		} else if(mpt_host_page_alloc(ioc, &ioc_init))
			return -99;
	}
	ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz);	/* in BYTES */

	if (ioc->sg_addr_size == sizeof(u64)) {
		/* Save the upper 32-bits of the request
		 * (reply) and sense buffers.
		 */
		ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32));
		ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
	} else {
		/* Force 32-bit addressing */
		ioc_init.HostMfaHighAddr = cpu_to_le32(0);
		ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
	}

	ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
	ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
	ioc->facts.MaxDevices = ioc_init.MaxDevices;
	ioc->facts.MaxBuses = ioc_init.MaxBuses;

	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOCInit (req @ %p)\n",
	    ioc->name, &ioc_init));
	r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
	    sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
	if (r != 0) {
		printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
		return r;
	}

	/* No need to byte swap the multibyte fields in the reply
	 * since we don't even look at its contents.
	 */
	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending PortEnable (req @ %p)\n",
	    ioc->name, &ioc_init));
	if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
		printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
		return r;
	}

	/* YIKES! SUPER IMPORTANT!!!
	 * Poll IocState until _OPERATIONAL while IOC is doing
	 * LoopInit and TargetDiscovery!
	 */
	count = 0;
	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60;	/* 60 seconds */
	state = mpt_GetIocState(ioc, 1);
	while (state != MPI_IOC_STATE_OPERATIONAL && cntdn--) {
		if (sleepFlag == CAN_SLEEP)
			msleep(1);
		else
			mdelay(1);
		state = mpt_GetIocState(ioc, 1);
		count++;
	}
	/* Fix: the old loop's in-body "if (!cntdn)" test was dead code
	 * (the loop condition decremented cntdn and exited first), so a
	 * timeout silently fell through and returned success.  Check the
	 * final state explicitly instead. */
	if (state != MPI_IOC_STATE_OPERATIONAL) {
		printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n",
		    ioc->name, (int)((count+5)/HZ));
		return -9;
	}

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wait IOC_OPERATIONAL state (cnt=%d)\n",
	    ioc->name, count));

	ioc->aen_event_read_flag=0;
	return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendPortEnable - Send PortEnable request to MPT adapter port.
* @ioc: Pointer to MPT_ADAPTER structure
* @portnum: Port number to enable
* @sleepFlag: Specifies whether the process can sleep
*
* Send PortEnable to bring IOC to OPERATIONAL state.
*
* Returns 0 for success, non-zero for failure.
*/
static int
SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
{
	PortEnable_t port_enable;
	MPIDefaultReply_t reply_buf;
	int timeout;

	/* Zeroed reply destination and request frame. */
	memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
	memset(&port_enable, 0, sizeof(PortEnable_t));

	/* Only Function and PortNumber are non-zero in the request;
	 * ChainOffset, MsgFlags and MsgContext stay zero. */
	port_enable.Function = MPI_FUNCTION_PORT_ENABLE;
	port_enable.PortNumber = portnum;

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Port(%d)Enable (req @ %p)\n",
	    ioc->name, portnum, &port_enable));

	/* RAID FW may take a long time to enable
	 */
	timeout = (ioc->ir_firmware || ioc->bus_type == SAS) ? 300 : 30;

	return mpt_handshake_req_reply_wait(ioc, sizeof(PortEnable_t),
	    (u32*)&port_enable, sizeof(MPIDefaultReply_t), (u16*)&reply_buf,
	    timeout /*seconds*/, sleepFlag);
}
/**
* mpt_alloc_fw_memory - allocate firmware memory
* @ioc: Pointer to MPT_ADAPTER structure
* @size: total FW bytes
*
* If memory has already been allocated, the same (cached) value
* is returned.
*
* Return 0 if successful, or non-zero for failure
**/
int
mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
{
	/* Reuse this IOC's previously allocated image buffer, if any. */
	if (ioc->cached_fw)
		return 0;

	/* Share the bound partner's buffer so only one copy exists. */
	if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
		ioc->cached_fw = ioc->alt_ioc->cached_fw;
		ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
		return 0;
	}

	/* No existing buffer anywhere: allocate a fresh DMA-able one. */
	ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
	if (!ioc->cached_fw) {
		printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
		    ioc->name);
		return -1;
	}

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n",
	    ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size));
	ioc->alloc_total += size;
	return 0;
}
/**
* mpt_free_fw_memory - free firmware memory
* @ioc: Pointer to MPT_ADAPTER structure
*
* If alt_img is NULL, delete from ioc structure.
* Else, delete a secondary image in same format.
**/
void
mpt_free_fw_memory(MPT_ADAPTER *ioc)
{
	int img_sz;

	if (ioc->cached_fw == NULL)
		return;

	/* The buffer was sized from the facts-reported image size. */
	img_sz = ioc->facts.FWImageSize;
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
	    ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, img_sz, img_sz));
	pci_free_consistent(ioc->pcidev, img_sz, ioc->cached_fw, ioc->cached_fw_dma);
	ioc->alloc_total -= img_sz;
	ioc->cached_fw = NULL;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
* @ioc: Pointer to MPT_ADAPTER structure
* @sleepFlag: Specifies whether the process can sleep
*
* Returns 0 for success, >0 for handshake failure
* <0 for fw upload failure.
*
* Remark: If bound IOC and a successful FWUpload was performed
* on the bound IOC, the second image is discarded
* and memory is free'd. Both channels must upload to prevent
* IOC from running in degraded mode.
*/
static int
mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
{
	u8 reply[sizeof(FWUploadReply_t)];	/* stack reply frame */
	FWUpload_t *prequest;
	FWUploadReply_t *preply;
	FWUploadTCSGE_t *ptcsge;
	u32 flagsLength;
	int ii, sz, reply_sz;
	int cmdStatus;
	int request_size;
	/* If the image size is 0, we are done.
	 */
	if ((sz = ioc->facts.FWImageSize) == 0)
		return 0;
	/* Reuses an already-cached or partner-shared buffer when one exists. */
	if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0)
		return -ENOMEM;
	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
	    ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
	/* GFP_ATOMIC when the caller cannot sleep, GFP_KERNEL otherwise. */
	prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) :
	    kzalloc(ioc->req_sz, GFP_KERNEL);
	if (!prequest) {
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed "
		    "while allocating memory \n", ioc->name));
		mpt_free_fw_memory(ioc);
		return -ENOMEM;
	}
	preply = (FWUploadReply_t *)&reply;
	reply_sz = sizeof(reply);
	memset(preply, 0, reply_sz);
	prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
	prequest->Function = MPI_FUNCTION_FW_UPLOAD;
	/* Build a transaction-context SGE (TCSGE) at the start of the SGL,
	 * then advance past it so add_sge() appends the simple SGE that
	 * points at the DMA firmware buffer immediately after it. */
	ptcsge = (FWUploadTCSGE_t *) &prequest->SGL;
	ptcsge->DetailsLength = 12;
	ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	ptcsge->ImageSize = cpu_to_le32(sz);
	ptcsge++;
	flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
	ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
	/* Total request = header up to SGL + TCSGE + one simple SGE. */
	request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
	    ioc->SGE_size;
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
	    " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
	    ioc->facts.FWImageSize, request_size));
	DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
	ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
	    reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
	    "rc=%x \n", ioc->name, ii));
	cmdStatus = -EFAULT;
	if (ii == 0) {
		/* Handshake transfer was complete and successful.
		 * Check the Reply Frame.
		 */
		int status;
		status = le16_to_cpu(preply->IOCStatus) &
		    MPI_IOCSTATUS_MASK;
		/* Only a successful, full-length transfer counts as OK. */
		if (status == MPI_IOCSTATUS_SUCCESS &&
		    ioc->facts.FWImageSize ==
		    le32_to_cpu(preply->ActualImageSize))
			cmdStatus = 0;
	}
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
	    ioc->name, cmdStatus));
	if (cmdStatus) {
		/* On any failure, drop the cached image so a later retry
		 * re-allocates cleanly. */
		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
		    "freeing image \n", ioc->name));
		mpt_free_fw_memory(ioc);
	}
	kfree(prequest);
	return cmdStatus;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_downloadboot - DownloadBoot code
* @ioc: Pointer to MPT_ADAPTER structure
* @pFwHeader: Pointer to firmware header info
* @sleepFlag: Specifies whether the process can sleep
*
* FwDownloadBoot requires Programmed IO access.
*
* Returns 0 for success
* -1 FW Image size is 0
* -2 No valid cached_fw Pointer
* <0 for fw upload failure.
*/
static int
mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
{
	MpiExtImageHeader_t *pExtImage;
	u32 fwSize;
	u32 diag0val;
	int count;
	u32 *ptrFw;
	u32 diagRwData;
	u32 nextImage;
	u32 load_addr;
	u32 ioc_state=0;
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
		ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
	/* Unlock the Diagnostic register via the magic WriteSequence keys,
	 * then keep the IOC from booting and halt the ARM processor while
	 * we replace its firmware.
	 */
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM));
	/* wait 1 msec */
	if (sleepFlag == CAN_SLEEP) {
		msleep(1);
	} else {
		mdelay (1);
	}
	/* Hard-reset the adapter, then poll (up to ~3s) for the
	 * RESET_ADAPTER bit to self-clear.
	 */
	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
	for (count = 0; count < 30; count ++) {
		diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
		if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
			ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RESET_ADAPTER cleared, count=%d\n",
				ioc->name, count));
			break;
		}
		/* wait .1 sec */
		if (sleepFlag == CAN_SLEEP) {
			msleep (100);
		} else {
			mdelay (100);
		}
	}
	if ( count == 30 ) {
		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot failed! "
		"Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
		ioc->name, diag0val));
		return -3;
	}
	/* The reset clears DRWE; re-send the key sequence and re-enable
	 * diagnostic read/write with the ARM still disabled.
	 */
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
	/* Set the DiagRwEn and Disable ARM bits */
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
	/* Image is streamed as 32-bit words; round the byte size up. */
	fwSize = (pFwHeader->ImageSize + 3)/4;
	ptrFw = (u32 *) pFwHeader;
	/* Write the LoadStartAddress to the DiagRw Address Register
	 * using Programmed IO
	 */
	if (ioc->errata_flag_1064)
		pci_enable_io_access(ioc->pcidev);
	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress);
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "LoadStart addr written 0x%x \n",
		ioc->name, pFwHeader->LoadStartAddress));
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write FW Image: 0x%x bytes @ %p\n",
		ioc->name, fwSize*4, ptrFw));
	/* DiagRwAddress auto-increments, so the whole image can be pushed
	 * through the single DiagRwData register.
	 */
	while (fwSize--) {
		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
	}
	/* Walk and download any extension images chained off the header. */
	nextImage = pFwHeader->NextImageHeaderOffset;
	while (nextImage) {
		pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage);
		load_addr = pExtImage->LoadStartAddress;
		fwSize = (pExtImage->ImageSize + 3) >> 2;
		ptrFw = (u32 *)pExtImage;
		ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
			ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
		while (fwSize--) {
			CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
		}
		nextImage = pExtImage->NextImageHeaderOffset;
	}
	/* Write the IopResetVectorRegAddr */
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x! \n", ioc->name, pFwHeader->IopResetRegAddr));
	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr);
	/* Write the IopResetVectorValue */
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue));
	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue);
	/* Clear the internal flash bad bit - autoincrementing register,
	 * so must do two writes.
	 */
	if (ioc->bus_type == SPI) {
		/*
		 * 1030 and 1035 H/W errata, workaround to access
		 * the ClearFlashBadSignatureBit
		 */
		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
		diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
		diagRwData |= 0x40000000;
		/* Re-seed the address: the read above auto-incremented it. */
		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
	} else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
		diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
		CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
		    MPI_DIAG_CLEAR_FLASH_BAD_SIG);
		/* wait 1 msec */
		if (sleepFlag == CAN_SLEEP) {
			msleep (1);
		} else {
			mdelay (1);
		}
	}
	if (ioc->errata_flag_1064)
		pci_disable_io_access(ioc->pcidev);
	/* Release the ARM and allow the IOC to boot the new image. */
	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot diag0val=%x, "
		"turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
		ioc->name, diag0val));
	diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot now diag0val=%x\n",
		ioc->name, diag0val));
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
	/* Write 0xFF to reset the sequencer */
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
	/* SAS parts need a fresh GetIocFacts before the ready poll. */
	if (ioc->bus_type == SAS) {
		ioc_state = mpt_GetIocState(ioc, 0);
		if ( (GetIocFacts(ioc, sleepFlag,
			MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
			ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "GetIocFacts failed: IocState=%x\n",
				ioc->name, ioc_state));
			return -EFAULT;
		}
	}
	/* Poll (up to 20 "HZ-scaled" iterations of 10ms) for READY, then
	 * finish bring-up: SAS is done, others also need SendIocInit.
	 */
	for (count=0; count<HZ*20; count++) {
		if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
			ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				"downloadboot successful! (count=%d) IocState=%x\n",
				ioc->name, count, ioc_state));
			if (ioc->bus_type == SAS) {
				return 0;
			}
			if ((SendIocInit(ioc, sleepFlag)) != 0) {
				ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
					"downloadboot: SendIocInit failed\n",
					ioc->name));
				return -EFAULT;
			}
			ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				"downloadboot: SendIocInit successful\n",
				ioc->name));
			return 0;
		}
		if (sleepFlag == CAN_SLEEP) {
			msleep (10);
		} else {
			mdelay (10);
		}
	}
	ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		"downloadboot failed! IocState=%x\n",ioc->name, ioc_state));
	return -EFAULT;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* KickStart - Perform hard reset of MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @force: Force hard reset
* @sleepFlag: Specifies whether the process can sleep
*
* This routine places MPT adapter in diagnostic mode via the
* WriteSequence register, and then performs a hard reset of adapter
* via the Diagnostic register.
*
* Inputs: sleepflag - CAN_SLEEP (non-interrupt thread)
* or NO_SLEEP (interrupt thread, use mdelay)
* force - 1 if doorbell active, board fault state
* board operational, IOC_RECOVERY or
* IOC_BRINGUP and there is an alt_ioc.
* 0 else
*
* Returns:
* 1 - hard reset, READY
* 0 - no reset due to History bit, READY
* -1 - no reset due to History bit but not READY
* OR reset but failed to come READY
* -2 - no reset, could not enter DIAG mode
* -3 - reset but bad FW bit
*/
static int
KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
{
	int reset_rc;
	int poll, max_polls;
	u32 state = 0;

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStarting!\n", ioc->name));

	/* For parallel-SCSI parts, a Message Unit Reset goes out first to
	 * clear possible SCSI bus hang conditions, followed by a 1 second
	 * settle time.
	 */
	if (ioc->bus_type == SPI) {
		SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
		if (sleepFlag == CAN_SLEEP)
			msleep (1000);
		else
			mdelay (1000);
	}

	/* Perform the actual diagnostic (hard) reset. */
	reset_rc = mpt_diag_reset(ioc, force, sleepFlag);
	if (reset_rc < 0)
		return reset_rc;

	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset successful!\n",
		ioc->name));

	/* Poll for READY or OPERATIONAL, 10ms per iteration, ~2s total. */
	max_polls = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 2; /* 2 seconds */
	for (poll = 0; poll < max_polls; poll++) {
		state = mpt_GetIocState(ioc, 1);
		if (state == MPI_IOC_STATE_READY ||
		    state == MPI_IOC_STATE_OPERATIONAL) {
			dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStart successful! (cnt=%d)\n",
				ioc->name, poll));
			return reset_rc;
		}
		if (sleepFlag == CAN_SLEEP)
			msleep (10);
		else
			mdelay (10);
	}

	dinitprintk(ioc, printk(MYIOC_s_ERR_FMT "Failed to come READY after reset! IocState=%x\n",
		ioc->name, mpt_GetIocState(ioc, 0)));
	return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_diag_reset - Perform hard reset of the adapter.
* @ioc: Pointer to MPT_ADAPTER structure
 * @ignore: Set to ignore the reset history bit (force the reset
 * regardless); clear to honor the reset history bit
* @sleepFlag: CAN_SLEEP if called in a non-interrupt thread,
* else set to NO_SLEEP (use mdelay instead)
*
* This routine places the adapter in diagnostic mode via the
* WriteSequence register and then performs a hard reset of adapter
* via the Diagnostic register. Adapter should be in ready state
* upon successful completion.
*
* Returns: 1 hard reset successful
* 0 no reset performed because reset history bit set
* -2 enabling diagnostic mode failed
* -3 diagnostic reset failed
*/
static int
mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
{
	u32 diag0val;
	u32 doorbell;
	int hard_reset_done = 0;
	int count = 0;
	u32 diag1val = 0;
	MpiFwHeader_t *cached_fw;	/* Pointer to FW */
	u8	 cb_idx;
	/* Clear any existing interrupts */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
	/* SAS1078 has a dedicated reset register instead of the
	 * WriteSequence/Diagnostic mechanism used by the other chips.
	 */
	if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
		if (!ignore)
			return 0;
		drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
			"address=%p\n",  ioc->name, __func__,
			&ioc->chip->Doorbell, &ioc->chip->Reset_1078));
		CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
		if (sleepFlag == CAN_SLEEP)
			msleep(1);
		else
			mdelay(1);
		/*
		 * Call each currently registered protocol IOC reset handler
		 * with pre-reset indication.
		 * NOTE: If we're doing _IOC_BRINGUP, there can be no
		 * MptResetHandlers[] registered yet.
		 */
		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
			if (MptResetHandlers[cb_idx])
				(*(MptResetHandlers[cb_idx]))(ioc,
						MPT_IOC_PRE_RESET);
		}
		/* Poll the doorbell state field (1s per pass, ~60s max)
		 * for the IOC to report READY after the 1078 reset.
		 */
		for (count = 0; count < 60; count ++) {
			doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
			doorbell &= MPI_IOC_STATE_MASK;
			drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				"looking for READY STATE: doorbell=%x"
			        " count=%d\n",
				ioc->name, doorbell, count));
			if (doorbell == MPI_IOC_STATE_READY) {
				return 1;
			}
			/* wait 1 sec */
			if (sleepFlag == CAN_SLEEP)
				msleep(1000);
			else
				mdelay(1000);
		}
		return -1;
	}
	/* Use "Diagnostic reset" method! (only thing available!) */
	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	if (ioc->debug_level & MPT_DEBUG) {
		if (ioc->alt_ioc)
			diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG1: diag0=%08x, diag1=%08x\n",
			ioc->name, diag0val, diag1val));
	}
	/* Do the reset if we are told to ignore the reset history
	 * or if the reset history is 0
	 */
	if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) {
		while ((diag0val & MPI_DIAG_DRWE) == 0) {
			/* Write magic sequence to WriteSequence register
			 * Loop until in diagnostic mode
			 */
			CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
			/* wait 100 msec */
			if (sleepFlag == CAN_SLEEP) {
				msleep (100);
			} else {
				mdelay (100);
			}
			count++;
			if (count > 20) {
				printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
						ioc->name, diag0val);
				return -2;
			}
			diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
			dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wrote magic DiagWriteEn sequence (%x)\n",
				ioc->name, diag0val));
		}
		if (ioc->debug_level & MPT_DEBUG) {
			if (ioc->alt_ioc)
				diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
			dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG2: diag0=%08x, diag1=%08x\n",
				ioc->name, diag0val, diag1val));
		}
		/*
		 * Disable the ARM (Bug fix)
		 *
		 */
		CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
		mdelay(1);
		/*
		 * Now hit the reset bit in the Diagnostic register
		 * (THE BIG HAMMER!) (Clears DRWE bit).
		 */
		CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
		hard_reset_done = 1;
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset performed\n",
			ioc->name));
		/*
		 * Call each currently registered protocol IOC reset handler
		 * with pre-reset indication.
		 * NOTE: If we're doing _IOC_BRINGUP, there can be no
		 * MptResetHandlers[] registered yet.
		 */
		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
			if (MptResetHandlers[cb_idx]) {
				mpt_signal_reset(cb_idx,
					ioc, MPT_IOC_PRE_RESET);
				if (ioc->alt_ioc) {
					mpt_signal_reset(cb_idx,
						ioc->alt_ioc, MPT_IOC_PRE_RESET);
				}
			}
		}
		/* Prefer this IOC's cached firmware image, falling back to
		 * the partner (alt) IOC's image if only that one is cached.
		 */
		if (ioc->cached_fw)
			cached_fw = (MpiFwHeader_t *)ioc->cached_fw;
		else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
			cached_fw = (MpiFwHeader_t *)ioc->alt_ioc->cached_fw;
		else
			cached_fw = NULL;
		if (cached_fw) {
			/* If the DownloadBoot operation fails, the
			 * IOC will be left unusable. This is a fatal error
			 * case.  _diag_reset will return < 0
			 */
			for (count = 0; count < 30; count ++) {
				diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
				if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
					break;
				}
				dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n",
					ioc->name, diag0val, count));
				/* wait 1 sec */
				if (sleepFlag == CAN_SLEEP) {
					msleep (1000);
				} else {
					mdelay (1000);
				}
			}
			if ((count = mpt_downloadboot(ioc, cached_fw, sleepFlag)) < 0) {
				printk(MYIOC_s_WARN_FMT
					"firmware downloadboot failure (%d)!\n", ioc->name, count);
			}
		} else {
			/* Wait for FW to reload and for board
			 * to go to the READY state.
			 * Maximum wait is 60 seconds.
			 * If fail, no error will check again
			 * with calling program.
			 */
			for (count = 0; count < 60; count ++) {
				doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
				doorbell &= MPI_IOC_STATE_MASK;
				drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				    "looking for READY STATE: doorbell=%x"
				    " count=%d\n", ioc->name, doorbell, count));
				if (doorbell == MPI_IOC_STATE_READY) {
					break;
				}
				/* wait 1 sec */
				if (sleepFlag == CAN_SLEEP) {
					msleep (1000);
				} else {
					mdelay (1000);
				}
			}
			if (doorbell != MPI_IOC_STATE_READY)
				printk(MYIOC_s_ERR_FMT "Failed to come READY "
				    "after reset! IocState=%x", ioc->name,
				    doorbell);
		}
	}
	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	if (ioc->debug_level & MPT_DEBUG) {
		if (ioc->alt_ioc)
			diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG3: diag0=%08x, diag1=%08x\n",
			ioc->name, diag0val, diag1val));
	}
	/* Clear RESET_HISTORY bit!  Place board in the
	 * diagnostic mode to update the diag register.
	 */
	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	count = 0;
	while ((diag0val & MPI_DIAG_DRWE) == 0) {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
		/* wait 100 msec */
		if (sleepFlag == CAN_SLEEP) {
			msleep (100);
		} else {
			mdelay (100);
		}
		count++;
		if (count > 20) {
			printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
					ioc->name, diag0val);
			/* NOTE: fall through and attempt the history clear
			 * anyway; only the DRWE wait is abandoned here.
			 */
			break;
		}
		diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	}
	diag0val &= ~MPI_DIAG_RESET_HISTORY;
	CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	if (diag0val & MPI_DIAG_RESET_HISTORY) {
		printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n",
				ioc->name);
	}
	/* Disable Diagnostic Mode
	 */
	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF);
	/* Check FW reload status flags.
	 */
	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
	if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) {
		printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! (%02xh)\n",
				ioc->name, diag0val);
		return -3;
	}
	if (ioc->debug_level & MPT_DEBUG) {
		if (ioc->alt_ioc)
			diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG4: diag0=%08x, diag1=%08x\n",
			ioc->name, diag0val, diag1val));
	}
	/*
	 * Reset flag that says we've enabled event notification
	 */
	ioc->facts.EventState = 0;
	if (ioc->alt_ioc)
		ioc->alt_ioc->facts.EventState = 0;
	return hard_reset_done;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* SendIocReset - Send IOCReset request to MPT adapter.
* @ioc: Pointer to MPT_ADAPTER structure
* @reset_type: reset type, expected values are
* %MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
* @sleepFlag: Specifies whether the process can sleep
*
* Send IOCReset request to the MPT adapter.
*
* Returns 0 for success, non-zero for failure.
*/
static int
SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
{
	int rc;
	u32 ioc_state;
	int waited, remaining;

	drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOC reset(0x%02x)!\n",
			ioc->name, reset_type));

	/* Kick off the reset by writing the function code to the doorbell,
	 * then wait for the firmware to acknowledge it.
	 */
	CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
	rc = WaitForDoorbellAck(ioc, 5, sleepFlag);
	if (rc < 0)
		return rc;

	/* FW ACK'd request, wait for READY state.  Poll once per
	 * millisecond for up to 15 seconds.
	 */
	waited = 0;
	remaining = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
	for (;;) {
		ioc_state = mpt_GetIocState(ioc, 1);
		if (ioc_state == MPI_IOC_STATE_READY)
			break;
		remaining--;
		waited++;
		if (!remaining) {
			/* In the no-sleep case each pass was ~1ms but the
			 * HZ-based math below assumes jiffies; scale up.
			 */
			if (sleepFlag != CAN_SLEEP)
				waited *= 10;
			printk(MYIOC_s_ERR_FMT
			    "Wait IOC_READY state (0x%x) timeout(%d)!\n",
			    ioc->name, ioc_state, (int)((waited+5)/HZ));
			return -ETIME;
		}
		if (sleepFlag == CAN_SLEEP)
			msleep(1);
		else
			mdelay (1);
	}

	/* TODO!
	 *  Cleanup all event stuff for this IOC; re-issue EventNotification
	 *  request if needed.
	 */
	if (ioc->facts.Function)
		ioc->facts.EventState = 0;
	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* initChainBuffers - Allocate memory for and initialize chain buffers
* @ioc: Pointer to MPT_ADAPTER structure
*
* Allocates memory for and initializes chain buffers,
* chain buffer control arrays and spinlock.
*/
static int
initChainBuffers(MPT_ADAPTER *ioc)
{
	u8 *mem;
	int sz, ii, num_chain;
	int scale, num_sge, numSGE;
	/* ReqToChain size must equal the req_depth
	 * index = req_idx
	 */
	if (ioc->ReqToChain == NULL) {
		sz = ioc->req_depth * sizeof(int);
		mem = kmalloc(sz, GFP_ATOMIC);
		if (mem == NULL)
			return -1;
		ioc->ReqToChain = (int *) mem;
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReqToChain alloc  @ %p, sz=%d bytes\n",
			 	ioc->name, mem, sz));
		mem = kmalloc(sz, GFP_ATOMIC);
		if (mem == NULL) {
			/* Undo the ReqToChain allocation; leaving it set
			 * would make a retry skip this whole branch and
			 * leave ioc->RequestNB NULL for later dereference.
			 */
			kfree(ioc->ReqToChain);
			ioc->ReqToChain = NULL;
			return -1;
		}
		ioc->RequestNB = (int *) mem;
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestNB alloc  @ %p, sz=%d bytes\n",
			 	ioc->name, mem, sz));
	}
	/* Mark every request slot as "no chain in use". */
	for (ii = 0; ii < ioc->req_depth; ii++) {
		ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN;
	}
	/* ChainToChain size must equal the total number
	 * of chain buffers to be allocated.
	 * index = chain_idx
	 *
	 * Calculate the number of chain buffers needed(plus 1) per I/O
	 * then multiply the maximum number of simultaneous cmds
	 *
	 * num_sge = num sge in request frame + last chain buffer
	 * scale = num sge per chain buffer if no chain element
	 */
	scale = ioc->req_sz / ioc->SGE_size;
	/* 60- vs 64-byte header offsets depend on the SGE address width
	 * (64-bit vs 32-bit addressing).
	 */
	if (ioc->sg_addr_size == sizeof(u64))
		num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
	else
		num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
	/* numSGE = maximum SGEs reachable through the full chain depth. */
	if (ioc->sg_addr_size == sizeof(u64)) {
		numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
			(ioc->req_sz - 60) / ioc->SGE_size;
	} else {
		numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
		    scale + (ioc->req_sz - 64) / ioc->SGE_size;
	}
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
		ioc->name, num_sge, numSGE));
	/* Cap at the scatter-gather depth the SCSI host template uses. */
	if (ioc->bus_type == FC) {
		if (numSGE > MPT_SCSI_FC_SG_DEPTH)
			numSGE = MPT_SCSI_FC_SG_DEPTH;
	} else {
		if (numSGE > MPT_SCSI_SG_DEPTH)
			numSGE = MPT_SCSI_SG_DEPTH;
	}
	/* Count how many chain buffers (plus one spare) cover numSGE. */
	num_chain = 1;
	while (numSGE - num_sge > 0) {
		num_chain++;
		num_sge += (scale - 1);
	}
	num_chain++;
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Now numSGE=%d num_sge=%d num_chain=%d\n",
		ioc->name, numSGE, num_sge, num_chain));
	/* Scale per-I/O chain count by the bus type's max queue depth. */
	if (ioc->bus_type == SPI)
		num_chain *= MPT_SCSI_CAN_QUEUE;
	else if (ioc->bus_type == SAS)
		num_chain *= MPT_SAS_CAN_QUEUE;
	else
		num_chain *= MPT_FC_CAN_QUEUE;
	ioc->num_chain = num_chain;
	sz = num_chain * sizeof(int);
	if (ioc->ChainToChain == NULL) {
		mem = kmalloc(sz, GFP_ATOMIC);
		if (mem == NULL)
			return -1;
		ioc->ChainToChain = (int *) mem;
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainToChain alloc @ %p, sz=%d bytes\n",
			 	ioc->name, mem, sz));
	} else {
		mem = (u8 *) ioc->ChainToChain;
	}
	/* 0xFF == free/unlinked marker for every chain slot. */
	memset(mem, 0xFF, sz);
	return num_chain;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* PrimeIocFifos - Initialize IOC request and reply FIFOs.
* @ioc: Pointer to MPT_ADAPTER structure
*
* This routine allocates memory for the MPT reply and request frame
* pools (if necessary), and primes the IOC reply FIFO with
* reply frames.
*
* Returns 0 for success, non-zero for failure.
*/
static int
PrimeIocFifos(MPT_ADAPTER *ioc)
{
	MPT_FRAME_HDR *mf;
	unsigned long flags;
	dma_addr_t alloc_dma;
	u8 *mem;
	int i, reply_sz, sz, total_size, num_chain;
	u64	dma_mask;
	/* Non-zero only while the SAS1078 35-bit workaround is active. */
	dma_mask = 0;
	/*  Prime reply FIFO...  */
	if (ioc->reply_frames == NULL) {
		if ( (num_chain = initChainBuffers(ioc)) < 0)
			return -1;
		/*
		 * 1078 errata workaround for the 36GB limitation
		 */
		if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
		    ioc->dma_mask > DMA_BIT_MASK(35)) {
			if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
			    && !pci_set_consistent_dma_mask(ioc->pcidev,
			    DMA_BIT_MASK(32))) {
				dma_mask = DMA_BIT_MASK(35);
				d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
				    "setting 35 bit addressing for "
				    "Request/Reply/Chain and Sense Buffers\n",
				    ioc->name));
			} else {
				/*Reseting DMA mask to 64 bit*/
				pci_set_dma_mask(ioc->pcidev,
					DMA_BIT_MASK(64));
				pci_set_consistent_dma_mask(ioc->pcidev,
					DMA_BIT_MASK(64));
				printk(MYIOC_s_ERR_FMT
				    "failed setting 35 bit addressing for "
				    "Request/Reply/Chain and Sense Buffers\n",
				    ioc->name);
				return -1;
			}
		}
		/* One coherent allocation holds, in order: reply frames,
		 * request frames, then the chain buffer pool.
		 */
		total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
			 	ioc->name, ioc->reply_sz, ioc->reply_depth));
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d[%x] bytes\n",
			 	ioc->name, reply_sz, reply_sz));
		sz = (ioc->req_sz * ioc->req_depth);
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d bytes, RequestDepth=%d\n",
			 	ioc->name, ioc->req_sz, ioc->req_depth));
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d[%x] bytes\n",
			 	ioc->name, sz, sz));
		total_size += sz;
		sz = num_chain * ioc->req_sz; /* chain buffer pool size */
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d bytes, ChainDepth=%d\n",
			 	ioc->name, ioc->req_sz, num_chain));
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d[%x] bytes num_chain=%d\n",
			 	ioc->name, sz, sz, num_chain));
		total_size += sz;
		mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
		if (mem == NULL) {
			printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
				ioc->name);
			goto out_fail;
		}
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Total alloc @ %p[%p], sz=%d[%x] bytes\n",
			 	ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size));
		memset(mem, 0, total_size);
		ioc->alloc_total += total_size;
		ioc->alloc = mem;
		ioc->alloc_dma = alloc_dma;
		ioc->alloc_sz = total_size;
		ioc->reply_frames = (MPT_FRAME_HDR *) mem;
		ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
	 		ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
		/* Advance past the reply region to the request frames. */
		alloc_dma += reply_sz;
		mem += reply_sz;
		/*  Request FIFO - WE manage this!  */
		ioc->req_frames = (MPT_FRAME_HDR *) mem;
		ioc->req_frames_dma = alloc_dma;
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffers @ %p[%p]\n",
		    	ioc->name, mem, (void *)(ulong)alloc_dma));
		ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
#if defined(CONFIG_MTRR) && 0
		/*
		 *  Enable Write Combining MTRR for IOC's memory region.
		 *  (at least as much as we can; "size and base must be
		 *  multiples of 4 kiB"
		 */
		ioc->mtrr_reg = mtrr_add(ioc->req_frames_dma,
					 sz,
					 MTRR_TYPE_WRCOMB, 1);
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MTRR region registered (base:size=%08x:%x)\n",
				ioc->name, ioc->req_frames_dma, sz));
#endif
		/* Advance past all request frames to the chain pool. */
		for (i = 0; i < ioc->req_depth; i++) {
			alloc_dma += ioc->req_sz;
			mem += ioc->req_sz;
		}
		ioc->ChainBuffer = mem;
		ioc->ChainBufferDMA = alloc_dma;
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffers @ %p(%p)\n",
			ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
		/* Initialize the free chain Q.
	 	*/
		INIT_LIST_HEAD(&ioc->FreeChainQ);
		/* Post the chain buffers to the FreeChainQ.
	 	*/
		mem = (u8 *)ioc->ChainBuffer;
		for (i=0; i < num_chain; i++) {
			mf = (MPT_FRAME_HDR *) mem;
			list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeChainQ);
			mem += ioc->req_sz;
		}
		/* Initialize Request frames linked list
		 */
		alloc_dma = ioc->req_frames_dma;
		mem = (u8 *) ioc->req_frames;
		spin_lock_irqsave(&ioc->FreeQlock, flags);
		INIT_LIST_HEAD(&ioc->FreeQ);
		for (i = 0; i < ioc->req_depth; i++) {
			mf = (MPT_FRAME_HDR *) mem;
			/*  Queue REQUESTs *internally*!  */
			list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
			mem += ioc->req_sz;
		}
		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
		/* Separate coherent pool for SCSI sense data. */
		sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
		ioc->sense_buf_pool =
			pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
		if (ioc->sense_buf_pool == NULL) {
			printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
				ioc->name);
			goto out_fail;
		}
		ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF);
		ioc->alloc_total += sz;
		dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SenseBuffers @ %p[%p]\n",
 			ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma));
	}
	/* Post Reply frames to FIFO
	 */
	alloc_dma = ioc->alloc_dma;
	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
	 	ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
	for (i = 0; i < ioc->reply_depth; i++) {
		/*  Write each address to the IOC!  */
		CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma);
		alloc_dma += ioc->reply_sz;
	}
	/* Undo the temporary SAS1078 32-bit mask, restoring the original. */
	if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
	    ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
	    ioc->dma_mask))
		d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "restoring 64 bit addressing\n", ioc->name));
	return 0;
out_fail:
	if (ioc->alloc != NULL) {
		sz = ioc->alloc_sz;
		pci_free_consistent(ioc->pcidev,
				sz,
				ioc->alloc, ioc->alloc_dma);
		/* NOTE(review): ioc->alloc is freed here but not set to
		 * NULL -- looks like a retry could double free; only the
		 * reply/req frame pointers are cleared.  Confirm callers.
		 */
		ioc->reply_frames = NULL;
		ioc->req_frames = NULL;
		ioc->alloc_total -= sz;
	}
	if (ioc->sense_buf_pool != NULL) {
		sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
		pci_free_consistent(ioc->pcidev,
				sz,
				ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
		ioc->sense_buf_pool = NULL;
	}
	if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
	    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
	    DMA_BIT_MASK(64)))
		d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "restoring 64 bit addressing\n", ioc->name));
	return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_handshake_req_reply_wait - Send MPT request to and receive reply
* from IOC via doorbell handshake method.
* @ioc: Pointer to MPT_ADAPTER structure
* @reqBytes: Size of the request in bytes
* @req: Pointer to MPT request frame
* @replyBytes: Expected size of the reply in bytes
* @u16reply: Pointer to area where reply should be written
* @maxwait: Max wait time for a reply (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* NOTES: It is the callers responsibility to byte-swap fields in the
* request which are greater than 1 byte in size. It is also the
* callers responsibility to byte-swap response fields which are
* greater than 1 byte in size.
*
* Returns 0 for success, non-zero for failure.
*/
static int
mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
		int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
{
	MPIDefaultReply_t *mptReply;
	/* failcnt accumulates missed handshake steps; the return value is
	 * its negation, so 0 means every step completed.
	 */
	int failcnt = 0;
	int t;
	/*
	 * Get ready to cache a handshake reply
	 */
	ioc->hs_reply_idx = 0;
	mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
	mptReply->MsgLength = 0;
	/*
	 * Make sure there are no doorbells (WRITE 0 to IntStatus reg),
	 * then tell IOC that we want to handshake a request of N words.
	 * (WRITE u32val to Doorbell reg).
	 */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
	CHIPREG_WRITE32(&ioc->chip->Doorbell,
			((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
			 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
	/*
	 * Wait for IOC's doorbell handshake int
	 */
	if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
		failcnt++;
	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request start reqBytes=%d, WaitCnt=%d%s\n",
		ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
	/* Read doorbell and check for active bit */
	if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
		return -1;
	/*
	 * Clear doorbell int (WRITE 0 to IntStatus reg),
	 * then wait for IOC to ACKnowledge that it's ready for
	 * our handshake request.
	 */
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
	if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
		failcnt++;
	if (!failcnt) {
		int	 ii;
		u8	*req_as_bytes = (u8 *) req;
		/*
		 * Stuff request words via doorbell handshake,
		 * with ACK from IOC for each.
		 * Words are assembled byte-by-byte (little-endian order)
		 * so the caller's frame layout is sent exactly as-is.
		 */
		for (ii = 0; !failcnt && ii < reqBytes/4; ii++) {
			u32 word = ((req_as_bytes[(ii*4) + 0] <<  0) |
				    (req_as_bytes[(ii*4) + 1] <<  8) |
				    (req_as_bytes[(ii*4) + 2] << 16) |
				    (req_as_bytes[(ii*4) + 3] << 24));
			CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
			if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
				failcnt++;
		}
		dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handshake request frame (@%p) header\n", ioc->name, req));
		DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)req);
		dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request post done, WaitCnt=%d%s\n",
			ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : ""));
		/*
		 * Wait for completion of doorbell handshake reply from the IOC
		 */
		if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
			failcnt++;
		dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake reply count=%d%s\n",
			ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
		/*
		 * Copy out the cached reply...
		 * Truncated to the smaller of caller's buffer and the
		 * actual reply length (MsgLength is in 32-bit words).
		 */
		for (ii=0; ii < min(replyBytes/2,mptReply->MsgLength*2); ii++)
			u16reply[ii] = ioc->hs_reply[ii];
	} else {
		return -99;
	}
	return -failcnt;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge
* @ioc: Pointer to MPT_ADAPTER structure
* @howlong: How long to wait (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* This routine waits (up to ~2 seconds max) for IOC doorbell
* handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS
* bit in its IntStatus register being clear.
*
* Returns a negative value on failure, else wait loop count.
*/
static int
WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
{
	int remaining = 1000 * howlong;
	int polls = 0;
	u32 status = 0;

	/* Poll once per millisecond for IOP_DOORBELL_STATUS to deassert;
	 * the only difference between the sleeping and non-sleeping cases
	 * is how the 1ms delay is produced.
	 */
	while (--remaining) {
		if (sleepFlag == CAN_SLEEP)
			msleep (1);
		else
			udelay (1000);
		status = CHIPREG_READ32(&ioc->chip->IntStatus);
		if (! (status & MPI_HIS_IOP_DOORBELL_STATUS))
			break;
		polls++;
	}

	if (remaining) {
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell ACK (count=%d)\n",
				ioc->name, polls));
		return polls;
	}
	printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout (count=%d), IntStatus=%x!\n",
			ioc->name, polls, status);
	return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit
* @ioc: Pointer to MPT_ADAPTER structure
* @howlong: How long to wait (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* This routine waits (up to ~2 seconds max) for IOC doorbell interrupt
* (MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register.
*
* Returns a negative value on failure, else wait loop count.
*/
/*
 * Poll the IntStatus register, roughly once per millisecond, until the
 * IOC raises MPI_HIS_DOORBELL_INTERRUPT (a handshake reply word is
 * available in the Doorbell register).  Sleeps between polls when the
 * caller permits, busy-waits otherwise.
 */
static int
WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
{
	int loops;
	int waited = 0;
	u32 status = 0;
	int can_sleep = (sleepFlag == CAN_SLEEP);

	for (loops = 1000 * howlong; --loops; waited++) {
		/* Check first, then delay: the bit may already be set */
		status = CHIPREG_READ32(&ioc->chip->IntStatus);
		if (status & MPI_HIS_DOORBELL_INTERRUPT)
			break;
		if (can_sleep)
			msleep(1);
		else
			udelay(1000);
	}

	if (loops) {
		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell INT (cnt=%d) howlong=%d\n",
				ioc->name, waited, howlong));
		return waited;
	}

	printk(MYIOC_s_ERR_FMT "Doorbell INT timeout (count=%d), IntStatus=%x!\n",
			ioc->name, waited, status);
	return -1;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
* @ioc: Pointer to MPT_ADAPTER structure
* @howlong: How long to wait (in seconds)
* @sleepFlag: Specifies whether the process can sleep
*
* This routine polls the IOC for a handshake reply, 16 bits at a time.
* Reply is cached to IOC private area large enough to hold a maximum
* of 128 bytes of reply data.
*
* Returns a negative value on failure, else size of reply in WORDS.
*/
static int
WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
{
	int u16cnt = 0;		/* count of 16-bit reply words captured so far */
	int failcnt = 0;	/* non-zero once any handshake step times out */
	int t;
	u16 *hs_reply = ioc->hs_reply;
	/* View the cached reply buffer as an MPI default reply so we can
	 * read the IOC-supplied MsgLength once the first words arrive. */
	volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
	u16 hword;

	hs_reply[0] = hs_reply[1] = hs_reply[7] = 0;

	/*
	 * Get first two u16's so we can look at IOC's intended reply MsgLength
	 */
	u16cnt=0;
	if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) {
		failcnt++;
	} else {
		/* Each reply word arrives in the low 16 bits of the Doorbell */
		hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
		/* ACK the word by clearing IntStatus so the IOC posts the next */
		CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
		if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
			failcnt++;
		else {
			hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
			CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
		}
	}

	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
			ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
			failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));

	/*
	 * If no error (and IOC said MsgLength is > 0), piece together
	 * reply 16 bits at a time.
	 */
	/* MsgLength is in 32-bit units, hence the factor of 2 for u16 words.
	 * NOTE: on a failed WaitForDoorbellInt inside the loop, the Doorbell
	 * read and IntStatus clear below still run once before the loop
	 * condition terminates on failcnt. */
	for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) {
		if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
			failcnt++;
		hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
		/* don't overflow our IOC hs_reply[] buffer! */
		if (u16cnt < ARRAY_SIZE(ioc->hs_reply))
			hs_reply[u16cnt] = hword;
		CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
	}

	/* One final doorbell interrupt marks end-of-reply; ACK it too */
	if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
		failcnt++;
	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);

	if (failcnt) {
		printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n",
				ioc->name);
		return -failcnt;
	}
#if 0
	else if (u16cnt != (2 * mptReply->MsgLength)) {
		return -101;
	}
	else if ((mptReply->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		return -102;
	}
#endif

	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got Handshake reply:\n", ioc->name));
	DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mptReply);

	dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell REPLY WaitCnt=%d (sz=%d)\n",
			ioc->name, t, u16cnt/2));

	/* Size of the captured reply, in 32-bit words */
	return u16cnt/2;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetLanConfigPages - Fetch LANConfig pages.
* @ioc: Pointer to MPT_ADAPTER structure
*
* Return: 0 for success
* -ENOMEM if no memory available
* -EPERM if not allowed due to ISR context
* -EAGAIN if no msg frames currently available
* -EFAULT for non-successful reply or no reply (timeout)
*/
static int
GetLanConfigPages(MPT_ADAPTER *ioc)
{
	ConfigPageHeader_t hdr;
	CONFIGPARMS cfg;
	LANPage0_t *ppage0_alloc;
	dma_addr_t page0_dma;
	LANPage1_t *ppage1_alloc;
	dma_addr_t page1_dma;
	int rc = 0;
	int data_sz;
	int copy_sz;

	/* Get LAN Page 0 header */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 0;
	hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.pageAddr = 0;
	cfg.timeout = 0;

	if ((rc = mpt_config(ioc, &cfg)) != 0)
		return rc;

	if (hdr.PageLength > 0) {
		/* PageLength is in 32-bit words; allocate a DMA-able buffer
		 * and re-issue the config request as a page read. */
		data_sz = hdr.PageLength * 4;
		ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
		rc = -ENOMEM;
		if (ppage0_alloc) {
			memset((u8 *)ppage0_alloc, 0, data_sz);
			cfg.physAddr = page0_dma;
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

			if ((rc = mpt_config(ioc, &cfg)) == 0) {
				/* save the data */
				/* Truncate to whichever is smaller: cached struct or page */
				copy_sz = min_t(int, sizeof(LANPage0_t), data_sz);
				memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz);
			}

			pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);

			/* FIXME!
			 *	Normalize endianness of structure data,
			 *	by byte-swapping all > 1 byte fields!
			 */
		}

		if (rc)
			return rc;
	}

	/* Get LAN Page 1 header */
	/* cfg.timeout keeps the 0 set above; only the header fields change */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 1;
	hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.pageAddr = 0;

	if ((rc = mpt_config(ioc, &cfg)) != 0)
		return rc;

	if (hdr.PageLength == 0)
		return 0;

	data_sz = hdr.PageLength * 4;
	rc = -ENOMEM;
	ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
	if (ppage1_alloc) {
		memset((u8 *)ppage1_alloc, 0, data_sz);
		cfg.physAddr = page1_dma;
		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

		if ((rc = mpt_config(ioc, &cfg)) == 0) {
			/* save the data */
			copy_sz = min_t(int, sizeof(LANPage1_t), data_sz);
			memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
		}

		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);

		/* FIXME!
		 *	Normalize endianness of structure data,
		 *	by byte-swapping all > 1 byte fields!
		 */
	}

	return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
* @ioc: Pointer to MPT_ADAPTER structure
* @persist_opcode: see below
*
* MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
* devices not currently present.
 * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear all persist TargetID mappings
 *
 * NOTE: Do not use this function during interrupt time.
*
* Returns 0 for success, non-zero error
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
int
mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
{
	SasIoUnitControlRequest_t *sasIoUnitCntrReq;
	SasIoUnitControlReply_t *sasIoUnitCntrReply;
	MPT_FRAME_HDR *mf = NULL;
	MPIHeader_t *mpi_hdr;
	int ret = 0;
	unsigned long timeleft;

	/* Serialize all internal mptbase commands; held until the out label */
	mutex_lock(&ioc->mptbase_cmds.mutex);

	/* init the internal cmd struct */
	memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
	INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)

	/* insure garbage is not sent to fw */
	switch(persist_opcode) {
	case MPI_SAS_OP_CLEAR_NOT_PRESENT:
	case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
		break;
	default:
		ret = -1;
		goto out;
	}

	printk(KERN_DEBUG "%s: persist_opcode=%x\n",
		__func__, persist_opcode);

	/* Get a MF for this command.
	 */
	if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
		printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
		ret = -1;
		goto out;
	}

	/* Build the SAS IO Unit Control request in the frame, preserving the
	 * MsgContext the frame came with so the completion can be matched. */
	mpi_hdr = (MPIHeader_t *) mf;
	sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
	memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
	sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
	sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
	sasIoUnitCntrReq->Operation = persist_opcode;

	mpt_put_msg_frame(mpt_base_index, ioc, mf);
	/* Wait up to 10s for the firmware's completion callback */
	timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
	if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		ret = -ETIME;
		printk(KERN_DEBUG "%s: failed\n", __func__);
		/* If an IOC reset already reclaimed the frame, nothing to do */
		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
			goto out;
		if (!timeleft) {
			/* Genuine timeout: reset the adapter and reclaim the frame */
			printk(MYIOC_s_WARN_FMT
			       "Issuing Reset from %s!!, doorbell=0x%08x\n",
			       ioc->name, __func__, mpt_GetIocState(ioc, 0));
			mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
			mpt_free_msg_frame(ioc, mf);
		}
		goto out;
	}

	if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
		ret = -1;
		goto out;
	}

	/* Completion delivered a reply frame; check the IOC's verdict */
	sasIoUnitCntrReply =
	    (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
	if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
		printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
		    __func__, sasIoUnitCntrReply->IOCStatus,
		    sasIoUnitCntrReply->IOCLogInfo);
		printk(KERN_DEBUG "%s: failed\n", __func__);
		ret = -1;
	} else
		printk(KERN_DEBUG "%s: success\n", __func__);
 out:

	CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
	mutex_unlock(&ioc->mptbase_cmds.mutex);
	return ret;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mptbase_raid_process_event_data - log a RAID event in readable form
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@pRaidEventData: RAID event data from the IOC's event notification
 *
 *	Decodes the reason code, volume/disk identifiers and the
 *	SettingsStatus word and prints a descriptive message for each
 *	RAID event.  Logging only; no adapter state is modified.
 */
static void
mptbase_raid_process_event_data(MPT_ADAPTER *ioc,
    MpiEventDataRaid_t * pRaidEventData)
{
	int volume;
	int reason;
	int disk;
	int status;
	int flags;
	int state;

	volume	= pRaidEventData->VolumeID;
	reason	= pRaidEventData->ReasonCode;
	disk	= pRaidEventData->PhysDiskNum;
	status	= le32_to_cpu(pRaidEventData->SettingsStatus);
	/* SettingsStatus packs flags in byte 0 and state in byte 1 */
	flags	= (status >> 0) & 0xff;
	state	= (status >> 8) & 0xff;

	/* Domain-validation events are not logged here */
	if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
		return;
	}

	if ((reason >= MPI_EVENT_RAID_RC_PHYSDISK_CREATED &&
	     reason <= MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED) ||
	    (reason == MPI_EVENT_RAID_RC_SMART_DATA)) {
		printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for PhysDisk %d id=%d\n",
			ioc->name, disk, volume);
	} else {
		printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for VolumeID %d\n",
			ioc->name, volume);
	}

	switch(reason) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
		printk(MYIOC_s_INFO_FMT "  volume has been created\n",
			ioc->name);
		break;

	case MPI_EVENT_RAID_RC_VOLUME_DELETED:

		printk(MYIOC_s_INFO_FMT "  volume has been deleted\n",
			ioc->name);
		break;

	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
		printk(MYIOC_s_INFO_FMT "  volume settings have been changed\n",
			ioc->name);
		break;

	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		printk(MYIOC_s_INFO_FMT "  volume is now %s%s%s%s\n",
			ioc->name,
			state == MPI_RAIDVOL0_STATUS_STATE_OPTIMAL
			 ? "optimal"
			 : state == MPI_RAIDVOL0_STATUS_STATE_DEGRADED
			  ? "degraded"
			  : state == MPI_RAIDVOL0_STATUS_STATE_FAILED
			   ? "failed"
			   : "state unknown",
			flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED
			 ? ", enabled" : "",
			flags & MPI_RAIDVOL0_STATUS_FLAG_QUIESCED
			 ? ", quiesced" : "",
			flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
			 ? ", resync in progress" : "" );
		break;

	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		printk(MYIOC_s_INFO_FMT "  volume membership of PhysDisk %d has changed\n",
			ioc->name, disk);
		break;

	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
		printk(MYIOC_s_INFO_FMT "  PhysDisk has been created\n",
			ioc->name);
		break;

	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
		printk(MYIOC_s_INFO_FMT "  PhysDisk has been deleted\n",
			ioc->name);
		break;

	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
		printk(MYIOC_s_INFO_FMT "  PhysDisk settings have been changed\n",
			ioc->name);
		break;

	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		printk(MYIOC_s_INFO_FMT "  PhysDisk is now %s%s%s\n",
			ioc->name,
			state == MPI_PHYSDISK0_STATUS_ONLINE
			 ? "online"
			 : state == MPI_PHYSDISK0_STATUS_MISSING
			  ? "missing"
			  : state == MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE
			   ? "not compatible"
			   : state == MPI_PHYSDISK0_STATUS_FAILED
			    ? "failed"
			    : state == MPI_PHYSDISK0_STATUS_INITIALIZING
			     ? "initializing"
			     : state == MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED
			      ? "offline requested"
			      : state == MPI_PHYSDISK0_STATUS_FAILED_REQUESTED
			       ? "failed requested"
			       : state == MPI_PHYSDISK0_STATUS_OTHER_OFFLINE
			        ? "offline"
			        : "state unknown",
			flags & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
			 ? ", out of sync" : "",
			flags & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED
			 ? ", quiesced" : "" );
		break;

	/* Unreachable: filtered by the early return above; kept for
	 * completeness of the reason-code decode table */
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
		printk(MYIOC_s_INFO_FMT "  Domain Validation needed for PhysDisk %d\n",
			ioc->name, disk);
		break;

	case MPI_EVENT_RAID_RC_SMART_DATA:
		printk(MYIOC_s_INFO_FMT "  SMART data received, ASC/ASCQ = %02xh/%02xh\n",
			ioc->name, pRaidEventData->ASC, pRaidEventData->ASCQ);
		break;

	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
		printk(MYIOC_s_INFO_FMT "  replacement of PhysDisk %d has started\n",
			ioc->name, disk);
		break;
	}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* GetIoUnitPage2 - Retrieve BIOS version and boot order information.
* @ioc: Pointer to MPT_ADAPTER structure
*
* Returns: 0 for success
* -ENOMEM if no memory available
* -EPERM if not allowed due to ISR context
* -EAGAIN if no msg frames currently available
* -EFAULT for non-successful reply or no reply (timeout)
*/
static int
GetIoUnitPage2(MPT_ADAPTER *ioc)
{
	ConfigPageHeader_t hdr;
	CONFIGPARMS cfg;
	IOUnitPage2_t *page;
	dma_addr_t page_dma;
	int sz;
	int rc;

	/* First request only the page header, to learn the page length. */
	hdr.PageVersion = 0;
	hdr.PageLength = 0;
	hdr.PageNumber = 2;
	hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;

	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.pageAddr = 0;
	cfg.timeout = 0;

	rc = mpt_config(ioc, &cfg);
	if (rc != 0)
		return rc;
	if (hdr.PageLength == 0)
		return 0;

	/* PageLength is in 32-bit words; read the full page via DMA. */
	sz = hdr.PageLength * 4;
	page = pci_alloc_consistent(ioc->pcidev, sz, &page_dma);
	if (!page)
		return -ENOMEM;
	memset((u8 *)page, 0, sz);

	cfg.physAddr = page_dma;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

	/* On a successful read, cache the BIOS version on the adapter. */
	rc = mpt_config(ioc, &cfg);
	if (rc == 0)
		ioc->biosVersion = le32_to_cpu(page->BiosVersion);

	pci_free_consistent(ioc->pcidev, sz, (u8 *)page, page_dma);
	return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
 * @ioc: Pointer to an Adapter Structure
* @portnum: IOC port number
*
* Return: -EFAULT if read of config page header fails
* or if no nvram
* If read of SCSI Port Page 0 fails,
* NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
* Adapter settings: async, narrow
* Return 1
* If read of SCSI Port Page 2 fails,
* Adapter settings valid
* NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
* Return 1
* Else
* Both valid
* Return 0
* CHECK - what type of locking mechanisms should be used????
*/
static int
mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
{
	u8 *pbuf;
	dma_addr_t buf_dma;
	CONFIGPARMS cfg;
	ConfigPageHeader_t header;
	int ii;
	int data, rc = 0;

	/* Allocate memory
	 */
	/* One 32-bit NVRAM slot per possible SCSI device; allocated once
	 * and kept on the adapter for its lifetime. */
	if (!ioc->spi_data.nvram) {
		int	 sz;
		u8	*mem;
		sz = MPT_MAX_SCSI_DEVICES * sizeof(int);
		mem = kmalloc(sz, GFP_ATOMIC);
		if (mem == NULL)
			return -EFAULT;

		ioc->spi_data.nvram = (int *) mem;

		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SCSI device NVRAM settings @ %p, sz=%d\n",
			ioc->name, ioc->spi_data.nvram, sz));
	}

	/* Invalidate NVRAM information
	 */
	for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
		ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID;
	}

	/* Read SPP0 header, allocate memory, then read page.
	 */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 0;
	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;
	cfg.pageAddr = portnum;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.timeout = 0;	/* use default */
	if (mpt_config(ioc, &cfg) != 0)
		 return -EFAULT;

	if (header.PageLength > 0) {
		pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
		if (pbuf) {
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
			cfg.physAddr = buf_dma;
			if (mpt_config(ioc, &cfg) != 0) {
				/* Page 0 read failed: fall back to the most
				 * conservative transfer settings and return 1. */
				ioc->spi_data.maxBusWidth = MPT_NARROW;
				ioc->spi_data.maxSyncOffset = 0;
				ioc->spi_data.minSyncFactor = MPT_ASYNC;
				ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
				rc = 1;
				ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
					"Unable to read PortPage0 minSyncFactor=%x\n",
					ioc->name, ioc->spi_data.minSyncFactor));
			} else {
				/* Save the Port Page 0 data
				 */
				SCSIPortPage0_t  *pPP0 = (SCSIPortPage0_t  *) pbuf;
				/* In-place little-endian to CPU conversion */
				pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities);
				pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface);

				if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
					ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
					ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
						"noQas due to Capabilities=%x\n",
						ioc->name, pPP0->Capabilities));
				}
				ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
				/* Max sync offset lives in bits 23:16 of Capabilities */
				data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK;
				if (data) {
					ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
					/* Min sync period lives in bits 15:8 */
					data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
					ioc->spi_data.minSyncFactor = (u8) (data >> 8);
					ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
						"PortPage0 minSyncFactor=%x\n",
						ioc->name, ioc->spi_data.minSyncFactor));
				} else {
					/* Zero offset means async-only */
					ioc->spi_data.maxSyncOffset = 0;
					ioc->spi_data.minSyncFactor = MPT_ASYNC;
				}

				ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK;

				/* Update the minSyncFactor based on bus type.
				 */
				/* HVD and SE buses can't go faster than Ultra */
				if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
					(ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE))  {

					if (ioc->spi_data.minSyncFactor < MPT_ULTRA) {
						ioc->spi_data.minSyncFactor = MPT_ULTRA;
						ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
							"HVD or SE detected, minSyncFactor=%x\n",
							ioc->name, ioc->spi_data.minSyncFactor));
					}
				}
			}
			if (pbuf) {
				pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
			}
		}
	}

	/* SCSI Port Page 2 - Read the header then the page.
	 */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 2;
	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;
	cfg.pageAddr = portnum;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	if (mpt_config(ioc, &cfg) != 0)
		return -EFAULT;

	if (header.PageLength > 0) {
		/* Allocate memory and read SCSI Port Page 2
		 */
		pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
		if (pbuf) {
			/* Page 2 is read from NVRAM, not the current values */
			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
			cfg.physAddr = buf_dma;
			if (mpt_config(ioc, &cfg) != 0) {
				/* Nvram data is left with INVALID mark
				 */
				rc = 1;
			} else if (ioc->pcidev->vendor == PCI_VENDOR_ID_ATTO) {

				/* This is an ATTO adapter, read Page2 accordingly
				 */
				ATTO_SCSIPortPage2_t *pPP2 = (ATTO_SCSIPortPage2_t  *) pbuf;
				ATTODeviceInfo_t *pdevice = NULL;
				u16 ATTOFlags;

				/* Save the Port Page 2 data
				 * (reformat into a 32bit quantity)
				 */
				for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
				  pdevice = &pPP2->DeviceSettings[ii];
				  ATTOFlags = le16_to_cpu(pdevice->ATTOFlags);
				  data = 0;

				  /* Translate ATTO device flags to LSI format
				   */
				  if (ATTOFlags & ATTOFLAG_DISC)
				    data |= (MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE);
				  if (ATTOFlags & ATTOFLAG_ID_ENB)
				    data |= (MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE);
				  if (ATTOFlags & ATTOFLAG_LUN_ENB)
				    data |= (MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE);
				  if (ATTOFlags & ATTOFLAG_TAGGED)
				    data |= (MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE);
				  if (!(ATTOFlags & ATTOFLAG_WIDE_ENB))
				    data |= (MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE);

				  /* Pack as flags<<16 | period<<8 | timeout(10) */
				  data = (data << 16) | (pdevice->Period << 8) | 10;
				  ioc->spi_data.nvram[ii] = data;
				}
			} else {
				SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t  *) pbuf;
				MpiDeviceInfo_t	*pdevice = NULL;

				/*
				 * Save "Set to Avoid SCSI Bus Resets" flag
				 */
				ioc->spi_data.bus_reset =
				    (le32_to_cpu(pPP2->PortFlags) &
			        MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ?
				    0 : 1 ;

				/* Save the Port Page 2 data
				 * (reformat into a 32bit quantity)
				 */
				data = le32_to_cpu(pPP2->PortFlags) & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
				ioc->spi_data.PortFlags = data;
				for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
					pdevice = &pPP2->DeviceSettings[ii];
					/* Pack as flags<<16 | syncfactor<<8 | timeout */
					data = (le16_to_cpu(pdevice->DeviceFlags) << 16) |
						(pdevice->SyncFactor << 8) | pdevice->Timeout;
					ioc->spi_data.nvram[ii] = data;
				}
			}

			pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
		}
	}

	/* Update Adapter limits with those from NVRAM
	 * Comment: Don't need to do this. Target performance
	 * parameters will never exceed the adapters limits.
	 */

	return rc;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_readScsiDevicePageHeaders - save version and length of SDP1
 * @ioc: Pointer to an Adapter Structure
* @portnum: IOC port number
*
* Return: -EFAULT if read of config page header fails
* or 0 if success.
*/
static int
mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
{
	CONFIGPARMS cfg;
	ConfigPageHeader_t header;

	/* Read the SCSI Device Page 1 header
	 */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 1;
	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;
	cfg.pageAddr = portnum;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.timeout = 0;

	if (mpt_config(ioc, &cfg) != 0)
		 return -EFAULT;

	/* Cache SDP1 version/length for later page reads */
	ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
	ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength;

	/* Re-issue for SDP0: only the header fields change; cfg still
	 * points at &header with action PAGE_HEADER from above */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 0;
	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;

	if (mpt_config(ioc, &cfg) != 0)
		 return -EFAULT;

	ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion;
	ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength;

	dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 0: version %d length %d\n",
			ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));

	dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 1: version %d length %d\n",
			ioc->name, ioc->spi_data.sdp1version, ioc->spi_data.sdp1length));
	return 0;
}
/**
* mpt_inactive_raid_list_free - This clears this link list.
* @ioc : pointer to per adapter structure
**/
/* Drop and free every entry on the adapter's inactive-RAID-component
 * list, under the list mutex.  No-op if the list is already empty. */
static void
mpt_inactive_raid_list_free(MPT_ADAPTER *ioc)
{
	struct inactive_raid_component_info *entry, *tmp;

	if (list_empty(&ioc->raid_data.inactive_list))
		return;

	mutex_lock(&ioc->raid_data.inactive_list_mutex);
	list_for_each_entry_safe(entry, tmp,
	    &ioc->raid_data.inactive_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&ioc->raid_data.inactive_list_mutex);
}
/**
* mpt_inactive_raid_volumes - sets up link list of phy_disk_nums for devices belonging in an inactive volume
*
* @ioc : pointer to per adapter structure
* @channel : volume channel
* @id : volume target id
**/
static void
mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
	CONFIGPARMS cfg;
	ConfigPageHeader_t hdr;
	dma_addr_t dma_handle;
	pRaidVolumePage0_t buffer = NULL;
	int i;
	RaidPhysDiskPage0_t phys_disk;
	struct inactive_raid_component_info *component_info;
	int handle_inactive_volumes;

	memset(&cfg, 0 , sizeof(CONFIGPARMS));
	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
	hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
	/* RAID Volume pages are addressed by (bus << 8) | target id */
	cfg.pageAddr = (channel << 8) + id;
	cfg.cfghdr.hdr = &hdr;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;

	if (mpt_config(ioc, &cfg) != 0)
		goto out;

	if (!hdr.PageLength)
		goto out;

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);

	if (!buffer)
		goto out;

	cfg.physAddr = dma_handle;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;

	if (mpt_config(ioc, &cfg) != 0)
		goto out;

	if (!buffer->NumPhysDisks)
		goto out;

	/* A volume counts as inactive when explicitly flagged inactive,
	 * not enabled, failed, or missing */
	handle_inactive_volumes =
	   (buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE ||
	   (buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED) == 0 ||
	    buffer->VolumeStatus.State == MPI_RAIDVOL0_STATUS_STATE_FAILED ||
	    buffer->VolumeStatus.State == MPI_RAIDVOL0_STATUS_STATE_MISSING) ? 1 : 0;

	if (!handle_inactive_volumes)
		goto out;

	/* Record each member disk of the inactive volume; disks whose
	 * page-0 read or allocation fails are silently skipped */
	mutex_lock(&ioc->raid_data.inactive_list_mutex);
	for (i = 0; i < buffer->NumPhysDisks; i++) {
		if(mpt_raid_phys_disk_pg0(ioc,
		    buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
			continue;

		if ((component_info = kmalloc(sizeof (*component_info),
		 GFP_KERNEL)) == NULL)
			continue;

		component_info->volumeID = id;
		component_info->volumeBus = channel;
		component_info->d.PhysDiskNum = phys_disk.PhysDiskNum;
		component_info->d.PhysDiskBus = phys_disk.PhysDiskBus;
		component_info->d.PhysDiskID = phys_disk.PhysDiskID;
		component_info->d.PhysDiskIOC = phys_disk.PhysDiskIOC;

		list_add_tail(&component_info->list,
		    &ioc->raid_data.inactive_list);
	}
	mutex_unlock(&ioc->raid_data.inactive_list_mutex);

 out:
	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);
}
/**
* mpt_raid_phys_disk_pg0 - returns phys disk page zero
* @ioc: Pointer to a Adapter Structure
* @phys_disk_num: io unit unique phys disk num generated by the ioc
* @phys_disk: requested payload data returned
*
* Return:
* 0 on success
* -EFAULT if read of config page header fails or data pointer not NULL
* -ENOMEM if pci_alloc failed
**/
int
mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
			RaidPhysDiskPage0_t *phys_disk)
{
	CONFIGPARMS cfg;
	ConfigPageHeader_t hdr;
	dma_addr_t dma_handle;
	pRaidPhysDiskPage0_t buffer = NULL;
	int rc;

	memset(&cfg, 0 , sizeof(CONFIGPARMS));
	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
	/* Caller's output is zeroed up front so a failure leaves it clean */
	memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));

	hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
	hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;

	if (mpt_config(ioc, &cfg) != 0) {
		rc = -EFAULT;
		goto out;
	}

	if (!hdr.PageLength) {
		rc = -EFAULT;
		goto out;
	}

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);

	if (!buffer) {
		rc = -ENOMEM;
		goto out;
	}

	cfg.physAddr = dma_handle;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	/* Phys disk pages are addressed by their IOC-assigned number */
	cfg.pageAddr = phys_disk_num;

	if (mpt_config(ioc, &cfg) != 0) {
		rc = -EFAULT;
		goto out;
	}

	rc = 0;
	memcpy(phys_disk, buffer, sizeof(*buffer));
	/* Only MaxLBA is endian-converted here; other multi-byte fields
	 * are handed back as read from the IOC */
	phys_disk->MaxLBA = le32_to_cpu(buffer->MaxLBA);

 out:

	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);

	return rc;
}
/**
* mpt_raid_phys_disk_get_num_paths - returns number paths associated to this phys_num
* @ioc: Pointer to a Adapter Structure
* @phys_disk_num: io unit unique phys disk num generated by the ioc
*
* Return:
* returns number paths
**/
int
mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
{
	CONFIGPARMS cfg;
	ConfigPageHeader_t hdr;
	dma_addr_t dma_handle;
	pRaidPhysDiskPage1_t buffer = NULL;
	int rc;

	memset(&cfg, 0 , sizeof(CONFIGPARMS));
	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));

	hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
	hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
	hdr.PageNumber = 1;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;

	/* All failure paths report zero paths rather than an error code */
	if (mpt_config(ioc, &cfg) != 0) {
		rc = 0;
		goto out;
	}

	if (!hdr.PageLength) {
		rc = 0;
		goto out;
	}

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);

	if (!buffer) {
		rc = 0;
		goto out;
	}

	cfg.physAddr = dma_handle;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	cfg.pageAddr = phys_disk_num;

	if (mpt_config(ioc, &cfg) != 0) {
		rc = 0;
		goto out;
	}

	rc = buffer->NumPhysDiskPaths;
 out:

	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);

	return rc;
}
EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
/**
* mpt_raid_phys_disk_pg1 - returns phys disk page 1
* @ioc: Pointer to a Adapter Structure
* @phys_disk_num: io unit unique phys disk num generated by the ioc
* @phys_disk: requested payload data returned
*
* Return:
* 0 on success
* -EFAULT if read of config page header fails or data pointer not NULL
* -ENOMEM if pci_alloc failed
**/
int
mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
		RaidPhysDiskPage1_t *phys_disk)
{
	CONFIGPARMS cfg;
	ConfigPageHeader_t hdr;
	dma_addr_t dma_handle;
	pRaidPhysDiskPage1_t buffer = NULL;
	int rc;
	int i;
	__le64 sas_address;

	memset(&cfg, 0 , sizeof(CONFIGPARMS));
	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
	rc = 0;

	hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
	hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
	hdr.PageNumber = 1;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;

	if (mpt_config(ioc, &cfg) != 0) {
		rc = -EFAULT;
		goto out;
	}

	if (!hdr.PageLength) {
		rc = -EFAULT;
		goto out;
	}

	buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
	    &dma_handle);

	if (!buffer) {
		rc = -ENOMEM;
		goto out;
	}

	cfg.physAddr = dma_handle;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	cfg.pageAddr = phys_disk_num;

	if (mpt_config(ioc, &cfg) != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Copy each path entry to the caller's buffer, fixing endianness
	 * of Flags and the two WWIDs along the way */
	phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
	phys_disk->PhysDiskNum = phys_disk_num;
	for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
		phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
		phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
		phys_disk->Path[i].OwnerIdentifier =
				buffer->Path[i].OwnerIdentifier;
		phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
		/* WWID fields may not be 64-bit aligned in the page, so
		 * byte-swap via memcpy through a local __le64 */
		memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
		sas_address = le64_to_cpu(sas_address);
		memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
		memcpy(&sas_address,
				&buffer->Path[i].OwnerWWID, sizeof(__le64));
		sas_address = le64_to_cpu(sas_address);
		memcpy(&phys_disk->Path[i].OwnerWWID,
				&sas_address, sizeof(__le64));
	}

 out:

	if (buffer)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
		    dma_handle);

	return rc;
}
EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
/**
* mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
 * @ioc: Pointer to an Adapter Structure
*
* Return:
* 0 on success
* -EFAULT if read of config page header fails or data pointer not NULL
* -ENOMEM if pci_alloc failed
**/
int
mpt_findImVolumes(MPT_ADAPTER *ioc)
{
	IOCPage2_t *pIoc2;
	u8 *mem;
	dma_addr_t ioc2_dma;
	CONFIGPARMS cfg;
	ConfigPageHeader_t header;
	int rc = 0;
	int iocpage2sz;
	int i;

	/* Only meaningful on adapters running Integrated RAID firmware */
	if (!ioc->ir_firmware)
		return 0;

	/* Free the old page
	 */
	kfree(ioc->raid_data.pIocPg2);
	ioc->raid_data.pIocPg2 = NULL;
	mpt_inactive_raid_list_free(ioc);

	/* Read IOCP2 header then the page.
	 */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 2;
	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;
	cfg.pageAddr = 0;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.timeout = 0;
	if (mpt_config(ioc, &cfg) != 0)
		 return -EFAULT;

	if (header.PageLength == 0)
		return -EFAULT;

	iocpage2sz = header.PageLength * 4;
	pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
	if (!pIoc2)
		return -ENOMEM;

	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	cfg.physAddr = ioc2_dma;
	/* NOTE(review): a page-read failure here returns rc == 0 (success)
	 * with pIocPg2 left NULL -- looks like deliberate best-effort, but
	 * confirm callers tolerate a NULL pIocPg2 */
	if (mpt_config(ioc, &cfg) != 0)
		goto out;

	/* Cache a kmalloc'd copy of the page on the adapter */
	mem = kmalloc(iocpage2sz, GFP_KERNEL);
	if (!mem) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy(mem, (u8 *)pIoc2, iocpage2sz);
	ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;

	mpt_read_ioc_pg_3(ioc);

	/* Collect member-disk info for any volume that is inactive */
	for (i = 0; i < pIoc2->NumActiveVolumes ; i++)
		mpt_inactive_raid_volumes(ioc,
		    pIoc2->RaidVolume[i].VolumeBus,
		    pIoc2->RaidVolume[i].VolumeID);

 out:
	pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);

	return rc;
}
/* Read IOC Page 3 (physical disk list) and cache a kmalloc'd copy in
 * ioc->raid_data.pIocPg3.  Best-effort: always returns 0; on any
 * failure the cached pointer is simply left NULL. */
static int
mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
{
	IOCPage3_t *dma_page;
	u8 *copy;
	CONFIGPARMS cfg;
	ConfigPageHeader_t header;
	dma_addr_t dma_handle;
	int sz = 0;

	/* Discard any previously cached copy */
	kfree(ioc->raid_data.pIocPg3);
	ioc->raid_data.pIocPg3 = NULL;

	/* Fetch the page header first to learn the page length */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 3;
	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;
	cfg.pageAddr = 0;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.timeout = 0;
	if (mpt_config(ioc, &cfg) != 0)
		return 0;

	if (header.PageLength == 0)
		return 0;

	/* PageLength is in 32-bit words */
	sz = header.PageLength * 4;
	dma_page = pci_alloc_consistent(ioc->pcidev, sz, &dma_handle);
	if (!dma_page)
		return 0;

	/* Read the page into DMA memory, then duplicate it into a
	 * long-lived kmalloc'd buffer on the adapter */
	cfg.physAddr = dma_handle;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	if (mpt_config(ioc, &cfg) == 0) {
		copy = kmalloc(sz, GFP_KERNEL);
		if (copy) {
			memcpy(copy, (u8 *)dma_page, sz);
			ioc->raid_data.pIocPg3 = (IOCPage3_t *) copy;
		}
	}

	pci_free_consistent(ioc->pcidev, sz, dma_page, dma_handle);
	return 0;
}
/**
 *	mpt_read_ioc_pg_4 - Read IOC Page 4 (SEP info) into a persistent DMA buffer
 *	@ioc: Pointer to MPT_ADAPTER structure
 *
 *	Unlike the other page readers, the Page 4 buffer is kept in DMA
 *	memory across calls (ioc->spi_data.pIocPg4) and is reused if already
 *	allocated.  The first allocation reserves headroom for 4 additional
 *	SEP entries.  On read failure the buffer is freed and the cache
 *	pointer cleared.
 */
static void
mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
{
	IOCPage4_t		*pIoc4;
	CONFIGPARMS		 cfg;
	ConfigPageHeader_t	 header;
	dma_addr_t		 ioc4_dma;
	int			 iocpage4sz;

	/* Read and save IOC Page 4
	 */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 4;
	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;
	cfg.pageAddr = 0;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.timeout = 0;
	if (mpt_config(ioc, &cfg) != 0)
		return;

	if (header.PageLength == 0)
		return;

	if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
		iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
		pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
		if (!pIoc4)
			return;
		ioc->alloc_total += iocpage4sz;
	} else {
		/* Reuse the previously allocated persistent buffer. */
		ioc4_dma = ioc->spi_data.IocPg4_dma;
		iocpage4sz = ioc->spi_data.IocPg4Sz;
	}

	/* Read the Page into dma memory.
	 */
	cfg.physAddr = ioc4_dma;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	if (mpt_config(ioc, &cfg) == 0) {
		ioc->spi_data.pIocPg4 = (IOCPage4_t *) pIoc4;
		ioc->spi_data.IocPg4_dma = ioc4_dma;
		ioc->spi_data.IocPg4Sz = iocpage4sz;
	} else {
		/* Read failed: drop the buffer and the accounting for it. */
		pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
		ioc->spi_data.pIocPg4 = NULL;
		ioc->alloc_total -= iocpage4sz;
	}
}
/**
 *	mpt_read_ioc_pg_1 - Check and clamp the reply coalescing timeout
 *	@ioc: Pointer to MPT_ADAPTER structure
 *
 *	Reads IOC Page 1; if reply coalescing is enabled and its timeout
 *	exceeds MPT_COALESCING_TIMEOUT, writes the clamped value back to
 *	both the current page and NVRAM.  All failures are logged (debug
 *	level) and otherwise ignored.
 */
static void
mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
{
	IOCPage1_t		*pIoc1;
	CONFIGPARMS		 cfg;
	ConfigPageHeader_t	 header;
	dma_addr_t		 ioc1_dma;
	int			 iocpage1sz = 0;
	u32			 tmp;

	/* Check the Coalescing Timeout in IOC Page 1
	 */
	header.PageVersion = 0;
	header.PageLength = 0;
	header.PageNumber = 1;
	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
	cfg.cfghdr.hdr = &header;
	cfg.physAddr = -1;
	cfg.pageAddr = 0;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.dir = 0;
	cfg.timeout = 0;
	if (mpt_config(ioc, &cfg) != 0)
		return;

	if (header.PageLength == 0)
		return;

	/* Read Header good, alloc memory
	 */
	iocpage1sz = header.PageLength * 4;
	pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
	if (!pIoc1)
		return;

	/* Read the Page and check coalescing timeout
	 */
	cfg.physAddr = ioc1_dma;
	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	if (mpt_config(ioc, &cfg) == 0) {

		tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING;
		if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) {
			tmp = le32_to_cpu(pIoc1->CoalescingTimeout);

			dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Coalescing Enabled Timeout = %d\n",
					ioc->name, tmp));

			if (tmp > MPT_COALESCING_TIMEOUT) {
				/* Clamp the timeout; pIoc1 is the DMA buffer,
				 * so the following writes go straight back out.
				 */
				pIoc1->CoalescingTimeout = cpu_to_le32(MPT_COALESCING_TIMEOUT);

				/* Write NVRAM and current
				 */
				cfg.dir = 1;
				cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
				if (mpt_config(ioc, &cfg) == 0) {
					dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Reset Current Coalescing Timeout to = %d\n",
							ioc->name, MPT_COALESCING_TIMEOUT));

					cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM;
					if (mpt_config(ioc, &cfg) == 0) {
						dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
								"Reset NVRAM Coalescing Timeout to = %d\n",
								ioc->name, MPT_COALESCING_TIMEOUT));
					} else {
						dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
								"Reset NVRAM Coalescing Timeout Failed\n",
								ioc->name));
					}

				} else {
					dprintk(ioc, printk(MYIOC_s_WARN_FMT
					     "Reset of Current Coalescing Timeout Failed!\n",
					     ioc->name));
				}
			}

		} else {
			dprintk(ioc, printk(MYIOC_s_WARN_FMT "Coalescing Disabled\n", ioc->name));
		}
	}

	pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);

	return;
}
/**
 *	mpt_get_manufacturing_pg_0 - Read board identity strings
 *	@ioc: Pointer to MPT_ADAPTER structure
 *
 *	Reads Manufacturing Page 0 and copies the board name, assembly and
 *	tracer number into the adapter structure.  Failures are silently
 *	ignored, leaving the fields unchanged.
 */
static void
mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
{
	CONFIGPARMS		cfg;
	ConfigPageHeader_t	hdr;
	dma_addr_t		buf_dma;
	ManufacturingPage0_t	*pbuf = NULL;

	memset(&cfg, 0 , sizeof(CONFIGPARMS));
	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));

	hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.timeout = 10;

	if (mpt_config(ioc, &cfg) != 0)
		goto out;

	if (!cfg.cfghdr.hdr->PageLength)
		goto out;

	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
	if (!pbuf)
		goto out;

	cfg.physAddr = buf_dma;

	if (mpt_config(ioc, &cfg) != 0)
		goto out;

	/* NOTE(review): assumes the ioc->board_* fields are at least as
	 * large as the corresponding page fields — confirm in mptbase.h. */
	memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name));
	memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
	memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));

 out:

	if (pbuf)
		pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	SendEventNotification - Send EventNotification (on or off) request to adapter
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@EvSwitch: Event switch flags (non-zero enables event reporting)
 *	@sleepFlag: Specifies whether the process can sleep
 *
 *	Issues the request over the doorbell handshake (not the normal
 *	message-frame path) with a 30 second timeout.  Returns the
 *	mpt_handshake_req_reply_wait() result.
 */
static int
SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
{
	EventNotification_t	evn;
	MPIDefaultReply_t	reply_buf;

	memset(&evn, 0, sizeof(EventNotification_t));
	memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));

	evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
	evn.Switch = EvSwitch;
	evn.MsgContext = cpu_to_le32(mpt_base_index << 16);

	devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "Sending EventNotification (%d) request %p\n",
	    ioc->name, EvSwitch, &evn));

	return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
	    (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
	    sleepFlag);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	SendEventAck - Send EventAck request to MPT adapter.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@evnp: Pointer to original EventNotification request
 *
 *	Echoes the Event and EventContext from @evnp back to the firmware
 *	via a normal message frame.  Returns 0 on success, -1 when no
 *	message frames are available.
 */
static int
SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
{
	EventAck_t	*pAck;

	if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
		    ioc->name, __func__));
		return -1;
	}

	devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventAck\n", ioc->name));

	pAck->Function     = MPI_FUNCTION_EVENT_ACK;
	pAck->ChainOffset  = 0;
	pAck->Reserved[0]  = pAck->Reserved[1] = 0;
	pAck->MsgFlags     = 0;
	pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
	/* Copied verbatim from the notification; both already wire-endian. */
	pAck->Event        = evnp->Event;
	pAck->EventContext = evnp->EventContext;

	mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)pAck);

	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_config - Generic function to issue config message
 *	@ioc:   Pointer to an adapter structure
 *	@pCfg:  Pointer to a configuration structure. Struct contains
 *		action, page address, direction, physical address
 *		and pointer to a configuration page header
 *		Page header is updated.
 *
 *	Serialized through ioc->mptbase_cmds; must not be called from ISR
 *	context (it sleeps on a completion).  On timeout a soft reset is
 *	attempted (hard reset on the second try) and the request is retried
 *	once.
 *
 *	Returns 0 for success
 *	-EPERM if not allowed due to ISR context
 *	-EAGAIN if no msg frames currently available
 *	-EFAULT for non-successful reply or no reply (timeout)
 *	-EBUSY if a host reset is already in progress
 */
int
mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
{
	Config_t	*pReq;
	ConfigReply_t	*pReply;
	ConfigExtendedPageHeader_t  *pExtHdr = NULL;
	MPT_FRAME_HDR	*mf;
	int		 ii;
	int		 flagsLength;
	long		 timeout;
	int		 ret;
	u8		 page_type = 0, extend_page;
	unsigned long 	 timeleft;
	unsigned long	 flags;
	int		 in_isr;
	u8		 issue_hard_reset = 0;
	u8		 retry_count = 0;

	/* Prevent calling wait_event() (below), if caller happens
	 * to be in ISR context, because that is fatal!
	 */
	in_isr = in_interrupt();
	if (in_isr) {
		dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
				ioc->name));
		return -EPERM;
	}

	/* don't send a config page during diag reset */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress) {
		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: busy with host reset\n", ioc->name, __func__));
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);

	/* don't send if no chance of success */
	if (!ioc->active ||
	    mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: ioc not operational, %d, %xh\n",
		    ioc->name, __func__, ioc->active,
		    mpt_GetIocState(ioc, 0)));
		return -EFAULT;
	}

 retry_config:
	mutex_lock(&ioc->mptbase_cmds.mutex);
	/* init the internal cmd struct */
	memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
	INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)

	/* Get and Populate a free Frame
	 */
	if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
		dcprintk(ioc, printk(MYIOC_s_WARN_FMT
		"mpt_config: no msg frames!\n", ioc->name));
		ret = -EAGAIN;
		goto out;
	}

	pReq = (Config_t *)mf;
	pReq->Action = pCfg->action;
	pReq->Reserved = 0;
	pReq->ChainOffset = 0;
	pReq->Function = MPI_FUNCTION_CONFIG;

	/* Assume page type is not extended and clear "reserved" fields. */
	pReq->ExtPageLength = 0;
	pReq->ExtPageType = 0;
	pReq->MsgFlags = 0;

	for (ii=0; ii < 8; ii++)
		pReq->Reserved2[ii] = 0;

	pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion;
	pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength;
	pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber;
	pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);

	if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
		pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr;
		pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength);
		pReq->ExtPageType = pExtHdr->ExtPageType;
		pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;

		/* Page Length must be treated as a reserved field for the
		 * extended header.
		 */
		pReq->Header.PageLength = 0;
	}

	pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);

	/* Add a SGE to the config request.
	 */
	if (pCfg->dir)
		flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
	else
		flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;

	if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
	    MPI_CONFIG_PAGETYPE_EXTENDED) {
		flagsLength |= pExtHdr->ExtPageLength * 4;
		page_type = pReq->ExtPageType;
		extend_page = 1;
	} else {
		flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
		page_type = pReq->Header.PageType;
		extend_page = 0;
	}

	dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "Sending Config request type 0x%x, page 0x%x and action %d\n",
	    ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));

	ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
	/* Minimum 15 second timeout regardless of what the caller asked for. */
	timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
	mpt_put_msg_frame(mpt_base_index, ioc, mf);
	timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
		timeout);
	if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
		ret = -ETIME;
		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "Failed Sending Config request type 0x%x, page 0x%x,"
		    " action %d, status %xh, time left %ld\n\n",
			ioc->name, page_type, pReq->Header.PageNumber,
			pReq->Action, ioc->mptbase_cmds.status, timeleft));
		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
			goto out;
		if (!timeleft) {
			spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
			if (ioc->ioc_reset_in_progress) {
				spin_unlock_irqrestore(&ioc->taskmgmt_lock,
				    flags);
				/* Fix: arguments were swapped vs. the format
				 * string (ioc->name must come first, as in
				 * every other MYIOC_s_*_FMT call here). */
				printk(MYIOC_s_INFO_FMT "%s: host reset in"
				    " progress mpt_config timed out.!!\n",
				    ioc->name, __func__);
				return -EFAULT;
			}
			spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
			issue_hard_reset = 1;
		}
		goto out;
	}

	if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
		ret = -1;
		goto out;
	}

	pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
	ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
	if (ret == MPI_IOCSTATUS_SUCCESS) {
		/* Propagate the (possibly updated) header back to the caller. */
		if (extend_page) {
			pCfg->cfghdr.ehdr->ExtPageLength =
			    le16_to_cpu(pReply->ExtPageLength);
			pCfg->cfghdr.ehdr->ExtPageType =
			    pReply->ExtPageType;
		}
		pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
		pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
		pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
		pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
	}

	if (retry_count)
		printk(MYIOC_s_INFO_FMT "Retry completed "
		    "ret=0x%x timeleft=%ld\n",
		    ioc->name, ret, timeleft);

	dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
	     ret, le32_to_cpu(pReply->IOCLogInfo)));

 out:

	CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
	mutex_unlock(&ioc->mptbase_cmds.mutex);
	if (issue_hard_reset) {
		issue_hard_reset = 0;
		printk(MYIOC_s_WARN_FMT
		       "Issuing Reset from %s!!, doorbell=0x%08x\n",
		       ioc->name, __func__, mpt_GetIocState(ioc, 0));
		/* First timeout gets the cheap soft reset; escalate after. */
		if (retry_count == 0) {
			if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
				retry_count++;
		} else
			mpt_HardResetHandler(ioc, CAN_SLEEP);

		mpt_free_msg_frame(ioc, mf);
		/* attempt one retry for a timed out command */
		if (retry_count < 2) {
			printk(MYIOC_s_INFO_FMT
			    "Attempting Retry Config request"
			    " type 0x%x, page 0x%x,"
			    " action %d\n", ioc->name, page_type,
			    pCfg->cfghdr.hdr->PageNumber, pCfg->action);
			retry_count++;
			goto retry_config;
		}
	}
	return ret;

}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_ioc_reset - Base cleanup for hard reset
 *	@ioc: Pointer to the adapter structure
 *	@reset_phase: Indicates pre- or post-reset functionality
 *
 *	Remark: Frees resources with internally generated commands.
 *
 *	SETUP quiesces I/O; POST completes any internal commands that were
 *	pending when the reset hit, marking them DID_IOCRESET so the waiters
 *	know the reply was lost.
 */
static int
mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	switch (reset_phase) {
	case MPT_IOC_SETUP_RESET:
		ioc->taskmgmt_quiesce_io = 1;
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
		break;
	case MPT_IOC_PRE_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT_IOC_POST_RESET:
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "%s: MPT_IOC_POST_RESET\n",  ioc->name, __func__));
		/* wake up mptbase_cmds */
		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
			ioc->mptbase_cmds.status |=
			    MPT_MGMT_STATUS_DID_IOCRESET;
			complete(&ioc->mptbase_cmds.done);
		}
		/* wake up taskmgmt_cmds */
		if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
			ioc->taskmgmt_cmds.status |=
				MPT_MGMT_STATUS_DID_IOCRESET;
			complete(&ioc->taskmgmt_cmds.done);
		}
		break;
	default:
		break;
	}

	return 1;		/* currently means nothing really */
}
#ifdef CONFIG_PROC_FS /* { */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
 *
 *	Creates the root directory plus the "summary" and "version" files.
 *	On partial failure the already-created entries are removed.
 *
 *	Returns 0 for success, non-zero for failure.
 */
static int
procmpt_create(void)
{
	mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL);
	if (mpt_proc_root_dir == NULL)
		return -ENOTDIR;

	/* Fix: proc_create() return values were previously ignored, so a
	 * partial failure reported success with entries missing. */
	if (!proc_create("summary", S_IRUGO, mpt_proc_root_dir, &mpt_summary_proc_fops))
		goto out_remove_root;
	if (!proc_create("version", S_IRUGO, mpt_proc_root_dir, &mpt_version_proc_fops))
		goto out_remove_summary;

	return 0;

 out_remove_summary:
	remove_proc_entry("summary", mpt_proc_root_dir);
 out_remove_root:
	remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL);
	mpt_proc_root_dir = NULL;
	return -ENOTDIR;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
 *
 *	Removes the two files before their parent directory; the reverse
 *	of procmpt_create().
 */
static void
procmpt_destroy(void)
{
	remove_proc_entry("version", mpt_proc_root_dir);
	remove_proc_entry("summary", mpt_proc_root_dir);
	remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
*/
static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan);

/*
 * Show one adapter's summary (per-ioc file, ioc in m->private) or,
 * for the top-level /proc/mpt/summary, every adapter on ioc_list.
 */
static int mpt_summary_proc_show(struct seq_file *m, void *v)
{
	MPT_ADAPTER *ioc = m->private;

	if (ioc) {
		seq_mpt_print_ioc_summary(ioc, m, 1);
	} else {
		list_for_each_entry(ioc, &ioc_list, list) {
			seq_mpt_print_ioc_summary(ioc, m, 1);
		}
	}

	return 0;
}
/* seq_file open: PDE data carries the MPT_ADAPTER* (NULL for the global file). */
static int mpt_summary_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mpt_summary_proc_show, PDE(inode)->data);
}
/* Read-only seq_file operations for the "summary" proc entry. */
static const struct file_operations mpt_summary_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mpt_summary_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * /proc/mpt/version: print the driver version and one line per
 * registered protocol-driver class (first registration of each class
 * only).  Slot 0 of the callback table is reserved, so the scan stops
 * before it.
 */
static int mpt_version_proc_show(struct seq_file *m, void *v)
{
	u8	 cb_idx;
	int	 scsi, fc, sas, lan, ctl, targ;
	char	*drvname;

	seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
	seq_printf(m, " Fusion MPT base driver\n");

	/* Fix: dropped the unused 'dmp' counter present in the original. */
	scsi = fc = sas = lan = ctl = targ = 0;
	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		drvname = NULL;
		if (MptCallbacks[cb_idx]) {
			switch (MptDriverClass[cb_idx]) {
			case MPTSPI_DRIVER:
				if (!scsi++) drvname = "SPI host";
				break;
			case MPTFC_DRIVER:
				if (!fc++) drvname = "FC host";
				break;
			case MPTSAS_DRIVER:
				if (!sas++) drvname = "SAS host";
				break;
			case MPTLAN_DRIVER:
				if (!lan++) drvname = "LAN";
				break;
			case MPTSTM_DRIVER:
				if (!targ++) drvname = "SCSI target";
				break;
			case MPTCTL_DRIVER:
				if (!ctl++) drvname = "ioctl";
				break;
			}

			if (drvname)
				seq_printf(m, " Fusion MPT %s driver\n", drvname);
		}
	}
	return 0;
}
/* seq_file open for the global "version" entry; no per-ioc data. */
static int mpt_version_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mpt_version_proc_show, NULL);
}
/* Read-only seq_file operations for the "version" proc entry. */
static const struct file_operations mpt_version_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mpt_version_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Per-ioc /proc "info" file: dump IOC facts, frame-pool geometry and
 * per-port addressing (LAN MAC / FC WWNs) for the adapter in m->private.
 */
static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
{
	MPT_ADAPTER	*ioc = m->private;
	char		 expVer[32];
	int		 sz;
	int		 p;

	mpt_get_fw_exp_ver(expVer, ioc);

	seq_printf(m, "%s:", ioc->name);
	if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
		seq_printf(m, "  (f/w download boot flag set)");
//	if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL)
//		seq_printf(m, "  CONFIG_CHECKSUM_FAIL!");

	seq_printf(m, "\n  ProductID = 0x%04x (%s)\n",
			ioc->facts.ProductID,
			ioc->prod_name);
	seq_printf(m, "  FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer);
	if (ioc->facts.FWImageSize)
		seq_printf(m, " (fw_size=%d)", ioc->facts.FWImageSize);
	seq_printf(m, "\n  MsgVersion = 0x%04x\n", ioc->facts.MsgVersion);
	seq_printf(m, "  FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit);
	seq_printf(m, "  EventState = 0x%02x\n", ioc->facts.EventState);

	seq_printf(m, "  CurrentHostMfaHighAddr = 0x%08x\n",
			ioc->facts.CurrentHostMfaHighAddr);
	seq_printf(m, "  CurrentSenseBufferHighAddr = 0x%08x\n",
			ioc->facts.CurrentSenseBufferHighAddr);

	seq_printf(m, "  MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
	seq_printf(m, "  MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);

	seq_printf(m, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
					(void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
	/*
	 *  Rounding UP to nearest 4-kB boundary here...
	 */
	sz = (ioc->req_sz * ioc->req_depth) + 128;
	sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000;
	seq_printf(m, "    {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n",
					ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz);
	seq_printf(m, "    {MaxReqSz=%d}   {MaxReqDepth=%d}\n",
					4*ioc->facts.RequestFrameSize,
					ioc->facts.GlobalCredits);

	seq_printf(m, "  Frames   @ 0x%p (Dma @ 0x%p)\n",
					(void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
	sz = (ioc->reply_sz * ioc->reply_depth) + 128;
	seq_printf(m, "    {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
					ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
	seq_printf(m, "    {MaxRepSz=%d}   {MaxRepDepth=%d}\n",
					ioc->facts.CurReplyFrameSize,
					ioc->facts.ReplyQueueDepth);

	/* MaxDevices == 0 is the firmware's way of saying 255. */
	seq_printf(m, "  MaxDevices = %d\n",
			(ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices);
	seq_printf(m, "  MaxBuses = %d\n", ioc->facts.MaxBuses);

	/* per-port info */
	for (p=0; p < ioc->facts.NumberOfPorts; p++) {
		seq_printf(m, "  PortNumber = %d (of %d)\n",
				p+1,
				ioc->facts.NumberOfPorts);
		if (ioc->bus_type == FC) {
			if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
				/* MAC stored little-endian; print high byte first. */
				u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
				seq_printf(m, "    LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
						a[5], a[4], a[3], a[2], a[1], a[0]);
			}
			seq_printf(m, "    WWN = %08X%08X:%08X%08X\n",
					ioc->fc_port_page0[p].WWNN.High,
					ioc->fc_port_page0[p].WWNN.Low,
					ioc->fc_port_page0[p].WWPN.High,
					ioc->fc_port_page0[p].WWPN.Low);
		}
	}

	return 0;
}
/* seq_file open: PDE data carries this entry's MPT_ADAPTER*. */
static int mpt_iocinfo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mpt_iocinfo_proc_show, PDE(inode)->data);
}
/* Read-only seq_file operations for the per-ioc "info" proc entry. */
static const struct file_operations mpt_iocinfo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mpt_iocinfo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS } */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_get_fw_exp_ver - Format an "experimental firmware" version suffix.
 * @buf: destination (at least 32 bytes; set to "" for release firmware)
 * @ioc: adapter whose facts.FWVersion.Word is inspected
 *
 * Experimental builds carry 0x0E in the top byte; month/day are packed
 * into the middle bytes and bit 15 flags an MDBG build.
 */
static void
mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc)
{
	u32 fw_word = ioc->facts.FWVersion.Word;

	buf[0] = '\0';
	if ((fw_word >> 24) != 0x0E)
		return;

	sprintf(buf, " (Exp %02d%02d)",
	    (fw_word >> 16) & 0x00FF,	/* Month */
	    (fw_word >> 8) & 0x1F);	/* Day */

	/* insider hack! */
	if ((fw_word >> 8) & 0x80)
		strcat(buf, " [MDBG]");
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_print_ioc_summary - Write ASCII summary of IOC to a buffer.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@buffer: Pointer to buffer where IOC summary info should be written
 *	@size: Pointer to number of bytes we wrote (set by this routine)
 *	@len: Offset at which to start writing in buffer
 *	@showlan: Display LAN stuff?
 *
 *	This routine writes (english readable) ASCII text, which represents
 *	a summary of IOC information, to a buffer.  The caller must supply
 *	a buffer large enough for the line; no bounds checking is done.
 */
void
mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan)
{
	char expVer[32];
	int y;

	mpt_get_fw_exp_ver(expVer, ioc);

	/*
	 *  Shorter summary of attached ioc's...
	 */
	y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
			ioc->name,
			ioc->prod_name,
			MPT_FW_REV_MAGIC_ID_STRING,	/* "FwRev=" or somesuch */
			ioc->facts.FWVersion.Word,
			expVer,
			ioc->facts.NumberOfPorts,
			ioc->req_depth);

	if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
		u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
		y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
			a[5], a[4], a[3], a[2], a[1], a[0]);
	}

	y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);

	if (!ioc->active)
		y += sprintf(buffer+len+y, " (disabled)");

	y += sprintf(buffer+len+y, "\n");

	*size = y;
}
/*
 * seq_file counterpart of mpt_print_ioc_summary(): emit the same
 * one-line adapter summary into @m.
 */
static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
{
	char expVer[32];

	mpt_get_fw_exp_ver(expVer, ioc);

	/*
	 *  Shorter summary of attached ioc's...
	 */
	seq_printf(m, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
			ioc->name,
			ioc->prod_name,
			MPT_FW_REV_MAGIC_ID_STRING,	/* "FwRev=" or somesuch */
			ioc->facts.FWVersion.Word,
			expVer,
			ioc->facts.NumberOfPorts,
			ioc->req_depth);

	if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
		u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
		seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
			a[5], a[4], a[3], a[2], a[1], a[0]);
	}

	seq_printf(m, ", IRQ=%d", ioc->pci_irq);

	if (!ioc->active)
		seq_printf(m, " (disabled)");

	seq_putc(m, '\n');
}
/**
 *	mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
 *	@ioc: Pointer to MPT_ADAPTER structure
 *
 *	Returns 0 for SUCCESS or -1 if FAILED.
 *
 *	If -1 is return, then it was not possible to set the flags
 *	(a host reset or another task-management operation is already in
 *	flight on this adapter or its alternate).  On success the flags are
 *	set on both this adapter and its alt_ioc, all under taskmgmt_lock.
 **/
int
mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
{
	unsigned long	 flags;
	int		 retval;

	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
	    (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
		retval = -1;
		goto out;
	}
	retval = 0;
	ioc->taskmgmt_in_progress = 1;
	ioc->taskmgmt_quiesce_io = 1;
	if (ioc->alt_ioc) {
		ioc->alt_ioc->taskmgmt_in_progress = 1;
		ioc->alt_ioc->taskmgmt_quiesce_io = 1;
	}
 out:
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
	return retval;
}
EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
/**
 *	mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
 *	@ioc: Pointer to MPT_ADAPTER structure
 *
 *	Inverse of mpt_set_taskmgmt_in_progress_flag(): clears the
 *	in-progress and quiesce flags on the adapter and its alt_ioc
 *	under taskmgmt_lock.
 **/
void
mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
{
	unsigned long	 flags;

	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	ioc->taskmgmt_in_progress = 0;
	ioc->taskmgmt_quiesce_io = 0;
	if (ioc->alt_ioc) {
		ioc->alt_ioc->taskmgmt_in_progress = 0;
		ioc->alt_ioc->taskmgmt_quiesce_io = 0;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
}
EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
/**
 *	mpt_halt_firmware - Halts the firmware if it is operational and panic
 *	the kernel
 *	@ioc: Pointer to MPT_ADAPTER structure
 *
 *	If the IOC is already faulted, panic with the fault code; otherwise
 *	force a halt by writing a magic value to the doorbell, then panic.
 *	This function never returns.
 **/
void
mpt_halt_firmware(MPT_ADAPTER *ioc)
{
	u32	 ioc_raw_state;

	ioc_raw_state = mpt_GetIocState(ioc, 0);

	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
		printk(MYIOC_s_ERR_FMT "IOC is in FAULT state (%04xh)!!!\n",
			ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
		panic("%s: IOC Fault (%04xh)!!!\n", ioc->name,
			ioc_raw_state & MPI_DOORBELL_DATA_MASK);
	} else {
		/* Magic doorbell write that stops the firmware in its tracks. */
		CHIPREG_WRITE32(&ioc->chip->Doorbell, 0xC0FFEE00);
		panic("%s: Firmware is halted due to command timeout\n",
			ioc->name);
	}
}
EXPORT_SYMBOL(mpt_halt_firmware);
/**
 *	mpt_SoftResetHandler - Issues a less expensive reset
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@sleepFlag: Indicates if sleep or schedule must be called.
 *
 *	Returns 0 for SUCCESS or -1 if FAILED.
 *
 *	Message Unit Reset - instructs the IOC to reset the Reply Post and
 *	Free FIFO's. All the Message Frames on Reply Free FIFO are discarded.
 *	All posted buffers are freed, and event notification is turned off.
 *	IOC doesn't reply to any outstanding request. This will transfer IOC
 *	to READY state.
 *
 *	Refuses to run if the IOC is faulted/resetting, the bus type is FC,
 *	a host reset is in progress, or task management is active.  On any
 *	failure past that point ioc->active stays 0 and the caller is
 *	expected to escalate to a hard reset.
 **/
int
mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
	int		 rc;
	int		 ii;
	u8		 cb_idx;
	unsigned long	 flags;
	u32		 ioc_state;
	unsigned long	 time_count;

	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
		ioc->name));

	ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;

	if (mpt_fwfault_debug)
		mpt_halt_firmware(ioc);

	if (ioc_state == MPI_IOC_STATE_FAULT ||
	    ioc_state == MPI_IOC_STATE_RESET) {
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "skipping, either in FAULT or RESET state!\n", ioc->name));
		return -1;
	}

	if (ioc->bus_type == FC) {
		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		    "skipping, because the bus type is FC!\n", ioc->name));
		return -1;
	}

	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress) {
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		return -1;
	}
	ioc->ioc_reset_in_progress = 1;
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);

	rc = -1;

	/* Tell every registered protocol driver a reset is being set up. */
	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		if (MptResetHandlers[cb_idx])
			mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
	}

	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->taskmgmt_in_progress) {
		/* Back out: cannot soft-reset under active task management. */
		ioc->ioc_reset_in_progress = 0;
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		return -1;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
	/* Disable reply interrupts (also blocks FreeQ) */
	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
	ioc->active = 0;
	time_count = jiffies;

	rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);

	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		if (MptResetHandlers[cb_idx])
			mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
	}

	if (rc)
		goto out;

	ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
	if (ioc_state != MPI_IOC_STATE_READY)
		goto out;

	for (ii = 0; ii < 5; ii++) {
		/* Get IOC facts! Allow 5 retries */
		rc = GetIocFacts(ioc, sleepFlag,
			MPT_HOSTEVENT_IOC_RECOVER);
		if (rc == 0)
			break;
		if (sleepFlag == CAN_SLEEP)
			msleep(100);
		else
			mdelay(100);
	}
	if (ii == 5)
		goto out;

	rc = PrimeIocFifos(ioc);
	if (rc != 0)
		goto out;

	rc = SendIocInit(ioc, sleepFlag);
	if (rc != 0)
		goto out;

	rc = SendEventNotification(ioc, 1, sleepFlag);
	if (rc != 0)
		goto out;

	if (ioc->hard_resets < -1)
		ioc->hard_resets++;

	/*
	 * At this point, we know soft reset succeeded.
	 */

	ioc->active = 1;
	CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);

 out:
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	ioc->ioc_reset_in_progress = 0;
	ioc->taskmgmt_quiesce_io = 0;
	ioc->taskmgmt_in_progress = 0;
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);

	if (ioc->active) {	/* otherwise, hard reset coming */
		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
			if (MptResetHandlers[cb_idx])
				mpt_signal_reset(cb_idx, ioc,
					MPT_IOC_POST_RESET);
		}
	}

	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
		"SoftResetHandler: completed (%d seconds): %s\n",
		ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
		((rc == 0) ? "SUCCESS" : "FAILED")));

	return rc;
}
/**
 *	mpt_Soft_Hard_ResetHandler - Try less expensive reset
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@sleepFlag: Indicates if sleep or schedule must be called.
 *
 *	Returns 0 for SUCCESS or -1 if FAILED.
 *	Attempts the cheap message-unit soft reset first and escalates to
 *	the expensive diagnostic hard reset only when that fails.
 **/
int
mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag) {
	int rc;

	rc = mpt_SoftResetHandler(ioc, sleepFlag);
	if (rc != 0)
		rc = mpt_HardResetHandler(ioc, sleepFlag);
	return rc;
}
EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* Reset Handling
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_HardResetHandler - Generic reset handler
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@sleepFlag: Indicates if sleep or schedule must be called.
 *
 *	Issues SCSI Task Management call based on input arg values.
 *	If TaskMgmt fails, returns associated SCSI request.
 *
 *	Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
 *	or a non-interrupt thread.  In the former, must not call schedule().
 *
 *	Note: A return of -1 is a FATAL error case, as it means a
 *	FW reload/initialization failed.
 *
 *	Returns 0 for SUCCESS or -1 if FAILED.
 */
int
mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
	int	 rc;
	u8	 cb_idx;
	unsigned long	 flags;
	unsigned long	 time_count;

	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
#ifdef MFCNT
	printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name);
	printk("MF count 0x%x !\n", ioc->mfcnt);
#endif
	if (mpt_fwfault_debug)
		mpt_halt_firmware(ioc);

	/* Reset the adapter. Prevent more than 1 call to
	 * mpt_do_ioc_recovery at any instant in time.
	 */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	if (ioc->ioc_reset_in_progress) {
		/* Another thread owns the reset: poll until it finishes and
		 * report its outcome.
		 * NOTE(review): ssleep() here while wait_on_reset_completion
		 * is set implies the caller can sleep regardless of
		 * @sleepFlag — confirm callers never reach this in atomic
		 * context.
		 */
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		ioc->wait_on_reset_completion = 1;
		do {
			ssleep(1);
		} while (ioc->ioc_reset_in_progress == 1);
		ioc->wait_on_reset_completion = 0;
		return ioc->reset_status;
	}
	if (ioc->wait_on_reset_completion) {
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		rc = 0;
		time_count = jiffies;
		goto exit;
	}
	ioc->ioc_reset_in_progress = 1;
	if (ioc->alt_ioc)
		ioc->alt_ioc->ioc_reset_in_progress = 1;
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);


	/* The SCSI driver needs to adjust timeouts on all current
	 * commands prior to the diagnostic reset being issued.
	 * Prevents timeouts occurring during a diagnostic reset...very bad.
	 * For all other protocol drivers, this is a no-op.
	 */
	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		if (MptResetHandlers[cb_idx]) {
			mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
			if (ioc->alt_ioc)
				mpt_signal_reset(cb_idx, ioc->alt_ioc,
					MPT_IOC_SETUP_RESET);
		}
	}

	time_count = jiffies;
	rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
	if (rc != 0) {
		printk(KERN_WARNING MYNAM
		       ": WARNING - (%d) Cannot recover %s, doorbell=0x%08x\n",
		       rc, ioc->name, mpt_GetIocState(ioc, 0));
	} else {
		if (ioc->hard_resets < -1)
			ioc->hard_resets++;
	}

	/* Clear the in-progress state on both adapters and publish the
	 * outcome for any thread polling above.
	 */
	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
	ioc->ioc_reset_in_progress = 0;
	ioc->taskmgmt_quiesce_io = 0;
	ioc->taskmgmt_in_progress = 0;
	ioc->reset_status = rc;
	if (ioc->alt_ioc) {
		ioc->alt_ioc->ioc_reset_in_progress = 0;
		ioc->alt_ioc->taskmgmt_quiesce_io = 0;
		ioc->alt_ioc->taskmgmt_in_progress = 0;
	}
	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);

	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		if (MptResetHandlers[cb_idx]) {
			mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
			if (ioc->alt_ioc)
				mpt_signal_reset(cb_idx,
					ioc->alt_ioc, MPT_IOC_POST_RESET);
		}
	}
exit:
	dtmprintk(ioc,
	    printk(MYIOC_s_DEBUG_FMT
		"HardResetHandler: completed (%d seconds): %s\n", ioc->name,
		jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
		"SUCCESS" : "FAILED")));

	return rc;
}
#ifdef CONFIG_FUSION_LOGGING
/**
 *	mpt_display_event_info - format and log an IOC event notification
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@pEventReply: Pointer to EventNotification reply frame
 *
 *	Translates the event code (and, for several event types, fields of
 *	the first event data word) into a human readable description in
 *	ioc->evStr and prints it through the debug logging macros.  At
 *	higher verbosity the raw event data words are dumped as well.
 *	Only built when CONFIG_FUSION_LOGGING is enabled.
 *
 *	Fixes vs. original: log-string typos corrected ("Broadcast
 *	Primitive", "Persistency", "initiators", "Raid: Smart Data").
 */
static void
mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
{
	char *ds = NULL;
	u32 evData0;
	int ii;
	u8 event;
	char *evStr = ioc->evStr;

	/* Event code is in the low byte; Data[0] is only meaningful for
	 * event types that carry data, but reading it is always safe here.
	 */
	event = le32_to_cpu(pEventReply->Event) & 0xFF;
	evData0 = le32_to_cpu(pEventReply->Data[0]);

	switch(event) {
	case MPI_EVENT_NONE:
		ds = "None";
		break;
	case MPI_EVENT_LOG_DATA:
		ds = "Log Data";
		break;
	case MPI_EVENT_STATE_CHANGE:
		ds = "State Change";
		break;
	case MPI_EVENT_UNIT_ATTENTION:
		ds = "Unit Attention";
		break;
	case MPI_EVENT_IOC_BUS_RESET:
		ds = "IOC Bus Reset";
		break;
	case MPI_EVENT_EXT_BUS_RESET:
		ds = "External Bus Reset";
		break;
	case MPI_EVENT_RESCAN:
		ds = "Bus Rescan Event";
		break;
	case MPI_EVENT_LINK_STATUS_CHANGE:
		if (evData0 == MPI_EVENT_LINK_STATUS_FAILURE)
			ds = "Link Status(FAILURE) Change";
		else
			ds = "Link Status(ACTIVE) Change";
		break;
	case MPI_EVENT_LOOP_STATE_CHANGE:
		if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
			ds = "Loop State(LIP) Change";
		else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
			ds = "Loop State(LPE) Change";
		else
			ds = "Loop State(LPB) Change";
		break;
	case MPI_EVENT_LOGOUT:
		ds = "Logout";
		break;
	case MPI_EVENT_EVENT_CHANGE:
		if (evData0)
			ds = "Events ON";
		else
			ds = "Events OFF";
		break;
	case MPI_EVENT_INTEGRATED_RAID:
	{
		u8 ReasonCode = (u8)(evData0 >> 16);
		switch (ReasonCode) {
		case MPI_EVENT_RAID_RC_VOLUME_CREATED :
			ds = "Integrated Raid: Volume Created";
			break;
		case MPI_EVENT_RAID_RC_VOLUME_DELETED :
			ds = "Integrated Raid: Volume Deleted";
			break;
		case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
			ds = "Integrated Raid: Volume Settings Changed";
			break;
		case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
			ds = "Integrated Raid: Volume Status Changed";
			break;
		case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
			ds = "Integrated Raid: Volume Physdisk Changed";
			break;
		case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
			ds = "Integrated Raid: Physdisk Created";
			break;
		case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
			ds = "Integrated Raid: Physdisk Deleted";
			break;
		case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
			ds = "Integrated Raid: Physdisk Settings Changed";
			break;
		case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
			ds = "Integrated Raid: Physdisk Status Changed";
			break;
		case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
			ds = "Integrated Raid: Domain Validation Needed";
			break;
		case MPI_EVENT_RAID_RC_SMART_DATA :
			/* was "Integrated Raid; Smart Data" - ':' like siblings */
			ds = "Integrated Raid: Smart Data";
			break;
		case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
			ds = "Integrated Raid: Replace Action Started";
			break;
		default:
			ds = "Integrated Raid";
			break;
		}
		break;
	}
	case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
		ds = "SCSI Device Status Change";
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	{
		/* Data[0] layout: id(7:0), channel(15:8), reason code(23:16) */
		u8 id = (u8)(evData0);
		u8 channel = (u8)(evData0 >> 8);
		u8 ReasonCode = (u8)(evData0 >> 16);
		switch (ReasonCode) {
		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Added: "
			    "id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Deleted: "
			    "id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: SMART Data: "
			    "id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
			/* was "No Persistancy" */
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: No Persistency: "
			    "id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Unsupported Device "
			    "Discovered : id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Internal Device "
			    "Reset : id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Internal Task "
			    "Abort : id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Internal Abort "
			    "Task Set : id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Internal Clear "
			    "Task Set : id=%d channel=%d", id, channel);
			break;
		case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Internal Query "
			    "Task : id=%d channel=%d", id, channel);
			break;
		default:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS Device Status Change: Unknown: "
			    "id=%d channel=%d", id, channel);
			break;
		}
		break;
	}
	case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
		ds = "Bus Timer Expired";
		break;
	case MPI_EVENT_QUEUE_FULL:
	{
		u16 curr_depth = (u16)(evData0 >> 16);
		u8 channel = (u8)(evData0 >> 8);
		u8 id = (u8)(evData0);
		snprintf(evStr, EVENT_DESCR_STR_SZ,
		    "Queue Full: channel=%d id=%d depth=%d",
		    channel, id, curr_depth);
		break;
	}
	case MPI_EVENT_SAS_SES:
		ds = "SAS SES Event";
		break;
	case MPI_EVENT_PERSISTENT_TABLE_FULL:
		ds = "Persistent Table Full";
		break;
	case MPI_EVENT_SAS_PHY_LINK_STATUS:
	{
		u8 LinkRates = (u8)(evData0 >> 8);
		u8 PhyNumber = (u8)(evData0);
		/* Isolate the "current" link-rate nibble before decoding */
		LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >>
			MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
		switch (LinkRates) {
		case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d:"
			    " Rate Unknown",PhyNumber);
			break;
		case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d:"
			    " Phy Disabled",PhyNumber);
			break;
		case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d:"
			    " Failed Speed Nego",PhyNumber);
			break;
		case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d:"
			    " Sata OOB Completed",PhyNumber);
			break;
		case MPI_EVENT_SAS_PLS_LR_RATE_1_5:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d:"
			    " Rate 1.5 Gbps",PhyNumber);
			break;
		case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d:"
			    " Rate 3.0 Gbps", PhyNumber);
			break;
		case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d:"
			    " Rate 6.0 Gbps", PhyNumber);
			break;
		default:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS PHY Link Status: Phy=%d", PhyNumber);
			break;
		}
		break;
	}
	case MPI_EVENT_SAS_DISCOVERY_ERROR:
		ds = "SAS Discovery Error";
		break;
	case MPI_EVENT_IR_RESYNC_UPDATE:
	{
		u8 resync_complete = (u8)(evData0 >> 16);
		snprintf(evStr, EVENT_DESCR_STR_SZ,
		    "IR Resync Update: Complete = %d:",resync_complete);
		break;
	}
	case MPI_EVENT_IR2:
	{
		u8 id = (u8)(evData0);
		u8 channel = (u8)(evData0 >> 8);
		u8 phys_num = (u8)(evData0 >> 24);
		u8 ReasonCode = (u8)(evData0 >> 16);
		switch (ReasonCode) {
		case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: LD State Changed: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: PD State Changed "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: Bad Block Table Full: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_PD_INSERTED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: PD Inserted: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_PD_REMOVED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: PD Removed: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: Foreign CFG Detected: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: Rebuild Medium Error: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: Dual Port Added: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "IR2: Dual Port Removed: "
			    "id=%d channel=%d phys_num=%d",
			    id, channel, phys_num);
			break;
		default:
			ds = "IR2";
			break;
		}
		break;
	}
	case MPI_EVENT_SAS_DISCOVERY:
	{
		if (evData0)
			ds = "SAS Discovery: Start";
		else
			ds = "SAS Discovery: Stop";
		break;
	}
	case MPI_EVENT_LOG_ENTRY_ADDED:
		ds = "SAS Log Entry Added";
		break;
	case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		u8 phy_num = (u8)(evData0);
		u8 port_num = (u8)(evData0 >> 8);
		u8 port_width = (u8)(evData0 >> 16);
		u8 primitive = (u8)(evData0 >> 24);
		/* was "Broadcase Primative"/"primative" */
		snprintf(evStr, EVENT_DESCR_STR_SZ,
		    "SAS Broadcast Primitive: phy=%d port=%d "
		    "width=%d primitive=0x%02x",
		    phy_num, port_num, port_width, primitive);
		break;
	}
	case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
	{
		u8 reason = (u8)(evData0);
		switch (reason) {
		case MPI_EVENT_SAS_INIT_RC_ADDED:
			ds = "SAS Initiator Status Change: Added";
			break;
		case MPI_EVENT_SAS_INIT_RC_REMOVED:
			ds = "SAS Initiator Status Change: Deleted";
			break;
		default:
			ds = "SAS Initiator Status Change";
			break;
		}
		break;
	}
	case MPI_EVENT_SAS_INIT_TABLE_OVERFLOW:
	{
		u8 max_init = (u8)(evData0);
		u8 current_init = (u8)(evData0 >> 8);
		/* was "current initators" */
		snprintf(evStr, EVENT_DESCR_STR_SZ,
		    "SAS Initiator Device Table Overflow: max initiators=%02d "
		    "current initiators=%02d",
		    max_init, current_init);
		break;
	}
	case MPI_EVENT_SAS_SMP_ERROR:
	{
		u8 status = (u8)(evData0);
		u8 port_num = (u8)(evData0 >> 8);
		u8 result = (u8)(evData0 >> 16);
		if (status == MPI_EVENT_SAS_SMP_FUNCTION_RESULT_VALID)
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS SMP Error: port=%d result=0x%02x",
			    port_num, result);
		else if (status == MPI_EVENT_SAS_SMP_CRC_ERROR)
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS SMP Error: port=%d : CRC Error",
			    port_num);
		else if (status == MPI_EVENT_SAS_SMP_TIMEOUT)
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS SMP Error: port=%d : Timeout",
			    port_num);
		else if (status == MPI_EVENT_SAS_SMP_NO_DESTINATION)
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS SMP Error: port=%d : No Destination",
			    port_num);
		else if (status == MPI_EVENT_SAS_SMP_BAD_DESTINATION)
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS SMP Error: port=%d : Bad Destination",
			    port_num);
		else
			snprintf(evStr, EVENT_DESCR_STR_SZ,
			    "SAS SMP Error: port=%d : status=0x%02x",
			    port_num, status);
		break;
	}
	case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
	{
		u8 reason = (u8)(evData0);
		switch (reason) {
		case MPI_EVENT_SAS_EXP_RC_ADDED:
			ds = "Expander Status Change: Added";
			break;
		case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
			ds = "Expander Status Change: Deleted";
			break;
		default:
			ds = "Expander Status Change";
			break;
		}
		break;
	}
	/*
	 *  MPT base "custom" events may be added here...
	 */
	default:
		ds = "Unknown";
		break;
	}
	/* ds set: fixed description; otherwise the case above already
	 * formatted evStr with snprintf().  All ds literals are well under
	 * EVENT_DESCR_STR_SZ, so the strncpy() result stays terminated.
	 */
	if (ds)
		strncpy(evStr, ds, EVENT_DESCR_STR_SZ);

	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
	    "MPT event:(%02Xh) : %s\n",
	    ioc->name, event, evStr));

	devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
	    ": Event data:\n"));
	for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
		devtverboseprintk(ioc, printk(" %08x",
		    le32_to_cpu(pEventReply->Data[ii])));
	devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
}
#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* ProcessEventNotification - Route EventNotificationReply to all event handlers
* @ioc: Pointer to MPT_ADAPTER structure
* @pEventReply: Pointer to EventNotification reply frame
* @evHandlers: Pointer to integer, number of event handlers
*
* Routes a received EventNotificationReply to all currently registered
* event handlers.
* Returns sum of event handlers return values.
*/
static int
ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply, int *evHandlers)
{
	u16 evDataLen;
	u32 evData0 = 0;
	int ii;
	u8 cb_idx;
	int r = 0;
	int handlers = 0;
	u8 event;

	/*
	 * Do platform normalization of values
	 */
	event = le32_to_cpu(pEventReply->Event) & 0xFF;
	evDataLen = le16_to_cpu(pEventReply->EventDataLength);
	if (evDataLen) {
		/* Data[0] is only valid when the event actually carries data */
		evData0 = le32_to_cpu(pEventReply->Data[0]);
	}

#ifdef CONFIG_FUSION_LOGGING
	if (evDataLen)
		mpt_display_event_info(ioc, pEventReply);
#endif

	/*
	 * Do general / base driver event processing
	 */
	switch(event) {
	case MPI_EVENT_EVENT_CHANGE:		/* 0A */
		if (evDataLen) {
			u8 evState = evData0 & 0xFF;

			/* XXX TODO: Handle EventState unexpectedly OFF (0)? */

			/* Update EventState field in cached IocFacts */
			if (ioc->facts.Function) {
				ioc->facts.EventState = evState;
			}
		}
		break;
	case MPI_EVENT_INTEGRATED_RAID:
		/* RAID events also feed the base driver's RAID bookkeeping */
		mptbase_raid_process_event_data(ioc,
		    (MpiEventDataRaid_t *)pEventReply->Data);
		break;
	default:
		break;
	}

	/*
	 * Should this event be logged? Events are written sequentially.
	 * When buffer is full, start again at the top.
	 * (ioc->events is a fixed-size circular log; ioc->eventTypes is a
	 * per-event-type enable bitmask.)
	 */
	if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
		int idx;

		idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;

		ioc->events[idx].event = event;
		ioc->events[idx].eventContext = ioc->eventContext;

		/* Only the first two data words are kept; pad with zeros */
		for (ii = 0; ii < 2; ii++) {
			if (ii < evDataLen)
				ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]);
			else
				ioc->events[idx].data[ii] = 0;
		}

		ioc->eventContext++;
	}

	/*
	 * Call each currently registered protocol event handler.
	 * Slot 0 is reserved, hence the cb_idx > 0 loop condition.
	 */
	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
		if (MptEvHandlers[cb_idx]) {
			devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			    "Routing Event to event handler #%d\n",
			    ioc->name, cb_idx));
			r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
			handlers++;
		}
	}
	/* FIXME?  Examine results here? */

	/*
	 *	If needed, send (a single) EventAck.
	 */
	if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
		devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
			"EventAck required\n",ioc->name));
		if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
			devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SendEventAck returned %d\n",
			    ioc->name, ii));
		}
	}

	*evHandlers = handlers;
	return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_fc_log_info - Log information returned from Fibre Channel IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo reply word from the IOC
*
* Refer to lsi/mpi_log_fc.h.
*/
static void
mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
	/* Map the subclass (top byte of the log info word) to a label. */
	static const struct {
		u32 base;
		char *desc;
	} fc_subclass[] = {
		{ MPI_IOCLOGINFO_FC_INIT_BASE,			"FCP Initiator" },
		{ MPI_IOCLOGINFO_FC_TARGET_BASE,		"FCP Target" },
		{ MPI_IOCLOGINFO_FC_LAN_BASE,			"LAN" },
		{ MPI_IOCLOGINFO_FC_MSG_BASE,			"MPI Message Layer" },
		{ MPI_IOCLOGINFO_FC_LINK_BASE,			"FC Link" },
		{ MPI_IOCLOGINFO_FC_CTX_BASE,			"Context Manager" },
		{ MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET,	"Invalid Field Offset" },
		{ MPI_IOCLOGINFO_FC_STATE_CHANGE,		"State Change Info" },
	};
	u32 subclass = log_info & 0xFF000000;
	char *desc = "unknown";
	int i;

	for (i = 0; i < ARRAY_SIZE(fc_subclass); i++) {
		if (fc_subclass[i].base == subclass) {
			desc = fc_subclass[i].desc;
			break;
		}
	}

	printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubClass={%s}, Value=(0x%06x)\n",
			ioc->name, log_info, desc, (log_info & 0xFFFFFF));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo word from the IOC
*
* Refer to lsi/sp_log.h.
*/
static void
mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
{
	/* F/W subclass strings, indexed by bits 23:16 of the log info word. */
	static char *spi_subclass_str[] = {
		NULL,				/* 0x00 */
		"bug! MID not found",		/* 0x01 */
		"Parity Error",			/* 0x02 */
		"ASYNC Outbound Overrun",	/* 0x03 */
		"SYNC Offset Error",		/* 0x04 */
		"BM Change",			/* 0x05 */
		"Msg In Overflow",		/* 0x06 */
		"DMA Error",			/* 0x07 */
		"Outbound DMA Overrun",		/* 0x08 */
		"Task Management",		/* 0x09 */
		"Device Problem",		/* 0x0A */
		"Invalid Phase Change",		/* 0x0B */
		"Untagged Table Size",		/* 0x0C */
	};
	u32 code = (log_info & 0x00FF0000) >> 16;
	char *desc = "unknown";

	if (code < ARRAY_SIZE(spi_subclass_str) && spi_subclass_str[code])
		desc = spi_subclass_str[code];

	printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
}
/* strings for sas loginfo */
/*
 * Decode tables used by mpt_sas_log_info().  The SAS LogInfo word packs
 * bus_type(31:28) / originator(27:24) / code(23:16) / subcode(15:0);
 * these arrays are indexed by the originator, code and subcode fields
 * respectively.  NULL entries mean "no description for this value".
 */
static char *originator_str[] = {
	"IOP",						/* 00h */
	"PL",						/* 01h */
	"IR"						/* 02h */
};
/* code strings when originator == 0 (IOP) */
static char *iop_code_str[] = {
	NULL,						/* 00h */
	"Invalid SAS Address",				/* 01h */
	NULL,						/* 02h */
	"Invalid Page",					/* 03h */
	"Diag Message Error",				/* 04h */
	"Task Terminated",				/* 05h */
	"Enclosure Management",				/* 06h */
	"Target Mode"					/* 07h */
};
/* code strings when originator == 1 (PL, protocol layer) */
static char *pl_code_str[] = {
	NULL,						/* 00h */
	"Open Failure",					/* 01h */
	"Invalid Scatter Gather List",			/* 02h */
	"Wrong Relative Offset or Frame Length",	/* 03h */
	"Frame Transfer Error",				/* 04h */
	"Transmit Frame Connected Low",			/* 05h */
	"SATA Non-NCQ RW Error Bit Set",		/* 06h */
	"SATA Read Log Receive Data Error",		/* 07h */
	"SATA NCQ Fail All Commands After Error",	/* 08h */
	"SATA Error in Receive Set Device Bit FIS",	/* 09h */
	"Receive Frame Invalid Message",		/* 0Ah */
	"Receive Context Message Valid Error",		/* 0Bh */
	"Receive Frame Current Frame Error",		/* 0Ch */
	"SATA Link Down",				/* 0Dh */
	"Discovery SATA Init W IOS",			/* 0Eh */
	"Config Invalid Page",				/* 0Fh */
	"Discovery SATA Init Timeout",			/* 10h */
	"Reset",					/* 11h */
	"Abort",					/* 12h */
	"IO Not Yet Executed",				/* 13h */
	"IO Executed",					/* 14h */
	"Persistent Reservation Out Not Affiliation "
	"Owner", 					/* 15h */
	"Open Transmit DMA Abort",			/* 16h */
	"IO Device Missing Delay Retry",		/* 17h */
	"IO Cancelled Due to Receive Error",		/* 18h */
	NULL,						/* 19h */
	NULL,						/* 1Ah */
	NULL,						/* 1Bh */
	NULL,						/* 1Ch */
	NULL,						/* 1Dh */
	NULL,						/* 1Eh */
	NULL,						/* 1Fh */
	"Enclosure Management"				/* 20h */
};
/* code strings when originator == 2 (IR, integrated RAID) */
/* NOTE(review): index comments were off by one in the original
 * (00h appeared twice); renumbered 00h-08h to match the indices. */
static char *ir_code_str[] = {
	"Raid Action Error",				/* 00h */
	NULL,						/* 01h */
	NULL,						/* 02h */
	NULL,						/* 03h */
	NULL,						/* 04h */
	NULL,						/* 05h */
	NULL,						/* 06h */
	NULL,						/* 07h */
	NULL						/* 08h */
};
/* subcode strings, consulted only when originator == IR and code == 0 */
static char *raid_sub_code_str[] = {
	NULL, 						/* 00h */
	"Volume Creation Failed: Data Passed too "
	"Large", 					/* 01h */
	"Volume Creation Failed: Duplicate Volumes "
	"Attempted", 					/* 02h */
	"Volume Creation Failed: Max Number "
	"Supported Volumes Exceeded",			/* 03h */
	"Volume Creation Failed: DMA Error",		/* 04h */
	"Volume Creation Failed: Invalid Volume Type",	/* 05h */
	"Volume Creation Failed: Error Reading "
	"MFG Page 4", 					/* 06h */
	"Volume Creation Failed: Creating Internal "
	"Structures", 					/* 07h */
	NULL,						/* 08h */
	NULL,						/* 09h */
	NULL,						/* 0Ah */
	NULL,						/* 0Bh */
	NULL,						/* 0Ch */
	NULL,						/* 0Dh */
	NULL,						/* 0Eh */
	NULL,						/* 0Fh */
	"Activation failed: Already Active Volume", 	/* 10h */
	"Activation failed: Unsupported Volume Type", 	/* 11h */
	"Activation failed: Too Many Active Volumes", 	/* 12h */
	"Activation failed: Volume ID in Use", 		/* 13h */
	"Activation failed: Reported Failure", 		/* 14h */
	"Activation failed: Importing a Volume", 	/* 15h */
	NULL,						/* 16h */
	NULL,						/* 17h */
	NULL,						/* 18h */
	NULL,						/* 19h */
	NULL,						/* 1Ah */
	NULL,						/* 1Bh */
	NULL,						/* 1Ch */
	NULL,						/* 1Dh */
	NULL,						/* 1Eh */
	NULL,						/* 1Fh */
	"Phys Disk failed: Too Many Phys Disks", 	/* 20h */
	"Phys Disk failed: Data Passed too Large",	/* 21h */
	"Phys Disk failed: DMA Error", 			/* 22h */
	"Phys Disk failed: Invalid <channel:id>", 	/* 23h */
	"Phys Disk failed: Creating Phys Disk Config "
	"Page", 					/* 24h */
	NULL,						/* 25h */
	NULL,						/* 26h */
	NULL,						/* 27h */
	NULL,						/* 28h */
	NULL,						/* 29h */
	NULL,						/* 2Ah */
	NULL,						/* 2Bh */
	NULL,						/* 2Ch */
	NULL,						/* 2Dh */
	NULL,						/* 2Eh */
	NULL,						/* 2Fh */
	"Compatibility Error: IR Disabled",		/* 30h */
	"Compatibility Error: Inquiry Command Failed",	/* 31h */
	"Compatibility Error: Device not Direct Access "
	"Device ",					/* 32h */
	"Compatibility Error: Removable Device Found",	/* 33h */
	"Compatibility Error: Device SCSI Version not "
	"2 or Higher", 					/* 34h */
	"Compatibility Error: SATA Device, 48 BIT LBA "
	"not Supported", 				/* 35h */
	"Compatibility Error: Device doesn't have "
	"512 Byte Block Sizes", 			/* 36h */
	"Compatibility Error: Volume Type Check Failed", /* 37h */
	"Compatibility Error: Volume Type is "
	"Unsupported by FW", 				/* 38h */
	"Compatibility Error: Disk Drive too Small for "
	"use in Volume", 				/* 39h */
	"Compatibility Error: Phys Disk for Create "
	"Volume not Found", 				/* 3Ah */
	"Compatibility Error: Too Many or too Few "
	"Disks for Volume Type", 			/* 3Bh */
	"Compatibility Error: Disk stripe Sizes "
	"Must be 64KB", 				/* 3Ch */
	"Compatibility Error: IME Size Limited to < 2TB", /* 3Dh */
};
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_sas_log_info - Log information returned from SAS IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo reply word from the IOC
* @cb_idx: callback function's handle
*
* Refer to lsi/mpi_log_sas.h.
**/
static void
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		}dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_desc = NULL;
	char *code_desc = NULL;
	char *sub_code_desc = NULL;

	sas_loginfo.loginfo = log_info;

	/* Only SAS (bus_type == 3) log info words are decoded here. */
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;
	/*
	 * Bounds-check originator (a 4-bit field, so up to 15) BEFORE using
	 * it to index the 3-entry originator_str[].  The original guard
	 * ((bus_type != 3) && (originator < ARRAY_SIZE(originator_str)))
	 * allowed an out-of-range originator through whenever bus_type == 3,
	 * causing an out-of-bounds read below.
	 */
	if (sas_loginfo.dw.originator >= ARRAY_SIZE(originator_str))
		return;

	originator_desc = originator_str[sas_loginfo.dw.originator];

	switch (sas_loginfo.dw.originator) {

	case 0:  /* IOP */
		if (sas_loginfo.dw.code <
		    ARRAY_SIZE(iop_code_str))
			code_desc = iop_code_str[sas_loginfo.dw.code];
		break;
	case 1:  /* PL */
		if (sas_loginfo.dw.code <
		    ARRAY_SIZE(pl_code_str))
			code_desc = pl_code_str[sas_loginfo.dw.code];
		break;
	case 2:  /* IR */
		if (sas_loginfo.dw.code >=
		    ARRAY_SIZE(ir_code_str))
			break;
		code_desc = ir_code_str[sas_loginfo.dw.code];
		if (sas_loginfo.dw.subcode >=
		    ARRAY_SIZE(raid_sub_code_str))
			break;
		/* subcode table only applies to "Raid Action Error" (code 0) */
		if (sas_loginfo.dw.code == 0)
			sub_code_desc =
			    raid_sub_code_str[sas_loginfo.dw.subcode];
		break;
	default:
		/* unreachable after the bounds check above; kept for safety */
		return;
	}

	/* Print with as much decoded detail as the tables provided. */
	if (sub_code_desc != NULL)
		printk(MYIOC_s_INFO_FMT
			"LogInfo(0x%08x): Originator={%s}, Code={%s},"
			" SubCode={%s} cb_idx %s\n",
			ioc->name, log_info, originator_desc, code_desc,
			sub_code_desc, MptCallbacksName[cb_idx]);
	else if (code_desc != NULL)
		printk(MYIOC_s_INFO_FMT
			"LogInfo(0x%08x): Originator={%s}, Code={%s},"
			" SubCode(0x%04x) cb_idx %s\n",
			ioc->name, log_info, originator_desc, code_desc,
			sas_loginfo.dw.subcode, MptCallbacksName[cb_idx]);
	else
		printk(MYIOC_s_INFO_FMT
			"LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
			" SubCode(0x%04x) cb_idx %s\n",
			ioc->name, log_info, originator_desc,
			sas_loginfo.dw.code, sas_loginfo.dw.subcode,
			MptCallbacksName[cb_idx]);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* mpt_iocstatus_info_config - IOCSTATUS information for config pages
* @ioc: Pointer to MPT_ADAPTER structure
* @ioc_status: U32 IOCStatus word from IOC
* @mf: Pointer to MPT request frame
*
* Refer to lsi/mpi.h.
**/
static void
mpt_iocstatus_info_config(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
{
	Config_t *pReq = (Config_t *)mf;
	char extend_desc[EVENT_DESCR_STR_SZ];
	char *desc = NULL;
	u32 form;
	u8 page_type;

	/* Extended pages carry the real page type in ExtPageType */
	if (pReq->Header.PageType == MPI_CONFIG_PAGETYPE_EXTENDED)
		page_type = pReq->ExtPageType;
	else
		page_type = pReq->Header.PageType;

	/*
	 * ignore invalid page messages for GET_NEXT_HANDLE
	 * (iterating with GET_NEXT_* is expected to end with
	 * INVALID_PAGE, so that case is not worth logging)
	 */
	form = le32_to_cpu(pReq->PageAddress);
	if (ioc_status == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
		if (page_type == MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE ||
		    page_type == MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER ||
		    page_type == MPI_CONFIG_EXTPAGETYPE_ENCLOSURE) {
			if ((form >> MPI_SAS_DEVICE_PGAD_FORM_SHIFT) ==
				MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE)
				return;
		}
		if (page_type == MPI_CONFIG_PAGETYPE_FC_DEVICE)
			if ((form & MPI_FC_DEVICE_PGAD_FORM_MASK) ==
				MPI_FC_DEVICE_PGAD_FORM_NEXT_DID)
				return;
	}

	snprintf(extend_desc, EVENT_DESCR_STR_SZ,
	    "type=%02Xh, page=%02Xh, action=%02Xh, form=%08Xh",
	    page_type, pReq->Header.PageNumber, pReq->Action, form);

	switch (ioc_status) {

	case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
		desc = "Config Page Invalid Action";
		break;

	case MPI_IOCSTATUS_CONFIG_INVALID_TYPE:   /* 0x0021 */
		desc = "Config Page Invalid Type";
		break;

	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:   /* 0x0022 */
		desc = "Config Page Invalid Page";
		break;

	case MPI_IOCSTATUS_CONFIG_INVALID_DATA:   /* 0x0023 */
		desc = "Config Page Invalid Data";
		break;

	case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS:    /* 0x0024 */
		desc = "Config Page No Defaults";
		break;

	case MPI_IOCSTATUS_CONFIG_CANT_COMMIT:    /* 0x0025 */
		desc = "Config Page Can't Commit";
		break;
	}

	/* Unlisted status codes are silently ignored */
	if (!desc)
		return;

	dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s: %s\n",
	    ioc->name, ioc_status, desc, extend_desc));
}
/**
* mpt_iocstatus_info - IOCSTATUS information returned from IOC.
* @ioc: Pointer to MPT_ADAPTER structure
* @ioc_status: U32 IOCStatus word from IOC
* @mf: Pointer to MPT request frame
*
* Refer to lsi/mpi.h.
**/
static void
mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
{
	u32 status = ioc_status & MPI_IOCSTATUS_MASK;
	char *desc = NULL;

	switch (status) {

/****************************************************************************/
/*  Common IOCStatus values for all replies                                 */
/****************************************************************************/

	case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
		desc = "Invalid Function";
		break;

	case MPI_IOCSTATUS_BUSY: /* 0x0002 */
		desc = "Busy";
		break;

	case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
		desc = "Invalid SGL";
		break;

	case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
		desc = "Internal Error";
		break;

	case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
		desc = "Reserved";
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
		desc = "Insufficient Resources";
		break;

	case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
		desc = "Invalid Field";
		break;

	case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
		desc = "Invalid State";
		break;

/****************************************************************************/
/*  Config IOCStatus values (delegated to the config-page helper)           */
/****************************************************************************/

	case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
	case MPI_IOCSTATUS_CONFIG_INVALID_TYPE: /* 0x0021 */
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: /* 0x0022 */
	case MPI_IOCSTATUS_CONFIG_INVALID_DATA: /* 0x0023 */
	case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS: /* 0x0024 */
	case MPI_IOCSTATUS_CONFIG_CANT_COMMIT: /* 0x0025 */
		mpt_iocstatus_info_config(ioc, status, mf);
		break;

/****************************************************************************/
/*  SCSIIO Reply (SPI, FCP, SAS) initiator values                           */
/*                                                                          */
/*  Look at mptscsih_iocstatus_info_scsiio in mptscsih.c                    */
/*  (deliberately not logged here: desc stays NULL, see below)              */
/*                                                                          */
/****************************************************************************/

	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
	case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
		break;

/****************************************************************************/
/*  SCSI Target values                                                      */
/****************************************************************************/

	case MPI_IOCSTATUS_TARGET_PRIORITY_IO: /* 0x0060 */
		desc = "Target: Priority IO";
		break;

	case MPI_IOCSTATUS_TARGET_INVALID_PORT: /* 0x0061 */
		desc = "Target: Invalid Port";
		break;

	case MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX: /* 0x0062 */
		desc = "Target Invalid IO Index:";
		break;

	case MPI_IOCSTATUS_TARGET_ABORTED: /* 0x0063 */
		desc = "Target: Aborted";
		break;

	case MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: /* 0x0064 */
		desc = "Target: No Conn Retryable";
		break;

	case MPI_IOCSTATUS_TARGET_NO_CONNECTION: /* 0x0065 */
		desc = "Target: No Connection";
		break;

	case MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: /* 0x006A */
		desc = "Target: Transfer Count Mismatch";
		break;

	case MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT: /* 0x006B */
		desc = "Target: STS Data not Sent";
		break;

	case MPI_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: /* 0x006D */
		desc = "Target: Data Offset Error";
		break;

	case MPI_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: /* 0x006E */
		desc = "Target: Too Much Write Data";
		break;

	case MPI_IOCSTATUS_TARGET_IU_TOO_SHORT: /* 0x006F */
		desc = "Target: IU Too Short";
		break;

	case MPI_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: /* 0x0070 */
		desc = "Target: ACK NAK Timeout";
		break;

	case MPI_IOCSTATUS_TARGET_NAK_RECEIVED: /* 0x0071 */
		desc = "Target: Nak Received";
		break;

/****************************************************************************/
/*  Fibre Channel Direct Access values                                      */
/****************************************************************************/

	case MPI_IOCSTATUS_FC_ABORTED: /* 0x0066 */
		desc = "FC: Aborted";
		break;

	case MPI_IOCSTATUS_FC_RX_ID_INVALID: /* 0x0067 */
		desc = "FC: RX ID Invalid";
		break;

	case MPI_IOCSTATUS_FC_DID_INVALID: /* 0x0068 */
		desc = "FC: DID Invalid";
		break;

	case MPI_IOCSTATUS_FC_NODE_LOGGED_OUT: /* 0x0069 */
		desc = "FC: Node Logged Out";
		break;

	case MPI_IOCSTATUS_FC_EXCHANGE_CANCELED: /* 0x006C */
		desc = "FC: Exchange Canceled";
		break;

/****************************************************************************/
/*  LAN values                                                              */
/****************************************************************************/

	case MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND: /* 0x0080 */
		desc = "LAN: Device not Found";
		break;

	case MPI_IOCSTATUS_LAN_DEVICE_FAILURE: /* 0x0081 */
		desc = "LAN: Device Failure";
		break;

	case MPI_IOCSTATUS_LAN_TRANSMIT_ERROR: /* 0x0082 */
		desc = "LAN: Transmit Error";
		break;

	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED: /* 0x0083 */
		desc = "LAN: Transmit Aborted";
		break;

	case MPI_IOCSTATUS_LAN_RECEIVE_ERROR: /* 0x0084 */
		desc = "LAN: Receive Error";
		break;

	case MPI_IOCSTATUS_LAN_RECEIVE_ABORTED: /* 0x0085 */
		desc = "LAN: Receive Aborted";
		break;

	case MPI_IOCSTATUS_LAN_PARTIAL_PACKET: /* 0x0086 */
		desc = "LAN: Partial Packet";
		break;

	case MPI_IOCSTATUS_LAN_CANCELED: /* 0x0087 */
		desc = "LAN: Canceled";
		break;

/****************************************************************************/
/*  Serial Attached SCSI values                                             */
/****************************************************************************/

	case MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED: /* 0x0090 */
		desc = "SAS: SMP Request Failed";
		break;

	case MPI_IOCSTATUS_SAS_SMP_DATA_OVERRUN: /* 0x0091 */
		desc = "SAS: SMP Data Overrun";
		break;

	default:
		desc = "Others";
		break;
	}

	/* desc is NULL only for the SCSIIO statuses above, which the SCSI
	 * host driver reports itself - stay silent for those. */
	if (!desc)
		return;

	dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s\n",
	    ioc->name, status, desc));
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Public entry points of the Fusion-MPT base driver, used by the
 * protocol drivers (mptscsih, mptfc, mptsas, mptspi, mptlan, mptctl). */

/* adapter attach/detach and power management */
EXPORT_SYMBOL(mpt_attach);
EXPORT_SYMBOL(mpt_detach);
#ifdef CONFIG_PM
EXPORT_SYMBOL(mpt_resume);
EXPORT_SYMBOL(mpt_suspend);
#endif
EXPORT_SYMBOL(ioc_list);
/* protocol-driver callback (de)registration */
EXPORT_SYMBOL(mpt_register);
EXPORT_SYMBOL(mpt_deregister);
EXPORT_SYMBOL(mpt_event_register);
EXPORT_SYMBOL(mpt_event_deregister);
EXPORT_SYMBOL(mpt_reset_register);
EXPORT_SYMBOL(mpt_reset_deregister);
EXPORT_SYMBOL(mpt_device_driver_register);
EXPORT_SYMBOL(mpt_device_driver_deregister);
/* message-frame management and transport */
EXPORT_SYMBOL(mpt_get_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame);
EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
EXPORT_SYMBOL(mpt_free_msg_frame);
EXPORT_SYMBOL(mpt_send_handshake_request);
/* adapter state, reset and configuration helpers */
EXPORT_SYMBOL(mpt_verify_adapter);
EXPORT_SYMBOL(mpt_GetIocState);
EXPORT_SYMBOL(mpt_print_ioc_summary);
EXPORT_SYMBOL(mpt_HardResetHandler);
EXPORT_SYMBOL(mpt_config);
EXPORT_SYMBOL(mpt_findImVolumes);
EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
EXPORT_SYMBOL(mptbase_sas_persist_operation);
EXPORT_SYMBOL(mpt_raid_phys_disk_pg0);
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	fusion_init - Fusion MPT base driver initialization routine.
 *
 *	Returns 0 for success, non-zero for failure.
 */
static int __init
fusion_init(void)
{
	u8 idx;

	show_mptmod_ver(my_NAME, my_VERSION);
	printk(KERN_INFO COPYRIGHT "\n");

	/* Start with every protocol-driver slot empty. */
	for (idx = 0; idx < MPT_MAX_PROTOCOL_DRIVERS; idx++) {
		MptCallbacks[idx] = NULL;
		MptDriverClass[idx] = MPTUNKNOWN_DRIVER;
		MptEvHandlers[idx] = NULL;
		MptResetHandlers[idx] = NULL;
	}

	/* Register ourselves (mptbase) in order to facilitate
	 * EventNotification handling.
	 */
	mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER,
	    "mptbase_reply");

	/* Register for hard reset handling callbacks. */
	mpt_reset_register(mpt_base_index, mpt_ioc_reset);

#ifdef CONFIG_PROC_FS
	(void) procmpt_create();
#endif
	return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	fusion_exit - Perform driver unload cleanup.
 *
 *	This routine frees all resources associated with each MPT adapter
 *	and removes all %MPT_PROCFS_MPTBASEDIR entries.
 */
static void __exit
fusion_exit(void)
{
	/* Undo the reset-handler registration made in fusion_init(). */
	mpt_reset_deregister(mpt_base_index);
#ifdef CONFIG_PROC_FS
	/* Tear down the /proc entries created by procmpt_create(). */
	procmpt_destroy();
#endif
}

module_init(fusion_init);
module_exit(fusion_exit);
| gpl-2.0 |
luckasfb/android_kernel_iocean_x7 | net/ipv6/protocol.c | 7870 | 1598 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PF_INET6 protocol dispatch tables.
*
* Authors: Pedro Roque <roque@di.fc.ul.pt>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Changes:
*
* Vince Laviano (vince@cs.stanford.edu) 16 May 2001
* - Removed unused variable 'inet6_protocol_base'
* - Modified inet6_del_protocol() to correctly maintain copy bit.
*/
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <net/protocol.h>
const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
/*
 * Install an upper-layer protocol handler for @protocol.
 * Returns 0 on success, -1 if the slot is already taken.
 */
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
	int slot = protocol & (MAX_INET_PROTOS - 1);
	const struct inet6_protocol *old;

	/* Atomically claim the slot; fail if another handler is installed. */
	old = cmpxchg((const struct inet6_protocol **)&inet6_protos[slot],
		      NULL, prot);
	return old == NULL ? 0 : -1;
}
EXPORT_SYMBOL(inet6_add_protocol);
/*
 *	Remove a protocol from the hash tables.
 *	Returns 0 on success, -1 if @prot was not installed for @protocol.
 */
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
	int slot = protocol & (MAX_INET_PROTOS - 1);
	int ret;

	/* Only clear the slot if it still points at this handler. */
	ret = cmpxchg((const struct inet6_protocol **)&inet6_protos[slot],
		      prot, NULL) == prot ? 0 : -1;

	/* Let in-flight RCU readers finish before the caller may free @prot. */
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL(inet6_del_protocol);
| gpl-2.0 |
hexiaolong2008/linux-2.6.32 | sound/mips/ad1843.c | 12478 | 16402 | /*
* AD1843 low level driver
*
* Copyright 2003 Vivien Chappelier <vivien.chappelier@linux-mips.org>
* Copyright 2008 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*
* inspired from vwsnd.c (SGI VW audio driver)
* Copyright 1999 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ad1843.h>
/*
* AD1843 bitfield definitions. All are named as in the AD1843 data
* sheet, with ad1843_ prepended and individual bit numbers removed.
*
* E.g., bits LSS0 through LSS2 become ad1843_LSS.
*
* Only the bitfields we need are defined.
*/
/* Describes one bitfield within an AD1843 register. */
struct ad1843_bitfield {
	char reg;	/* AD1843 register number */
	char lo_bit;	/* lowest bit position of the field */
	char nbits;	/* width of the field in bits */
};
static const struct ad1843_bitfield
ad1843_PDNO = { 0, 14, 1 }, /* Converter Power-Down Flag */
ad1843_INIT = { 0, 15, 1 }, /* Clock Initialization Flag */
ad1843_RIG = { 2, 0, 4 }, /* Right ADC Input Gain */
ad1843_RMGE = { 2, 4, 1 }, /* Right ADC Mic Gain Enable */
ad1843_RSS = { 2, 5, 3 }, /* Right ADC Source Select */
ad1843_LIG = { 2, 8, 4 }, /* Left ADC Input Gain */
ad1843_LMGE = { 2, 12, 1 }, /* Left ADC Mic Gain Enable */
ad1843_LSS = { 2, 13, 3 }, /* Left ADC Source Select */
ad1843_RD2M = { 3, 0, 5 }, /* Right DAC 2 Mix Gain/Atten */
ad1843_RD2MM = { 3, 7, 1 }, /* Right DAC 2 Mix Mute */
ad1843_LD2M = { 3, 8, 5 }, /* Left DAC 2 Mix Gain/Atten */
ad1843_LD2MM = { 3, 15, 1 }, /* Left DAC 2 Mix Mute */
ad1843_RX1M = { 4, 0, 5 }, /* Right Aux 1 Mix Gain/Atten */
ad1843_RX1MM = { 4, 7, 1 }, /* Right Aux 1 Mix Mute */
ad1843_LX1M = { 4, 8, 5 }, /* Left Aux 1 Mix Gain/Atten */
ad1843_LX1MM = { 4, 15, 1 }, /* Left Aux 1 Mix Mute */
ad1843_RX2M = { 5, 0, 5 }, /* Right Aux 2 Mix Gain/Atten */
ad1843_RX2MM = { 5, 7, 1 }, /* Right Aux 2 Mix Mute */
ad1843_LX2M = { 5, 8, 5 }, /* Left Aux 2 Mix Gain/Atten */
ad1843_LX2MM = { 5, 15, 1 }, /* Left Aux 2 Mix Mute */
ad1843_RMCM = { 7, 0, 5 }, /* Right Mic Mix Gain/Atten */
ad1843_RMCMM = { 7, 7, 1 }, /* Right Mic Mix Mute */
ad1843_LMCM = { 7, 8, 5 }, /* Left Mic Mix Gain/Atten */
ad1843_LMCMM = { 7, 15, 1 }, /* Left Mic Mix Mute */
ad1843_HPOS = { 8, 4, 1 }, /* Headphone Output Voltage Swing */
ad1843_HPOM = { 8, 5, 1 }, /* Headphone Output Mute */
ad1843_MPOM = { 8, 6, 1 }, /* Mono Output Mute */
ad1843_RDA1G = { 9, 0, 6 }, /* Right DAC1 Analog/Digital Gain */
ad1843_RDA1GM = { 9, 7, 1 }, /* Right DAC1 Analog Mute */
ad1843_LDA1G = { 9, 8, 6 }, /* Left DAC1 Analog/Digital Gain */
ad1843_LDA1GM = { 9, 15, 1 }, /* Left DAC1 Analog Mute */
ad1843_RDA2G = { 10, 0, 6 }, /* Right DAC2 Analog/Digital Gain */
ad1843_RDA2GM = { 10, 7, 1 }, /* Right DAC2 Analog Mute */
ad1843_LDA2G = { 10, 8, 6 }, /* Left DAC2 Analog/Digital Gain */
ad1843_LDA2GM = { 10, 15, 1 }, /* Left DAC2 Analog Mute */
ad1843_RDA1AM = { 11, 7, 1 }, /* Right DAC1 Digital Mute */
ad1843_LDA1AM = { 11, 15, 1 }, /* Left DAC1 Digital Mute */
ad1843_RDA2AM = { 12, 7, 1 }, /* Right DAC2 Digital Mute */
ad1843_LDA2AM = { 12, 15, 1 }, /* Left DAC2 Digital Mute */
ad1843_ADLC = { 15, 0, 2 }, /* ADC Left Sample Rate Source */
ad1843_ADRC = { 15, 2, 2 }, /* ADC Right Sample Rate Source */
ad1843_DA1C = { 15, 8, 2 }, /* DAC1 Sample Rate Source */
ad1843_DA2C = { 15, 10, 2 }, /* DAC2 Sample Rate Source */
ad1843_C1C = { 17, 0, 16 }, /* Clock 1 Sample Rate Select */
ad1843_C2C = { 20, 0, 16 }, /* Clock 2 Sample Rate Select */
ad1843_C3C = { 23, 0, 16 }, /* Clock 3 Sample Rate Select */
ad1843_DAADL = { 25, 4, 2 }, /* Digital ADC Left Source Select */
ad1843_DAADR = { 25, 6, 2 }, /* Digital ADC Right Source Select */
ad1843_DAMIX = { 25, 14, 1 }, /* DAC Digital Mix Enable */
ad1843_DRSFLT = { 25, 15, 1 }, /* Digital Reampler Filter Mode */
ad1843_ADLF = { 26, 0, 2 }, /* ADC Left Channel Data Format */
ad1843_ADRF = { 26, 2, 2 }, /* ADC Right Channel Data Format */
ad1843_ADTLK = { 26, 4, 1 }, /* ADC Transmit Lock Mode Select */
ad1843_SCF = { 26, 7, 1 }, /* SCLK Frequency Select */
ad1843_DA1F = { 26, 8, 2 }, /* DAC1 Data Format Select */
ad1843_DA2F = { 26, 10, 2 }, /* DAC2 Data Format Select */
ad1843_DA1SM = { 26, 14, 1 }, /* DAC1 Stereo/Mono Mode Select */
ad1843_DA2SM = { 26, 15, 1 }, /* DAC2 Stereo/Mono Mode Select */
ad1843_ADLEN = { 27, 0, 1 }, /* ADC Left Channel Enable */
ad1843_ADREN = { 27, 1, 1 }, /* ADC Right Channel Enable */
ad1843_AAMEN = { 27, 4, 1 }, /* Analog to Analog Mix Enable */
ad1843_ANAEN = { 27, 7, 1 }, /* Analog Channel Enable */
ad1843_DA1EN = { 27, 8, 1 }, /* DAC1 Enable */
ad1843_DA2EN = { 27, 9, 1 }, /* DAC2 Enable */
ad1843_DDMEN = { 27, 12, 1 }, /* DAC2 to DAC1 Mix Enable */
ad1843_C1EN = { 28, 11, 1 }, /* Clock Generator 1 Enable */
ad1843_C2EN = { 28, 12, 1 }, /* Clock Generator 2 Enable */
ad1843_C3EN = { 28, 13, 1 }, /* Clock Generator 3 Enable */
ad1843_PDNI = { 28, 15, 1 }; /* Converter Power Down */
/*
* The various registers of the AD1843 use three different formats for
* specifying gain. The ad1843_gain structure parameterizes the
* formats.
*/
struct ad1843_gain {
	int negative;				/* nonzero if gain is negative. */
	const struct ad1843_bitfield *lfield;	/* left channel gain field */
	const struct ad1843_bitfield *rfield;	/* right channel gain field */
	const struct ad1843_bitfield *lmute;	/* left mute bit, NULL if none */
	const struct ad1843_bitfield *rmute;	/* right mute bit, NULL if none */
};
static const struct ad1843_gain ad1843_gain_RECLEV = {
.negative = 0,
.lfield = &ad1843_LIG,
.rfield = &ad1843_RIG
};
static const struct ad1843_gain ad1843_gain_LINE = {
.negative = 1,
.lfield = &ad1843_LX1M,
.rfield = &ad1843_RX1M,
.lmute = &ad1843_LX1MM,
.rmute = &ad1843_RX1MM
};
static const struct ad1843_gain ad1843_gain_LINE_2 = {
.negative = 1,
.lfield = &ad1843_LDA2G,
.rfield = &ad1843_RDA2G,
.lmute = &ad1843_LDA2GM,
.rmute = &ad1843_RDA2GM
};
static const struct ad1843_gain ad1843_gain_MIC = {
.negative = 1,
.lfield = &ad1843_LMCM,
.rfield = &ad1843_RMCM,
.lmute = &ad1843_LMCMM,
.rmute = &ad1843_RMCMM
};
static const struct ad1843_gain ad1843_gain_PCM_0 = {
.negative = 1,
.lfield = &ad1843_LDA1G,
.rfield = &ad1843_RDA1G,
.lmute = &ad1843_LDA1GM,
.rmute = &ad1843_RDA1GM
};
static const struct ad1843_gain ad1843_gain_PCM_1 = {
.negative = 1,
.lfield = &ad1843_LD2M,
.rfield = &ad1843_RD2M,
.lmute = &ad1843_LD2MM,
.rmute = &ad1843_RD2MM
};
static const struct ad1843_gain *ad1843_gain[AD1843_GAIN_SIZE] =
{
&ad1843_gain_RECLEV,
&ad1843_gain_LINE,
&ad1843_gain_LINE_2,
&ad1843_gain_MIC,
&ad1843_gain_PCM_0,
&ad1843_gain_PCM_1,
};
/* read the current value of an AD1843 bitfield. */
static int ad1843_read_bits(struct snd_ad1843 *ad1843,
			    const struct ad1843_bitfield *field)
{
	int reg_val = ad1843->read(ad1843->chip, field->reg);
	int mask = (1 << field->nbits) - 1;

	return (reg_val >> field->lo_bit) & mask;
}
/*
 * write a new value to an AD1843 bitfield and return the old value
 * (read-modify-write on the containing register).
 */
static int ad1843_write_bits(struct snd_ad1843 *ad1843,
			     const struct ad1843_bitfield *field,
			     int newval)
{
	int mask = ((1 << field->nbits) - 1) << field->lo_bit;
	int word = ad1843->read(ad1843->chip, field->reg);
	int oldval = (word & mask) >> field->lo_bit;

	word &= ~mask;
	word |= (newval << field->lo_bit) & mask;
	ad1843->write(ad1843->chip, field->reg, word);

	return oldval;
}
/*
 * ad1843_read_multi reads multiple bitfields from the same AD1843
 * register.  It uses a single read cycle to do it.  (Reading the
 * ad1843 requires 256 bit times at 12.288 MHz, or nearly 20
 * microseconds.)
 *
 * Called like this.
 *
 *	ad1843_read_multi(ad1843, nfields,
 *			  &ad1843_FIELD1, &val1,
 *			  &ad1843_FIELD2, &val2, ...);
 */
static void ad1843_read_multi(struct snd_ad1843 *ad1843, int argcount, ...)
{
	va_list ap;
	int word = 0;
	int reg = -1;

	va_start(ap, argcount);
	while (argcount-- > 0) {
		const struct ad1843_bitfield *fp =
			va_arg(ap, const struct ad1843_bitfield *);
		int *value = va_arg(ap, int *);

		/* Latch the register from the first field; all fields
		 * are expected to live in the same register. */
		if (reg == -1) {
			reg = fp->reg;
			word = ad1843->read(ad1843->chip, reg);
		}
		*value = (word >> fp->lo_bit) & ((1 << fp->nbits) - 1);
	}
	va_end(ap);
}
/*
 * ad1843_write_multi stores multiple bitfields into the same AD1843
 * register.  It uses one read and one write cycle to do it.
 *
 * Called like this.
 *
 *	ad1843_write_multi(ad1843, nfields,
 *			   &ad1843_FIELD1, val1,
 *			   &ad1843_FIELF2, val2, ...);
 */
static void ad1843_write_multi(struct snd_ad1843 *ad1843, int argcount, ...)
{
	va_list ap;
	int reg = -1;
	int mask = 0;
	int bits = 0;
	int word;

	va_start(ap, argcount);
	while (argcount-- > 0) {
		const struct ad1843_bitfield *fp =
			va_arg(ap, const struct ad1843_bitfield *);
		int value = va_arg(ap, int);
		int m = ((1 << fp->nbits) - 1) << fp->lo_bit;

		/* All fields must target the same register. */
		if (reg == -1)
			reg = fp->reg;
		else
			BUG_ON(reg != fp->reg);
		mask |= m;
		bits |= (value << fp->lo_bit) & m;
	}
	va_end(ap);

	/* Skip the read cycle when every bit of the register is written. */
	if (~mask & 0xFFFF)
		word = ad1843->read(ad1843->chip, reg);
	else
		word = 0;
	word = (word & ~mask) | bits;
	ad1843->write(ad1843->chip, reg, word);
}
/* Return the maximum gain value for the given mixer control id. */
int ad1843_get_gain_max(struct snd_ad1843 *ad1843, int id)
{
	const struct ad1843_gain *gp = ad1843_gain[id];
	int max = 1 << gp->lfield->nbits;

	/* Without a separate mute bit the top value is one less. */
	return gp->lmute ? max : max - 1;
}
/*
 * ad1843_get_gain reads the specified register and extracts the gain value
 * using the supplied gain type.  Returns left gain in bits 0..7 and
 * right gain in bits 8..15; a muted channel reads as 0.
 */
int ad1843_get_gain(struct snd_ad1843 *ad1843, int id)
{
	const struct ad1843_gain *gp = ad1843_gain[id];
	unsigned short mask = (1 << gp->lfield->nbits) - 1;
	int lg, rg;

	ad1843_read_multi(ad1843, 2, gp->lfield, &lg, gp->rfield, &rg);

	/* Attenuation fields store the inverted value. */
	if (gp->negative) {
		lg = mask - lg;
		rg = mask - rg;
	}

	if (gp->lmute) {
		int lm, rm;

		ad1843_read_multi(ad1843, 2, gp->lmute, &lm, gp->rmute, &rm);
		if (lm)
			lg = 0;
		if (rm)
			rg = 0;
	}

	return (lg << 0) | (rg << 8);
}
/*
 * Set an audio channel's gain.  Left gain is taken from bits 0..7 of
 * @newval, right gain from bits 8..15; a value of 0 mutes the channel
 * when a mute bit exists.
 *
 * Returns the new gain, which may be lower than the old gain.
 */
int ad1843_set_gain(struct snd_ad1843 *ad1843, int id, int newval)
{
	const struct ad1843_gain *gp = ad1843_gain[id];
	unsigned short mask = (1 << gp->lfield->nbits) - 1;
	int lg = (newval >> 0) & mask;
	int rg = (newval >> 8) & mask;
	int lm = !lg;
	int rm = !rg;

	/* Attenuation fields store the inverted value. */
	if (gp->negative) {
		lg = mask - lg;
		rg = mask - rg;
	}
	if (gp->lmute)
		ad1843_write_multi(ad1843, 2, gp->lmute, lm, gp->rmute, rm);
	ad1843_write_multi(ad1843, 2, gp->lfield, lg, gp->rfield, rg);

	/* Read back what the hardware actually accepted. */
	return ad1843_get_gain(ad1843, id);
}
/* Returns the current recording source */
int ad1843_get_recsrc(struct snd_ad1843 *ad1843)
{
	int src = ad1843_read_bits(ad1843, &ad1843_LSS);

	if (src < 0 || src > 2) {
		/* Hardware reports an out-of-range source: force it to 2
		 * (Line In, the driver default) on both channels. */
		src = 2;
		ad1843_write_multi(ad1843, 2,
				   &ad1843_LSS, src, &ad1843_RSS, src);
	}
	return src;
}
/*
 * Set recording source.
 *
 * Returns newsrc on success, -errno on failure.
 */
int ad1843_set_recsrc(struct snd_ad1843 *ad1843, int newsrc)
{
	/* Only sources 0..2 are valid. */
	if (newsrc < 0 || newsrc > 2)
		return -EINVAL;

	/* Program both channels' source-select fields identically. */
	ad1843_write_multi(ad1843, 2, &ad1843_LSS, newsrc, &ad1843_RSS, newsrc);

	return newsrc;
}
/* Setup ad1843 for D/A conversion: program the clock generator for
 * DAC @id and the DAC's format and stereo/mono mode. */
void ad1843_setup_dac(struct snd_ad1843 *ad1843,
		      unsigned int id,
		      unsigned int framerate,
		      snd_pcm_format_t fmt,
		      unsigned int channels)
{
	int ad_fmt = 0;
	int ad_mode = 0;

	switch (fmt) {
	case SNDRV_PCM_FORMAT_S8:
	case SNDRV_PCM_FORMAT_U8:
		ad_fmt = 0;
		break;
	case SNDRV_PCM_FORMAT_S16_LE:
		ad_fmt = 1;
		break;
	case SNDRV_PCM_FORMAT_MU_LAW:
		ad_fmt = 2;
		break;
	case SNDRV_PCM_FORMAT_A_LAW:
		ad_fmt = 3;
		break;
	default:
		break;
	}

	/* stereo -> 0, mono -> 1, anything else keeps the default 0 */
	if (channels == 1)
		ad_mode = 1;
	else if (channels == 2)
		ad_mode = 0;

	if (id) {
		/* DAC2 is clocked from generator 2. */
		ad1843_write_bits(ad1843, &ad1843_C2C, framerate);
		ad1843_write_multi(ad1843, 2,
				   &ad1843_DA2SM, ad_mode,
				   &ad1843_DA2F, ad_fmt);
	} else {
		/* DAC1 is clocked from generator 1. */
		ad1843_write_bits(ad1843, &ad1843_C1C, framerate);
		ad1843_write_multi(ad1843, 2,
				   &ad1843_DA1SM, ad_mode,
				   &ad1843_DA1F, ad_fmt);
	}
}
/* Shut down DAC @id by writing 1 into its format field. */
void ad1843_shutdown_dac(struct snd_ad1843 *ad1843, unsigned int id)
{
	const struct ad1843_bitfield *fmt = id ? &ad1843_DA2F : &ad1843_DA1F;

	ad1843_write_bits(ad1843, fmt, 1);
}
/* Setup ad1843 for A/D conversion: program clock generator 3 and the
 * ADC data format for both channels. */
void ad1843_setup_adc(struct snd_ad1843 *ad1843,
		      unsigned int framerate,
		      snd_pcm_format_t fmt,
		      unsigned int channels)
{
	int da_fmt;

	switch (fmt) {
	case SNDRV_PCM_FORMAT_S16_LE:
		da_fmt = 1;
		break;
	case SNDRV_PCM_FORMAT_MU_LAW:
		da_fmt = 2;
		break;
	case SNDRV_PCM_FORMAT_A_LAW:
		da_fmt = 3;
		break;
	default:	/* S8, U8 and anything unrecognized */
		da_fmt = 0;
		break;
	}

	/* The ADC always uses clock generator 3. */
	ad1843_write_bits(ad1843, &ad1843_C3C, framerate);
	ad1843_write_multi(ad1843, 2,
			   &ad1843_ADLF, da_fmt, &ad1843_ADRF, da_fmt);
}
/* Shut down the ADC.  Intentionally empty: the chip needs no action here. */
void ad1843_shutdown_adc(struct snd_ad1843 *ad1843)
{
	/* nothing to do */
}
/*
 * Fully initialize the ad1843.  As described in the AD1843 data
 * sheet, section "START-UP SEQUENCE".  The numbered comments are
 * subsection headings from the data sheet.  See the data sheet, pages
 * 52-54, for more info.
 *
 * return 0 on success, -errno on failure. */
int ad1843_init(struct snd_ad1843 *ad1843)
{
	unsigned long later;

	/* INIT set means the chip's clock initialization is still
	 * pending (see ad1843_INIT); give up rather than touch it. */
	if (ad1843_read_bits(ad1843, &ad1843_INIT) != 0) {
		printk(KERN_ERR "ad1843: AD1843 won't initialize\n");
		return -EIO;
	}

	/* Select the SCLK frequency (SCF bit). */
	ad1843_write_bits(ad1843, &ad1843_SCF, 1);

	/* 4. Put the conversion resources into standby. */
	ad1843_write_bits(ad1843, &ad1843_PDNI, 0);

	/* Poll the PDNO power-down flag with a 500 ms timeout,
	 * sleeping 5 jiffies between polls. */
	later = jiffies + msecs_to_jiffies(500);
	while (ad1843_read_bits(ad1843, &ad1843_PDNO)) {
		if (time_after(jiffies, later)) {
			printk(KERN_ERR
			       "ad1843: AD1843 won't power up\n");
			return -EIO;
		}
		schedule_timeout_interruptible(5);
	}

	/* 5. Power up the clock generators and enable clock output pins. */
	ad1843_write_multi(ad1843, 3,
			   &ad1843_C1EN, 1,
			   &ad1843_C2EN, 1,
			   &ad1843_C3EN, 1);

	/* 6. Configure conversion resources while they are in standby. */
	/* DAC1/2 use clock 1/2 as source, ADC uses clock 3.  Always. */
	ad1843_write_multi(ad1843, 4,
			   &ad1843_DA1C, 1,
			   &ad1843_DA2C, 2,
			   &ad1843_ADLC, 3,
			   &ad1843_ADRC, 3);

	/* 7. Enable conversion resources. */
	ad1843_write_bits(ad1843, &ad1843_ADTLK, 1);
	ad1843_write_multi(ad1843, 7,
			   &ad1843_ANAEN, 1,
			   &ad1843_AAMEN, 1,
			   &ad1843_DA1EN, 1,
			   &ad1843_DA2EN, 1,
			   &ad1843_DDMEN, 1,
			   &ad1843_ADLEN, 1,
			   &ad1843_ADREN, 1);

	/* 8. Configure conversion resources while they are enabled. */
	/* set gain to 0 for all channels */
	ad1843_set_gain(ad1843, AD1843_GAIN_RECLEV, 0);
	ad1843_set_gain(ad1843, AD1843_GAIN_LINE, 0);
	ad1843_set_gain(ad1843, AD1843_GAIN_LINE_2, 0);
	ad1843_set_gain(ad1843, AD1843_GAIN_MIC, 0);
	ad1843_set_gain(ad1843, AD1843_GAIN_PCM_0, 0);
	ad1843_set_gain(ad1843, AD1843_GAIN_PCM_1, 0);

	/* Unmute all channels. */
	/* DAC1 */
	ad1843_write_multi(ad1843, 2, &ad1843_LDA1GM, 0, &ad1843_RDA1GM, 0);
	/* DAC2 */
	ad1843_write_multi(ad1843, 2, &ad1843_LDA2GM, 0, &ad1843_RDA2GM, 0);

	/* Set default recording source to Line In and set
	 * mic gain to +20 dB.
	 */
	ad1843_set_recsrc(ad1843, 2);
	ad1843_write_multi(ad1843, 2, &ad1843_LMGE, 1, &ad1843_RMGE, 1);

	/* Set Speaker Out level to +/- 4V and unmute it. */
	ad1843_write_multi(ad1843, 3,
			   &ad1843_HPOS, 1,
			   &ad1843_HPOM, 0,
			   &ad1843_MPOM, 0);

	return 0;
}
| gpl-2.0 |
sk806/N5_Kernel | sound/pci/echoaudio/mia_dsp.c | 12478 | 5622 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
static int set_input_clock(struct echoaudio *chip, u16 clock);
static int set_professional_spdif(struct echoaudio *chip, char prof);
static int update_flags(struct echoaudio *chip);
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
int gain);
static int update_vmixer_level(struct echoaudio *chip);
/* Initialize the Mia hardware: load the DSP comm page and firmware and
 * fill in the chip's capability fields. */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Mia\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != MIA))
		return -ENODEV;

	err = init_dsp_comm_page(chip);
	if (err) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;
	chip->dsp_code_to_load = FW_MIA_DSP;
	/* Since this card has no ASIC, mark it as loaded so everything
	   works OK */
	chip->asic_loaded = TRUE;
	if ((subdevice_id & 0x0000f) == MIA_MIDI_REV)
		chip->has_midi = TRUE;
	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
		ECHO_CLOCK_BIT_SPDIF;

	err = load_firmware(chip);
	if (err < 0)
		return err;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return err;
}
/* Apply the driver's default mixer levels. */
static int set_mixer_defaults(struct echoaudio *chip)
{
	return init_line_levels(chip);
}

/* Map the DSP clock detect bits to the generic driver clock
   detect bits */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	u32 dsp_clocks = le32_to_cpu(chip->comm_page->status_clocks);
	u32 bits = ECHO_CLOCK_BIT_INTERNAL;

	if (dsp_clocks & GLDM_CLOCK_DETECT_BIT_SPDIF)
		bits |= ECHO_CLOCK_BIT_SPDIF;

	return bits;
}

/* The Mia has no ASIC.  Just do nothing */
static int load_asic(struct echoaudio *chip)
{
	return 0;
}
/* Program the DSP control register for the requested sample rate.
 * Returns 0 on success, -EINVAL for unsupported rates, -EIO on a
 * handshake timeout. */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	u32 control_reg;

	switch (rate) {
	case 96000:
		control_reg = MIA_96000;
		break;
	case 88200:
		control_reg = MIA_88200;
		break;
	case 48000:
		control_reg = MIA_48000;
		break;
	case 44100:
		control_reg = MIA_44100;
		break;
	case 32000:
		control_reg = MIA_32000;
		break;
	default:
		DE_ACT(("set_sample_rate: %d invalid!\n", rate));
		return -EINVAL;
	}

	/* Override the clock setting if this Mia is set to S/PDIF clock */
	if (chip->input_clock == ECHO_CLOCK_SPDIF)
		control_reg |= MIA_SPDIF;

	/* Nothing to do if the control register already holds this value */
	if (control_reg == le32_to_cpu(chip->comm_page->control_register))
		return 0;

	if (wait_handshake(chip))
		return -EIO;
	chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP */
	chip->comm_page->control_register = cpu_to_le32(control_reg);
	chip->sample_rate = rate;

	clear_handshake(chip);
	return send_vector(chip, DSP_VC_UPDATE_CLOCKS);
}
/* Select the input clock source (internal or S/PDIF) and reprogram the
 * current sample rate, since set_sample_rate() reads chip->input_clock. */
static int set_input_clock(struct echoaudio *chip, u16 clock)
{
	DE_ACT(("set_input_clock(%d)\n", clock));

	if (snd_BUG_ON(clock != ECHO_CLOCK_INTERNAL &&
		       clock != ECHO_CLOCK_SPDIF))
		return -EINVAL;

	chip->input_clock = clock;
	return set_sample_rate(chip, chip->sample_rate);
}
/* This function routes the sound from a virtual channel to a real output */
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
			   int gain)
{
	int idx;

	if (snd_BUG_ON(pipe >= num_pipes_out(chip) ||
		       output >= num_busses_out(chip)))
		return -EINVAL;

	if (wait_handshake(chip))
		return -EIO;

	/* Cache the level and mirror it into the DSP comm page. */
	chip->vmixer_gain[output][pipe] = gain;
	idx = output * num_pipes_out(chip) + pipe;
	chip->comm_page->vmixer[idx] = gain;

	DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain));
	return 0;
}
/* Tell the DSP to read and update virtual mixer levels in comm page. */
static int update_vmixer_level(struct echoaudio *chip)
{
	/* -EIO if the DSP handshake does not complete. */
	if (wait_handshake(chip))
		return -EIO;
	clear_handshake(chip);
	return send_vector(chip, DSP_VC_SET_VMIXER_GAIN);
}

/* Tell the DSP to reread the flags from the comm page */
static int update_flags(struct echoaudio *chip)
{
	/* Same handshake/clear/send-vector sequence as above. */
	if (wait_handshake(chip))
		return -EIO;
	clear_handshake(chip);
	return send_vector(chip, DSP_VC_UPDATE_FLAGS);
}
static int set_professional_spdif(struct echoaudio *chip, char prof)
{
DE_ACT(("set_professional_spdif %d\n", prof));
if (prof)
chip->comm_page->flags |=
cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF);
else
chip->comm_page->flags &=
~cpu_to_le32(DSP_FLAG_PROFESSIONAL_SPDIF);
chip->professional_spdif = prof;
return update_flags(chip);
}
| gpl-2.0 |
sjp38/linux.doc_trans_membarrier | drivers/net/wireless/intel/iwlwifi/iwl-6000.c | 191 | 12310 | /******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
#include "dvm/commands.h" /* needed for BT for now */
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
#define IWL6035_UCODE_API_MAX 6
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
#define IWL6000G2_UCODE_API_MIN 5
#define IWL6035_UCODE_API_MIN 6
/* EEPROM versions */
#define EEPROM_6000_TX_POWER_VERSION (4)
#define EEPROM_6000_EEPROM_VERSION (0x423)
#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
#define EEPROM_6150_TX_POWER_VERSION (6)
#define EEPROM_6150_EEPROM_VERSION (0x553)
#define EEPROM_6005_TX_POWER_VERSION (6)
#define EEPROM_6005_EEPROM_VERSION (0x709)
#define EEPROM_6030_TX_POWER_VERSION (6)
#define EEPROM_6030_EEPROM_VERSION (0x709)
#define EEPROM_6035_TX_POWER_VERSION (6)
#define EEPROM_6035_EEPROM_VERSION (0x753)
#define IWL6000_FW_PRE "iwlwifi-6000-"
#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
#define IWL6050_FW_PRE "iwlwifi-6050-"
#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
.scd_chain_ext_wa = true,
};
static const struct iwl_ht_params iwl6000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_eeprom_params iwl6000_eeprom_params = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
EEPROM_REG_BAND_2_CHANNELS,
EEPROM_REG_BAND_3_CHANNELS,
EEPROM_REG_BAND_4_CHANNELS,
EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
.enhanced_txpower = true,
};
#define IWL_DEVICE_6005 \
.fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6005, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6005_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6005_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6005_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
IWL_DEVICE_6005,
};
const struct iwl_cfg iwl6005_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
IWL_DEVICE_6005,
};
const struct iwl_cfg iwl6005_2agn_sff_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6005_2agn_d_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
IWL_DEVICE_6005,
.ht_params = &iwl6000_ht_params,
};
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6030_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6030_2abg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
IWL_DEVICE_6030,
};
const struct iwl_cfg iwl6030_2bgn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6030_2bg_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
IWL_DEVICE_6030,
};
#define IWL_DEVICE_6035 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6035_UCODE_API_MAX, \
.ucode_api_min = IWL6035_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.nvm_ver = EEPROM_6030_EEPROM_VERSION, \
.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
IWL_DEVICE_6035,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6035_2agn_sff_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
IWL_DEVICE_6035,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl1030_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
IWL_DEVICE_6030,
};
const struct iwl_cfg iwl130_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
.rx_with_siso_diversity = true,
};
const struct iwl_cfg iwl130_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 130 BG",
IWL_DEVICE_6030,
.rx_with_siso_diversity = true,
};
/*
 * "i": Internal configuration, use internal Power Amplifier
 */
/*
 * Common fields for the 6200 (6000i) series.  valid_tx_ant/valid_rx_ant
 * are marked ".cfg overwrite": they override what the EEPROM reports.
 */
#define IWL_DEVICE_6000i \
	.fw_name_pre = IWL6000_FW_PRE, \
	.ucode_api_max = IWL6000_UCODE_API_MAX, \
	.ucode_api_min = IWL6000_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6000i, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
	.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
	.nvm_ver = EEPROM_6000_EEPROM_VERSION, \
	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
	.base_params = &iwl6000_base_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.led_mode = IWL_LED_BLINK, \
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6000i_2agn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
	IWL_DEVICE_6000i,
	.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6000i_2abg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
	IWL_DEVICE_6000i,
};
const struct iwl_cfg iwl6000i_2bg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
	IWL_DEVICE_6000i,
};
/*
 * Common fields for the 6250 (6050) series: WiMAX-combo parts, hence
 * internal_wimax_coex.  Antenna config overrides the EEPROM values.
 */
#define IWL_DEVICE_6050 \
	.fw_name_pre = IWL6050_FW_PRE, \
	.ucode_api_max = IWL6050_UCODE_API_MAX, \
	.ucode_api_min = IWL6050_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6050, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
	.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
	.nvm_ver = EEPROM_6050_EEPROM_VERSION, \
	.nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
	.base_params = &iwl6050_base_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.led_mode = IWL_LED_BLINK, \
	.internal_wimax_coex = true, \
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6050_2agn_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
	IWL_DEVICE_6050,
	.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6050_2abg_cfg = {
	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
	IWL_DEVICE_6050,
};
/*
 * Common fields for the 6150 series: another WiMAX-combo family, also
 * using the 6050 firmware image and base parameters.
 */
#define IWL_DEVICE_6150 \
	.fw_name_pre = IWL6050_FW_PRE, \
	.ucode_api_max = IWL6050_UCODE_API_MAX, \
	.ucode_api_min = IWL6050_UCODE_API_MIN, \
	.device_family = IWL_DEVICE_FAMILY_6150, \
	.max_inst_size = IWL60_RTC_INST_SIZE, \
	.max_data_size = IWL60_RTC_DATA_SIZE, \
	.nvm_ver = EEPROM_6150_EEPROM_VERSION, \
	.nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
	.base_params = &iwl6050_base_params, \
	.eeprom_params = &iwl6000_eeprom_params, \
	.led_mode = IWL_LED_BLINK, \
	.internal_wimax_coex = true, \
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
const struct iwl_cfg iwl6150_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
	IWL_DEVICE_6150,
	.ht_params = &iwl6000_ht_params,
};
const struct iwl_cfg iwl6150_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
	IWL_DEVICE_6150,
};
/*
 * Ultimate-N 6300: the only 6000-family entry spelled out without a
 * common-field macro (it is the sole member of IWL_DEVICE_FAMILY_6000).
 */
const struct iwl_cfg iwl6000_3agn_cfg = {
	.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
	.fw_name_pre = IWL6000_FW_PRE,
	.ucode_api_max = IWL6000_UCODE_API_MAX,
	.ucode_api_min = IWL6000_UCODE_API_MIN,
	.device_family = IWL_DEVICE_FAMILY_6000,
	.max_inst_size = IWL60_RTC_INST_SIZE,
	.max_data_size = IWL60_RTC_DATA_SIZE,
	.nvm_ver = EEPROM_6000_EEPROM_VERSION,
	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
	.base_params = &iwl6000_base_params,
	.eeprom_params = &iwl6000_eeprom_params,
	.ht_params = &iwl6000_ht_params,
	.led_mode = IWL_LED_BLINK,
};

/* Register the firmware images (at the max supported API) for request_firmware. */
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
| gpl-2.0 |
arjen75/LG-Optimus-Chic-Kernel-2.6.32.59- | arch/ia64/mm/init.c | 447 | 19764 | /*
* Initialize MMU support.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>
/* Per-CPU mmu_gather state used by the TLB shootdown machinery. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

/* Upper bound for ISA-style DMA: 4GB above the identity-mapped base. */
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
/* End of the vmalloc area; exported because modules may reference it. */
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);

/* Virtually-mapped array of struct page used when memory is sparse. */
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
/*
 * Make the i-cache coherent with the d-cache for the page mapped by @pte.
 * PG_arch_1 acts as a "flush already done" marker so the expensive
 * flush_icache_range() runs at most once per page.
 */
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	/* Flush the whole (possibly compound) page, then mark it clean. */
	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
/*
* Since DMA is i-cache coherent, any (complete) pages that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
/*
 * Mark every page that lies completely within [addr, addr + size) as
 * i-cache clean (PG_arch_1 set), so __ia64_sync_icache_dcache() can skip
 * it later.  Partial pages at either end are deliberately left alone.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long limit = (unsigned long) addr + size;
	unsigned long va;

	for (va = PAGE_ALIGN((unsigned long) addr);
	     va + PAGE_SIZE <= limit; va += PAGE_SIZE)
		set_bit(PG_arch_1, &virt_to_page(va)->flags);
}
/*
 * Compute the bottom of the current task's register backing store (RBS):
 * RLIMIT_STACK's hard limit (16-byte aligned, capped at
 * MAX_USER_STACK_SIZE) below the start of the user stack.
 */
inline void
ia64_set_rbs_bot (void)
{
	/* & -16 rounds the hard limit down to a 16-byte multiple */
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
/*
* This performs some platform-dependent address space initialization.
* On IA-64, we want to setup the VM area for the register backing
* store (which grows upwards) and install the gateway page which is
* used for signal trampolines, etc.
*/
/*
 * Set up the per-process address space pieces IA-64 needs: a seed VMA for
 * the upward-growing register backing store, and (unless the personality
 * requests a mapped page zero) a NaT page at address 0 so speculative
 * NULL dereferences don't fault.
 *
 * Fix: the mmap_sem calls had been corrupted by an HTML-entity mangling
 * ("&current" had become "(currency-sign)t"); restored to &current->mm->mmap_sem.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			/* insertion failed: give the VMA back and bail out */
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}
/*
 * Release the pages of the __init text/data section back to the page
 * allocator once boot-time initialization is complete.
 */
void
free_initmem (void)
{
	unsigned long va;
	unsigned long first = (unsigned long) ia64_imva(__init_begin);
	unsigned long last  = (unsigned long) ia64_imva(__init_end);

	for (va = first; va < last; va += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(va));
		init_page_count(virt_to_page(va));
		free_page(va);
		++totalram_pages;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}
/*
 * Free the memory occupied by the initial ramdisk, taking care not to
 * free kernel-sized pages that the initrd only partially occupies (see
 * the diagram below).
 */
void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 * - align up the beginning of initrd
	 * - align down the end of initrd
	 *
	 * | |
	 * |=============| a000
	 * | |
	 * | |
	 * | | 9000
	 * |/////////////|
	 * |/////////////|
	 * |=============| 8000
	 * |///INITRD////|
	 * |/////////////|
	 * |/////////////| 7000
	 * | |
	 * |KKKKKKKKKKKKK|
	 * |=============| 6000
	 * |KKKKKKKKKKKKK|
	 * |KKKKKKKKKKKKK|
	 * K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	/* Give each whole initrd page back to the page allocator. */
	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}
/*
 * This installs a clean page in the kernel's page table.
 */
/*
 * Map @page at kernel virtual @address with protection @pgprot, allocating
 * intermediate pud/pmd/pte levels in init_mm as needed.  An existing pte
 * at that slot is left untouched.  Always returns @page (even on
 * allocation failure, since callers only chain mappings).
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;	/* slot already populated; keep it */
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}
/*
 * Install the gate page(s) used for signal trampolines and the light-weight
 * "epc" system-call entry, then patch the gate code for this kernel.
 */
static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			/* pad both the read-only and the executable copies */
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}
/*
 * Per-CPU MMU bring-up: sanity-check the implemented virtual address
 * width, place the virtually mapped linear page table (VMLPT) at the top
 * of each region, program the PTA register, and initialize the TLB.
 * @my_cpu_data is unused here but kept for the per-CPU init convention.
 */
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	/* lowest unimplemented VA bit (region bits 61-63 masked in) */
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	/* program the huge-page region's RR with the huge page size */
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
/*
 * Starting at offset @i into node @node's portion of the virtual mem_map,
 * walk the kernel page tables forward to the next vmem_map page that is
 * actually mapped, and return the corresponding node-relative pfn offset.
 * Used to skip holes in a virtually mapped mem_map efficiently.
 */
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		/* skip whole unmapped levels in one step each */
		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			/* stay inside the current pmd while scanning ptes */
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	/* convert the vmem_map byte offset back into a pfn (round up) */
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}
/*
 * EFI memmap-walk callback: build (in init_mm) the page tables backing the
 * virtual mem_map pages that describe physical range [start, end),
 * allocating each level from node-local bootmem.  Always returns 0 so the
 * walk continues.
 */
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		/* finally map a fresh zeroed bootmem page at this slot */
		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}
/* Arguments threaded through efi_memmap_walk() to virtual_memmap_init(). */
struct memmap_init_callback_data {
	struct page *start;	/* first struct page of the zone's mem_map slice */
	struct page *end;	/* one past the last struct page of the slice */
	int nid;		/* node being initialized */
	unsigned long zone;	/* zone index being initialized */
};
/*
 * EFI memmap-walk callback: initialize the slice of the virtual mem_map
 * covering physical range [start, end), clipped to the zone boundaries in
 * @arg.  Returns 0 so the walk continues over all memory descriptors.
 */
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}
/*
 * Architecture hook for zone mem_map initialization.  Without a virtual
 * mem_map this is a straight call to memmap_init_zone(); with one, only
 * the pieces actually backed by memory are initialized, via an EFI
 * memory-map walk.
 */
void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	struct memmap_init_callback_data args;
	struct page *first;

	if (!vmem_map) {
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
		return;
	}

	first = pfn_to_page(start_pfn);
	args.start = first;
	args.end = first + size;
	args.nid = nid;
	args.zone = zone;
	efi_memmap_walk(virtual_memmap_init, &args);
}
/*
 * Return non-zero if @pfn has a usable struct page.  Rather than consult
 * a bitmap, this probes the mem_map entry itself with __get_user(): the
 * first byte always, and the last byte too when the entry straddles a
 * page boundary (a faulting probe means the entry is in a hole).
 */
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
/*
 * EFI memmap-walk callback: track the largest gap between consecutive
 * memory ranges.  @arg points at the running maximum; the static
 * last_end carries the previous range's end across calls.
 */
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;
	static u64 last_end = PAGE_OFFSET;
	u64 gap = start - last_end;

	/* NOTE: this algorithm assumes efi memmap table is ordered */
	if (gap > *max_gap)
		*max_gap = gap;
	last_end = end;
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
/*
 * EFI memmap-walk callback: register [start, start+len) as usable memory
 * on node @nid.  With kexec configured, the crash-kernel reservation is
 * clipped out of the range first.
 */
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	/* trim any overlap with the crashkernel reservation */
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
			__pa(end) >> PAGE_SHIFT);
	return 0;
}
/*
 * EFI memmap-walk callback: count PG_reserved pages in [start, end) and
 * accumulate the total into the counter @arg points to.
 */
static int __init
count_reserved_pages(u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;
	unsigned long found = 0;
	u64 va;

	for (va = start; va < end; va += PAGE_SIZE)
		if (PageReserved(virt_to_page(va)))
			++found;
	*count += found;
	return 0;
}
/*
 * EFI memmap-walk callback: widen the global min_low_pfn/max_low_pfn
 * bounds to cover [start, end).  FLATMEM rounds to page granularity;
 * otherwise to granule granularity.
 */
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}
/*
* Boot command-line option "nolwsys" can be used to disable the use of any light-weight
* system call handler. When this option is in effect, all fsyscalls will end up bubbling
* down into the kernel and calling the normal (heavy-weight) syscall handler. This is
* useful for performance testing, but conceivably could also come in handy for debugging
* purposes.
*/
/* Set by the "nolwsys" boot option; checked in mem_init() below. */
static int nolwsys __initdata;

/* Boot-option handler: disable all light-weight fsyscall handlers. */
static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);
/*
 * Late memory initialization: release bootmem to the page allocator,
 * print the memory summary, patch the fsyscall table for syscalls that
 * lack a light-weight handler, and install the gate page.
 */
void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;

	/* one page-table page must hold exactly one level's worth of entries */
	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	/* hand each node's bootmem pages over to the buddy allocator */
	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entrpoints with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Hot-add @size bytes of memory at physical address @start to node @nid.
 * The range always lands in ZONE_NORMAL.  Returns 0 on success or the
 * negative error code from __add_pages().
 *
 * Fix: the failure printk lacked an explicit log level; tag it KERN_ERR
 * so the message is not emitted at the default level.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk(KERN_ERR "%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__,  ret);

	return ret;
}
#endif
/*
* Even when CONFIG_IA32_SUPPORT is not enabled it is
* useful to have the Linux/x86 domain registered to
* avoid an attempted module load when emulators call
* personality(PER_LINUX32). This saves several milliseconds
* on each such call.
*/
/* Execution domain registered so personality(PER_LINUX32) resolves locally. */
static struct exec_domain ia32_exec_domain;

/*
 * Register the Linux/x86 exec domain at boot (see the comment above:
 * this avoids an attempted module load on each PER_LINUX32 call).
 */
static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;	/* no special syscall dispatch */
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);
| gpl-2.0 |
LorDClockaN/LorDmodUE | sound/soc/s3c24xx/s3c-ac97.c | 703 | 13327 | /* sound/soc/s3c24xx/s3c-ac97.c
*
* ALSA SoC Audio Layer - S3C AC97 Controller driver
* Evolved from s3c2443-ac97.c
*
* Copyright (c) 2010 Samsung Electronics Co. Ltd
* Author: Jaswinder Singh <jassi.brar@samsung.com>
* Credits: Graeme Gregory, Sean Choi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <sound/soc.h>
#include <plat/regs-ac97.h>
#include <mach/dma.h>
#include <plat/audio.h>
#include "s3c-dma.h"
#include "s3c-ac97.h"
/*
 * Build the AC97 codec command word: register address in the upper
 * halfword (bits 22:16 of the command register), 16-bit payload in the
 * lower halfword.  Fix: fully parenthesize the macro argument so that
 * expression arguments (e.g. AC_CMD_DATA(a | b)) expand correctly —
 * without the parentheses, '|' binds looser than '&' and '<<'.
 */
#define AC_CMD_ADDR(x) ((x) << 16)
#define AC_CMD_DATA(x) ((x) & 0xffff)
/* Driver-global controller state (single AC97 block). */
struct s3c_ac97_info {
	unsigned           state;
	struct clk        *ac97_clk;	/* clock for the AC97 block */
	void __iomem	  *regs;	/* mapped controller registers */
	struct mutex       lock;	/* serializes codec register read/write */
	struct completion  done;	/* completed by the CODEC_READY interrupt */
};
static struct s3c_ac97_info s3c_ac97;

/* DMA client identities for the three hardware FIFOs. */
static struct s3c2410_dma_client s3c_dma_client_out = {
	.name = "AC97 PCMOut"
};

static struct s3c2410_dma_client s3c_dma_client_in = {
	.name = "AC97 PCMIn"
};

static struct s3c2410_dma_client s3c_dma_client_micin = {
	.name = "AC97 MicIn"
};

/* DMA parameters; .channel/.dma_addr are filled in at probe time. */
static struct s3c_dma_params s3c_ac97_pcm_out = {
	.client		= &s3c_dma_client_out,
	.dma_size	= 4,
};

static struct s3c_dma_params s3c_ac97_pcm_in = {
	.client		= &s3c_dma_client_in,
	.dma_size	= 4,
};

static struct s3c_dma_params s3c_ac97_mic_in = {
	.client		= &s3c_dma_client_micin,
	.dma_size	= 4,
};
/*
 * Bring the AC-link up if it is not already active: enable the link,
 * enable data transfer, then arm the codec-ready interrupt and wait (up
 * to 1s) for the irq handler to complete s3c_ac97.done.
 */
static void s3c_ac97_activate(struct snd_ac97 *ac97)
{
	u32 ac_glbctrl, stat;

	stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
		return; /* Return if already active */

	INIT_COMPLETION(s3c_ac97.done);

	/* NB: the readl result is discarded; the control word is rebuilt */
	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
	ac_glbctrl = S3C_AC97_GLBCTRL_ACLINKON;
	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
	msleep(1);

	ac_glbctrl |= S3C_AC97_GLBCTRL_TRANSFERDATAENABLE;
	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
	msleep(1);

	/* arm the codec-ready interrupt; the irq handler completes .done */
	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);

	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
		printk(KERN_ERR "AC97: Unable to activate!");
}
/*
 * Read codec register @reg over the AC-link.  Issues a read command,
 * waits for the codec-ready interrupt, then pulls address and data out
 * of the STAT register.  Serialized by s3c_ac97.lock.
 */
static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
	unsigned short reg)
{
	u32 ac_glbctrl, ac_codec_cmd;
	u32 stat, addr, data;

	mutex_lock(&s3c_ac97.lock);

	s3c_ac97_activate(ac97);

	INIT_COMPLETION(s3c_ac97.done);

	/* command word is rebuilt from scratch (readl result discarded) */
	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
	ac_codec_cmd = S3C_AC97_CODEC_CMD_READ | AC_CMD_ADDR(reg);
	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);

	udelay(50);

	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);

	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
		printk(KERN_ERR "AC97: Unable to read!");

	/* STAT holds the echoed register address (31:16) and data (15:0) */
	stat = readl(s3c_ac97.regs + S3C_AC97_STAT);
	addr = (stat >> 16) & 0x7f;
	data = (stat & 0xffff);

	if (addr != reg)
		printk(KERN_ERR "s3c-ac97: req addr = %02x, rep addr = %02x\n", reg, addr);

	mutex_unlock(&s3c_ac97.lock);

	return (unsigned short)data;
}
/*
 * Write @val to codec register @reg over the AC-link, then wait for the
 * codec-ready interrupt.  Afterwards the READ bit is set in the command
 * register so no further (spurious) write is issued.  Serialized by
 * s3c_ac97.lock.
 */
static void s3c_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
	unsigned short val)
{
	u32 ac_glbctrl, ac_codec_cmd;

	mutex_lock(&s3c_ac97.lock);

	s3c_ac97_activate(ac97);

	INIT_COMPLETION(s3c_ac97.done);

	/* command word is rebuilt from scratch (readl result discarded) */
	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
	ac_codec_cmd = AC_CMD_ADDR(reg) | AC_CMD_DATA(val);
	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);

	udelay(50);

	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);

	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
		printk(KERN_ERR "AC97: Unable to write!");

	/* flip the command back to READ so the write is not re-issued */
	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
	ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);

	mutex_unlock(&s3c_ac97.lock);
}
static void s3c_ac97_cold_reset(struct snd_ac97 *ac97)
{
writel(S3C_AC97_GLBCTRL_COLDRESET,
s3c_ac97.regs + S3C_AC97_GLBCTRL);
msleep(1);
writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
msleep(1);
}
static void s3c_ac97_warm_reset(struct snd_ac97 *ac97)
{
u32 stat;
stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
return; /* Return if already active */
writel(S3C_AC97_GLBCTRL_WARMRESET, s3c_ac97.regs + S3C_AC97_GLBCTRL);
msleep(1);
writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
msleep(1);
s3c_ac97_activate(ac97);
}
/*
 * AC97 interrupt handler: on CODEC_READY, disable that interrupt source
 * and wake whoever is blocked in s3c_ac97_activate/read/write; always
 * ack the interrupt via GLBCTRL bit 30.
 */
static irqreturn_t s3c_ac97_irq(int irq, void *dev_id)
{
	u32 ac_glbctrl, ac_glbstat;

	ac_glbstat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT);

	if (ac_glbstat & S3C_AC97_GLBSTAT_CODECREADY) {

		ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
		ac_glbctrl &= ~S3C_AC97_GLBCTRL_CODECREADYIE;
		writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);

		complete(&s3c_ac97.done);
	}

	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
	ac_glbctrl |= (1<<30); /* Clear interrupt */
	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);

	return IRQ_HANDLED;
}
/* Bus operations exported to the ASoC AC97 codec layer. */
struct snd_ac97_bus_ops soc_ac97_ops = {
	.read       = s3c_ac97_read,
	.write      = s3c_ac97_write,
	.warm_reset = s3c_ac97_warm_reset,
	.reset      = s3c_ac97_cold_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
/*
 * hw_params for the stereo PCM DAI: attach the DMA parameter block that
 * matches the stream direction to the CPU DAI.  Always succeeds.
 */
static int s3c_ac97_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params,
			      struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
	int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);

	snd_soc_dai_set_dma_data(cpu_dai, substream,
				 playback ? &s3c_ac97_pcm_out
					  : &s3c_ac97_pcm_in);

	return 0;
}
/*
 * Trigger for the stereo PCM DAI: switch the PCM-in/out transfer mode
 * between DMA (start/resume/unpause) and off (stop/suspend/pause), then
 * notify the DMA layer that the transfer has started.
 */
static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
				struct snd_soc_dai *dai)
{
	u32 ac_glbctrl;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c_dma_params *dma_data =
		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

	/* clear the transfer-mode field for this direction first */
	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMINTM_MASK;
	else
		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMOUTTM_MASK;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMINTM_DMA;
		else
			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMOUTTM_DMA;
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* mode field stays cleared: transfers off */
		break;
	}

	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);

	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);

	return 0;
}
/*
 * hw_params for the MIC DAI: capture-only, so playback is rejected with
 * -ENODEV; otherwise the MIC-in DMA parameter block is attached.
 */
static int s3c_ac97_hw_mic_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params,
				  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		return -ENODEV;

	snd_soc_dai_set_dma_data(rtd->dai->cpu_dai, substream,
				 &s3c_ac97_mic_in);

	return 0;
}
/*
 * Trigger for the MIC DAI: switch the MIC-in transfer mode between DMA
 * (start/resume/unpause) and off (stop/suspend/pause), then notify the
 * DMA layer that the transfer has started.
 */
static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream,
				int cmd, struct snd_soc_dai *dai)
{
	u32 ac_glbctrl;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct s3c_dma_params *dma_data =
		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);

	/* clear the MIC transfer-mode field first */
	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
	ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ac_glbctrl |= S3C_AC97_GLBCTRL_MICINTM_DMA;
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* mode field stays cleared: transfers off */
		break;
	}

	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);

	s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED);

	return 0;
}
/* Per-DAI operation tables: stereo PCM vs. mono MIC capture. */
static struct snd_soc_dai_ops s3c_ac97_dai_ops = {
	.hw_params	= s3c_ac97_hw_params,
	.trigger	= s3c_ac97_trigger,
};

static struct snd_soc_dai_ops s3c_ac97_mic_dai_ops = {
	.hw_params	= s3c_ac97_hw_mic_params,
	.trigger	= s3c_ac97_mic_trigger,
};
/*
 * The two DAIs this controller exposes: a stereo PCM DAI (playback and
 * capture) and a mono MIC capture DAI.  Both are AC97-controlled and
 * support 8-48 kHz, 16-bit little-endian samples.
 */
struct snd_soc_dai s3c_ac97_dai[] = {
	[S3C_AC97_DAI_PCM] = {
		.name =	"s3c-ac97",
		.id = S3C_AC97_DAI_PCM,
		.ac97_control = 1,
		.playback = {
			.stream_name = "AC97 Playback",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
		.capture = {
			.stream_name = "AC97 Capture",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
		.ops = &s3c_ac97_dai_ops,
	},
	[S3C_AC97_DAI_MIC] = {
		.name = "s3c-ac97-mic",
		.id = S3C_AC97_DAI_MIC,
		.ac97_control = 1,
		.capture = {
			.stream_name = "AC97 Mic Capture",
			.channels_min = 1,
			.channels_max = 1,
			.rates = SNDRV_PCM_RATE_8000_48000,
			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
		.ops = &s3c_ac97_mic_dai_ops,
	},
};
EXPORT_SYMBOL_GPL(s3c_ac97_dai);
/*
 * s3c_ac97_probe - platform probe: gathers the three DMA channels
 * (TX, RX, MIC), the register window and the IRQ, maps the registers,
 * enables the AC97 clock, configures the pins via the board-supplied
 * cfg_gpio callback and registers both DAIs.
 *
 * Error unwinding uses the err1..err5 goto ladder in strict reverse
 * order of acquisition.
 */
static __devinit int s3c_ac97_probe(struct platform_device *pdev)
{
	struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
	struct s3c_audio_pdata *ac97_pdata;
	int ret;

	ac97_pdata = pdev->dev.platform_data;
	if (!ac97_pdata || !ac97_pdata->cfg_gpio) {
		dev_err(&pdev->dev, "cfg_gpio callback not provided!\n");
		return -EINVAL;
	}

	/* Check for availability of necessary resource */
	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmatx_res) {
		dev_err(&pdev->dev, "Unable to get AC97-TX dma resource\n");
		return -ENXIO;
	}
	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmarx_res) {
		dev_err(&pdev->dev, "Unable to get AC97-RX dma resource\n");
		return -ENXIO;
	}
	dmamic_res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
	if (!dmamic_res) {
		dev_err(&pdev->dev, "Unable to get AC97-MIC dma resource\n");
		return -ENXIO;
	}
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "Unable to get register resource\n");
		return -ENXIO;
	}
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res) {
		dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
		return -ENXIO;
	}

	if (!request_mem_region(mem_res->start,
				resource_size(mem_res), "s3c-ac97")) {
		dev_err(&pdev->dev, "Unable to request register region\n");
		return -EBUSY;
	}

	/* Hand the DMA channel numbers and FIFO addresses to the PCM layer. */
	s3c_ac97_pcm_out.channel = dmatx_res->start;
	s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
	s3c_ac97_pcm_in.channel = dmarx_res->start;
	s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
	s3c_ac97_mic_in.channel = dmamic_res->start;
	s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;

	init_completion(&s3c_ac97.done);
	mutex_init(&s3c_ac97.lock);

	s3c_ac97.regs = ioremap(mem_res->start, resource_size(mem_res));
	if (s3c_ac97.regs == NULL) {
		dev_err(&pdev->dev, "Unable to ioremap register region\n");
		ret = -ENXIO;
		goto err1;
	}

	s3c_ac97.ac97_clk = clk_get(&pdev->dev, "ac97");
	if (IS_ERR(s3c_ac97.ac97_clk)) {
		dev_err(&pdev->dev, "s3c-ac97 failed to get ac97_clock\n");
		ret = -ENODEV;
		goto err2;
	}
	clk_enable(s3c_ac97.ac97_clk);

	if (ac97_pdata->cfg_gpio(pdev)) {
		dev_err(&pdev->dev, "Unable to configure gpio\n");
		ret = -EINVAL;
		goto err3;
	}

	ret = request_irq(irq_res->start, s3c_ac97_irq,
			  IRQF_DISABLED, "AC97", NULL);
	if (ret < 0) {
		printk(KERN_ERR "s3c-ac97: interrupt request failed.\n");
		goto err4;
	}

	s3c_ac97_dai[S3C_AC97_DAI_PCM].dev = &pdev->dev;
	s3c_ac97_dai[S3C_AC97_DAI_MIC].dev = &pdev->dev;

	ret = snd_soc_register_dais(s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
	if (ret)
		goto err5;

	return 0;

err5:	/* DAI registration failed: release the IRQ */
	free_irq(irq_res->start, NULL);
err4:	/* IRQ request failed: nothing extra to undo beyond the clock */
err3:	/* gpio config failed: disable and drop the clock */
	clk_disable(s3c_ac97.ac97_clk);
	clk_put(s3c_ac97.ac97_clk);
err2:	/* clk_get failed: unmap registers */
	iounmap(s3c_ac97.regs);
err1:	/* ioremap failed: give the mem region back */
	release_mem_region(mem_res->start, resource_size(mem_res));
	return ret;
}
/*
 * s3c_ac97_remove - undo probe in reverse order: unregister DAIs, free
 * the IRQ, disable/put the clock, unmap and release the register window.
 */
static __devexit int s3c_ac97_remove(struct platform_device *pdev)
{
	struct resource *mem_res, *irq_res;

	snd_soc_unregister_dais(s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (irq_res)
		free_irq(irq_res->start, NULL);

	clk_disable(s3c_ac97.ac97_clk);
	clk_put(s3c_ac97.ac97_clk);

	iounmap(s3c_ac97.regs);

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res)
		release_mem_region(mem_res->start, resource_size(mem_res));

	return 0;
}

static struct platform_driver s3c_ac97_driver = {
	.probe  = s3c_ac97_probe,
	.remove = s3c_ac97_remove,
	.driver = {
		.name = "s3c-ac97",
		.owner = THIS_MODULE,
	},
};

/* Standard module plumbing: register/unregister the platform driver. */
static int __init s3c_ac97_init(void)
{
	return platform_driver_register(&s3c_ac97_driver);
}
module_init(s3c_ac97_init);

static void __exit s3c_ac97_exit(void)
{
	platform_driver_unregister(&s3c_ac97_driver);
}
module_exit(s3c_ac97_exit);

MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("AC97 driver for the Samsung SoC");
MODULE_LICENSE("GPL");
| gpl-2.0 |
dwindsor/linux-next | drivers/gpu/drm/nouveau/nvkm/subdev/bus/nv04.c | 959 | 2335 | /*
* Copyright 2012 Nouveau Community
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Martin Peres <martin.peres@labri.fr>
* Ben Skeggs
*/
#include "priv.h"
#include <subdev/gpio.h>
#include <subdev/gpio.h>
/*
 * nv04_bus_intr - bus interrupt handler: reads pending (0x001100) masked
 * by enabled (0x001140), services known sources, and disables any source
 * it does not recognise so it cannot interrupt-storm.
 */
static void
nv04_bus_intr(struct nvkm_bus *bus)
{
	struct nvkm_subdev *subdev = &bus->subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);
	if (stat & 0x00000001) {
		/* bit 0: generic bus error -- log and ack */
		nvkm_error(subdev, "BUS ERROR\n");
		stat &= ~0x00000001;
		nvkm_wr32(device, 0x001100, 0x00000001);
	}

	if (stat & 0x00000110) {
		/* bits 4/8: GPIO events -- forward to the gpio subdev, ack */
		struct nvkm_gpio *gpio = device->gpio;
		if (gpio)
			nvkm_subdev_intr(&gpio->subdev);
		stat &= ~0x00000110;
		nvkm_wr32(device, 0x001100, 0x00000110);
	}

	if (stat) {
		/* unknown source: report and mask it off in the enable reg */
		nvkm_error(subdev, "intr %08x\n", stat);
		nvkm_mask(device, 0x001140, stat, 0x00000000);
	}
}

/*
 * nv04_bus_init - ack all pending interrupts, then enable the sources
 * handled above (bus error + GPIO bits).
 */
static void
nv04_bus_init(struct nvkm_bus *bus)
{
	struct nvkm_device *device = bus->subdev.device;
	nvkm_wr32(device, 0x001100, 0xffffffff);
	nvkm_wr32(device, 0x001140, 0x00000111);
}

static const struct nvkm_bus_func
nv04_bus = {
	.init = nv04_bus_init,
	.intr = nv04_bus_intr,
};

/* Constructor: instantiate a bus object bound to the nv04 function table. */
int
nv04_bus_new(struct nvkm_device *device, int index, struct nvkm_bus **pbus)
{
	return nvkm_bus_new_(&nv04_bus, device, index, pbus);
}
| gpl-2.0 |
kbukin1/pnotify-linux-4.1.6 | drivers/gpu/host1x/hw/host1x01.c | 2239 | 1228 | /*
* Host1x init for T20 and T30 Architecture Chips
*
* Copyright (c) 2011-2013, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* include hw specification */
#include "host1x01.h"
#include "host1x01_hardware.h"
/* include code */
#include "cdma_hw.c"
#include "channel_hw.c"
#include "debug_hw.c"
#include "intr_hw.c"
#include "syncpt_hw.c"
#include "../dev.h"
/*
 * host1x01_init - wire up the host1x01 (Tegra20/30) hardware-specific
 * operation tables on the host1x instance.  Always succeeds.
 */
int host1x01_init(struct host1x *host)
{
	host->channel_op = &host1x_channel_ops;
	host->cdma_op = &host1x_cdma_ops;
	host->cdma_pb_op = &host1x_pushbuffer_ops;
	host->syncpt_op = &host1x_syncpt_ops;
	host->intr_op = &host1x_intr_ops;
	host->debug_op = &host1x_debug_ops;
	return 0;
}
| gpl-2.0 |
md5555/S6-UniKernel | drivers/platform/x86/toshiba_acpi.c | 2239 | 34264 | /*
* toshiba_acpi.c - Toshiba Laptop ACPI Extras
*
*
* Copyright (C) 2002-2004 John Belmonte
* Copyright (C) 2008 Philip Langdale
* Copyright (C) 2010 Pierre Ducroquet
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* The devolpment page for this driver is located at
* http://memebeam.org/toys/ToshibaAcpiDriver.
*
* Credits:
* Jonathan A. Buzzard - Toshiba HCI info, and critical tips on reverse
* engineering the Windows drivers
* Yasushi Nagato - changes for linux kernel 2.4 -> 2.5
* Rob Miller - TV out and hotkeys help
*
*
* TODO
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define TOSHIBA_ACPI_VERSION "0.19"
#define PROC_INTERFACE_VERSION 1
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/backlight.h>
#include <linux/rfkill.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/i8042.h>
#include <asm/uaccess.h>
#include <acpi/acpi_drivers.h>
MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
/* Scan code for Fn key on TOS1900 models */
#define TOS1900_FN_SCAN 0x6e
/* Toshiba ACPI method paths */
#define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX"
/* Toshiba HCI interface definitions
*
* HCI is Toshiba's "Hardware Control Interface" which is supposed to
* be uniform across all their models. Ideally we would just call
* dedicated ACPI methods instead of using this primitive interface.
* However the ACPI methods seem to be incomplete in some areas (for
* example they allow setting, but not reading, the LCD brightness value),
* so this is still useful.
*/
#define HCI_WORDS 6
/* operations */
#define HCI_SET 0xff00
#define HCI_GET 0xfe00
/* return codes */
#define HCI_SUCCESS 0x0000
#define HCI_FAILURE 0x1000
#define HCI_NOT_SUPPORTED 0x8000
#define HCI_EMPTY 0x8c00
/* registers */
#define HCI_FAN 0x0004
#define HCI_TR_BACKLIGHT 0x0005
#define HCI_SYSTEM_EVENT 0x0016
#define HCI_VIDEO_OUT 0x001c
#define HCI_HOTKEY_EVENT 0x001e
#define HCI_LCD_BRIGHTNESS 0x002a
#define HCI_WIRELESS 0x0056
/* field definitions */
#define HCI_HOTKEY_DISABLE 0x0b
#define HCI_HOTKEY_ENABLE 0x09
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
#define HCI_LCD_BRIGHTNESS_LEVELS (1 << HCI_LCD_BRIGHTNESS_BITS)
#define HCI_VIDEO_OUT_LCD 0x1
#define HCI_VIDEO_OUT_CRT 0x2
#define HCI_VIDEO_OUT_TV 0x4
#define HCI_WIRELESS_KILL_SWITCH 0x01
#define HCI_WIRELESS_BT_PRESENT 0x0f
#define HCI_WIRELESS_BT_ATTACH 0x40
#define HCI_WIRELESS_BT_POWER 0x80
/* Per-device state for the Toshiba ACPI extras driver. */
struct toshiba_acpi_dev {
	struct acpi_device *acpi_dev;
	const char *method_hci;		/* HCI method path for this model */
	struct rfkill *bt_rfk;		/* Bluetooth kill switch, if present */
	struct input_dev *hotkey_dev;
	struct work_struct hotkey_work;	/* NTFY poke for TOS1900 Fn filter */
	struct backlight_device *backlight_dev;
	struct led_classdev led_dev;	/* illumination LED */

	int force_fan;			/* user-forced fan state (proc) */
	int last_key_event;
	int key_event_valid;

	/* feature flags, probed at setup time */
	unsigned int illumination_supported:1;
	unsigned int video_supported:1;
	unsigned int fan_supported:1;
	unsigned int system_event_supported:1;
	unsigned int ntfy_supported:1;
	unsigned int info_supported:1;
	unsigned int tr_backlight_supported:1;

	struct mutex mutex;		/* serializes rfkill HCI access */
};

/* Singleton used by the i8042 filter, which gets no context pointer. */
static struct toshiba_acpi_dev *toshiba_acpi;

static const struct acpi_device_id toshiba_device_ids[] = {
	{"TOS6200", 0},
	{"TOS6208", 0},
	{"TOS1900", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, toshiba_device_ids);

/* Hotkey scancode -> input event map (sparse keymap). */
static const struct key_entry toshiba_acpi_keymap[] = {
	{ KE_KEY, 0x101, { KEY_MUTE } },
	{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
	{ KE_KEY, 0x103, { KEY_ZOOMIN } },
	{ KE_KEY, 0x12c, { KEY_KBDILLUMTOGGLE } },
	{ KE_KEY, 0x139, { KEY_ZOOMRESET } },
	{ KE_KEY, 0x13b, { KEY_COFFEE } },
	{ KE_KEY, 0x13c, { KEY_BATTERY } },
	{ KE_KEY, 0x13d, { KEY_SLEEP } },
	{ KE_KEY, 0x13e, { KEY_SUSPEND } },
	{ KE_KEY, 0x13f, { KEY_SWITCHVIDEOMODE } },
	{ KE_KEY, 0x140, { KEY_BRIGHTNESSDOWN } },
	{ KE_KEY, 0x141, { KEY_BRIGHTNESSUP } },
	{ KE_KEY, 0x142, { KEY_WLAN } },
	{ KE_KEY, 0x143, { KEY_TOUCHPAD_TOGGLE } },
	{ KE_KEY, 0x17f, { KEY_FN } },
	{ KE_KEY, 0xb05, { KEY_PROG2 } },
	{ KE_KEY, 0xb06, { KEY_WWW } },
	{ KE_KEY, 0xb07, { KEY_MAIL } },
	{ KE_KEY, 0xb30, { KEY_STOP } },
	{ KE_KEY, 0xb31, { KEY_PREVIOUSSONG } },
	{ KE_KEY, 0xb32, { KEY_NEXTSONG } },
	{ KE_KEY, 0xb33, { KEY_PLAYPAUSE } },
	{ KE_KEY, 0xb5a, { KEY_MEDIA } },
	{ KE_IGNORE, 0x1430, { KEY_RESERVED } },
	{ KE_END, 0 },
};
/* utility
*/
/*
 * Replace the bits selected by @mask in *@word with @mask scaled by
 * @value; callers pass 0 or 1, clearing or setting the masked bits.
 */
static __inline__ void _set_bit(u32 * word, u32 mask, int value)
{
	u32 cleared = *word & ~mask;

	*word = cleared | (mask * value);
}
/* acpi interface wrappers
*/
/*
 * write_acpi_int - evaluate an ACPI method (by absolute path) with a
 * single integer argument, discarding any result.  Returns 0 or -EIO.
 */
static int write_acpi_int(const char *methodName, int val)
{
	struct acpi_object_list params;
	union acpi_object in_objs[1];
	acpi_status status;

	params.count = ARRAY_SIZE(in_objs);
	params.pointer = in_objs;
	in_objs[0].type = ACPI_TYPE_INTEGER;
	in_objs[0].integer.value = val;
	status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
	return (status == AE_OK) ? 0 : -EIO;
}

/* Perform a raw HCI call. Here we don't care about input or output buffer
 * format.
 *
 * On success the returned package's elements (at most HCI_WORDS of them)
 * are copied into @out; on failure @out is left untouched, so callers
 * must not rely on it unless the call succeeded.
 */
static acpi_status hci_raw(struct toshiba_acpi_dev *dev,
			   const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
{
	struct acpi_object_list params;
	union acpi_object in_objs[HCI_WORDS];
	struct acpi_buffer results;
	union acpi_object out_objs[HCI_WORDS + 1];
	acpi_status status;
	int i;

	params.count = HCI_WORDS;
	params.pointer = in_objs;
	for (i = 0; i < HCI_WORDS; ++i) {
		in_objs[i].type = ACPI_TYPE_INTEGER;
		in_objs[i].integer.value = in[i];
	}

	results.length = sizeof(out_objs);
	results.pointer = out_objs;

	status = acpi_evaluate_object(dev->acpi_dev->handle,
				      (char *)dev->method_hci, &params,
				      &results);
	if ((status == AE_OK) && (out_objs->package.count <= HCI_WORDS)) {
		for (i = 0; i < out_objs->package.count; ++i) {
			out[i] = out_objs->package.elements[i].integer.value;
		}
	}

	return status;
}
/* common hci tasks (get or set one or two value)
*
* In addition to the ACPI status, the HCI system returns a result which
* may be useful (such as "not supported").
*/
/*
 * hci_write1 - HCI register write with one data word.  *@result receives
 * the HCI status word (out[0]) or HCI_FAILURE on ACPI error.
 *
 * Fix: zero-initialize the output buffer.  hci_raw() does not fill it on
 * failure (or when the returned package is oversized), so out[0] could be
 * read uninitialized.
 */
static acpi_status hci_write1(struct toshiba_acpi_dev *dev, u32 reg,
			      u32 in1, u32 *result)
{
	u32 in[HCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 };
	u32 out[HCI_WORDS] = { 0 };
	acpi_status status = hci_raw(dev, in, out);
	*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
	return status;
}
/*
 * hci_read1 - HCI register read returning one data word in *@out1 and
 * the HCI status word in *@result.
 *
 * Fix: zero-initialize the output buffer.  hci_raw() leaves it untouched
 * on failure, yet the old code copied out[2]/out[0] unconditionally,
 * handing uninitialized stack data back to the caller.
 */
static acpi_status hci_read1(struct toshiba_acpi_dev *dev, u32 reg,
			     u32 *out1, u32 *result)
{
	u32 in[HCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
	u32 out[HCI_WORDS] = { 0 };
	acpi_status status = hci_raw(dev, in, out);
	*out1 = out[2];
	*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
	return status;
}
/*
 * hci_write2 - HCI register write with two data words.  *@result receives
 * the HCI status word (out[0]) or HCI_FAILURE on ACPI error.
 *
 * Fix: zero-initialize the output buffer so out[0] is never read
 * uninitialized (hci_raw() may not fill it).
 */
static acpi_status hci_write2(struct toshiba_acpi_dev *dev, u32 reg,
			      u32 in1, u32 in2, u32 *result)
{
	u32 in[HCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
	u32 out[HCI_WORDS] = { 0 };
	acpi_status status = hci_raw(dev, in, out);
	*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
	return status;
}
/*
 * hci_read2 - HCI register read returning two data words; *@out1/*@out2
 * also seed in[2]/in[3] on the way in.  *@result receives the HCI status.
 *
 * Fix: zero-initialize the output buffer.  hci_raw() leaves it untouched
 * on failure, yet the old code copied out[2]/out[3]/out[0]
 * unconditionally, handing uninitialized stack data back to the caller.
 */
static acpi_status hci_read2(struct toshiba_acpi_dev *dev, u32 reg,
			     u32 *out1, u32 *out2, u32 *result)
{
	u32 in[HCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
	u32 out[HCI_WORDS] = { 0 };
	acpi_status status = hci_raw(dev, in, out);
	*out1 = out[2];
	*out2 = out[3];
	*result = (status == AE_OK) ? out[0] : HCI_FAILURE;
	return status;
}
/* Illumination support */
/*
 * toshiba_illumination_available - probe for the illumination LED.
 * Opens the illumination interface (0xf100) and then probes the
 * illumination option (0xf400); reports available only if both succeed.
 *
 * Fix: the status of the second hci_raw() call was assigned but never
 * checked, so a failing 0xf400 probe still reported the device as
 * available.
 */
static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
{
	u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
	u32 out[HCI_WORDS];
	acpi_status status;

	in[0] = 0xf100;
	status = hci_raw(dev, in, out);
	if (ACPI_FAILURE(status)) {
		pr_info("Illumination device not available\n");
		return 0;
	}
	in[0] = 0xf400;
	status = hci_raw(dev, in, out);
	if (ACPI_FAILURE(status)) {
		pr_info("Illumination device not available\n");
		return 0;
	}
	return 1;
}
/*
 * toshiba_illumination_set - LED class .brightness_set callback.  The HCI
 * illumination protocol is a three-step conversation: open (0xf100),
 * set register 0x14e to 1/0 (0xf400), close (0xf200).
 */
static void toshiba_illumination_set(struct led_classdev *cdev,
				     enum led_brightness brightness)
{
	struct toshiba_acpi_dev *dev = container_of(cdev,
			struct toshiba_acpi_dev, led_dev);
	u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
	u32 out[HCI_WORDS];
	acpi_status status;

	/* First request : initialize communication. */
	in[0] = 0xf100;
	status = hci_raw(dev, in, out);
	if (ACPI_FAILURE(status)) {
		pr_info("Illumination device not available\n");
		return;
	}

	if (brightness) {
		/* Switch the illumination on */
		in[0] = 0xf400;
		in[1] = 0x14e;
		in[2] = 1;
		status = hci_raw(dev, in, out);
		if (ACPI_FAILURE(status)) {
			pr_info("ACPI call for illumination failed\n");
			return;
		}
	} else {
		/* Switch the illumination off */
		in[0] = 0xf400;
		in[1] = 0x14e;
		in[2] = 0;
		status = hci_raw(dev, in, out);
		if (ACPI_FAILURE(status)) {
			pr_info("ACPI call for illumination failed.\n");
			return;
		}
	}

	/* Last request : close communication. */
	in[0] = 0xf200;
	in[1] = 0;
	in[2] = 0;
	hci_raw(dev, in, out);
}

/*
 * toshiba_illumination_get - LED class .brightness_get callback.  Same
 * open/query/close conversation; reads register 0x14e (0xf300) and maps
 * any non-zero value to LED_FULL.
 */
static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
{
	struct toshiba_acpi_dev *dev = container_of(cdev,
			struct toshiba_acpi_dev, led_dev);
	u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
	u32 out[HCI_WORDS];
	acpi_status status;
	enum led_brightness result;

	/* First request : initialize communication. */
	in[0] = 0xf100;
	status = hci_raw(dev, in, out);
	if (ACPI_FAILURE(status)) {
		pr_info("Illumination device not available\n");
		return LED_OFF;
	}

	/* Check the illumination */
	in[0] = 0xf300;
	in[1] = 0x14e;
	status = hci_raw(dev, in, out);
	if (ACPI_FAILURE(status)) {
		pr_info("ACPI call for illumination failed.\n");
		return LED_OFF;
	}

	result = out[2] ? LED_FULL : LED_OFF;

	/* Last request : close communication. */
	in[0] = 0xf200;
	in[1] = 0;
	in[2] = 0;
	hci_raw(dev, in, out);

	return result;
}
/* Bluetooth rfkill handlers */
/* Bluetooth rfkill handlers */

/* Query whether a Bluetooth module is fitted (HCI_WIRELESS word). */
static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
{
	u32 hci_result;
	u32 value, value2;

	value = 0;
	value2 = 0;
	hci_read2(dev, HCI_WIRELESS, &value, &value2, &hci_result);
	if (hci_result == HCI_SUCCESS)
		*present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;

	return hci_result;
}

/* Query the hardware wireless kill switch state (true = radio allowed). */
static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
{
	u32 hci_result;
	u32 value, value2;

	value = 0;
	value2 = 0x0001;
	hci_read2(dev, HCI_WIRELESS, &value, &value2, &hci_result);

	*radio_state = value & HCI_WIRELESS_KILL_SWITCH;
	return hci_result;
}

/*
 * rfkill .set_block: power/attach the BT module when unblocked.  When
 * the hardware kill switch is off, we can't do anything and report
 * success without touching the device.
 */
static int bt_rfkill_set_block(void *data, bool blocked)
{
	struct toshiba_acpi_dev *dev = data;
	u32 result1, result2;
	u32 value;
	int err;
	bool radio_state;

	value = (blocked == false);

	mutex_lock(&dev->mutex);
	if (hci_get_radio_state(dev, &radio_state) != HCI_SUCCESS) {
		err = -EIO;
		goto out;
	}

	if (!radio_state) {
		err = 0;
		goto out;
	}

	hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
	hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);

	if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
		err = -EIO;
	else
		err = 0;
out:
	mutex_unlock(&dev->mutex);
	return err;
}

/*
 * rfkill .poll: mirror the hardware kill switch into rfkill's hw state;
 * if the switch is thrown, also soft-block the module.
 */
static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
{
	bool new_rfk_state;
	bool value;
	u32 hci_result;
	struct toshiba_acpi_dev *dev = data;

	mutex_lock(&dev->mutex);

	hci_result = hci_get_radio_state(dev, &value);
	if (hci_result != HCI_SUCCESS) {
		/* Can't do anything useful */
		mutex_unlock(&dev->mutex);
		return;
	}

	new_rfk_state = value;

	mutex_unlock(&dev->mutex);

	if (rfkill_set_hw_state(rfkill, !new_rfk_state))
		bt_rfkill_set_block(data, true);
}

static const struct rfkill_ops toshiba_rfk_ops = {
	.set_block = bt_rfkill_set_block,
	.poll = bt_rfkill_poll,
};
/* Transflective backlight: HCI stores 0 = enabled, hence the inversions. */
static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
{
	u32 hci_result;
	u32 status;

	hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
	*enabled = !status;
	return hci_result == HCI_SUCCESS ? 0 : -EIO;
}

static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
{
	u32 hci_result;
	u32 value = !enable;

	hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
	return hci_result == HCI_SUCCESS ? 0 : -EIO;
}

/* /proc/acpi/toshiba directory, created at module init. */
static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;

/*
 * Read the LCD brightness.  When transflective mode is supported, level 0
 * means "transflective on" and the HCI levels are shifted up by one.
 */
static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
{
	u32 hci_result;
	u32 value;
	int brightness = 0;

	if (dev->tr_backlight_supported) {
		bool enabled;
		int ret = get_tr_backlight_status(dev, &enabled);
		if (ret)
			return ret;
		if (enabled)
			return 0;
		brightness++;
	}

	hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
	if (hci_result == HCI_SUCCESS)
		return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);

	return -EIO;
}

/* backlight class .get_brightness callback */
static int get_lcd_brightness(struct backlight_device *bd)
{
	struct toshiba_acpi_dev *dev = bl_get_data(bd);
	return __get_lcd_brightness(dev);
}

/* /proc lcd read: report current brightness and the number of levels. */
static int lcd_proc_show(struct seq_file *m, void *v)
{
	struct toshiba_acpi_dev *dev = m->private;
	int value;
	int levels;

	if (!dev->backlight_dev)
		return -ENODEV;

	levels = dev->backlight_dev->props.max_brightness + 1;
	value = get_lcd_brightness(dev->backlight_dev);
	if (value >= 0) {
		seq_printf(m, "brightness: %d\n", value);
		seq_printf(m, "brightness_levels: %d\n", levels);
		return 0;
	}

	pr_err("Error reading LCD brightness\n");
	return -EIO;
}

static int lcd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, lcd_proc_show, PDE_DATA(inode));
}

/* Inverse of __get_lcd_brightness: level 0 switches transflective on. */
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
{
	u32 hci_result;

	if (dev->tr_backlight_supported) {
		bool enable = !value;
		int ret = set_tr_backlight_status(dev, enable);
		if (ret)
			return ret;
		if (value)
			value--;
	}

	value = value << HCI_LCD_BRIGHTNESS_SHIFT;
	hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
	return hci_result == HCI_SUCCESS ? 0 : -EIO;
}

/* backlight class .update_status callback */
static int set_lcd_status(struct backlight_device *bd)
{
	struct toshiba_acpi_dev *dev = bl_get_data(bd);
	return set_lcd_brightness(dev, bd->props.brightness);
}

/* /proc lcd write: parse "brightness : N" and apply it if in range. */
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
	char cmd[42];
	size_t len;
	int value;
	int ret;
	int levels = dev->backlight_dev->props.max_brightness + 1;

	len = min(count, sizeof(cmd) - 1);
	if (copy_from_user(cmd, buf, len))
		return -EFAULT;
	cmd[len] = '\0';

	if (sscanf(cmd, " brightness : %i", &value) == 1 &&
	    value >= 0 && value < levels) {
		ret = set_lcd_brightness(dev, value);
		if (ret == 0)
			ret = count;
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static const struct file_operations lcd_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= lcd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= lcd_proc_write,
};
/* Read the HCI video output routing word (LCD/CRT/TV bits). */
static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status)
{
	u32 hci_result;

	hci_read1(dev, HCI_VIDEO_OUT, status, &hci_result);
	return hci_result == HCI_SUCCESS ? 0 : -EIO;
}

/* /proc video read: decode the routing word into lcd/crt/tv flags. */
static int video_proc_show(struct seq_file *m, void *v)
{
	struct toshiba_acpi_dev *dev = m->private;
	u32 value;
	int ret;

	ret = get_video_status(dev, &value);
	if (!ret) {
		int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
		int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
		int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
		seq_printf(m, "lcd_out: %d\n", is_lcd);
		seq_printf(m, "crt_out: %d\n", is_crt);
		seq_printf(m, "tv_out: %d\n", is_tv);
	}

	return ret;
}

static int video_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, video_proc_show, PDE_DATA(inode));
}

/*
 * /proc video write: parse any of "lcd_out:N", "crt_out:N", "tv_out:N"
 * (semicolon-delimited, unknown fields ignored) and reroute video via
 * the dedicated DSSX ACPI method -- HCI_VIDEO_OUT is only used to read
 * the current state.
 */
static ssize_t video_proc_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
	char *cmd, *buffer;
	int ret;
	int value;
	int remain = count;
	int lcd_out = -1;
	int crt_out = -1;
	int tv_out = -1;
	u32 video_out;

	cmd = kmalloc(count + 1, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	if (copy_from_user(cmd, buf, count)) {
		kfree(cmd);
		return -EFAULT;
	}
	cmd[count] = '\0';

	buffer = cmd;

	/* scan expression. Multiple expressions may be delimited with ;
	 *
	 * NOTE: to keep scanning simple, invalid fields are ignored
	 */
	while (remain) {
		if (sscanf(buffer, " lcd_out : %i", &value) == 1)
			lcd_out = value & 1;
		else if (sscanf(buffer, " crt_out : %i", &value) == 1)
			crt_out = value & 1;
		else if (sscanf(buffer, " tv_out : %i", &value) == 1)
			tv_out = value & 1;
		/* advance to one character past the next ; */
		do {
			++buffer;
			--remain;
		}
		while (remain && *(buffer - 1) != ';');
	}

	kfree(cmd);

	ret = get_video_status(dev, &video_out);
	if (!ret) {
		unsigned int new_video_out = video_out;
		if (lcd_out != -1)
			_set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
		if (crt_out != -1)
			_set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out);
		if (tv_out != -1)
			_set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
		/* To avoid unnecessary video disruption, only write the new
		 * video setting if something changed. */
		if (new_video_out != video_out)
			ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
	}

	return ret ? ret : count;
}

static const struct file_operations video_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= video_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= video_proc_write,
};
/* Read the HCI fan register (non-zero = fan running). */
static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status)
{
	u32 hci_result;

	hci_read1(dev, HCI_FAN, status, &hci_result);
	return hci_result == HCI_SUCCESS ? 0 : -EIO;
}

/* /proc fan read: report running state and the user force flag. */
static int fan_proc_show(struct seq_file *m, void *v)
{
	struct toshiba_acpi_dev *dev = m->private;
	int ret;
	u32 value;

	ret = get_fan_status(dev, &value);
	if (!ret) {
		seq_printf(m, "running: %d\n", (value > 0));
		seq_printf(m, "force_on: %d\n", dev->force_fan);
	}

	return ret;
}

static int fan_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, fan_proc_show, PDE_DATA(inode));
}

/* /proc fan write: parse "force_on : 0|1" and program the fan register. */
static ssize_t fan_proc_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
	char cmd[42];
	size_t len;
	int value;
	u32 hci_result;

	len = min(count, sizeof(cmd) - 1);
	if (copy_from_user(cmd, buf, len))
		return -EFAULT;
	cmd[len] = '\0';

	if (sscanf(cmd, " force_on : %i", &value) == 1 &&
	    value >= 0 && value <= 1) {
		hci_write1(dev, HCI_FAN, value, &hci_result);
		if (hci_result != HCI_SUCCESS)
			return -EIO;
		else
			dev->force_fan = value;
	} else {
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fan_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= fan_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= fan_proc_write,
};
/*
 * /proc keys read: on legacy (system-event) machines, poll the
 * HCI_SYSTEM_EVENT FIFO for a pending hotkey and cache it.  HCI_EMPTY
 * means nothing pending; HCI_NOT_SUPPORTED triggers the re-enable
 * workaround for firmware that spontaneously disables events.
 */
static int keys_proc_show(struct seq_file *m, void *v)
{
	struct toshiba_acpi_dev *dev = m->private;
	u32 hci_result;
	u32 value;

	if (!dev->key_event_valid && dev->system_event_supported) {
		hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
		if (hci_result == HCI_SUCCESS) {
			dev->key_event_valid = 1;
			dev->last_key_event = value;
		} else if (hci_result == HCI_EMPTY) {
			/* better luck next time */
		} else if (hci_result == HCI_NOT_SUPPORTED) {
			/* This is a workaround for an unresolved issue on
			 * some machines where system events sporadically
			 * become disabled. */
			hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
			pr_notice("Re-enabled hotkeys\n");
		} else {
			pr_err("Error reading hotkey status\n");
			return -EIO;
		}
	}

	seq_printf(m, "hotkey_ready: %d\n", dev->key_event_valid);
	seq_printf(m, "hotkey: 0x%04x\n", dev->last_key_event);
	return 0;
}

static int keys_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, keys_proc_show, PDE_DATA(inode));
}

/* /proc keys write: "hotkey_ready : 0" acknowledges the cached event. */
static ssize_t keys_proc_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
	char cmd[42];
	size_t len;
	int value;

	len = min(count, sizeof(cmd) - 1);
	if (copy_from_user(cmd, buf, len))
		return -EFAULT;
	cmd[len] = '\0';

	if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) {
		dev->key_event_valid = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static const struct file_operations keys_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= keys_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= keys_proc_write,
};

/* /proc version: driver and proc-interface version strings. */
static int version_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "driver: %s\n", TOSHIBA_ACPI_VERSION);
	seq_printf(m, "proc_interface: %d\n", PROC_INTERFACE_VERSION);
	return 0;
}

static int version_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, version_proc_show, PDE_DATA(inode));
}

static const struct file_operations version_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= version_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* proc and module init
*/
#define PROC_TOSHIBA "toshiba"
/*
 * Create the /proc/acpi/toshiba entries for the features this machine
 * supports; "version" is always present.
 */
static void create_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
{
	if (dev->backlight_dev)
		proc_create_data("lcd", S_IRUGO | S_IWUSR, toshiba_proc_dir,
				 &lcd_proc_fops, dev);
	if (dev->video_supported)
		proc_create_data("video", S_IRUGO | S_IWUSR, toshiba_proc_dir,
				 &video_proc_fops, dev);
	if (dev->fan_supported)
		proc_create_data("fan", S_IRUGO | S_IWUSR, toshiba_proc_dir,
				 &fan_proc_fops, dev);
	if (dev->hotkey_dev)
		proc_create_data("keys", S_IRUGO | S_IWUSR, toshiba_proc_dir,
				 &keys_proc_fops, dev);
	proc_create_data("version", S_IRUGO, toshiba_proc_dir,
			 &version_proc_fops, dev);
}

/* Mirror image of create_toshiba_proc_entries(): same conditions. */
static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
{
	if (dev->backlight_dev)
		remove_proc_entry("lcd", toshiba_proc_dir);
	if (dev->video_supported)
		remove_proc_entry("video", toshiba_proc_dir);
	if (dev->fan_supported)
		remove_proc_entry("fan", toshiba_proc_dir);
	if (dev->hotkey_dev)
		remove_proc_entry("keys", toshiba_proc_dir);
	remove_proc_entry("version", toshiba_proc_dir);
}
static const struct backlight_ops toshiba_backlight_data = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = get_lcd_brightness,
	.update_status  = set_lcd_status,
};

/*
 * i8042 keyboard filter for TOS1900 machines: when the Fn key scancode
 * is seen on the keyboard data stream, swallow it and schedule the NTFY
 * work to synthesize the hotkey notification the firmware failed to send.
 * Uses the module-level 'toshiba_acpi' singleton (no context arg here).
 */
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
				      struct serio *port)
{
	if (str & 0x20)
		return false;

	if (unlikely(data == 0xe0))
		return false;

	if ((data & 0x7f) == TOS1900_FN_SCAN) {
		schedule_work(&toshiba_acpi->hotkey_work);
		return true;
	}

	return false;
}

/* Workqueue body: poke the EC's NTFY method to trigger the hotkey SCI. */
static void toshiba_acpi_hotkey_work(struct work_struct *work)
{
	acpi_handle ec_handle = ec_get_handle();
	acpi_status status;

	if (!ec_handle)
		return;

	status = acpi_evaluate_object(ec_handle, "NTFY", NULL, NULL);
	if (ACPI_FAILURE(status))
		pr_err("ACPI NTFY method execution failed\n");
}

/*
 * Returns hotkey scancode, or < 0 on failure.
 */
static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
{
	struct acpi_buffer buf;
	union acpi_object out_obj;
	acpi_status status;

	buf.pointer = &out_obj;
	buf.length = sizeof(out_obj);

	status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO",
				      NULL, &buf);
	if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) {
		pr_err("ACPI INFO method execution failed\n");
		return -EIO;
	}

	return out_obj.integer.value;
}

/* Translate a hotkey scancode into an input event via the sparse keymap. */
static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
				       int scancode)
{
	if (scancode == 0x100)
		return;

	/* act on key press; ignore key release */
	if (scancode & 0x80)
		return;

	if (!sparse_keymap_report_event(dev->hotkey_dev, scancode, 1, true))
		pr_info("Unknown key %x\n", scancode);
}
/*
 * toshiba_acpi_setup_keyboard - allocate and register the hotkey input
 * device.  Installs the i8042 Fn-key filter when the EC provides NTFY,
 * picks the hotkey query interface (INFO method preferred, else the
 * legacy HCI system-event FIFO), enables hotkeys via ENAB, and finally
 * turns on hotkey events.
 *
 * Fix: when neither query interface is found, the function jumped to
 * err_remove_filter with 'error' still 0 from the earlier successful
 * calls, tearing everything down yet reporting success to the caller.
 * Set -ENODEV on that path.
 */
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
	acpi_status status;
	acpi_handle ec_handle, handle;
	int error;
	u32 hci_result;

	dev->hotkey_dev = input_allocate_device();
	if (!dev->hotkey_dev) {
		pr_info("Unable to register input device\n");
		return -ENOMEM;
	}

	dev->hotkey_dev->name = "Toshiba input device";
	dev->hotkey_dev->phys = "toshiba_acpi/input0";
	dev->hotkey_dev->id.bustype = BUS_HOST;

	error = sparse_keymap_setup(dev->hotkey_dev, toshiba_acpi_keymap, NULL);
	if (error)
		goto err_free_dev;

	/*
	 * For some machines the SCI responsible for providing hotkey
	 * notification doesn't fire. We can trigger the notification
	 * whenever the Fn key is pressed using the NTFY method, if
	 * supported, so if it's present set up an i8042 key filter
	 * for this purpose.
	 */
	status = AE_ERROR;
	ec_handle = ec_get_handle();
	if (ec_handle)
		status = acpi_get_handle(ec_handle, "NTFY", &handle);

	if (ACPI_SUCCESS(status)) {
		INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);

		error = i8042_install_filter(toshiba_acpi_i8042_filter);
		if (error) {
			pr_err("Error installing key filter\n");
			goto err_free_keymap;
		}

		dev->ntfy_supported = 1;
	}

	/*
	 * Determine hotkey query interface. Prefer using the INFO
	 * method when it is available.
	 */
	status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle);
	if (ACPI_SUCCESS(status)) {
		dev->info_supported = 1;
	} else {
		hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
		if (hci_result == HCI_SUCCESS)
			dev->system_event_supported = 1;
	}

	if (!dev->info_supported && !dev->system_event_supported) {
		pr_warn("No hotkey query interface found\n");
		/* was: fell through with error == 0, returning success */
		error = -ENODEV;
		goto err_remove_filter;
	}

	status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB", NULL, NULL);
	if (ACPI_FAILURE(status)) {
		pr_info("Unable to enable hotkeys\n");
		error = -ENODEV;
		goto err_remove_filter;
	}

	error = input_register_device(dev->hotkey_dev);
	if (error) {
		pr_info("Unable to register input device\n");
		goto err_remove_filter;
	}

	hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &hci_result);
	return 0;

err_remove_filter:
	if (dev->ntfy_supported)
		i8042_remove_filter(toshiba_acpi_i8042_filter);
err_free_keymap:
	sparse_keymap_free(dev->hotkey_dev);
err_free_dev:
	input_free_device(dev->hotkey_dev);
	dev->hotkey_dev = NULL;
	return error;
}
/*
 * toshiba_acpi_setup_backlight - probe and register the platform backlight
 *
 * Probes the brightness method by reading the current level and writing it
 * straight back; if either step fails the machine has no usable backlight
 * control and we return 0 so the rest of the driver still loads.  Also
 * records transflective-backlight support, which adds one extra brightness
 * level (level 0 then switches to transflective mode).
 */
static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
{
	struct backlight_properties props;
	int brightness;
	int ret;
	bool enabled;

	/*
	 * Some machines don't support the backlight methods at all, and
	 * others support it read-only. Either of these is pretty useless,
	 * so only register the backlight device if the backlight method
	 * supports both reads and writes.
	 */
	brightness = __get_lcd_brightness(dev);
	if (brightness < 0)
		return 0;	/* read failed: no backlight support, not fatal */
	ret = set_lcd_brightness(dev, brightness);
	if (ret) {
		pr_debug("Backlight method is read-only, disabling backlight support\n");
		return 0;
	}

	/* Determine whether or not BIOS supports transflective backlight */
	ret = get_tr_backlight_status(dev, &enabled);
	dev->tr_backlight_supported = !ret;

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_PLATFORM;
	props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;

	/* adding an extra level and having 0 change to transflective mode */
	if (dev->tr_backlight_supported)
		props.max_brightness++;

	dev->backlight_dev = backlight_device_register("toshiba",
						       &dev->acpi_dev->dev,
						       dev,
						       &toshiba_backlight_data,
						       &props);
	if (IS_ERR(dev->backlight_dev)) {
		ret = PTR_ERR(dev->backlight_dev);
		pr_err("Could not register toshiba backlight device\n");
		dev->backlight_dev = NULL;
		return ret;
	}

	/* start from the level the firmware reported above */
	dev->backlight_dev->props.brightness = brightness;
	return 0;
}
/*
 * toshiba_acpi_remove - tear down everything toshiba_acpi_add() set up
 *
 * Also reused as the error-unwind path of toshiba_acpi_add(), so every
 * step is guarded by its *_supported flag or a non-NULL pointer and must
 * tolerate a partially initialised device.
 */
static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
{
	struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);

	remove_toshiba_proc_entries(dev);

	/* the i8042 filter schedules hotkey_work: detach it before flushing */
	if (dev->ntfy_supported) {
		i8042_remove_filter(toshiba_acpi_i8042_filter);
		cancel_work_sync(&dev->hotkey_work);
	}

	if (dev->hotkey_dev) {
		input_unregister_device(dev->hotkey_dev);
		sparse_keymap_free(dev->hotkey_dev);
	}

	if (dev->bt_rfk) {
		rfkill_unregister(dev->bt_rfk);
		rfkill_destroy(dev->bt_rfk);
	}

	if (dev->backlight_dev)
		backlight_device_unregister(dev->backlight_dev);

	if (dev->illumination_supported)
		led_classdev_unregister(&dev->led_dev);

	/* drop the single-instance marker used by toshiba_acpi_add() */
	if (toshiba_acpi)
		toshiba_acpi = NULL;

	kfree(dev);

	return 0;
}
/*
 * find_hci_method - locate the ACPI entry point of the HCI interface
 *
 * Newer firmware exposes "GHCI", some models use "SPFC" instead.  Return
 * the name of whichever method exists under @handle, or NULL when the
 * device has no known HCI entry point.
 */
static const char *find_hci_method(acpi_handle handle)
{
	acpi_handle probe;

	if (ACPI_SUCCESS(acpi_get_handle(handle, "GHCI", &probe)))
		return "GHCI";

	if (ACPI_SUCCESS(acpi_get_handle(handle, "SPFC", &probe)))
		return "SPFC";

	return NULL;
}
static int toshiba_acpi_add(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev;
const char *hci_method;
u32 dummy;
bool bt_present;
int ret = 0;
if (toshiba_acpi)
return -EBUSY;
pr_info("Toshiba Laptop ACPI Extras version %s\n",
TOSHIBA_ACPI_VERSION);
hci_method = find_hci_method(acpi_dev->handle);
if (!hci_method) {
pr_err("HCI interface not found\n");
return -ENODEV;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->acpi_dev = acpi_dev;
dev->method_hci = hci_method;
acpi_dev->driver_data = dev;
if (toshiba_acpi_setup_keyboard(dev))
pr_info("Unable to activate hotkeys\n");
mutex_init(&dev->mutex);
ret = toshiba_acpi_setup_backlight(dev);
if (ret)
goto error;
/* Register rfkill switch for Bluetooth */
if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
dev->bt_rfk = rfkill_alloc("Toshiba Bluetooth",
&acpi_dev->dev,
RFKILL_TYPE_BLUETOOTH,
&toshiba_rfk_ops,
dev);
if (!dev->bt_rfk) {
pr_err("unable to allocate rfkill device\n");
ret = -ENOMEM;
goto error;
}
ret = rfkill_register(dev->bt_rfk);
if (ret) {
pr_err("unable to register rfkill device\n");
rfkill_destroy(dev->bt_rfk);
goto error;
}
}
if (toshiba_illumination_available(dev)) {
dev->led_dev.name = "toshiba::illumination";
dev->led_dev.max_brightness = 1;
dev->led_dev.brightness_set = toshiba_illumination_set;
dev->led_dev.brightness_get = toshiba_illumination_get;
if (!led_classdev_register(&acpi_dev->dev, &dev->led_dev))
dev->illumination_supported = 1;
}
/* Determine whether or not BIOS supports fan and video interfaces */
ret = get_video_status(dev, &dummy);
dev->video_supported = !ret;
ret = get_fan_status(dev, &dummy);
dev->fan_supported = !ret;
create_toshiba_proc_entries(dev);
toshiba_acpi = dev;
return 0;
error:
toshiba_acpi_remove(acpi_dev);
return ret;
}
/*
 * toshiba_acpi_notify - ACPI notify handler; event 0x80 signals hotkey
 * activity
 *
 * Hotkeys are fetched either through the INFO query interface or, on
 * older firmware, by draining the HCI_SYSTEM_EVENT register until it
 * reports HCI_EMPTY.  HCI_NOT_SUPPORTED from that register means the
 * firmware sporadically disabled event reporting, so it is switched back
 * on before retrying; any other failure just consumes one of the retries.
 */
static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
	struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
	u32 hci_result, value;
	int retries = 3;
	int scancode;

	if (event != 0x80)
		return;

	if (dev->info_supported) {
		scancode = toshiba_acpi_query_hotkey(dev);
		if (scancode < 0)
			pr_err("Failed to query hotkey event\n");
		else if (scancode != 0)
			toshiba_acpi_report_hotkey(dev, scancode);
	} else if (dev->system_event_supported) {
		do {
			hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
			switch (hci_result) {
			case HCI_SUCCESS:
				toshiba_acpi_report_hotkey(dev, (int)value);
				break;
			case HCI_NOT_SUPPORTED:
				/*
				 * This is a workaround for an unresolved
				 * issue on some machines where system events
				 * sporadically become disabled.
				 */
				hci_write1(dev, HCI_SYSTEM_EVENT, 1,
					   &hci_result);
				pr_notice("Re-enabled hotkeys\n");
				/* fall through */
			default:
				retries--;
				break;
			}
		} while (retries && hci_result != HCI_EMPTY);
	}
}
#ifdef CONFIG_PM_SLEEP
/* Disable firmware hotkey events while the machine sleeps. */
static int toshiba_acpi_suspend(struct device *device)
{
	struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
	u32 result;

	if (!dev->hotkey_dev)
		return 0;

	hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE, &result);
	return 0;
}
/* Re-enable firmware hotkey events after resume. */
static int toshiba_acpi_resume(struct device *device)
{
	struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
	u32 result;

	if (!dev->hotkey_dev)
		return 0;

	hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &result);
	return 0;
}
#endif
/* PM ops: hotkey reporting is turned off across suspend/resume */
static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
			 toshiba_acpi_suspend, toshiba_acpi_resume);

/* ACPI driver glue; ACPI_DRIVER_ALL_NOTIFY_EVENTS routes every notify
 * (not just device-specific ones) to toshiba_acpi_notify(). */
static struct acpi_driver toshiba_acpi_driver = {
	.name	= "Toshiba ACPI driver",
	.owner	= THIS_MODULE,
	.ids	= toshiba_device_ids,
	.flags	= ACPI_DRIVER_ALL_NOTIFY_EVENTS,
	.ops	= {
		.add		= toshiba_acpi_add,
		.remove		= toshiba_acpi_remove,
		.notify		= toshiba_acpi_notify,
	},
	.drv.pm	= &toshiba_acpi_pm,
};
/*
 * Module init: create the /proc/acpi/toshiba directory and register the
 * ACPI driver, undoing the proc entry if registration fails.
 */
static int __init toshiba_acpi_init(void)
{
	int err;

	/*
	 * Machines with this WMI guid aren't supported due to bugs in
	 * their AML. This check relies on wmi initializing before
	 * toshiba_acpi to guarantee guids have been identified.
	 */
	if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID))
		return -ENODEV;

	toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
	if (!toshiba_proc_dir) {
		pr_err("Unable to create proc dir " PROC_TOSHIBA "\n");
		return -ENODEV;
	}

	err = acpi_bus_register_driver(&toshiba_acpi_driver);
	if (err) {
		pr_err("Failed to register ACPI driver: %d\n", err);
		remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
	}

	return err;
}
/* Module exit: unregister the driver, then drop the proc directory. */
static void __exit toshiba_acpi_exit(void)
{
	acpi_bus_unregister_driver(&toshiba_acpi_driver);
	if (toshiba_proc_dir)
		remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
}
module_init(toshiba_acpi_init);
module_exit(toshiba_acpi_exit);
| gpl-2.0 |
bsmitty83/B-Team-Sense | kernel/ksysfs.c | 2751 | 5248 | /*
* kernel/ksysfs.c - sysfs attributes in /sys/kernel, which
* are not related to any other subsystem
*
* Copyright (C) 2004 Kay Sievers <kay.sievers@vrfy.org>
*
* This file is release under the GPLv2
*
*/
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/capability.h>
/* Shorthand for declaring read-only / read-write /sys/kernel attributes;
 * expands to a kobj_attribute named <attr>_attr wired to <attr>_show
 * (and <attr>_store for the RW variant, mode 0644). */
#define KERNEL_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)
#if defined(CONFIG_HOTPLUG)
/* current uevent sequence number */
/* Report the current uevent sequence number. */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	unsigned long long seqnum = uevent_seqnum;

	return sprintf(buf, "%llu\n", seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);
/* uevent helper program, used during early boot */
/* Show the path of the uevent helper program (used during early boot). */
static ssize_t uevent_helper_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	const char *path = uevent_helper;

	return sprintf(buf, "%s\n", path);
}
/*
 * Set the uevent helper path.  Input longer than the helper buffer is
 * rejected (with the historical -ENOENT); a single trailing newline from
 * "echo" is stripped.
 */
static ssize_t uevent_helper_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	if (count >= UEVENT_HELPER_PATH_LEN)
		return -ENOENT;

	memcpy(uevent_helper, buf, count);
	uevent_helper[count] = '\0';

	/* drop the newline sysfs writers usually append */
	if (count && uevent_helper[count - 1] == '\n')
		uevent_helper[count - 1] = '\0';

	return count;
}
KERNEL_ATTR_RW(uevent_helper);
#endif
#ifdef CONFIG_PROFILING
/* Report whether kernel profiling is enabled (value of prof_on). */
static ssize_t profiling_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	int enabled = prof_on;

	return sprintf(buf, "%d\n", enabled);
}
/*
 * Enable kernel profiling at runtime.  The written string is passed to
 * profile_setup() exactly as a "profile=" boot parameter would be.
 * Profiling can only be turned on once per boot (-EEXIST afterwards);
 * there is no way to disable it again from here.
 */
static ssize_t profiling_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	int ret;

	if (prof_on)
		return -EEXIST;
	/*
	 * This eventually calls into get_option() which
	 * has a ton of callers and is not const. It is
	 * easiest to cast it away here.
	 */
	profile_setup((char *)buf);
	ret = profile_init();
	if (ret)
		return ret;
	ret = create_proc_profile();
	if (ret)
		return ret;
	return count;
}
KERNEL_ATTR_RW(profiling);
#endif
#ifdef CONFIG_KEXEC
static ssize_t kexec_loaded_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);
static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", !!kexec_crash_image);
}
KERNEL_ATTR_RO(kexec_crash_loaded);
static ssize_t kexec_crash_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%zu\n", crash_get_memory_size());
}
/*
 * Shrink the crashkernel reservation to the requested size.  The input is
 * parsed as an unsigned long (any base, per strict_strtoul); on success
 * the full write count is returned, otherwise the error from
 * crash_shrink_memory() or -EINVAL for unparsable input.
 */
static ssize_t kexec_crash_size_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long requested;
	int err;

	if (strict_strtoul(buf, 0, &requested))
		return -EINVAL;

	err = crash_shrink_memory(requested);
	if (err < 0)
		return err;

	return count;
}
KERNEL_ATTR_RW(kexec_crash_size);
/* Physical address and maximum size of the vmcoreinfo note, for kdump. */
static ssize_t vmcoreinfo_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	unsigned long note_paddr = paddr_vmcoreinfo_note();
	unsigned int note_max = (unsigned int)vmcoreinfo_max_size;

	return sprintf(buf, "%lx %x\n", note_paddr, note_max);
}
KERNEL_ATTR_RO(vmcoreinfo);
#endif /* CONFIG_KEXEC */
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	int enabled = file_caps_enabled;

	return sprintf(buf, "%d\n", enabled);
}
KERNEL_ATTR_RO(fscaps);
/*
 * Make /sys/kernel/notes give the raw contents of our kernel .notes section.
 */
/* weak so the symbols resolve to 0 if the linker script omits the section */
extern const void __start_notes __attribute__((weak));
extern const void __stop_notes __attribute__((weak));
/* byte length of .notes (void-pointer arithmetic is a GCC extension) */
#define	notes_size (&__stop_notes - &__start_notes)
/* Copy the requested window of the kernel .notes section into @buf;
 * sysfs has already clamped @off/@count to the attribute size. */
static ssize_t notes_read(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr,
			  char *buf, loff_t off, size_t count)
{
	const char *src = (const char *)&__start_notes + off;

	memcpy(buf, src, count);
	return count;
}
static struct bin_attribute notes_attr = {
.attr = {
.name = "notes",
.mode = S_IRUGO,
},
.read = ¬es_read,
};
/* /sys/kernel kobject; exported so other subsystems can parent entries to it */
struct kobject *kernel_kobj;
EXPORT_SYMBOL_GPL(kernel_kobj);

/* default attributes of /sys/kernel; optional entries depend on config */
static struct attribute * kernel_attrs[] = {
	&fscaps_attr.attr,
#if defined(CONFIG_HOTPLUG)
	&uevent_seqnum_attr.attr,
	&uevent_helper_attr.attr,
#endif
#ifdef CONFIG_PROFILING
	&profiling_attr.attr,
#endif
#ifdef CONFIG_KEXEC
	&kexec_loaded_attr.attr,
	&kexec_crash_loaded_attr.attr,
	&kexec_crash_size_attr.attr,
	&vmcoreinfo_attr.attr,
#endif
	NULL
};

static struct attribute_group kernel_attr_group = {
	.attrs = kernel_attrs,
};
/*
 * ksysfs_init - create /sys/kernel and populate its attribute group,
 * plus the binary "notes" file when the .notes section is non-empty.
 * Unwinds with goto labels in reverse order of setup on failure.
 *
 * Fix: "¬es_attr" was mojibake for "&notes_attr" ("&n" swallowed by
 * HTML-entity decoding of "&not;") and did not compile.
 */
static int __init ksysfs_init(void)
{
	int error;

	kernel_kobj = kobject_create_and_add("kernel", NULL);
	if (!kernel_kobj) {
		error = -ENOMEM;
		goto exit;
	}
	error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
	if (error)
		goto kset_exit;

	if (notes_size > 0) {
		notes_attr.size = notes_size;
		error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
		if (error)
			goto group_exit;
	}

	return 0;

group_exit:
	sysfs_remove_group(kernel_kobj, &kernel_attr_group);
kset_exit:
	kobject_put(kernel_kobj);
exit:
	return error;
}

core_initcall(ksysfs_init);
| gpl-2.0 |
Ozric/kernel_asus_OzricErnel | drivers/media/dvb/frontends/tda1004x.c | 3263 | 39791 | /*
Driver for Philips tda1004xh OFDM Demodulator
(c) 2003, 2004 Andrew de Quincey & Robert Schlabbach
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* This driver needs external firmware. Please use the commands
* "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10045",
* "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to
* download/extract them, and then copy them to /usr/lib/hotplug/firmware
* or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define TDA10045_DEFAULT_FIRMWARE "dvb-fe-tda10045.fw"
#define TDA10046_DEFAULT_FIRMWARE "dvb-fe-tda10046.fw"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "tda1004x.h"
/* debug flag; dprintk() emits KERN_DEBUG messages only when it is set */
static int debug;
#define dprintk(args...) \
	do { \
		if (debug) printk(KERN_DEBUG "tda1004x: " args); \
	} while (0)
#define TDA1004X_CHIPID 0x00
#define TDA1004X_AUTO 0x01
#define TDA1004X_IN_CONF1 0x02
#define TDA1004X_IN_CONF2 0x03
#define TDA1004X_OUT_CONF1 0x04
#define TDA1004X_OUT_CONF2 0x05
#define TDA1004X_STATUS_CD 0x06
#define TDA1004X_CONFC4 0x07
#define TDA1004X_DSSPARE2 0x0C
#define TDA10045H_CODE_IN 0x0D
#define TDA10045H_FWPAGE 0x0E
#define TDA1004X_SCAN_CPT 0x10
#define TDA1004X_DSP_CMD 0x11
#define TDA1004X_DSP_ARG 0x12
#define TDA1004X_DSP_DATA1 0x13
#define TDA1004X_DSP_DATA2 0x14
#define TDA1004X_CONFADC1 0x15
#define TDA1004X_CONFC1 0x16
#define TDA10045H_S_AGC 0x1a
#define TDA10046H_AGC_TUN_LEVEL 0x1a
#define TDA1004X_SNR 0x1c
#define TDA1004X_CONF_TS1 0x1e
#define TDA1004X_CONF_TS2 0x1f
#define TDA1004X_CBER_RESET 0x20
#define TDA1004X_CBER_MSB 0x21
#define TDA1004X_CBER_LSB 0x22
#define TDA1004X_CVBER_LUT 0x23
#define TDA1004X_VBER_MSB 0x24
#define TDA1004X_VBER_MID 0x25
#define TDA1004X_VBER_LSB 0x26
#define TDA1004X_UNCOR 0x27
#define TDA10045H_CONFPLL_P 0x2D
#define TDA10045H_CONFPLL_M_MSB 0x2E
#define TDA10045H_CONFPLL_M_LSB 0x2F
#define TDA10045H_CONFPLL_N 0x30
#define TDA10046H_CONFPLL1 0x2D
#define TDA10046H_CONFPLL2 0x2F
#define TDA10046H_CONFPLL3 0x30
#define TDA10046H_TIME_WREF1 0x31
#define TDA10046H_TIME_WREF2 0x32
#define TDA10046H_TIME_WREF3 0x33
#define TDA10046H_TIME_WREF4 0x34
#define TDA10046H_TIME_WREF5 0x35
#define TDA10045H_UNSURW_MSB 0x31
#define TDA10045H_UNSURW_LSB 0x32
#define TDA10045H_WREF_MSB 0x33
#define TDA10045H_WREF_MID 0x34
#define TDA10045H_WREF_LSB 0x35
#define TDA10045H_MUXOUT 0x36
#define TDA1004X_CONFADC2 0x37
#define TDA10045H_IOFFSET 0x38
#define TDA10046H_CONF_TRISTATE1 0x3B
#define TDA10046H_CONF_TRISTATE2 0x3C
#define TDA10046H_CONF_POLARITY 0x3D
#define TDA10046H_FREQ_OFFSET 0x3E
#define TDA10046H_GPIO_OUT_SEL 0x41
#define TDA10046H_GPIO_SELECT 0x42
#define TDA10046H_AGC_CONF 0x43
#define TDA10046H_AGC_THR 0x44
#define TDA10046H_AGC_RENORM 0x45
#define TDA10046H_AGC_GAINS 0x46
#define TDA10046H_AGC_TUN_MIN 0x47
#define TDA10046H_AGC_TUN_MAX 0x48
#define TDA10046H_AGC_IF_MIN 0x49
#define TDA10046H_AGC_IF_MAX 0x4A
#define TDA10046H_FREQ_PHY2_MSB 0x4D
#define TDA10046H_FREQ_PHY2_LSB 0x4E
#define TDA10046H_CVBER_CTRL 0x4F
#define TDA10046H_AGC_IF_LEVEL 0x52
#define TDA10046H_CODE_CPT 0x57
#define TDA10046H_CODE_IN 0x58
/*
 * tda1004x_write_byteI - write one register over I2C
 *
 * Returns 0 on success, -1 when the i2c transfer did not complete.
 *
 * Fix: the "success" debug message was previously printed even when the
 * transfer failed (both dprintks ran unconditionally); it is now only
 * emitted on the success path.
 */
static int tda1004x_write_byteI(struct tda1004x_state *state, int reg, int data)
{
	int ret;
	u8 buf[] = { reg, data };
	struct i2c_msg msg = { .flags = 0, .buf = buf, .len = 2 };

	dprintk("%s: reg=0x%x, data=0x%x\n", __func__, reg, data);

	msg.addr = state->config->demod_address;
	ret = i2c_transfer(state->i2c, &msg, 1);
	if (ret != 1) {
		dprintk("%s: error reg=0x%x, data=0x%x, ret=%i\n",
			__func__, reg, data, ret);
		return -1;
	}

	dprintk("%s: success reg=0x%x, data=0x%x, ret=%i\n", __func__,
		reg, data, ret);
	return 0;
}
/*
 * tda1004x_read_byte - read one register over I2C
 *
 * Performs a write(reg)/read(1 byte) combined transfer.  Returns the
 * register value (0..255) on success or -EINVAL on a failed transfer.
 */
static int tda1004x_read_byte(struct tda1004x_state *state, int reg)
{
	u8 regaddr[] = { reg };
	u8 value[] = { 0 };
	struct i2c_msg msg[] = {
		{ .flags = 0, .buf = regaddr, .len = 1 },
		{ .flags = I2C_M_RD, .buf = value, .len = 1 }
	};
	int ret;

	dprintk("%s: reg=0x%x\n", __func__, reg);

	msg[0].addr = state->config->demod_address;
	msg[1].addr = state->config->demod_address;
	ret = i2c_transfer(state->i2c, msg, 2);

	if (ret != 2) {
		dprintk("%s: error reg=0x%x, ret=%i\n", __func__, reg,
			ret);
		return -EINVAL;
	}

	dprintk("%s: success reg=0x%x, data=0x%x, ret=%i\n", __func__,
		reg, value[0], ret);
	return value[0];
}
/*
 * tda1004x_write_mask - read-modify-write a register
 *
 * Clears the bits in @mask, merges in @data (truncated to 8 bits) and
 * writes the result back.  Propagates the error from the read or write.
 */
static int tda1004x_write_mask(struct tda1004x_state *state, int reg, int mask, int data)
{
	int val;

	dprintk("%s: reg=0x%x, mask=0x%x, data=0x%x\n", __func__, reg,
		mask, data);

	/* fetch current contents */
	val = tda1004x_read_byte(state, reg);
	if (val < 0)
		return val;

	/* clear the masked bits, then merge in the new value */
	val = (val & ~mask) | (data & 0xff);

	return tda1004x_write_byteI(state, reg, val);
}
/*
 * tda1004x_write_buf - write @len consecutive registers starting at @reg
 *
 * Stops at the first failed write and returns its error code; returns 0
 * when every byte was written (or @len is 0).
 */
static int tda1004x_write_buf(struct tda1004x_state *state, int reg, unsigned char *buf, int len)
{
	int result = 0;
	int i;

	dprintk("%s: reg=0x%x, len=0x%x\n", __func__, reg, len);

	for (i = 0; i < len && result == 0; i++)
		result = tda1004x_write_byteI(state, reg + i, buf[i]);

	return result;
}
/* Open the I2C gate to the tuner (CONFC4 bit 1), then let it settle. */
static int tda1004x_enable_tuner_i2c(struct tda1004x_state *state)
{
	int result;
	dprintk("%s\n", __func__);

	result = tda1004x_write_mask(state, TDA1004X_CONFC4, 2, 2);
	msleep(20);
	return result;
}
/* Close the I2C gate to the tuner (clear CONFC4 bit 1). */
static int tda1004x_disable_tuner_i2c(struct tda1004x_state *state)
{
	dprintk("%s\n", __func__);

	return tda1004x_write_mask(state, TDA1004X_CONFC4, 2, 0);
}
/*
 * tda10045h_set_bandwidth - program the TDA10045H for a channel bandwidth
 *
 * Writes the per-bandwidth byte table as one block starting at
 * TDA10045H_CONFPLL_P and zeroes the IOFFSET register.  Only 6/7/8 MHz
 * are supported; anything else returns -EINVAL.  Table contents are
 * opaque chip constants taken from the vendor configuration.
 */
static int tda10045h_set_bandwidth(struct tda1004x_state *state,
				   fe_bandwidth_t bandwidth)
{
	static u8 bandwidth_6mhz[] = { 0x02, 0x00, 0x3d, 0x00, 0x60, 0x1e, 0xa7, 0x45, 0x4f };
	static u8 bandwidth_7mhz[] = { 0x02, 0x00, 0x37, 0x00, 0x4a, 0x2f, 0x6d, 0x76, 0xdb };
	static u8 bandwidth_8mhz[] = { 0x02, 0x00, 0x3d, 0x00, 0x48, 0x17, 0x89, 0xc7, 0x14 };

	switch (bandwidth) {
	case BANDWIDTH_6_MHZ:
		tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_6mhz, sizeof(bandwidth_6mhz));
		break;

	case BANDWIDTH_7_MHZ:
		tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_7mhz, sizeof(bandwidth_7mhz));
		break;

	case BANDWIDTH_8_MHZ:
		tda1004x_write_buf(state, TDA10045H_CONFPLL_P, bandwidth_8mhz, sizeof(bandwidth_8mhz));
		break;

	default:
		return -EINVAL;
	}

	tda1004x_write_byteI(state, TDA10045H_IOFFSET, 0);

	return 0;
}
/*
 * tda10046h_set_bandwidth - program the TDA10046H for a channel bandwidth
 *
 * Two sets of TIME_WREF tables exist, chosen by the sampling clock that
 * the configured IF frequency implies: 45/52 MHz IF configs run with a
 * 48 MHz clock, the others with 53 MHz.  For the 45 MHz IF the second
 * IF-frequency register pair is also reprogrammed per bandwidth.  Only
 * 6/7/8 MHz are supported; anything else returns -EINVAL.
 */
static int tda10046h_set_bandwidth(struct tda1004x_state *state,
				   fe_bandwidth_t bandwidth)
{
	static u8 bandwidth_6mhz_53M[] = { 0x7b, 0x2e, 0x11, 0xf0, 0xd2 };
	static u8 bandwidth_7mhz_53M[] = { 0x6a, 0x02, 0x6a, 0x43, 0x9f };
	static u8 bandwidth_8mhz_53M[] = { 0x5c, 0x32, 0xc2, 0x96, 0x6d };

	static u8 bandwidth_6mhz_48M[] = { 0x70, 0x02, 0x49, 0x24, 0x92 };
	static u8 bandwidth_7mhz_48M[] = { 0x60, 0x02, 0xaa, 0xaa, 0xab };
	static u8 bandwidth_8mhz_48M[] = { 0x54, 0x03, 0x0c, 0x30, 0xc3 };
	int tda10046_clk53m;

	if ((state->config->if_freq == TDA10046_FREQ_045) ||
	    (state->config->if_freq == TDA10046_FREQ_052))
		tda10046_clk53m = 0;
	else
		tda10046_clk53m = 1;

	switch (bandwidth) {
	case BANDWIDTH_6_MHZ:
		if (tda10046_clk53m)
			tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_53M,
					   sizeof(bandwidth_6mhz_53M));
		else
			tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_48M,
					   sizeof(bandwidth_6mhz_48M));
		if (state->config->if_freq == TDA10046_FREQ_045) {
			tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0a);
			tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xab);
		}
		break;

	case BANDWIDTH_7_MHZ:
		if (tda10046_clk53m)
			tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_53M,
					   sizeof(bandwidth_7mhz_53M));
		else
			tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_48M,
					   sizeof(bandwidth_7mhz_48M));
		if (state->config->if_freq == TDA10046_FREQ_045) {
			tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c);
			tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00);
		}
		break;

	case BANDWIDTH_8_MHZ:
		if (tda10046_clk53m)
			tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_53M,
					   sizeof(bandwidth_8mhz_53M));
		else
			tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_48M,
					   sizeof(bandwidth_8mhz_48M));
		if (state->config->if_freq == TDA10046_FREQ_045) {
			tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d);
			tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x55);
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * tda1004x_do_upload - stream a firmware image into the demodulator DSP
 *
 * Clears the chip's code counter, then writes the image through the
 * code-in register in chunks of at most 16 bytes per I2C transfer
 * (buf[0] carries the register address, so the buffer holds up to 64
 * payload bytes even though only 16 are used per message).  Returns 0 on
 * success or -EIO if any transfer fails.
 */
static int tda1004x_do_upload(struct tda1004x_state *state,
			      const unsigned char *mem, unsigned int len,
			      u8 dspCodeCounterReg, u8 dspCodeInReg)
{
	u8 buf[65];
	struct i2c_msg fw_msg = { .flags = 0, .buf = buf, .len = 0 };
	int tx_size;
	int pos = 0;

	/* clear code counter */
	tda1004x_write_byteI(state, dspCodeCounterReg, 0);
	fw_msg.addr = state->config->demod_address;

	buf[0] = dspCodeInReg;
	while (pos != len) {
		// work out how much to send this time
		tx_size = len - pos;
		if (tx_size > 0x10)
			tx_size = 0x10;

		// send the chunk
		memcpy(buf + 1, mem + pos, tx_size);
		fw_msg.len = tx_size + 1;
		if (i2c_transfer(state->i2c, &fw_msg, 1) != 1) {
			printk(KERN_ERR "tda1004x: Error during firmware upload\n");
			return -EIO;
		}
		pos += tx_size;

		dprintk("%s: fw_pos=0x%x\n", __func__, pos);
	}
	// give the DSP a chance to settle 03/10/05 Hac
	msleep(100);

	return 0;
}
/*
 * tda1004x_check_upload_ok - verify DSP firmware is loaded and running
 *
 * Issues DSP command 0x67 and accepts a reported firmware revision in
 * the range 0x20..0x2e.  On the TDA10046 the DSP-ready status bit is
 * polled for up to 2 seconds first; the TDA10045 gets a fixed delay
 * (its ready flag reportedly does not work, see tda10045_fwupload).
 * Returns 0 on a plausible revision, -EIO otherwise.
 */
static int tda1004x_check_upload_ok(struct tda1004x_state *state)
{
	/* NOTE(review): a failed tda1004x_read_byte() (-EINVAL) is
	 * truncated to u8 here and treated as data — confirm acceptable */
	u8 data1, data2;
	unsigned long timeout;

	if (state->demod_type == TDA1004X_DEMOD_TDA10046) {
		timeout = jiffies + 2 * HZ;
		while(!(tda1004x_read_byte(state, TDA1004X_STATUS_CD) & 0x20)) {
			if (time_after(jiffies, timeout)) {
				printk(KERN_ERR "tda1004x: timeout waiting for DSP ready\n");
				break;
			}
			msleep(1);
		}
	} else
		msleep(100);

	// check upload was OK
	tda1004x_write_mask(state, TDA1004X_CONFC4, 0x10, 0); // we want to read from the DSP
	tda1004x_write_byteI(state, TDA1004X_DSP_CMD, 0x67);

	data1 = tda1004x_read_byte(state, TDA1004X_DSP_DATA1);
	data2 = tda1004x_read_byte(state, TDA1004X_DSP_DATA2);
	if (data1 != 0x67 || data2 < 0x20 || data2 > 0x2e) {
		printk(KERN_INFO "tda1004x: found firmware revision %x -- invalid\n", data2);
		return -EIO;
	}
	printk(KERN_INFO "tda1004x: found firmware revision %x -- ok\n", data2);
	return 0;
}
/*
 * tda10045_fwupload - load firmware into a TDA10045H
 *
 * Skips the upload if the DSP already reports a valid firmware revision.
 * Otherwise fetches the image via the board's request_firmware hook,
 * resets the chip, programs default (8 MHz) bandwidth parameters, streams
 * the image in, and re-verifies the revision after a fixed settle delay.
 */
static int tda10045_fwupload(struct dvb_frontend* fe)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int ret;
	const struct firmware *fw;

	/* don't re-upload unless necessary */
	if (tda1004x_check_upload_ok(state) == 0)
		return 0;

	/* request the firmware, this will block until someone uploads it */
	printk(KERN_INFO "tda1004x: waiting for firmware upload (%s)...\n", TDA10045_DEFAULT_FIRMWARE);
	ret = state->config->request_firmware(fe, &fw, TDA10045_DEFAULT_FIRMWARE);
	if (ret) {
		printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n");
		return ret;
	}

	/* reset chip */
	tda1004x_write_mask(state, TDA1004X_CONFC4, 0x10, 0);
	tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8);
	tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 0);
	msleep(10);

	/* set parameters */
	tda10045h_set_bandwidth(state, BANDWIDTH_8_MHZ);

	ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10045H_FWPAGE, TDA10045H_CODE_IN);
	release_firmware(fw);
	if (ret)
		return ret;
	printk(KERN_INFO "tda1004x: firmware upload complete\n");

	/* wait for DSP to initialise */
	/* DSPREADY doesn't seem to work on the TDA10045H */
	msleep(100);

	return tda1004x_check_upload_ok(state);
}
/*
 * tda10046_init_plls - program the TDA10046H clock PLLs and IF registers
 *
 * The PLL multiplier/divider values depend on whether the board runs the
 * demod from a 48 MHz or 53 MHz sampling clock (derived from the
 * configured IF frequency) and on the crystal (4 or 16 MHz).  Finishes
 * by setting the IF-frequency registers, defaulting the bandwidth to
 * 8 MHz, and waiting for the PLLs to settle.
 */
static void tda10046_init_plls(struct dvb_frontend* fe)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int tda10046_clk53m;

	if ((state->config->if_freq == TDA10046_FREQ_045) ||
	    (state->config->if_freq == TDA10046_FREQ_052))
		tda10046_clk53m = 0;
	else
		tda10046_clk53m = 1;

	tda1004x_write_byteI(state, TDA10046H_CONFPLL1, 0xf0);
	if(tda10046_clk53m) {
		printk(KERN_INFO "tda1004x: setting up plls for 53MHz sampling clock\n");
		tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x08); // PLL M = 8
	} else {
		printk(KERN_INFO "tda1004x: setting up plls for 48MHz sampling clock\n");
		tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x03); // PLL M = 3
	}
	if (state->config->xtal_freq == TDA10046_XTAL_4M ) {
		dprintk("%s: setting up PLLs for a 4 MHz Xtal\n", __func__);
		tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 0); // PLL P = N = 0
	} else {
		dprintk("%s: setting up PLLs for a 16 MHz Xtal\n", __func__);
		tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 3); // PLL P = 0, N = 3
	}
	if(tda10046_clk53m)
		tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x67);
	else
		tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x72);
	/* Note clock frequency is handled implicitly */
	switch (state->config->if_freq) {
	case TDA10046_FREQ_045:
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c);
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00);
		break;
	case TDA10046_FREQ_052:
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d);
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xc7);
		break;
	case TDA10046_FREQ_3617:
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd7);
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x59);
		break;
	case TDA10046_FREQ_3613:
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd7);
		tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x3f);
		break;
	}
	tda10046h_set_bandwidth(state, BANDWIDTH_8_MHZ); // default bandwidth 8 MHz
	/* let the PLLs settle */
	msleep(120);
}
/*
 * tda10046_fwupload - get firmware running on a TDA10046H
 *
 * Order of attempts:
 *  1. wake/reset the chip and check whether valid firmware is already
 *     running (no re-upload needed);
 *  2. trigger a boot from the on-board eeprom and re-check;
 *  3. fall back to loading an image via the board's request_firmware
 *     hook (also trying the legacy tda10045 filename) and uploading it
 *     over I2C.
 * Returns 0 once tda1004x_check_upload_ok() accepts the firmware.
 */
static int tda10046_fwupload(struct dvb_frontend* fe)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int ret, confc4;
	const struct firmware *fw;

	/* reset + wake up chip */
	if (state->config->xtal_freq == TDA10046_XTAL_4M) {
		confc4 = 0;
	} else {
		dprintk("%s: 16MHz Xtal, reducing I2C speed\n", __func__);
		confc4 = 0x80;
	}
	tda1004x_write_byteI(state, TDA1004X_CONFC4, confc4);

	tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 1, 0);
	/* set GPIO 1 and 3 */
	if (state->config->gpio_config != TDA10046_GPTRI) {
		tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE2, 0x33);
		tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x0f, state->config->gpio_config &0x0f);
	}
	/* let the clocks recover from sleep */
	msleep(10);

	/* The PLLs need to be reprogrammed after sleep */
	tda10046_init_plls(fe);
	tda1004x_write_mask(state, TDA1004X_CONFADC2, 0xc0, 0);

	/* don't re-upload unless necessary */
	if (tda1004x_check_upload_ok(state) == 0)
		return 0;

	/*
	   For i2c normal work, we need to slow down the bus speed.
	   However, the slow down breaks the eeprom firmware load.
	   So, use normal speed for eeprom booting and then restore the
	   i2c speed after that. Tested with MSI TV @nyware A/D board,
	   that comes with firmware version 29 inside their eeprom.

	   It should also be noticed that no other I2C transfer should
	   be in course while booting from eeprom, otherwise, tda10046
	   goes into an instable state. So, proper locking are needed
	   at the i2c bus master.
	 */
	printk(KERN_INFO "tda1004x: trying to boot from eeprom\n");
	tda1004x_write_byteI(state, TDA1004X_CONFC4, 4);
	msleep(300);
	tda1004x_write_byteI(state, TDA1004X_CONFC4, confc4);

	/* Checks if eeprom firmware went without troubles */
	if (tda1004x_check_upload_ok(state) == 0)
		return 0;

	/* eeprom firmware didn't work. Load one manually. */
	if (state->config->request_firmware != NULL) {
		/* request the firmware, this will block until someone uploads it */
		printk(KERN_INFO "tda1004x: waiting for firmware upload...\n");
		ret = state->config->request_firmware(fe, &fw, TDA10046_DEFAULT_FIRMWARE);
		if (ret) {
			/* remain compatible to old bug: try to load with tda10045 image name */
			ret = state->config->request_firmware(fe, &fw, TDA10045_DEFAULT_FIRMWARE);
			if (ret) {
				printk(KERN_ERR "tda1004x: no firmware upload (timeout or file not found?)\n");
				return ret;
			} else {
				printk(KERN_INFO "tda1004x: please rename the firmware file to %s\n",
						  TDA10046_DEFAULT_FIRMWARE);
			}
		}
	} else {
		printk(KERN_ERR "tda1004x: no request function defined, can't upload from file\n");
		return -EIO;
	}
	tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8); // going to boot from HOST
	ret = tda1004x_do_upload(state, fw->data, fw->size, TDA10046H_CODE_CPT, TDA10046H_CODE_IN);
	release_firmware(fw);
	return tda1004x_check_upload_ok(state);
}
/*
 * Map a DVB API FEC enum value to the chip's 3-bit code-rate encoding.
 * Returns -EINVAL for rates the hardware does not support.
 */
static int tda1004x_encode_fec(int fec)
{
	switch (fec) {
	case FEC_1_2:
		return 0;
	case FEC_2_3:
		return 1;
	case FEC_3_4:
		return 2;
	case FEC_5_6:
		return 3;
	case FEC_7_8:
		return 4;
	default:
		return -EINVAL;
	}
}
/*
 * Map the chip's code-rate encoding (0..4) back to the DVB API FEC enum.
 * Returns -1 for values outside the supported range.
 */
static int tda1004x_decode_fec(int tdafec)
{
	static const int fec_tab[] = {
		FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8
	};

	if (tdafec < 0 || tdafec >= (int)ARRAY_SIZE(fec_tab))
		return -1;

	return fec_tab[tdafec];
}
/* Frontend write hook: accepts exactly one (register, value) pair. */
static int tda1004x_write(struct dvb_frontend* fe, const u8 buf[], int len)
{
	struct tda1004x_state* state = fe->demodulator_priv;

	if (len != 2)
		return -EINVAL;

	return tda1004x_write_byteI(state, buf[0], buf[1]);
}
/*
 * tda10045_init - frontend init for the TDA10045H
 *
 * Uploads firmware if needed, wakes the ADC, and applies the fixed
 * register setup: DSP watchdog off, HP stream selected, VAGC polarity,
 * pulse killer, auto offset, MPEG2 TS interface and VBER measurement
 * configuration.  Returns -EIO if the firmware upload fails.
 */
static int tda10045_init(struct dvb_frontend* fe)
{
	struct tda1004x_state* state = fe->demodulator_priv;

	dprintk("%s\n", __func__);

	if (tda10045_fwupload(fe)) {
		printk("tda1004x: firmware upload failed\n");
		return -EIO;
	}

	tda1004x_write_mask(state, TDA1004X_CONFADC1, 0x10, 0); // wake up the ADC

	// tda setup
	tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer
	tda1004x_write_mask(state, TDA1004X_AUTO, 8, 0); // select HP stream
	tda1004x_write_mask(state, TDA1004X_CONFC1, 0x40, 0); // set polarity of VAGC signal
	tda1004x_write_mask(state, TDA1004X_CONFC1, 0x80, 0x80); // enable pulse killer
	tda1004x_write_mask(state, TDA1004X_AUTO, 0x10, 0x10); // enable auto offset
	tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0xC0, 0x0); // no frequency offset
	tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 0); // setup MPEG2 TS interface
	tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0); // setup MPEG2 TS interface
	tda1004x_write_mask(state, TDA1004X_VBER_MSB, 0xe0, 0xa0); // 10^6 VBER measurement bits
	tda1004x_write_mask(state, TDA1004X_CONFC1, 0x10, 0); // VAGC polarity
	tda1004x_write_byteI(state, TDA1004X_CONFADC1, 0x2e);

	tda1004x_write_mask(state, 0x1f, 0x01, state->config->invert_oclk);

	return 0;
}
/*
 * tda10046_init - frontend init for the TDA10046H
 *
 * Uploads firmware if needed, then applies the fixed register setup:
 * AGC configuration and polarities according to the board's agc_config,
 * TS output mode (parallel/serial per config->ts_mode) including output
 * clock inversion, AGC min/max windows and gains, VBER measurement size
 * and the MPEG2 TS interface registers.  Returns -EIO if the firmware
 * upload fails.
 */
static int tda10046_init(struct dvb_frontend* fe)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	dprintk("%s\n", __func__);

	if (tda10046_fwupload(fe)) {
		printk("tda1004x: firmware upload failed\n");
		return -EIO;
	}

	// tda setup
	tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer
	tda1004x_write_byteI(state, TDA1004X_AUTO, 0x87);    // 100 ppm crystal, select HP stream
	tda1004x_write_byteI(state, TDA1004X_CONFC1, 0x88);      // enable pulse killer

	switch (state->config->agc_config) {
	case TDA10046_AGC_DEFAULT:
		tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x00); // AGC setup
		tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60);  // set AGC polarities
		break;
	case TDA10046_AGC_IFO_AUTO_NEG:
		tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup
		tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities
		break;
	case TDA10046_AGC_IFO_AUTO_POS:
		tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x0a); // AGC setup
		tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x00); // set AGC polarities
		break;
	case TDA10046_AGC_TDA827X:
		tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02);   // AGC setup
		tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70);    // AGC Threshold
		tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
		tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0xf0, 0x60); // set AGC polarities
		break;
	}
	if (state->config->ts_mode == 0) {
		tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 0xc0, 0x40);
		tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7);
	} else {
		tda1004x_write_mask(state, TDA10046H_CONF_TRISTATE1, 0xc0, 0x80);
		tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x10,
							state->config->invert_oclk << 4);
	}
	tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38);
	tda1004x_write_mask (state, TDA10046H_CONF_TRISTATE1, 0x3e, 0x38); // Turn IF AGC output on
	tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MIN, 0);	  // }
	tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MAX, 0xff); // } AGC min/max values
	tda1004x_write_byteI(state, TDA10046H_AGC_IF_MIN, 0);	  // }
	tda1004x_write_byteI(state, TDA10046H_AGC_IF_MAX, 0xff);  // }
	tda1004x_write_byteI(state, TDA10046H_AGC_GAINS, 0x12); // IF gain 2, TUN gain 1
	tda1004x_write_byteI(state, TDA10046H_CVBER_CTRL, 0x1a); // 10^6 VBER measurement bits
	tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config
	tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config
	// tda1004x_write_mask(state, 0x50, 0x80, 0x80);         // handle out of guard echoes

	return 0;
}
/*
 * Program the demodulator with a full set of DVB-T transmission parameters
 * and kick off a new lock attempt.
 *
 * fe:        frontend; fe->demodulator_priv is our tda1004x_state
 * fe_params: requested OFDM parameters (FEC, constellation, guard, ...)
 *
 * Returns 0 on success, or a negative error code for unsupported/invalid
 * parameter values.  The register write ordering below is deliberate;
 * do not reorder.
 */
static int tda1004x_set_fe(struct dvb_frontend* fe,
			   struct dvb_frontend_parameters *fe_params)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int tmp;
	int inversion;

	dprintk("%s\n", __func__);

	if (state->demod_type == TDA1004X_DEMOD_TDA10046) {
		// setup auto offset
		tda1004x_write_mask(state, TDA1004X_AUTO, 0x10, 0x10);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x80, 0);
		tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0xC0, 0);
		// disable agc_conf[2] (re-enabled at the end of this function)
		tda1004x_write_mask(state, TDA10046H_AGC_CONF, 4, 0);
	}

	// set frequency (tune first, then close the tuner I2C gate)
	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe, fe_params);
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	// Hardcoded to use auto as much as possible on the TDA10045 as it
	// is very unreliable if AUTO mode is _not_ used.
	if (state->demod_type == TDA1004X_DEMOD_TDA10045) {
		fe_params->u.ofdm.code_rate_HP = FEC_AUTO;
		fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_AUTO;
		fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_AUTO;
	}

	// Set standard params.. or put them to auto
	if ((fe_params->u.ofdm.code_rate_HP == FEC_AUTO) ||
	    (fe_params->u.ofdm.code_rate_LP == FEC_AUTO) ||
	    (fe_params->u.ofdm.constellation == QAM_AUTO) ||
	    (fe_params->u.ofdm.hierarchy_information == HIERARCHY_AUTO)) {
		tda1004x_write_mask(state, TDA1004X_AUTO, 1, 1);	// enable auto
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x03, 0);	// turn off constellation bits
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0);	// turn off hierarchy bits
		tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0x3f, 0);	// turn off FEC bits
	} else {
		tda1004x_write_mask(state, TDA1004X_AUTO, 1, 0);	// disable auto

		// set HP FEC
		tmp = tda1004x_encode_fec(fe_params->u.ofdm.code_rate_HP);
		if (tmp < 0)
			return tmp;
		tda1004x_write_mask(state, TDA1004X_IN_CONF2, 7, tmp);

		// set LP FEC
		tmp = tda1004x_encode_fec(fe_params->u.ofdm.code_rate_LP);
		if (tmp < 0)
			return tmp;
		tda1004x_write_mask(state, TDA1004X_IN_CONF2, 0x38, tmp << 3);

		// set constellation (bits 1:0 of IN_CONF1)
		switch (fe_params->u.ofdm.constellation) {
		case QPSK:
			tda1004x_write_mask(state, TDA1004X_IN_CONF1, 3, 0);
			break;
		case QAM_16:
			tda1004x_write_mask(state, TDA1004X_IN_CONF1, 3, 1);
			break;
		case QAM_64:
			tda1004x_write_mask(state, TDA1004X_IN_CONF1, 3, 2);
			break;
		default:
			return -EINVAL;
		}

		// set hierarchy (bits 6:5 of IN_CONF1)
		switch (fe_params->u.ofdm.hierarchy_information) {
		case HIERARCHY_NONE:
			tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0 << 5);
			break;
		case HIERARCHY_1:
			tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 1 << 5);
			break;
		case HIERARCHY_2:
			tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 2 << 5);
			break;
		case HIERARCHY_4:
			tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 3 << 5);
			break;
		default:
			return -EINVAL;
		}
	}

	// set bandwidth (chip-specific helper)
	switch (state->demod_type) {
	case TDA1004X_DEMOD_TDA10045:
		tda10045h_set_bandwidth(state, fe_params->u.ofdm.bandwidth);
		break;
	case TDA1004X_DEMOD_TDA10046:
		tda10046h_set_bandwidth(state, fe_params->u.ofdm.bandwidth);
		break;
	}

	// set inversion; the board may invert the signal path, so the
	// requested inversion is flipped when config->invert is set
	inversion = fe_params->inversion;
	if (state->config->invert)
		inversion = inversion ? INVERSION_OFF : INVERSION_ON;
	switch (inversion) {
	case INVERSION_OFF:
		tda1004x_write_mask(state, TDA1004X_CONFC1, 0x20, 0);
		break;
	case INVERSION_ON:
		tda1004x_write_mask(state, TDA1004X_CONFC1, 0x20, 0x20);
		break;
	default:
		return -EINVAL;
	}

	// set guard interval (AUTO bit 1; manual value in IN_CONF1 bits 3:2)
	switch (fe_params->u.ofdm.guard_interval) {
	case GUARD_INTERVAL_1_32:
		tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 0 << 2);
		break;
	case GUARD_INTERVAL_1_16:
		tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 1 << 2);
		break;
	case GUARD_INTERVAL_1_8:
		tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 2 << 2);
		break;
	case GUARD_INTERVAL_1_4:
		tda1004x_write_mask(state, TDA1004X_AUTO, 2, 0);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 3 << 2);
		break;
	case GUARD_INTERVAL_AUTO:
		tda1004x_write_mask(state, TDA1004X_AUTO, 2, 2);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x0c, 0 << 2);
		break;
	default:
		return -EINVAL;
	}

	// set transmission mode (AUTO bit 2; manual value in IN_CONF1 bit 4)
	switch (fe_params->u.ofdm.transmission_mode) {
	case TRANSMISSION_MODE_2K:
		tda1004x_write_mask(state, TDA1004X_AUTO, 4, 0);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x10, 0 << 4);
		break;
	case TRANSMISSION_MODE_8K:
		tda1004x_write_mask(state, TDA1004X_AUTO, 4, 0);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x10, 1 << 4);
		break;
	case TRANSMISSION_MODE_AUTO:
		tda1004x_write_mask(state, TDA1004X_AUTO, 4, 4);
		tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x10, 0);
		break;
	default:
		return -EINVAL;
	}

	// start the lock
	switch (state->demod_type) {
	case TDA1004X_DEMOD_TDA10045:
		// pulse CONFC4 bit 3 to restart acquisition
		tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 8);
		tda1004x_write_mask(state, TDA1004X_CONFC4, 8, 0);
		break;
	case TDA1004X_DEMOD_TDA10046:
		tda1004x_write_mask(state, TDA1004X_AUTO, 0x40, 0x40);
		msleep(1);
		// re-enable agc_conf[2] disabled at the top of this function
		tda1004x_write_mask(state, TDA10046H_AGC_CONF, 4, 1);
		break;
	}

	msleep(10);

	return 0;
}
/*
 * Read back the demodulator's current transmission parameters into
 * fe_params.  All values are decoded from the chip's output/status
 * registers; unrecognized register values leave the corresponding
 * field untouched.  Always returns 0.
 */
static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_parameters *fe_params)
{
	struct tda1004x_state* state = fe->demodulator_priv;

	dprintk("%s\n", __func__);

	// inversion status; undo the board-level inversion if configured
	fe_params->inversion = INVERSION_OFF;
	if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
		fe_params->inversion = INVERSION_ON;
	if (state->config->invert)
		fe_params->inversion = fe_params->inversion ? INVERSION_OFF : INVERSION_ON;

	// bandwidth: inferred from the chip-specific timing reference register
	switch (state->demod_type) {
	case TDA1004X_DEMOD_TDA10045:
		switch (tda1004x_read_byte(state, TDA10045H_WREF_LSB)) {
		case 0x14:
			fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
			break;
		case 0xdb:
			fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
			break;
		case 0x4f:
			fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
			break;
		}
		break;
	case TDA1004X_DEMOD_TDA10046:
		// two values per bandwidth: presumably different firmware/clock
		// variants -- TODO confirm against the TDA10046 datasheet
		switch (tda1004x_read_byte(state, TDA10046H_TIME_WREF1)) {
		case 0x5c:
		case 0x54:
			fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
			break;
		case 0x6a:
		case 0x60:
			fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
			break;
		case 0x7b:
		case 0x70:
			fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
			break;
		}
		break;
	}

	// FEC: HP in bits 2:0, LP in bits 5:3 of OUT_CONF2
	fe_params->u.ofdm.code_rate_HP =
		tda1004x_decode_fec(tda1004x_read_byte(state, TDA1004X_OUT_CONF2) & 7);
	fe_params->u.ofdm.code_rate_LP =
		tda1004x_decode_fec((tda1004x_read_byte(state, TDA1004X_OUT_CONF2) >> 3) & 7);

	// constellation: bits 1:0 of OUT_CONF1
	switch (tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 3) {
	case 0:
		fe_params->u.ofdm.constellation = QPSK;
		break;
	case 1:
		fe_params->u.ofdm.constellation = QAM_16;
		break;
	case 2:
		fe_params->u.ofdm.constellation = QAM_64;
		break;
	}

	// transmission mode: bit 4 of OUT_CONF1
	fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_2K;
	if (tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x10)
		fe_params->u.ofdm.transmission_mode = TRANSMISSION_MODE_8K;

	// guard interval: bits 3:2 of OUT_CONF1
	switch ((tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x0c) >> 2) {
	case 0:
		fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_32;
		break;
	case 1:
		fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_16;
		break;
	case 2:
		fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_8;
		break;
	case 3:
		fe_params->u.ofdm.guard_interval = GUARD_INTERVAL_1_4;
		break;
	}

	// hierarchy: bits 6:5 of OUT_CONF1
	switch ((tda1004x_read_byte(state, TDA1004X_OUT_CONF1) & 0x60) >> 5) {
	case 0:
		fe_params->u.ofdm.hierarchy_information = HIERARCHY_NONE;
		break;
	case 1:
		fe_params->u.ofdm.hierarchy_information = HIERARCHY_1;
		break;
	case 2:
		fe_params->u.ofdm.hierarchy_information = HIERARCHY_2;
		break;
	case 3:
		fe_params->u.ofdm.hierarchy_information = HIERARCHY_4;
		break;
	}

	return 0;
}
/*
 * Build the frontend status bitmask (FE_HAS_SIGNAL/CARRIER/VITERBI/SYNC/LOCK).
 *
 * Beyond the hardware lock flags, this infers VITERBI from a non-saturated
 * channel BER and SYNC from a low-enough Viterbi BER, so partial lock states
 * are reported more accurately.  Returns 0 on success, -EIO on I2C failure.
 */
static int tda1004x_read_status(struct dvb_frontend* fe, fe_status_t * fe_status)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int status;
	int cber;
	int vber;

	dprintk("%s\n", __func__);

	// read status
	status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
	if (status == -1)
		return -EIO;

	// decode the hardware status bits
	*fe_status = 0;
	if (status & 4)
		*fe_status |= FE_HAS_SIGNAL;
	if (status & 2)
		*fe_status |= FE_HAS_CARRIER;
	if (status & 8)
		*fe_status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;

	// if we don't already have VITERBI (i.e. not LOCKED), see if the viterbi
	// is getting anything valid
	if (!(*fe_status & FE_HAS_VITERBI)) {
		// read the CBER (16-bit, LSB first)
		cber = tda1004x_read_byte(state, TDA1004X_CBER_LSB);
		if (cber == -1)
			return -EIO;
		status = tda1004x_read_byte(state, TDA1004X_CBER_MSB);
		if (status == -1)
			return -EIO;
		cber |= (status << 8);
		// The address 0x20 should be read to cope with a TDA10046 bug
		tda1004x_read_byte(state, TDA1004X_CBER_RESET);

		// 65535 means the counter is saturated (no valid data)
		if (cber != 65535)
			*fe_status |= FE_HAS_VITERBI;
	}

	// if we DO have some valid VITERBI output, but don't already have SYNC
	// bytes (i.e. not LOCKED), see if the RS decoder is getting anything valid.
	if ((*fe_status & FE_HAS_VITERBI) && (!(*fe_status & FE_HAS_SYNC))) {
		// read the VBER (20-bit, LSB/MID/MSB)
		vber = tda1004x_read_byte(state, TDA1004X_VBER_LSB);
		if (vber == -1)
			return -EIO;
		status = tda1004x_read_byte(state, TDA1004X_VBER_MID);
		if (status == -1)
			return -EIO;
		vber |= (status << 8);
		status = tda1004x_read_byte(state, TDA1004X_VBER_MSB);
		if (status == -1)
			return -EIO;
		vber |= (status & 0x0f) << 16;
		// The CVBER_LUT should be read to cope with TDA10046 hardware bug
		tda1004x_read_byte(state, TDA1004X_CVBER_LUT);

		// if RS has passed some valid TS packets, then we must be
		// getting some SYNC bytes
		if (vber < 16632)
			*fe_status |= FE_HAS_SYNC;
	}

	// success
	dprintk("%s: fe_status=0x%x\n", __func__, *fe_status);
	return 0;
}
/*
 * Report the signal strength as a 16-bit value derived from the chip's
 * 8-bit AGC level register.  Returns 0 on success, -EIO on read failure.
 */
static int tda1004x_read_signal_strength(struct dvb_frontend* fe, u16 * signal)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int agc_reg = 0;
	int level;

	dprintk("%s\n", __func__);

	/* The AGC level lives in a chip-specific register. */
	if (state->demod_type == TDA1004X_DEMOD_TDA10045)
		agc_reg = TDA10045H_S_AGC;
	else if (state->demod_type == TDA1004X_DEMOD_TDA10046)
		agc_reg = TDA10046H_AGC_IF_LEVEL;

	level = tda1004x_read_byte(state, agc_reg);
	if (level < 0)
		return -EIO;

	/* Widen the 8-bit reading to the full 16-bit reporting scale. */
	*signal = (level << 8) | level;
	dprintk("%s: signal=0x%x\n", __func__, *signal);
	return 0;
}
/*
 * Report the SNR as a 16-bit value.  The hardware register counts noise
 * (lower is better), so it is inverted before being widened.
 * Returns 0 on success, -EIO on read failure.
 */
static int tda1004x_read_snr(struct dvb_frontend* fe, u16 * snr)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int val;

	dprintk("%s\n", __func__);

	val = tda1004x_read_byte(state, TDA1004X_SNR);
	if (val < 0)
		return -EIO;

	/* Invert so that a bigger number means a better signal, then widen. */
	val = 255 - val;
	*snr = (val << 8) | val;

	dprintk("%s: snr=0x%x\n", __func__, *snr);
	return 0;
}
/*
 * Report the uncorrected-block count and reset the hardware counter.
 *
 * The counter is 7 bits; the reset is retried (up to 5 times) until the
 * re-read value has visibly dropped or reached zero, confirming the reset
 * took effect.  A saturated reading (0x7f) is reported as 0xffffffff
 * ("at least this many").  Returns 0 on success, -EIO on I2C failure.
 */
static int tda1004x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int tmp;
	int tmp2;
	int counter;

	dprintk("%s\n", __func__);

	// read the UCBLOCKS and reset
	counter = 0;
	tmp = tda1004x_read_byte(state, TDA1004X_UNCOR);
	if (tmp < 0)
		return -EIO;
	tmp &= 0x7f;
	// retry the reset until the counter is seen to have restarted
	while (counter++ < 5) {
		// three writes in a row -- presumably needed to latch the
		// reset reliably; TODO confirm against the datasheet
		tda1004x_write_mask(state, TDA1004X_UNCOR, 0x80, 0);
		tda1004x_write_mask(state, TDA1004X_UNCOR, 0x80, 0);
		tda1004x_write_mask(state, TDA1004X_UNCOR, 0x80, 0);
		tmp2 = tda1004x_read_byte(state, TDA1004X_UNCOR);
		if (tmp2 < 0)
			return -EIO;
		tmp2 &= 0x7f;
		if ((tmp2 < tmp) || (tmp2 == 0))
			break;
	}

	// 0x7f == saturated counter: report "unknown but large"
	if (tmp != 0x7f)
		*ucblocks = tmp;
	else
		*ucblocks = 0xffffffff;

	dprintk("%s: ucblocks=0x%x\n", __func__, *ucblocks);
	return 0;
}
/*
 * Report the channel bit error rate from the 16-bit CBER register pair.
 * The raw count is doubled (shifted left by one) for reporting.
 * Returns 0 on success, -EIO on read failure.
 */
static int tda1004x_read_ber(struct dvb_frontend* fe, u32* ber)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int byte;

	dprintk("%s\n", __func__);

	/* LSB first, then MSB -- keep this read order. */
	byte = tda1004x_read_byte(state, TDA1004X_CBER_LSB);
	if (byte < 0)
		return -EIO;
	*ber = byte << 1;

	byte = tda1004x_read_byte(state, TDA1004X_CBER_MSB);
	if (byte < 0)
		return -EIO;
	*ber |= byte << 9;

	// The address 0x20 should be read to cope with a TDA10046 bug
	tda1004x_read_byte(state, TDA1004X_CBER_RESET);

	dprintk("%s: ber=0x%x\n", __func__, *ber);
	return 0;
}
/*
 * Put the demodulator into a low-power state.
 *
 * TDA10045: only powers down the ADC.  TDA10046: additionally tristates
 * the outputs, applies the configured GPIO polarity, and sets the chip's
 * sleep bit in CONFC4.  Always returns 0.
 */
static int tda1004x_sleep(struct dvb_frontend* fe)
{
	struct tda1004x_state* state = fe->demodulator_priv;
	int gpio_conf;

	switch (state->demod_type) {
	case TDA1004X_DEMOD_TDA10045:
		tda1004x_write_mask(state, TDA1004X_CONFADC1, 0x10, 0x10);
		break;
	case TDA1004X_DEMOD_TDA10046:
		/* set outputs to tristate */
		tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0xff);
		/* invert GPIO 1 and 3 if desired*/
		gpio_conf = state->config->gpio_config;
		if (gpio_conf >= TDA10046_GP00_I)
			tda1004x_write_mask(state, TDA10046H_CONF_POLARITY, 0x0f,
					    (gpio_conf & 0x0f) ^ 0x0a);
		tda1004x_write_mask(state, TDA1004X_CONFADC2, 0xc0, 0xc0);
		/* CONFC4 bit 0: chip power-down */
		tda1004x_write_mask(state, TDA1004X_CONFC4, 1, 1);
		break;
	}

	return 0;
}
/*
 * Open (enable != 0) or close the I2C gate to the tuner behind the demod.
 * Returns whatever the underlying enable/disable helper returns.
 */
static int tda1004x_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
{
	struct tda1004x_state* state = fe->demodulator_priv;

	return enable ? tda1004x_enable_tuner_i2c(state)
		      : tda1004x_disable_tuner_i2c(state);
}
/*
 * Advertise the tuning algorithm parameters: allow the demod 800 ms to
 * lock per attempt, with no frequency stepping or drift compensation.
 */
static int tda1004x_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
	fesettings->min_delay_ms = 800;
	/* Drift compensation makes no sense for DVB-T */
	fesettings->max_drift = 0;
	fesettings->step_size = 0;
	return 0;
}
/* Free the per-demod state allocated by the attach function. */
static void tda1004x_release(struct dvb_frontend* fe)
{
	kfree(fe->demodulator_priv);
}
/* Frontend operations table for the TDA10045H, copied into each attached
 * frontend by tda10045_attach(). */
static struct dvb_frontend_ops tda10045_ops = {
	.info = {
		.name = "Philips TDA10045H DVB-T",
		.type = FE_OFDM,
		.frequency_min = 51000000,
		.frequency_max = 858000000,
		.frequency_stepsize = 166667,
		.caps =
		FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
		FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
		FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
		FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO
	},

	.release = tda1004x_release,

	.init = tda10045_init,
	.sleep = tda1004x_sleep,
	.write = tda1004x_write,

	.i2c_gate_ctrl = tda1004x_i2c_gate_ctrl,

	.set_frontend = tda1004x_set_fe,
	.get_frontend = tda1004x_get_fe,
	.get_tune_settings = tda1004x_get_tune_settings,

	.read_status = tda1004x_read_status,
	.read_ber = tda1004x_read_ber,
	.read_signal_strength = tda1004x_read_signal_strength,
	.read_snr = tda1004x_read_snr,
	.read_ucblocks = tda1004x_read_ucblocks,
};
/*
 * Allocate and initialize a TDA10045H frontend.
 *
 * config: board-specific configuration (not copied; must outlive the fe)
 * i2c:    adapter the demod sits on
 *
 * Probes the chip ID (expected 0x25) before committing.  Returns the new
 * frontend, or NULL on allocation failure or if the chip is absent/wrong;
 * on success the caller releases the state via ops.release.
 */
struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
				     struct i2c_adapter* i2c)
{
	struct tda1004x_state *state;
	int id;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
	if (!state) {
		/* fixed typo: "alocate" -> "allocate" */
		printk(KERN_ERR "Can't allocate memory for tda10045 state\n");
		return NULL;
	}

	/* setup the state */
	state->config = config;
	state->i2c = i2c;
	state->demod_type = TDA1004X_DEMOD_TDA10045;

	/* check if the demod is there */
	id = tda1004x_read_byte(state, TDA1004X_CHIPID);
	if (id < 0) {
		printk(KERN_ERR "tda10045: chip is not answering. Giving up.\n");
		goto err_free;
	}
	if (id != 0x25) {
		printk(KERN_ERR "Invalid tda1004x ID = 0x%02x. Can't proceed\n", id);
		goto err_free;
	}

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &tda10045_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

err_free:
	/* single cleanup path for both probe failures */
	kfree(state);
	return NULL;
}
/* Frontend operations table for the TDA10046H, copied into each attached
 * frontend by tda10046_attach().  Identical to the TDA10045 table except
 * for .info.name and .init. */
static struct dvb_frontend_ops tda10046_ops = {
	.info = {
		.name = "Philips TDA10046H DVB-T",
		.type = FE_OFDM,
		.frequency_min = 51000000,
		.frequency_max = 858000000,
		.frequency_stepsize = 166667,
		.caps =
		FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
		FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
		FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
		FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO
	},

	.release = tda1004x_release,

	.init = tda10046_init,
	.sleep = tda1004x_sleep,
	.write = tda1004x_write,

	.i2c_gate_ctrl = tda1004x_i2c_gate_ctrl,

	.set_frontend = tda1004x_set_fe,
	.get_frontend = tda1004x_get_fe,
	.get_tune_settings = tda1004x_get_tune_settings,

	.read_status = tda1004x_read_status,
	.read_ber = tda1004x_read_ber,
	.read_signal_strength = tda1004x_read_signal_strength,
	.read_snr = tda1004x_read_snr,
	.read_ucblocks = tda1004x_read_ucblocks,
};
/*
 * Allocate and initialize a TDA10046H frontend.
 *
 * config: board-specific configuration (not copied; must outlive the fe)
 * i2c:    adapter the demod sits on
 *
 * Probes the chip ID (expected 0x46) before committing.  Returns the new
 * frontend, or NULL on allocation failure or if the chip is absent/wrong;
 * on success the caller releases the state via ops.release.
 */
struct dvb_frontend* tda10046_attach(const struct tda1004x_config* config,
				     struct i2c_adapter* i2c)
{
	struct tda1004x_state *state;
	int id;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
	if (!state) {
		/* fixed typo: "alocate" -> "allocate" */
		printk(KERN_ERR "Can't allocate memory for tda10046 state\n");
		return NULL;
	}

	/* setup the state */
	state->config = config;
	state->i2c = i2c;
	state->demod_type = TDA1004X_DEMOD_TDA10046;

	/* check if the demod is there */
	id = tda1004x_read_byte(state, TDA1004X_CHIPID);
	if (id < 0) {
		printk(KERN_ERR "tda10046: chip is not answering. Giving up.\n");
		goto err_free;
	}
	if (id != 0x46) {
		printk(KERN_ERR "Invalid tda1004x ID = 0x%02x. Can't proceed\n", id);
		goto err_free;
	}

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &tda10046_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

err_free:
	/* single cleanup path for both probe failures */
	kfree(state);
	return NULL;
}
/* Module parameter: enables dprintk() debug output in this driver. */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");

MODULE_DESCRIPTION("Philips TDA10045H & TDA10046H DVB-T Demodulator");
MODULE_AUTHOR("Andrew de Quincey & Robert Schlabbach");
MODULE_LICENSE("GPL");

/* Entry points used by DVB bridge drivers to bind a demod instance. */
EXPORT_SYMBOL(tda10045_attach);
EXPORT_SYMBOL(tda10046_attach);
| gpl-2.0 |
d3trax/asuswrt-merlin | release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/leds/leds-atmel-pwm.c | 4031 | 3834 | #include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/atmel_pwm.h>
#include <linux/slab.h>
/*
 * Per-LED state: the LED class device, its PWM channel, and precomputed
 * duty-cycle scaling.
 */
struct pwmled {
	struct led_classdev	cdev;
	struct pwm_channel	pwmc;
	struct gpio_led		*desc;	/* NOTE(review): never assigned in the visible code -- verify */
	u32			mult;	/* PWM duty-cycle counts per unit of brightness */
	u8			active_low;
};
/*
* For simplicity, we use "brightness" as if it were a linear function
* of PWM duty cycle. However, a logarithmic function of duty cycle is
* probably a better match for perceived brightness: two is half as bright
* as four, four is half as bright as eight, etc
*/
/* LED-class brightness callback: scale the requested brightness into PWM
 * duty-cycle counts and queue it for the next PWM period. */
static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b)
{
	struct pwmled *led = container_of(cdev, struct pwmled, cdev);

	/* CUPD takes effect at the start of the *next* period. */
	pwm_channel_writel(&led->pwmc, PWM_CUPD, led->mult * (unsigned) b);
}
/*
* NOTE: we reuse the platform_data structure of GPIO leds,
* but repurpose its "gpio" number as a PWM channel number.
*/
/*
 * Probe: allocate one struct pwmled per platform-described LED, claim and
 * configure its PWM channel, and register it with the LED class.  On any
 * failure, fully registered LEDs are unwound and the array is freed.
 * Returns 0 on success or a negative errno.
 */
static int __init pwmled_probe(struct platform_device *pdev)
{
	const struct gpio_led_platform_data	*pdata;
	struct pwmled				*leds;
	int					i;
	int					status;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->num_leds < 1)
		return -ENODEV;

	leds = kcalloc(pdata->num_leds, sizeof(*leds), GFP_KERNEL);
	if (!leds)
		return -ENOMEM;

	for (i = 0; i < pdata->num_leds; i++) {
		struct pwmled		*led = leds + i;
		const struct gpio_led	*dat = pdata->leds + i;
		u32			tmp;

		led->cdev.name = dat->name;
		led->cdev.brightness = LED_OFF;
		led->cdev.brightness_set = pwmled_brightness;
		led->cdev.default_trigger = dat->default_trigger;

		led->active_low = dat->active_low;

		/* "gpio" is repurposed as the PWM channel number here */
		status = pwm_channel_alloc(dat->gpio, &led->pwmc);
		if (status < 0)
			goto err;

		/*
		 * Prescale clock by 2^x, so PWM counts in low MHz.
		 * Start each cycle with the LED active, so increasing
		 * the duty cycle gives us more time on (== brighter).
		 */
		tmp = 5;
		if (!led->active_low)
			tmp |= PWM_CPR_CPOL;
		pwm_channel_writel(&led->pwmc, PWM_CMR, tmp);

		/*
		 * Pick a period so PWM cycles at 100+ Hz; and a multiplier
		 * for scaling duty cycle:  brightness * mult.
		 */
		tmp = (led->pwmc.mck / (1 << 5)) / 100;
		tmp /= 255;
		led->mult = tmp;

		/* initial duty cycle; brightness is LED_OFF (0) here, so
		 * the "* 255" factor is moot -- NOTE(review): looks like it
		 * was meant to be "* tmp"; confirm before changing */
		pwm_channel_writel(&led->pwmc, PWM_CDTY,
				led->cdev.brightness * 255);
		pwm_channel_writel(&led->pwmc, PWM_CPRD,
				LED_FULL * tmp);

		pwm_channel_enable(&led->pwmc);

		/* Hand it over to the LED framework */
		status = led_classdev_register(&pdev->dev, &led->cdev);
		if (status < 0) {
			pwm_channel_free(&led->pwmc);
			goto err;
		}
	}

	platform_set_drvdata(pdev, leds);
	return 0;

err:
	/* unwind LEDs 0 .. i-1, which are fully registered; the failing
	 * LED i has already released its own resources above */
	if (i > 0) {
		for (i = i - 1; i >= 0; i--) {
			led_classdev_unregister(&leds[i].cdev);
			pwm_channel_free(&leds[i].pwmc);
		}
	}
	kfree(leds);
	return status;
}
/*
 * Remove: unregister every LED created by probe, release its PWM channel,
 * then free the array and clear the driver data.  Always returns 0.
 */
static int __exit pwmled_remove(struct platform_device *pdev)
{
	const struct gpio_led_platform_data *pdata = pdev->dev.platform_data;
	struct pwmled *leds = platform_get_drvdata(pdev);
	unsigned n;

	for (n = 0; n < pdata->num_leds; n++) {
		led_classdev_unregister(&leds[n].cdev);
		pwm_channel_free(&leds[n].pwmc);
	}

	kfree(leds);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:leds-atmel-pwm");

/* No .probe here: probe is __init and is bound once via
 * platform_driver_probe() in modinit(). */
static struct platform_driver pwmled_driver = {
	.driver = {
		.name =		"leds-atmel-pwm",
		.owner =	THIS_MODULE,
	},
	/* REVISIT add suspend() and resume() methods */
	.remove =	__exit_p(pwmled_remove),
};
/* Module init: register the driver and probe any already-present device;
 * platform_driver_probe() allows the __init probe function to be discarded. */
static int __init modinit(void)
{
	return platform_driver_probe(&pwmled_driver, pwmled_probe);
}
module_init(modinit);
/* Module exit: unregister the platform driver (invokes remove for bound devices). */
static void __exit modexit(void)
{
	platform_driver_unregister(&pwmled_driver);
}
module_exit(modexit);

MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mrjaydee82/SinLessKernel-4.4.4 | drivers/net/wan/lmc/lmc_main.c | 4799 | 63117 | /*
* Copyright (c) 1997-2000 LAN Media Corporation (LMC)
* All rights reserved. www.lanmedia.com
* Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This code is written by:
* Andrew Stanley-Jones (asj@cban.com)
* Rob Braun (bbraun@vix.com),
* Michael Graff (explorer@vix.com) and
* Matt Thomas (matt@3am-software.com).
*
* With Help By:
* David Boggs
* Ron Crane
* Alan Cox
*
* This software may be used and distributed according to the terms
* of the GNU General Public License version 2, incorporated herein by reference.
*
* Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
*
* To control link specific options lmcctl is required.
* It can be obtained from ftp.lanmedia.com.
*
* Linux driver notes:
* Linux uses the device struct lmc_private to pass private information
* around.
*
* The initialization portion of this driver (the lmc_reset() and the
* lmc_dec_reset() functions, as well as the led controls and the
* lmc_initcsrs() functions.
*
* The watchdog function runs every second and checks to see if
* we still have link, and that the timing source is what we expected
* it to be. If link is lost, the interface is marked down, and
* we no longer can transmit.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
//#include <asm/spinlock.h>
#define DRIVER_MAJOR_VERSION 1
#define DRIVER_MINOR_VERSION 34
#define DRIVER_SUB_VERSION 0
#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
#include "lmc.h"
#include "lmc_var.h"
#include "lmc_ioctl.h"
#include "lmc_debug.h"
#include "lmc_proto.h"
/* Packet buffer size used for receive allocations (bytes). */
static int LMC_PKT_BUF_SZ = 1542;

/* PCI match table: DEC Tulip Fast chips whose subsystem vendor or
 * subsystem device ID identifies a LanMedia board. */
static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
	  PCI_VENDOR_ID_LMC, PCI_ANY_ID },
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
	  PCI_ANY_ID, PCI_VENDOR_ID_LMC },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
MODULE_LICENSE("GPL v2");
/* Forward declarations for this driver's net_device callbacks and
 * internal reset/watchdog helpers, all defined later in this file. */
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static int lmc_rx (struct net_device *dev);
static int lmc_open(struct net_device *dev);
static int lmc_close(struct net_device *dev);
static struct net_device_stats *lmc_get_stats(struct net_device *dev);
static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
static void lmc_watchdog(unsigned long data);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev);
/*
* linux reserves 16 device specific IOCTLs. We call them
* LMCIOC* to control various bits of our world.
*/
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
lmc_softc_t *sc = dev_to_sc(dev);
lmc_ctl_t ctl;
int ret = -EOPNOTSUPP;
u16 regVal;
unsigned long flags;
lmc_trace(dev, "lmc_ioctl in");
/*
* Most functions mess with the structure
* Disable interrupts while we do the polling
*/
switch (cmd) {
/*
* Return current driver state. Since we keep this up
* To date internally, just copy this out to the user.
*/
case LMCIOCGINFO: /*fold01*/
if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
ret = -EFAULT;
else
ret = 0;
break;
case LMCIOCSINFO: /*fold01*/
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
if(dev->flags & IFF_UP){
ret = -EBUSY;
break;
}
if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
ret = -EFAULT;
break;
}
spin_lock_irqsave(&sc->lmc_lock, flags);
sc->lmc_media->set_status (sc, &ctl);
if(ctl.crc_length != sc->ictl.crc_length) {
sc->lmc_media->set_crc_length(sc, ctl.crc_length);
if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
else
sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
}
spin_unlock_irqrestore(&sc->lmc_lock, flags);
ret = 0;
break;
case LMCIOCIFTYPE: /*fold01*/
{
u16 old_type = sc->if_type;
u16 new_type;
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
ret = -EFAULT;
break;
}
if (new_type == old_type)
{
ret = 0 ;
break; /* no change */
}
spin_lock_irqsave(&sc->lmc_lock, flags);
lmc_proto_close(sc);
sc->if_type = new_type;
lmc_proto_attach(sc);
ret = lmc_proto_open(sc);
spin_unlock_irqrestore(&sc->lmc_lock, flags);
break;
}
case LMCIOCGETXINFO: /*fold01*/
spin_lock_irqsave(&sc->lmc_lock, flags);
sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
sc->lmc_xinfo.PciSlotNumber = 0;
sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
sc->lmc_xinfo.XilinxRevisionNumber =
lmc_mii_readreg (sc, 0, 3) & 0xf;
sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
spin_unlock_irqrestore(&sc->lmc_lock, flags);
sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
sizeof(struct lmc_xinfo)))
ret = -EFAULT;
else
ret = 0;
break;
case LMCIOCGETLMCSTATS:
spin_lock_irqsave(&sc->lmc_lock, flags);
if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB);
sc->extra_stats.framingBitErrorCount +=
lmc_mii_readreg(sc, 0, 18) & 0xff;
lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB);
sc->extra_stats.framingBitErrorCount +=
(lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB);
sc->extra_stats.lineCodeViolationCount +=
lmc_mii_readreg(sc, 0, 18) & 0xff;
lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB);
sc->extra_stats.lineCodeViolationCount +=
(lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;
sc->extra_stats.lossOfFrameCount +=
(regVal & T1FRAMER_LOF_MASK) >> 4;
sc->extra_stats.changeOfFrameAlignmentCount +=
(regVal & T1FRAMER_COFA_MASK) >> 2;
sc->extra_stats.severelyErroredFrameCount +=
regVal & T1FRAMER_SEF_MASK;
}
spin_unlock_irqrestore(&sc->lmc_lock, flags);
if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
sizeof(sc->lmc_device->stats)) ||
copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
&sc->extra_stats, sizeof(sc->extra_stats)))
ret = -EFAULT;
else
ret = 0;
break;
case LMCIOCCLEARLMCSTATS:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
spin_lock_irqsave(&sc->lmc_lock, flags);
memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
sc->extra_stats.check = STATCHECK;
sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
spin_unlock_irqrestore(&sc->lmc_lock, flags);
ret = 0;
break;
case LMCIOCSETCIRCUIT: /*fold01*/
if (!capable(CAP_NET_ADMIN)){
ret = -EPERM;
break;
}
if(dev->flags & IFF_UP){
ret = -EBUSY;
break;
}
if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
ret = -EFAULT;
break;
}
spin_lock_irqsave(&sc->lmc_lock, flags);
sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
sc->ictl.circuit_type = ctl.circuit_type;
spin_unlock_irqrestore(&sc->lmc_lock, flags);
ret = 0;
break;
case LMCIOCRESET: /*fold01*/
if (!capable(CAP_NET_ADMIN)){
ret = -EPERM;
break;
}
spin_lock_irqsave(&sc->lmc_lock, flags);
/* Reset driver and bring back to current state */
printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
lmc_running_reset (dev);
printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
spin_unlock_irqrestore(&sc->lmc_lock, flags);
ret = 0;
break;
#ifdef DEBUG
case LMCIOCDUMPEVENTLOG:
if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
ret = -EFAULT;
break;
}
if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
sizeof(lmcEventLogBuf)))
ret = -EFAULT;
else
ret = 0;
break;
#endif /* end ifdef _DBG_EVENTLOG */
case LMCIOCT1CONTROL: /*fold01*/
if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
ret = -EOPNOTSUPP;
break;
}
break;
case LMCIOCXILINX: /*fold01*/
{
struct lmc_xilinx_control xc; /*fold02*/
if (!capable(CAP_NET_ADMIN)){
ret = -EPERM;
break;
}
/*
* Stop the xwitter whlie we restart the hardware
*/
netif_stop_queue(dev);
if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
ret = -EFAULT;
break;
}
switch(xc.command){
case lmc_xilinx_reset: /*fold02*/
{
u16 mii;
spin_lock_irqsave(&sc->lmc_lock, flags);
mii = lmc_mii_readreg (sc, 0, 16);
/*
* Make all of them 0 and make input
*/
lmc_gpio_mkinput(sc, 0xff);
/*
* make the reset output
*/
lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
/*
* RESET low to force configuration. This also forces
* the transmitter clock to be internal, but we expect to reset
* that later anyway.
*/
sc->lmc_gpio &= ~LMC_GEP_RESET;
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
/*
* hold for more than 10 microseconds
*/
udelay(50);
sc->lmc_gpio |= LMC_GEP_RESET;
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
/*
* stop driving Xilinx-related signals
*/
lmc_gpio_mkinput(sc, 0xff);
/* Reset the frammer hardware */
sc->lmc_media->set_link_status (sc, 1);
sc->lmc_media->set_status (sc, NULL);
// lmc_softreset(sc);
{
int i;
for(i = 0; i < 5; i++){
lmc_led_on(sc, LMC_DS3_LED0);
mdelay(100);
lmc_led_off(sc, LMC_DS3_LED0);
lmc_led_on(sc, LMC_DS3_LED1);
mdelay(100);
lmc_led_off(sc, LMC_DS3_LED1);
lmc_led_on(sc, LMC_DS3_LED3);
mdelay(100);
lmc_led_off(sc, LMC_DS3_LED3);
lmc_led_on(sc, LMC_DS3_LED2);
mdelay(100);
lmc_led_off(sc, LMC_DS3_LED2);
}
}
spin_unlock_irqrestore(&sc->lmc_lock, flags);
ret = 0x0;
}
break;
case lmc_xilinx_load_prom: /*fold02*/
{
u16 mii;
int timeout = 500000;
spin_lock_irqsave(&sc->lmc_lock, flags);
mii = lmc_mii_readreg (sc, 0, 16);
/*
* Make all of them 0 and make input
*/
lmc_gpio_mkinput(sc, 0xff);
/*
* make the reset output
*/
lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);
/*
* RESET low to force configuration. This also forces
* the transmitter clock to be internal, but we expect to reset
* that later anyway.
*/
sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
/*
* hold for more than 10 microseconds
*/
udelay(50);
sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
/*
* busy wait for the chip to reset
*/
while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
(timeout-- > 0))
cpu_relax();
/*
* stop driving Xilinx-related signals
*/
lmc_gpio_mkinput(sc, 0xff);
spin_unlock_irqrestore(&sc->lmc_lock, flags);
ret = 0x0;
break;
}
case lmc_xilinx_load: /*fold02*/
{
char *data;
int pos;
int timeout = 500000;
if (!xc.data) {
ret = -EINVAL;
break;
}
data = kmalloc(xc.len, GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
break;
}
if(copy_from_user(data, xc.data, xc.len))
{
kfree(data);
ret = -ENOMEM;
break;
}
printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
spin_lock_irqsave(&sc->lmc_lock, flags);
lmc_gpio_mkinput(sc, 0xff);
/*
* Clear the Xilinx and start prgramming from the DEC
*/
/*
* Set ouput as:
* Reset: 0 (active)
* DP: 0 (active)
* Mode: 1
*
*/
sc->lmc_gpio = 0x00;
sc->lmc_gpio &= ~LMC_GEP_DP;
sc->lmc_gpio &= ~LMC_GEP_RESET;
sc->lmc_gpio |= LMC_GEP_MODE;
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
/*
* Wait at least 10 us 20 to be safe
*/
udelay(50);
/*
* Clear reset and activate programming lines
* Reset: Input
* DP: Input
* Clock: Output
* Data: Output
* Mode: Output
*/
lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
/*
* Set LOAD, DATA, Clock to 1
*/
sc->lmc_gpio = 0x00;
sc->lmc_gpio |= LMC_GEP_MODE;
sc->lmc_gpio |= LMC_GEP_DATA;
sc->lmc_gpio |= LMC_GEP_CLK;
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
/*
* busy wait for the chip to reset
*/
while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
(timeout-- > 0))
cpu_relax();
printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000-timeout);
for(pos = 0; pos < xc.len; pos++){
switch(data[pos]){
case 0:
sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
break;
case 1:
sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
break;
default:
printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
}
sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
sc->lmc_gpio |= LMC_GEP_MODE;
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
udelay(1);
sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clack back to one */
sc->lmc_gpio |= LMC_GEP_MODE;
LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
udelay(1);
}
if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
}
else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
}
else {
printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
}
lmc_gpio_mkinput(sc, 0xff);
sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
spin_unlock_irqrestore(&sc->lmc_lock, flags);
kfree(data);
ret = 0;
break;
}
default: /*fold02*/
ret = -EBADE;
break;
}
netif_wake_queue(dev);
sc->lmc_txfull = 0;
}
break;
default: /*fold01*/
/* If we don't know what to do, give the protocol a shot. */
ret = lmc_proto_ioctl (sc, ifr, cmd);
break;
}
lmc_trace(dev, "lmc_ioctl out");
return ret;
}
/*
 * Periodic (1 Hz) watchdog timer handler.
 *
 * Re-disables the Tulip jabber/rx-watchdog timers, keeps the Tx/Rx
 * engines running, detects a stuck transmit interrupt (and recovers via
 * lmc_running_reset()), mirrors the physical link state into the
 * netif carrier state, runs the media-specific watchdog, and retries any
 * ring/skb allocations that previously failed.  Always re-arms itself
 * for one second later.
 */
static void lmc_watchdog (unsigned long data) /*fold00*/
{
	struct net_device *dev = (struct net_device *)data;
	lmc_softc_t *sc = dev_to_sc(dev);
	int link_status;
	u32 ticks;
	unsigned long flags;

	lmc_trace(dev, "lmc_watchdog in");

	spin_lock_irqsave(&sc->lmc_lock, flags);

	/* Sanity value written at probe time (lmc_init_one); if it is gone
	 * the softc behind this net_device has been corrupted. */
	if (sc->check != 0xBEAFCAFE) {
		printk("LMC: Corrupt net_device struct, breaking out\n");
		spin_unlock_irqrestore(&sc->lmc_lock, flags);
		return;
	}

	/* Make sure the tx jabber and rx watchdog are off,
	 * and the transmit and receive processes are running.
	 */
	LMC_CSR_WRITE (sc, csr_15, 0x00000011);
	sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
	LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

	/* Interface not fully up yet: skip the checks but still re-arm. */
	if (sc->lmc_ok == 0)
		goto kick_timer;

	LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));

	/* --- begin time out check -----------------------------------
	 * check for a transmit interrupt timeout
	 * Has the packet xmt vs xmt serviced threshold been exceeded */
	if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
	    sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
	    sc->tx_TimeoutInd == 0)
	{
		/* First tick with no Tx interrupt serviced despite queued
		 * packets: arm the flag and wait for the watchdog to come
		 * around again. */
		sc->tx_TimeoutInd = 1;
	}
	else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
		 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
		 sc->tx_TimeoutInd)
	{
		/* Second consecutive tick in that state: treat the chip as
		 * stuck and reset it. */
		LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);

		sc->tx_TimeoutDisplay = 1;
		sc->extra_stats.tx_TimeoutCnt++;

		/* DEC chip is stuck, hit it with a RESET!!!! */
		lmc_running_reset (dev);

		/* look at receive & transmit process state to make sure they are running */
		LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);

		/* look at: DSR - 02 for Reg 16
		 *          CTS - 08
		 *          DCD - 10
		 *          RI  - 20
		 * for Reg 17
		 */
		LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));

		/* reset the transmit timeout detection flag */
		sc->tx_TimeoutInd = 0;
		sc->lastlmc_taint_tx = sc->lmc_taint_tx;
		sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
	} else {
		sc->tx_TimeoutInd = 0;
		sc->lastlmc_taint_tx = sc->lmc_taint_tx;
		sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
	}
	/* --- end time out check ----------------------------------- */

	link_status = sc->lmc_media->get_link_status (sc);

	/*
	 * hardware level link lost, but the interface is marked as up.
	 * Mark it as down.
	 */
	if ((link_status == 0) && (sc->last_link_status != 0)) {
		printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
		sc->last_link_status = 0;
		/* lmc_reset (sc); Why reset??? The link can go down ok */

		/* Inform the world that link has been lost */
		netif_carrier_off(dev);
	}

	/*
	 * hardware link is up, but the interface is marked as down.
	 * Bring it back up again.
	 */
	if (link_status != 0 && sc->last_link_status == 0) {
		printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
		sc->last_link_status = 1;
		/* lmc_reset (sc); Again why reset??? */

		netif_carrier_on(dev);
	}

	/* Call media specific watchdog functions */
	sc->lmc_media->watchdog(sc);

	/*
	 * Poke the transmitter to make sure it
	 * never stops, even if we run out of mem
	 */
	LMC_CSR_WRITE(sc, csr_rxpoll, 0);

	/*
	 * Check for code that failed
	 * and try and fix it as appropriate
	 */
	if (sc->failed_ring == 1) {
		/*
		 * Failed to setup the recv/xmit ring
		 * Try again
		 */
		sc->failed_ring = 0;
		lmc_softreset(sc);
	}
	if (sc->failed_recv_alloc == 1) {
		/*
		 * We failed to alloc mem in the
		 * interrupt handler, go through the rings
		 * and rebuild them
		 */
		sc->failed_recv_alloc = 0;
		lmc_softreset(sc);
	}

	/*
	 * remember the timer value
	 */
kick_timer:

	ticks = LMC_CSR_READ (sc, csr_gp_timer);
	LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
	sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

	/*
	 * restart this timer.
	 */
	sc->timer.expires = jiffies + (HZ);
	add_timer (&sc->timer);

	spin_unlock_irqrestore(&sc->lmc_lock, flags);

	lmc_trace(dev, "lmc_watchdog out");
}
/*
 * Generic-HDLC attach callback.  The hardware path only supports NRZ
 * encoding with CCITT CRC16 (preset 1); any other combination is
 * rejected with -EINVAL.
 */
static int lmc_attach(struct net_device *dev, unsigned short encoding,
		      unsigned short parity)
{
	int supported = (encoding == ENCODING_NRZ) &&
			(parity == PARITY_CRC16_PR1_CCITT);

	return supported ? 0 : -EINVAL;
}
/*
 * Net-device operations table.  MTU changes and the xmit entry point go
 * through the generic HDLC layer; everything else is driver-local.
 */
static const struct net_device_ops lmc_ops = {
	.ndo_open       = lmc_open,
	.ndo_stop       = lmc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = lmc_ioctl,
	.ndo_tx_timeout = lmc_driver_timeout,
	.ndo_get_stats  = lmc_get_stats,
};
/*
 * PCI probe routine: enable the device, claim its I/O regions, allocate
 * the softc and the hdlc net_device, register the hdlc device, identify
 * the daughter-card type from the PCI subsystem ID and hook up the
 * matching media operations.
 *
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are released on the error paths.
 */
static int __devinit lmc_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	lmc_softc_t *sc;
	struct net_device *dev;
	u16 subdevice;
	u16 AdapModelNum;
	int err;
	static int cards_found;

	/* lmc_trace(dev, "lmc_init_one in"); */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
		return err;
	}

	err = pci_request_regions(pdev, "lmc");
	if (err) {
		printk(KERN_ERR "lmc: pci_request_region failed\n");
		goto err_req_io;
	}

	/*
	 * Allocate our own device structure
	 */
	sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
	if (!sc) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	dev = alloc_hdlcdev(sc);
	if (!dev) {
		printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
		/* BUGFIX: err still held 0 (the pci_request_regions()
		 * result) here, so a failed allocation used to make the
		 * probe report success. */
		err = -ENOMEM;
		goto err_hdlcdev;
	}

	dev->type = ARPHRD_HDLC;
	dev_to_hdlc(dev)->xmit = lmc_start_xmit;
	dev_to_hdlc(dev)->attach = lmc_attach;
	dev->netdev_ops = &lmc_ops;
	dev->watchdog_timeo = HZ; /* 1 second */
	dev->tx_queue_len = 100;
	sc->lmc_device = dev;
	sc->name = dev->name;
	sc->if_type = LMC_PPP;
	sc->check = 0xBEAFCAFE;		/* sanity value verified by the watchdog */
	dev->base_addr = pci_resource_start(pdev, 0);
	dev->irq = pdev->irq;
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/*
	 * This will get the protocol layer ready and do any 1 time init's
	 * Must have a valid sc and dev structure
	 */
	lmc_proto_attach(sc);

	/* Init the spin lock so it can be called later */
	spin_lock_init(&sc->lmc_lock);
	pci_set_master(pdev);

	printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
	       dev->base_addr, dev->irq);

	err = register_hdlc_device(dev);
	if (err) {
		printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
		free_netdev(dev);
		goto err_hdlcdev;
	}

	sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
	sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;

	/*
	 * Check either the subvendor or the subdevice, some systems reverse
	 * the setting in the BIOS, seems to be version and arch dependent?
	 * Fix the error, exchange the two values
	 */
	if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
		subdevice = pdev->subsystem_vendor;

	/* Pick the media operations matching the daughter card. */
	switch (subdevice) {
	case PCI_DEVICE_ID_LMC_HSSI:
		printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
		sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
		sc->lmc_media = &lmc_hssi_media;
		break;
	case PCI_DEVICE_ID_LMC_DS3:
		printk(KERN_INFO "%s: LMC DS3\n", dev->name);
		sc->lmc_cardtype = LMC_CARDTYPE_DS3;
		sc->lmc_media = &lmc_ds3_media;
		break;
	case PCI_DEVICE_ID_LMC_SSI:
		printk(KERN_INFO "%s: LMC SSI\n", dev->name);
		sc->lmc_cardtype = LMC_CARDTYPE_SSI;
		sc->lmc_media = &lmc_ssi_media;
		break;
	case PCI_DEVICE_ID_LMC_T1:
		printk(KERN_INFO "%s: LMC T1\n", dev->name);
		sc->lmc_cardtype = LMC_CARDTYPE_T1;
		sc->lmc_media = &lmc_t1_media;
		break;
	default:
		printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
		break;
	}

	lmc_initcsrs (sc, dev->base_addr, 8);

	lmc_gpio_mkinput (sc, 0xff);
	sc->lmc_gpio = 0;	/* drive no signals yet */

	sc->lmc_media->defaults (sc);

	sc->lmc_media->set_link_status (sc, LMC_LINK_UP);

	/* verify that the PCI Sub System ID matches the Adapter Model number
	 * from the MII register
	 */
	AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;

	if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
	     subdevice != PCI_DEVICE_ID_LMC_T1) &&
	    (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
	     subdevice != PCI_DEVICE_ID_LMC_SSI) &&
	    (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
	     subdevice != PCI_DEVICE_ID_LMC_DS3) &&
	    (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
	     subdevice != PCI_DEVICE_ID_LMC_HSSI))
		printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
		       " Subsystem ID = 0x%04x\n",
		       dev->name, AdapModelNum, subdevice);

	/*
	 * reset clock
	 */
	LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);

	sc->board_idx = cards_found++;
	sc->extra_stats.check = STATCHECK;
	sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
		sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
	sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;

	sc->lmc_ok = 0;
	sc->last_link_status = 0;

	lmc_trace(dev, "lmc_init_one out");
	return 0;

err_hdlcdev:
	pci_set_drvdata(pdev, NULL);
	kfree(sc);
err_kzalloc:
	pci_release_regions(pdev);
err_req_io:
	pci_disable_device(pdev);
	return err;
}
/*
 * PCI removal callback: unregister the hdlc device and release the
 * PCI resources acquired in lmc_init_one().
 */
static void __devexit lmc_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	printk(KERN_DEBUG "%s: removing...\n", dev->name);
	unregister_hdlc_device(dev);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
/* After this is called, packets can be sent.
 * Does not initialize the addresses
 */
static int lmc_open(struct net_device *dev)
{
	lmc_softc_t *sc = dev_to_sc(dev);
	int err;

	lmc_trace(dev, "lmc_open in");

	lmc_led_on(sc, LMC_DS3_LED0);

	/* Full hardware reset: DEC 21140 first, then the Xilinx/media. */
	lmc_dec_reset(sc);
	lmc_reset(sc);

	LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
	LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
		      lmc_mii_readreg(sc, 0, 17));

	/* Already fully up (watchdog running): nothing more to do. */
	if (sc->lmc_ok) {
		lmc_trace(dev, "lmc_open lmc_ok out");
		return 0;
	}

	lmc_softreset (sc);

	/* Since we have to use PCI bus, this should work on x86,alpha,ppc */
	if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)) {
		printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
		lmc_trace(dev, "lmc_open irq failed out");
		return -EAGAIN;
	}
	sc->got_irq = 1;	/* lets lmc_ifdown() know it must free_irq() */

	/* Assert Terminal Active */
	sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
	sc->lmc_media->set_link_status (sc, LMC_LINK_UP);

	/*
	 * reset to last state.
	 */
	sc->lmc_media->set_status (sc, NULL);

	/* setup default bits to be used in tulip_desc_t transmit descriptor
	 * -baz */
	sc->TxDescriptControlInit = (
				     LMC_TDES_INTERRUPT_ON_COMPLETION
				     | LMC_TDES_FIRST_SEGMENT
				     | LMC_TDES_LAST_SEGMENT
				     | LMC_TDES_SECOND_ADDR_CHAINED
				     | LMC_TDES_DISABLE_PADDING
				    );

	if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
		/* disable 32 bit CRC generated by ASIC */
		sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
	}
	sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
	/* Acknowledge the Terminal Active and light LEDs */

	/* dev->flags |= IFF_UP; */

	/* NOTE(review): if lmc_proto_open() fails we return without
	 * freeing the IRQ requested above — confirm intended. */
	if ((err = lmc_proto_open(sc)) != 0)
		return err;

	netif_start_queue(dev);
	sc->extra_stats.tx_tbusy0++;

	/*
	 * select what interrupts we want to get
	 */
	sc->lmc_intrmask = 0;
	/* Should be using the default interrupt mask defined in the .h file. */
	sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
			     | TULIP_STS_RXINTR
			     | TULIP_STS_TXINTR
			     | TULIP_STS_ABNRMLINTR
			     | TULIP_STS_SYSERROR
			     | TULIP_STS_TXSTOPPED
			     | TULIP_STS_TXUNDERFLOW
			     | TULIP_STS_RXSTOPPED
			     | TULIP_STS_RXNOBUF
			    );
	LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

	/* Start the transmit and receive engines. */
	sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
	sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
	LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

	sc->lmc_ok = 1; /* Run watchdog */

	/*
	 * Set the if up now - pfb
	 */
	sc->last_link_status = 1;

	/*
	 * Setup a timer for the watchdog on probe, and start it running.
	 * Since lmc_ok == 0, it will be a NOP for now.
	 */
	init_timer (&sc->timer);
	sc->timer.expires = jiffies + HZ;
	sc->timer.data = (unsigned long) dev;
	sc->timer.function = lmc_watchdog;
	add_timer (&sc->timer);

	lmc_trace(dev, "lmc_open out");

	return 0;
}
/* Total reset to compensate for the AdTran DSU doing bad things
 * under heavy load
 *
 * Masks interrupts, resets the DEC chip and the media hardware, rebuilds
 * the descriptor rings, then restores interrupts and restarts the Tx/Rx
 * engines.  Caller must hold sc->lmc_lock (called from the interrupt
 * handler and the watchdog).
 */
static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
	lmc_softc_t *sc = dev_to_sc(dev);

	lmc_trace(dev, "lmc_runnig_reset in");

	/* stop interrupts */
	/* Clear the interrupt mask */
	LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

	lmc_dec_reset (sc);
	lmc_reset (sc);
	lmc_softreset (sc);
	/* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
	sc->lmc_media->set_link_status (sc, 1);
	sc->lmc_media->set_status (sc, NULL);

	/* Queue was quiesced by the reset; let traffic flow again. */
	netif_wake_queue(dev);

	sc->lmc_txfull = 0;
	sc->extra_stats.tx_tbusy0++;

	/* Re-enable the default interrupt set and the Tx/Rx engines. */
	sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
	LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

	sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
	LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

	lmc_trace(dev, "lmc_runnin_reset_out");
}
/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 */
static int lmc_close(struct net_device *dev)
{
	/* not calling release_region() as we should */
	lmc_softc_t *sc = dev_to_sc(dev);

	lmc_trace(dev, "lmc_close in");

	/* Stop the watchdog logic first so it cannot re-enable anything
	 * while the interface is torn down. */
	sc->lmc_ok = 0;
	sc->lmc_media->set_link_status (sc, 0);
	del_timer (&sc->timer);
	lmc_proto_close(sc);
	lmc_ifdown (dev);	/* stops DMA, frees the IRQ and ring skbs */

	lmc_trace(dev, "lmc_close out");

	return 0;
}
/* Ends the transfer of packets */
/* When the interface goes down, this is called
 *
 * Stops the queue, masks interrupts, halts the Tulip Tx/Rx engines,
 * accumulates the missed-frame counter, releases the IRQ (if we own
 * one), and frees every skb still attached to the Rx and Tx rings.
 * Always returns 0.
 */
static int lmc_ifdown (struct net_device *dev) /*fold00*/
{
	lmc_softc_t *sc = dev_to_sc(dev);
	u32 csr6;
	int i;

	lmc_trace(dev, "lmc_ifdown in");

	/* Don't let anything else go on right now */
	/* dev->start = 0; */
	netif_stop_queue(dev);
	sc->extra_stats.tx_tbusy1++;

	/* stop interrupts */
	/* Clear the interrupt mask */
	LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

	/* Stop Tx and Rx on the chip */
	csr6 = LMC_CSR_READ (sc, csr_command);
	csr6 &= ~LMC_DEC_ST;	/* Turn off the Transmission bit */
	csr6 &= ~LMC_DEC_SR;	/* Turn off the Receive bit */
	LMC_CSR_WRITE (sc, csr_command, csr6);

	sc->lmc_device->stats.rx_missed_errors +=
		LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

	/* release the interrupt */
	if (sc->got_irq == 1) {
		free_irq (dev->irq, dev);
		sc->got_irq = 0;
	}

	/* free skbuffs in the Rx queue */
	for (i = 0; i < LMC_RXDESCS; i++)
	{
		struct sk_buff *skb = sc->lmc_rxq[i];

		/* Detach the buffer from the ring before freeing it; the
		 * poison address makes any stray DMA obvious.  (The
		 * original code cleared lmc_rxq[i] twice per iteration;
		 * once is enough.) */
		sc->lmc_rxq[i] = NULL;
		sc->lmc_rxring[i].status = 0;
		sc->lmc_rxring[i].length = 0;
		sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
		if (skb != NULL)
			dev_kfree_skb(skb);
	}

	/* free any skbs still pending on the Tx ring */
	for (i = 0; i < LMC_TXDESCS; i++)
	{
		if (sc->lmc_txq[i] != NULL)
			dev_kfree_skb(sc->lmc_txq[i]);
		sc->lmc_txq[i] = NULL;
	}

	lmc_led_off (sc, LMC_MII16_LED_ALL);

	netif_wake_queue(dev);
	sc->extra_stats.tx_tbusy0++;

	lmc_trace(dev, "lmc_ifdown out");

	return 0;
}
/* Interrupt handling routine. This will take an incoming packet, or clean
 * up after a transmit.
 *
 * Loops while any enabled status bit is set in CSR5, dispatching to
 * lmc_running_reset() (abnormal summary), lmc_rx() (receive), Tx ring
 * reclaim, and system-error recovery.  Bounded by max_work iterations.
 */
static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
{
	struct net_device *dev = (struct net_device *) dev_instance;
	lmc_softc_t *sc = dev_to_sc(dev);
	u32 csr;
	int i;
	s32 stat;
	unsigned int badtx;
	u32 firstcsr;
	int max_work = LMC_RXDESCS;
	int handled = 0;

	lmc_trace(dev, "lmc_interrupt in");

	spin_lock(&sc->lmc_lock);

	/*
	 * Read the csr to find what interrupts we have (if any)
	 */
	csr = LMC_CSR_READ (sc, csr_status);

	/*
	 * Make sure this is our interrupt
	 */
	if ( ! (csr & sc->lmc_intrmask)) {
		goto lmc_int_fail_out;
	}

	firstcsr = csr;

	/* always go through this loop at least once */
	while (csr & sc->lmc_intrmask) {
		handled = 1;

		/*
		 * Clear interrupt bits, we handle all case below
		 */
		LMC_CSR_WRITE (sc, csr_status, csr);

		/*
		 * One of
		 *  - Transmit process timed out CSR5<1>
		 *  - Transmit jabber timeout    CSR5<3>
		 *  - Transmit underflow         CSR5<5>
		 *  - Transmit Receiver buffer unavailable CSR5<7>
		 *  - Receive process stopped    CSR5<8>
		 *  - Receive watchdog timeout   CSR5<9>
		 *  - Early transmit interrupt   CSR5<10>
		 *
		 * Is this really right? Should we do a running reset for jabber?
		 * (being a WAN card and all)
		 */
		if (csr & TULIP_STS_ABNRMLINTR) {
			lmc_running_reset (dev);
			break;
		}

		if (csr & TULIP_STS_RXINTR) {
			lmc_trace(dev, "rx interrupt");
			lmc_rx (dev);
		}

		if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {

			int n_compl = 0 ;
			/* reset the transmit timeout detection flag -baz */
			sc->extra_stats.tx_NoCompleteCnt = 0;

			/* Walk completed descriptors from the last reclaimed
			 * position up to the next free slot. */
			badtx = sc->lmc_taint_tx;
			i = badtx % LMC_TXDESCS;

			while ((badtx < sc->lmc_next_tx)) {
				stat = sc->lmc_txring[i].status;

				LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
					       sc->lmc_txring[i].length);
				/*
				 * If bit 31 is 1 the tulip owns it break out of the loop
				 */
				if (stat & 0x80000000)
					break;

				n_compl++ ;		/* i.e., have an empty slot in ring */
				/*
				 * If we have no skbuff or have cleared it
				 * Already continue to the next buffer
				 *
				 * NOTE(review): this continue does not advance
				 * badtx/i, so it can only terminate via the
				 * outer conditions — confirm lmc_txq[i] is
				 * always non-NULL for host-owned in-flight
				 * slots.
				 */
				if (sc->lmc_txq[i] == NULL)
					continue;

				/*
				 * Check the total error summary to look for any errors
				 */
				if (stat & 0x8000) {
					sc->lmc_device->stats.tx_errors++;
					if (stat & 0x4104)
						sc->lmc_device->stats.tx_aborted_errors++;
					if (stat & 0x0C00)
						sc->lmc_device->stats.tx_carrier_errors++;
					if (stat & 0x0200)
						sc->lmc_device->stats.tx_window_errors++;
					if (stat & 0x0002)
						sc->lmc_device->stats.tx_fifo_errors++;
				} else {
					sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;

					sc->lmc_device->stats.tx_packets++;
				}

				/* dev_kfree_skb(sc->lmc_txq[i]); */
				dev_kfree_skb_irq(sc->lmc_txq[i]);
				sc->lmc_txq[i] = NULL;

				badtx++;
				i = badtx % LMC_TXDESCS;
			}

			if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
			{
				printk ("%s: out of sync pointer\n", dev->name);
				badtx += LMC_TXDESCS;
			}
			LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
			sc->lmc_txfull = 0;
			netif_wake_queue(dev);
			sc->extra_stats.tx_tbusy0++;


#ifdef DEBUG
			sc->extra_stats.dirtyTx = badtx;
			sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
			sc->extra_stats.lmc_txfull = sc->lmc_txfull;
#endif
			sc->lmc_taint_tx = badtx;

			/*
			 * Why was there a break here???
			 */
		}			/* end handle transmit interrupt */

		if (csr & TULIP_STS_SYSERROR) {
			u32 error;
			printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
			/* CSR5 bits 25:23 encode the bus-error type. */
			error = csr>>23 & 0x7;
			switch (error) {
			case 0x000:
				printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
				break;
			case 0x001:
				printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
				break;
			case 0x010:
				printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
				break;
			default:
				printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
			}
			lmc_dec_reset (sc);
			lmc_reset (sc);
			LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
			LMC_EVENT_LOG(LMC_EVENT_RESET2,
				      lmc_mii_readreg (sc, 0, 16),
				      lmc_mii_readreg (sc, 0, 17));

		}

		/* Bound the work done in one invocation. */
		if (max_work-- <= 0)
			break;

		/*
		 * Get current csr status to make sure
		 * we've cleared all interrupts
		 */
		csr = LMC_CSR_READ (sc, csr_status);
	}				/* end interrupt loop */
	LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);

lmc_int_fail_out:

	spin_unlock(&sc->lmc_lock);

	lmc_trace(dev, "lmc_interrupt out");
	return IRQ_RETVAL(handled);
}
/*
 * hdlc transmit hook: place one skb on the Tulip transmit ring, build
 * its descriptor flag word (interrupt-on-completion only when the ring
 * is half full or full), hand ownership to the chip, and poke the
 * transmit poll register.  Always returns NETDEV_TX_OK; when the ring
 * fills up the queue is stopped instead of rejecting the skb.
 */
static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	lmc_softc_t *sc = dev_to_sc(dev);
	u32 flag;
	int entry;
	unsigned long flags;

	lmc_trace(dev, "lmc_start_xmit in");

	spin_lock_irqsave(&sc->lmc_lock, flags);

	/* normal path, tbusy known to be zero */

	entry = sc->lmc_next_tx % LMC_TXDESCS;

	sc->lmc_txq[entry] = skb;
	sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);

	LMC_CONSOLE_LOG("xmit", skb->data, skb->len);

#ifndef GCOM
	/* If the queue is less than half full, don't interrupt */
	if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
	{
		/* Do not interrupt on completion of this packet */
		flag = 0x60000000;
		netif_wake_queue(dev);
	}
	else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
	{
		/* This generates an interrupt on completion of this packet */
		flag = 0xe0000000;
		netif_wake_queue(dev);
	}
	else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
	{
		/* Do not interrupt on completion of this packet */
		flag = 0x60000000;
		netif_wake_queue(dev);
	}
	else
	{
		/* This generates an interrupt on completion of this packet;
		 * ring is (nearly) full, stop accepting packets. */
		flag = 0xe0000000;
		sc->lmc_txfull = 1;
		netif_stop_queue(dev);
	}
#else
	flag = LMC_TDES_INTERRUPT_ON_COMPLETION;

	if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
	{				/* ring full, go busy */
		sc->lmc_txfull = 1;
		netif_stop_queue(dev);
		sc->extra_stats.tx_tbusy1++;
		LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
	}
#endif

	if (entry == LMC_TXDESCS - 1)	/* last descriptor in ring */
		flag |= LMC_TDES_END_OF_RING;	/* flag as such for Tulip */

	/* don't pad small packets either */
	flag = sc->lmc_txring[entry].length = (skb->len) | flag |
		sc->TxDescriptControlInit;

	/* set the transmit timeout flag to be checked in
	 * the watchdog timer handler. -baz
	 */

	sc->extra_stats.tx_NoCompleteCnt++;
	sc->lmc_next_tx++;

	/* give ownership to the chip */
	LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
	sc->lmc_txring[entry].status = 0x80000000;

	/* send now! */
	LMC_CSR_WRITE (sc, csr_txpoll, 0);

	spin_unlock_irqrestore(&sc->lmc_lock, flags);

	lmc_trace(dev, "lmc_start_xmit_out");
	return NETDEV_TX_OK;
}
/*
 * Receive path, called from the interrupt handler with sc->lmc_lock
 * held.  Walks the Rx ring from lmc_next_rx, validating each host-owned
 * descriptor (segment bits, dribble/CRC errors, length bounds), then
 * hands good frames to the protocol layer — large frames are passed up
 * directly and replaced with a fresh skb, small frames are copied so the
 * ring buffer can be reused.  Processes at most LMC_RXDESCS descriptors
 * per call.  Always returns 0.
 */
static int lmc_rx(struct net_device *dev)
{
	lmc_softc_t *sc = dev_to_sc(dev);
	int i;
	int rx_work_limit = LMC_RXDESCS;
	unsigned int next_rx;
	int rxIntLoopCnt;		/* debug -baz */
	int localLengthErrCnt = 0;
	long stat;
	struct sk_buff *skb, *nsb;
	u16 len;

	lmc_trace(dev, "lmc_rx in");

	lmc_led_on(sc, LMC_DS3_LED3);

	rxIntLoopCnt = 0;		/* debug -baz */

	i = sc->lmc_next_rx % LMC_RXDESCS;
	next_rx = sc->lmc_next_rx;

	/* Keep going while the descriptor is owned by the host. */
	while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
	{
		rxIntLoopCnt++;		/* debug -baz */
		len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
		if ((stat & 0x0300) != 0x0300) {  /* Check first segment and last segment */
			if ((stat & 0x0000ffff) != 0x7fff) {
				/* Oversized frame */
				sc->lmc_device->stats.rx_length_errors++;
				goto skip_packet;
			}
		}

		if (stat & 0x00000008) {	/* Catch a dribbling bit error */
			sc->lmc_device->stats.rx_errors++;
			sc->lmc_device->stats.rx_frame_errors++;
			goto skip_packet;
		}


		if (stat & 0x00000004) {	/* Catch a CRC error by the Xilinx */
			sc->lmc_device->stats.rx_errors++;
			sc->lmc_device->stats.rx_crc_errors++;
			goto skip_packet;
		}

		if (len > LMC_PKT_BUF_SZ) {
			sc->lmc_device->stats.rx_length_errors++;
			localLengthErrCnt++;
			goto skip_packet;
		}

		if (len < sc->lmc_crcSize + 2) {
			sc->lmc_device->stats.rx_length_errors++;
			sc->extra_stats.rx_SmallPktCnt++;
			localLengthErrCnt++;
			goto skip_packet;
		}

		if (stat & 0x00004000) {
			printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
		}

		/* Strip the trailing CRC before handing the frame up. */
		len -= sc->lmc_crcSize;

		skb = sc->lmc_rxq[i];

		/*
		 * We ran out of memory at some point
		 * just allocate an skb buff and continue.
		 */

		if (!skb) {
			nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
			if (nsb) {
				sc->lmc_rxq[i] = nsb;
				nsb->dev = dev;
				sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
			}
			sc->failed_recv_alloc = 1;
			goto skip_packet;
		}

		sc->lmc_device->stats.rx_packets++;
		sc->lmc_device->stats.rx_bytes += len;

		LMC_CONSOLE_LOG("recv", skb->data, len);

		/*
		 * I'm not sure of the sanity of this
		 * Packets could be arriving at a constant
		 * 44.210mbits/sec and we're going to copy
		 * them into a new buffer??
		 */

		if (len > (LMC_MTU - (LMC_MTU>>2))) {  /* len > LMC_MTU * 0.75 */
			/*
			 * If it's a large packet don't copy it just hand it up
			 */
		give_it_anyways:

			sc->lmc_rxq[i] = NULL;
			sc->lmc_rxring[i].buffer1 = 0x0;

			skb_put (skb, len);
			skb->protocol = lmc_proto_type(sc, skb);
			skb_reset_mac_header(skb);
			/* skb_reset_network_header(skb); */
			skb->dev = dev;
			lmc_proto_netif(sc, skb);

			/*
			 * This skb will be destroyed by the upper layers, make a new one
			 */
			nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
			if (nsb) {
				sc->lmc_rxq[i] = nsb;
				nsb->dev = dev;
				sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
				/* Transferred to 21140 below */
			}
			else {
				/*
				 * We've run out of memory, stop trying to allocate
				 * memory and exit the interrupt handler
				 *
				 * The chip may run out of receivers and stop
				 * in which case we'll try to allocate the buffer
				 * again. (once a second)
				 */
				sc->extra_stats.rx_BuffAllocErr++;
				LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
				sc->failed_recv_alloc = 1;
				goto skip_out_of_mem;
			}
		}
		else {
			/* Small packet: copy into a right-sized skb so the
			 * full-size ring buffer stays with the descriptor. */
			nsb = dev_alloc_skb(len);
			if (!nsb) {
				goto give_it_anyways;
			}
			skb_copy_from_linear_data(skb, skb_put(nsb, len), len);

			nsb->protocol = lmc_proto_type(sc, nsb);
			skb_reset_mac_header(nsb);
			/* skb_reset_network_header(nsb); */
			nsb->dev = dev;
			lmc_proto_netif(sc, nsb);
		}

	skip_packet:
		LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
		/* Return the descriptor to the chip and advance. */
		sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;

		sc->lmc_next_rx++;
		i = sc->lmc_next_rx % LMC_RXDESCS;
		rx_work_limit--;
		if (rx_work_limit < 0)
			break;
	}

	/* detect condition for LMC1000 where DSU cable attaches and fills
	 * descriptors with bogus packets
	 *
	 if (localLengthErrCnt > LMC_RXDESCS - 3) {
	 sc->extra_stats.rx_BadPktSurgeCnt++;
	 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
	 sc->extra_stats.rx_BadPktSurgeCnt);
	 } */

	/* save max count of receive descriptors serviced */
	if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
		sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt;	/* debug -baz */

#ifdef DEBUG
	if (rxIntLoopCnt == 0)
	{
		for (i = 0; i < LMC_RXDESCS; i++)
		{
			if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
			    != DESC_OWNED_BY_DC21X4)
			{
				rxIntLoopCnt++;
			}
		}
		LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
	}
#endif


	lmc_led_off(sc, LMC_DS3_LED3);

skip_out_of_mem:
	lmc_trace(dev, "lmc_rx out");

	return 0;
}
static struct net_device_stats *lmc_get_stats(struct net_device *dev)
{
lmc_softc_t *sc = dev_to_sc(dev);
unsigned long flags;
lmc_trace(dev, "lmc_get_stats in");
spin_lock_irqsave(&sc->lmc_lock, flags);
sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_get_stats out");
return &sc->lmc_device->stats;
}
/* PCI driver glue; the ID table (lmc_pci_tbl) is defined elsewhere in
 * this file. */
static struct pci_driver lmc_driver = {
	.name = "lmc",
	.id_table = lmc_pci_tbl,
	.probe = lmc_init_one,
	.remove = __devexit_p(lmc_remove_one),
};
/* Module entry/exit: simply (un)register the PCI driver. */
static int __init init_lmc(void)
{
	return pci_register_driver(&lmc_driver);
}

static void __exit exit_lmc(void)
{
	pci_unregister_driver(&lmc_driver);
}

module_init(init_lmc);
module_exit(exit_lmc);
/*
 * Bit-bang an MII management read through CSR9.
 *
 * A 16-bit command word (0xf6 preamble/opcode bits, 5-bit PHY address,
 * 5-bit register number) is clocked out MSB-first by toggling the MDC
 * bit (0x10000) with MDO in bit 0x20000.  Then 19 clocks are issued
 * while sampling MDI (bit 0x80000); the final shift discards the
 * turnaround bit, leaving the 16-bit register value.
 */
unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
{
	int i;
	int command = (0xf6 << 10) | (devaddr << 5) | regno;
	int retval = 0;

	lmc_trace(sc->lmc_device, "lmc_mii_readreg in");

	LMC_MII_SYNC (sc);

	lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");

	/* Shift the command out MSB-first, one clock cycle per bit. */
	for (i = 15; i >= 0; i--)
	{
		int dataval = (command & (1 << i)) ? 0x20000 : 0;

		LMC_CSR_WRITE (sc, csr_9, dataval);
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
		LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
	}

	lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");

	/* Clock the response in, sampling the data-in bit each cycle. */
	for (i = 19; i > 0; i--)
	{
		LMC_CSR_WRITE (sc, csr_9, 0x40000);
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
		retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
		LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
	}

	lmc_trace(sc->lmc_device, "lmc_mii_readreg out");

	/* Drop the trailing bit and mask to 16 bits. */
	return (retval >> 1) & 0xffff;
}
/*
 * Bit-bang an MII management write through CSR9.
 *
 * The 32-bit command (0x5002 header bits, 5-bit PHY address, 5-bit
 * register number, 16 data bits) is clocked out MSB-first by toggling
 * the MDC bit (0x10000) with MDO in bit 0x20000; two trailing clock
 * cycles release the bus.
 *
 * The original code initialized `int i = 32;` and immediately
 * overwrote it with 31 — the dead store is removed and the while
 * loops are expressed as for loops; the emitted bus sequence is
 * unchanged.
 */
void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
{
	int i;
	int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;

	lmc_trace(sc->lmc_device, "lmc_mii_writereg in");

	LMC_MII_SYNC (sc);

	/* Shift all 32 command bits out, MSB first. */
	for (i = 31; i >= 0; i--)
	{
		int datav;

		if (command & (1 << i))
			datav = 0x20000;
		else
			datav = 0x00000;

		LMC_CSR_WRITE (sc, csr_9, datav);
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
		LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
	}

	/* Two idle clock cycles to finish the frame. */
	for (i = 0; i < 2; i++)
	{
		LMC_CSR_WRITE (sc, csr_9, 0x40000);
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
		LMC_CSR_WRITE (sc, csr_9, 0x50000);
		lmc_delay ();
		/* __SLOW_DOWN_IO; */
	}

	lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
}
/*
 * Rebuild the receive and transmit descriptor rings.
 *
 * Allocates (or reuses) an skb for every Rx descriptor and chains the
 * descriptors into a ring via buffer2; drops any skbs still pending on
 * the Tx ring and re-chains it empty.  On Rx allocation failure sets
 * sc->failed_ring so the watchdog retries later.
 */
static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
	int i;

	lmc_trace(sc->lmc_device, "lmc_softreset in");

	/* Initialize the receive rings and buffers. */
	sc->lmc_txfull = 0;
	sc->lmc_next_rx = 0;
	sc->lmc_next_tx = 0;
	sc->lmc_taint_rx = 0;
	sc->lmc_taint_tx = 0;

	/*
	 * Setup each one of the receiver buffers
	 * allocate an skbuff for each one, setup the descriptor table
	 * and point each buffer at the next one
	 */

	for (i = 0; i < LMC_RXDESCS; i++)
	{
		struct sk_buff *skb;

		if (sc->lmc_rxq[i] == NULL)
		{
			skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
			if (skb == NULL) {
				printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
				sc->failed_ring = 1;
				break;
			}
			else {
				sc->lmc_rxq[i] = skb;
			}
		}
		else
		{
			skb = sc->lmc_rxq[i];
		}

		skb->dev = sc->lmc_device;

		/* owned by 21140 */
		sc->lmc_rxring[i].status = 0x80000000;

		/* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
		sc->lmc_rxring[i].length = skb_tailroom(skb);

		/* use to be tail which is dumb since you're thinking why write
		 * to the end of the packet but since there's nothing there tail == data
		 */
		sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);

		/* This is fair since the structure is static and we have the next address */
		sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);

	}

	/*
	 * Sets end of ring
	 */
	if (i != 0) {
		sc->lmc_rxring[i - 1].length |= 0x02000000;			/* Set end of buffers flag */
		sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]);	/* Point back to the start */
	}
	LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring));	/* write base address */

	/* Initialize the transmit rings and buffers */
	for (i = 0; i < LMC_TXDESCS; i++)
	{
		if (sc->lmc_txq[i] != NULL) {		/* have buffer */
			dev_kfree_skb(sc->lmc_txq[i]);	/* free it */
			sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
		}
		sc->lmc_txq[i] = NULL;
		sc->lmc_txring[i].status = 0x00000000;
		sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
	}
	/* Close the Tx ring (i == LMC_TXDESCS after the loop). */
	sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
	LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));

	lmc_trace(sc->lmc_device, "lmc_softreset out");
}
/*
 * lmc_gpio_mkinput() - switch the GPIO pins in 'bits' to inputs.
 *
 * Clears the cached direction bits and writes the new direction word
 * (with TULIP_GP_PINSET) to the general-purpose register.
 */
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
	u32 dir;

	lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");

	dir = sc->lmc_gpio_io & ~bits;	/* dropped bits become inputs */
	sc->lmc_gpio_io = dir;
	LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | dir);

	lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
}
/*
 * lmc_gpio_mkoutput() - switch the GPIO pins in 'bits' to outputs.
 *
 * Sets the cached direction bits and writes the new direction word
 * (with TULIP_GP_PINSET) to the general-purpose register.
 */
void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
	u32 dir;

	lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");

	dir = sc->lmc_gpio_io | bits;	/* raised bits become outputs */
	sc->lmc_gpio_io = dir;
	LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | dir);

	lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
}
/*
 * lmc_led_on() - light the LED(s) selected by 'led'.
 *
 * LED control bits live in MII register 16 and are active-low: a
 * cleared bit means the LED is lit.  If any requested bit is already
 * clear the write is skipped.
 */
void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
	lmc_trace(sc->lmc_device, "lmc_led_on in");

	if ((~sc->lmc_miireg16) & led) {
		/* at least one of the requested LEDs is already on */
		lmc_trace(sc->lmc_device, "lmc_led_on aon out");
		return;
	}

	sc->lmc_miireg16 &= ~led;
	lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

	lmc_trace(sc->lmc_device, "lmc_led_on out");
}
/*
 * lmc_led_off() - extinguish the LED(s) selected by 'led'.
 *
 * Counterpart of lmc_led_on(): LEDs are active-low in MII register 16,
 * so setting a bit turns the LED off.  Skips the write if any requested
 * bit is already set.
 */
void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
	lmc_trace(sc->lmc_device, "lmc_led_off in");

	if (sc->lmc_miireg16 & led) {
		/* at least one of the requested LEDs is already off */
		lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
		return;
	}

	sc->lmc_miireg16 |= led;
	lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

	lmc_trace(sc->lmc_device, "lmc_led_off out");
}
/*
 * lmc_reset() - reset the card's framer and Xilinx logic.
 *
 * Pulses the FIFO-reset bit in MII register 16, then drives the GEP
 * RESET pin low for more than 10us to force a state reset, releases it,
 * and re-runs the media-specific init routine.  The write ordering and
 * the udelay() are hardware requirements - do not reorder.
 */
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
	lmc_trace(sc->lmc_device, "lmc_reset in");

	/* Pulse the FIFO reset bit: set, write, clear, write again. */
	sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
	lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

	sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
	lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

	/*
	 * make some of the GPIO pins be outputs
	 */
	lmc_gpio_mkoutput(sc, LMC_GEP_RESET);

	/*
	 * RESET low to force state reset.  This also forces
	 * the transmitter clock to be internal, but we expect to reset
	 * that later anyway.
	 */
	sc->lmc_gpio &= ~(LMC_GEP_RESET);
	LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

	/*
	 * hold for more than 10 microseconds
	 */
	udelay(50);

	/*
	 * stop driving Xilinx-related signals
	 */
	lmc_gpio_mkinput(sc, LMC_GEP_RESET);

	/*
	 * Call media specific init routine
	 */
	sc->lmc_media->init(sc);

	/* Count resets for the extra statistics block. */
	sc->extra_stats.resetCount++;

	lmc_trace(sc->lmc_device, "lmc_reset out");
}
/*
 * lmc_dec_reset() - software-reset the DEC (Tulip) chip and set up its
 * operating mode.
 *
 * Masks all interrupts, issues the busmode software reset, then builds
 * the command/mode register for HDLC-style operation (promiscuous, full
 * duplex, pass bad packets, port-select) and disables the receive
 * watchdog and transmit jabber timers.
 */
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
	u32 val;

	lmc_trace(sc->lmc_device, "lmc_dec_reset in");

	/*
	 * disable all interrupts
	 */
	sc->lmc_intrmask = 0;
	LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);

	/*
	 * Reset the chip with a software reset command.
	 * Wait 10 microseconds (actually 50 PCI cycles but at
	 * 33MHz that comes to two microseconds but wait a
	 * bit longer anyways)
	 */
	LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
	udelay(25);
#ifdef __sparc__
	/* NOTE(review): the value read here is immediately overwritten by
	 * the constant 0x00100000 on the next line, so the read only acts
	 * as a register access, not as input - looks intentional for the
	 * sparc cache-alignment setup, but confirm against hardware docs.
	 */
	sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
	sc->lmc_busmode = 0x00100000;
	sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
	LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
#endif

	sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);

	/*
	 * We want:
	 *   no ethernet address in frames we write
	 *   disable padding (txdesc, padding disable)
	 *   ignore runt frames (rdes0 bit 15)
	 *   no receiver watchdog or transmitter jabber timer
	 *       (csr15 bit 0,14 == 1)
	 *   if using 16-bit CRC, turn off CRC (trans desc, crc disable)
	 */
	sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
			 | TULIP_CMD_FULLDUPLEX
			 | TULIP_CMD_PASSBADPKT
			 | TULIP_CMD_NOHEARTBEAT
			 | TULIP_CMD_PORTSELECT
			 | TULIP_CMD_RECEIVEALL
			 | TULIP_CMD_MUSTBEONE
			 );
	sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
			  | TULIP_CMD_THRESHOLDCTL
			  | TULIP_CMD_STOREFWD
			  | TULIP_CMD_TXTHRSHLDCTL
			  );

	LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);

	/*
	 * disable receiver watchdog and transmit jabber
	 */
	val = LMC_CSR_READ(sc, csr_sia_general);
	val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
	LMC_CSR_WRITE(sc, csr_sia_general, val);

	lmc_trace(sc->lmc_device, "lmc_dec_reset out");
}
/*
 * lmc_initcsrs() - record the addresses of the Tulip CSRs.
 *
 * The chip exposes 16 control/status registers spaced 'csr_size' bytes
 * apart starting at 'csr_base'; cache each one's address in the softc.
 */
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
			 size_t csr_size)
{
	lmc_trace(sc->lmc_device, "lmc_initcsrs in");

#define LMC_CSR_ADDR(n)	(csr_base + (n) * csr_size)
	sc->lmc_csrs.csr_busmode	= LMC_CSR_ADDR(0);
	sc->lmc_csrs.csr_txpoll		= LMC_CSR_ADDR(1);
	sc->lmc_csrs.csr_rxpoll		= LMC_CSR_ADDR(2);
	sc->lmc_csrs.csr_rxlist		= LMC_CSR_ADDR(3);
	sc->lmc_csrs.csr_txlist		= LMC_CSR_ADDR(4);
	sc->lmc_csrs.csr_status		= LMC_CSR_ADDR(5);
	sc->lmc_csrs.csr_command	= LMC_CSR_ADDR(6);
	sc->lmc_csrs.csr_intr		= LMC_CSR_ADDR(7);
	sc->lmc_csrs.csr_missed_frames	= LMC_CSR_ADDR(8);
	sc->lmc_csrs.csr_9		= LMC_CSR_ADDR(9);
	sc->lmc_csrs.csr_10		= LMC_CSR_ADDR(10);
	sc->lmc_csrs.csr_11		= LMC_CSR_ADDR(11);
	sc->lmc_csrs.csr_12		= LMC_CSR_ADDR(12);
	sc->lmc_csrs.csr_13		= LMC_CSR_ADDR(13);
	sc->lmc_csrs.csr_14		= LMC_CSR_ADDR(14);
	sc->lmc_csrs.csr_15		= LMC_CSR_ADDR(15);
#undef LMC_CSR_ADDR

	lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
static void lmc_driver_timeout(struct net_device *dev)
{
lmc_softc_t *sc = dev_to_sc(dev);
u32 csr6;
unsigned long flags;
lmc_trace(dev, "lmc_driver_timeout in");
spin_lock_irqsave(&sc->lmc_lock, flags);
printk("%s: Xmitter busy|\n", dev->name);
sc->extra_stats.tx_tbusy_calls++;
if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
goto bug_out;
/*
* Chip seems to have locked up
* Reset it
* This whips out all our decriptor
* table and starts from scartch
*/
LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
LMC_CSR_READ (sc, csr_status),
sc->extra_stats.tx_ProcTimeout);
lmc_running_reset (dev);
LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
LMC_EVENT_LOG(LMC_EVENT_RESET2,
lmc_mii_readreg (sc, 0, 16),
lmc_mii_readreg (sc, 0, 17));
/* restart the tx processes */
csr6 = LMC_CSR_READ (sc, csr_command);
LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
/* immediate transmit */
LMC_CSR_WRITE (sc, csr_txpoll, 0);
sc->lmc_device->stats.tx_errors++;
sc->extra_stats.tx_ProcTimeout++; /* -baz */
dev->trans_start = jiffies; /* prevent tx timeout */
bug_out:
spin_unlock_irqrestore(&sc->lmc_lock, flags);
lmc_trace(dev, "lmc_driver_timout out");
}
| gpl-2.0 |
ngxson/android_kernel_sony_msm8x27 | drivers/staging/wlags49_h2/wl_netdev.c | 4799 | 61490 | /*******************************************************************************
* Agere Systems Inc.
* Wireless device driver for Linux (wlags49).
*
* Copyright (c) 1998-2003 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
*
* Initially developed by TriplePoint, Inc.
* http://www.triplepoint.com
*
*------------------------------------------------------------------------------
*
* This file contains handler functions registered with the net_device
* structure.
*
*------------------------------------------------------------------------------
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
******************************************************************************/
/*******************************************************************************
* include files
******************************************************************************/
#include <wl_version.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
// #include <linux/sched.h>
// #include <linux/ptrace.h>
// #include <linux/slab.h>
// #include <linux/ctype.h>
// #include <linux/string.h>
//#include <linux/timer.h>
// #include <linux/interrupt.h>
// #include <linux/in.h>
// #include <linux/delay.h>
// #include <linux/skbuff.h>
// #include <asm/io.h>
// // #include <asm/bitops.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
// #include <linux/skbuff.h>
// #include <linux/if_arp.h>
// #include <linux/ioport.h>
#include <debug.h>
#include <hcf.h>
#include <dhf.h>
// #include <hcfdef.h>
#include <wl_if.h>
#include <wl_internal.h>
#include <wl_util.h>
#include <wl_priv.h>
#include <wl_main.h>
#include <wl_netdev.h>
#include <wl_wext.h>
#ifdef USE_PROFILE
#include <wl_profile.h>
#endif /* USE_PROFILE */
#ifdef BUS_PCMCIA
#include <wl_cs.h>
#endif /* BUS_PCMCIA */
#ifdef BUS_PCI
#include <wl_pci.h>
#endif /* BUS_PCI */
/*******************************************************************************
* global variables
******************************************************************************/
#if DBG
extern dbg_info_t *DbgInfo;
#endif /* DBG */
#if HCF_ENCAP
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN - 8)
#else
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN)
#endif
//static int mtu = MTU_MAX;
//MODULE_PARM(mtu, "i");
//MODULE_PARM_DESC(mtu, "MTU");
/*******************************************************************************
* macros
******************************************************************************/
#define BLOCK_INPUT(buf, len) \
desc->buf_addr = buf; \
desc->BUF_SIZE = len; \
status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
#define BLOCK_INPUT_DMA(buf, len) memcpy( buf, desc_next->buf_addr, pktlen )
/*******************************************************************************
* function prototypes
******************************************************************************/
/*******************************************************************************
* wl_init()
*******************************************************************************
*
* DESCRIPTION:
*
* We never need to do anything when a "Wireless" device is "initialized"
* by the net software, because we only register already-found cards.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno value otherwise
*
******************************************************************************/
/*
 * wl_init() - net_device init hook.
 *
 * All real setup is done at card-probe time, so this only logs the call
 * and reports success.
 *
 * Returns 0 always.
 */
int wl_init( struct net_device *dev )
{
    DBG_FUNC( "wl_init" );
    DBG_ENTER( DbgInfo );

    DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );

    /* Nothing to do: we only register cards that were already found. */

    DBG_LEAVE( DbgInfo );
    return 0;
} // wl_init
/*============================================================================*/
/*============================================================================*/
/*******************************************************************************
* wl_config()
*******************************************************************************
*
* DESCRIPTION:
*
* Implement the SIOCSIFMAP interface.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
* map - a pointer to the device's ifmap structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
/*
 * wl_config() - SIOCSIFMAP handler.
 *
 * The only interesting change would be a port change, which this driver
 * does not support, so the request is logged and ignored.
 *
 * Returns 0 always.
 */
int wl_config( struct net_device *dev, struct ifmap *map )
{
    DBG_FUNC( "wl_config" );
    DBG_ENTER( DbgInfo );

    DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
    DBG_PARAM( DbgInfo, "map", "0x%p", map );

    /* Nothing to reconfigure; acknowledge and return. */
    DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);

    DBG_LEAVE( DbgInfo );
    return 0;
} // wl_config
/*============================================================================*/
/*============================================================================*/
/*******************************************************************************
* wl_stats()
*******************************************************************************
*
* DESCRIPTION:
*
* Return the current device statistics.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* a pointer to a net_device_stats structure containing the network
* statistics.
*
******************************************************************************/
/*
 * wl_stats() - return the statistics block for 'dev'.
 *
 * With USE_WDS the device may be one of the WDS ports, in which case
 * that port's counters are returned; otherwise the root device's
 * counters are used.  Returns NULL in RTS mode.
 */
struct net_device_stats *wl_stats( struct net_device *dev )
{
#ifdef USE_WDS
    int count;
#endif /* USE_WDS */
    unsigned long flags;
    struct net_device_stats *pStats = NULL;
    struct wl_private *lp = wl_priv(dev);

    wl_lock( lp, &flags );

#ifdef USE_RTS
    if( lp->useRTS == 1 ) {
        wl_unlock( lp, &flags );
        return NULL;
    }
#endif /* USE_RTS */

#ifdef USE_WDS
    /* Is this one of the WDS port devices? */
    for( count = 0; count < NUM_WDS_PORTS; count++ ) {
        if( dev == lp->wds_port[count].dev ) {
            pStats = &( lp->wds_port[count].stats );
        }
    }
#endif /* USE_WDS */

    /* Not a WDS port: use the root device's counters. */
    if( pStats == NULL ) {
        pStats = &( lp->stats );
    }

    wl_unlock( lp, &flags );

    return pStats;
} // wl_stats
/*============================================================================*/
/*============================================================================*/
/*******************************************************************************
* wl_open()
*******************************************************************************
*
* DESCRIPTION:
*
* Open the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
/*
 * wl_open() - bring the interface up.
 *
 * Enables port 0 if it is disabled, downloads firmware (wl_go) when an
 * image filename is configured or applies the current configuration
 * (wl_apply), resets the card on failure, and finally turns interrupts
 * and the Tx queue on.  The lock is deliberately dropped and re-taken
 * between phases to give other contexts a chance to run.
 *
 * Returns HCF_SUCCESS (0) on success, an HCF status or -EIO otherwise.
 */
int wl_open(struct net_device *dev)
{
    int status = HCF_SUCCESS;
    struct wl_private *lp = wl_priv(dev);
    unsigned long flags;
    /*------------------------------------------------------------------------*/

    DBG_FUNC( "wl_open" );
    DBG_ENTER( DbgInfo );

    wl_lock( lp, &flags );

#ifdef USE_RTS
    /* In RTS (test) mode the normal data path must stay closed. */
    if( lp->useRTS == 1 ) {
        DBG_TRACE( DbgInfo, "Skipping device open, in RTS mode\n" );
        wl_unlock( lp, &flags );
        DBG_LEAVE( DbgInfo );
        return -EIO;
    }
#endif  /* USE_RTS */

#ifdef USE_PROFILE
    parse_config( dev );
#endif

    /* Make sure port 0 is enabled before configuring. */
    if( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
        DBG_TRACE( DbgInfo, "Enabling Port 0\n" );
        status = wl_enable( lp );

        if( status != HCF_SUCCESS ) {
            DBG_TRACE( DbgInfo, "Enable port 0 failed: 0x%x\n", status );
        }
    }

    // Holding the lock too long, make a gap to allow other processes
    wl_unlock(lp, &flags);
    wl_lock( lp, &flags );

    /* A configured firmware image filename forces a (re)download. */
    if ( strlen( lp->fw_image_filename ) ) {
	DBG_TRACE( DbgInfo, ";???? Kludgy way to force a download\n" );
	status = wl_go( lp );
    } else {
	status = wl_apply( lp );
    }

    // Holding the lock too long, make a gap to allow other processes
    wl_unlock(lp, &flags);
    wl_lock( lp, &flags );

    if( status != HCF_SUCCESS ) {
	// Unsuccessful, try reset of the card to recover
	status = wl_reset( dev );
    }

    // Holding the lock too long, make a gap to allow other processes
    wl_unlock(lp, &flags);
    wl_lock( lp, &flags );

    if( status == HCF_SUCCESS ) {
        /* Card is up: report carrier, enable interrupts, start Tx. */
        netif_carrier_on( dev );
        WL_WDS_NETIF_CARRIER_ON( lp );

        lp->is_handling_int = WL_HANDLING_INT; // Start handling interrupts
        wl_act_int_on( lp );

        netif_start_queue( dev );
        WL_WDS_NETIF_START_QUEUE( lp );
    } else {
        wl_hcf_error( dev, status );		/* Report the error */
        netif_device_detach( dev );		/* Stop the device and queue */
    }

    wl_unlock( lp, &flags );

    DBG_LEAVE( DbgInfo );
    return status;
} // wl_open
/*============================================================================*/
/*******************************************************************************
* wl_close()
*******************************************************************************
*
* DESCRIPTION:
*
* Close the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
/*
 * wl_close() - bring the interface down.
 *
 * Quiesces the stack side (stop Tx queues, drop carrier) and then shuts
 * the adapter down under the driver lock: interrupts are masked, the
 * interrupt-handling flag is cleared, and all ports are disabled.
 *
 * Returns 0 on success, -EIO in RTS mode.
 */
int wl_close( struct net_device *dev )
{
    struct wl_private *lp = wl_priv(dev);
    unsigned long flags;

    DBG_FUNC("wl_close");
    DBG_ENTER(DbgInfo);
    DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);

    /* Stack side first: mark the adapter busy and drop the carrier on
       the root device and every WDS port. */
    netif_stop_queue( dev );
    WL_WDS_NETIF_STOP_QUEUE( lp );
    netif_carrier_off( dev );
    WL_WDS_NETIF_CARRIER_OFF( lp );

    /* Hardware side under the lock: mask interrupts, stop handling
       them, then disable the ports. */
    wl_lock( lp, &flags );

    wl_act_int_off( lp );
    lp->is_handling_int = WL_NOT_HANDLING_INT;

#ifdef USE_RTS
    if( lp->useRTS == 1 ) {
        DBG_TRACE( DbgInfo, "Skipping device close, in RTS mode\n" );
        wl_unlock( lp, &flags );
        DBG_LEAVE( DbgInfo );
        return -EIO;
    }
#endif  /* USE_RTS */

    wl_disable( lp );

    wl_unlock( lp, &flags );

    DBG_LEAVE( DbgInfo );
    return 0;
} // wl_close
/*============================================================================*/
/*============================================================================*/
/*
 * wl_get_drvinfo() - ethtool -i handler: fill in driver identity.
 *
 * Reports the driver name, version and the bus location of the device.
 *
 * BUG FIX: the original called dev_set_name(dev->dev.parent, "%s",
 * info->bus_info), which *renamed the parent device* using the still
 * uninitialized bus_info buffer.  The intent is the opposite direction:
 * copy the parent's name into info->bus_info.  snprintf() is used
 * instead of strncpy() so the strings are always NUL-terminated.
 */
static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    snprintf(info->driver, sizeof(info->driver), "%s", DRIVER_NAME);
    snprintf(info->version, sizeof(info->version), "%s", DRV_VERSION_STR);

    if (dev->dev.parent) {
        snprintf(info->bus_info, sizeof(info->bus_info), "%s",
                 dev_name(dev->dev.parent));
    } else {
        snprintf(info->bus_info, sizeof(info->bus_info),
                 "PCMCIA FIXME");
    }
} // wl_get_drvinfo
/* ethtool operations table: only driver identification (ethtool -i)
 * and link-state reporting are implemented; everything else falls back
 * to the ethtool core defaults. */
static struct ethtool_ops wl_ethtool_ops = {
    .get_drvinfo = wl_get_drvinfo,
    .get_link = ethtool_op_get_link,
};
/*******************************************************************************
* wl_ioctl()
*******************************************************************************
*
* DESCRIPTION:
*
* The IOCTL handler for the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device struct.
* rq - a pointer to the IOCTL request buffer.
* cmd - the IOCTL command code.
*
* RETURNS:
*
* 0 on success
* errno value otherwise
*
******************************************************************************/
/*
 * wl_ioctl() - private ioctl dispatcher for the device.
 *
 * Runs with the driver lock held and card interrupts masked.  In RTS
 * mode only WL_IOCTL_RTS is accepted; otherwise UIL requests are
 * forwarded to wvlan_uil() unless the UIL subsystem has the device
 * blocked (WVLAN2_UIL_BUSY), in which case -EBUSY is returned.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd )
{
    struct wl_private  *lp = wl_priv(dev);
    unsigned long flags;
    int ret = 0;
    /*------------------------------------------------------------------------*/

    DBG_FUNC( "wl_ioctl" );
    DBG_ENTER(DbgInfo);
    DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
    DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
    DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);

    wl_lock( lp, &flags );

    /* Keep the card quiet while we talk to it. */
    wl_act_int_off( lp );

#ifdef USE_RTS
    if( lp->useRTS == 1 ) {
	/* Handle any RTS IOCTL here */
	if( cmd == WL_IOCTL_RTS ) {
	    DBG_TRACE( DbgInfo, "IOCTL: WL_IOCTL_RTS\n" );
	    ret = wvlan_rts( (struct rtsreq *)rq, dev->base_addr );
	} else {
	    DBG_TRACE( DbgInfo, "IOCTL not supported in RTS mode: 0x%X\n", cmd );
	    ret = -EOPNOTSUPP;
	}

	goto out_act_int_on_unlock;
    }
#endif  /* USE_RTS */

    /* Only handle UIL IOCTL requests when the UIL has the system blocked. */
    if( !(( lp->flags & WVLAN2_UIL_BUSY ) && ( cmd != WVLAN2_IOCTL_UIL ))) {
#ifdef USE_UIL
	struct uilreq  *urq = (struct uilreq *)rq;
#endif  /* USE_UIL */

	switch( cmd ) {
		// ================== Private IOCTLs (up to 16) ==================
#ifdef USE_UIL
	case WVLAN2_IOCTL_UIL:
	     DBG_TRACE( DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL\n" );
	     ret = wvlan_uil( urq, lp );
	     break;
#endif  /* USE_UIL */

	default:
	     DBG_TRACE(DbgInfo, "IOCTL CODE NOT SUPPORTED: 0x%X\n", cmd );
	     ret = -EOPNOTSUPP;
	     break;
	}
    } else {
	DBG_WARNING( DbgInfo, "DEVICE IS BUSY, CANNOT PROCESS REQUEST\n" );
	ret = -EBUSY;
    }

#ifdef USE_RTS
out_act_int_on_unlock:
#endif  /* USE_RTS */
    /* Re-enable card interrupts and release the lock on all paths. */
    wl_act_int_on( lp );

    wl_unlock( lp, &flags );

    DBG_LEAVE( DbgInfo );
    return ret;
} // wl_ioctl
/*============================================================================*/
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * wl_poll() - netpoll hook: invoke the interrupt handler by hand so
 * facilities like netconsole can drive the device with IRQs disabled.
 *
 * BUG FIX: the third argument to wl_isr() had been corrupted by an
 * encoding error ("&regs" had collapsed into a single "registered
 * trademark" character), which cannot compile.
 */
void wl_poll(struct net_device *dev)
{
    struct wl_private *lp = wl_priv(dev);
    unsigned long flags;
    struct pt_regs regs;

    wl_lock( lp, &flags );
    wl_isr(dev->irq, dev, &regs);
    wl_unlock( lp, &flags );
}
#endif
/*******************************************************************************
* wl_tx_timeout()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler called when, for some reason, a Tx request is not completed.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device struct.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/*
 * wl_tx_timeout() - Tx watchdog handler.
 *
 * Called by the network layer when a transmit fails to complete.  The
 * timeout is charged as a tx_error against the WDS port it belongs to
 * (when USE_WDS is enabled) or against the root device otherwise.  In
 * RTS mode the handler is a no-op.
 */
void wl_tx_timeout( struct net_device *dev )
{
#ifdef USE_WDS
    int count;
#endif  /* USE_WDS */
    unsigned long flags;
    struct wl_private *lp = wl_priv(dev);
    struct net_device_stats *pStats = NULL;

    DBG_FUNC( "wl_tx_timeout" );
    DBG_ENTER( DbgInfo );

    DBG_WARNING( DbgInfo, "%s: Transmit timeout.\n", dev->name );

    wl_lock( lp, &flags );

#ifdef USE_RTS
    if( lp->useRTS == 1 ) {
        DBG_TRACE( DbgInfo, "Skipping tx_timeout handler, in RTS mode\n" );
        wl_unlock( lp, &flags );
        DBG_LEAVE( DbgInfo );
        return;
    }
#endif  /* USE_RTS */

#ifdef USE_WDS
    /* Charge the timeout to the WDS port this device belongs to. */
    for( count = 0; count < NUM_WDS_PORTS; count++ ) {
        if( dev == lp->wds_port[count].dev ) {
            pStats = &( lp->wds_port[count].stats );
            break;
        }
    }
#endif  /* USE_WDS */

    /* Not a WDS port: charge it to the root device. */
    if( pStats == NULL ) {
        pStats = &( lp->stats );
    }

    pStats->tx_errors++;

    wl_unlock( lp, &flags );

    DBG_LEAVE( DbgInfo );
} // wl_tx_timeout
/*============================================================================*/
/*============================================================================*/
/*******************************************************************************
* wl_send()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data transmits.
*
* PARAMETERS:
*
* lp - a pointer to the device's wl_private struct.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
/*
 * wl_send() - pull one frame off the Tx queue and hand it to the card.
 *
 * Reclaims the head of txQ[0] into lp->txF, wakes the netif queue when
 * the queue depth falls below the low-water mark, then submits the
 * pending frame via hcf_send_msg().  On success the skb is freed and
 * per-port statistics are updated.
 *
 * NOTE(review): despite the int return type this function uses the
 * TRUE/FALSE convention - FALSE when nothing was (or could be) sent,
 * TRUE once a send was attempted - not 0/1 as the file header comment
 * for other functions suggests.
 */
int wl_send( struct wl_private *lp )
{

    int                 status;
    DESC_STRCT          *desc;
    WVLAN_LFRAME        *txF = NULL;
    struct list_head    *element;
    int                 len;
    /*------------------------------------------------------------------------*/

    DBG_FUNC( "wl_send" );

    if( lp == NULL ) {
        DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
        return FALSE;
    }

    if( lp->dev == NULL ) {
        DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
        return FALSE;
    }

    /* Check for the availability of FIDs; if none are available, don't take any
       frames off the txQ */
    if( lp->hcfCtx.IFB_RscInd == 0 ) {
        return FALSE;
    }

    /* Reclaim the TxQ Elements and place them back on the free queue */
    if( !list_empty( &( lp->txQ[0] ))) {
        element = lp->txQ[0].next;

        txF = (WVLAN_LFRAME * )list_entry( element, WVLAN_LFRAME, node );
        if( txF != NULL ) {
            /* Take ownership of the queued frame ... */
            lp->txF.skb  = txF->frame.skb;
            lp->txF.port = txF->frame.port;

            /* ... and recycle the queue element onto the free list. */
            txF->frame.skb  = NULL;
            txF->frame.port = 0;

            list_del( &( txF->node ));
            list_add( element, &( lp->txFree ));

            lp->txQ_count--;

            /* Queue drained below the low-water mark: let the stack
               submit more frames. */
            if( lp->txQ_count < TX_Q_LOW_WATER_MARK ) {
                if( lp->netif_queue_on == FALSE ) {
                    DBG_TX( DbgInfo, "Kickstarting Q: %d\n", lp->txQ_count );
                    netif_wake_queue( lp->dev );
                    WL_WDS_NETIF_WAKE_QUEUE( lp );
                    lp->netif_queue_on = TRUE;
                }
            }
        }
    }

    if( lp->txF.skb == NULL ) {
        return FALSE;
    }

    /* If the device has resources (FIDs) available, then Tx the packet */
    /* Format the TxRequest and send it to the adapter */
    len = lp->txF.skb->len < ETH_ZLEN ? ETH_ZLEN : lp->txF.skb->len;

    desc                    = &( lp->desc_tx );
    desc->buf_addr          = lp->txF.skb->data;
    desc->BUF_CNT           = len;
    desc->next_desc_addr    = NULL;

    status = hcf_send_msg( &( lp->hcfCtx ), desc, lp->txF.port );

    if( status == HCF_SUCCESS ) {
        lp->dev->trans_start = jiffies;

        DBG_TX( DbgInfo, "Transmit...\n" );

        /* Port 0 traffic is counted on the root device; anything else
           belongs to a WDS port (port value is (index+1) << 8). */
        if( lp->txF.port == HCF_PORT_0 ) {
            lp->stats.tx_packets++;
            lp->stats.tx_bytes += lp->txF.skb->len;
        }

#ifdef USE_WDS
        else
        {
            lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_packets++;
            lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_bytes += lp->txF.skb->len;
        }

#endif  /* USE_WDS */

        /* Free the skb and perform queue cleanup, as the buffer was
            transmitted successfully */
        dev_kfree_skb( lp->txF.skb );

        lp->txF.skb = NULL;
        lp->txF.port = 0;
    }

    return TRUE;
} // wl_send
/*============================================================================*/
/*******************************************************************************
* wl_tx()
*******************************************************************************
*
* DESCRIPTION:
*
* The Tx handler function for the network layer.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff structure containing the data to transfer.
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
/*
 * wl_tx() - queue a frame from the network layer for transmission.
 *
 * In non-DMA mode the skb is wrapped in a WVLAN_LFRAME taken from the
 * free list and appended to txQ[0]; when the queue reaches
 * DEFAULT_NUM_TX_FRAMES the netif queue is stopped.  The frame is then
 * pushed toward the hardware with wl_send() (or wl_send_dma() when DMA
 * is enabled), with card interrupts masked for the duration.
 *
 * Returns 0 on success, 1 when the frame could not be accepted (UIL
 * busy, RTS mode, or no free queue element).
 */
int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
{
    unsigned long           flags;
    struct wl_private       *lp = wl_priv(dev);
    WVLAN_LFRAME            *txF = NULL;
    struct list_head        *element;
    /*------------------------------------------------------------------------*/

    DBG_FUNC( "wl_tx" );

    /* Grab the spinlock */
    wl_lock( lp, &flags );

    if( lp->flags & WVLAN2_UIL_BUSY ) {
        DBG_WARNING( DbgInfo, "UIL has device blocked\n" );
        /* Start dropping packets here??? */
	wl_unlock( lp, &flags );
        return 1;
    }

#ifdef USE_RTS
    if( lp->useRTS == 1 ) {
        DBG_PRINT( "RTS: we're getting a Tx...\n" );
	wl_unlock( lp, &flags );
        return 1;
    }
#endif  /* USE_RTS */

    if( !lp->use_dma ) {
        /* Get an element from the queue */
        element = lp->txFree.next;
        txF = (WVLAN_LFRAME *)list_entry( element, WVLAN_LFRAME, node );
        if( txF == NULL ) {
            DBG_ERROR( DbgInfo, "Problem with list_entry\n" );
	    wl_unlock( lp, &flags );
            return 1;
        }
        /* Fill out the frame */
        txF->frame.skb = skb;
        txF->frame.port = port;
        /* Move the frame to the txQ */
        /* NOTE: Here's where we would do priority queueing */
        list_del( &( txF->node ));
        list_add( &( txF->node ), &( lp->txQ[0] ));

        lp->txQ_count++;
        /* Queue full: throttle the network layer until wl_send() drains
           it below the low-water mark again. */
        if( lp->txQ_count >= DEFAULT_NUM_TX_FRAMES ) {
            DBG_TX( DbgInfo, "Q Full: %d\n", lp->txQ_count );
            if( lp->netif_queue_on == TRUE ) {
                netif_stop_queue( lp->dev );
                WL_WDS_NETIF_STOP_QUEUE( lp );
                lp->netif_queue_on = FALSE;
            }
        }
    }
    wl_act_int_off( lp ); /* Disable Interrupts */

    /* Send the data to the hardware using the appropriate method */
#ifdef ENABLE_DMA
    if( lp->use_dma ) {
        wl_send_dma( lp, skb, port );
    }
    else
#endif
    {
        wl_send( lp );
    }
    /* Re-enable Interrupts, release the spinlock and return */
    wl_act_int_on( lp );
    wl_unlock( lp, &flags );
    return 0;
} // wl_tx
/*============================================================================*/
/*******************************************************************************
* wl_rx()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data reception.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
/*
 * wl_rx() - receive one frame from the adapter and pass it up the stack.
 *
 * Reads the HFS status word from the lookahead buffer, rejects frames
 * flagged with an error, allocates an skb for the indicated length,
 * copies the frame out of the card and hands it to netif_rx().  Rx
 * statistics are charged to the root device or, with USE_WDS, to the
 * originating WDS port (ports 1-6).
 *
 * NOTE(review): 'status' is assigned inside the GET_PACKET macro (it
 * expands to BLOCK_INPUT / hcf_rcv_msg - see the macro at the top of
 * this file); it is not uninitialized despite appearances.
 *
 * Returns 0 on success or when the frame is dropped, -EIO in RTS mode
 * or on a frame error.
 */
int wl_rx(struct net_device *dev)
{
    int                     port;
    struct sk_buff          *skb;
    struct wl_private       *lp = wl_priv(dev);
    int                     status;
    hcf_16                  pktlen;
    hcf_16                  hfs_stat;
    DESC_STRCT              *desc;
    /*------------------------------------------------------------------------*/

    DBG_FUNC("wl_rx")
    DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);

    /* Skip reception entirely while the UIL has the device blocked. */
    if(!( lp->flags & WVLAN2_UIL_BUSY )) {

#ifdef USE_RTS
        if( lp->useRTS == 1 ) {
            DBG_PRINT( "RTS: We're getting an Rx...\n" );
            return -EIO;
        }
#endif  /* USE_RTS */

        /* Read the HFS_STAT register from the lookahead buffer */
        hfs_stat = (hcf_16)(( lp->lookAheadBuf[HFS_STAT] ) |
                            ( lp->lookAheadBuf[HFS_STAT + 1] << 8 ));

        /* Make sure the frame isn't bad */
        if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS ) {
            DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
                         lp->lookAheadBuf[HFS_STAT] );
            return -EIO;
        }

        /* Determine what port this packet is for */
        port = ( hfs_stat >> 8 ) & 0x0007;
        DBG_RX( DbgInfo, "Rx frame for port %d\n", port );

        pktlen = lp->hcfCtx.IFB_RxLen;
        if (pktlen != 0) {
            skb = ALLOC_SKB(pktlen);
            if (skb != NULL) {
                /* Set the netdev based on the port */
                switch( port ) {
#ifdef USE_WDS
                case 1:
                case 2:
                case 3:
                case 4:
                case 5:
                case 6:
                    skb->dev = lp->wds_port[port-1].dev;
                    break;
#endif  /* USE_WDS */

                case 0:
                default:
                    skb->dev = dev;
                    break;
                }

                desc = &( lp->desc_rx );

                desc->next_desc_addr = NULL;

/*
#define BLOCK_INPUT(buf, len) \
    desc->buf_addr = buf; \
    desc->BUF_SIZE = len; \
    status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
*/

                /* Copy the frame out of the card; sets 'status'. */
                GET_PACKET( skb->dev, skb, pktlen );

                if( status == HCF_SUCCESS ) {
                    netif_rx( skb );

                    /* Charge the Rx counters of the owning device. */
                    if( port == 0 ) {
                        lp->stats.rx_packets++;
                        lp->stats.rx_bytes += pktlen;
                    }
#ifdef USE_WDS
                    else
                    {
                        lp->wds_port[port-1].stats.rx_packets++;
                        lp->wds_port[port-1].stats.rx_bytes += pktlen;
                    }
#endif  /* USE_WDS */

                    dev->last_rx = jiffies;

#ifdef WIRELESS_EXT
#ifdef WIRELESS_SPY
                    if( lp->spydata.spy_number > 0 ) {
                        char *srcaddr = skb->mac.raw + MAC_ADDR_SIZE;

                        wl_spy_gather( dev, srcaddr );
                    }
#endif /* WIRELESS_SPY */
#endif /* WIRELESS_EXT */
                } else {
                    /* Card refused the transfer: drop and count it. */
                    DBG_ERROR( DbgInfo, "Rx request to card FAILED\n" );

                    if( port == 0 ) {
                        lp->stats.rx_dropped++;
                    }
#ifdef USE_WDS
                    else
                    {
                        lp->wds_port[port-1].stats.rx_dropped++;
                    }
#endif  /* USE_WDS */

                    dev_kfree_skb( skb );
                }
            } else {
                /* No memory for the skb: drop and count it. */
                DBG_ERROR( DbgInfo, "Could not alloc skb\n" );

                if( port == 0 ) {
                    lp->stats.rx_dropped++;
                }
#ifdef USE_WDS
                else
                {
                    lp->wds_port[port-1].stats.rx_dropped++;
                }
#endif  /* USE_WDS */
            }
        }
    }

    return 0;
} // wl_rx
/*============================================================================*/
/*******************************************************************************
* wl_multicast()
*******************************************************************************
*
* DESCRIPTION:
*
* Function to handle multicast packets
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
#ifdef NEW_MULTICAST
/*
 * Reprogram the card's Rx address filtering to match the current
 * dev->flags (IFF_PROMISC / IFF_ALLMULTI) and the interface's multicast
 * address list.  Only does real work when station (STA) firmware is
 * running; skipped entirely while a user-space (UIL) request or RTS
 * diagnostic mode owns the hardware.
 */
void wl_multicast( struct net_device *dev )
{
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA //;?should we return an error status in AP mode
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
	int                   x;
	struct netdev_hw_addr *ha;
	struct wl_private     *lp = wl_priv(dev);
	unsigned long         flags;
	/*------------------------------------------------------------------------*/

	DBG_FUNC( "wl_multicast" );
	DBG_ENTER( DbgInfo );
	DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );

	/* Nothing to program while the interface is down. */
	if( !wl_adapter_is_open( dev )) {
		DBG_LEAVE( DbgInfo );
		return;
	}

#if DBG
	if( DBG_FLAGS( DbgInfo ) & DBG_PARAM_ON ) {
		DBG_PRINT(" flags: %s%s%s\n",
			( dev->flags & IFF_PROMISC ) ? "Promiscous " : "",
			( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
			( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
		DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
		netdev_for_each_mc_addr(ha, dev)
			DBG_PRINT(" %pM (%d)\n", ha->addr, dev->addr_len);
	}
#endif /* DBG */

	/* Don't touch the card while a user-space (UIL) request owns it. */
	if(!( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
		if( lp->useRTS == 1 ) {
			DBG_TRACE( DbgInfo, "Skipping multicast, in RTS mode\n" );
			DBG_LEAVE( DbgInfo );
			return;
		}
#endif /* USE_RTS */

		wl_lock( lp, &flags );
		wl_act_int_off( lp );

		/* Rx filtering is only configurable with station firmware. */
		if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {
			if( dev->flags & IFF_PROMISC ) {
				/* Enable promiscuous mode */
				lp->ltvRecord.len = 2;
				lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
				lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
				DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
				hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
			}
			else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
					( dev->flags & IFF_ALLMULTI )) {
				/* Shutting off this filter will enable all multicast frames to
				   be sent up from the device; however, this is a static RID, so
				   a call to wl_apply() is needed */
				lp->ltvRecord.len = 2;
				lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
				lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
				DBG_PRINT( "Enabling all multicast mode (IFF_ALLMULTI)\n" );
				hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
				wl_apply( lp );
			}
			else if (!netdev_mc_empty(dev)) {
				/* Set the explicit multicast list.  The LTV length is in
				   16-bit words: 3 words (6 bytes) per address plus 1 for
				   the type field. */
				lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
				lp->ltvRecord.typ = CFG_GROUP_ADDR;
				x = 0;
				netdev_for_each_mc_addr(ha, dev)
					memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
						ha->addr, ETH_ALEN);
				DBG_PRINT( "Setting multicast list\n" );
				hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
			} else {
				/* Disable promiscuous mode */
				lp->ltvRecord.len = 2;
				lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
				lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
				DBG_PRINT( "Disabling Promiscuous mode\n" );
				hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));

				/* Disable multicast mode.  NOTE(review): the record's
				   value field is not set here before the put, only len/typ
				   — presumably a zero-length address list clears the RID;
				   confirm against the HCF documentation. */
				lp->ltvRecord.len = 2;
				lp->ltvRecord.typ = CFG_GROUP_ADDR;
				DBG_PRINT( "Disabling Multicast mode\n" );
				hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));

				/* Turning on this filter will prevent all multicast frames from
				   being sent up from the device; however, this is a static RID,
				   so a call to wl_apply() is needed */
				lp->ltvRecord.len = 2;
				lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
				lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
				DBG_PRINT( "Disabling all multicast mode (IFF_ALLMULTI)\n" );
				hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
				wl_apply( lp );
			}
		}
		wl_act_int_on( lp );
		wl_unlock( lp, &flags );
	}
	DBG_LEAVE( DbgInfo );
#endif /* HCF_STA */
} // wl_multicast
/*============================================================================*/
#else /* NEW_MULTICAST */
/*
 * Legacy (pre-NEW_MULTICAST) set-multicast entry point.  Kept only for
 * reference; the #error below deliberately fails the build because this
 * interface is obsolete.
 */
void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
{
	DBG_FUNC( "wl_multicast");
	DBG_ENTER(DbgInfo);
	DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
	DBG_PARAM( DbgInfo, "num_addrs", "%d", num_addrs );
	DBG_PARAM( DbgInfo, "addrs", "0x%p", addrs );

#error Obsolete set multicast interface!

	DBG_LEAVE( DbgInfo );
} // wl_multicast
/*============================================================================*/
#endif /* NEW_MULTICAST */
/*
 * net_device entry points for the primary wireless interface (HCF port 0).
 *
 * Consistency fix: the original mixed "&func" and plain "func" for the
 * function designators (only .ndo_poll_controller omitted the '&').
 * A function name already decays to a pointer, so use the plain form
 * uniformly.
 */
static const struct net_device_ops wl_netdev_ops =
{
	.ndo_start_xmit         = wl_tx_port0,
	.ndo_set_config         = wl_config,
	.ndo_get_stats          = wl_stats,
	.ndo_set_rx_mode        = wl_multicast,
	.ndo_init               = wl_insert,
	.ndo_open               = wl_adapter_open,
	.ndo_stop               = wl_adapter_close,
	.ndo_do_ioctl           = wl_ioctl,
	.ndo_tx_timeout         = wl_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = wl_poll,
#endif
};
/*******************************************************************************
* wl_device_alloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Create instances of net_device and wl_private for the new adapter
* and register the device's entry points in the net_device structure.
*
* PARAMETERS:
*
* N/A
*
* RETURNS:
*
* a pointer to an allocated and initialized net_device struct for this
* device.
*
******************************************************************************/
/*
 * Allocate and initialize the net_device (with embedded wl_private) for
 * a new adapter.  Returns the initialized net_device, or NULL if the
 * allocation fails.  The caller owns the returned device and must
 * eventually release it via wl_device_dealloc().
 */
struct net_device * wl_device_alloc( void )
{
	struct net_device *dev = NULL;
	struct wl_private *lp = NULL;
	/*------------------------------------------------------------------------*/

	DBG_FUNC( "wl_device_alloc" );
	DBG_ENTER( DbgInfo );

	/* Alloc a net_device struct */
	dev = alloc_etherdev(sizeof(struct wl_private));
	if (!dev)
		return NULL;

	/* Get the private adapter area embedded in the net_device.
	   (NOTE(review): an older comment here mentioned initializing a
	   'next' pointer for PCI, but no such assignment remains.) */
	lp = wl_priv(dev);

	/* Clamp the MTU to the hardware maximum. */
	if( dev->mtu > MTU_MAX )
	{
		DBG_WARNING( DbgInfo, "%s: MTU set too high, limiting to %d.\n",
			dev->name, MTU_MAX );
		dev->mtu = MTU_MAX;
	}

	/* Setup the function table in the device structure. */
	dev->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
	lp->wireless_data.spy_data = &lp->spy_data;
	dev->wireless_data = &lp->wireless_data;
	dev->netdev_ops = &wl_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &wl_ethtool_ops;

	/* Queue stays stopped until the adapter is actually opened. */
	netif_stop_queue( dev );

	/* Allocate virtual devices for WDS support if needed */
	WL_WDS_DEVICE_ALLOC( lp );

	DBG_LEAVE( DbgInfo );
	return dev;
} // wl_device_alloc
/*============================================================================*/
/*******************************************************************************
* wl_device_dealloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Free instances of net_device and wl_private structures for an adapter
* and perform basic cleanup.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/*
 * Free the net_device for an adapter (and, when WDS is compiled in, its
 * WDS port devices) and perform basic cleanup.
 *
 * BUGFIX: the declaration of 'lp' was commented out while the
 * WL_WDS_DEVICE_DEALLOC( lp ) invocation below still references it,
 * which breaks the build whenever that macro expands to a real call
 * (USE_WDS).  Restore the declaration.
 */
void wl_device_dealloc( struct net_device *dev )
{
	struct wl_private *lp = wl_priv(dev);
	/*------------------------------------------------------------------------*/

	DBG_FUNC( "wl_device_dealloc" );
	DBG_ENTER( DbgInfo );

	/* Dealloc the WDS ports */
	WL_WDS_DEVICE_DEALLOC( lp );
	(void)lp;   /* silence "unused" warning when the macro expands to nothing */

	free_netdev( dev );

	DBG_LEAVE( DbgInfo );
	return;
} // wl_device_dealloc
/*============================================================================*/
/*******************************************************************************
* wl_tx_port0()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_0.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_0.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/*
 * ndo_start_xmit handler for the primary interface (HCF port 0).
 *
 * BUGFIX: the original executed an unconditional "return wl_tx(...)"
 * first, which made the "#ifdef ENABLE_DMA return wl_tx_dma(...)"
 * below it unreachable — the DMA Tx path was never taken even when
 * compiled in.  Select the path with the preprocessor instead.
 */
int wl_tx_port0( struct sk_buff *skb, struct net_device *dev )
{
	DBG_TX( DbgInfo, "Tx on Port 0\n" );

#ifdef ENABLE_DMA
	return wl_tx_dma( skb, dev, HCF_PORT_0 );
#else
	return wl_tx( skb, dev, HCF_PORT_0 );
#endif
} // wl_tx_port0
/*============================================================================*/
#ifdef USE_WDS
/*******************************************************************************
* wl_tx_port1()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_1.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_1.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Tx entry point for the WDS virtual device mapped to HCF port 1. */
int wl_tx_port1( struct sk_buff *skb, struct net_device *dev )
{
	int hcf_port = HCF_PORT_1;

	DBG_TX( DbgInfo, "Tx on Port 1\n" );

	/* Delegate to the common Tx path, tagged with this port number. */
	return wl_tx( skb, dev, hcf_port );
} // wl_tx_port1
/*============================================================================*/
/*******************************************************************************
* wl_tx_port2()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_2.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_2.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Tx entry point for the WDS virtual device mapped to HCF port 2. */
int wl_tx_port2( struct sk_buff *skb, struct net_device *dev )
{
	int hcf_port = HCF_PORT_2;

	DBG_TX( DbgInfo, "Tx on Port 2\n" );

	/* Delegate to the common Tx path, tagged with this port number. */
	return wl_tx( skb, dev, hcf_port );
} // wl_tx_port2
/*============================================================================*/
/*******************************************************************************
* wl_tx_port3()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_3.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_3.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Tx entry point for the WDS virtual device mapped to HCF port 3. */
int wl_tx_port3( struct sk_buff *skb, struct net_device *dev )
{
	int hcf_port = HCF_PORT_3;

	DBG_TX( DbgInfo, "Tx on Port 3\n" );

	/* Delegate to the common Tx path, tagged with this port number. */
	return wl_tx( skb, dev, hcf_port );
} // wl_tx_port3
/*============================================================================*/
/*******************************************************************************
* wl_tx_port4()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_4.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_4.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Tx entry point for the WDS virtual device mapped to HCF port 4. */
int wl_tx_port4( struct sk_buff *skb, struct net_device *dev )
{
	int hcf_port = HCF_PORT_4;

	DBG_TX( DbgInfo, "Tx on Port 4\n" );

	/* Delegate to the common Tx path, tagged with this port number. */
	return wl_tx( skb, dev, hcf_port );
} // wl_tx_port4
/*============================================================================*/
/*******************************************************************************
* wl_tx_port5()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_5.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_5.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Tx entry point for the WDS virtual device mapped to HCF port 5. */
int wl_tx_port5( struct sk_buff *skb, struct net_device *dev )
{
	int hcf_port = HCF_PORT_5;

	DBG_TX( DbgInfo, "Tx on Port 5\n" );

	/* Delegate to the common Tx path, tagged with this port number. */
	return wl_tx( skb, dev, hcf_port );
} // wl_tx_port5
/*============================================================================*/
/*******************************************************************************
* wl_tx_port6()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_6.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_6.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Tx entry point for the WDS virtual device mapped to HCF port 6. */
int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
{
	int hcf_port = HCF_PORT_6;

	DBG_TX( DbgInfo, "Tx on Port 6\n" );

	/* Delegate to the common Tx path, tagged with this port number. */
	return wl_tx( skb, dev, hcf_port );
} // wl_tx_port6
/*============================================================================*/
/*******************************************************************************
* wl_wds_device_alloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Create instances of net_device to represent the WDS ports, and register
* the device's entry points in the net_device structure.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A, but will place pointers to the allocated and initialized net_device
* structs in the private adapter structure.
*
******************************************************************************/
void wl_wds_device_alloc( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_wds_device_alloc" );
DBG_ENTER( DbgInfo );
/* WDS support requires additional net_device structs to be allocated,
so that user space apps can use these virtual devices to specify the
port on which to Tx/Rx */
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
struct net_device *dev_wds = NULL;
dev_wds = kmalloc( sizeof( struct net_device ), GFP_KERNEL );
memset( dev_wds, 0, sizeof( struct net_device ));
ether_setup( dev_wds );
lp->wds_port[count].dev = dev_wds;
/* Re-use wl_init for all the devices, as it currently does nothing, but
is required. Re-use the stats/tx_timeout handler for all as well; the
WDS port which is requesting these operations can be determined by
the net_device pointer. Set the private member of all devices to point
to the same net_device struct; that way, all information gets
funnelled through the one "real" net_device. Name the WDS ports
"wds<n>" */
lp->wds_port[count].dev->init = &wl_init;
lp->wds_port[count].dev->get_stats = &wl_stats;
lp->wds_port[count].dev->tx_timeout = &wl_tx_timeout;
lp->wds_port[count].dev->watchdog_timeo = TX_TIMEOUT;
lp->wds_port[count].dev->priv = lp;
sprintf( lp->wds_port[count].dev->name, "wds%d", count );
}
/* Register the Tx handlers */
lp->wds_port[0].dev->hard_start_xmit = &wl_tx_port1;
lp->wds_port[1].dev->hard_start_xmit = &wl_tx_port2;
lp->wds_port[2].dev->hard_start_xmit = &wl_tx_port3;
lp->wds_port[3].dev->hard_start_xmit = &wl_tx_port4;
lp->wds_port[4].dev->hard_start_xmit = &wl_tx_port5;
lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
WL_WDS_NETIF_STOP_QUEUE( lp );
DBG_LEAVE( DbgInfo );
return;
} // wl_wds_device_alloc
/*============================================================================*/
/*******************************************************************************
* wl_wds_device_dealloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Free instances of net_device structures used to support WDS.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/*
 * Tear down the "virtual" WDS net_device structs: close any that are
 * still up, free them, and clear the per-port pointers so a second call
 * is harmless.
 */
void wl_wds_device_dealloc( struct wl_private *lp )
{
	int count;
	/*------------------------------------------------------------------------*/

	DBG_FUNC( "wl_wds_device_dealloc" );
	DBG_ENTER( DbgInfo );

	for( count = 0; count < NUM_WDS_PORTS; count++ ) {
		struct net_device *dev_wds = NULL;

		dev_wds = lp->wds_port[count].dev;

		if( dev_wds != NULL ) {
			/* Bring the interface down before freeing it. */
			if( dev_wds->flags & IFF_UP ) {
				dev_close( dev_wds );
				dev_wds->flags &= ~( IFF_UP | IFF_RUNNING );
			}

			/* NOTE(review): free_netdev() pairs with alloc_netdev()-
			   family allocators, but wl_wds_device_alloc() obtains
			   these devices with plain kmalloc() — verify this
			   alloc/free pairing. */
			free_netdev(dev_wds);
			lp->wds_port[count].dev = NULL;
		}
	}

	DBG_LEAVE( DbgInfo );
	return;
} // wl_wds_device_dealloc
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_start_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to start the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Start the Tx queue of every registered WDS virtual device whose
   queue is currently off, recording the new queue state. */
void wl_wds_netif_start_queue( struct wl_private *lp )
{
	int i;
	/*------------------------------------------------------------------------*/

	if( lp == NULL )
		return;

	for( i = 0; i < NUM_WDS_PORTS; i++ ) {
		if( !lp->wds_port[i].is_registered ||
			lp->wds_port[i].netif_queue_on != FALSE )
			continue;

		netif_start_queue( lp->wds_port[i].dev );
		lp->wds_port[i].netif_queue_on = TRUE;
	}
} // wl_wds_netif_start_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_stop_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to stop the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Stop the Tx queue of every registered WDS virtual device whose
   queue is currently on, recording the new queue state. */
void wl_wds_netif_stop_queue( struct wl_private *lp )
{
	int i;
	/*------------------------------------------------------------------------*/

	if( lp == NULL )
		return;

	for( i = 0; i < NUM_WDS_PORTS; i++ ) {
		if( !lp->wds_port[i].is_registered ||
			lp->wds_port[i].netif_queue_on != TRUE )
			continue;

		netif_stop_queue( lp->wds_port[i].dev );
		lp->wds_port[i].netif_queue_on = FALSE;
	}
} // wl_wds_netif_stop_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_wake_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to wake the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Wake the Tx queue of every registered WDS virtual device whose
   queue is currently off, recording the new queue state. */
void wl_wds_netif_wake_queue( struct wl_private *lp )
{
	int i;
	/*------------------------------------------------------------------------*/

	if( lp == NULL )
		return;

	for( i = 0; i < NUM_WDS_PORTS; i++ ) {
		if( !lp->wds_port[i].is_registered ||
			lp->wds_port[i].netif_queue_on != FALSE )
			continue;

		netif_wake_queue( lp->wds_port[i].dev );
		lp->wds_port[i].netif_queue_on = TRUE;
	}
} // wl_wds_netif_wake_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_carrier_on()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to signal the network layer that carrier is present on all of the
* "virtual" network devices which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Signal carrier-present to the stack for every registered WDS
   virtual device. */
void wl_wds_netif_carrier_on( struct wl_private *lp )
{
	int i;
	/*------------------------------------------------------------------------*/

	if( lp == NULL )
		return;

	for( i = 0; i < NUM_WDS_PORTS; i++ ) {
		if( lp->wds_port[i].is_registered )
			netif_carrier_on( lp->wds_port[i].dev );
	}
} // wl_wds_netif_carrier_on
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_carrier_off()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to signal the network layer that carrier is NOT present on all of
* the "virtual" network devices which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
/* Signal carrier-absent to the stack for every registered WDS
   virtual device. */
void wl_wds_netif_carrier_off( struct wl_private *lp )
{
	int i;
	/*------------------------------------------------------------------------*/

	if( lp == NULL )
		return;

	for( i = 0; i < NUM_WDS_PORTS; i++ ) {
		if( lp->wds_port[i].is_registered )
			netif_carrier_off( lp->wds_port[i].dev );
	}
} // wl_wds_netif_carrier_off
/*============================================================================*/
#endif /* USE_WDS */
#ifdef ENABLE_DMA
/*******************************************************************************
* wl_send_dma()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data transmits when using busmaster DMA.
*
* PARAMETERS:
*
* lp - a pointer to the device's wl_private struct.
* skb - a pointer to the network layer's data buffer.
* port - the Hermes port on which to transmit.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
/*
 * Transmit one sk_buff via busmaster DMA.
 *
 * BUGFIX: when wl_pci_dma_get_tx_packet() returned NULL but the netif
 * queue was already stopped (netif_queue_on == FALSE), the original
 * fell through the inner if and dereferenced the NULL descriptor in
 * SET_BUF_CNT().  The no-descriptor case now always drops the skb and
 * returns, stopping the queue only if it was still running.
 */
int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
{
	int len;
	DESC_STRCT *desc = NULL;
	DESC_STRCT *desc_next = NULL;
	/*------------------------------------------------------------------------*/

	DBG_FUNC( "wl_send_dma" );

	if( lp == NULL )
	{
		DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
		return FALSE;
	}

	if( lp->dev == NULL )
	{
		DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
		return FALSE;
	}

	/* AGAIN, ALL THE QUEUEING DONE HERE IN I/O MODE IS NOT PERFORMED */

	if( skb == NULL )
	{
		DBG_WARNING (DbgInfo, "Nothing to send.\n");
		return FALSE;
	}

	len = skb->len;

	/* Get a free descriptor */
	desc = wl_pci_dma_get_tx_packet( lp );
	if( desc == NULL )
	{
		/* No Tx descriptors available: stop the queue (if it is not
		   stopped already) and drop the frame. */
		if( lp->netif_queue_on == TRUE ) {
			netif_stop_queue( lp->dev );
			WL_WDS_NETIF_STOP_QUEUE( lp );
			lp->netif_queue_on = FALSE;
		}
		dev_kfree_skb( skb );
		return 0;
	}

	/* First descriptor carries the frame header area. */
	SET_BUF_CNT( desc, /*HCF_DMA_FD_CNT*/HFS_ADDR_DEST );
	SET_BUF_SIZE( desc, HCF_DMA_TX_BUF1_SIZE );

	desc_next = desc->next_desc_addr;
	if( desc_next->buf_addr == NULL )
	{
		/* NOTE(review): this error path returns without freeing the skb
		   or the descriptor — possible leak; verify expected ownership. */
		DBG_ERROR( DbgInfo, "DMA descriptor buf_addr is NULL\n" );
		return FALSE;
	}

	/* Copy the payload into the DMA packet */
	memcpy( desc_next->buf_addr, skb->data, len );
	SET_BUF_CNT( desc_next, len );
	SET_BUF_SIZE( desc_next, HCF_MAX_PACKET_SIZE );

	hcf_dma_tx_put( &( lp->hcfCtx ), desc, 0 );

	/* Free the skb and perform queue cleanup, as the buffer was
	   transmitted successfully */
	dev_kfree_skb( skb );

	return TRUE;
} // wl_send_dma
/*============================================================================*/
/*******************************************************************************
* wl_rx_dma()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data reception when using busmaster DMA.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
/*
 * Receive one frame via busmaster DMA: pull a completed descriptor from
 * the HCF, validate it, copy the payload into a fresh skb, route it to
 * the right (WDS or primary) net_device, and hand it to the stack.
 * Returns 0 on success, -EIO on descriptor/status errors.
 */
int wl_rx_dma( struct net_device *dev )
{
	int                      port;
	hcf_16                   pktlen;
	hcf_16                   hfs_stat;
	struct sk_buff           *skb;
	struct wl_private        *lp = NULL;
	DESC_STRCT               *desc, *desc_next;
	//CFG_MB_INFO_RANGE2_STRCT x;
	/*------------------------------------------------------------------------*/

	/* NOTE(review): debug label says "wl_rx" although this is wl_rx_dma. */
	DBG_FUNC("wl_rx")
	DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);

	/* Skip entirely while a user-space (UIL) request owns the card. */
	if((( lp = dev->priv ) != NULL ) &&
		!( lp->flags & WVLAN2_UIL_BUSY )) {

#ifdef USE_RTS
		if( lp->useRTS == 1 ) {
			DBG_PRINT( "RTS: We're getting an Rx...\n" );
			return -EIO;
		}
#endif  /* USE_RTS */

		//if( lp->dma.status == 0 )
		//{
		desc = hcf_dma_rx_get( &( lp->hcfCtx ));

		if( desc != NULL )
		{
			/* Check and see if we rcvd. a WMP frame */
			/*
			if((( *(hcf_8 *)&desc->buf_addr[HFS_STAT] ) &
				( HFS_STAT_MSG_TYPE | HFS_STAT_ERR )) == HFS_STAT_WMP_MSG )
			{
				DBG_TRACE( DbgInfo, "Got a WMP frame\n" );

				x.len = sizeof( CFG_MB_INFO_RANGE2_STRCT ) / sizeof( hcf_16 );
				x.typ = CFG_MB_INFO;
				x.base_typ = CFG_WMP;
				x.frag_cnt = 2;
				x.frag_buf[0].frag_len  = GET_BUF_CNT( descp ) / sizeof( hcf_16 );
				x.frag_buf[0].frag_addr = (hcf_8 *) descp->buf_addr ;
				x.frag_buf[1].frag_len  = ( GET_BUF_CNT( descp->next_desc_addr ) + 1 ) / sizeof( hcf_16 );
				x.frag_buf[1].frag_addr = (hcf_8 *) descp->next_desc_addr->buf_addr ;

				hcf_put_info( &( lp->hcfCtx ), (LTVP)&x );
			}
			*/

			desc_next = desc->next_desc_addr;

			/* Make sure the buffer isn't empty */
			if( GET_BUF_CNT( desc ) == 0 ) {
				DBG_WARNING( DbgInfo, "Buffer is empty!\n" );

				/* Give the descriptor back to the HCF */
				hcf_dma_rx_put( &( lp->hcfCtx ), desc );
				return -EIO;
			}

			/* Read the HFS_STAT register from the lookahead buffer */
			hfs_stat = (hcf_16)( desc->buf_addr[HFS_STAT/2] );

			/* Make sure the frame isn't bad.  NOTE(review): the error bits
			   are compared against HCF_SUCCESS rather than 0 — presumably
			   equivalent; confirm against the HCF headers. */
			if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS )
			{
				DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
					desc->buf_addr[HFS_STAT/2] );

				/* Give the descriptor back to the HCF */
				hcf_dma_rx_put( &( lp->hcfCtx ), desc );
				return -EIO;
			}

			/* Determine what port this packet is for */
			port = ( hfs_stat >> 8 ) & 0x0007;
			DBG_RX( DbgInfo, "Rx frame for port %d\n", port );

			pktlen = GET_BUF_CNT(desc_next);
			if (pktlen != 0) {
				skb = ALLOC_SKB(pktlen);
				if (skb != NULL) {
					/* Route to the WDS virtual device for ports 1-6,
					   the primary device otherwise. */
					switch( port ) {
#ifdef USE_WDS
					case 1:
					case 2:
					case 3:
					case 4:
					case 5:
					case 6:
						skb->dev = lp->wds_port[port-1].dev;
						break;
#endif  /* USE_WDS */

					case 0:
					default:
						skb->dev = dev;
						break;
					}

					GET_PACKET_DMA( skb->dev, skb, pktlen );

					/* Give the descriptor back to the HCF */
					hcf_dma_rx_put( &( lp->hcfCtx ), desc );

					netif_rx( skb );

					/* Per-port accounting. */
					if( port == 0 ) {
						lp->stats.rx_packets++;
						lp->stats.rx_bytes += pktlen;
					}
#ifdef USE_WDS
					else
					{
						lp->wds_port[port-1].stats.rx_packets++;
						lp->wds_port[port-1].stats.rx_bytes += pktlen;
					}
#endif  /* USE_WDS */

					dev->last_rx = jiffies;

				} else {
					/* skb allocation failed; frame is dropped.
					   NOTE(review): the descriptor is not returned via
					   hcf_dma_rx_put() on this path — verify intent. */
					DBG_ERROR( DbgInfo, "Could not alloc skb\n" );

					if( port == 0 )
					{
						lp->stats.rx_dropped++;
					}
#ifdef USE_WDS
					else
					{
						lp->wds_port[port-1].stats.rx_dropped++;
					}
#endif  /* USE_WDS */
				}
			}
		}
		//}
	}

	return 0;
} // wl_rx_dma
/*============================================================================*/
#endif // ENABLE_DMA
| gpl-2.0 |
TheTypoMaster/kernel_condor | drivers/net/hippi/rrunner.c | 4799 | 42522 | /*
* rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
*
* Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
*
* Thanks to Essential Communication for providing us with hardware
* and very comprehensive documentation without which I would not have
* been able to write this driver. A special thank you to John Gibbon
* for sorting out the legal issues, with the NDA, allowing the code to
* be released under the GPL.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
* stupid bugs in my code.
*
* Softnet support and various other patches from Val Henson of
* ODS/Essential.
*
* PCI DMA mapping code partly based on work by Francois Romieu.
*/
#define DEBUG 1
#define RX_DMA_SKBUFF 1
#define PKT_COPY_THRESHOLD 512
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#define rr_if_busy(dev) netif_queue_stopped(dev)
#define rr_if_running(dev) netif_running(dev)
#include "rrunner.h"
#define RUN_AT(x) (jiffies + (x))
MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");
static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
/* net_device entry points for the RoadRunner HIPPI interface; MTU and
 * MAC-address handling are delegated to the generic HIPPI helpers. */
static const struct net_device_ops rr_netdev_ops = {
	.ndo_open		= rr_open,
	.ndo_stop		= rr_close,
	.ndo_start_xmit		= rr_start_xmit,
	.ndo_do_ioctl		= rr_ioctl,
	.ndo_change_mtu		= hippi_change_mtu,
	.ndo_set_mac_address	= hippi_mac_addr,
};
/*
* Implementation notes:
*
* The DMA engine only allows for DMA within physical 64KB chunks of
* memory. The current approach of the driver (and stack) is to use
* linear blocks of memory for the skbuffs. However, as the data block
* is always the first part of the skb and skbs are 2^n aligned so we
* are guaranteed to get the whole block within one 64KB-aligned 64KB
* chunk.
*
* On the long term, relying on being able to allocate 64KB linear
* chunks of memory is not feasible and the skb handling code and the
* stack will need to know about I/O vectors or something similar.
*/
/*
 * PCI probe: allocate the HIPPI net_device, map the register BAR,
 * allocate the Tx/Rx/event DMA rings, reset the NIC and register the
 * interface.
 *
 * BUGFIXES on the error path: the original leaked evt_ring (never freed
 * on failure after its allocation), never called pci_disable_device(),
 * and called pci_release_regions() even when pci_request_regions() had
 * failed.  The unwind labels below fix all three.
 */
static int __devinit rr_init_one(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *dev;
	static int version_disp;
	u8 pci_latency;
	struct rr_private *rrpriv;
	void *tmpptr;
	dma_addr_t ring_dma;
	int ret = -ENOMEM;

	dev = alloc_hippi_dev(sizeof(struct rr_private));
	if (!dev)
		goto out3;

	ret = pci_enable_device(pdev);
	if (ret) {
		ret = -ENODEV;
		goto out2;
	}

	rrpriv = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, "rrunner")) {
		ret = -EIO;
		goto out_disable;
	}

	pci_set_drvdata(pdev, dev);

	rrpriv->pci_dev = pdev;

	spin_lock_init(&rrpriv->lock);

	dev->irq = pdev->irq;
	dev->netdev_ops = &rr_netdev_ops;

	dev->base_addr = pci_resource_start(pdev, 0);

	/* display version info if adapter is found */
	if (!version_disp) {
		/* set display flag to TRUE so that */
		/* we only display this string ONCE */
		version_disp = 1;
		printk(version);
	}

	/* Enforce a sane minimum PCI latency timer. */
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
	if (pci_latency <= 0x58){
		pci_latency = 0x58;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
	}

	pci_set_master(pdev);

	printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
	       "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
	       dev->base_addr, dev->irq, pci_latency);

	/*
	 * Remap the regs into kernel space.
	 */
	rrpriv->regs = ioremap(dev->base_addr, 0x1000);
	if (!rrpriv->regs){
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "RoadRunner will be disabled.\n", dev->name);
		ret = -EIO;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	rrpriv->tx_ring = tmpptr;
	rrpriv->tx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	rrpriv->rx_ring = tmpptr;
	rrpriv->rx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
	rrpriv->evt_ring = tmpptr;
	rrpriv->evt_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Don't access any register before this point!
	 */
#ifdef __BIG_ENDIAN
	writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
	       &rrpriv->regs->HostCtrl);
#endif
	/*
	 * Need to add a case for little-endian 64-bit hosts here.
	 */

	rr_init(dev);

	dev->base_addr = 0;

	ret = register_netdev(dev);
	if (ret)
		goto out;
	return 0;

 out:
	if (rrpriv->evt_ring)
		pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
				    rrpriv->evt_ring_dma);
	if (rrpriv->rx_ring)
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
				    rrpriv->rx_ring_dma);
	if (rrpriv->tx_ring)
		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
				    rrpriv->tx_ring_dma);
	if (rrpriv->regs)
		iounmap(rrpriv->regs);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
 out_disable:
	pci_disable_device(pdev);
 out2:
	free_netdev(dev);
 out3:
	return ret;
}
/*
 * PCI remove: unregister the interface and release all resources
 * acquired in rr_init_one().
 *
 * BUGFIX: the original freed the DMA rings before unregister_netdev(),
 * so a still-open interface could DMA into freed memory during
 * teardown.  Unregister first (which closes the device), then free.
 */
static void __devexit rr_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct rr_private *rr = netdev_priv(dev);

		/* Make sure the NIC is halted before tearing anything down. */
		if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){
			printk(KERN_ERR "%s: trying to unload running NIC\n",
			       dev->name);
			writel(HALT_NIC, &rr->regs->HostCtrl);
		}

		unregister_netdev(dev);
		pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
				    rr->evt_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
				    rr->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
				    rr->tx_ring_dma);
		iounmap(rr->regs);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
	}
}
/*
* Commands are considered to be slow, thus there is no reason to
* inline this.
*/
/*
 * Post one command word onto the NIC's command ring and advance the
 * producer index.
 *
 * Commands are considered to be slow, thus there is no reason to
 * inline this.
 *
 * BUGFIX: every "&regs" in this function had been mangled into the
 * mojibake "(R)s" (the text "&reg" was decoded as the HTML entity for
 * the registered-trademark sign), which does not compile.  Restored.
 */
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
{
	struct rr_regs __iomem *regs;
	u32 idx;

	regs = rrpriv->regs;
	/*
	 * This is temporary - it will go away in the final version.
	 * We probably also want to make this function inline.
	 */
	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("issuing command for halted NIC, code 0x%x, "
		       "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
		if (readl(&regs->Mode) & FATAL_ERR)
			printk("error codes Fail1 %02x, Fail2 %02x\n",
			       readl(&regs->Fail1), readl(&regs->Fail2));
	}

	idx = rrpriv->info->cmd_ctrl.pi;

	writel(*(u32*)(cmd), &regs->CmdRing[idx]);
	wmb();

	/* Producer index moves downwards; assumes CMD_RING_ENTRIES is a
	   power of two so the unsigned wrap of (0 - 1) still reduces to a
	   valid slot — TODO confirm against rrunner.h. */
	idx = (idx - 1) % CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.pi = idx;
	wmb();

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error code %02x\n", readl(&regs->Fail1));
}
/*
 * Reset the board in a sensible manner. The NIC is already halted
 * when we get here and a spin-lock is held.
 *
 * Loads the firmware into SRAM, zeroes every DMA/state register, sets
 * the host pointer-width/byte-swap mode, clears all ring indices and
 * finally starts the firmware at the entry point read from the EEPROM.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static int rr_reset(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 start_pc;
	int i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rr_load_firmware(dev);

	/* Put the on-board processor state machines into a known state. */
	writel(0x01000000, &regs->TX_state);
	writel(0xff800000, &regs->RX_state);
	writel(0, &regs->AssistState);
	writel(CLEAR_INTA, &regs->LocalCtrl);
	writel(0x01, &regs->BrkPt);
	writel(0, &regs->Timer);
	writel(0, &regs->TimerRef);
	writel(RESET_DMA, &regs->DmaReadState);
	writel(RESET_DMA, &regs->DmaWriteState);
	writel(0, &regs->DmaWriteHostHi);
	writel(0, &regs->DmaWriteHostLo);
	writel(0, &regs->DmaReadHostHi);
	writel(0, &regs->DmaReadHostLo);
	writel(0, &regs->DmaReadLen);
	writel(0, &regs->DmaWriteLen);
	writel(0, &regs->DmaWriteLcl);
	writel(0, &regs->DmaWriteIPchecksum);
	writel(0, &regs->DmaReadLcl);
	writel(0, &regs->DmaReadIPchecksum);
	writel(0, &regs->PciState);

	/* Select pointer width / word swapping to match this host. */
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
	writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
	writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
	writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif

#if 0
	/*
	 * Don't worry, this is just black magic.
	 */
	writel(0xdf000, &regs->RxBase);
	writel(0xdf000, &regs->RxPrd);
	writel(0xdf000, &regs->RxCon);
	writel(0xce000, &regs->TxBase);
	writel(0xce000, &regs->TxPrd);
	writel(0xce000, &regs->TxCon);
	writel(0, &regs->RxIndPro);
	writel(0, &regs->RxIndCon);
	writel(0, &regs->RxIndRef);
	writel(0, &regs->TxIndPro);
	writel(0, &regs->TxIndCon);
	writel(0, &regs->TxIndRef);
	writel(0xcc000, &regs->pad10[0]);
	writel(0, &regs->DrCmndPro);
	writel(0, &regs->DrCmndCon);
	writel(0, &regs->DwCmndPro);
	writel(0, &regs->DwCmndCon);
	writel(0, &regs->DwCmndRef);
	writel(0, &regs->DrDataPro);
	writel(0, &regs->DrDataCon);
	writel(0, &regs->DrDataRef);
	writel(0, &regs->DwDataPro);
	writel(0, &regs->DwDataCon);
	writel(0, &regs->DwDataRef);
#endif

	/* Acknowledge all mailbox events, clear all ring indices. */
	writel(0xffffffff, &regs->MbEvent);
	writel(0, &regs->Event);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);
	rrpriv->info->evt_ctrl.pi = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	/*
	 * Why 32 ? is this not cache line size dependent?
	 */
	writel(RBURST_64|WBURST_64, &regs->PciState);
	wmb();

	start_pc = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, rncd_info.FwStart));

#if (DEBUG > 1)
	printk("%s: Executing firmware at address 0x%06x\n",
	       dev->name, start_pc);
#endif

	/* Kick the firmware: first at an offset, then at the real PC. */
	writel(start_pc + 0x800, &regs->Pc);
	wmb();
	udelay(5);

	writel(start_pc, &regs->Pc);
	wmb();

	return 0;
}
/*
 * Read a string from the EEPROM.
 *
 * Halts the NIC for the duration of the access (saving/restoring the
 * ExtIo, LocalCtrl and HostCtrl registers) and reads one byte per
 * EEPROM word through the WinBase/WinData window.  Returns the number
 * of bytes read (always @length here, since the loop never aborts).
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
				   unsigned long offset,
				   unsigned char *buf,
				   unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, host, i;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);
	host = readl(&regs->HostCtrl);
	writel(host | HALT_NIC, &regs->HostCtrl);
	mb();

	for (i = 0; i < length; i++){
		/* Each EEPROM byte lives in bits 31:24 of its own word. */
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		mb();
		buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
		mb();
	}

	writel(host, &regs->HostCtrl);
	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();
	return i;
}
/*
 * Shortcut to read one word (4 bytes) out of the EEPROM and convert
 * it to our CPU byte-order.  Returns 0 if the underlying read did not
 * deliver all four bytes.
 */
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
			       size_t offset)
{
	__be32 value;
	unsigned int copied;

	copied = rr_read_eeprom(rrpriv, offset,
				(unsigned char *)&value, 4);
	if (copied != 4)
		return 0;

	return be32_to_cpu(value);
}
/*
 * Write a string to the EEPROM.
 *
 * This is only called when the firmware is not running.
 *
 * Bytes are written one at a time through the WinBase/WinData window;
 * each write is polled (up to ~100 ms) until the EEPROM reflects the
 * new value.  Returns 0 on success, 1 if any byte failed to verify.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static unsigned int write_eeprom(struct rr_private *rrpriv,
				 unsigned long offset,
				 unsigned char *buf,
				 unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, data, i, j, ready, error = 0;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
	mb();

	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		mb();
		data = buf[i] << 24;
		/*
		 * Only try to write the data if it is not the same
		 * value already.
		 */
		if ((readl(&regs->WinData) & 0xff000000) != data){
			writel(data, &regs->WinData);
			ready = 0;
			j = 0;
			mb();

			while(!ready){
				udelay(20);
				if ((readl(&regs->WinData) & 0xff000000) ==
				    data)
					ready = 1;
				mb();
				if (j++ > 5000){
					printk("data mismatch: %08x, "
					       "WinData %08x\n", data,
					       readl(&regs->WinData));
					ready = 1;
					error = 1;
				}
			}
		}
	}

	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();

	return error;
}
/*
 * One-time probe-side initialisation: report the firmware revision,
 * read the MAC address and SRAM size out of the EEPROM.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static int __devinit rr_init(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 sram_size, rev;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rev = readl(&regs->FwRev);
	rrpriv->fw_rev = rev;
	if (rev > 0x00020024)
		printk("  Firmware revision: %i.%i.%i\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	else if (rev >= 0x00020000) {
		printk("  Firmware revision: %i.%i.%i (2.0.37 or "
		       "later is recommended)\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	}else{
		printk("  Firmware revision too old: %i.%i.%i, please "
		       "upgrade to 2.0.37 or later.\n",
		       (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
	}

#if (DEBUG > 2)
	printk("  Maximum receive rings %i\n", readl(&regs->MaxRxRng));
#endif

	/*
	 * Read the hardware address from the eeprom.  The HW address
	 * is not really necessary for HIPPI but awfully convenient.
	 * The pointer arithmetic to put it in dev_addr is ugly, but
	 * Donald Becker does it this way for the GigE version of this
	 * card and it's shorter and more portable than any
	 * other method I've seen.  -VAL
	 */

	*(__be16 *)(dev->dev_addr) =
	  htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
	*(__be32 *)(dev->dev_addr+2) =
	  htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));

	printk("  MAC: %pM\n", dev->dev_addr);

	sram_size = rr_read_eeprom_word(rrpriv, 8);
	printk("  SRAM size 0x%06x\n", sram_size);

	return 0;
}
static int rr_init1(struct net_device *dev)
{
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
unsigned long myjif, flags;
struct cmd cmd;
u32 hostctrl;
int ecode = 0;
short i;
rrpriv = netdev_priv(dev);
regs = rrpriv->regs;
spin_lock_irqsave(&rrpriv->lock, flags);
hostctrl = readl(®s->HostCtrl);
writel(hostctrl | HALT_NIC | RR_CLEAR_INT, ®s->HostCtrl);
wmb();
if (hostctrl & PARITY_ERR){
printk("%s: Parity error halting NIC - this is serious!\n",
dev->name);
spin_unlock_irqrestore(&rrpriv->lock, flags);
ecode = -EFAULT;
goto error;
}
set_rxaddr(regs, rrpriv->rx_ctrl_dma);
set_infoaddr(regs, rrpriv->info_dma);
rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
rrpriv->info->evt_ctrl.mode = 0;
rrpriv->info->evt_ctrl.pi = 0;
set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);
rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
rrpriv->info->cmd_ctrl.mode = 0;
rrpriv->info->cmd_ctrl.pi = 15;
for (i = 0; i < CMD_RING_ENTRIES; i++) {
writel(0, ®s->CmdRing[i]);
}
for (i = 0; i < TX_RING_ENTRIES; i++) {
rrpriv->tx_ring[i].size = 0;
set_rraddr(&rrpriv->tx_ring[i].addr, 0);
rrpriv->tx_skbuff[i] = NULL;
}
rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
rrpriv->info->tx_ctrl.mode = 0;
rrpriv->info->tx_ctrl.pi = 0;
set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);
/*
* Set dirty_tx before we start receiving interrupts, otherwise
* the interrupt handler might think it is supposed to process
* tx ints before we are up and running, which may cause a null
* pointer access in the int handler.
*/
rrpriv->tx_full = 0;
rrpriv->cur_rx = 0;
rrpriv->dirty_rx = rrpriv->dirty_tx = 0;
rr_reset(dev);
/* Tuning values */
writel(0x5000, ®s->ConRetry);
writel(0x100, ®s->ConRetryTmr);
writel(0x500000, ®s->ConTmout);
writel(0x60, ®s->IntrTmr);
writel(0x500000, ®s->TxDataMvTimeout);
writel(0x200000, ®s->RxDataMvTimeout);
writel(0x80, ®s->WriteDmaThresh);
writel(0x80, ®s->ReadDmaThresh);
rrpriv->fw_running = 0;
wmb();
hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
writel(hostctrl, ®s->HostCtrl);
wmb();
spin_unlock_irqrestore(&rrpriv->lock, flags);
for (i = 0; i < RX_RING_ENTRIES; i++) {
struct sk_buff *skb;
dma_addr_t addr;
rrpriv->rx_ring[i].mode = 0;
skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
if (!skb) {
printk(KERN_WARNING "%s: Unable to allocate memory "
"for receive ring - halting NIC\n", dev->name);
ecode = -ENOMEM;
goto error;
}
rrpriv->rx_skbuff[i] = skb;
addr = pci_map_single(rrpriv->pci_dev, skb->data,
dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
/*
* Sanity test to see if we conflict with the DMA
* limitations of the Roadrunner.
*/
if ((((unsigned long)skb->data) & 0xfff) > ~65320)
printk("skb alloc error\n");
set_rraddr(&rrpriv->rx_ring[i].addr, addr);
rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
}
rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
rrpriv->rx_ctrl[4].mode = 8;
rrpriv->rx_ctrl[4].pi = 0;
wmb();
set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);
udelay(1000);
/*
* Now start the FirmWare.
*/
cmd.code = C_START_FW;
cmd.ring = 0;
cmd.index = 0;
rr_issue_cmd(rrpriv, &cmd);
/*
* Give the FirmWare time to chew on the `get running' command.
*/
myjif = jiffies + 5 * HZ;
while (time_before(jiffies, myjif) && !rrpriv->fw_running)
cpu_relax();
netif_start_queue(dev);
return ecode;
error:
/*
* We might have gotten here because we are out of memory,
* make sure we release everything we allocated before failing
*/
for (i = 0; i < RX_RING_ENTRIES; i++) {
struct sk_buff *skb = rrpriv->rx_skbuff[i];
if (skb) {
pci_unmap_single(rrpriv->pci_dev,
rrpriv->rx_ring[i].addr.addrlo,
dev->mtu + HIPPI_HLEN,
PCI_DMA_FROMDEVICE);
rrpriv->rx_ring[i].size = 0;
set_rraddr(&rrpriv->rx_ring[i].addr, 0);
dev_kfree_skb(skb);
rrpriv->rx_skbuff[i] = NULL;
}
}
return ecode;
}
/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 *
 * Walks the event ring from @eidx up to @prodidx, acting on each event
 * code; RX error events jump to the shared "drop" label which marks
 * the offending ring entry PACKET_BAD so rx_int() discards it later.
 * Returns the new event consumer index.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 tmp;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	while (prodidx != eidx){
		switch (rrpriv->evt_ring[eidx].code){
		case E_NIC_UP:
			tmp = readl(&regs->FwRev);
			printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
			       "up and running\n", dev->name,
			       (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
			rrpriv->fw_running = 1;
			writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
			wmb();
			break;
		case E_LINK_ON:
			printk(KERN_INFO "%s: Optical link ON\n", dev->name);
			break;
		case E_LINK_OFF:
			printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
			break;
		case E_RX_IDLE:
			printk(KERN_WARNING "%s: RX data not moving\n",
			       dev->name);
			goto drop;
		case E_WATCHDOG:
			printk(KERN_INFO "%s: The watchdog is here to see "
			       "us\n", dev->name);
			break;
		case E_INTERN_ERR:
			printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_HOST_ERR:
			printk(KERN_ERR "%s: Host software error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * TX events.
		 */
		case E_CON_REJ:
			printk(KERN_WARNING "%s: Connection rejected\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_CON_TMOUT:
			printk(KERN_WARNING "%s: Connection timeout\n",
			       dev->name);
			break;
		case E_DISC_ERR:
			printk(KERN_WARNING "%s: HIPPI disconnect error\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_INT_PRTY:
			printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_IDLE:
			printk(KERN_WARNING "%s: Transmitter idle\n",
			       dev->name);
			break;
		case E_TX_LINK_DROP:
			printk(KERN_WARNING "%s: Link lost during transmit\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_RNG:
			printk(KERN_ERR "%s: Invalid send ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_BUF:
			printk(KERN_ERR "%s: Invalid send buffer address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_DSC:
			printk(KERN_ERR "%s: Invalid descriptor address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * RX events.
		 */
		case E_RX_RNG_OUT:
			printk(KERN_INFO "%s: Receive ring full\n", dev->name);
			break;
		case E_RX_PAR_ERR:
			printk(KERN_WARNING "%s: Receive parity error\n",
			       dev->name);
			goto drop;
		case E_RX_LLRC_ERR:
			printk(KERN_WARNING "%s: Receive LLRC error\n",
			       dev->name);
			goto drop;
		case E_PKT_LN_ERR:
			printk(KERN_WARNING "%s: Receive packet length "
			       "error\n", dev->name);
			goto drop;
		case E_DTA_CKSM_ERR:
			printk(KERN_WARNING "%s: Data checksum error\n",
			       dev->name);
			goto drop;
		case E_SHT_BST:
			printk(KERN_WARNING "%s: Unexpected short burst "
			       "error\n", dev->name);
			goto drop;
		case E_STATE_ERR:
			printk(KERN_WARNING "%s: Recv. state transition"
			       " error\n", dev->name);
			goto drop;
		case E_UNEXP_DATA:
			printk(KERN_WARNING "%s: Unexpected data error\n",
			       dev->name);
			goto drop;
		case E_LST_LNK_ERR:
			printk(KERN_WARNING "%s: Link lost error\n",
			       dev->name);
			goto drop;
		case E_FRM_ERR:
			printk(KERN_WARNING "%s: Framming Error\n",
			       dev->name);
			goto drop;
		case E_FLG_SYN_ERR:
			printk(KERN_WARNING "%s: Flag sync. lost during "
			       "packet\n", dev->name);
			goto drop;
		case E_RX_INV_BUF:
			printk(KERN_ERR "%s: Invalid receive buffer "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RX_INV_DSC:
			printk(KERN_ERR "%s: Invalid receive descriptor "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RNG_BLK:
			printk(KERN_ERR "%s: Invalid ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		drop:
			/* Label packet to be dropped.
			 * Actual dropping occurs in rx
			 * handling.
			 *
			 * The index of packet we get to drop is
			 * the index of the packet following
			 * the bad packet. -kbf
			 */
			{
				u16 index = rrpriv->evt_ring[eidx].index;
				index = (index + (RX_RING_ENTRIES - 1)) %
					RX_RING_ENTRIES;
				rrpriv->rx_ring[index].mode |=
					(PACKET_BAD | PACKET_END);
			}
			break;
		default:
			printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
			       dev->name, rrpriv->evt_ring[eidx].code);
		}

		eidx = (eidx + 1) % EVT_RING_ENTRIES;
	}

	rrpriv->info->evt_ctrl.pi = eidx;
	wmb();
	return eidx;
}
/*
 * Receive-side interrupt work: walk the rx ring from @index up to
 * @rxlimit.  Small packets are copied into a fresh skb (DMA mapping
 * reused); large packets hand the mapped skb up and replace it with a
 * newly allocated one.  Packets marked PACKET_BAD by the event handler
 * are dropped.  Every 8th slot the hardware rx producer is refreshed.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;

	do {
		struct rx_desc *desc;
		u32 pkt_len;

		desc = &(rrpriv->rx_ring[index]);
		pkt_len = desc->size;
#if (DEBUG > 2)
		printk("index %i, rxlimit %i\n", index, rxlimit);
		printk("len %x, mode %x\n", pkt_len, desc->mode);
#endif
		if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
			dev->stats.rx_dropped++;
			goto defer;
		}

		if (pkt_len > 0){
			struct sk_buff *skb, *rx_skb;

			rx_skb = rrpriv->rx_skbuff[index];

			if (pkt_len < PKT_COPY_THRESHOLD) {
				skb = alloc_skb(pkt_len, GFP_ATOMIC);
				if (skb == NULL){
					printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
					dev->stats.rx_dropped++;
					goto defer;
				} else {
					pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
								    desc->addr.addrlo,
								    pkt_len,
								    PCI_DMA_FROMDEVICE);

					memcpy(skb_put(skb, pkt_len),
					       rx_skb->data, pkt_len);

					pci_dma_sync_single_for_device(rrpriv->pci_dev,
								       desc->addr.addrlo,
								       pkt_len,
								       PCI_DMA_FROMDEVICE);
				}
			}else{
				struct sk_buff *newskb;

				newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
					GFP_ATOMIC);
				if (newskb){
					dma_addr_t addr;

					pci_unmap_single(rrpriv->pci_dev,
						desc->addr.addrlo, dev->mtu +
						HIPPI_HLEN, PCI_DMA_FROMDEVICE);
					skb = rx_skb;
					skb_put(skb, pkt_len);
					rrpriv->rx_skbuff[index] = newskb;
					addr = pci_map_single(rrpriv->pci_dev,
						newskb->data,
						dev->mtu + HIPPI_HLEN,
						PCI_DMA_FROMDEVICE);
					set_rraddr(&desc->addr, addr);
				} else {
					printk("%s: Out of memory, deferring "
					       "packet\n", dev->name);
					dev->stats.rx_dropped++;
					goto defer;
				}
			}
			skb->protocol = hippi_type_trans(skb, dev);

			netif_rx(skb);		/* send it up */

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
	defer:
		desc->mode = 0;
		desc->size = dev->mtu + HIPPI_HLEN;

		if ((index & 7) == 7)
			writel(index, &regs->IpRxPi);

		index = (index + 1) % RX_RING_ENTRIES;
	} while(index != rxlimit);

	rrpriv->cur_rx = index;
	wmb();
}
/*
 * Shared interrupt handler.  The EvtPrd register packs three producer
 * indices into one word: bits 7:0 event ring, 15:8 tx consumer,
 * 23:16 rx limit.  Events are processed first (they may mark bad rx
 * packets), then rx, then tx completions; the acknowledged indices are
 * repacked and written to EvtCon.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static irqreturn_t rr_interrupt(int irq, void *dev_id)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (!(readl(&regs->HostCtrl) & RR_INT))
		return IRQ_NONE;

	spin_lock(&rrpriv->lock);

	prodidx = readl(&regs->EvtPrd);
	txcsmr = (prodidx >> 8) & 0xff;
	rxlimit = (prodidx >> 16) & 0xff;
	prodidx &= 0xff;

#if (DEBUG > 2)
	printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
	       prodidx, rrpriv->info->evt_ctrl.pi);
#endif
	/*
	 * Order here is important.  We must handle events
	 * before doing anything else in order to catch
	 * such things as LLRC errors, etc -kbf
	 */

	eidx = rrpriv->info->evt_ctrl.pi;
	if (prodidx != eidx)
		eidx = rr_handle_event(dev, prodidx, eidx);

	rxindex = rrpriv->cur_rx;
	if (rxindex != rxlimit)
		rx_int(dev, rxlimit, rxindex);

	txcon = rrpriv->dirty_tx;
	if (txcsmr != txcon) {
		do {
			/* Due to occational firmware TX producer/consumer out
			 * of sync. error need to check entry in ring -kbf
			 */
			if(rrpriv->tx_skbuff[txcon]){
				struct tx_desc *desc;
				struct sk_buff *skb;

				desc = &(rrpriv->tx_ring[txcon]);
				skb = rrpriv->tx_skbuff[txcon];

				dev->stats.tx_packets++;
				dev->stats.tx_bytes += skb->len;

				pci_unmap_single(rrpriv->pci_dev,
						 desc->addr.addrlo, skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);

				rrpriv->tx_skbuff[txcon] = NULL;
				desc->size = 0;
				set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
				desc->mode = 0;
			}
			txcon = (txcon + 1) % TX_RING_ENTRIES;
		} while (txcsmr != txcon);
		wmb();

		rrpriv->dirty_tx = txcon;
		if (rrpriv->tx_full && rr_if_busy(dev) &&
		    (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
		     != rrpriv->dirty_tx)){
			rrpriv->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	eidx |= ((txcsmr << 8) | (rxlimit << 16));
	writel(eidx, &regs->EvtCon);
	wmb();

	spin_unlock(&rrpriv->lock);
	return IRQ_HANDLED;
}
/*
 * Release every outstanding transmit buffer: unmap its DMA address,
 * clear the descriptor and free the skb.
 */
static inline void rr_raz_tx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int slot;

	for (slot = 0; slot < TX_RING_ENTRIES; slot++) {
		struct sk_buff *pending = rrpriv->tx_skbuff[slot];

		if (!pending)
			continue;

		struct tx_desc *entry = &(rrpriv->tx_ring[slot]);

		pci_unmap_single(rrpriv->pci_dev, entry->addr.addrlo,
				 pending->len, PCI_DMA_TODEVICE);
		entry->size = 0;
		set_rraddr(&entry->addr, 0);
		dev_kfree_skb(pending);
		rrpriv->tx_skbuff[slot] = NULL;
	}
}
/*
 * Release every posted receive buffer: unmap its DMA address, clear
 * the descriptor and free the skb.
 */
static inline void rr_raz_rx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int slot;

	for (slot = 0; slot < RX_RING_ENTRIES; slot++) {
		struct sk_buff *posted = rrpriv->rx_skbuff[slot];

		if (!posted)
			continue;

		struct rx_desc *entry = &(rrpriv->rx_ring[slot]);

		pci_unmap_single(rrpriv->pci_dev, entry->addr.addrlo,
				 dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
		entry->size = 0;
		set_rraddr(&entry->addr, 0);
		dev_kfree_skb(posted);
		rrpriv->rx_skbuff[slot] = NULL;
	}
}
/*
 * Watchdog timer: if the NIC halted itself, wipe the control blocks,
 * release all buffers and re-run rr_init1(); if that fails too, leave
 * the NIC halted.  Re-arms itself every 5 seconds.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static void rr_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	unsigned long flags;

	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("%s: Restarting nic\n", dev->name);
		memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
		memset(rrpriv->info, 0, sizeof(struct rr_info));
		wmb();

		rr_raz_tx(rrpriv, dev);
		rr_raz_rx(rrpriv, dev);

		if (rr_init1(dev)) {
			spin_lock_irqsave(&rrpriv->lock, flags);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			spin_unlock_irqrestore(&rrpriv->lock, flags);
		}
	}
	rrpriv->timer.expires = RUN_AT(5*HZ);
	add_timer(&rrpriv->timer);
}
static int rr_open(struct net_device *dev)
{
struct rr_private *rrpriv = netdev_priv(dev);
struct pci_dev *pdev = rrpriv->pci_dev;
struct rr_regs __iomem *regs;
int ecode = 0;
unsigned long flags;
dma_addr_t dma_addr;
regs = rrpriv->regs;
if (rrpriv->fw_rev < 0x00020000) {
printk(KERN_WARNING "%s: trying to configure device with "
"obsolete firmware\n", dev->name);
ecode = -EBUSY;
goto error;
}
rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
256 * sizeof(struct ring_ctrl),
&dma_addr);
if (!rrpriv->rx_ctrl) {
ecode = -ENOMEM;
goto error;
}
rrpriv->rx_ctrl_dma = dma_addr;
memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));
rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
&dma_addr);
if (!rrpriv->info) {
ecode = -ENOMEM;
goto error;
}
rrpriv->info_dma = dma_addr;
memset(rrpriv->info, 0, sizeof(struct rr_info));
wmb();
spin_lock_irqsave(&rrpriv->lock, flags);
writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT, ®s->HostCtrl);
readl(®s->HostCtrl);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
dev->name, dev->irq);
ecode = -EAGAIN;
goto error;
}
if ((ecode = rr_init1(dev)))
goto error;
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
init_timer(&rrpriv->timer);
rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
rrpriv->timer.data = (unsigned long)dev;
rrpriv->timer.function = rr_timer; /* timer handler */
add_timer(&rrpriv->timer);
netif_start_queue(dev);
return ecode;
error:
spin_lock_irqsave(&rrpriv->lock, flags);
writel(readl(®s->HostCtrl)|HALT_NIC|RR_CLEAR_INT, ®s->HostCtrl);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (rrpriv->info) {
pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
rrpriv->info_dma);
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
pci_free_consistent(pdev, sizeof(struct ring_ctrl),
rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}
netif_stop_queue(dev);
return ecode;
}
/*
 * Debug helper: dump the TX ring state, the most recently produced and
 * the next-to-be-consumed tx skb contents, and all tx descriptors.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static void rr_dump(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 index, cons;
	short i;
	int len;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	printk("%s: dumping NIC TX rings\n", dev->name);

	printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
	       readl(&regs->RxPrd), readl(&regs->TxPrd),
	       readl(&regs->EvtPrd), readl(&regs->TxPi),
	       rrpriv->info->tx_ctrl.pi);

	printk("Error code 0x%x\n", readl(&regs->Fail1));

	index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
	cons = rrpriv->dirty_tx;
	printk("TX ring index %i, TX consumer %i\n",
	       index, cons);

	if (rrpriv->tx_skbuff[index]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
		printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
		for (i = 0; i < len; i++){
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
		}
		printk("\n");
	}

	if (rrpriv->tx_skbuff[cons]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
		printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
		printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
		       rrpriv->tx_ring[cons].mode,
		       rrpriv->tx_ring[cons].size,
		       (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
		       (unsigned long)rrpriv->tx_skbuff[cons]->data,
		       (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
		/* NOTE(review): this loop prints the descriptor size for
		 * every byte position rather than the skb data - looks
		 * like a long-standing debug-dump oddity; behavior kept. */
		for (i = 0; i < len; i++){
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
		}
		printk("\n");
	}

	printk("dumping TX ring info:\n");
	for (i = 0; i < TX_RING_ENTRIES; i++)
		printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
		       rrpriv->tx_ring[i].mode,
		       rrpriv->tx_ring[i].size,
		       (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
}
/*
 * net_device stop callback: halt the NIC, kill the watchdog, clear all
 * ring indices, release every tx/rx buffer and the shared control
 * structures, then release the IRQ.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 *
 * NOTE(review): free_irq() and pci_free_consistent() are called with
 * the irq-disabled spinlock held - both can sleep; upstream later
 * reworked this locking.  Behavior kept here.
 */
static int rr_close(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long flags;
	u32 tmp;
	short i;

	netif_stop_queue(dev);

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	/*
	 * Lock to make sure we are not cleaning up while another CPU
	 * is handling interrupts.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	tmp = readl(&regs->HostCtrl);
	if (tmp & NIC_HALTED){
		printk("%s: NIC already halted\n", dev->name);
		rr_dump(dev);
	}else{
		tmp |= HALT_NIC | RR_CLEAR_INT;
		writel(tmp, &regs->HostCtrl);
		readl(&regs->HostCtrl);
	}

	rrpriv->fw_running = 0;

	del_timer_sync(&rrpriv->timer);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);
	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	rrpriv->info->tx_ctrl.entries = 0;
	rrpriv->info->cmd_ctrl.pi = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	rrpriv->rx_ctrl[4].entries = 0;

	rr_raz_tx(rrpriv, dev);
	rr_raz_rx(rrpriv, dev);

	pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl),
			    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
	rrpriv->rx_ctrl = NULL;

	pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info),
			    rrpriv->info, rrpriv->info_dma);
	rrpriv->info = NULL;

	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	return 0;
}
/*
 * Transmit entry point: prepend the 8-byte HIPPI I-field (reallocating
 * the skb if there is not enough headroom), map the buffer for DMA,
 * fill the next tx descriptor and kick the TxPi doorbell.  Stops the
 * queue when the ring wraps onto dirty_tx.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
	struct ring_ctrl *txctrl;
	unsigned long flags;
	u32 index, len = skb->len;
	u32 *ifield;
	struct sk_buff *new_skb;

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error codes Fail1 %02x, Fail2 %02x\n",
		       readl(&regs->Fail1), readl(&regs->Fail2));

	/*
	 * We probably need to deal with tbusy here to prevent overruns.
	 */

	if (skb_headroom(skb) < 8){
		printk("incoming skb too small - reallocating\n");
		if (!(new_skb = dev_alloc_skb(len + 8))) {
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return NETDEV_TX_OK;
		}
		skb_reserve(new_skb, 8);
		skb_put(new_skb, len);
		skb_copy_from_linear_data(skb, new_skb->data, len);
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	/* The 8-byte I-field precedes the payload on the wire. */
	ifield = (u32 *)skb_push(skb, 8);

	ifield[0] = 0;
	ifield[1] = hcb->ifield;

	/*
	 * We don't need the lock before we are actually going to start
	 * fiddling with the control blocks.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	txctrl = &rrpriv->info->tx_ctrl;

	index = txctrl->pi;

	rrpriv->tx_skbuff[index] = skb;
	set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
		rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
	rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
	rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
	txctrl->pi = (index + 1) % TX_RING_ENTRIES;
	wmb();
	writel(txctrl->pi, &regs->TxPi);

	if (txctrl->pi == rrpriv->dirty_tx){
		rrpriv->tx_full = 1;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&rrpriv->lock, flags);

	return NETDEV_TX_OK;
}
/*
 * Read the firmware out of the EEPROM and put it into the SRAM
 * (or from user space - later)
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock hold.
 *
 * Wipes SRAM (above the first 200 words), validates the segment table
 * header in the EEPROM, then copies each firmware segment word by word
 * through the WinBase/WinData window.
 *
 * Fix: restored "&regs" where mojibake had turned it into "(R)s".
 */
static int rr_load_firmware(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	size_t eptr, segptr;
	int i, j;
	u32 localctrl, sptr, len, tmp;
	u32 p2len, p2size, nr_seg, revision, io, sram_size;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
		printk("%s: Trying to load firmware to a running NIC.\n",
		       dev->name);
		return -EBUSY;
	}

	localctrl = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);

	writel(0, &regs->EvtPrd);
	writel(0, &regs->RxPrd);
	writel(0, &regs->TxPrd);

	/*
	 * First wipe the entire SRAM, otherwise we might run into all
	 * kinds of trouble ... sigh, this took almost all afternoon
	 * to track down ;-(
	 */
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	sram_size = rr_read_eeprom_word(rrpriv, 8);

	for (i = 200; i < sram_size / 4; i++){
		writel(i * 4, &regs->WinBase);
		mb();
		writel(0, &regs->WinData);
		mb();
	}
	writel(io, &regs->ExtIo);
	mb();

	eptr = rr_read_eeprom_word(rrpriv,
		       offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
	eptr = ((eptr & 0x1fffff) >> 3);

	p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
	p2len = (p2len << 2);
	p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
	p2size = ((p2size & 0x1fffff) >> 3);

	if ((eptr < p2size) || (eptr > (p2size + p2len))){
		printk("%s: eptr is invalid\n", dev->name);
		goto out;
	}

	revision = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, manf.HeaderFmt));

	if (revision != 1){
		printk("%s: invalid firmware format (%i)\n",
		       dev->name, revision);
		goto out;
	}

	nr_seg = rr_read_eeprom_word(rrpriv, eptr);
	eptr +=4;
#if (DEBUG > 1)
	printk("%s: nr_seg %i\n", dev->name, nr_seg);
#endif

	for (i = 0; i < nr_seg; i++){
		sptr = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		len = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		segptr = rr_read_eeprom_word(rrpriv, eptr);
		segptr = ((segptr & 0x1fffff) >> 3);
		eptr += 4;
#if (DEBUG > 1)
		printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
		       dev->name, i, sptr, len, segptr);
#endif
		for (j = 0; j < len; j++){
			tmp = rr_read_eeprom_word(rrpriv, segptr);
			writel(sptr, &regs->WinBase);
			mb();
			writel(tmp, &regs->WinData);
			mb();
			segptr += 4;
			sptr += 4;
		}
	}

out:
	writel(localctrl, &regs->LocalCtrl);
	mb();
	return 0;
}
/*
 * Private ioctls: SIOCRRGFW reads the firmware image out of the EEPROM
 * to user space, SIOCRRPFW writes a new image and verifies it by
 * reading it back, SIOCRRID returns a fixed driver magic.
 */
static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct rr_private *rrpriv;
unsigned char *image, *oldimage;
unsigned long flags;
unsigned int i;
int error = -EOPNOTSUPP;
rrpriv = netdev_priv(dev);
switch(cmd){
case SIOCRRGFW:
/* Reading raw EEPROM contents is privileged. */
if (!capable(CAP_SYS_RAWIO)){
return -EPERM;
}
image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
if (!image)
return -ENOMEM;
/* Cannot read the EEPROM while the firmware is using it. */
if (rrpriv->fw_running){
printk("%s: Firmware already running\n", dev->name);
error = -EPERM;
goto gf_out;
}
spin_lock_irqsave(&rrpriv->lock, flags);
i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (i != EEPROM_BYTES){
printk(KERN_ERR "%s: Error reading EEPROM\n",
dev->name);
error = -EFAULT;
goto gf_out;
}
error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
if (error)
error = -EFAULT;
gf_out:
kfree(image);
return error;
case SIOCRRPFW:
/* Flashing new firmware is privileged. */
if (!capable(CAP_SYS_RAWIO)){
return -EPERM;
}
/* kfree(NULL) is a no-op, so wf_out is safe even if only one
 * of these allocations succeeded. */
image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
if (!image || !oldimage) {
error = -ENOMEM;
goto wf_out;
}
error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
if (error) {
error = -EFAULT;
goto wf_out;
}
if (rrpriv->fw_running){
printk("%s: Firmware already running\n", dev->name);
error = -EPERM;
goto wf_out;
}
printk("%s: Updating EEPROM firmware\n", dev->name);
/* NOTE(review): write_eeprom() busy-waits with udelay() while
 * this irq-disabled lock is held - long stall; confirm this is
 * acceptable for the flashing path. */
spin_lock_irqsave(&rrpriv->lock, flags);
error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
if (error)
printk(KERN_ERR "%s: Error writing EEPROM\n",
dev->name);
/* Read back and verify the freshly written image. */
i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (i != EEPROM_BYTES)
printk(KERN_ERR "%s: Error reading back EEPROM "
"image\n", dev->name);
error = memcmp(image, oldimage, EEPROM_BYTES);
if (error){
printk(KERN_ERR "%s: Error verifying EEPROM image\n",
dev->name);
error = -EFAULT;
}
wf_out:
kfree(oldimage);
kfree(image);
return error;
case SIOCRRID:
/* Driver identification magic ("RR02"). */
return put_user(0x52523032, (int __user *)rq->ifr_data);
default:
return error;
}
}
/* PCI IDs this driver binds to: the Essential RoadRunner HIPPI board. */
static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, rr_pci_tbl);
static struct pci_driver rr_driver = {
.name = "rrunner",
.id_table = rr_pci_tbl,
.probe = rr_init_one,
.remove = __devexit_p(rr_remove_one),
};
/* Module entry point: register the PCI driver (probing happens per device) */
static int __init rr_init_module(void)
{
return pci_register_driver(&rr_driver);
}
/* Module exit: unregister, which tears down all bound devices */
static void __exit rr_cleanup_module(void)
{
pci_unregister_driver(&rr_driver);
}
module_init(rr_init_module);
module_exit(rr_cleanup_module);
| gpl-2.0 |
rachitrawat/taoshan | drivers/mtd/nand/r852.c | 4799 | 26049 | /*
* Copyright © 2009 - Maxim Levitsky
* driver for Ricoh xD readers
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
#include "sm_common.h"
#include "r852.h"
/* Module option: allow DMA transfers (on by default, read-only via sysfs) */
static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
/* Module option: debug verbosity, runtime-writable by root */
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* Read a byte-wide MMIO register */
static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
{
uint8_t reg = readb(dev->mmio + address);
return reg;
}
/* Write a byte-wide MMIO register; mmiowb() orders it before a later
   spinlock release on architectures that need it */
static inline void r852_write_reg(struct r852_device *dev,
int address, uint8_t value)
{
writeb(value, dev->mmio + address);
mmiowb();
}
/* Read a dword sized register.
 * NOTE(review): readl() already converts from little endian, so the extra
 * le32_to_cpu() looks like a double swap on big-endian — harmless on LE,
 * suspect on BE; confirm against hardware before changing. */
static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
{
uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
return reg;
}
/* Write a dword sized register (same double-swap caveat as the read side) */
static inline void r852_write_reg_dword(struct r852_device *dev,
int address, uint32_t value)
{
writel(cpu_to_le32(value), dev->mmio + address);
mmiowb();
}
/* Returns pointer to our private structure, stashed in the nand_chip priv */
static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd->priv;
return chip->priv;
}
/* Probe whether the controller supports DMA (both DMA1 and DMA2 capability
   bits must be set) and honor the user's module-parameter override */
static void r852_dma_test(struct r852_device *dev)
{
dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
(R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
if (!dev->dma_usable)
message("Non dma capable device detected, dma disabled");
if (!r852_enable_dma) {
message("disabling dma on user request");
dev->dma_usable = 0;
}
}
/*
 * Enable dma. Enables either the first or second stage of the DMA.
 * Expects dev->dma_dir and dev->dma_state to be set by the caller.
 */
static void r852_dma_enable(struct r852_device *dev)
{
uint8_t dma_reg, dma_irq_reg;
/* Set up dma settings: clear direction/stage bits, then re-set them */
dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
if (dev->dma_dir)
dma_reg |= R852_DMA_READ;
if (dev->dma_state == DMA_INTERNAL) {
dma_reg |= R852_DMA_INTERNAL;
/* Precaution to make sure HW doesn't write */
/* to random kernel memory */
r852_write_reg_dword(dev, R852_DMA_ADDR,
cpu_to_le32(dev->phys_bounce_buffer));
} else {
dma_reg |= R852_DMA_MEMORY;
r852_write_reg_dword(dev, R852_DMA_ADDR,
cpu_to_le32(dev->phys_dma_addr));
}
/* Precaution: read back so the address write reaches the device
   before the settings write starts the transfer */
r852_read_reg_dword(dev, R852_DMA_ADDR);
r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
/* Unmask the three DMA interrupt sources we handle in r852_irq */
dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
dma_irq_reg |
R852_DMA_IRQ_INTERNAL |
R852_DMA_IRQ_ERROR |
R852_DMA_IRQ_MEMORY);
}
/*
 * Disable dma, called from the interrupt handler, which specifies
 * success of the operation via the 'error' argument. Also unmaps the
 * streaming DMA buffer if one was mapped by r852_do_dma.
 */
static void r852_dma_done(struct r852_device *dev, int error)
{
WARN_ON(dev->dma_stage == 0);
/* Ack any pending DMA interrupts by writing the status back */
r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
/* Precaution to make sure HW doesn't write to random kernel memory */
r852_write_reg_dword(dev, R852_DMA_ADDR,
cpu_to_le32(dev->phys_bounce_buffer));
r852_read_reg_dword(dev, R852_DMA_ADDR);
dev->dma_error = error;
dev->dma_stage = 0;
/* Only unmap if we really mapped (i.e. did not use the bounce buffer) */
if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
}
/*
* Wait, till dma is done, which includes both phases of it
*/
/*
 * Block until the interrupt handler signals that both DMA phases have
 * completed.  Returns 0 on success, -ETIMEDOUT if no completion arrives
 * within one second.
 */
static int r852_dma_wait(struct r852_device *dev)
{
	if (wait_for_completion_timeout(&dev->dma_done,
					msecs_to_jiffies(1000)))
		return 0;

	dbg("timeout waiting for DMA interrupt");
	return -ETIMEDOUT;
}
/*
 * Read/Write one page using dma. Only whole pages (512 bytes) can be
 * transferred this way. On error dev->dma_error is left set for the
 * caller (r852_wait / r852_ecc_correct) to pick up.
 */
static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
{
int bounce = 0;
unsigned long flags;
int error;
dev->dma_error = 0;
/* Set dma direction */
dev->dma_dir = do_read;
dev->dma_stage = 1;
INIT_COMPLETION(dev->dma_done);
dbg_verbose("doing dma %s ", do_read ? "read" : "write");
/* Set initial dma state: for reading first fill on board buffer,
from device, for writes first fill the buffer from memory*/
dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
/* if incoming buffer is not page aligned, we should do bounce */
if ((unsigned long)buf & (R852_DMA_LEN-1))
bounce = 1;
if (!bounce) {
dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
R852_DMA_LEN,
(do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
/* fall back to the bounce buffer if the mapping failed */
if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
bounce = 1;
}
if (bounce) {
dbg_verbose("dma: using bounce buffer");
dev->phys_dma_addr = dev->phys_bounce_buffer;
/* for writes, stage the caller's data into the bounce buffer first */
if (!do_read)
memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
}
/* Enable DMA under the irq lock so the ISR sees consistent state */
spin_lock_irqsave(&dev->irqlock, flags);
r852_dma_enable(dev);
spin_unlock_irqrestore(&dev->irqlock, flags);
/* Wait till complete */
error = r852_dma_wait(dev);
if (error) {
r852_dma_done(dev, error);
return;
}
/* for bounced reads, copy the result back to the caller's buffer */
if (do_read && bounce)
memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
}
/*
 * Program data lines of the nand chip to send data to it.
 *
 * Fixes vs. the previous version (compare with r852_read_buf, which was
 * already correct):
 *  - the DWORD loop ran "while (len)" and subtracted 4 each pass, so any
 *    length that is not a multiple of 4 read past the caller's buffer and
 *    drove 'len' negative;
 *  - the byte-tail loop never decremented 'len', looping forever if it
 *    was ever reached.
 */
void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct r852_device *dev = r852_get_dev(mtd);
	uint32_t reg;

	/* Don't allow any access to hardware if we suspect card removal */
	if (dev->card_unstable)
		return;

	/* Special case: whole-sector write goes through DMA */
	if (len == R852_DMA_LEN && dev->dma_usable) {
		r852_do_dma(dev, (uint8_t *)buf, 0);
		return;
	}

	/* write DWORD chunks - faster */
	while (len >= 4) {
		reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
		r852_write_reg_dword(dev, R852_DATALINE, reg);
		buf += 4;
		len -= 4;
	}

	/* write the remaining bytes one at a time */
	while (len--)
		r852_write_reg(dev, R852_DATALINE, *buf++);
}
/*
 * Read data lines of the nand chip to retrieve data.
 * Mirrors r852_write_buf: DMA for whole sectors, otherwise PIO in dword
 * chunks with a bytewise tail.
 */
void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(mtd);
uint32_t reg;
if (dev->card_unstable) {
/* since we can't signal error here, at least, return
predictable buffer */
memset(buf, 0, len);
return;
}
/* special case for whole sector read */
if (len == R852_DMA_LEN && dev->dma_usable) {
r852_do_dma(dev, buf, 1);
return;
}
/* read in dword sized chunks */
while (len >= 4) {
reg = r852_read_reg_dword(dev, R852_DATALINE);
*buf++ = reg & 0xFF;
*buf++ = (reg >> 8) & 0xFF;
*buf++ = (reg >> 16) & 0xFF;
*buf++ = (reg >> 24) & 0xFF;
len -= 4;
}
/* read the rest byte by byte */
while (len--)
*buf++ = r852_read_reg(dev, R852_DATALINE);
}
/*
* Read one byte from nand chip
*/
/*
 * Read one byte from the nand chip data line.
 * As in r852_read_buf there is no way to report an error, so a suspect
 * card state yields a benign 0.
 */
static uint8_t r852_read_byte(struct mtd_info *mtd)
{
	struct r852_device *dev = r852_get_dev(mtd);

	return dev->card_unstable ? 0 : r852_read_reg(dev, R852_DATALINE);
}
/*
 * Read back the just-written data and compare it against 'buf'.
 * Returns 0 on match, nonzero (memcmp result) on mismatch, -1 if the
 * card state is suspect.
 */
int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct r852_device *dev = r852_get_dev(mtd);
/* We can't be sure about anything here... */
if (dev->card_unstable)
return -1;
/* This will never happen, unless you wired up a nand chip
with > 512 bytes page size to the reader */
if (len > SM_SECTOR_SIZE)
return 0;
r852_read_buf(mtd, dev->tmp_buffer, len);
return memcmp(buf, dev->tmp_buffer, len);
}
/*
 * Control several chip lines & send commands.
 * Maintains a shadow of the control register in dev->ctlreg and only
 * touches hardware when NAND_CTRL_CHANGE is set (or for the SEQIN hack).
 */
void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
struct r852_device *dev = r852_get_dev(mtd);
if (dev->card_unstable)
return;
if (ctrl & NAND_CTRL_CHANGE) {
/* rebuild the latch/enable bits from scratch */
dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
R852_CTL_ON | R852_CTL_CARDENABLE);
if (ctrl & NAND_ALE)
dev->ctlreg |= R852_CTL_DATA;
if (ctrl & NAND_CLE)
dev->ctlreg |= R852_CTL_COMMAND;
if (ctrl & NAND_NCE)
dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
else
/* chip deselected: also drop write access */
dev->ctlreg &= ~R852_CTL_WRITE;
/* when an erase is started, enable write access */
if (dat == NAND_CMD_ERASE1)
dev->ctlreg |= R852_CTL_WRITE;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
}
/* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need
to set write mode */
if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
dev->ctlreg |= R852_CTL_WRITE;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
}
/* finally push the command/address byte onto the data lines */
if (dat != NAND_CMD_NONE)
r852_write_reg(dev, R852_DATALINE, dat);
}
/*
 * Wait till card is ready.
 * Based on nand_wait, but folds a pending DMA error into the returned
 * NAND status byte since there is no other way to report it.
 */
int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
struct r852_device *dev = chip->priv;
unsigned long timeout;
int status;
/* erase needs a much longer budget than program/read */
timeout = jiffies + (chip->state == FL_ERASING ?
msecs_to_jiffies(400) : msecs_to_jiffies(20));
while (time_before(jiffies, timeout))
if (chip->dev_ready(mtd))
break;
chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
status = (int)chip->read_byte(mtd);
/* Unfortunately, no way to send detailed error status... */
if (dev->dma_error) {
status |= NAND_STATUS_FAIL;
dev->dma_error = 0;
}
return status;
}
/*
* Check if card is ready
*/
int r852_ready(struct mtd_info *mtd)
{
struct r852_device *dev = r852_get_dev(mtd);
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}
/*
 * Set ECC engine mode: enable generation/checking for reads and writes,
 * disable it for the READSYN phase. Enabling also flushes the ECC buffer
 * by a dummy read in ECC-access mode.
 */
void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
{
struct r852_device *dev = r852_get_dev(mtd);
if (dev->card_unstable)
return;
switch (mode) {
case NAND_ECC_READ:
case NAND_ECC_WRITE:
/* enable ecc generation/check*/
dev->ctlreg |= R852_CTL_ECC_ENABLE;
/* flush ecc buffer */
r852_write_reg(dev, R852_CTL,
dev->ctlreg | R852_CTL_ECC_ACCESS);
r852_read_reg_dword(dev, R852_DATALINE);
r852_write_reg(dev, R852_CTL, dev->ctlreg);
return;
case NAND_ECC_READSYN:
/* disable ecc generation */
dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
}
}
/*
 * Calculate ECC, only used for writes.
 * Reads the two 3-byte hardware ECC values from the data line while in
 * ECC-access mode and packs them into the SmartMedia OOB layout.
 */
int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code)
{
struct r852_device *dev = r852_get_dev(mtd);
struct sm_oob *oob = (struct sm_oob *)ecc_code;
uint32_t ecc1, ecc2;
if (dev->card_unstable)
return 0;
/* stop generation before reading the accumulated ECC */
dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
oob->ecc1[0] = (ecc1) & 0xFF;
oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
oob->ecc2[0] = (ecc2) & 0xFF;
oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
r852_write_reg(dev, R852_CTL, dev->ctlreg);
return 0;
}
/*
 * Correct the data using ECC; hardware did almost everything for us.
 *
 * The controller reports status for BOTH 256-byte halves of a sector in
 * one 32-bit register read; the loop shifts right by 16 to reach the
 * second half.  Fix: 'ecc_reg' was declared uint16_t, which truncated
 * away the second half's status, so errors there — including
 * uncorrectable ones — went undetected.  It must be 32 bits wide.
 *
 * Returns the number of corrected bits, or -1 on uncorrectable/DMA error.
 */
int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
				uint8_t *read_ecc, uint8_t *calc_ecc)
{
	uint32_t ecc_reg;
	uint8_t ecc_status, err_byte;
	int i, error = 0;

	struct r852_device *dev = r852_get_dev(mtd);

	if (dev->card_unstable)
		return 0;

	/* A DMA failure means the buffer contents are unreliable */
	if (dev->dma_error) {
		dev->dma_error = 0;
		return -1;
	}

	/* Fetch both halves' ECC status in one dword */
	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
	ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
	r852_write_reg(dev, R852_CTL, dev->ctlreg);

	for (i = 0 ; i <= 1 ; i++) {
		ecc_status = (ecc_reg >> 8) & 0xFF;

		/* ecc uncorrectable error */
		if (ecc_status & R852_ECC_FAIL) {
			dbg("ecc: unrecoverable error, in half %d", i);
			error = -1;
			goto exit;
		}

		/* correctable single-bit error: flip the faulty bit */
		if (ecc_status & R852_ECC_CORRECTABLE) {
			err_byte = ecc_reg & 0xFF;
			dbg("ecc: recoverable error, "
				"in half %d, byte %d, bit %d", i,
				err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);

			dat[err_byte] ^=
				1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
			error++;
		}

		/* advance to the second 256-byte half and its status bits */
		dat += 256;
		ecc_reg >>= 16;
	}
exit:
	return error;
}
/*
 * This is a copy of nand_read_oob_std.
 * nand_read_oob_syndrome assumes we can send a column address - we can't,
 * so the OOB is read with a plain READOOB command instead.
 */
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page, int sndcmd)
{
if (sndcmd) {
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
sndcmd = 0;
}
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return sndcmd;
}
/*
 * Start the nand engine. The required ordering of the two writes depends
 * on an undocumented hardware state bit, hence the two branches.
 */
void r852_engine_enable(struct r852_device *dev)
{
if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
} else {
r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
}
/* give the engine time to come out of reset */
msleep(300);
r852_write_reg(dev, R852_CTL, 0);
}
/*
 * Stop the nand engine and hold the controller in reset.
 */
void r852_engine_disable(struct r852_device *dev)
{
r852_write_reg_dword(dev, R852_HW, 0);
r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
}
/*
 * Sample the card-present bit into dev->card_detected, under the irq
 * lock so it cannot race with the interrupt handler's own update.
 */
void r852_card_update_present(struct r852_device *dev)
{
unsigned long flags;
uint8_t reg;
spin_lock_irqsave(&dev->irqlock, flags);
reg = r852_read_reg(dev, R852_CARD_STA);
dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
spin_unlock_irqrestore(&dev->irqlock, flags);
}
/*
 * Update card detection IRQ state according to current card state
 * (as read in r852_card_update_present): arm only the edge we still
 * expect — removal if a card is present, insertion otherwise.
 */
void r852_update_card_detect(struct r852_device *dev)
{
int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
dev->card_unstable = 0;
card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
card_detect_reg |= R852_CARD_IRQ_GENABLE;
card_detect_reg |= dev->card_detected ?
R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
}
/* sysfs 'media_type' attribute: reports "smartmedia" or "xd" */
ssize_t r852_media_type_show(struct device *sys_dev,
struct device_attribute *attr, char *buf)
{
struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
struct r852_device *dev = r852_get_dev(mtd);
char *data = dev->sm ? "smartmedia" : "xd";
strcpy(buf, data);
return strlen(data);
}
DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
/* Detect properties (SmartMedia vs xD, write protect) of the card in
   the slot and cache them in the device structure */
void r852_update_media_status(struct r852_device *dev)
{
uint8_t reg;
unsigned long flags;
int readonly;
spin_lock_irqsave(&dev->irqlock, flags);
if (!dev->card_detected) {
message("card removed");
spin_unlock_irqrestore(&dev->irqlock, flags);
return ;
}
readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
reg = r852_read_reg(dev, R852_DMA_CAP);
/* SmartMedia is indicated by the SM bit on DMA-capable readers */
dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
message("detected %s %s card in slot",
dev->sm ? "SmartMedia" : "xD",
readonly ? "readonly" : "writeable");
dev->readonly = readonly;
spin_unlock_irqrestore(&dev->irqlock, flags);
}
/*
 * Register the nand device.
 * Called when the card is detected. On failure, card_detected is cleared
 * so the detect work will retry on the next insertion event.
 */
int r852_register_nand_device(struct r852_device *dev)
{
dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
if (!dev->mtd)
goto error1;
WARN_ON(dev->card_registred);
dev->mtd->owner = THIS_MODULE;
dev->mtd->priv = dev->chip;
dev->mtd->dev.parent = &dev->pci_dev->dev;
/* write-protected media is exported read-only */
if (dev->readonly)
dev->chip->options |= NAND_ROM;
r852_engine_enable(dev);
if (sm_register_device(dev->mtd, dev->sm))
goto error2;
/* sysfs attribute failure is non-fatal */
if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
message("can't create media type sysfs attribute");
dev->card_registred = 1;
return 0;
error2:
kfree(dev->mtd);
error1:
/* Force card redetect */
dev->card_detected = 0;
return -1;
}
/*
 * Unregister the card: tear down sysfs, the nand device and the engine.
 * Safe to call when nothing is registered.
 */
void r852_unregister_nand_device(struct r852_device *dev)
{
if (!dev->card_registred)
return;
device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
nand_release(dev->mtd);
r852_engine_disable(dev);
dev->card_registred = 0;
kfree(dev->mtd);
dev->mtd = NULL;
}
/* Card state updater, run from the card workqueue after the debounce
   delay queued by the interrupt handler */
void r852_card_detect_work(struct work_struct *work)
{
struct r852_device *dev =
container_of(work, struct r852_device, card_detect_work.work);
r852_card_update_present(dev);
r852_update_card_detect(dev);
dev->card_unstable = 0;
/* False alarm: state did not actually change */
if (dev->card_detected == dev->card_registred)
goto exit;
/* Read media properties */
r852_update_media_status(dev);
/* Register or unregister the card to match the new state */
if (dev->card_detected)
r852_register_nand_device(dev);
else
r852_unregister_nand_device(dev);
exit:
/* re-arm detection IRQs for the opposite transition */
r852_update_card_detect(dev);
}
/* Ack + disable IRQ generation for both card-detect and DMA sources */
static void r852_disable_irqs(struct r852_device *dev)
{
uint8_t reg;
reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
reg & ~R852_DMA_IRQ_MASK);
/* acknowledge anything already pending */
r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}
/* Interrupt handler. Card-detect events are handled first (they
   invalidate any in-flight DMA); then the two-stage DMA state machine
   is advanced. Runs under dev->irqlock. */
static irqreturn_t r852_irq(int irq, void *data)
{
struct r852_device *dev = (struct r852_device *)data;
uint8_t card_status, dma_status;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
spin_lock_irqsave(&dev->irqlock, flags);
/* handle card detection interrupts first */
card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
ret = IRQ_HANDLED;
dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
/* we shouldn't receive any interrupts if we wait for card
to settle */
WARN_ON(dev->card_unstable);
/* disable irqs while card is unstable */
/* this will timeout DMA if active, but better that garbage */
r852_disable_irqs(dev);
if (dev->card_unstable)
goto out;
/* let the card state settle a bit, and then do the work */
dev->card_unstable = 1;
queue_delayed_work(dev->card_workqueue,
&dev->card_detect_work, msecs_to_jiffies(100));
goto out;
}
/* Handle dma interrupts */
dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
if (dma_status & R852_DMA_IRQ_MASK) {
ret = IRQ_HANDLED;
if (dma_status & R852_DMA_IRQ_ERROR) {
dbg("received dma error IRQ");
r852_dma_done(dev, -EIO);
complete(&dev->dma_done);
goto out;
}
/* received DMA interrupt out of nowhere? */
WARN_ON_ONCE(dev->dma_stage == 0);
if (dev->dma_stage == 0)
goto out;
/* done device access: flip to the memory phase */
if (dev->dma_state == DMA_INTERNAL &&
(dma_status & R852_DMA_IRQ_INTERNAL)) {
dev->dma_state = DMA_MEMORY;
dev->dma_stage++;
}
/* done memory DMA: flip back to the internal phase */
if (dev->dma_state == DMA_MEMORY &&
(dma_status & R852_DMA_IRQ_MEMORY)) {
dev->dma_state = DMA_INTERNAL;
dev->dma_stage++;
}
/* Enable 2nd half of dma dance */
if (dev->dma_stage == 2)
r852_dma_enable(dev);
/* Operation done: wake the waiter in r852_dma_wait */
if (dev->dma_stage == 3) {
r852_dma_done(dev, 0);
complete(&dev->dma_done);
}
goto out;
}
/* Handle unknown interrupts */
if (dma_status)
dbg("bad dma IRQ status = %x", dma_status);
if (card_status & ~R852_CARD_STA_CD)
dbg("strange card status = %x", card_status);
out:
spin_unlock_irqrestore(&dev->irqlock, flags);
return ret;
}
/*
 * PCI probe: set up PCI, allocate the nand_chip and device structures,
 * map resources, wire the NAND callbacks and the interrupt handler, and
 * kick an initial card-present check. The nand device itself is only
 * registered later, from the detect work, when a card is inserted.
 * Uses the standard goto-ladder so each failure unwinds exactly what
 * was acquired before it.
 */
int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
int error;
struct nand_chip *chip;
struct r852_device *dev;
/* pci initialization */
error = pci_enable_device(pci_dev);
if (error)
goto error1;
pci_set_master(pci_dev);
error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (error)
goto error2;
error = pci_request_regions(pci_dev, DRV_NAME);
if (error)
goto error3;
error = -ENOMEM;
/* init nand chip, but register it only on card insert */
chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
if (!chip)
goto error4;
/* commands */
chip->cmd_ctrl = r852_cmdctl;
chip->waitfunc = r852_wait;
chip->dev_ready = r852_ready;
/* I/O */
chip->read_byte = r852_read_byte;
chip->read_buf = r852_read_buf;
chip->write_buf = r852_write_buf;
chip->verify_buf = r852_verify_buf;
/* ecc: hardware syndrome over the whole 512-byte sector */
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
chip->ecc.size = R852_DMA_LEN;
chip->ecc.bytes = SM_OOB_SIZE;
chip->ecc.strength = 2;
chip->ecc.hwctl = r852_ecc_hwctl;
chip->ecc.calculate = r852_ecc_calculate;
chip->ecc.correct = r852_ecc_correct;
/* TODO: hack */
chip->ecc.read_oob = r852_read_oob;
/* init our device structure */
dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
if (!dev)
goto error5;
chip->priv = dev;
dev->chip = chip;
dev->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, dev);
/* coherent bounce buffer for unaligned/unmappable DMA */
dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
&dev->phys_bounce_buffer);
if (!dev->bounce_buffer)
goto error6;
error = -ENODEV;
dev->mmio = pci_ioremap_bar(pci_dev, 0);
if (!dev->mmio)
goto error7;
error = -ENOMEM;
dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
if (!dev->tmp_buffer)
goto error8;
init_completion(&dev->dma_done);
dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
if (!dev->card_workqueue)
goto error9;
INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
/* shutdown everything - precaution */
r852_engine_disable(dev);
r852_disable_irqs(dev);
r852_dma_test(dev);
dev->irq = pci_dev->irq;
spin_lock_init(&dev->irqlock);
dev->card_detected = 0;
r852_card_update_present(dev);
/*register irq handler*/
error = -ENODEV;
if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
DRV_NAME, dev))
goto error10;
/* kick initial present test */
queue_delayed_work(dev->card_workqueue,
&dev->card_detect_work, 0);
printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
return 0;
error10:
destroy_workqueue(dev->card_workqueue);
error9:
kfree(dev->tmp_buffer);
error8:
pci_iounmap(pci_dev, dev->mmio);
error7:
pci_free_consistent(pci_dev, R852_DMA_LEN,
dev->bounce_buffer, dev->phys_bounce_buffer);
error6:
kfree(dev);
error5:
kfree(chip);
error4:
pci_release_regions(pci_dev);
error3:
error2:
pci_disable_device(pci_dev);
error1:
return error;
}
/*
 * PCI remove: tear down in reverse order of probe — stop the detect
 * work first so nothing re-registers the card, then the nand device,
 * interrupts, buffers and finally the PCI resources.
 */
void r852_remove(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
/* Stop detect workqueue -
we are going to unregister the device anyway*/
cancel_delayed_work_sync(&dev->card_detect_work);
destroy_workqueue(dev->card_workqueue);
/* Unregister the device, this might make more IO */
r852_unregister_nand_device(dev);
/* Stop interrupts */
r852_disable_irqs(dev);
synchronize_irq(dev->irq);
free_irq(dev->irq, dev);
/* Cleanup */
kfree(dev->tmp_buffer);
pci_iounmap(pci_dev, dev->mmio);
pci_free_consistent(pci_dev, R852_DMA_LEN,
dev->bounce_buffer, dev->phys_bounce_buffer);
kfree(dev->chip);
kfree(dev);
/* Shutdown the PCI device */
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
}
/* System shutdown hook: quiesce the device without full teardown */
void r852_shutdown(struct pci_dev *pci_dev)
{
struct r852_device *dev = pci_get_drvdata(pci_dev);
cancel_delayed_work_sync(&dev->card_detect_work);
r852_disable_irqs(dev);
synchronize_irq(dev->irq);
pci_disable_device(pci_dev);
}
#ifdef CONFIG_PM
/* Suspend: refuse while the card is actively selected, otherwise stop
   detection work, interrupts and the engine */
static int r852_suspend(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
if (dev->ctlreg & R852_CTL_CARDENABLE)
return -EBUSY;
/* First make sure the detect work is gone */
cancel_delayed_work_sync(&dev->card_detect_work);
/* Turn off the interrupts and stop the device */
r852_disable_irqs(dev);
r852_engine_disable(dev);
/* If card was pulled off just during the suspend, which is very
unlikely, we will remove it on resume, it too late now
anyway... */
dev->card_unstable = 0;
return 0;
}
/* Resume: re-check card presence; if it changed, defer to the detect
   work, otherwise reset the still-registered card in place */
static int r852_resume(struct device *device)
{
struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
r852_disable_irqs(dev);
r852_card_update_present(dev);
r852_engine_disable(dev);
/* If card status changed, just do the work */
if (dev->card_detected != dev->card_registred) {
dbg("card was %s during low power state",
dev->card_detected ? "added" : "removed");
queue_delayed_work(dev->card_workqueue,
&dev->card_detect_work, msecs_to_jiffies(1000));
return 0;
}
/* Otherwise, initialize the card */
if (dev->card_registred) {
r852_engine_enable(dev);
dev->chip->select_chip(dev->mtd, 0);
dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
dev->chip->select_chip(dev->mtd, -1);
}
/* Program card detection IRQ */
r852_update_card_detect(dev);
return 0;
}
#else
#define r852_suspend NULL
#define r852_resume NULL
#endif
/* Only the Ricoh 0x0852 xD/SmartMedia reader function is handled here */
static const struct pci_device_id r852_pci_id_tbl[] = {
{ PCI_VDEVICE(RICOH, 0x0852), },
{ },
};
MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
/* PCI driver glue */
static struct pci_driver r852_pci_driver = {
.name = DRV_NAME,
.id_table = r852_pci_id_tbl,
.probe = r852_probe,
.remove = r852_remove,
.shutdown = r852_shutdown,
.driver.pm = &r852_pm_ops,
};
/* Module entry/exit: just (un)register the PCI driver */
static __init int r852_module_init(void)
{
return pci_register_driver(&r852_pci_driver);
}
static void __exit r852_module_exit(void)
{
pci_unregister_driver(&r852_pci_driver);
}
module_init(r852_module_init);
module_exit(r852_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
| gpl-2.0 |
lollipop-og/hellsgod-kernel | drivers/mtd/nand/atmel_nand.c | 4799 | 19453 | /*
* Copyright (C) 2003 Rick Bronson
*
* Derived from drivers/mtd/nand/autcpu12.c
* Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
*
* Derived from drivers/mtd/spia.c
* Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
*
*
* Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
* Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright (C) 2007
*
* Derived from Das U-Boot source code
* (u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
* (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mtd.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/platform_data/atmel.h>
#include <mach/cpu.h>
/* Module options: DMA for large transfers (default on), and whether the
   bad-block table lives on flash */
static int use_dma = 1;
module_param(use_dma, int, 0);
static int on_flash_bbt = 0;
module_param(on_flash_bbt, int, 0);
/* Register access macros */
#define ecc_readl(add, reg) \
__raw_readl(add + ATMEL_ECC_##reg)
#define ecc_writel(add, reg, value) \
__raw_writel((value), add + ATMEL_ECC_##reg)
#include "atmel_nand_ecc.h" /* Hardware ECC registers */
/* oob layout for large page size
 * bad block info is on bytes 0 and 1
 * the ECC bytes have to be consecutive to avoid
 * several NAND_CMD_RNDOUT during read
 */
static struct nand_ecclayout atmel_oobinfo_large = {
.eccbytes = 4,
.eccpos = {60, 61, 62, 63},
.oobfree = {
{2, 58}
},
};
/* oob layout for small page size
 * bad block info is on bytes 4 and 5
 * the ECC bytes have to be consecutive to avoid
 * several NAND_CMD_RNDOUT during read
 */
static struct nand_ecclayout atmel_oobinfo_small = {
.eccbytes = 4,
.eccpos = {0, 1, 2, 3},
.oobfree = {
{6, 10}
},
};
/* Per-controller state for the Atmel NAND driver */
struct atmel_nand_host {
struct nand_chip nand_chip;
struct mtd_info mtd;
void __iomem *io_base;    /* mapped NAND data/command window */
dma_addr_t io_phys;       /* its physical address, used as DMA endpoint */
struct atmel_nand_data board;
struct device *dev;
void __iomem *ecc;        /* hardware ECC controller registers */
struct completion comp;   /* signalled by the DMA callback */
struct dma_chan *dma_chan;
};
/* SoCs on which memcpy-DMA to the NAND window is worthwhile */
static int cpu_has_dma(void)
{
return cpu_is_at91sam9rl() || cpu_is_at91sam9g45();
}
/*
 * Enable NAND (chip-enable GPIO is active low).
 */
static void atmel_nand_enable(struct atmel_nand_host *host)
{
if (gpio_is_valid(host->board.enable_pin))
gpio_set_value(host->board.enable_pin, 0);
}
/*
 * Disable NAND (drive the chip-enable GPIO high).
 */
static void atmel_nand_disable(struct atmel_nand_host *host)
{
if (gpio_is_valid(host->board.enable_pin))
gpio_set_value(host->board.enable_pin, 1);
}
/*
 * Hardware specific access to control-lines: chip-enable via GPIO,
 * command/address latches selected by address bits (cle/ale) of the
 * memory-mapped NAND window.
 */
static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
if (ctrl & NAND_CTRL_CHANGE) {
if (ctrl & NAND_NCE)
atmel_nand_enable(host);
else
atmel_nand_disable(host);
}
if (cmd == NAND_CMD_NONE)
return;
/* the CLE/ALE address lines select which latch receives the byte */
if (ctrl & NAND_CLE)
writeb(cmd, host->io_base + (1 << host->board.cle));
else
writeb(cmd, host->io_base + (1 << host->board.ale));
}
/*
 * Read the Device Ready pin, honoring boards whose R/B# GPIO is
 * active-low.
 */
static int atmel_nand_device_ready(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
return gpio_get_value(host->board.rdy_pin) ^
!!host->board.rdy_pin_active_low;
}
/*
 * Minimal-overhead PIO for data access, 8-bit and 16-bit bus variants.
 */
static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
__raw_readsb(nand_chip->IO_ADDR_R, buf, len);
}
static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
}
static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
__raw_writesb(nand_chip->IO_ADDR_W, buf, len);
}
static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *nand_chip = mtd->priv;
__raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
}
/* DMA completion callback: wake the waiter in atmel_nand_dma_op */
static void dma_complete_func(void *completion)
{
complete(completion);
}
/*
 * Transfer 'len' bytes between 'buf' and the NAND data window using a
 * memcpy-capable DMA channel. Returns 0 on success or -EIO, in which
 * case the caller falls back to PIO.
 */
static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
int is_read)
{
struct dma_device *dma_dev;
enum dma_ctrl_flags flags;
dma_addr_t dma_src_addr, dma_dst_addr, phys_addr;
struct dma_async_tx_descriptor *tx = NULL;
dma_cookie_t cookie;
struct nand_chip *chip = mtd->priv;
struct atmel_nand_host *host = chip->priv;
void *p = buf;
int err = -EIO;
enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
/* vmalloc/highmem buffers cannot be DMA-mapped this way */
if (buf >= high_memory)
goto err_buf;
dma_dev = host->dma_chan->device;
/* we unmap ourselves below, so tell the engine to skip it */
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
DMA_COMPL_SKIP_DEST_UNMAP;
phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
if (dma_mapping_error(dma_dev->dev, phys_addr)) {
dev_err(host->dev, "Failed to dma_map_single\n");
goto err_buf;
}
/* one end is always the NAND window (host->io_phys) */
if (is_read) {
dma_src_addr = host->io_phys;
dma_dst_addr = phys_addr;
} else {
dma_src_addr = phys_addr;
dma_dst_addr = host->io_phys;
}
tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
dma_src_addr, len, flags);
if (!tx) {
dev_err(host->dev, "Failed to prepare DMA memcpy\n");
goto err_dma;
}
init_completion(&host->comp);
tx->callback = dma_complete_func;
tx->callback_param = &host->comp;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
dev_err(host->dev, "Failed to do DMA tx_submit\n");
goto err_dma;
}
dma_async_issue_pending(host->dma_chan);
wait_for_completion(&host->comp);
err = 0;
err_dma:
dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
err_buf:
if (err != 0)
dev_warn(host->dev, "Fall back to CPU I/O\n");
return err;
}
/* Read buffer: try DMA for transfers larger than the OOB area (better
   performance), fall back to bus-width-appropriate PIO */
static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
struct atmel_nand_host *host = chip->priv;
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
return;
if (host->board.bus_width_16)
atmel_read_buf16(mtd, buf, len);
else
atmel_read_buf8(mtd, buf, len);
}
/* Write buffer: same DMA-then-PIO strategy as atmel_read_buf */
static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
struct atmel_nand_host *host = chip->priv;
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
return;
if (host->board.bus_width_16)
atmel_write_buf16(mtd, buf, len);
else
atmel_write_buf8(mtd, buf, len);
}
/*
 * Calculate HW ECC
 *
 * Called after a page write: fetch the four parity bytes computed by
 * the hardware ECC block and store them little-endian in @ecc_code.
 *
 * mtd: MTD block structure
 * dat: raw data (unused)
 * ecc_code: buffer for ECC
 */
static int atmel_nand_calculate(struct mtd_info *mtd,
		const u_char *dat, unsigned char *ecc_code)
{
	struct nand_chip *nand = mtd->priv;
	struct atmel_nand_host *nand_host = nand->priv;
	unsigned int parity;

	/* the first two ECC bytes come from the parity register */
	parity = ecc_readl(nand_host->ecc, PR);
	ecc_code[0] = parity & 0xFF;
	ecc_code[1] = (parity >> 8) & 0xFF;

	/* the last two ECC bytes come from the NParity register */
	parity = ecc_readl(nand_host->ecc, NPR) & ATMEL_ECC_NPARITY;
	ecc_code[2] = parity & 0xFF;
	ecc_code[3] = (parity >> 8) & 0xFF;

	return 0;
}
/*
 * HW ECC read page function
 *
 * mtd: mtd info structure
 * chip: nand chip info structure
 * buf: buffer to store read data
 * page: page number (the read command was already issued by the caller)
 *
 * Reads the data area, then the ECC bytes immediately after it (the
 * controller requires that ordering), runs correction, and finally
 * re-reads the whole OOB area.  Always returns 0; ECC failures are
 * accounted in mtd->ecc_stats instead.
 */
static int atmel_nand_read_page(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int page)
{
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	uint8_t *ecc_pos;
	int stat;

	/*
	 * Errata: ALE is incorrectly wired up to the ECC controller
	 * on the AP7000, so it will include the address cycles in the
	 * ECC calculation.
	 *
	 * Workaround: Reset the parity registers before reading the
	 * actual data.
	 */
	if (cpu_is_at32ap7000()) {
		struct atmel_nand_host *host = chip->priv;
		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
	}

	/* read the page */
	chip->read_buf(mtd, p, eccsize);

	/* move to ECC position if needed */
	if (eccpos[0] != 0) {
		/* This only works on large pages
		 * because the ECC controller waits for
		 * NAND_CMD_RNDOUTSTART after the
		 * NAND_CMD_RNDOUT.
		 * anyway, for small pages, the eccpos[0] == 0
		 */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
				mtd->writesize + eccpos[0], -1);
	}

	/* the ECC controller needs to read the ECC just after the data */
	ecc_pos = oob + eccpos[0];
	chip->read_buf(mtd, ecc_pos, eccbytes);

	/* check if there's an error */
	stat = chip->ecc.correct(mtd, p, oob, NULL);
	if (stat < 0)
		mtd->ecc_stats.failed++;
	else
		mtd->ecc_stats.corrected += stat;

	/* get back to oob start (end of page) */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);

	/* read the oob */
	chip->read_buf(mtd, oob, mtd->oobsize);

	return 0;
}
/*
 * HW ECC Correction
 *
 * function called after a read
 *
 * mtd: MTD block structure
 * dat: raw data read from the chip
 * read_ecc: ECC from the chip (unused)
 * isnull: unused
 *
 * Detect and correct a 1 bit error for a page
 *
 * Returns 0 when there is no error (or the error is in the ECC bytes,
 * or the block looks freshly erased), 1 when a single data bit was
 * corrected in place, and -EIO on an uncorrectable multi-bit error.
 */
static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
		u_char *read_ecc, u_char *isnull)
{
	struct nand_chip *nand_chip = mtd->priv;
	struct atmel_nand_host *host = nand_chip->priv;
	unsigned int ecc_status;
	unsigned int ecc_word, ecc_bit;

	/* get the status from the Status Register */
	ecc_status = ecc_readl(host->ecc, SR);

	/* if there's no error */
	if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
		return 0;

	/* get error bit offset (4 bits) */
	ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;

	/* get word address (12 bits) */
	ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
	ecc_word >>= 4;

	/* if there are multiple errors */
	if (ecc_status & ATMEL_ECC_MULERR) {
		/* check if it is a freshly erased block
		 * (filled with 0xff): all parity address bits read back
		 * as ones in that case */
		if ((ecc_bit == ATMEL_ECC_BITADDR)
				&& (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
			/* the block has just been erased, return OK */
			return 0;
		}
		/* it doesn't seems to be a freshly
		 * erased block.
		 * We can't correct so many errors */
		dev_dbg(host->dev, "atmel_nand : multiple errors detected."
				" Unable to correct.\n");
		return -EIO;
	}

	/* if there's a single bit error : we can correct it */
	if (ecc_status & ATMEL_ECC_ECCERR) {
		/* there's nothing much to do here.
		 * the bit error is on the ECC itself.
		 */
		dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
				" Nothing to correct\n");
		return 0;
	}

	dev_dbg(host->dev, "atmel_nand : one bit error on data."
			" (word offset in the page :"
			" 0x%x bit offset : 0x%x)\n",
			ecc_word, ecc_bit);
	/* correct the error: flip the faulty bit inside the indexed word */
	if (nand_chip->options & NAND_BUSWIDTH_16) {
		/* 16 bits words */
		((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
	} else {
		/* 8 bits words */
		dat[ecc_word] ^= (1 << ecc_bit);
	}
	dev_dbg(host->dev, "atmel_nand : error corrected\n");
	return 1;
}
/*
 * Enable HW ECC : unused on most chips
 *
 * Only the AT32AP7000 needs anything here: its parity registers are
 * reset before each operation (see the ALE errata in the read path).
 */
static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
{
	struct nand_chip *nand;
	struct atmel_nand_host *nand_host;

	if (!cpu_is_at32ap7000())
		return;

	nand = mtd->priv;
	nand_host = nand->priv;
	ecc_writel(nand_host->ecc, CR, ATMEL_ECC_RST);
}
#if defined(CONFIG_OF)
/*
 * atmel_of_init_port - fill host->board from device-tree properties.
 * @host: driver state (host->board is populated, host->dev used for logs)
 * @np:   the controller's device-tree node
 *
 * Reads the ALE/CLE address offsets (must be < 32), ECC mode, on-flash
 * BBT flag, bus width and the rdy/enable/det GPIOs.  Returns 0 on
 * success or -EINVAL on an out-of-range offset.
 */
static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
		struct device_node *np)
{
	u32 val;
	int ecc_mode;
	struct atmel_nand_data *board = &host->board;
	enum of_gpio_flags flags;

	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
		if (val >= 32) {
			dev_err(host->dev, "invalid addr-offset %u\n", val);
			return -EINVAL;
		}
		board->ale = val;
	}

	if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
		if (val >= 32) {
			dev_err(host->dev, "invalid cmd-offset %u\n", val);
			return -EINVAL;
		}
		board->cle = val;
	}

	/* fall back to software ECC when the DT gives no/invalid ecc mode */
	ecc_mode = of_get_nand_ecc_mode(np);
	board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;

	board->on_flash_bbt = of_get_nand_on_flash_bbt(np);

	if (of_get_nand_bus_width(np) == 16)
		board->bus_width_16 = 1;

	/* GPIOs by fixed index: 0 = ready/busy, 1 = enable, 2 = detect */
	board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
	/* NOTE(review): '==' only matches when ACTIVE_LOW is the sole flag
	 * set; a bitwise '&' test may be intended -- confirm with users */
	board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);

	board->enable_pin = of_get_gpio(np, 1);
	board->det_pin = of_get_gpio(np, 2);

	return 0;
}
#else
/* Stub when the kernel is built without device-tree support */
static int __devinit atmel_of_init_port(struct atmel_nand_host *host,
		struct device_node *np)
{
	return -EINVAL;
}
#endif
/*
 * Probe for the NAND device.
 *
 * Maps the controller I/O, gathers board data (device tree or platform
 * data), configures HW ECC when a second MEM resource is present, scans
 * the chip and registers the MTD device (with partition parsing).
 * Falls back to software ECC when the ECC registers are missing or the
 * page size is not supported by the HW ECC block.
 */
static int __init atmel_nand_probe(struct platform_device *pdev)
{
	struct atmel_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *regs;
	struct resource *mem;
	struct mtd_part_parser_data ppdata = {};
	int res;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		printk(KERN_ERR "atmel_nand: can't get I/O resource mem\n");
		return -ENXIO;
	}

	/* Allocate memory for the device structure (and zero it) */
	host = kzalloc(sizeof(struct atmel_nand_host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
		return -ENOMEM;
	}

	/* physical address kept for the DMA path, virtual one for PIO */
	host->io_phys = (dma_addr_t)mem->start;

	host->io_base = ioremap(mem->start, resource_size(mem));
	if (host->io_base == NULL) {
		printk(KERN_ERR "atmel_nand: ioremap failed\n");
		res = -EIO;
		goto err_nand_ioremap;
	}

	mtd = &host->mtd;
	nand_chip = &host->nand_chip;
	host->dev = &pdev->dev;

	if (pdev->dev.of_node) {
		res = atmel_of_init_port(host, pdev->dev.of_node);
		if (res)
			goto err_nand_ioremap;
	} else {
		memcpy(&host->board, pdev->dev.platform_data,
		       sizeof(struct atmel_nand_data));
	}

	nand_chip->priv = host;		/* link the private data structures */
	mtd->priv = nand_chip;
	mtd->owner = THIS_MODULE;

	/* Set address of NAND IO lines */
	nand_chip->IO_ADDR_R = host->io_base;
	nand_chip->IO_ADDR_W = host->io_base;
	nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;

	if (gpio_is_valid(host->board.rdy_pin))
		nand_chip->dev_ready = atmel_nand_device_ready;

	nand_chip->ecc.mode = host->board.ecc_mode;

	/* the second MEM resource holds the HW ECC registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!regs && nand_chip->ecc.mode == NAND_ECC_HW) {
		printk(KERN_ERR "atmel_nand: can't get I/O resource "
				"regs\nFalling back on software ECC\n");
		nand_chip->ecc.mode = NAND_ECC_SOFT;
	}

	if (nand_chip->ecc.mode == NAND_ECC_HW) {
		host->ecc = ioremap(regs->start, resource_size(regs));
		if (host->ecc == NULL) {
			printk(KERN_ERR "atmel_nand: ioremap failed\n");
			res = -EIO;
			goto err_ecc_ioremap;
		}
		nand_chip->ecc.calculate = atmel_nand_calculate;
		nand_chip->ecc.correct = atmel_nand_correct;
		nand_chip->ecc.hwctl = atmel_nand_hwctl;
		nand_chip->ecc.read_page = atmel_nand_read_page;
		nand_chip->ecc.bytes = 4;
		nand_chip->ecc.strength = 1;
	}

	nand_chip->chip_delay = 20;	/* 20us command delay time */

	if (host->board.bus_width_16)	/* 16-bit bus width */
		nand_chip->options |= NAND_BUSWIDTH_16;

	nand_chip->read_buf = atmel_read_buf;
	nand_chip->write_buf = atmel_write_buf;

	platform_set_drvdata(pdev, host);
	atmel_nand_enable(host);

	/* optional SmartMedia card-detect pin: a high level means no card */
	if (gpio_is_valid(host->board.det_pin)) {
		if (gpio_get_value(host->board.det_pin)) {
			printk(KERN_INFO "No SmartMedia card inserted.\n");
			res = -ENXIO;
			goto err_no_card;
		}
	}

	/* the module parameter 'on_flash_bbt' can also force a flash BBT */
	if (host->board.on_flash_bbt || on_flash_bbt) {
		printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
	}

	if (!cpu_has_dma())
		use_dma = 0;

	if (use_dma) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		host->dma_chan = dma_request_channel(mask, NULL, NULL);
		if (!host->dma_chan) {
			/* DMA is optional: fall back to PIO */
			dev_err(host->dev, "Failed to request DMA channel\n");
			use_dma = 0;
		}
	}
	if (use_dma)
		dev_info(host->dev, "Using %s for DMA transfers.\n",
			 dma_chan_name(host->dma_chan));
	else
		dev_info(host->dev, "No DMA support for NAND access.\n");

	/* first scan to find the device and get the page size */
	if (nand_scan_ident(mtd, 1, NULL)) {
		res = -ENXIO;
		goto err_scan_ident;
	}

	if (nand_chip->ecc.mode == NAND_ECC_HW) {
		/* ECC is calculated for the whole page (1 step) */
		nand_chip->ecc.size = mtd->writesize;

		/* set ECC page size and oob layout */
		switch (mtd->writesize) {
		case 512:
			nand_chip->ecc.layout = &atmel_oobinfo_small;
			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
			break;
		case 1024:
			nand_chip->ecc.layout = &atmel_oobinfo_large;
			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
			break;
		case 2048:
			nand_chip->ecc.layout = &atmel_oobinfo_large;
			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
			break;
		case 4096:
			nand_chip->ecc.layout = &atmel_oobinfo_large;
			ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
			break;
		default:
			/* page size not handled by HW ECC */
			/* switching back to soft ECC */
			nand_chip->ecc.mode = NAND_ECC_SOFT;
			nand_chip->ecc.calculate = NULL;
			nand_chip->ecc.correct = NULL;
			nand_chip->ecc.hwctl = NULL;
			nand_chip->ecc.read_page = NULL;
			nand_chip->ecc.postpad = 0;
			nand_chip->ecc.prepad = 0;
			nand_chip->ecc.bytes = 0;
			break;
		}
	}

	/* second phase scan */
	if (nand_scan_tail(mtd)) {
		res = -ENXIO;
		goto err_scan_tail;
	}

	mtd->name = "atmel_nand";
	ppdata.of_node = pdev->dev.of_node;
	res = mtd_device_parse_register(mtd, NULL, &ppdata,
			host->board.parts, host->board.num_parts);
	if (!res)
		return res;

	/* unwind in reverse order; host->ecc may be NULL (soft ECC path) */
err_scan_tail:
err_scan_ident:
err_no_card:
	atmel_nand_disable(host);
	platform_set_drvdata(pdev, NULL);
	if (host->dma_chan)
		dma_release_channel(host->dma_chan);
	if (host->ecc)
		iounmap(host->ecc);
err_ecc_ioremap:
	iounmap(host->io_base);
err_nand_ioremap:
	kfree(host);
	return res;
}
/*
 * Remove a NAND device: unregister the MTD and release every resource
 * acquired in probe, in reverse order.
 */
static int __exit atmel_nand_remove(struct platform_device *pdev)
{
	struct atmel_nand_host *nand_host = platform_get_drvdata(pdev);

	nand_release(&nand_host->mtd);

	atmel_nand_disable(nand_host);

	if (nand_host->ecc)
		iounmap(nand_host->ecc);

	if (nand_host->dma_chan)
		dma_release_channel(nand_host->dma_chan);

	iounmap(nand_host->io_base);
	kfree(nand_host);

	return 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_nand_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-nand" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
#endif
/* Platform driver; no .probe here on purpose -- the driver is registered
 * with platform_driver_probe() below so the __init probe routine can be
 * discarded after boot. */
static struct platform_driver atmel_nand_driver = {
	.remove		= __exit_p(atmel_nand_remove),
	.driver		= {
		.name	= "atmel_nand",
		.owner	= THIS_MODULE,
		.of_match_table	= of_match_ptr(atmel_nand_dt_ids),
	},
};
/* Register the driver; platform_driver_probe() is used because the probe
 * routine is __init and must not be referenced after boot. */
static int __init atmel_nand_init(void)
{
	return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
}

/* Module unload: unregister the platform driver. */
static void __exit atmel_nand_exit(void)
{
	platform_driver_unregister(&atmel_nand_driver);
}

module_init(atmel_nand_init);
module_exit(atmel_nand_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
MODULE_ALIAS("platform:atmel_nand");
| gpl-2.0 |
DirtyUnicorns/android_kernel_motorola_msm8960dt-common | arch/arm/mach-ux500/board-mop500-stuib.c | 7871 | 4759 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* License terms: GNU General Public License (GPL), version 2
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mfd/stmpe.h>
#include <linux/input/bu21013.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/input/matrix_keypad.h>
#include <asm/mach-types.h>
#include "board-mop500.h"
/* STMPE/SKE keypad use this key layout */
/* Each KEY(row, col, keycode) entry maps one matrix position. */
static const unsigned int mop500_keymap[] = {
	KEY(2, 5, KEY_END),
	KEY(4, 1, KEY_POWER),
	KEY(3, 5, KEY_VOLUMEDOWN),
	KEY(1, 3, KEY_3),
	KEY(5, 2, KEY_RIGHT),
	KEY(5, 0, KEY_9),

	KEY(0, 5, KEY_MENU),
	KEY(7, 6, KEY_ENTER),
	KEY(4, 5, KEY_0),
	KEY(6, 7, KEY_2),
	KEY(3, 4, KEY_UP),
	KEY(3, 3, KEY_DOWN),

	KEY(6, 4, KEY_SEND),
	KEY(6, 2, KEY_BACK),
	KEY(4, 2, KEY_VOLUMEUP),
	KEY(5, 5, KEY_1),
	KEY(4, 3, KEY_LEFT),
	KEY(3, 2, KEY_7),
};

/* Wrapper handed to the keypad driver through platform data below */
static const struct matrix_keymap_data mop500_keymap_data = {
	.keymap		= mop500_keymap,
	.keymap_size    = ARRAY_SIZE(mop500_keymap),
};
/*
 * STMPE1601
 */
/* Keypad block configuration: 64 ms debounce, 8 scan cycles per pass */
static struct stmpe_keypad_platform_data stmpe1601_keypad_data = {
	.debounce_ms    = 64,
	.scan_count     = 8,
	.no_autorepeat  = true,
	.keymap_data    = &mop500_keymap_data,
};

/* Top-level STMPE1601 MFD config: keypad block only, autosleep enabled */
static struct stmpe_platform_data stmpe1601_data = {
	.id		= 1,
	.blocks		= STMPE_BLOCK_KEYPAD,
	.irq_trigger    = IRQF_TRIGGER_FALLING,
	.irq_base       = MOP500_STMPE1601_IRQ(0),
	.keypad		= &stmpe1601_keypad_data,
	.autosleep      = true,
	.autosleep_timeout = 1024,
};
/* STMPE1601 keypad expander on I2C bus 0; its interrupt line is GPIO 218
 * and I2C_CLIENT_WAKE marks it as a wakeup source. */
static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = {
	{
		I2C_BOARD_INFO("stmpe1601", 0x40),
		.irq = NOMADIK_GPIO_TO_IRQ(218),
		.platform_data = &stmpe1601_data,
		.flags = I2C_CLIENT_WAKE,
	},
};
/*
 * BU21013 ROHM touchscreen interface on the STUIBs
 */

/* tracks number of bu21013 devices being enabled */
static int bu21013_devices;

#define TOUCH_GPIO_PIN  84	/* interrupt pin shared by both controllers */

#define TOUCH_XMAX	384	/* panel coordinate range */
#define TOUCH_YMAX	704

#define PRCMU_CLOCK_OCR		0x1CC	/* external-clock control register */
#define TSC_EXT_CLOCK_9_6MHZ	0x840000
/**
 * bu21013_gpio_board_init : configures the touch panel.
 * @reset_pin: reset pin number
 *
 * Requests and drives the shared reset GPIO high when the first of the
 * two bu21013 controllers is enabled; later callers only bump the
 * reference count.
 *
 * Returns 0 on success or a negative errno.  On failure the reference
 * count is rolled back and the GPIO released, so the sibling device
 * (or a retry) starts from a clean state instead of seeing a leaked
 * request and a bogus count.
 */
static int bu21013_gpio_board_init(int reset_pin)
{
	int retval = 0;

	bu21013_devices++;
	if (bu21013_devices == 1) {
		retval = gpio_request(reset_pin, "touchp_reset");
		if (retval) {
			printk(KERN_ERR "Unable to request gpio reset_pin");
			/* roll back so a later attempt re-requests the pin */
			bu21013_devices--;
			return retval;
		}
		retval = gpio_direction_output(reset_pin, 1);
		if (retval < 0) {
			printk(KERN_ERR "%s: gpio direction failed\n",
					__func__);
			/* don't leak the GPIO on a half-done init */
			gpio_free(reset_pin);
			bu21013_devices--;
			return retval;
		}
	}
	return retval;
}
/**
 * bu21013_gpio_board_exit : deconfigures the touch panel controller
 * @reset_pin: reset pin number
 *
 * Drives the shared reset GPIO low and releases it when the last user
 * goes away; otherwise just drops the reference count.  Releasing the
 * pin (gpio_free) is required so that a subsequent enable can
 * gpio_request() it again successfully.
 */
static int bu21013_gpio_board_exit(int reset_pin)
{
	int retval = 0;

	if (bu21013_devices == 1) {
		retval = gpio_direction_output(reset_pin, 0);
		if (retval < 0) {
			printk(KERN_ERR "%s: gpio direction failed\n",
					__func__);
			return retval;
		}
		gpio_set_value(reset_pin, 0);
		/* release the pin requested at first enable */
		gpio_free(reset_pin);
	}
	bu21013_devices--;
	return retval;
}
/**
 * bu21013_read_pin_val : get the interrupt pin value
 * This function can be used to get the interrupt pin value for touch panel
 * controller.
 *
 * Returns the raw GPIO level of the shared touch interrupt line.
 */
static int bu21013_read_pin_val(void)
{
	return gpio_get_value(TOUCH_GPIO_PIN);
}
/* Platform data for the first BU21013 controller (I2C address 0x5C).
 * cs_pin is filled in at runtime by mop500_stuib_init(); both chips
 * share the same reset GPIO and interrupt line. */
static struct bu21013_platform_device tsc_plat_device = {
	.cs_en = bu21013_gpio_board_init,
	.cs_dis = bu21013_gpio_board_exit,
	.irq_read_val = bu21013_read_pin_val,
	.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
	.touch_x_max = TOUCH_XMAX,
	.touch_y_max = TOUCH_YMAX,
	.ext_clk = false,
	.x_flip = false,
	.y_flip = true,
};

/* Second controller (I2C address 0x5D): identical configuration */
static struct bu21013_platform_device tsc_plat2_device = {
	.cs_en = bu21013_gpio_board_init,
	.cs_dis = bu21013_gpio_board_exit,
	.irq_read_val = bu21013_read_pin_val,
	.irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN),
	.touch_x_max = TOUCH_XMAX,
	.touch_y_max = TOUCH_YMAX,
	.ext_clk = false,
	.x_flip = false,
	.y_flip = true,
};

/* Both touch controllers sit on I2C bus 3 */
static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = {
	{
		I2C_BOARD_INFO("bu21013_tp", 0x5C),
		.platform_data = &tsc_plat_device,
	},
	{
		I2C_BOARD_INFO("bu21013_tp", 0x5D),
		.platform_data = &tsc_plat2_device,
	},
};
/* Board init for the STUIB: pick the board-specific touch reset GPIO
 * and register the keypad (bus 0) and touchscreen (bus 3) devices. */
void __init mop500_stuib_init(void)
{
	int touch_reset_gpio;

	/* HREFv60 boards route the touch reset on a different GPIO */
	if (machine_is_hrefv60())
		touch_reset_gpio = HREFV60_TOUCH_RST_GPIO;
	else
		touch_reset_gpio = GPIO_BU21013_CS;

	tsc_plat_device.cs_pin = touch_reset_gpio;
	tsc_plat2_device.cs_pin = touch_reset_gpio;

	mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib,
			ARRAY_SIZE(mop500_i2c0_devices_stuib));
	mop500_uib_i2c_add(3, u8500_i2c3_devices_stuib,
			ARRAY_SIZE(u8500_i2c3_devices_stuib));
}
| gpl-2.0 |
RyanAM/gs5-kernel | drivers/net/wireless/rtlwifi/rtl8192se/fw.c | 7871 | 17756 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include "../pci.h"
#include "../base.h"
#include "reg.h"
#include "def.h"
#include "fw.h"
/* Program the RQPN registers so every reserved-queue page quota is
 * wide open before a firmware download. */
static void _rtl92s_fw_set_rqpn(struct ieee80211_hw *hw)
{
	struct rtl_priv *priv = rtl_priv(hw);

	rtl_write_dword(priv, RQPN, 0xffffffff);
	rtl_write_dword(priv, RQPN + 4, 0xffffffff);
	rtl_write_byte(priv, RQPN + 8, 0xff);
	rtl_write_byte(priv, RQPN + 0xB, 0x80);
}
/*
 * _rtl92s_firmware_enable_cpu - release the on-chip CPU and wait for IMEM.
 *
 * Switches the CPU clock source, sets the CPU function-enable bit, then
 * polls TCR (201 iterations, 100us apart) until the IMEM-ready flag
 * appears.  Returns true on success, false on timeout.
 */
static bool _rtl92s_firmware_enable_cpu(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 ichecktime = 200;
	u16 tmpu2b;
	u8 tmpu1b, cpustatus = 0;

	_rtl92s_fw_set_rqpn(hw);

	/* Enable CPU. */
	tmpu1b = rtl_read_byte(rtlpriv, SYS_CLKR);
	/* AFE source */
	rtl_write_byte(rtlpriv, SYS_CLKR, (tmpu1b | SYS_CPU_CLKSEL));

	tmpu2b = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
	rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (tmpu2b | FEN_CPUEN));

	/* Polling IMEM Ready after CPU has refilled. */
	do {
		cpustatus = rtl_read_byte(rtlpriv, TCR);
		if (cpustatus & IMEM_RDY) {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
				 "IMEM Ready after CPU has refilled\n");
			break;
		}
		udelay(100);
	} while (ichecktime--);

	if (!(cpustatus & IMEM_RDY))
		return false;

	return true;
}
static enum fw_status _rtl92s_firmware_get_nextstatus(
enum fw_status fw_currentstatus)
{
enum fw_status next_fwstatus = 0;
switch (fw_currentstatus) {
case FW_STATUS_INIT:
next_fwstatus = FW_STATUS_LOAD_IMEM;
break;
case FW_STATUS_LOAD_IMEM:
next_fwstatus = FW_STATUS_LOAD_EMEM;
break;
case FW_STATUS_LOAD_EMEM:
next_fwstatus = FW_STATUS_LOAD_DMEM;
break;
case FW_STATUS_LOAD_DMEM:
next_fwstatus = FW_STATUS_READY;
break;
default:
break;
}
return next_fwstatus;
}
/*
 * Map the RF configuration onto the firmware-header encoding: high
 * nibble = TX paths, low nibble = RX paths (0x11 = 1T1R, 0x12 = 1T2R,
 * 0x22 = 2T2R).  An unknown type logs an error and falls back to 2T2R.
 * (The unreachable `break` statements after each `return` were removed.)
 */
static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	switch (rtlphy->rf_type) {
	case RF_1T1R:
		return 0x11;
	case RF_1T2R:
		return 0x12;
	case RF_2T2R:
		return 0x22;
	default:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n",
			 rtlphy->rf_type);
		break;
	}
	return 0x22;
}
/* Partially refresh the firmware-header private area before the DMEM
 * download; currently only the RF configuration byte is patched in. */
static void _rtl92s_firmwareheader_priveupdate(struct ieee80211_hw *hw,
		struct fw_priv *pfw_priv)
{
	/* Update RF types for RATR settings. */
	pfw_priv->rf_config = _rtl92s_firmware_header_map_rftype(hw);
}
/*
 * Queue @skb on the TX command ring and fill its descriptor.
 * @last is accepted for symmetry with other send helpers but is unused
 * here.  Always returns true; the doorbell (TP_POLL / tx_polling) is
 * rung by the caller.
 */
static bool _rtl92s_cmd_send_packet(struct ieee80211_hw *hw,
		struct sk_buff *skb, u8 last)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	unsigned long flags;
	u8 idx = 0;

	ring = &rtlpci->tx_ring[TXCMD_QUEUE];

	/* descriptor fill and queue append must be atomic vs. the TX irq */
	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
	pdesc = &ring->desc[idx];
	rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
	__skb_queue_tail(&ring->queue, skb);

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	return true;
}
/*
 * _rtl92s_firmware_downloadcode - push one firmware image to the chip.
 * @code_virtual_address: image start
 * @buffer_len: image size in bytes (must be < MAX_FIRMWARE_CODE_SIZE)
 *
 * Splits the image into fragments, wraps each fragment in an skb queued
 * on the TX command ring, then kicks the command queue via TP_POLL.
 * Returns true on success, false on oversize input or skb allocation
 * failure.
 */
static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
		u8 *code_virtual_address, u32 buffer_len)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct sk_buff *skb;
	struct rtl_tcb_desc *tcb_desc;
	unsigned char *seg_ptr;
	u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE;
	u16 frag_length, frag_offset = 0;
	u16 extra_descoffset = 0;
	u8 last_inipkt = 0;

	_rtl92s_fw_set_rqpn(hw);

	if (buffer_len >= MAX_FIRMWARE_CODE_SIZE) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Size over FIRMWARE_CODE_SIZE!\n");
		return false;
	}

	/* no extra descriptor space is reserved on PCI */
	extra_descoffset = 0;

	do {
		if ((buffer_len - frag_offset) > frag_threshold) {
			frag_length = frag_threshold + extra_descoffset;
		} else {
			/* final fragment: flag it for the descriptor */
			frag_length = (u16)(buffer_len - frag_offset +
					extra_descoffset);
			last_inipkt = 1;
		}

		/* Allocate skb buffer to contain firmware */
		/* info and tx descriptor info. */
		skb = dev_alloc_skb(frag_length);
		if (!skb)
			return false;
		skb_reserve(skb, extra_descoffset);
		seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
					extra_descoffset));
		memcpy(seg_ptr, code_virtual_address + frag_offset,
		       (u32)(frag_length - extra_descoffset));

		tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
		tcb_desc->queue_index = TXCMD_QUEUE;
		tcb_desc->cmd_or_init = DESC_PACKET_TYPE_INIT;
		tcb_desc->last_inipkt = last_inipkt;

		_rtl92s_cmd_send_packet(hw, skb, last_inipkt);

		frag_offset += (frag_length - extra_descoffset);
	} while (frag_offset < buffer_len);

	/* ring the command-queue doorbell to start the transfer */
	rtl_write_byte(rtlpriv, TP_POLL, TPPOLL_CQ);

	return true;
}
/*
 * _rtl92s_firmware_checkready - verify the previous download stage landed.
 * @loadfw_status: the stage that was just downloaded (FW_STATUS_LOAD_*)
 *
 * Polls the TCR status bits appropriate to @loadfw_status; after EMEM it
 * also starts the on-chip CPU, and after DMEM it waits for the firmware
 * ready flag and programs TCR/RCR for normal (non-loopback) operation.
 *
 * Returns true if the stage completed, false on timeout/failure.
 * Fix: the timeout branches previously jumped to status_check_fail with
 * rtstatus still true, so failures were reported as success to the
 * caller; rtstatus is now set to false on those paths (matching the
 * intent shown by the default case).
 */
static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
		u8 loadfw_status)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rt_firmware *firmware = (struct rt_firmware *)rtlhal->pfirmware;
	u32 tmpu4b;
	u8 cpustatus = 0;
	short pollingcnt = 1000;
	bool rtstatus = true;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "LoadStaus(%d)\n", loadfw_status);

	firmware->fwstatus = (enum fw_status)loadfw_status;

	switch (loadfw_status) {
	case FW_STATUS_LOAD_IMEM:
		/* Polling IMEM code done. */
		do {
			cpustatus = rtl_read_byte(rtlpriv, TCR);
			if (cpustatus & IMEM_CODE_DONE)
				break;
			udelay(5);
		} while (pollingcnt--);

		/* NOTE(review): polls CODE_DONE but then tests CHK_RPT --
		 * presumably both bits rise together; confirm vs datasheet */
		if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n",
				 cpustatus);
			rtstatus = false;
			goto status_check_fail;
		}
		break;

	case FW_STATUS_LOAD_EMEM:
		/* Check Put Code OK and Turn On CPU */
		/* Polling EMEM code done. */
		do {
			cpustatus = rtl_read_byte(rtlpriv, TCR);
			if (cpustatus & EMEM_CODE_DONE)
				break;
			udelay(5);
		} while (pollingcnt--);

		if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n",
				 cpustatus);
			rtstatus = false;
			goto status_check_fail;
		}

		/* Turn On CPU */
		rtstatus = _rtl92s_firmware_enable_cpu(hw);
		if (!rtstatus) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Enable CPU fail!\n");
			goto status_check_fail;
		}
		break;

	case FW_STATUS_LOAD_DMEM:
		/* Polling DMEM code done */
		do {
			cpustatus = rtl_read_byte(rtlpriv, TCR);
			if (cpustatus & DMEM_CODE_DONE)
				break;
			udelay(5);
		} while (pollingcnt--);

		if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Polling DMEM code done fail ! cpustatus(%#x)\n",
				 cpustatus);
			rtstatus = false;
			goto status_check_fail;
		}

		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "DMEM code download success, cpustatus(%#x)\n",
			 cpustatus);

		/* Prevent Delay too much and being scheduled out */
		/* Polling Load Firmware ready */
		pollingcnt = 2000;
		do {
			cpustatus = rtl_read_byte(rtlpriv, TCR);
			if (cpustatus & FWRDY)
				break;
			udelay(40);
		} while (pollingcnt--);

		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "Polling Load Firmware ready, cpustatus(%x)\n",
			 cpustatus);

		if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) ||
		    (pollingcnt <= 0)) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Polling Load Firmware ready fail ! cpustatus(%x)\n",
				 cpustatus);
			rtstatus = false;
			goto status_check_fail;
		}

		/* If right here, we can set TCR/RCR to desired value */
		/* and config MAC lookback mode to normal mode */
		tmpu4b = rtl_read_dword(rtlpriv, TCR);
		rtl_write_dword(rtlpriv, TCR, (tmpu4b & (~TCR_ICV)));

		tmpu4b = rtl_read_dword(rtlpriv, RCR);
		rtl_write_dword(rtlpriv, RCR, (tmpu4b | RCR_APPFCS |
				RCR_APP_ICV | RCR_APP_MIC));

		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "Current RCR settings(%#x)\n", tmpu4b);

		/* Set to normal mode. */
		rtl_write_byte(rtlpriv, LBKMD_SEL, LBK_NORMAL);
		break;

	default:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
			 "Unknown status check!\n");
		rtstatus = false;
		break;
	}

	/* fall-through label: also reached on success, so rtstatus carries
	 * the result in both cases */
status_check_fail:
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "loadfw_status(%d), rtstatus(%x)\n",
		 loadfw_status, rtstatus);
	return rtstatus;
}
/*
 * rtl92s_download_fw - run the complete multi-stage firmware download.
 *
 * Parses the firmware header out of the preloaded image, copies the
 * IMEM and EMEM sections into their staging buffers, then walks the
 * state machine IMEM -> EMEM -> DMEM, downloading each section and
 * verifying it with _rtl92s_firmware_checkready().
 *
 * Returns non-zero on success and 0 on failure.  NOTE(review): a
 * missing firmware buffer also returns 1, i.e. callers that only test
 * for zero treat it as success -- confirm this is intended.
 */
int rtl92s_download_fw(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rt_firmware *firmware = NULL;
	struct fw_hdr *pfwheader;
	struct fw_priv *pfw_priv = NULL;
	u8 *puc_mappedfile = NULL;
	u32 ul_filelength = 0;
	u8 fwhdr_size = RT_8192S_FIRMWARE_HDR_SIZE;
	u8 fwstatus = FW_STATUS_INIT;
	bool rtstatus = true;

	if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
		return 1;

	firmware = (struct rt_firmware *)rtlhal->pfirmware;
	firmware->fwstatus = FW_STATUS_INIT;

	puc_mappedfile = firmware->sz_fw_tmpbuffer;

	/* 1. Retrieve FW header. */
	firmware->pfwheader = (struct fw_hdr *) puc_mappedfile;
	pfwheader = firmware->pfwheader;
	firmware->firmwareversion = byte(pfwheader->version, 0);
	firmware->pfwheader->fwpriv.hci_sel = 1;/* pcie */

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "signature:%x, version:%x, size:%x, imemsize:%x, sram size:%x\n",
		 pfwheader->signature,
		 pfwheader->version, pfwheader->dmem_size,
		 pfwheader->img_imem_size, pfwheader->img_sram_size);

	/* 2. Retrieve IMEM image. */
	if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size >
	    sizeof(firmware->fw_imem))) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "memory for data image is less than IMEM required\n");
		goto fail;
	} else {
		/* IMEM starts right behind the header */
		puc_mappedfile += fwhdr_size;

		memcpy(firmware->fw_imem, puc_mappedfile,
		       pfwheader->img_imem_size);
		firmware->fw_imem_len = pfwheader->img_imem_size;
	}

	/* 3. Retriecve EMEM image. */
	if (pfwheader->img_sram_size > sizeof(firmware->fw_emem)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "memory for data image is less than EMEM required\n");
		goto fail;
	} else {
		/* EMEM immediately follows the IMEM section */
		puc_mappedfile += firmware->fw_imem_len;

		memcpy(firmware->fw_emem, puc_mappedfile,
		       pfwheader->img_sram_size);
		firmware->fw_emem_len = pfwheader->img_sram_size;
	}

	/* 4. download fw now */
	fwstatus = _rtl92s_firmware_get_nextstatus(firmware->fwstatus);
	while (fwstatus != FW_STATUS_READY) {
		/* Image buffer redirection. */
		switch (fwstatus) {
		case FW_STATUS_LOAD_IMEM:
			puc_mappedfile = firmware->fw_imem;
			ul_filelength = firmware->fw_imem_len;
			break;
		case FW_STATUS_LOAD_EMEM:
			puc_mappedfile = firmware->fw_emem;
			ul_filelength = firmware->fw_emem_len;
			break;
		case FW_STATUS_LOAD_DMEM:
			/* Partial update the content of header private. */
			pfwheader = firmware->pfwheader;
			pfw_priv = &pfwheader->fwpriv;
			_rtl92s_firmwareheader_priveupdate(hw, pfw_priv);
			puc_mappedfile = (u8 *)(firmware->pfwheader) +
					RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
			ul_filelength = fwhdr_size -
					RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Unexpected Download step!!\n");
			goto fail;
			break;
		}

		/* <2> Download image file */
		rtstatus = _rtl92s_firmware_downloadcode(hw, puc_mappedfile,
				ul_filelength);
		if (!rtstatus) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
			goto fail;
		}

		/* <3> Check whether load FW process is ready */
		rtstatus = _rtl92s_firmware_checkready(hw, fwstatus);
		if (!rtstatus) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
			goto fail;
		}

		fwstatus = _rtl92s_firmware_get_nextstatus(firmware->fwstatus);
	}

	return rtstatus;
fail:
	return 0;
}
/*
 * _rtl92s_fill_h2c_cmd - serialize H2C commands into @skb.
 *
 * For each of @cmd_num commands, appends an 8-byte-aligned record: a
 * header carrying the payload length (16 bits), element id (8 bits)
 * and a 7-bit rolling sequence number, followed by the payload copied
 * from @pcmb_buffer.  The "continue" bit (bit 31) of every record but
 * the last is set so the firmware chains them.  Stops early if
 * @h2cbufferlen would be exceeded.  Returns the total bytes written.
 */
static u32 _rtl92s_fill_h2c_cmd(struct sk_buff *skb, u32 h2cbufferlen,
		u32 cmd_num, u32 *pelement_id, u32 *pcmd_len,
		u8 **pcmb_buffer, u8 *cmd_start_seq)
{
	u32 totallen = 0, len = 0, tx_desclen = 0;
	u32 pre_continueoffset = 0;
	u8 *ph2c_buffer;
	u8 i = 0;

	do {
		/* 8 - Byte aligment */
		len = H2C_TX_CMD_HDR_LEN + N_BYTE_ALIGMENT(pcmd_len[i], 8);

		/* Buffer length is not enough */
		if (h2cbufferlen < totallen + len + tx_desclen)
			break;

		/* Clear content */
		ph2c_buffer = (u8 *)skb_put(skb, (u32)len);
		memset((ph2c_buffer + totallen + tx_desclen), 0, len);

		/* CMD len */
		SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
				     0, 16, pcmd_len[i]);

		/* CMD ID */
		SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
				     16, 8, pelement_id[i]);

		/* CMD Sequence */
		/* wraps at 0x80: the sequence field is only 7 bits wide */
		*cmd_start_seq = *cmd_start_seq % 0x80;
		SET_BITS_TO_LE_4BYTE((ph2c_buffer + totallen + tx_desclen),
				     24, 7, *cmd_start_seq);
		++*cmd_start_seq;

		/* Copy memory */
		memcpy((ph2c_buffer + totallen + tx_desclen +
			H2C_TX_CMD_HDR_LEN), pcmb_buffer[i], pcmd_len[i]);

		/* CMD continue */
		/* set the continue in prevoius cmd. */
		if (i < cmd_num - 1)
			SET_BITS_TO_LE_4BYTE((ph2c_buffer + pre_continueoffset),
					     31, 1, 1);

		pre_continueoffset = totallen;

		totallen += len;
	} while (++i < cmd_num);

	return totallen;
}
/*
 * Compute how many bytes of H2C buffer the given command lengths will
 * occupy: each command is an 8-byte-aligned payload behind a fixed
 * header, and accumulation stops at the first command that no longer
 * fits in @h2cbufferlen.  Mirrors _rtl92s_fill_h2c_cmd()'s layout.
 */
static u32 _rtl92s_get_h2c_cmdlen(u32 h2cbufferlen, u32 cmd_num, u32 *pcmd_len)
{
	u32 used = 0;
	u32 txdesc_len = 0;
	u8 idx = 0;

	do {
		/* fixed header plus the 8-byte-aligned payload */
		u32 entry = H2C_TX_CMD_HDR_LEN +
			    N_BYTE_ALIGMENT(pcmd_len[idx], 8);

		/* stop once the next entry would overflow the buffer */
		if (h2cbufferlen < used + entry + txdesc_len)
			break;

		used += entry;
	} while (++idx < cmd_num);

	return used + txdesc_len;
}
/*
 * _rtl92s_firmware_set_h2c_cmd - build and transmit one H2C command.
 * @h2c_cmd: FW_H2C_* selector; mapped to a wire element id and size
 * @pcmd_buffer: command payload
 *
 * Allocates an skb sized for the serialized command, fills it via
 * _rtl92s_fill_h2c_cmd(), queues it on the TX command ring and polls
 * the queue.  Returns false only on skb allocation failure.
 */
static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
		u8 *pcmd_buffer)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_tcb_desc *cb_desc;
	struct sk_buff *skb;
	u32 element_id = 0;
	u32 cmd_len = 0;
	u32 len;

	/* translate the public selector into wire element id + length */
	switch (h2c_cmd) {
	case FW_H2C_SETPWRMODE:
		element_id = H2C_SETPWRMODE_CMD;
		cmd_len = sizeof(struct h2c_set_pwrmode_parm);
		break;
	case FW_H2C_JOINBSSRPT:
		element_id = H2C_JOINBSSRPT_CMD;
		cmd_len = sizeof(struct h2c_joinbss_rpt_parm);
		break;
	case FW_H2C_WOWLAN_UPDATE_GTK:
		element_id = H2C_WOWLAN_UPDATE_GTK_CMD;
		cmd_len = sizeof(struct h2c_wpa_two_way_parm);
		break;
	case FW_H2C_WOWLAN_UPDATE_IV:
		element_id = H2C_WOWLAN_UPDATE_IV_CMD;
		cmd_len = sizeof(unsigned long long);
		break;
	case FW_H2C_WOWLAN_OFFLOAD:
		element_id = H2C_WOWLAN_FW_OFFLOAD;
		cmd_len = sizeof(u8);
		break;
	default:
		/* unknown selector: element_id/cmd_len stay zero */
		break;
	}

	len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
	skb = dev_alloc_skb(len);
	if (!skb)
		return false;
	cb_desc = (struct rtl_tcb_desc *)(skb->cb);
	cb_desc->queue_index = TXCMD_QUEUE;
	cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
	cb_desc->last_inipkt = false;

	_rtl92s_fill_h2c_cmd(skb, MAX_TRANSMIT_BUFFER_SIZE, 1, &element_id,
			&cmd_len, &pcmd_buffer, &rtlhal->h2c_txcmd_seq);

	_rtl92s_cmd_send_packet(hw, skb, false);
	rtlpriv->cfg->ops->tx_polling(hw, TXCMD_QUEUE);

	return true;
}
/*
 * rtl92s_set_fw_pwrmode_cmd - tell the firmware which power-save mode to use.
 * @Mode: FW_PS_* mode selector
 *
 * Fills a struct h2c_set_pwrmode_parm and sends it as an H2C command.
 * The beacon-pass count is derived from the expected wakeup period
 * (beacon interval; multiplied by the DTIM period for MAX mode).
 */
void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 Mode)
{
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct h2c_set_pwrmode_parm pwrmode;
	u16 max_wakeup_period = 0;

	pwrmode.mode = Mode;
	pwrmode.flag_low_traffic_en = 0;
	pwrmode.flag_lpnav_en = 0;
	pwrmode.flag_rf_low_snr_en = 0;
	pwrmode.flag_dps_en = 0;
	pwrmode.bcn_rx_en = 0;
	pwrmode.bcn_to = 0;
	/* the beacon interval is written at raw byte offset 8 -- assumes
	 * the struct layout matches the firmware ABI; confirm vs fw.h */
	SET_BITS_TO_LE_2BYTE((u8 *)(&pwrmode) + 8, 0, 16,
			mac->vif->bss_conf.beacon_int);
	pwrmode.app_itv = 0;
	pwrmode.awake_bcn_itvl = ppsc->reg_max_lps_awakeintvl;
	pwrmode.smart_ps = 1;
	pwrmode.bcn_pass_period = 10;

	/* Set beacon pass count */
	if (pwrmode.mode == FW_PS_MIN_MODE)
		max_wakeup_period = mac->vif->bss_conf.beacon_int;
	else if (pwrmode.mode == FW_PS_MAX_MODE)
		max_wakeup_period = mac->vif->bss_conf.beacon_int *
			mac->vif->bss_conf.dtim_period;

	/* longer wakeup periods allow fewer beacons to be passed up */
	if (max_wakeup_period >= 500)
		pwrmode.bcn_pass_cnt = 1;
	else if ((max_wakeup_period >= 300) && (max_wakeup_period < 500))
		pwrmode.bcn_pass_cnt = 2;
	else if ((max_wakeup_period >= 200) && (max_wakeup_period < 300))
		pwrmode.bcn_pass_cnt = 3;
	else if ((max_wakeup_period >= 20) && (max_wakeup_period < 200))
		pwrmode.bcn_pass_cnt = 5;
	else
		pwrmode.bcn_pass_cnt = 1;

	_rtl92s_firmware_set_h2c_cmd(hw, FW_H2C_SETPWRMODE, (u8 *)&pwrmode);
}
/*
 * Report a BSS join/leave to the firmware: operating mode, QoS info,
 * BSSID, beacon interval and association id.
 */
void rtl92s_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw,
		u8 mstatus, u8 ps_qosinfo)
{
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct h2c_joinbss_rpt_parm joinbss_rpt;
	int i;

	joinbss_rpt.opmode = mstatus;
	joinbss_rpt.ps_qos_info = ps_qosinfo;
	for (i = 0; i < 6; i++)
		joinbss_rpt.bssid[i] = mac->bssid[i];
	/* Beacon interval and association id are written straight into
	 * the parameter block at byte offsets 8 and 10 (little endian). */
	SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 8, 0, 16,
			mac->vif->bss_conf.beacon_int);
	SET_BITS_TO_LE_2BYTE((u8 *)(&joinbss_rpt) + 10, 0, 16, mac->assoc_id);

	_rtl92s_firmware_set_h2c_cmd(hw, FW_H2C_JOINBSSRPT, (u8 *)&joinbss_rpt);
}
| gpl-2.0 |
photon-dev-team/ics_kernel | arch/cris/kernel/crisksyms.c | 9407 | 1679 | #include <linux/module.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/fasttimer.h>
/* Implemented elsewhere in the CRIS port. */
extern unsigned long get_cmos_time(void);
/* Compiler-support (libgcc-style) arithmetic helpers that the
 * compiler emits calls to on CRIS; exported so modules link. */
extern void __Udiv(void);
extern void __Umod(void);
extern void __Div(void);
extern void __Mod(void);
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __lshrdi3(void);
extern void __negdi2(void);
extern void iounmap(volatile void * __iomem);

/* Platform dependent support */
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(loops_per_usec);

/* Math functions */
EXPORT_SYMBOL(__Udiv);
EXPORT_SYMBOL(__Umod);
EXPORT_SYMBOL(__Div);
EXPORT_SYMBOL(__Mod);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__negdi2);

/* Memory functions */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);

/* Userspace access functions */
EXPORT_SYMBOL(__copy_user_zeroing);
EXPORT_SYMBOL(__copy_user);

/* memcpy/memset may be macro-wrapped by the arch headers; undo that
 * so the real functions can be declared and exported. */
#undef memcpy
#undef memset
extern void * memset(void *, int, __kernel_size_t);
extern void * memcpy(void *, const void *, __kernel_size_t);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);

#ifdef CONFIG_ETRAX_FAST_TIMER
/* Fast timer functions */
EXPORT_SYMBOL(fast_timer_list);
EXPORT_SYMBOL(start_one_shot_timer);
EXPORT_SYMBOL(del_fast_timer);
EXPORT_SYMBOL(schedule_usleep);
#endif
EXPORT_SYMBOL(csum_partial);
| gpl-2.0 |
armani-dev/android_kernel_xiaomi_armani_OLD | drivers/media/video/pvrusb2/pvrusb2-eeprom.c | 11455 | 4775 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
* Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/slab.h>
#include "pvrusb2-eeprom.h"
#include "pvrusb2-hdw-internal.h"
#include "pvrusb2-debug.h"
#define trace_eeprom(...) pvr2_trace(PVR2_TRACE_EEPROM,__VA_ARGS__)
/*
Read and analyze data in the eeprom. Use tveeprom to figure out
the packet structure, since this is another Hauppauge device and
internally it has a family resemblance to ivtv-type devices
*/
#include <media/tveeprom.h>
/* We seem to only be interested in the last 128 bytes of the EEPROM */
#define EEPROM_SIZE 128
/* Grab EEPROM contents, needed for direct method.
 *
 * Returns a kmalloc()ed buffer of EEPROM_SIZE bytes holding the *last*
 * EEPROM_SIZE bytes of the device's EEPROM, or NULL on failure.  The
 * caller owns the buffer and must kfree() it. */
static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
{
	struct i2c_msg msg[2];
	u8 *eeprom;
	u8 iadd[2];		/* I2C sub-address bytes (1 or 2 used) */
	u8 addr;
	u16 eepromSize;
	unsigned int offs;
	int ret;
	int mode16 = 0;		/* nonzero -> 16 bit EEPROM addressing */
	unsigned pcnt,tcnt;

	eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
	if (!eeprom) {
		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
			   "Failed to allocate memory"
			   " required to read eeprom");
		return NULL;
	}

	trace_eeprom("Value for eeprom addr from controller was 0x%x",
		     hdw->eeprom_addr);
	addr = hdw->eeprom_addr;
	/* Seems that if the high bit is set, then the *real* eeprom
	   address is shifted right now bit position (noticed this in
	   newer PVR USB2 hardware) */
	if (addr & 0x80) addr >>= 1;

	/* FX2 documentation states that a 16bit-addressed eeprom is
	   expected if the I2C address is an odd number (yeah, this is
	   strange but it's what they do) */
	mode16 = (addr & 1);
	eepromSize = (mode16 ? 4096 : 256);
	trace_eeprom("Examining %d byte eeprom at location 0x%x"
		     " using %d bit addressing",eepromSize,addr,
		     mode16 ? 16 : 8);

	/* msg[0] writes the sub-address, msg[1] reads the data back. */
	msg[0].addr = addr;
	msg[0].flags = 0;
	msg[0].len = mode16 ? 2 : 1;
	msg[0].buf = iadd;
	msg[1].addr = addr;
	msg[1].flags = I2C_M_RD;

	/* We have to do the actual eeprom data fetch ourselves, because
	   (1) we're only fetching part of the eeprom, and (2) if we were
	   getting the whole thing our I2C driver can't grab it in one
	   pass - which is what tveeprom is otherwise going to attempt */
	memset(eeprom,0,EEPROM_SIZE);
	for (tcnt = 0; tcnt < EEPROM_SIZE; tcnt += pcnt) {
		/* Read in 16 byte chunks; the final chunk may be short. */
		pcnt = 16;
		if (pcnt + tcnt > EEPROM_SIZE) pcnt = EEPROM_SIZE-tcnt;
		/* Offset into the device: only the tail of the EEPROM
		   is of interest, so skip (eepromSize - EEPROM_SIZE). */
		offs = tcnt + (eepromSize - EEPROM_SIZE);
		if (mode16) {
			iadd[0] = offs >> 8;
			iadd[1] = offs;
		} else {
			iadd[0] = offs;
		}
		msg[1].len = pcnt;
		msg[1].buf = eeprom+tcnt;
		if ((ret = i2c_transfer(&hdw->i2c_adap,
					msg,ARRAY_SIZE(msg))) != 2) {
			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
				   "eeprom fetch set offs err=%d",ret);
			kfree(eeprom);
			return NULL;
		}
	}
	return eeprom;
}
/* Directly call eeprom analysis function within tveeprom.
 *
 * Fetches the EEPROM tail, feeds it to tveeprom_hauppauge_analog() and
 * copies the interesting results (tuner type, formats, serial number)
 * into the hdw structure.  Returns 0 on success or -EINVAL when the
 * EEPROM could not be read. */
int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
{
	u8 *eeprom;
	struct tveeprom tvdata;

	memset(&tvdata,0,sizeof(tvdata));

	eeprom = pvr2_eeprom_fetch(hdw);
	if (!eeprom) return -EINVAL;

	{
		struct i2c_client fake_client;
		/* Newer version expects a useless client interface.
		 * Fix: zero the whole struct first so tveeprom never
		 * sees stack garbage in the fields we don't set. */
		memset(&fake_client, 0, sizeof(fake_client));
		fake_client.addr = hdw->eeprom_addr;
		fake_client.adapter = &hdw->i2c_adap;
		tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
	}

	trace_eeprom("eeprom assumed v4l tveeprom module");
	trace_eeprom("eeprom direct call results:");
	trace_eeprom("has_radio=%d",tvdata.has_radio);
	trace_eeprom("tuner_type=%d",tvdata.tuner_type);
	trace_eeprom("tuner_formats=0x%x",tvdata.tuner_formats);
	trace_eeprom("audio_processor=%d",tvdata.audio_processor);
	trace_eeprom("model=%d",tvdata.model);
	trace_eeprom("revision=%d",tvdata.revision);
	trace_eeprom("serial_number=%d",tvdata.serial_number);
	trace_eeprom("rev_str=%s",tvdata.rev_str);
	hdw->tuner_type = tvdata.tuner_type;
	hdw->tuner_updated = !0;
	hdw->serial_number = tvdata.serial_number;
	hdw->std_mask_eeprom = tvdata.tuner_formats;

	kfree(eeprom);

	return 0;
}
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 70 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
multipath-tcp/mptcp_3.12.x | drivers/usb/mon/mon_main.c | 11967 | 9277 | /*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* mon_main.c: Main file, module initiation and exit, registrations, etc.
*
* Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include "usb_mon.h"
static void mon_stop(struct mon_bus *mbus);
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
static void mon_bus_drop(struct kref *r);
static void mon_bus_init(struct usb_bus *ubus);
DEFINE_MUTEX(mon_lock);
struct mon_bus mon_bus0; /* Pseudo bus meaning "all buses" */
static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */
/*
* Link a reader into the bus.
*
* This must be called with mon_lock taken because of mbus->ref.
*/
void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r)
{
unsigned long flags;
struct list_head *p;
spin_lock_irqsave(&mbus->lock, flags);
if (mbus->nreaders == 0) {
if (mbus == &mon_bus0) {
list_for_each (p, &mon_buses) {
struct mon_bus *m1;
m1 = list_entry(p, struct mon_bus, bus_link);
m1->u_bus->monitored = 1;
}
} else {
mbus->u_bus->monitored = 1;
}
}
mbus->nreaders++;
list_add_tail(&r->r_link, &mbus->r_list);
spin_unlock_irqrestore(&mbus->lock, flags);
kref_get(&mbus->ref);
}
/*
 * Unlink reader from the bus.
 *
 * Called with mon_lock taken, so dropping mbus->ref here is safe.
 */
void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r)
{
	unsigned long flags;

	spin_lock_irqsave(&mbus->lock, flags);
	list_del(&r->r_link);
	/* Last reader gone: switch monitoring off for this bus. */
	if (--mbus->nreaders == 0)
		mon_stop(mbus);
	spin_unlock_irqrestore(&mbus->lock, flags);

	kref_put(&mbus->ref, mon_bus_drop);
}
/*
*/
static void mon_bus_submit(struct mon_bus *mbus, struct urb *urb)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_submit(r->r_data, urb);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
/* Core hook: deliver a submission to the bus's readers and to bus 0. */
static void mon_submit(struct usb_bus *ubus, struct urb *urb)
{
	struct mon_bus *mbus = ubus->mon_bus;

	if (mbus != NULL)
		mon_bus_submit(mbus, urb);
	mon_bus_submit(&mon_bus0, urb);
}
/*
*/
static void mon_bus_submit_error(struct mon_bus *mbus, struct urb *urb, int error)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_error(r->r_data, urb, error);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
static void mon_submit_error(struct usb_bus *ubus, struct urb *urb, int error)
{
struct mon_bus *mbus;
if ((mbus = ubus->mon_bus) != NULL)
mon_bus_submit_error(mbus, urb, error);
mon_bus_submit_error(&mon_bus0, urb, error);
}
/*
*/
static void mon_bus_complete(struct mon_bus *mbus, struct urb *urb, int status)
{
unsigned long flags;
struct list_head *pos;
struct mon_reader *r;
spin_lock_irqsave(&mbus->lock, flags);
mbus->cnt_events++;
list_for_each (pos, &mbus->r_list) {
r = list_entry(pos, struct mon_reader, r_link);
r->rnf_complete(r->r_data, urb, status);
}
spin_unlock_irqrestore(&mbus->lock, flags);
}
/* Core hook: deliver a completion to the bus's readers and to bus 0. */
static void mon_complete(struct usb_bus *ubus, struct urb *urb, int status)
{
	struct mon_bus *mbus = ubus->mon_bus;

	if (mbus != NULL)
		mon_bus_complete(mbus, urb, status);
	mon_bus_complete(&mon_bus0, urb, status);
}
/* int (*unlink_urb) (struct urb *urb, int status); */
/*
 * Stop monitoring.
 *
 * Called under mbus->lock (see mon_reader_del) when a bus loses its
 * last reader.  For the pseudo bus this clears the monitored flag of
 * every real bus that has no private readers of its own.
 */
static void mon_stop(struct mon_bus *mbus)
{
	struct usb_bus *ubus;
	struct list_head *p;

	if (mbus == &mon_bus0) {
		list_for_each (p, &mon_buses) {
			mbus = list_entry(p, struct mon_bus, bus_link);
			/*
			 * We do not change nreaders here, so rely on mon_lock.
			 */
			if (mbus->nreaders == 0 && (ubus = mbus->u_bus) != NULL)
				ubus->monitored = 0;
		}
	} else {
		/*
		 * A stop can be called for a dissolved mon_bus in case of
		 * a reader staying across an rmmod foo_hcd, so test ->u_bus.
		 */
		if (mon_bus0.nreaders == 0 && (ubus = mbus->u_bus) != NULL) {
			ubus->monitored = 0;
			mb();
		}
	}
}
/*
 * Add a USB bus (usually by a modprobe foo-hcd)
 *
 * This does not return an error code because the core cannot care less
 * if monitoring is not established.
 */
static void mon_bus_add(struct usb_bus *ubus)
{
	mon_bus_init(ubus);
	mutex_lock(&mon_lock);
	/* If the "all buses" pseudo bus already has readers, the new
	 * bus starts out monitored right away. */
	if (mon_bus0.nreaders != 0)
		ubus->monitored = 1;
	mutex_unlock(&mon_lock);
}
/*
 * Remove a USB bus (either from rmmod foo-hcd or from a hot-remove event).
 */
static void mon_bus_remove(struct usb_bus *ubus)
{
	struct mon_bus *mbus = ubus->mon_bus;

	mutex_lock(&mon_lock);
	list_del(&mbus->bus_link);

	if (mbus->text_inited)
		mon_text_del(mbus);
	if (mbus->bin_inited)
		mon_bin_del(mbus);

	/* Sever the usb_bus <-> mon_bus link; open readers may still
	 * hold a reference, so the kref decides when mbus is freed. */
	mon_dissolve(mbus, ubus);
	kref_put(&mbus->ref, mon_bus_drop);
	mutex_unlock(&mon_lock);
}
/* USB core notifier callback: track bus arrival and departure. */
static int mon_notify(struct notifier_block *self, unsigned long action,
		      void *dev)
{
	if (action == USB_BUS_ADD)
		mon_bus_add(dev);
	else if (action == USB_BUS_REMOVE)
		mon_bus_remove(dev);
	return NOTIFY_OK;
}
/* Notifier through which the USB core reports bus add/remove events. */
static struct notifier_block mon_nb = {
	.notifier_call = mon_notify,
};

/*
 * Ops: the hooks the USB core invokes on every URB submission,
 * submission error and completion once a bus is monitored.
 */
static struct usb_mon_operations mon_ops_0 = {
	.urb_submit = mon_submit,
	.urb_submit_error = mon_submit_error,
	.urb_complete = mon_complete,
};
/*
 * Tear usb_bus and mon_bus apart.
 *
 * Clears the monitored flag and severs the pointers in both directions.
 * The memory barriers order these stores against the submit/complete
 * hooks, which test ubus->mon_bus without taking a lock.
 */
static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus)
{

	if (ubus->monitored) {
		ubus->monitored = 0;
		mb();
	}

	ubus->mon_bus = NULL;
	mbus->u_bus = NULL;
	mb();

	/* We want synchronize_irq() here, but that needs an argument. */
}
/*
 * Final kref release: all readers and the bus itself are gone.
 */
static void mon_bus_drop(struct kref *r)
{
	kfree(container_of(r, struct mon_bus, ref));
}
/*
 * Initialize a bus for us:
 *  - allocate mon_bus
 *  - refcount USB bus struct
 *  - link
 */
static void mon_bus_init(struct usb_bus *ubus)
{
	struct mon_bus *mbus;

	mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL);
	if (mbus == NULL)
		return;		/* monitoring is best-effort; nothing to undo */

	kref_init(&mbus->ref);
	spin_lock_init(&mbus->lock);
	INIT_LIST_HEAD(&mbus->r_list);

	/*
	 * We don't need to take a reference to ubus, because we receive
	 * a notification if the bus is about to be removed.
	 */
	mbus->u_bus = ubus;
	ubus->mon_bus = mbus;

	mbus->text_inited = mon_text_add(mbus, ubus);
	mbus->bin_inited = mon_bin_add(mbus, ubus);

	mutex_lock(&mon_lock);
	list_add_tail(&mbus->bus_link, &mon_buses);
	mutex_unlock(&mon_lock);
}
static void mon_bus0_init(void)
{
struct mon_bus *mbus = &mon_bus0;
kref_init(&mbus->ref);
spin_lock_init(&mbus->lock);
INIT_LIST_HEAD(&mbus->r_list);
mbus->text_inited = mon_text_add(mbus, NULL);
mbus->bin_inited = mon_bin_add(mbus, NULL);
}
/*
* Search a USB bus by number. Notice that USB bus numbers start from one,
* which we may later use to identify "all" with zero.
*
* This function must be called with mon_lock held.
*
* This is obviously inefficient and may be revised in the future.
*/
struct mon_bus *mon_bus_lookup(unsigned int num)
{
struct list_head *p;
struct mon_bus *mbus;
if (num == 0) {
return &mon_bus0;
}
list_for_each (p, &mon_buses) {
mbus = list_entry(p, struct mon_bus, bus_link);
if (mbus->u_bus->busnum == num) {
return mbus;
}
}
return NULL;
}
/*
 * Module init: bring up the text and binary interfaces, create pseudo
 * bus 0, register the URB hooks with the core and attach to every bus
 * that already exists.  Tears down in reverse order on failure.
 */
static int __init mon_init(void)
{
	struct usb_bus *ubus;
	int rc;

	if ((rc = mon_text_init()) != 0)
		goto err_text;
	if ((rc = mon_bin_init()) != 0)
		goto err_bin;
	mon_bus0_init();

	if (usb_mon_register(&mon_ops_0) != 0) {
		printk(KERN_NOTICE TAG ": unable to register with the core\n");
		rc = -ENODEV;
		goto err_reg;
	}
	// MOD_INC_USE_COUNT(which_module?);

	/* Walking the bus list and registering the notifier under the
	 * same lock means no bus can be missed in between. */
	mutex_lock(&usb_bus_list_lock);
	list_for_each_entry (ubus, &usb_bus_list, bus_list) {
		mon_bus_init(ubus);
	}
	usb_register_notify(&mon_nb);
	mutex_unlock(&usb_bus_list_lock);
	return 0;

err_reg:
	mon_bin_exit();
err_bin:
	mon_text_exit();
err_text:
	return rc;
}
/*
 * Module teardown: detach from the USB core first so no new events
 * arrive, then dismantle every known bus and finally pseudo bus 0.
 */
static void __exit mon_exit(void)
{
	struct mon_bus *mbus;
	struct list_head *p;

	usb_unregister_notify(&mon_nb);
	usb_mon_deregister();

	mutex_lock(&mon_lock);

	while (!list_empty(&mon_buses)) {
		p = mon_buses.next;
		mbus = list_entry(p, struct mon_bus, bus_link);
		list_del(p);

		if (mbus->text_inited)
			mon_text_del(mbus);
		if (mbus->bin_inited)
			mon_bin_del(mbus);

		/*
		 * This never happens, because the open/close paths in
		 * file level maintain module use counters and so rmmod fails
		 * before reaching here. However, better be safe...
		 */
		if (mbus->nreaders) {
			printk(KERN_ERR TAG
			    ": Outstanding opens (%d) on usb%d, leaking...\n",
			    mbus->nreaders, mbus->u_bus->busnum);
			atomic_set(&mbus->ref.refcount, 2); /* Force leak */
		}

		mon_dissolve(mbus, mbus->u_bus);
		kref_put(&mbus->ref, mon_bus_drop);
	}

	/* Bus 0 is static: delete its interfaces but do not free it. */
	mbus = &mon_bus0;
	if (mbus->text_inited)
		mon_text_del(mbus);
	if (mbus->bin_inited)
		mon_bin_del(mbus);

	mutex_unlock(&mon_lock);

	mon_text_exit();
	mon_bin_exit();
}
module_init(mon_init);
module_exit(mon_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
emceethemouth/kernel_androidone | sound/pci/echoaudio/indigo_dsp.c | 12479 | 4043 | /****************************************************************************
Copyright Echo Digital Audio Corporation (c) 1998 - 2004
All rights reserved
www.echoaudio.com
This file is part of Echo Digital Audio's generic driver library.
Echo Digital Audio's generic driver library is free software;
you can redistribute it and/or modify it under the terms of
the GNU General Public License as published by the Free Software
Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston,
MA 02111-1307, USA.
*************************************************************************
Translation from C++ and adaptation for use in ALSA-Driver
were made by Giuliano Pochini <pochini@shiny.it>
****************************************************************************/
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
int gain);
static int update_vmixer_level(struct echoaudio *chip);
/*
 * One-time hardware bring-up for the Indigo: sanity-check the id,
 * set up the DSP communication page and load the DSP firmware.
 */
static int init_hw(struct echoaudio *chip, u16 device_id, u16 subdevice_id)
{
	int err;

	DE_INIT(("init_hw() - Indigo\n"));
	if (snd_BUG_ON((subdevice_id & 0xfff0) != INDIGO))
		return -ENODEV;

	err = init_dsp_comm_page(chip);
	if (err) {
		DE_INIT(("init_hw - could not initialize DSP comm page\n"));
		return err;
	}

	chip->device_id = device_id;
	chip->subdevice_id = subdevice_id;
	chip->bad_board = TRUE;
	chip->dsp_code_to_load = FW_INDIGO_DSP;
	/* Since this card has no ASIC, mark it as loaded so everything
	   works OK */
	chip->asic_loaded = TRUE;
	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;

	err = load_firmware(chip);
	if (err < 0)
		return err;
	chip->bad_board = FALSE;

	DE_INIT(("init_hw done\n"));
	return err;
}
/* Restore default mixer state; the Indigo only has line levels to
 * program. */
static int set_mixer_defaults(struct echoaudio *chip)
{
	return init_line_levels(chip);
}
/* The Indigo has no external clock inputs; only the internal clock
 * can ever be reported. */
static u32 detect_input_clocks(const struct echoaudio *chip)
{
	return ECHO_CLOCK_BIT_INTERNAL;
}
/* The Indigo has no ASIC. Just do nothing */
static int load_asic(struct echoaudio *chip)
{
	/* init_hw() already set chip->asic_loaded = TRUE. */
	return 0;
}
/*
 * Switch the card to one of the supported sample rates.  Returns
 * -EINVAL for unsupported rates, -EIO if the DSP handshake times out.
 */
static int set_sample_rate(struct echoaudio *chip, u32 rate)
{
	static const struct {
		u32 rate;
		u32 reg;
	} rate_map[] = {
		{ 96000, MIA_96000 },
		{ 88200, MIA_88200 },
		{ 48000, MIA_48000 },
		{ 44100, MIA_44100 },
		{ 32000, MIA_32000 },
	};
	u32 control_reg = 0;
	int i, found = 0;

	for (i = 0; i < ARRAY_SIZE(rate_map); i++) {
		if (rate_map[i].rate == rate) {
			control_reg = rate_map[i].reg;
			found = 1;
			break;
		}
	}
	if (!found) {
		DE_ACT(("set_sample_rate: %d invalid!\n", rate));
		return -EINVAL;
	}

	/* Set the control register if it has changed */
	if (control_reg != le32_to_cpu(chip->comm_page->control_register)) {
		if (wait_handshake(chip))
			return -EIO;

		chip->comm_page->sample_rate = cpu_to_le32(rate);	/* ignored by the DSP */
		chip->comm_page->control_register = cpu_to_le32(control_reg);
		chip->sample_rate = rate;

		clear_handshake(chip);
		return send_vector(chip, DSP_VC_UPDATE_CLOCKS);
	}
	return 0;
}
/* This function routes the sound from a virtual channel to a real output */
static int set_vmixer_gain(struct echoaudio *chip, u16 output, u16 pipe,
			   int gain)
{
	int slot;

	if (snd_BUG_ON(pipe >= num_pipes_out(chip) ||
		       output >= num_busses_out(chip)))
		return -EINVAL;

	if (wait_handshake(chip))
		return -EIO;

	/* Mirror the gain in the driver state and in the comm page
	 * (the comm page table is laid out [output][pipe], flattened). */
	chip->vmixer_gain[output][pipe] = gain;
	slot = output * num_pipes_out(chip) + pipe;
	chip->comm_page->vmixer[slot] = gain;

	DE_ACT(("set_vmixer_gain: pipe %d, out %d = %d\n", pipe, output, gain));
	return 0;
}
/* Tell the DSP to read and update virtual mixer levels in comm page. */
static int update_vmixer_level(struct echoaudio *chip)
{
	/* NOTE(review): wait_handshake presumably blocks until the DSP
	 * has consumed the previous command -- confirm in the DSP code. */
	if (wait_handshake(chip))
		return -EIO;
	clear_handshake(chip);
	return send_vector(chip, DSP_VC_SET_VMIXER_GAIN);
}
| gpl-2.0 |
ffolkes/plasmakernel_note4 | net/ipv4/icmp.c | 192 | 26942 | /*
* NET3: Implementation of the ICMP protocol layer.
*
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Some of the function names and the icmp unreach table for this
* module were derived from [icmp.c 1.0.11 06/02/93] by
* Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
* Other than that this module is a complete rewrite.
*
* Fixes:
* Clemens Fruhwirth : introduce global icmp rate limiting
* with icmp type masking ability instead
* of broken per type icmp timeouts.
* Mike Shaver : RFC1122 checks.
* Alan Cox : Multicast ping reply as self.
* Alan Cox : Fix atomicity lockup in ip_build_xmit
* call.
* Alan Cox : Added 216,128 byte paths to the MTU
* code.
* Martin Mares : RFC1812 checks.
* Martin Mares : Can be configured to follow redirects
* if acting as a router _without_ a
* routing protocol (RFC 1812).
* Martin Mares : Echo requests may be configured to
* be ignored (RFC 1812).
* Martin Mares : Limitation of ICMP error message
* transmit rate (RFC 1812).
* Martin Mares : TOS and Precedence set correctly
* (RFC 1812).
* Martin Mares : Now copying as much data from the
* original packet as we can without
* exceeding 576 bytes (RFC 1812).
* Willy Konynenberg : Transparent proxying support.
* Keith Owens : RFC1191 correction for 4.2BSD based
* path MTU bug.
* Thomas Quinot : ICMP Dest Unreach codes up to 15 are
* valid (RFC 1812).
* Andi Kleen : Check all packet lengths properly
* and moved all kfree_skb() up to
* icmp_rcv.
* Andi Kleen : Move the rate limit bookkeeping
* into the dest entry and use a token
* bucket filter (thanks to ANK). Make
* the rates sysctl configurable.
* Yu Tianli : Fixed two ugly bugs in icmp_send
* - IP option length was accounted wrongly
* - ICMP header length was not accounted
* at all.
* Tristan Greaves : Added sysctl option to ignore bogus
* broadcast responses from broken routers.
*
* To Fix:
*
* - Should use skb_pull() instead of all the manual checking.
* This would also greatly simply some upper layer error handlers. --AK
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
/*
 * Build xmit assembly blocks: everything needed to construct one
 * outgoing ICMP message.
 */
struct icmp_bxm {
	struct sk_buff *skb;	/* packet being answered / quoted */
	int offset;		/* base offset of quoted data within skb */
	int data_len;		/* bytes of payload copied into the reply */
	struct {
		struct icmphdr icmph;	/* reply ICMP header */
		__be32 times[3];	/* timestamps (timestamp replies) */
	} data;
	int head_len;		/* bytes of "data" above actually used */
	struct ip_options_data replyopts; /* echoed/reversed IP options */
};
/* An array of errno for error messages from dest unreach,
 * indexed by the ICMP_DEST_UNREACH code; .fatal flags hard errors
 * (presumably consulted by protocol error handlers). */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
const struct icmp_err icmp_err_convert[] = {
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNREACH */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNREACH */
		.fatal = 0,
	},
	{
		.errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */,
		.fatal = 1,
	},
	{
		.errno = ECONNREFUSED,	/* ICMP_PORT_UNREACH */
		.fatal = 1,
	},
	{
		.errno = EMSGSIZE,	/* ICMP_FRAG_NEEDED */
		.fatal = 0,
	},
	{
		.errno = EOPNOTSUPP,	/* ICMP_SR_FAILED */
		.fatal = 0,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = EHOSTDOWN,	/* ICMP_HOST_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = ENONET,	/* ICMP_HOST_ISOLATED */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_ANO */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_ANO */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PKT_FILTERED */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_VIOLATION */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_CUTOFF */
		.fatal = 1,
	},
};
EXPORT_SYMBOL(icmp_err_convert);
/*
 * ICMP control array. This specifies what to do with each ICMP.
 */
struct icmp_control {
	void (*handler)(struct sk_buff *skb);	/* per-type receive handler */
	short error;		/* This ICMP is classed as an error message */
};

/* Forward (tentative) definition of the per-type dispatch table;
 * the initializer lives further down the file. */
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
/*
 *	The ICMP socket(s). This is the most convenient way to flow control
 *	our ICMP output as well as maintain a clean interface throughout
 *	all layers. All Socketless IP sends will soon be gone.
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
/* Return this CPU's ICMP transmit socket for the given namespace. */
static struct sock *icmp_sk(struct net *net)
{
	return net->ipv4.icmp_sk[smp_processor_id()];
}
/*
 * Grab this CPU's ICMP socket with BHs disabled.  Returns NULL when
 * the socket is already locked -- in that case the caller must simply
 * drop the reply (see comment below for how this can happen).
 */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
	struct sock *sk;

	local_bh_disable();

	sk = icmp_sk(net);

	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path signals a
		 * dst_link_failure() for an outgoing ICMP packet.
		 */
		local_bh_enable();
		return NULL;
	}
	return sk;
}
/* Release the per-CPU ICMP socket taken by icmp_xmit_lock(). */
static inline void icmp_xmit_unlock(struct sock *sk)
{
	spin_unlock_bh(&sk->sk_lock.slock);
}
/*
 * Rate-limit check for an outgoing ICMP frame: returns true when a
 * packet of the given type/code may be sent towards fl4->daddr.
 */
static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
				      struct flowi4 *fl4, int type, int code)
{
	struct dst_entry *dst = &rt->dst;
	bool rc = true;

	/* Out-of-range types are never limited. */
	if (type > NR_ICMP_TYPES)
		goto out;

	/* Don't limit PMTU discovery. */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		goto out;

	/* No rate limit on loopback */
	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
		goto out;

	/* Limit if icmp type is enabled in ratemask. */
	if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
		/* Per-destination token bucket via the inet_peer cache. */
		struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
		rc = inet_peer_xrlim_allow(peer,
					   net->ipv4.sysctl_icmp_ratelimit);
		if (peer)
			inet_putpeer(peer);
	}
out:
	return rc;
}
/*
 *	Maintain the counters used in the SNMP statistics for outgoing ICMP
 */
void icmp_out_count(struct net *net, unsigned char type)
{
	/* Bump both the per-type and the aggregate output counters. */
	ICMPMSGOUT_INC_STATS(net, type);
	ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}
/*
 *	Checksum each fragment, and on the first include the headers and final
 *	checksum.
 *
 *	Callback for ip_append_data(): copies "len" bytes of the quoted
 *	packet into the reply while accumulating the checksum.  For error
 *	messages the conntrack of the original skb is attached to the reply.
 */
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
			  struct sk_buff *skb)
{
	struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(icmp_param->skb,
				      icmp_param->offset + offset,
				      to, len, 0);

	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (icmp_pointers[icmp_param->data.icmph.type].error)
		nf_ct_attach(skb, icmp_param->skb);
	return 0;
}
/*
 * Assemble the queued reply, compute its ICMP checksum and hand it to
 * the IP output path.  ip_append_data() copies the payload through
 * icmp_glue_bits(), which leaves per-fragment partial checksums in
 * each skb; they are summed here and the header is folded in last.
 */
static void icmp_push_reply(struct icmp_bxm *icmp_param,
			    struct flowi4 *fl4,
			    struct ipcm_cookie *ipc, struct rtable **rt)
{
	struct sock *sk;
	struct sk_buff *skb;

	sk = icmp_sk(dev_net((*rt)->dst.dev));
	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
			   icmp_param->data_len+icmp_param->head_len,
			   icmp_param->head_len,
			   ipc, rt, MSG_DONTWAIT) < 0) {
		ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
		ip_flush_pending_frames(sk);
	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		struct icmphdr *icmph = icmp_hdr(skb);
		__wsum csum = 0;
		struct sk_buff *skb1;

		/* Sum the partial checksums of every queued fragment. */
		skb_queue_walk(&sk->sk_write_queue, skb1) {
			csum = csum_add(csum, skb1->csum);
		}
		/* Copy the header template into place, folding it into
		 * the accumulated checksum. */
		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
						 (char *)icmph,
						 icmp_param->head_len, csum);
		icmph->checksum = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, fl4);
	}
}
/*
 *	Driving logic for building and sending ICMP messages.
 *
 *	icmp_reply() answers a received packet directly (e.g. echo or
 *	timestamp replies): the reply goes back to the packet's source,
 *	with any recorded source route reversed and TOS mirrored.
 */
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct ipcm_cookie ipc;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct flowi4 fl4;
	struct sock *sk;
	struct inet_sock *inet;
	__be32 daddr, saddr;

	/* Reverse the request's IP options; give up if they cannot
	 * be echoed. */
	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
		return;

	sk = icmp_xmit_lock(net);
	if (sk == NULL)
		return;
	inet = inet_sk(sk);

	icmp_param->data.icmph.checksum = 0;

	inet->tos = ip_hdr(skb)->tos;
	daddr = ipc.addr = ip_hdr(skb)->saddr;
	saddr = fib_compute_spec_dst(skb);
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	if (icmp_param->replyopts.opt.opt.optlen) {
		ipc.opt = &icmp_param->replyopts.opt;
		/* Source routing: first hop of the reversed route. */
		if (ipc.opt->opt.srr)
			daddr = icmp_param->replyopts.opt.opt.faddr;
	}
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
	fl4.flowi4_proto = IPPROTO_ICMP;
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		goto out_unlock;
	/* Replies are subject to the same rate limiting as errors. */
	if (icmpv4_xrlim_allow(net, rt, &fl4, icmp_param->data.icmph.type,
			       icmp_param->data.icmph.code))
		icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
}
/*
 * Route an outgoing ICMP error message.
 *
 * A plain output route towards the offender is tried first (honouring a
 * reversed source route if one was echoed into param->replyopts).  If
 * XFRM policy vetoes that route with -EPERM, the flow of the offending
 * packet is decoded in reverse and the lookup retried so that errors
 * for IPsec-carried traffic can still be delivered.  Returns a route
 * or an ERR_PTR.
 */
static struct rtable *icmp_route_lookup(struct net *net,
					struct flowi4 *fl4,
					struct sk_buff *skb_in,
					const struct iphdr *iph,
					__be32 saddr, u8 tos,
					int type, int code,
					struct icmp_bxm *param)
{
	struct rtable *rt, *rt2;
	struct flowi4 fl4_dec;
	int err;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = (param->replyopts.opt.opt.srr ?
		      param->replyopts.opt.opt.faddr : iph->saddr);
	fl4->saddr = saddr;
	fl4->flowi4_tos = RT_TOS(tos);
	fl4->flowi4_proto = IPPROTO_ICMP;
	fl4->fl4_icmp_type = type;
	fl4->fl4_icmp_code = code;
	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
	rt = __ip_route_output_key(net, fl4);
	if (IS_ERR(rt))
		return rt;

	/* No need to clone since we're just using its address. */
	rt2 = rt;

	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
					   flowi4_to_flowi(fl4), NULL, 0);
	if (!IS_ERR(rt)) {
		if (rt != rt2)
			return rt;	/* XFRM substituted its own route */
	} else if (PTR_ERR(rt) == -EPERM) {
		rt = NULL;	/* policy veto: retry with the reverse flow */
	} else
		return rt;

	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
	if (err)
		goto relookup_failed;

	if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) {
		rt2 = __ip_route_output_key(net, &fl4_dec);
		if (IS_ERR(rt2))
			err = PTR_ERR(rt2);
	} else {
		struct flowi4 fl4_2 = {};
		unsigned long orefdst;

		fl4_2.daddr = fl4_dec.saddr;
		rt2 = ip_route_output_key(net, &fl4_2);
		if (IS_ERR(rt2)) {
			err = PTR_ERR(rt2);
			goto relookup_failed;
		}
		/* Ugh! Temporarily redo the input-route lookup for the
		 * decoded flow through skb_in's refdst slot. */
		orefdst = skb_in->_skb_refdst; /* save old refdst */
		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
				     RT_TOS(tos), rt2->dst.dev);

		dst_release(&rt2->dst);
		rt2 = skb_rtable(skb_in);
		skb_in->_skb_refdst = orefdst; /* restore old refdst */
	}

	if (err)
		goto relookup_failed;

	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
					    flowi4_to_flowi(&fl4_dec), NULL,
					    XFRM_LOOKUP_ICMP);
	if (!IS_ERR(rt2)) {
		/* Relookup succeeded: switch to the decoded flow/route. */
		dst_release(&rt->dst);
		memcpy(fl4, &fl4_dec, sizeof(*fl4));
		rt = rt2;
	} else if (PTR_ERR(rt2) == -EPERM) {
		if (rt)
			dst_release(&rt->dst);
		return rt2;
	} else {
		err = PTR_ERR(rt2);
		goto relookup_failed;
	}
	return rt;

relookup_failed:
	if (rt)
		return rt;	/* fall back to the first route, if any */
	return ERR_PTR(err);
}
/*
* Send an ICMP message in response to a situation
*
* RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header.
* MAY send more (we do).
* MUST NOT change this header information.
* MUST NOT reply to a multicast/broadcast IP address.
* MUST NOT reply to a multicast/broadcast MAC address.
* MUST reply to only the first fragment.
*/
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
	struct iphdr *iph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = skb_rtable(skb_in);
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	__be32 saddr;
	u8 tos;
	struct net *net;
	struct sock *sk;

	if (!rt)
		goto out;
	net = dev_net(rt->dst.dev);
	/*
	 *	Find the original header. It is expected to be valid, of course.
	 *	Check this, icmp_send is called from the most obscure devices
	 *	sometimes.
	 */
	iph = ip_hdr(skb_in);
	if ((u8 *)iph < skb_in->head ||
	    (skb_in->network_header + sizeof(*iph)) > skb_in->tail)
		goto out;
	/*
	 *	No replies to physical multicast/broadcast
	 */
	if (skb_in->pkt_type != PACKET_HOST)
		goto out;
	/*
	 *	Now check at the protocol level
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto out;
	/*
	 *	Only reply to fragment 0. We byte re-order the constant
	 *	mask for efficiency.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		goto out;
	/*
	 *	If we send an ICMP error to an ICMP error a mess would result..
	 */
	if (icmp_pointers[type].error) {
		/*
		 *	We are an error, check if we are replying to an
		 *	ICMP error
		 */
		if (iph->protocol == IPPROTO_ICMP) {
			u8 _inner_type, *itp;

			/* Peek at the type field of the quoted inner ICMP header. */
			itp = skb_header_pointer(skb_in,
						 skb_network_header(skb_in) +
						 (iph->ihl << 2) +
						 offsetof(struct icmphdr,
							  type) -
						 skb_in->data,
						 sizeof(_inner_type),
						 &_inner_type);
			if (itp == NULL)
				goto out;
			/*
			 *	Assume any unknown ICMP type is an error. This
			 *	isn't specified by the RFC, but think about it..
			 */
			if (*itp > NR_ICMP_TYPES ||
			    icmp_pointers[*itp].error)
				goto out;
		}
	}

	/* NOTE(review): presumably the per-cpu ICMP tx socket; NULL means
	 * it is already in use on this cpu — confirm in icmp_xmit_lock(). */
	sk = icmp_xmit_lock(net);
	if (sk == NULL)
		return;
	/*
	 *	Construct source address and options.
	 */
	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL)) {
		struct net_device *dev = NULL;

		rcu_read_lock();
		if (rt_is_input_route(rt) &&
		    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
			dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
		if (dev)
			saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
		else
			saddr = 0;	/* let routing pick the source */
		rcu_read_unlock();
	}

	/* Errors carry internet-control precedence; info replies echo the tos. */
	tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
					   IPTOS_PREC_INTERNETCONTROL) :
					  iph->tos;

	if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
		goto out_unlock;

	/*
	 *	Prepare data for ICMP header.
	 */
	icmp_param.data.icmph.type	 = type;
	icmp_param.data.icmph.code	 = code;
	icmp_param.data.icmph.un.gateway = info;
	icmp_param.data.icmph.checksum	 = 0;
	icmp_param.skb	  = skb_in;
	icmp_param.offset = skb_network_offset(skb_in);
	inet_sk(sk)->tos = tos;
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts.opt;
	ipc.tx_flags = 0;

	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
			       type, code, &icmp_param);
	if (IS_ERR(rt))
		goto out_unlock;

	/* Rate-limit ICMP errors per destination. */
	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
		goto ende;

	/* RFC says return as much as we can without exceeding 576 bytes. */

	room = dst_mtu(&rt->dst);
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
	room -= sizeof(struct icmphdr);

	icmp_param.data_len = skb_in->len - icmp_param.offset;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	icmp_param.head_len = sizeof(struct icmphdr);

	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
ende:
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out:;
}
EXPORT_SYMBOL(icmp_send);
/*
 * Hand an ICMP error off to raw sockets and to the error handler of the
 * upper-layer protocol named by the quoted inner IP header.
 */
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
	const struct iphdr *inner_iph = (const struct iphdr *)skb->data;
	const struct net_protocol *proto_ops;
	int proto = inner_iph->protocol;

	/*
	 * Make sure the full inner IP header plus 8 bytes of its payload
	 * are present, so protocol handlers need no extra length checks.
	 */
	if (!pskb_may_pull(skb, inner_iph->ihl * 4 + 8))
		return;

	raw_icmp_error(skb, proto, info);

	rcu_read_lock();
	proto_ops = rcu_dereference(inet_protos[proto]);
	if (proto_ops && proto_ops->err_handler)
		proto_ops->err_handler(skb, info);
	rcu_read_unlock();
}
/*
* Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH.
*/
/*
 * Process a received ICMP error (dest unreachable, time exceeded,
 * source quench, parameter problem) and forward it to the transport
 * layer of the quoted packet.  @info carries the next-hop MTU for
 * FRAG_NEEDED or the problem pointer for PARAMETERPROB.
 */
static void icmp_unreach(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct icmphdr *icmph;
	struct net *net;
	u32 info = 0;

	net = dev_net(skb_dst(skb)->dev);

	/*
	 *	Incomplete header ?
	 * 	Only checks for the IP header, there should be an
	 *	additional check for longer headers in upper levels.
	 */

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out_err;

	icmph = icmp_hdr(skb);
	iph   = (const struct iphdr *)skb->data;

	if (iph->ihl < 5) /* Mangled header, drop. */
		goto out_err;

	if (icmph->type == ICMP_DEST_UNREACH) {
		switch (icmph->code & 15) {
		case ICMP_NET_UNREACH:
		case ICMP_HOST_UNREACH:
		case ICMP_PROT_UNREACH:
		case ICMP_PORT_UNREACH:
			break;
		case ICMP_FRAG_NEEDED:
			if (ipv4_config.no_pmtu_disc) {
				LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
					       &iph->daddr);
			} else {
				/* Pass the advertised next-hop MTU upward. */
				info = ntohs(icmph->un.frag.mtu);
				if (!info)
					goto out;
			}
			break;
		case ICMP_SR_FAILED:
			LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"),
				       &iph->daddr);
			break;
		default:
			break;
		}
		if (icmph->code > NR_ICMP_UNREACH)
			goto out;
	} else if (icmph->type == ICMP_PARAMETERPROB)
		info = ntohl(icmph->un.gateway) >> 24;	/* pointer byte */

	/*
	 *	Throw it at our lower layers
	 *
	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
	 *		  header.
	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
	 *		  transport layer.
	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
	 *		  transport layer.
	 */

	/*
	 *	Check the other end isn't violating RFC 1122. Some routers send
	 *	bogus responses to broadcast frames. If you see this message
	 *	first check your netmask matches at both ends, if it does then
	 *	get the other vendor to fix their kit.
	 */

	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
	    inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
				     &ip_hdr(skb)->saddr,
				     icmph->type, icmph->code,
				     &iph->daddr, skb->dev->name);
		goto out;
	}

	icmp_socket_deliver(skb, info);

out:
	return;
out_err:
	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
	goto out;
}
/*
* Handle ICMP_REDIRECT.
*/
/*
 * Process an ICMP_REDIRECT: pass the new gateway address down to the
 * protocol error handlers once the quoted inner IP header is available.
 */
static void icmp_redirect(struct sk_buff *skb)
{
	/* Too short to even hold the inner IP header: count and drop. */
	if (skb->len < sizeof(struct iphdr)) {
		ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
		return;
	}

	if (pskb_may_pull(skb, sizeof(struct iphdr)))
		icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
}
/*
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
* requests.
* RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
* included in the reply.
* RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
* echo requests, MUST have default=NOT.
* See also WRT handling of options once they are done and working.
*/
static void icmp_echo(struct sk_buff *skb)
{
struct net *net;
net = dev_net(skb_dst(skb)->dev);
if (!net->ipv4.sysctl_icmp_echo_ignore_all) {
struct icmp_bxm icmp_param;
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_ECHOREPLY;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
}
}
/*
* Handle ICMP Timestamp requests.
* RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
* SHOULD be in the kernel for minimum random latency.
* MUST be accurate to a few minutes.
* MUST be updated at least at 15Hz.
*/
static void icmp_timestamp(struct sk_buff *skb)
{
struct timespec tv;
struct icmp_bxm icmp_param;
/*
* Too short.
*/
if (skb->len < 4)
goto out_err;
/*
* Fill in the current time as ms since midnight UT:
*/
getnstimeofday(&tv);
icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
tv.tv_nsec / NSEC_PER_MSEC);
icmp_param.data.times[2] = icmp_param.data.times[1];
if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
BUG();
icmp_param.data.icmph = *icmp_hdr(skb);
icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
icmp_param.data.icmph.code = 0;
icmp_param.skb = skb;
icmp_param.offset = 0;
icmp_param.data_len = 0;
icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb);
out:
return;
out_err:
ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
goto out;
}
/* Sink handler for ICMP types that are deliberately ignored. */
static void icmp_discard(struct sk_buff *skb)
{
}
/*
* Deal with incoming ICMP packets.
*/
/*
 * Entry point for all received ICMP packets: verify IPsec policy and
 * the ICMP checksum, then dispatch to the per-type handler in
 * icmp_pointers[].  Always consumes the skb and returns 0.
 */
int icmp_rcv(struct sk_buff *skb)
{
	struct icmphdr *icmph;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		/*
		 * Policy check failed: only tolerate it when the last
		 * transform the packet traversed is flagged for ICMP.
		 */
		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP))
			goto drop;

		if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr)))
			goto drop;

		/* Re-run the policy check reversed against the quoted inner header. */
		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*icmph));

		if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		skb_set_network_header(skb, nh);
	}

	ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);

	/* Verify the ICMP checksum over the whole message. */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = 0;
		if (__skb_checksum_complete(skb))
			goto csum_error;
	}

	if (!pskb_pull(skb, sizeof(*icmph)))
		goto error;

	icmph = icmp_hdr(skb);

	ICMPMSGIN_INC_STATS_BH(net, icmph->type);
	/*
	 *	18 is the highest 'known' ICMP type. Anything else is a mystery
	 *
	 *	RFC 1122: 3.2.2  Unknown ICMP messages types MUST be silently
	 *		  discarded.
	 */
	if (icmph->type > NR_ICMP_TYPES)
		goto error;

	/*
	 *	Parse the ICMP message
	 */

	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		/*
		 *	RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
		 *	  silently ignored (we let user decide with a sysctl).
		 *	RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
		 *	  discarded if to broadcast/multicast.
		 */
		if ((icmph->type == ICMP_ECHO ||
		     icmph->type == ICMP_TIMESTAMP) &&
		    net->ipv4.sysctl_icmp_echo_ignore_broadcasts) {
			goto error;
		}
		if (icmph->type != ICMP_ECHO &&
		    icmph->type != ICMP_TIMESTAMP &&
		    icmph->type != ICMP_ADDRESS &&
		    icmph->type != ICMP_ADDRESSREPLY) {
			goto error;
		}
	}

	icmp_pointers[icmph->type].handler(skb);

drop:
	kfree_skb(skb);
	return 0;
csum_error:
	ICMP_INC_STATS_BH(net, ICMP_MIB_CSUMERRORS);
error:
	ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
	goto drop;
}
/*
 * Error handler for ICMP itself: called when an ICMP error quotes one
 * of our outgoing ICMP packets.  skb->data points at the quoted inner
 * IP header, while icmp_hdr(skb) is the outer (error) ICMP header.
 */
void icmp_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	int offset = iph->ihl<<2;
	struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset);	/* inner ICMP hdr */
	int type = icmp_hdr(skb)->type;
	int code = icmp_hdr(skb)->code;
	struct net *net = dev_net(skb->dev);

	/*
	 * Use ping_err to handle all icmp errors except those
	 * triggered by ICMP_ECHOREPLY which sent from kernel.
	 */
	if (icmph->type != ICMP_ECHOREPLY) {
		ping_err(skb, offset, info);
		return;
	}

	/* Kernel-sent echo replies: just update PMTU/redirect state. */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
	else if (type == ICMP_REDIRECT)
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
}
/*
* This table is the definition of how we handle ICMP.
*/
/* Per-type dispatch table, indexed by ICMP message type (0..NR_ICMP_TYPES).
 * .error marks types that are ICMP errors (never answered with errors). */
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
	[ICMP_ECHOREPLY] = {
		.handler = ping_rcv,
	},
	[1] = {			/* type 1: reserved */
		.handler = icmp_discard,
		.error = 1,
	},
	[2] = {			/* type 2: reserved */
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_DEST_UNREACH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_SOURCE_QUENCH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_REDIRECT] = {
		.handler = icmp_redirect,
		.error = 1,
	},
	[6] = {			/* type 6: obsolete/unassigned */
		.handler = icmp_discard,
		.error = 1,
	},
	[7] = {			/* type 7: unassigned */
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_ECHO] = {
		.handler = icmp_echo,
	},
	[9] = {			/* type 9: router advertisement - ignored */
		.handler = icmp_discard,
		.error = 1,
	},
	[10] = {		/* type 10: router solicitation - ignored */
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_TIME_EXCEEDED] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_PARAMETERPROB] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_TIMESTAMP] = {
		.handler = icmp_timestamp,
	},
	[ICMP_TIMESTAMPREPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REQUEST] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESS] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESSREPLY] = {
		.handler = icmp_discard,
	},
};
/* Per-namespace teardown: destroy the per-cpu ICMP transmit sockets. */
static void __net_exit icmp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(net->ipv4.icmp_sk[cpu]);

	kfree(net->ipv4.icmp_sk);
	net->ipv4.icmp_sk = NULL;
}
/*
 * Per-namespace setup: create one ICMP transmit socket per possible cpu
 * and initialise the namespace's ICMP sysctl defaults.  Returns 0 or a
 * negative errno; on failure everything created so far is torn down.
 */
static int __net_init icmp_sk_init(struct net *net)
{
	int i, err;

	net->ipv4.icmp_sk =
		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
	if (net->ipv4.icmp_sk == NULL)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct sock *sk;

		err = inet_ctl_sock_create(&sk, PF_INET,
					   SOCK_RAW, IPPROTO_ICMP, net);
		if (err < 0)
			goto fail;

		net->ipv4.icmp_sk[i] = sk;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff/skb_shared_info struct overhead.
		 */
		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);

		/*
		 * Speedup sock_wfree()
		 */
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
	}

	/* Control parameters for ECHO replies. */
	net->ipv4.sysctl_icmp_echo_ignore_all = 0;
	net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;

	/* Control parameter - ignore bogus broadcast responses? */
	net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;

	/*
	 * 	Configurable global rate limit.
	 *
	 *	ratelimit defines tokens/packet consumed for dst->rate_token
	 *	bucket ratemask defines which icmp types are ratelimited by
	 *	setting	it's bit position.
	 *
	 *	default:
	 *	dest unreachable (3), source quench (4),
	 *	time exceeded (11), parameter problem (12)
	 */
	net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
	net->ipv4.sysctl_icmp_ratemask = 0x1818;
	net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;

	return 0;

fail:
	/*
	 * Only destroy sockets that were actually created: the array was
	 * kzalloc'ed, so entries for cpus not yet reached are still NULL,
	 * and inet_ctl_sock_destroy() is not guaranteed to tolerate NULL.
	 */
	for_each_possible_cpu(i) {
		if (net->ipv4.icmp_sk[i])
			inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]);
	}
	kfree(net->ipv4.icmp_sk);
	return err;
}
/* Per-network-namespace setup/teardown of the ICMP transmit sockets. */
static struct pernet_operations __net_initdata icmp_sk_ops = {
	.init = icmp_sk_init,
	.exit = icmp_sk_exit,
};
/* Boot-time registration of the per-namespace ICMP socket setup. */
int __init icmp_init(void)
{
	return register_pernet_subsys(&icmp_sk_ops);
}
| gpl-2.0 |
modulexcite/linux | drivers/mfd/stmpe.c | 192 | 31594 | /*
* ST Microelectronics MFD: stmpe's driver
*
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include "stmpe.h"
/* Enable @blocks (mask of enum stmpe_block) via the variant hook;
 * callers hold stmpe->lock (see stmpe_enable()). */
static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
{
	return stmpe->variant->enable(stmpe, blocks, true);
}
/* Disable @blocks via the variant hook; callers hold stmpe->lock. */
static int __stmpe_disable(struct stmpe *stmpe, unsigned int blocks)
{
	return stmpe->variant->enable(stmpe, blocks, false);
}
/* Read one register; returns the value or a negative errno (logged). */
static int __stmpe_reg_read(struct stmpe *stmpe, u8 reg)
{
	int value = stmpe->ci->read_byte(stmpe, reg);

	if (value < 0)
		dev_err(stmpe->dev, "failed to read reg %#x: %d\n",
			reg, value);

	dev_vdbg(stmpe->dev, "rd: reg %#x => data %#x\n", reg, value);

	return value;
}
/* Write one register; returns 0/positive on success, negative errno on
 * failure (logged). */
static int __stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
{
	int err;

	dev_vdbg(stmpe->dev, "wr: reg %#x <= %#x\n", reg, val);

	err = stmpe->ci->write_byte(stmpe, reg, val);
	if (err < 0)
		dev_err(stmpe->dev, "failed to write reg %#x: %d\n",
			reg, err);

	return err;
}
/* Read-modify-write: replace the bits selected by @mask with @val. */
static int __stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val)
{
	int cur = __stmpe_reg_read(stmpe, reg);

	if (cur < 0)
		return cur;

	cur = (cur & ~mask) | val;

	return __stmpe_reg_write(stmpe, reg, cur);
}
/* Read @length consecutive registers starting at @reg into @values. */
static int __stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length,
			      u8 *values)
{
	int err = stmpe->ci->read_block(stmpe, reg, length, values);

	if (err < 0)
		dev_err(stmpe->dev, "failed to read regs %#x: %d\n",
			reg, err);

	dev_vdbg(stmpe->dev, "rd: reg %#x (%d) => ret %#x\n", reg, length, err);
	stmpe_dump_bytes("stmpe rd: ", values, length);

	return err;
}
/* Write @length consecutive registers starting at @reg from @values. */
static int __stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
			       const u8 *values)
{
	int ret;

	dev_vdbg(stmpe->dev, "wr: regs %#x (%d)\n", reg, length);
	stmpe_dump_bytes("stmpe wr: ", values, length);

	ret = stmpe->ci->write_block(stmpe, reg, length, values);
	if (ret < 0)
		dev_err(stmpe->dev, "failed to write regs %#x: %d\n", reg, ret);

	return ret;
}
/**
 * stmpe_enable - enable blocks on an STMPE device
 * @stmpe: Device to work on
 * @blocks: Mask of blocks (enum stmpe_block values) to enable
 *
 * Serialises against other register access with the device mutex.
 * Returns the result of the variant's enable hook.
 */
int stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
{
	int ret;

	mutex_lock(&stmpe->lock);
	ret = __stmpe_enable(stmpe, blocks);
	mutex_unlock(&stmpe->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_enable);
/**
 * stmpe_disable - disable blocks on an STMPE device
 * @stmpe: Device to work on
 * @blocks: Mask of blocks (enum stmpe_block values) to disable
 *
 * Serialises against other register access with the device mutex.
 * Returns the result of the variant's enable hook.
 */
int stmpe_disable(struct stmpe *stmpe, unsigned int blocks)
{
	int ret;

	mutex_lock(&stmpe->lock);
	ret = __stmpe_disable(stmpe, blocks);
	mutex_unlock(&stmpe->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_disable);
/**
 * stmpe_reg_read() - read a single STMPE register
 * @stmpe: Device to read from
 * @reg: Register to read
 *
 * Returns the value read, or a negative errno on failure.
 */
int stmpe_reg_read(struct stmpe *stmpe, u8 reg)
{
	int ret;

	mutex_lock(&stmpe->lock);
	ret = __stmpe_reg_read(stmpe, reg);
	mutex_unlock(&stmpe->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_reg_read);
/**
 * stmpe_reg_write() - write a single STMPE register
 * @stmpe: Device to write to
 * @reg: Register to write
 * @val: Value to write
 *
 * Returns a negative errno on failure.
 */
int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
{
	int ret;

	mutex_lock(&stmpe->lock);
	ret = __stmpe_reg_write(stmpe, reg, val);
	mutex_unlock(&stmpe->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_reg_write);
/**
 * stmpe_set_bits() - set the value of a bitfield in a STMPE register
 * @stmpe: Device to write to
 * @reg: Register to write
 * @mask: Mask of bits to set
 * @val: Value to set
 *
 * Read-modify-write under the device mutex; bits outside @mask keep
 * their current value.
 */
int stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val)
{
	int ret;

	mutex_lock(&stmpe->lock);
	ret = __stmpe_set_bits(stmpe, reg, mask, val);
	mutex_unlock(&stmpe->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_set_bits);
/**
 * stmpe_block_read() - read multiple STMPE registers
 * @stmpe: Device to read from
 * @reg: First register
 * @length: Number of registers
 * @values: Buffer to write to
 *
 * Returns a negative errno on failure.
 */
int stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values)
{
	int ret;

	mutex_lock(&stmpe->lock);
	ret = __stmpe_block_read(stmpe, reg, length, values);
	mutex_unlock(&stmpe->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_block_read);
/**
 * stmpe_block_write() - write multiple STMPE registers
 * @stmpe: Device to write to
 * @reg: First register
 * @length: Number of registers
 * @values: Values to write
 *
 * Returns a negative errno on failure.
 */
int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
		      const u8 *values)
{
	int ret;

	mutex_lock(&stmpe->lock);
	ret = __stmpe_block_write(stmpe, reg, length, values);
	mutex_unlock(&stmpe->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_block_write);
/**
 * stmpe_set_altfunc()- set the alternate function for STMPE pins
 * @stmpe: Device to configure
 * @pins: Bitmask of pins to affect
 * @block: block to enable alternate functions for
 *
 * @pins is assumed to have a bit set for each of the bits whose alternate
 * function is to be changed, numbered according to the GPIOXY numbers.
 *
 * If the GPIO module is not enabled, this function automatically enables it in
 * order to perform the change.
 */
int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block)
{
	struct stmpe_variant_info *variant = stmpe->variant;
	u8 regaddr = stmpe->regs[STMPE_IDX_GPAFR_U_MSB];
	int af_bits = variant->af_bits;		/* bits per pin in the AF regs */
	int numregs = DIV_ROUND_UP(stmpe->num_gpios * af_bits, 8);
	int mask = (1 << af_bits) - 1;
	u8 regs[8];
	int af, afperreg, ret;

	/* Variants without alternate functions have nothing to do. */
	if (!variant->get_altfunc)
		return 0;

	afperreg = 8 / af_bits;
	mutex_lock(&stmpe->lock);

	ret = __stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
	if (ret < 0)
		goto out;

	ret = __stmpe_block_read(stmpe, regaddr, numregs, regs);
	if (ret < 0)
		goto out;

	af = variant->get_altfunc(stmpe, block);

	/* AF registers run MSB-first: pin 0 lives in the last register. */
	while (pins) {
		int pin = __ffs(pins);
		int regoffset = numregs - (pin / afperreg) - 1;
		int pos = (pin % afperreg) * (8 / afperreg);

		regs[regoffset] &= ~(mask << pos);
		regs[regoffset] |= af << pos;

		pins &= ~(1 << pin);
	}

	ret = __stmpe_block_write(stmpe, regaddr, numregs, regs);

out:
	mutex_unlock(&stmpe->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
/*
* GPIO (all variants)
*/
/* IRQ resource for the GPIO sub-device; range filled in at probe time. */
static struct resource stmpe_gpio_resources[] = {
	/* Start and end filled dynamically */
	{
		.flags	= IORESOURCE_IRQ,
	},
};
/* MFD cell describing the GPIO sub-device (with interrupt support). */
static const struct mfd_cell stmpe_gpio_cell = {
	.name		= "stmpe-gpio",
	.of_compatible	= "st,stmpe-gpio",
	.resources	= stmpe_gpio_resources,
	.num_resources	= ARRAY_SIZE(stmpe_gpio_resources),
};
/* GPIO MFD cell variant used when the chip has no IRQ line wired up. */
static const struct mfd_cell stmpe_gpio_cell_noirq = {
	.name		= "stmpe-gpio",
	.of_compatible	= "st,stmpe-gpio",
	/* gpio cell resources consist of an irq only so no resources here */
};
/*
* Keypad (1601, 2401, 2403)
*/
/* IRQ resources for the keypad sub-device (key events and overflow). */
static struct resource stmpe_keypad_resources[] = {
	{
		.name	= "KEYPAD",
		.flags	= IORESOURCE_IRQ,
	},
	{
		.name	= "KEYPAD_OVER",
		.flags	= IORESOURCE_IRQ,
	},
};
/* MFD cell describing the keypad controller sub-device. */
static const struct mfd_cell stmpe_keypad_cell = {
	.name		= "stmpe-keypad",
	.of_compatible  = "st,stmpe-keypad",
	.resources	= stmpe_keypad_resources,
	.num_resources	= ARRAY_SIZE(stmpe_keypad_resources),
};
/*
* STMPE801
*/
/* STMPE801 register map, indexed by the generic STMPE_IDX_* constants. */
static const u8 stmpe801_regs[] = {
	[STMPE_IDX_CHIP_ID]	= STMPE801_REG_CHIP_ID,
	[STMPE_IDX_ICR_LSB]	= STMPE801_REG_SYS_CTRL,
	[STMPE_IDX_GPMR_LSB]	= STMPE801_REG_GPIO_MP_STA,
	[STMPE_IDX_GPSR_LSB]	= STMPE801_REG_GPIO_SET_PIN,
	[STMPE_IDX_GPCR_LSB]	= STMPE801_REG_GPIO_SET_PIN,	/* set and clear share a register */
	[STMPE_IDX_GPDR_LSB]	= STMPE801_REG_GPIO_DIR,
	[STMPE_IDX_IEGPIOR_LSB] = STMPE801_REG_GPIO_INT_EN,
	[STMPE_IDX_ISGPIOR_MSB] = STMPE801_REG_GPIO_INT_STA,
};
/* STMPE801 sub-blocks when an IRQ line is available: GPIO only. */
static struct stmpe_variant_block stmpe801_blocks[] = {
	{
		.cell	= &stmpe_gpio_cell,
		.irq	= 0,
		.block	= STMPE_BLOCK_GPIO,
	},
};
/* STMPE801 sub-blocks for IRQ-less setups. */
static struct stmpe_variant_block stmpe801_blocks_noirq[] = {
	{
		.cell	= &stmpe_gpio_cell_noirq,
		.block	= STMPE_BLOCK_GPIO,
	},
};
/*
 * The 801 exposes no block gating: a GPIO request trivially succeeds
 * (nothing to switch), anything else is rejected.
 */
static int stmpe801_enable(struct stmpe *stmpe, unsigned int blocks,
			   bool enable)
{
	return (blocks & STMPE_BLOCK_GPIO) ? 0 : -EINVAL;
}
/* Variant description for the STMPE801 (8 GPIOs, IRQ-capable). */
static struct stmpe_variant_info stmpe801 = {
	.name		= "stmpe801",
	.id_val		= STMPE801_ID,
	.id_mask	= 0xffff,
	.num_gpios	= 8,
	.regs		= stmpe801_regs,
	.blocks		= stmpe801_blocks,
	.num_blocks	= ARRAY_SIZE(stmpe801_blocks),
	.num_irqs	= STMPE801_NR_INTERNAL_IRQS,
	.enable		= stmpe801_enable,
};
/* STMPE801 variant used when no IRQ line is wired (no num_irqs). */
static struct stmpe_variant_info stmpe801_noirq = {
	.name		= "stmpe801",
	.id_val		= STMPE801_ID,
	.id_mask	= 0xffff,
	.num_gpios	= 8,
	.regs		= stmpe801_regs,
	.blocks		= stmpe801_blocks_noirq,
	.num_blocks	= ARRAY_SIZE(stmpe801_blocks_noirq),
	.enable		= stmpe801_enable,
};
/*
* Touchscreen (STMPE811 or STMPE610)
*/
/* IRQ resources for the touchscreen sub-device. */
static struct resource stmpe_ts_resources[] = {
	{
		.name	= "TOUCH_DET",
		.flags	= IORESOURCE_IRQ,
	},
	{
		.name	= "FIFO_TH",
		.flags	= IORESOURCE_IRQ,
	},
};
/* MFD cell describing the touchscreen controller sub-device. */
static const struct mfd_cell stmpe_ts_cell = {
	.name		= "stmpe-ts",
	.of_compatible	= "st,stmpe-ts",
	.resources	= stmpe_ts_resources,
	.num_resources	= ARRAY_SIZE(stmpe_ts_resources),
};
/*
* STMPE811 or STMPE610
*/
/* STMPE811/610 register map, indexed by the generic STMPE_IDX_* constants. */
static const u8 stmpe811_regs[] = {
	[STMPE_IDX_CHIP_ID]	= STMPE811_REG_CHIP_ID,
	[STMPE_IDX_ICR_LSB]	= STMPE811_REG_INT_CTRL,
	[STMPE_IDX_IER_LSB]	= STMPE811_REG_INT_EN,
	[STMPE_IDX_ISR_MSB]	= STMPE811_REG_INT_STA,
	[STMPE_IDX_GPMR_LSB]	= STMPE811_REG_GPIO_MP_STA,
	[STMPE_IDX_GPSR_LSB]	= STMPE811_REG_GPIO_SET_PIN,
	[STMPE_IDX_GPCR_LSB]	= STMPE811_REG_GPIO_CLR_PIN,
	[STMPE_IDX_GPDR_LSB]	= STMPE811_REG_GPIO_DIR,
	[STMPE_IDX_GPRER_LSB]	= STMPE811_REG_GPIO_RE,
	[STMPE_IDX_GPFER_LSB]	= STMPE811_REG_GPIO_FE,
	[STMPE_IDX_GPAFR_U_MSB]	= STMPE811_REG_GPIO_AF,
	[STMPE_IDX_IEGPIOR_LSB]	= STMPE811_REG_GPIO_INT_EN,
	[STMPE_IDX_ISGPIOR_MSB]	= STMPE811_REG_GPIO_INT_STA,
	[STMPE_IDX_GPEDR_MSB]	= STMPE811_REG_GPIO_ED,
};
/* STMPE811/610 sub-blocks: GPIO expander and touchscreen controller. */
static struct stmpe_variant_block stmpe811_blocks[] = {
	{
		.cell	= &stmpe_gpio_cell,
		.irq	= STMPE811_IRQ_GPIOC,
		.block	= STMPE_BLOCK_GPIO,
	},
	{
		.cell	= &stmpe_ts_cell,
		.irq	= STMPE811_IRQ_TOUCH_DET,
		.block	= STMPE_BLOCK_TOUCHSCREEN,
	},
};
/*
 * Gate blocks via SYS_CTRL2.  The register holds power-down ("OFF")
 * bits, so the polarity is inverted: set a bit to disable the block,
 * clear it to enable.
 */
static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks,
			   bool enable)
{
	unsigned int off_bits = 0;

	if (blocks & STMPE_BLOCK_GPIO)
		off_bits |= STMPE811_SYS_CTRL2_GPIO_OFF;
	if (blocks & STMPE_BLOCK_ADC)
		off_bits |= STMPE811_SYS_CTRL2_ADC_OFF;
	if (blocks & STMPE_BLOCK_TOUCHSCREEN)
		off_bits |= STMPE811_SYS_CTRL2_TSC_OFF;

	return __stmpe_set_bits(stmpe, STMPE811_REG_SYS_CTRL2, off_bits,
				enable ? 0 : off_bits);
}
static int stmpe811_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
{
	/* 0 for touchscreen, 1 for GPIO */
	if (block == STMPE_BLOCK_TOUCHSCREEN)
		return 0;

	return 1;
}
/* Variant description for the STMPE811 (8 GPIOs, touchscreen, ADC). */
static struct stmpe_variant_info stmpe811 = {
	.name		= "stmpe811",
	.id_val		= 0x0811,
	.id_mask	= 0xffff,
	.num_gpios	= 8,
	.af_bits	= 1,
	.regs		= stmpe811_regs,
	.blocks		= stmpe811_blocks,
	.num_blocks	= ARRAY_SIZE(stmpe811_blocks),
	.num_irqs	= STMPE811_NR_INTERNAL_IRQS,
	.enable		= stmpe811_enable,
	.get_altfunc	= stmpe811_get_altfunc,
};
/* Similar to 811, except number of gpios (6 instead of 8). */
static struct stmpe_variant_info stmpe610 = {
	.name		= "stmpe610",
	.id_val		= 0x0811,	/* shares the 811's chip ID */
	.id_mask	= 0xffff,
	.num_gpios	= 6,
	.af_bits	= 1,
	.regs		= stmpe811_regs,
	.blocks		= stmpe811_blocks,
	.num_blocks	= ARRAY_SIZE(stmpe811_blocks),
	.num_irqs	= STMPE811_NR_INTERNAL_IRQS,
	.enable		= stmpe811_enable,
	.get_altfunc	= stmpe811_get_altfunc,
};
/*
* STMPE1601
*/
/* STMPE1601 register map, indexed by the generic STMPE_IDX_* constants. */
static const u8 stmpe1601_regs[] = {
	[STMPE_IDX_CHIP_ID]	= STMPE1601_REG_CHIP_ID,
	[STMPE_IDX_ICR_LSB]	= STMPE1601_REG_ICR_LSB,
	[STMPE_IDX_IER_LSB]	= STMPE1601_REG_IER_LSB,
	[STMPE_IDX_ISR_MSB]	= STMPE1601_REG_ISR_MSB,
	[STMPE_IDX_GPMR_LSB]	= STMPE1601_REG_GPIO_MP_LSB,
	[STMPE_IDX_GPSR_LSB]	= STMPE1601_REG_GPIO_SET_LSB,
	[STMPE_IDX_GPCR_LSB]	= STMPE1601_REG_GPIO_CLR_LSB,
	[STMPE_IDX_GPDR_LSB]	= STMPE1601_REG_GPIO_SET_DIR_LSB,
	[STMPE_IDX_GPRER_LSB]	= STMPE1601_REG_GPIO_RE_LSB,
	[STMPE_IDX_GPFER_LSB]	= STMPE1601_REG_GPIO_FE_LSB,
	[STMPE_IDX_GPPUR_LSB]	= STMPE1601_REG_GPIO_PU_LSB,
	[STMPE_IDX_GPAFR_U_MSB]	= STMPE1601_REG_GPIO_AF_U_MSB,
	[STMPE_IDX_IEGPIOR_LSB]	= STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
	[STMPE_IDX_ISGPIOR_MSB]	= STMPE1601_REG_INT_STA_GPIO_MSB,
	[STMPE_IDX_GPEDR_MSB]	= STMPE1601_REG_GPIO_ED_MSB,
};
/* STMPE1601 sub-blocks: GPIO expander and keypad controller. */
static struct stmpe_variant_block stmpe1601_blocks[] = {
	{
		.cell	= &stmpe_gpio_cell,
		.irq	= STMPE1601_IRQ_GPIOC,
		.block	= STMPE_BLOCK_GPIO,
	},
	{
		.cell	= &stmpe_keypad_cell,
		.irq    = STMPE1601_IRQ_KEYPAD,
		.block  = STMPE_BLOCK_KEYPAD,
	},
};
/* supported autosleep timeout delay (in msecs) */
static const int stmpe_autosleep_delay[] = {
	4, 16, 32, 64, 128, 256, 512, 1024,
};

/*
 * Map a requested autosleep timeout (msecs) to the index of the
 * smallest supported delay that is at least as long.  Returns the
 * index, or -EINVAL when the request exceeds the longest supported
 * delay (we must not silently round down).
 */
static int stmpe_round_timeout(int timeout)
{
	size_t idx;
	const size_t count = sizeof(stmpe_autosleep_delay) /
			     sizeof(stmpe_autosleep_delay[0]);

	for (idx = 0; idx < count; idx++) {
		if (stmpe_autosleep_delay[idx] >= timeout)
			return (int)idx;
	}

	return -EINVAL;
}
/* Configure the chip's autosleep timeout (msecs) under the device
 * mutex; returns -ENOSYS when the variant has no autosleep support. */
static int stmpe_autosleep(struct stmpe *stmpe, int autosleep_timeout)
{
	int ret;

	if (!stmpe->variant->enable_autosleep)
		return -ENOSYS;

	mutex_lock(&stmpe->lock);
	ret = stmpe->variant->enable_autosleep(stmpe, autosleep_timeout);
	mutex_unlock(&stmpe->lock);

	return ret;
}
/*
 * Both stmpe 1601/2403 support same layout for autosleep
 */
static int stmpe1601_autosleep(struct stmpe *stmpe,
		int autosleep_timeout)
{
	int ret, timeout;

	/* choose the best available timeout */
	timeout = stmpe_round_timeout(autosleep_timeout);
	if (timeout < 0) {
		dev_err(stmpe->dev, "invalid timeout\n");
		return timeout;
	}

	/* Program the timeout index, then turn autosleep on. */
	ret = __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
			STMPE1601_AUTOSLEEP_TIMEOUT_MASK,
			timeout);
	if (ret < 0)
		return ret;

	return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL2,
			STPME1601_AUTOSLEEP_ENABLE,
			STPME1601_AUTOSLEEP_ENABLE);
}
/*
 * Gate the GPIO, keypad and PWM blocks on or off via SYS_CTRL.
 *
 * The previous version also had "else mask &= ~...;" branches, but
 * since @mask starts at zero those clears were no-ops and have been
 * dropped.  Bits for blocks not selected in @blocks stay untouched in
 * the register because __stmpe_set_bits() only changes bits in @mask.
 */
static int stmpe1601_enable(struct stmpe *stmpe, unsigned int blocks,
			    bool enable)
{
	unsigned int mask = 0;

	if (blocks & STMPE_BLOCK_GPIO)
		mask |= STMPE1601_SYS_CTRL_ENABLE_GPIO;

	if (blocks & STMPE_BLOCK_KEYPAD)
		mask |= STMPE1601_SYS_CTRL_ENABLE_KPC;

	if (blocks & STMPE_BLOCK_PWM)
		mask |= STMPE1601_SYS_CTRL_ENABLE_SPWM;

	return __stmpe_set_bits(stmpe, STMPE1601_REG_SYS_CTRL, mask,
				enable ? mask : 0);
}
/* Alternate-function codes on the 1601: 2 = PWM, 1 = keypad, 0 = GPIO. */
static int stmpe1601_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
{
	if (block == STMPE_BLOCK_PWM)
		return 2;

	if (block == STMPE_BLOCK_KEYPAD)
		return 1;

	return 0;
}
/* Variant description for the STMPE1601 (16 GPIOs, keypad, autosleep). */
static struct stmpe_variant_info stmpe1601 = {
	.name		= "stmpe1601",
	.id_val		= 0x0210,
	.id_mask	= 0xfff0,	/* at least 0x0210 and 0x0212 */
	.num_gpios	= 16,
	.af_bits	= 2,
	.regs		= stmpe1601_regs,
	.blocks		= stmpe1601_blocks,
	.num_blocks	= ARRAY_SIZE(stmpe1601_blocks),
	.num_irqs	= STMPE1601_NR_INTERNAL_IRQS,
	.enable		= stmpe1601_enable,
	.get_altfunc	= stmpe1601_get_altfunc,
	.enable_autosleep	= stmpe1601_autosleep,
};
/*
* STMPE1801
*/
/* STMPE1801 register map, indexed by the generic STMPE_IDX_* constants. */
static const u8 stmpe1801_regs[] = {
	[STMPE_IDX_CHIP_ID]	= STMPE1801_REG_CHIP_ID,
	[STMPE_IDX_ICR_LSB]	= STMPE1801_REG_INT_CTRL_LOW,
	[STMPE_IDX_IER_LSB]	= STMPE1801_REG_INT_EN_MASK_LOW,
	[STMPE_IDX_ISR_LSB]	= STMPE1801_REG_INT_STA_LOW,
	[STMPE_IDX_GPMR_LSB]	= STMPE1801_REG_GPIO_MP_LOW,
	[STMPE_IDX_GPSR_LSB]	= STMPE1801_REG_GPIO_SET_LOW,
	[STMPE_IDX_GPCR_LSB]	= STMPE1801_REG_GPIO_CLR_LOW,
	[STMPE_IDX_GPDR_LSB]	= STMPE1801_REG_GPIO_SET_DIR_LOW,
	[STMPE_IDX_GPRER_LSB]	= STMPE1801_REG_GPIO_RE_LOW,
	[STMPE_IDX_GPFER_LSB]	= STMPE1801_REG_GPIO_FE_LOW,
	[STMPE_IDX_GPPUR_LSB]	= STMPE1801_REG_GPIO_PULL_UP_LOW,
	[STMPE_IDX_IEGPIOR_LSB]	= STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
	[STMPE_IDX_ISGPIOR_LSB]	= STMPE1801_REG_INT_STA_GPIO_LOW,
};
/* STMPE1801 sub-blocks: GPIO expander and keypad controller. */
static struct stmpe_variant_block stmpe1801_blocks[] = {
	{
		.cell	= &stmpe_gpio_cell,
		.irq	= STMPE1801_IRQ_GPIOC,
		.block	= STMPE_BLOCK_GPIO,
	},
	{
		.cell	= &stmpe_keypad_cell,
		.irq	= STMPE1801_IRQ_KEYPAD,
		.block	= STMPE_BLOCK_KEYPAD,
	},
};
/* On the 1801, "enabling" a block means unmasking its interrupt. */
static int stmpe1801_enable(struct stmpe *stmpe, unsigned int blocks,
			    bool enable)
{
	unsigned int irq_mask = 0;

	if (blocks & STMPE_BLOCK_GPIO)
		irq_mask |= STMPE1801_MSK_INT_EN_GPIO;

	if (blocks & STMPE_BLOCK_KEYPAD)
		irq_mask |= STMPE1801_MSK_INT_EN_KPC;

	return __stmpe_set_bits(stmpe, STMPE1801_REG_INT_EN_MASK_LOW,
				irq_mask, enable ? irq_mask : 0);
}
/*
 * Soft-reset the STMPE1801: set the reset bit in SYS_CTRL, then poll
 * (every 100-200us, for up to ~100ms) until the chip clears it again.
 * Returns 0 on success, -EIO on timeout, or a register-access errno.
 */
static int stmpe1801_reset(struct stmpe *stmpe)
{
	unsigned long timeout;
	int ret = 0;

	ret = __stmpe_set_bits(stmpe, STMPE1801_REG_SYS_CTRL,
		STMPE1801_MSK_SYS_CTRL_RESET, STMPE1801_MSK_SYS_CTRL_RESET);
	if (ret < 0)
		return ret;

	timeout = jiffies + msecs_to_jiffies(100);
	while (time_before(jiffies, timeout)) {
		ret = __stmpe_reg_read(stmpe, STMPE1801_REG_SYS_CTRL);
		if (ret < 0)
			return ret;
		if (!(ret & STMPE1801_MSK_SYS_CTRL_RESET))
			return 0;	/* reset completed */
		usleep_range(100, 200);
	}
	return -EIO;
}
/* Variant description for the STMPE1801 (18 GPIOs, keypad, no AF). */
static struct stmpe_variant_info stmpe1801 = {
	.name		= "stmpe1801",
	.id_val		= STMPE1801_ID,
	.id_mask	= 0xfff0,
	.num_gpios	= 18,
	.af_bits	= 0,
	.regs		= stmpe1801_regs,
	.blocks		= stmpe1801_blocks,
	.num_blocks	= ARRAY_SIZE(stmpe1801_blocks),
	.num_irqs	= STMPE1801_NR_INTERNAL_IRQS,
	.enable		= stmpe1801_enable,
	/* stmpe1801 do not have any gpio alternate function */
	.get_altfunc	= NULL,
};
/*
* STMPE24XX
*/
/* Generic register-index to STMPE24xx register-address translation table. */
static const u8 stmpe24xx_regs[] = {
[STMPE_IDX_CHIP_ID] = STMPE24XX_REG_CHIP_ID,
[STMPE_IDX_ICR_LSB] = STMPE24XX_REG_ICR_LSB,
[STMPE_IDX_IER_LSB] = STMPE24XX_REG_IER_LSB,
[STMPE_IDX_ISR_MSB] = STMPE24XX_REG_ISR_MSB,
[STMPE_IDX_GPMR_LSB] = STMPE24XX_REG_GPMR_LSB,
[STMPE_IDX_GPSR_LSB] = STMPE24XX_REG_GPSR_LSB,
[STMPE_IDX_GPCR_LSB] = STMPE24XX_REG_GPCR_LSB,
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
[STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
[STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
[STMPE_IDX_GPEDR_MSB] = STMPE24XX_REG_GPEDR_MSB,
};
/* Sub-blocks present on the STMPE24xx: GPIO expander and keypad. */
static struct stmpe_variant_block stmpe24xx_blocks[] = {
{
.cell = &stmpe_gpio_cell,
.irq = STMPE24XX_IRQ_GPIOC,
.block = STMPE_BLOCK_GPIO,
},
{
.cell = &stmpe_keypad_cell,
.irq = STMPE24XX_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
};
/*
 * Gate the requested STMPE24xx sub-blocks on or off via the system
 * control register. Returns the result of the register update.
 */
static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks,
			    bool enable)
{
	unsigned int en_bits = 0;
	unsigned int val;

	if (blocks & STMPE_BLOCK_GPIO)
		en_bits |= STMPE24XX_SYS_CTRL_ENABLE_GPIO;
	if (blocks & STMPE_BLOCK_KEYPAD)
		en_bits |= STMPE24XX_SYS_CTRL_ENABLE_KPC;

	val = enable ? en_bits : 0;
	return __stmpe_set_bits(stmpe, STMPE24XX_REG_SYS_CTRL, en_bits, val);
}
/*
 * Return the GPIO alternate-function number that routes a pin to the
 * given sub-block on STMPE24xx parts: 2 = rotator, 1 = keypad,
 * 0 = plain GPIO (and anything unrecognised).
 */
static int stmpe24xx_get_altfunc(struct stmpe *stmpe, enum stmpe_block block)
{
	if (block == STMPE_BLOCK_ROTATOR)
		return 2;

	if (block == STMPE_BLOCK_KEYPAD)
		return 1;

	/* STMPE_BLOCK_GPIO and everything else map to function 0. */
	return 0;
}
/* Variant descriptor for the STMPE2401 (24 GPIOs, 2 altfunc bits). */
static struct stmpe_variant_info stmpe2401 = {
.name = "stmpe2401",
.id_val = 0x0101,
.id_mask = 0xffff,
.num_gpios = 24,
.af_bits = 2,
.regs = stmpe24xx_regs,
.blocks = stmpe24xx_blocks,
.num_blocks = ARRAY_SIZE(stmpe24xx_blocks),
.num_irqs = STMPE24XX_NR_INTERNAL_IRQS,
.enable = stmpe24xx_enable,
.get_altfunc = stmpe24xx_get_altfunc,
};
/* Variant descriptor for the STMPE2403; adds autosleep support. */
static struct stmpe_variant_info stmpe2403 = {
.name = "stmpe2403",
.id_val = 0x0120,
.id_mask = 0xffff,
.num_gpios = 24,
.af_bits = 2,
.regs = stmpe24xx_regs,
.blocks = stmpe24xx_blocks,
.num_blocks = ARRAY_SIZE(stmpe24xx_blocks),
.num_irqs = STMPE24XX_NR_INTERNAL_IRQS,
.enable = stmpe24xx_enable,
.get_altfunc = stmpe24xx_get_altfunc,
.enable_autosleep = stmpe1601_autosleep, /* same as stmpe1601 */
};
/* Part-number to variant-descriptor lookup used at probe time. */
static struct stmpe_variant_info *stmpe_variant_info[STMPE_NBR_PARTS] = {
[STMPE610] = &stmpe610,
[STMPE801] = &stmpe801,
[STMPE811] = &stmpe811,
[STMPE1601] = &stmpe1601,
[STMPE1801] = &stmpe1801,
[STMPE2401] = &stmpe2401,
[STMPE2403] = &stmpe2403,
};
/*
 * These devices can be connected in a 'no-irq' configuration - the irq pin
 * is not used and the device cannot interrupt the CPU. Here we only list
 * devices which support this configuration - the driver will fail probing
 * for any devices not listed here which are configured in this way.
 */
static struct stmpe_variant_info *stmpe_noirq_variant_info[STMPE_NBR_PARTS] = {
[STMPE801] = &stmpe801_noirq,
};
/*
 * Threaded interrupt handler: read the interrupt status bytes and
 * dispatch one nested virq per pending source that software has enabled,
 * then acknowledge the handled bits.
 */
static irqreturn_t stmpe_irq(int irq, void *data)
{
struct stmpe *stmpe = data;
struct stmpe_variant_info *variant = stmpe->variant;
/* Number of status bytes covering all internal interrupt lines. */
int num = DIV_ROUND_UP(variant->num_irqs, 8);
u8 israddr;
u8 isr[3]; /* sized for the largest variant's status read */
int ret;
int i;
/*
 * The STMPE801 has no interrupt status register: any edge on the irq
 * line simply means its single internal source (hwirq 0) fired.
 */
if (variant->id_val == STMPE801_ID) {
int base = irq_create_mapping(stmpe->domain, 0);
handle_nested_irq(base);
return IRQ_HANDLED;
}
/*
 * NOTE(review): the STMPE1801 read starts at the LSB status register
 * while other variants start at the MSB, yet the same bank reversal
 * below is applied in both cases - confirm against the 1801 datasheet.
 */
if (variant->id_val == STMPE1801_ID)
israddr = stmpe->regs[STMPE_IDX_ISR_LSB];
else
israddr = stmpe->regs[STMPE_IDX_ISR_MSB];
ret = stmpe_block_read(stmpe, israddr, num, isr);
if (ret < 0)
return IRQ_NONE;
for (i = 0; i < num; i++) {
/* Bytes arrive MSB first: map read byte i to its ier[] bank. */
int bank = num - i - 1;
u8 status = isr[i];
u8 clear;
/* Only dispatch sources enabled in the cached enable mask. */
status &= stmpe->ier[bank];
if (!status)
continue;
clear = status;
while (status) {
int bit = __ffs(status);
int line = bank * 8 + bit;
int nestedirq = irq_create_mapping(stmpe->domain, line);
handle_nested_irq(nestedirq);
status &= ~(1 << bit);
}
/* Acknowledge everything that was handled in this byte. */
stmpe_reg_write(stmpe, israddr + i, clear);
}
return IRQ_HANDLED;
}
/*
 * irq_chip bus-lock hook: serialise updates to the cached interrupt
 * enable masks until stmpe_irq_sync_unlock() flushes them to hardware.
 */
static void stmpe_irq_lock(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
mutex_lock(&stmpe->irq_lock);
}
/*
 * irq_chip bus-sync-unlock hook: write any enable-mask bytes that changed
 * while the bus lock was held out to the chip, then drop the lock.
 */
static void stmpe_irq_sync_unlock(struct irq_data *data)
{
struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
struct stmpe_variant_info *variant = stmpe->variant;
int num = DIV_ROUND_UP(variant->num_irqs, 8);
int i;
for (i = 0; i < num; i++) {
u8 new = stmpe->ier[i];
u8 old = stmpe->oldier[i];
/* Skip bytes that did not change to save bus transfers. */
if (new == old)
continue;
stmpe->oldier[i] = new;
/*
 * Bank i lives at decreasing addresses from the LSB enable
 * register (LSB first, MSB at lower index + offset).
 */
stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_IER_LSB] - i, new);
}
mutex_unlock(&stmpe->irq_lock);
}
/*
 * Mask one interrupt source in the cached enable mask; the change is
 * written to hardware later by stmpe_irq_sync_unlock().
 */
static void stmpe_irq_mask(struct irq_data *data)
{
	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
	int line = data->hwirq;

	stmpe->ier[line / 8] &= ~(1 << (line % 8));
}
/*
 * Unmask one interrupt source in the cached enable mask; the change is
 * written to hardware later by stmpe_irq_sync_unlock().
 */
static void stmpe_irq_unmask(struct irq_data *data)
{
	struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
	int line = data->hwirq;

	stmpe->ier[line / 8] |= 1 << (line % 8);
}
/* irq_chip using cached masks flushed on bus-sync-unlock (slow-bus chip). */
static struct irq_chip stmpe_irq_chip = {
.name = "stmpe",
.irq_bus_lock = stmpe_irq_lock,
.irq_bus_sync_unlock = stmpe_irq_sync_unlock,
.irq_mask = stmpe_irq_mask,
.irq_unmask = stmpe_irq_unmask,
};
/*
 * irq_domain map hook: attach the stmpe irq_chip (except for the
 * STMPE801, which has no maskable sources and gets a NULL chip) and
 * mark the virq as a nested-thread interrupt.
 */
static int stmpe_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
struct stmpe *stmpe = d->host_data;
struct irq_chip *chip = NULL;
if (stmpe->variant->id_val != STMPE801_ID)
chip = &stmpe_irq_chip;
irq_set_chip_data(virq, stmpe);
irq_set_chip_and_handler(virq, chip, handle_edge_irq);
irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
set_irq_flags(virq, IRQF_VALID);
#else
irq_set_noprobe(virq);
#endif
return 0;
}
/* irq_domain unmap hook: undo everything stmpe_irq_map() set up. */
static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
{
#ifdef CONFIG_ARM
set_irq_flags(virq, 0);
#endif
irq_set_chip_and_handler(virq, NULL, NULL);
irq_set_chip_data(virq, NULL);
}
/* Domain ops; two-cell DT interrupt specifiers (line + trigger type). */
static const struct irq_domain_ops stmpe_irq_ops = {
.map = stmpe_irq_map,
.unmap = stmpe_irq_unmap,
.xlate = irq_domain_xlate_twocell,
};
/*
 * Create the irq domain covering all of the variant's internal interrupt
 * lines. Returns 0 on success or -ENOSYS if the domain cannot be created.
 */
static int stmpe_irq_init(struct stmpe *stmpe, struct device_node *np)
{
/* base 0 lets irq_domain_add_simple pick/allocate descriptors */
int base = 0;
int num_irqs = stmpe->variant->num_irqs;
stmpe->domain = irq_domain_add_simple(np, num_irqs, base,
&stmpe_irq_ops, stmpe);
if (!stmpe->domain) {
dev_err(stmpe->dev, "Failed to create irqdomain\n");
return -ENOSYS;
}
return 0;
}
/*
 * One-time chip initialisation: verify the chip id against the variant,
 * disable all sub-blocks, reset the STMPE1801, work out the interrupt
 * control register value for the requested trigger, configure autosleep,
 * and finally write the ICR.
 */
static int stmpe_chip_init(struct stmpe *stmpe)
{
unsigned int irq_trigger = stmpe->pdata->irq_trigger;
int autosleep_timeout = stmpe->pdata->autosleep_timeout;
struct stmpe_variant_info *variant = stmpe->variant;
u8 icr = 0;
unsigned int id;
u8 data[2];
int ret;
/* The chip id is a 16-bit big-endian value read as two bytes. */
ret = stmpe_block_read(stmpe, stmpe->regs[STMPE_IDX_CHIP_ID],
ARRAY_SIZE(data), data);
if (ret < 0)
return ret;
id = (data[0] << 8) | data[1];
if ((id & variant->id_mask) != variant->id_val) {
dev_err(stmpe->dev, "unknown chip id: %#x\n", id);
return -EINVAL;
}
dev_info(stmpe->dev, "%s detected, chip id: %#x\n", variant->name, id);
/* Disable all modules -- subdrivers should enable what they need. */
ret = stmpe_disable(stmpe, ~0);
if (ret)
return ret;
if (id == STMPE1801_ID) {
ret = stmpe1801_reset(stmpe);
if (ret < 0)
return ret;
}
/* Only configure interrupt output when an irq line is wired up. */
if (stmpe->irq >= 0) {
if (id == STMPE801_ID)
icr = STMPE801_REG_SYS_CTRL_INT_EN;
else
icr = STMPE_ICR_LSB_GIM;
/* STMPE801 doesn't support Edge interrupts */
if (id != STMPE801_ID) {
if (irq_trigger == IRQF_TRIGGER_FALLING ||
irq_trigger == IRQF_TRIGGER_RISING)
icr |= STMPE_ICR_LSB_EDGE;
}
/* Active-high / rising trigger selects high polarity. */
if (irq_trigger == IRQF_TRIGGER_RISING ||
irq_trigger == IRQF_TRIGGER_HIGH) {
if (id == STMPE801_ID)
icr |= STMPE801_REG_SYS_CTRL_INT_HI;
else
icr |= STMPE_ICR_LSB_HIGH;
}
}
if (stmpe->pdata->autosleep) {
ret = stmpe_autosleep(stmpe, autosleep_timeout);
if (ret)
return ret;
}
/* Write the interrupt control register last. */
return stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_ICR_LSB], icr);
}
/* Register a single mfd cell as a child device sharing our irq domain. */
static int stmpe_add_device(struct stmpe *stmpe, const struct mfd_cell *cell)
{
return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
NULL, 0, stmpe->domain);
}
/*
 * Register child devices for every sub-block requested by platform data
 * that this variant actually provides. Returns the last registration
 * result, or -EINVAL if no block was registered at all.
 */
static int stmpe_devices_init(struct stmpe *stmpe)
{
struct stmpe_variant_info *variant = stmpe->variant;
unsigned int platform_blocks = stmpe->pdata->blocks;
int ret = -EINVAL;
int i, j;
for (i = 0; i < variant->num_blocks; i++) {
struct stmpe_variant_block *block = &variant->blocks[i];
if (!(platform_blocks & block->block))
continue;
for (j = 0; j < block->cell->num_resources; j++) {
/*
 * NOTE(review): this casts away const and patches the
 * shared mfd_cell resource table in place - confirm the
 * cells are not shared between concurrently probed chips.
 */
struct resource *res =
(struct resource *) &block->cell->resources[j];
/* Dynamically fill in a variant's IRQ. */
if (res->flags & IORESOURCE_IRQ)
res->start = res->end = block->irq + j;
}
platform_blocks &= ~block->block;
ret = stmpe_add_device(stmpe, block->cell);
if (ret)
return ret;
}
/* Anything left over was requested but is not on this variant. */
if (platform_blocks)
dev_warn(stmpe->dev,
"platform wants blocks (%#x) not present on variant",
platform_blocks);
return ret;
}
/*
 * Fill platform data from the device tree: device id alias, irq gpio
 * and trigger, autosleep timeout, and the set of sub-blocks selected by
 * the presence of correspondingly named child nodes.
 */
static void stmpe_of_probe(struct stmpe_platform_data *pdata,
struct device_node *np)
{
struct device_node *child;
pdata->id = of_alias_get_id(np, "stmpe-i2c");
/* No alias: let mfd_add_devices auto-assign (-1). */
if (pdata->id < 0)
pdata->id = -1;
pdata->irq_gpio = of_get_named_gpio_flags(np, "irq-gpio", 0,
&pdata->irq_trigger);
if (gpio_is_valid(pdata->irq_gpio))
pdata->irq_over_gpio = 1;
else
pdata->irq_trigger = IRQF_TRIGGER_NONE;
of_property_read_u32(np, "st,autosleep-timeout",
&pdata->autosleep_timeout);
/* A non-zero timeout implicitly enables autosleep. */
pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
/* Child node names select which sub-blocks to enable. */
for_each_child_of_node(np, child) {
if (!strcmp(child->name, "stmpe_gpio")) {
pdata->blocks |= STMPE_BLOCK_GPIO;
} else if (!strcmp(child->name, "stmpe_keypad")) {
pdata->blocks |= STMPE_BLOCK_KEYPAD;
} else if (!strcmp(child->name, "stmpe_touchscreen")) {
pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
} else if (!strcmp(child->name, "stmpe_adc")) {
pdata->blocks |= STMPE_BLOCK_ADC;
} else if (!strcmp(child->name, "stmpe_pwm")) {
pdata->blocks |= STMPE_BLOCK_PWM;
} else if (!strcmp(child->name, "stmpe_rotator")) {
pdata->blocks |= STMPE_BLOCK_ROTATOR;
}
}
}
/* Called from client specific probe routines */
/*
 * Common probe: allocates and fills the stmpe structure, enables the
 * optional supplies, resolves the irq (gpio-based, direct, or no-irq
 * mode), initialises the chip and irq domain, and registers children.
 * Returns 0 on success or a negative errno.
 */
int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
{
struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
int ret;
/* Without platform data, fall back to device tree configuration. */
if (!pdata) {
if (!np)
return -EINVAL;
pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
stmpe_of_probe(pdata, np);
/* No "interrupts" property means no-irq operation. */
if (of_find_property(np, "interrupts", NULL) == NULL)
ci->irq = -1;
}
stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
if (!stmpe)
return -ENOMEM;
mutex_init(&stmpe->irq_lock);
mutex_init(&stmpe->lock);
stmpe->dev = ci->dev;
stmpe->client = ci->client;
stmpe->pdata = pdata;
stmpe->ci = ci;
stmpe->partnum = partnum;
stmpe->variant = stmpe_variant_info[partnum];
stmpe->regs = stmpe->variant->regs;
stmpe->num_gpios = stmpe->variant->num_gpios;
/* Both supplies are optional; failure to enable is only warned. */
stmpe->vcc = devm_regulator_get_optional(ci->dev, "vcc");
if (!IS_ERR(stmpe->vcc)) {
ret = regulator_enable(stmpe->vcc);
if (ret)
dev_warn(ci->dev, "failed to enable VCC supply\n");
}
stmpe->vio = devm_regulator_get_optional(ci->dev, "vio");
if (!IS_ERR(stmpe->vio)) {
ret = regulator_enable(stmpe->vio);
if (ret)
dev_warn(ci->dev, "failed to enable VIO supply\n");
}
dev_set_drvdata(stmpe->dev, stmpe);
/* Give the bus-specific client a chance to finish its setup. */
if (ci->init)
ci->init(stmpe);
if (pdata->irq_over_gpio) {
ret = devm_gpio_request_one(ci->dev, pdata->irq_gpio,
GPIOF_DIR_IN, "stmpe");
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
ret);
return ret;
}
stmpe->irq = gpio_to_irq(pdata->irq_gpio);
} else {
stmpe->irq = ci->irq;
}
if (stmpe->irq < 0) {
/* use alternate variant info for no-irq mode, if supported */
dev_info(stmpe->dev,
"%s configured in no-irq mode by platform data\n",
stmpe->variant->name);
if (!stmpe_noirq_variant_info[stmpe->partnum]) {
dev_err(stmpe->dev,
"%s does not support no-irq mode!\n",
stmpe->variant->name);
return -ENODEV;
}
stmpe->variant = stmpe_noirq_variant_info[stmpe->partnum];
} else if (pdata->irq_trigger == IRQF_TRIGGER_NONE) {
/* Inherit the trigger type already configured on the line. */
pdata->irq_trigger = irq_get_trigger_type(stmpe->irq);
}
ret = stmpe_chip_init(stmpe);
if (ret)
return ret;
if (stmpe->irq >= 0) {
ret = stmpe_irq_init(stmpe, np);
if (ret)
return ret;
/* Threaded handler: register access sleeps on the bus. */
ret = devm_request_threaded_irq(ci->dev, stmpe->irq, NULL,
stmpe_irq, pdata->irq_trigger | IRQF_ONESHOT,
"stmpe", stmpe);
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ: %d\n",
ret);
return ret;
}
}
ret = stmpe_devices_init(stmpe);
if (!ret)
return 0;
/* Roll back any children that were registered before the failure. */
dev_err(stmpe->dev, "failed to add children\n");
mfd_remove_devices(stmpe->dev);
return ret;
}
/* Common remove: drop the optional supplies and unregister children. */
int stmpe_remove(struct stmpe *stmpe)
{
if (!IS_ERR(stmpe->vio))
regulator_disable(stmpe->vio);
if (!IS_ERR(stmpe->vcc))
regulator_disable(stmpe->vcc);
mfd_remove_devices(stmpe->dev);
return 0;
}
#ifdef CONFIG_PM
/* Arm the irq as a wakeup source when the device may wake the system. */
static int stmpe_suspend(struct device *dev)
{
struct stmpe *stmpe = dev_get_drvdata(dev);
if (stmpe->irq >= 0 && device_may_wakeup(dev))
enable_irq_wake(stmpe->irq);
return 0;
}
/* Counterpart of stmpe_suspend(): disarm the wakeup irq. */
static int stmpe_resume(struct device *dev)
{
struct stmpe *stmpe = dev_get_drvdata(dev);
if (stmpe->irq >= 0 && device_may_wakeup(dev))
disable_irq_wake(stmpe->irq);
return 0;
}
const struct dev_pm_ops stmpe_dev_pm_ops = {
.suspend = stmpe_suspend,
.resume = stmpe_resume,
};
#endif
| gpl-2.0 |
k2wl/3.0.y | drivers/hwmon/lineage-pem.c | 704 | 16130 | /*
* Driver for Lineage Compact Power Line series of power entry modules.
*
* Copyright (C) 2010, 2011 Ericsson AB.
*
* Documentation:
* http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
/*
* This driver supports various Lineage Compact Power Line DC/DC and AC/DC
* converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
*
* The devices are nominally PMBus compliant. However, most standard PMBus
* commands are not supported. Specifically, all hardware monitoring and
* status reporting commands are non-standard. For this reason, a standard
* PMBus driver can not be used.
*
* All Lineage CPL devices have a built-in I2C bus master selector (PCA9541).
* To ensure device access, this driver should only be used as client driver
* to the pca9541 I2C master selector driver.
*/
/* Command codes */
#define PEM_OPERATION 0x01
#define PEM_CLEAR_INFO_FLAGS 0x03
#define PEM_VOUT_COMMAND 0x21
#define PEM_VOUT_OV_FAULT_LIMIT 0x40
#define PEM_READ_DATA_STRING 0xd0
#define PEM_READ_INPUT_STRING 0xdc
#define PEM_READ_FIRMWARE_REV 0xdd
#define PEM_READ_RUN_TIMER 0xde
#define PEM_FAN_HI_SPEED 0xdf
#define PEM_FAN_NORMAL_SPEED 0xe0
#define PEM_READ_FAN_SPEED 0xe1
/* offsets in data string */
#define PEM_DATA_STATUS_2 0
#define PEM_DATA_STATUS_1 1
#define PEM_DATA_ALARM_2 2
#define PEM_DATA_ALARM_1 3
#define PEM_DATA_VOUT_LSB 4
#define PEM_DATA_VOUT_MSB 5
#define PEM_DATA_CURRENT 6
#define PEM_DATA_TEMP 7
/* Virtual entries, to report constants */
#define PEM_DATA_TEMP_MAX 10
#define PEM_DATA_TEMP_CRIT 11
/* offsets in input string */
#define PEM_INPUT_VOLTAGE 0
#define PEM_INPUT_POWER_LSB 1
#define PEM_INPUT_POWER_MSB 2
/* offsets in fan data */
#define PEM_FAN_ADJUSTMENT 0
#define PEM_FAN_FAN1 1
#define PEM_FAN_FAN2 2
#define PEM_FAN_FAN3 3
/* Status register bits */
#define STS1_OUTPUT_ON (1 << 0)
#define STS1_LEDS_FLASHING (1 << 1)
#define STS1_EXT_FAULT (1 << 2)
#define STS1_SERVICE_LED_ON (1 << 3)
#define STS1_SHUTDOWN_OCCURRED (1 << 4)
#define STS1_INT_FAULT (1 << 5)
#define STS1_ISOLATION_TEST_OK (1 << 6)
/* Status register 2 bits. */
#define STS2_ENABLE_PIN_HI (1 << 0)
#define STS2_DATA_OUT_RANGE (1 << 1)
/*
 * NOTE(review): STS2_RESTARTED_OK shares bit 1 with STS2_DATA_OUT_RANGE
 * and bit 2 is unused - this looks like a typo for (1 << 2); confirm
 * against the CPLI2C datasheet before changing (neither bit is currently
 * referenced by the driver).
 */
#define STS2_RESTARTED_OK (1 << 1)
#define STS2_ISOLATION_TEST_FAIL (1 << 3)
#define STS2_HIGH_POWER_CAP (1 << 4)
#define STS2_INVALID_INSTR (1 << 5)
#define STS2_WILL_RESTART (1 << 6)
#define STS2_PEC_ERR (1 << 7)
/* Alarm register bits */
#define ALRM1_VIN_OUT_LIMIT (1 << 0)
#define ALRM1_VOUT_OUT_LIMIT (1 << 1)
#define ALRM1_OV_VOLT_SHUTDOWN (1 << 2)
#define ALRM1_VIN_OVERCURRENT (1 << 3)
#define ALRM1_TEMP_WARNING (1 << 4)
#define ALRM1_TEMP_SHUTDOWN (1 << 5)
#define ALRM1_PRIMARY_FAULT (1 << 6)
#define ALRM1_POWER_LIMIT (1 << 7)
#define ALRM2_5V_OUT_LIMIT (1 << 1)
#define ALRM2_TEMP_FAULT (1 << 2)
#define ALRM2_OV_LOW (1 << 3)
#define ALRM2_DCDC_TEMP_HIGH (1 << 4)
#define ALRM2_PRI_TEMP_HIGH (1 << 5)
#define ALRM2_NO_PRIMARY (1 << 6)
#define ALRM2_FAN_FAULT (1 << 7)
#define FIRMWARE_REV_LEN 4
#define DATA_STRING_LEN 9
#define INPUT_STRING_LEN 5 /* 4 for most devices */
#define FAN_SPEED_LEN 5
/* Per-client driver state, including the cached register snapshots. */
struct pem_data {
struct device *hwmon_dev;
struct mutex update_lock; /* protects the cached data below */
bool valid; /* cached data has been read at least once */
bool fans_supported;
int input_length; /* 0 if input readings are unsupported */
unsigned long last_updated; /* in jiffies */
u8 firmware_rev[FIRMWARE_REV_LEN];
u8 data_string[DATA_STRING_LEN];
u8 input_string[INPUT_STRING_LEN];
u8 fan_speed[FAN_SPEED_LEN];
};
/*
 * Read an SMBus block of a known, fixed length into the caller's buffer.
 * Returns 0 on success, the negative error from the SMBus transfer, or
 * -EIO when the device reports an unexpected length (0xff is the
 * device's all-ones error marker).
 */
static int pem_read_block(struct i2c_client *client, u8 command, u8 *data,
			  int data_len)
{
	u8 buf[I2C_SMBUS_BLOCK_MAX];
	int len;

	len = i2c_smbus_read_block_data(client, command, buf);
	if (unlikely(len < 0))
		return len;

	if (unlikely(len == 0xff || len != data_len))
		return -EIO;

	memcpy(data, buf, data_len);
	return 0;
}
/*
 * Refresh the cached register snapshots if they are older than one
 * second (or never read). Returns the driver data on success or an
 * ERR_PTR on read failure. Serialised by update_lock.
 */
static struct pem_data *pem_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pem_data *data = i2c_get_clientdata(client);
struct pem_data *ret = data;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
int result;
/* Read data string */
result = pem_read_block(client, PEM_READ_DATA_STRING,
data->data_string,
sizeof(data->data_string));
if (unlikely(result < 0)) {
ret = ERR_PTR(result);
goto abort;
}
/* Read input string (only if probe detected support) */
if (data->input_length) {
result = pem_read_block(client, PEM_READ_INPUT_STRING,
data->input_string,
data->input_length);
if (unlikely(result < 0)) {
ret = ERR_PTR(result);
goto abort;
}
}
/* Read fan speeds (only if probe detected support) */
if (data->fans_supported) {
result = pem_read_block(client, PEM_READ_FAN_SPEED,
data->fan_speed,
sizeof(data->fan_speed));
if (unlikely(result < 0)) {
ret = ERR_PTR(result);
goto abort;
}
}
/* Latch-clear so the next cycle reports fresh alarm state. */
i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
data->last_updated = jiffies;
data->valid = 1;
}
abort:
mutex_unlock(&data->update_lock);
return ret;
}
/*
 * Convert one entry of the data string to its hwmon value (mV, mA, or
 * millidegrees C). The two virtual temperature-limit indices return
 * datasheet constants. Unknown indices warn once and return 0.
 */
static long pem_get_data(u8 *data, int len, int index)
{
	if (index == PEM_DATA_VOUT_LSB)
		/* 16-bit little-endian raw value, scaled by 2.5 to mV */
		return (data[index] + (data[index + 1] << 8)) * 5 / 2;

	if (index == PEM_DATA_CURRENT)
		return data[index] * 200;

	if (index == PEM_DATA_TEMP)
		return data[index] * 1000;

	if (index == PEM_DATA_TEMP_MAX)
		return 97 * 1000;	/* 97 degrees C per datasheet */

	if (index == PEM_DATA_TEMP_CRIT)
		return 107 * 1000;	/* 107 degrees C per datasheet */

	WARN_ON_ONCE(1);
	return 0;
}
/*
 * Convert one entry of the input string to its hwmon value. Long-format
 * devices (len == INPUT_STRING_LEN) report a 16-bit voltage and shift
 * the power field by one byte. Unknown indices warn once and return 0.
 */
static long pem_get_input(u8 *data, int len, int index)
{
	if (index == PEM_INPUT_VOLTAGE) {
		long raw;

		if (len == INPUT_STRING_LEN)
			raw = data[index] + (data[index + 1] << 8);
		else
			raw = data[index];
		/* 75 V offset, reported in mV */
		return (raw - 75) * 1000;
	}

	if (index == PEM_INPUT_POWER_LSB) {
		int lsb = (len == INPUT_STRING_LEN) ? index + 1 : index;

		/* 16-bit little-endian watts, reported in uW */
		return (data[lsb] + (data[lsb + 1] << 8)) * 1000000L;
	}

	WARN_ON_ONCE(1);
	return 0;
}
/*
 * Convert one fan entry to RPM (raw value is in units of 100 RPM).
 * Unknown indices warn once and return 0.
 */
static long pem_get_fan(u8 *data, int len, int index)
{
	if (index == PEM_FAN_FAN1 || index == PEM_FAN_FAN2 ||
	    index == PEM_FAN_FAN3)
		return data[index] * 100;

	WARN_ON_ONCE(1);
	return 0;
}
/*
 * Show boolean, either a fault or an alarm.
 * .nr points to the register, .index is the bit mask to check
 */
static ssize_t pem_show_bool(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
struct pem_data *data = pem_update_device(dev);
u8 status;
if (IS_ERR(data))
return PTR_ERR(data);
status = data->data_string[attr->nr] & attr->index;
return snprintf(buf, PAGE_SIZE, "%d\n", !!status);
}
/* sysfs show: one converted value from the cached data string. */
static ssize_t pem_show_data(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pem_data *data = pem_update_device(dev);
long value;
if (IS_ERR(data))
return PTR_ERR(data);
value = pem_get_data(data->data_string, sizeof(data->data_string),
attr->index);
return snprintf(buf, PAGE_SIZE, "%ld\n", value);
}
/* sysfs show: one converted value from the cached input string. */
static ssize_t pem_show_input(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pem_data *data = pem_update_device(dev);
long value;
if (IS_ERR(data))
return PTR_ERR(data);
value = pem_get_input(data->input_string, sizeof(data->input_string),
attr->index);
return snprintf(buf, PAGE_SIZE, "%ld\n", value);
}
/* sysfs show: one fan speed (RPM) from the cached fan data. */
static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pem_data *data = pem_update_device(dev);
long value;
if (IS_ERR(data))
return PTR_ERR(data);
value = pem_get_fan(data->fan_speed, sizeof(data->fan_speed),
attr->index);
return snprintf(buf, PAGE_SIZE, "%ld\n", value);
}
/*
 * Sensor attributes. Single-index attributes feed a data/input/fan
 * index into the corresponding show routine; two-index attributes feed
 * a (status byte, bit mask) pair into pem_show_bool().
 */
/* Voltages */
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, pem_show_data, NULL,
PEM_DATA_VOUT_LSB);
static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_1, ALRM1_VOUT_OUT_LIMIT);
static SENSOR_DEVICE_ATTR_2(in1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_1, ALRM1_OV_VOLT_SHUTDOWN);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, pem_show_input, NULL,
PEM_INPUT_VOLTAGE);
static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_1,
ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
/* Currents */
static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, pem_show_data, NULL,
PEM_DATA_CURRENT);
static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_1, ALRM1_VIN_OVERCURRENT);
/* Power */
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, pem_show_input, NULL,
PEM_INPUT_POWER_LSB);
static SENSOR_DEVICE_ATTR_2(power1_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_1, ALRM1_POWER_LIMIT);
/* Fans */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, pem_show_fan, NULL,
PEM_FAN_FAN1);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, pem_show_fan, NULL,
PEM_FAN_FAN2);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, pem_show_fan, NULL,
PEM_FAN_FAN3);
static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_2, ALRM2_FAN_FAULT);
/* Temperatures */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, pem_show_data, NULL,
PEM_DATA_TEMP);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, pem_show_data, NULL,
PEM_DATA_TEMP_MAX);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, pem_show_data, NULL,
PEM_DATA_TEMP_CRIT);
static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_1, ALRM1_TEMP_WARNING);
static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_1, ALRM1_TEMP_SHUTDOWN);
static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, pem_show_bool, NULL,
PEM_DATA_ALARM_2, ALRM2_TEMP_FAULT);
/* Attributes present on every device (alarms are always supported). */
static struct attribute *pem_attributes[] = {
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_curr1_alarm.dev_attr.attr,
&sensor_dev_attr_power1_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_fault.dev_attr.attr,
NULL,
};
static const struct attribute_group pem_group = {
.attrs = pem_attributes,
};
/* Registered only if probe detected input readings (see pem_probe()). */
static struct attribute *pem_input_attributes[] = {
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
NULL
};
static const struct attribute_group pem_input_group = {
.attrs = pem_input_attributes,
};
/* Registered only if probe detected fan speed readings. */
static struct attribute *pem_fan_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
NULL
};
static const struct attribute_group pem_fan_group = {
.attrs = pem_fan_attributes,
};
/*
 * Probe: verify the device responds, detect which optional feature sets
 * (input readings, fan speeds) are present by probing their registers,
 * create the matching sysfs groups, and register the hwmon device.
 */
static int pem_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
struct pem_data *data;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BLOCK_DATA
| I2C_FUNC_SMBUS_WRITE_BYTE))
return -ENODEV;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/*
 * We use the next two commands to determine if the device is really
 * there.
 */
ret = pem_read_block(client, PEM_READ_FIRMWARE_REV,
data->firmware_rev, sizeof(data->firmware_rev));
if (ret < 0)
goto out_kfree;
ret = i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
if (ret < 0)
goto out_kfree;
dev_info(&client->dev, "Firmware revision %d.%d.%d\n",
data->firmware_rev[0], data->firmware_rev[1],
data->firmware_rev[2]);
/* Register sysfs hooks */
ret = sysfs_create_group(&client->dev.kobj, &pem_group);
if (ret)
goto out_kfree;
/*
 * Check if input readings are supported.
 * This is the case if we can read input data,
 * and if the returned data is not all zeros.
 * Note that input alarms are always supported.
 */
ret = pem_read_block(client, PEM_READ_INPUT_STRING,
data->input_string,
sizeof(data->input_string) - 1);
if (!ret && (data->input_string[0] || data->input_string[1] ||
data->input_string[2]))
data->input_length = sizeof(data->input_string) - 1;
else if (ret < 0) {
/* Input string is one byte longer for some devices */
ret = pem_read_block(client, PEM_READ_INPUT_STRING,
data->input_string,
sizeof(data->input_string));
if (!ret && (data->input_string[0] || data->input_string[1] ||
data->input_string[2] || data->input_string[3]))
data->input_length = sizeof(data->input_string);
}
/* A failed input probe is not fatal; just skip the input group. */
ret = 0;
if (data->input_length) {
ret = sysfs_create_group(&client->dev.kobj, &pem_input_group);
if (ret)
goto out_remove_groups;
}
/*
 * Check if fan speed readings are supported.
 * This is the case if we can read fan speed data,
 * and if the returned data is not all zeros.
 * Note that the fan alarm is always supported.
 */
ret = pem_read_block(client, PEM_READ_FAN_SPEED,
data->fan_speed,
sizeof(data->fan_speed));
if (!ret && (data->fan_speed[0] || data->fan_speed[1] ||
data->fan_speed[2] || data->fan_speed[3])) {
data->fans_supported = true;
ret = sysfs_create_group(&client->dev.kobj, &pem_fan_group);
if (ret)
goto out_remove_groups;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
goto out_remove_groups;
}
return 0;
out_remove_groups:
/* Removing a group that was never created only logs a warning. */
sysfs_remove_group(&client->dev.kobj, &pem_input_group);
sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
sysfs_remove_group(&client->dev.kobj, &pem_group);
out_kfree:
kfree(data);
return ret;
}
/* Remove: unwind probe in reverse order and free the driver data. */
static int pem_remove(struct i2c_client *client)
{
struct pem_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &pem_input_group);
sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
sysfs_remove_group(&client->dev.kobj, &pem_group);
kfree(data);
return 0;
}
/* Supported device ids; matched by i2c board info / device tree. */
static const struct i2c_device_id pem_id[] = {
{"lineage_pem", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, pem_id);
static struct i2c_driver pem_driver = {
.driver = {
.name = "lineage_pem",
},
.probe = pem_probe,
.remove = pem_remove,
.id_table = pem_id,
};
/* Module plumbing: register/unregister the i2c driver. */
static int __init pem_init(void)
{
return i2c_add_driver(&pem_driver);
}
static void __exit pem_exit(void)
{
i2c_del_driver(&pem_driver);
}
MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
MODULE_LICENSE("GPL");
module_init(pem_init);
module_exit(pem_exit);
| gpl-2.0 |
xb446909/personalprojects | ARMToolChain/source/linux-4.1.2/drivers/clk/clk-moxart.c | 960 | 2416 | /*
* MOXA ART SoCs clock driver.
*
* Copyright (C) 2013 Jonas Jensen
*
* Jonas Jensen <jonas.jensen@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/of_address.h>
/*
 * Register the MOXA ART PLL output as a fixed-factor clock.
 *
 * The multiplier is read once from bits [8:3] of the register at offset
 * 0x30; the mapping is dropped immediately afterwards.
 *
 * Fixes vs. original: the clk handle obtained with of_clk_get() was
 * never released (clock refcount leak - it is used only as an existence
 * check), and the local iomap pointer had needless static storage.
 */
void __init moxart_of_pll_clk_init(struct device_node *node)
{
	void __iomem *base;
	struct clk *clk, *ref_clk;
	unsigned int mul;
	const char *name = node->name;
	const char *parent_name;

	of_property_read_string(node, "clock-output-names", &name);
	parent_name = of_clk_get_parent_name(node, 0);

	base = of_iomap(node, 0);
	if (!base) {
		pr_err("%s: of_iomap failed\n", node->full_name);
		return;
	}

	/* PLL multiplier lives in bits [8:3] of the register at 0x30. */
	mul = readl(base + 0x30) >> 3 & 0x3f;
	iounmap(base);

	/*
	 * The reference clock is only looked up to verify it is
	 * available; drop the reference right away instead of leaking it.
	 * The parent link of the registered clock is made by name.
	 */
	ref_clk = of_clk_get(node, 0);
	if (IS_ERR(ref_clk)) {
		pr_err("%s: of_clk_get failed\n", node->full_name);
		return;
	}
	clk_put(ref_clk);

	clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mul, 1);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock\n", node->full_name);
		return;
	}

	clk_register_clkdev(clk, NULL, name);
	of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock",
	       moxart_of_pll_clk_init);
/*
 * Register the MOXA ART APB bus clock as a fixed-divider clock derived
 * from the PLL. The divider selector is read from bits [6:4] of the
 * register at offset 0xc and mapped through a small lookup table; an
 * out-of-range selector falls back to the first table entry.
 *
 * Fixes vs. original: the clk handle obtained with of_clk_get() was
 * never released (clock refcount leak - it is used only as an existence
 * check), the local iomap pointer had needless static storage, and the
 * constant divider table is now static const.
 */
void __init moxart_of_apb_clk_init(struct device_node *node)
{
	void __iomem *base;
	struct clk *clk, *pll_clk;
	unsigned int div, val;
	static const unsigned int div_idx[] = { 2, 3, 4, 6, 8 };
	const char *name = node->name;
	const char *parent_name;

	of_property_read_string(node, "clock-output-names", &name);
	parent_name = of_clk_get_parent_name(node, 0);

	base = of_iomap(node, 0);
	if (!base) {
		pr_err("%s: of_iomap failed\n", node->full_name);
		return;
	}

	/* Divider selector lives in bits [6:4] of the register at 0xc. */
	val = readl(base + 0xc) >> 4 & 0x7;
	iounmap(base);

	if (val > 4)
		val = 0;	/* out-of-range selector: use first entry */
	div = div_idx[val] * 2;

	/*
	 * The PLL clock is only looked up to verify it is available;
	 * drop the reference right away instead of leaking it.
	 */
	pll_clk = of_clk_get(node, 0);
	if (IS_ERR(pll_clk)) {
		pr_err("%s: of_clk_get failed\n", node->full_name);
		return;
	}
	clk_put(pll_clk);

	clk = clk_register_fixed_factor(NULL, name, parent_name, 0, 1, div);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock\n", node->full_name);
		return;
	}

	clk_register_clkdev(clk, NULL, name);
	of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(moxart_apb_clock, "moxa,moxart-apb-clock",
	       moxart_of_apb_clk_init);
| gpl-2.0 |
jmahler/linux-next | lib/stmp_device.c | 1472 | 2163 | /*
* Copyright (C) 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
* Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
* Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com
* Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/stmp_device.h>
#define STMP_MODULE_CLKGATE (1 << 30)
#define STMP_MODULE_SFTRST (1 << 31)
/*
* Clear the bit and poll it cleared. This is usually called with
* a reset address and mask being either SFTRST(bit 31) or CLKGATE
* (bit 30).
*/
/*
 * Clear the given bit(s) via the SET/CLR register banking and busy-poll
 * until the hardware reports them cleared. Usually called with SFTRST
 * (bit 31) or CLKGATE (bit 30). Returns 0 on success, non-zero if the
 * bits were still set after the bounded number of polls.
 */
static int stmp_clear_poll_bit(void __iomem *addr, u32 mask)
{
	int tries = 0x400;

	writel(mask, addr + STMP_OFFSET_REG_CLR);
	udelay(1);

	while (tries) {
		if (!(readl(addr) & mask))
			break;
		--tries;
	}

	return !tries;
}
/*
 * Bring an STMP-style hardware block out of reset and ungate its clock,
 * following the SFTRST/CLKGATE handshake: drop soft-reset, ungate the
 * clock, assert soft-reset and wait for the block to gate itself, then
 * release both bits again.
 *
 * Returns 0 on success, -ETIMEDOUT if any polling step times out.
 */
int stmp_reset_block(void __iomem *reset_addr)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR);

	/* set SFTRST to reset the block */
	writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET);
	udelay(1);

	/* poll CLKGATE becoming set (the block gates its clock on reset) */
	while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout)
		/* nothing */;
	if (unlikely(!timeout))
		goto error;

	/* clear and poll SFTRST */
	ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
EXPORT_SYMBOL(stmp_reset_block);
| gpl-2.0 |
denghl/linux3.x | arch/sparc/mm/tsb.c | 1472 | 14867 | /* arch/sparc64/mm/tsb.c
*
* Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
/* Index into a TSB: drop the low hash_shift bits of the virtual
 * address and wrap into the table (nentries must be a power of two).
 */
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	unsigned long bucket = vaddr >> hash_shift;

	return bucket & (nentries - 1UL);
}
/* A TSB tag matches when it equals the vaddr with the low 22 bits
 * stripped off.
 */
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	unsigned long want = vaddr >> 22;

	return tag == want;
}
/* TSB flushes need only occur on the processor initiating the address
* space modification, not on each cpu the address space has run on.
* Only the TLB flush needs that treatment.
*/
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	/* Invalidate every kernel TSB entry covering [start, end). */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct tsb *ent;

		ent = &swapper_tsb[tsb_hash(addr, PAGE_SHIFT,
					    KERNEL_TSB_NENTRIES)];
		if (tag_compare(ent->tag, addr))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}
/* Flush the single TSB entry that would map virtual address @v. */
static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
				  unsigned long hash_shift,
				  unsigned long nentries)
{
	unsigned long ent, tag;

	/* Mask off the stolen low bit before hashing. */
	v &= ~0x1UL;
	ent = tsb + (tsb_hash(v, hash_shift, nentries) * sizeof(struct tsb));
	tag = v >> 22UL;

	tsb_flush(ent, tag);
}
/* Flush the TSB entry for every address queued in the TLB batch. */
static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long idx;

	for (idx = 0; idx < tb->tlb_nr; idx++)
		__flush_tsb_one_entry(tsb, tb->vaddrs[idx], hash_shift,
				      nentries);
}
/* Flush, for one user address space, the TSB entries named in the TLB
 * batch.  Per the comment above flush_tsb_kernel_range(), TSB flushes
 * only run on the cpu doing the modification.
 */
void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	/* The context lock synchronizes us against tsb_grow() resizing
	 * the TSB underneath us.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	/* cheetah+ and sun4v address the TSB physically. */
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* Repeat for the huge-page TSB, if this mm has one. */
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
/* Like flush_tsb_user(), but for a single virtual address rather than
 * a whole TLB batch.
 */
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	/* cheetah+ and sun4v address the TSB physically. */
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* Also flush the huge-page TSB entry, if this mm has one. */
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB
#endif
/* Record everything the MMU context machinery needs to use the TSB at
 * @tsb_idx for @mm: entry count, the TSB register value, the virtual
 * mapping info (when the TSB is mapped through a locked TLB entry),
 * and on sun4v the hypervisor TSB descriptor.
 */
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	/* Pick the fixed virtual base where this kind of TSB gets mapped. */
	switch (tsb_idx) {
	case MM_TSB_BASE:
		base = TSBMAP_8K_BASE;
		break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	case MM_TSB_HUGE:
		base = TSBMAP_4M_BASE;
		break;
#endif
	default:
		BUG();
	}

	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	/* TSBs must be naturally aligned to their size. */
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		/* Virtually mapped TSB: encode the base vaddr in the
		 * register and build the TTE for the locked mapping.
		 */
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
struct kmem_cache *pgtable_cache __read_mostly;
static struct kmem_cache *tsb_caches[8] __read_mostly;
static const char *tsb_cache_names[8] = {
"tsb_8KB",
"tsb_16KB",
"tsb_32KB",
"tsb_64KB",
"tsb_128KB",
"tsb_256KB",
"tsb_512KB",
"tsb_1MB",
};
/* Create the kmem caches used for page tables and for each of the
 * eight possible TSB sizes (8KB through 1MB, powers of two).  Called
 * once at boot; any failure here is fatal (prom_halt).
 */
void __init pgtable_cache_init(void)
{
	unsigned long i;

	/* _clear_page is passed as the cache constructor, presumably so
	 * page-table pages come back zeroed -- TODO confirm against
	 * kmem_cache_create() semantics for this kernel version.
	 */
	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		/* Align == size: TSBs must be naturally aligned
		 * (see the BUG_ON in setup_tsb_params()).
		 */
		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}
int sysctl_tsb_ratio = -2;
/* Translate a TSB byte size into the RSS value at which a grow should
 * be triggered.  A negative sysctl_tsb_ratio grows before the table
 * fills (e.g. -2 => at 3/4 capacity); a positive one allows some
 * over-subscription.
 */
static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long nentries = new_size / sizeof(struct tsb);

	return (sysctl_tsb_ratio < 0)
		? nentries - (nentries >> -sysctl_tsb_ratio)
		: nentries + (nentries >> sysctl_tsb_ratio);
}
/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
* do_sparc64_fault() invokes this routine to try and grow it.
*
* When we reach the maximum TSB size supported, we stick ~0UL into
* tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
* will not trigger any longer.
*
* The TSB can be anywhere from 8K to 1MB in size, in increasing powers
* of two. The TSB must be aligned to it's size, so f.e. a 512K TSB
* must be 512K aligned. It also must be physically contiguous, so we
* cannot use vmalloc().
*
* The idea here is to grow the TSB when the RSS of the process approaches
* the number of entries that the current TSB can hold at once. Currently,
* we trigger when the RSS hits 3/4 of the TSB capacity.
*/
/* Grow (or initially allocate) the TSB at @tsb_index for @mm so it can
 * comfortably hold @rss translations.  See the long comment above for
 * the growth policy; the locking protocol is documented inline below.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	/* Cap the TSB at the largest physically contiguous allocation
	 * the page allocator can produce.
	 */
	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	/* Find the smallest size whose RSS limit still exceeds @rss. */
	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	/* At the maximum size, disable further grow checks (see the
	 * comment block above this function).
	 */
	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	/* Don't thrash reclaim or warn for high-order attempts; we have
	 * a fallback below.
	 */
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));


	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		/* cheetah+ and sun4v address TSBs physically. */
		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}
/* Initialize the MMU context of a fresh mm (fork/exec): clear the
 * context value, size the base TSB from the parent's RSS, and size a
 * huge-page TSB if the parent had huge mappings.
 * Returns 0 on success, -ENOMEM if the base TSB could not be allocated.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	/* 0 means "no hardware context assigned yet". */
	mm->context.sparc64_ctx_val = 0UL;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	/* tsb_grow() may have failed under memory pressure. */
	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}
/* Release one TSB back to its size cache and clear the config slot. */
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long idx;

	if (tp->tsb == NULL)
		return;

	/* The low three bits of the register value encode the cache index. */
	idx = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[idx], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}
/* Tear down an address space's MMU state: free all of its TSBs and,
 * if it holds a valid hardware context number, release that number
 * back to the allocation bitmap.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		/* Clear this context's bit (64 contexts per bitmap word). */
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}
| gpl-2.0 |
TheSSJ/zf2_mmkernel | sound/pci/via82xx_modem.c | 2240 | 35258 | /*
* ALSA modem driver for VIA VT82xx (South Bridge)
*
* VT82C686A/B/C, VT8233A/C, VT8235
*
* Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz>
* Tjeerd.Mulder <Tjeerd.Mulder@fujitsu-siemens.com>
* 2002 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* Changes:
*
* Sep. 2, 2004 Sasha Khapyorsky <sashak@alsa-project.org>
* Modified from original audio driver 'via82xx.c' to support AC97
* modems.
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#if 0
#define POINTER_DEBUG
#endif
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("VIA VT82xx modem");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C modem,pci}}");
static int index = -2; /* Exclude the first card */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int ac97_clock = 48000;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for VIA 82xx bridge.");
module_param(id, charp, 0444);
MODULE_PARM_DESC(id, "ID string for VIA 82xx bridge.");
module_param(ac97_clock, int, 0444);
MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (default 48000Hz).");
/* just for backward compatibility */
static bool enable;
module_param(enable, bool, 0444);
/*
* Direct registers
*/
#define VIAREG(via, x) ((via)->port + VIA_REG_##x)
#define VIADEV_REG(viadev, x) ((viadev)->port + VIA_REG_##x)
/* common offsets */
#define VIA_REG_OFFSET_STATUS 0x00 /* byte - channel status */
#define VIA_REG_STAT_ACTIVE 0x80 /* RO */
#define VIA_REG_STAT_PAUSED 0x40 /* RO */
#define VIA_REG_STAT_TRIGGER_QUEUED 0x08 /* RO */
#define VIA_REG_STAT_STOPPED 0x04 /* RWC */
#define VIA_REG_STAT_EOL 0x02 /* RWC */
#define VIA_REG_STAT_FLAG 0x01 /* RWC */
#define VIA_REG_OFFSET_CONTROL 0x01 /* byte - channel control */
#define VIA_REG_CTRL_START 0x80 /* WO */
#define VIA_REG_CTRL_TERMINATE 0x40 /* WO */
#define VIA_REG_CTRL_AUTOSTART 0x20
#define VIA_REG_CTRL_PAUSE 0x08 /* RW */
#define VIA_REG_CTRL_INT_STOP 0x04
#define VIA_REG_CTRL_INT_EOL 0x02
#define VIA_REG_CTRL_INT_FLAG 0x01
#define VIA_REG_CTRL_RESET 0x01 /* RW - probably reset? undocumented */
#define VIA_REG_CTRL_INT (VIA_REG_CTRL_INT_FLAG | VIA_REG_CTRL_INT_EOL | VIA_REG_CTRL_AUTOSTART)
#define VIA_REG_OFFSET_TYPE 0x02 /* byte - channel type (686 only) */
#define VIA_REG_TYPE_AUTOSTART 0x80 /* RW - autostart at EOL */
#define VIA_REG_TYPE_16BIT 0x20 /* RW */
#define VIA_REG_TYPE_STEREO 0x10 /* RW */
#define VIA_REG_TYPE_INT_LLINE 0x00
#define VIA_REG_TYPE_INT_LSAMPLE 0x04
#define VIA_REG_TYPE_INT_LESSONE 0x08
#define VIA_REG_TYPE_INT_MASK 0x0c
#define VIA_REG_TYPE_INT_EOL 0x02
#define VIA_REG_TYPE_INT_FLAG 0x01
#define VIA_REG_OFFSET_TABLE_PTR 0x04 /* dword - channel table pointer */
#define VIA_REG_OFFSET_CURR_PTR 0x04 /* dword - channel current pointer */
#define VIA_REG_OFFSET_STOP_IDX 0x08 /* dword - stop index, channel type, sample rate */
#define VIA_REG_OFFSET_CURR_COUNT 0x0c /* dword - channel current count (24 bit) */
#define VIA_REG_OFFSET_CURR_INDEX 0x0f /* byte - channel current index (for via8233 only) */
#define DEFINE_VIA_REGSET(name,val) \
enum {\
VIA_REG_##name##_STATUS = (val),\
VIA_REG_##name##_CONTROL = (val) + 0x01,\
VIA_REG_##name##_TYPE = (val) + 0x02,\
VIA_REG_##name##_TABLE_PTR = (val) + 0x04,\
VIA_REG_##name##_CURR_PTR = (val) + 0x04,\
VIA_REG_##name##_STOP_IDX = (val) + 0x08,\
VIA_REG_##name##_CURR_COUNT = (val) + 0x0c,\
}
/* modem block */
DEFINE_VIA_REGSET(MO, 0x40);
DEFINE_VIA_REGSET(MI, 0x50);
/* AC'97 */
#define VIA_REG_AC97 0x80 /* dword */
#define VIA_REG_AC97_CODEC_ID_MASK (3<<30)
#define VIA_REG_AC97_CODEC_ID_SHIFT 30
#define VIA_REG_AC97_CODEC_ID_PRIMARY 0x00
#define VIA_REG_AC97_CODEC_ID_SECONDARY 0x01
#define VIA_REG_AC97_SECONDARY_VALID (1<<27)
#define VIA_REG_AC97_PRIMARY_VALID (1<<25)
#define VIA_REG_AC97_BUSY (1<<24)
#define VIA_REG_AC97_READ (1<<23)
#define VIA_REG_AC97_CMD_SHIFT 16
#define VIA_REG_AC97_CMD_MASK 0x7e
#define VIA_REG_AC97_DATA_SHIFT 0
#define VIA_REG_AC97_DATA_MASK 0xffff
#define VIA_REG_SGD_SHADOW 0x84 /* dword */
#define VIA_REG_SGD_STAT_PB_FLAG (1<<0)
#define VIA_REG_SGD_STAT_CP_FLAG (1<<1)
#define VIA_REG_SGD_STAT_FM_FLAG (1<<2)
#define VIA_REG_SGD_STAT_PB_EOL (1<<4)
#define VIA_REG_SGD_STAT_CP_EOL (1<<5)
#define VIA_REG_SGD_STAT_FM_EOL (1<<6)
#define VIA_REG_SGD_STAT_PB_STOP (1<<8)
#define VIA_REG_SGD_STAT_CP_STOP (1<<9)
#define VIA_REG_SGD_STAT_FM_STOP (1<<10)
#define VIA_REG_SGD_STAT_PB_ACTIVE (1<<12)
#define VIA_REG_SGD_STAT_CP_ACTIVE (1<<13)
#define VIA_REG_SGD_STAT_FM_ACTIVE (1<<14)
#define VIA_REG_SGD_STAT_MR_FLAG (1<<16)
#define VIA_REG_SGD_STAT_MW_FLAG (1<<17)
#define VIA_REG_SGD_STAT_MR_EOL (1<<20)
#define VIA_REG_SGD_STAT_MW_EOL (1<<21)
#define VIA_REG_SGD_STAT_MR_STOP (1<<24)
#define VIA_REG_SGD_STAT_MW_STOP (1<<25)
#define VIA_REG_SGD_STAT_MR_ACTIVE (1<<28)
#define VIA_REG_SGD_STAT_MW_ACTIVE (1<<29)
#define VIA_REG_GPI_STATUS 0x88
#define VIA_REG_GPI_INTR 0x8c
#define VIA_TBL_BIT_FLAG 0x40000000
#define VIA_TBL_BIT_EOL 0x80000000
/* pci space */
#define VIA_ACLINK_STAT 0x40
#define VIA_ACLINK_C11_READY 0x20
#define VIA_ACLINK_C10_READY 0x10
#define VIA_ACLINK_C01_READY 0x04 /* secondary codec ready */
#define VIA_ACLINK_LOWPOWER 0x02 /* low-power state */
#define VIA_ACLINK_C00_READY 0x01 /* primary codec ready */
#define VIA_ACLINK_CTRL 0x41
#define VIA_ACLINK_CTRL_ENABLE 0x80 /* 0: disable, 1: enable */
#define VIA_ACLINK_CTRL_RESET 0x40 /* 0: assert, 1: de-assert */
#define VIA_ACLINK_CTRL_SYNC 0x20 /* 0: release SYNC, 1: force SYNC hi */
#define VIA_ACLINK_CTRL_SDO 0x10 /* 0: release SDO, 1: force SDO hi */
#define VIA_ACLINK_CTRL_VRA 0x08 /* 0: disable VRA, 1: enable VRA */
#define VIA_ACLINK_CTRL_PCM 0x04 /* 0: disable PCM, 1: enable PCM */
#define VIA_ACLINK_CTRL_FM 0x02 /* via686 only */
#define VIA_ACLINK_CTRL_SB 0x01 /* via686 only */
#define VIA_ACLINK_CTRL_INIT (VIA_ACLINK_CTRL_ENABLE|\
VIA_ACLINK_CTRL_RESET|\
VIA_ACLINK_CTRL_PCM)
#define VIA_FUNC_ENABLE 0x42
#define VIA_FUNC_MIDI_PNP 0x80 /* FIXME: it's 0x40 in the datasheet! */
#define VIA_FUNC_MIDI_IRQMASK 0x40 /* FIXME: not documented! */
#define VIA_FUNC_RX2C_WRITE 0x20
#define VIA_FUNC_SB_FIFO_EMPTY 0x10
#define VIA_FUNC_ENABLE_GAME 0x08
#define VIA_FUNC_ENABLE_FM 0x04
#define VIA_FUNC_ENABLE_MIDI 0x02
#define VIA_FUNC_ENABLE_SB 0x01
#define VIA_PNP_CONTROL 0x43
#define VIA_MC97_CTRL 0x44
#define VIA_MC97_CTRL_ENABLE 0x80
#define VIA_MC97_CTRL_SECONDARY 0x40
#define VIA_MC97_CTRL_INIT (VIA_MC97_CTRL_ENABLE|\
VIA_MC97_CTRL_SECONDARY)
/*
* pcm stream
*/
struct snd_via_sg_table {
unsigned int offset;
unsigned int size;
} ;
#define VIA_TABLE_SIZE 255
struct viadev {
unsigned int reg_offset;
unsigned long port;
int direction; /* playback = 0, capture = 1 */
struct snd_pcm_substream *substream;
int running;
unsigned int tbl_entries; /* # descriptors */
struct snd_dma_buffer table;
struct snd_via_sg_table *idx_table;
/* for recovery from the unexpected pointer */
unsigned int lastpos;
unsigned int bufsize;
unsigned int bufsize2;
};
enum { TYPE_CARD_VIA82XX_MODEM = 1 };
#define VIA_MAX_MODEM_DEVS 2
struct via82xx_modem {
int irq;
unsigned long port;
unsigned int intr_mask; /* SGD_SHADOW mask to check interrupts */
struct pci_dev *pci;
struct snd_card *card;
unsigned int num_devs;
unsigned int playback_devno, capture_devno;
struct viadev devs[VIA_MAX_MODEM_DEVS];
struct snd_pcm *pcms[2];
struct snd_ac97_bus *ac97_bus;
struct snd_ac97 *ac97;
unsigned int ac97_clock;
unsigned int ac97_secondary; /* secondary AC'97 codec is present */
spinlock_t reg_lock;
struct snd_info_entry *proc_entry;
};
static DEFINE_PCI_DEVICE_TABLE(snd_via82xx_modem_ids) = {
{ PCI_VDEVICE(VIA, 0x3068), TYPE_CARD_VIA82XX_MODEM, },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_via82xx_modem_ids);
/*
*/
/*
* allocate and initialize the descriptor buffers
* periods = number of periods
* fragsize = period size in bytes
*/
/* Build the SG descriptor table for @substream.  Each descriptor is a
 * pair of little-endian 32-bit words: the DMA address, and the chunk
 * size OR'd with a flag marking a period boundary (FLAG) or the end of
 * the ring (EOL).  A period crossing a page boundary is split across
 * several descriptors.  Returns 0 on success or a negative errno.
 * (NOTE(review): the @pci parameter is unused; chip->pci is used
 * instead.)
 */
static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substream,
			   struct pci_dev *pci,
			   unsigned int periods, unsigned int fragsize)
{
	unsigned int i, idx, ofs, rest;
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);

	if (dev->table.area == NULL) {
		/* the start of each lists must be aligned to 8 bytes,
		 * but the kernel pages are much bigger, so we don't care
		 */
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
					PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8),
					&dev->table) < 0)
			return -ENOMEM;
	}
	if (! dev->idx_table) {
		dev->idx_table = kmalloc(sizeof(*dev->idx_table) * VIA_TABLE_SIZE, GFP_KERNEL);
		if (! dev->idx_table)
			return -ENOMEM;
	}

	/* fill the entries */
	idx = 0;
	ofs = 0;
	for (i = 0; i < periods; i++) {
		rest = fragsize;
		/* fill descriptors for a period.
		 * a period can be split to several descriptors if it's
		 * over page boundary.
		 */
		do {
			unsigned int r;
			unsigned int flag;
			unsigned int addr;

			if (idx >= VIA_TABLE_SIZE) {
				snd_printk(KERN_ERR "via82xx: too much table size!\n");
				return -EINVAL;
			}
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
			((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr);
			/* Clamp this chunk to what is left of the current page. */
			r = PAGE_SIZE - (ofs % PAGE_SIZE);
			if (rest < r)
				r = rest;
			rest -= r;
			if (! rest) {
				if (i == periods - 1)
					flag = VIA_TBL_BIT_EOL; /* buffer boundary */
				else
					flag = VIA_TBL_BIT_FLAG; /* period boundary */
			} else
				flag = 0; /* period continues to the next */
			/*
			printk(KERN_DEBUG "via: tbl %d: at %d size %d "
			       "(rest %d)\n", idx, ofs, r, rest);
			*/
			((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag);
			/* Mirror offset/size so the pointer callback can map
			 * a descriptor index back to a buffer position.
			 */
			dev->idx_table[idx].offset = ofs;
			dev->idx_table[idx].size = r;
			ofs += r;
			idx++;
		} while (rest > 0);
	}
	dev->tbl_entries = idx;
	dev->bufsize = periods * fragsize;
	dev->bufsize2 = dev->bufsize / 2;
	return 0;
}
/* Free the SG descriptor table and the index table, if allocated.
 * Always returns 0.
 */
static int clean_via_table(struct viadev *dev, struct snd_pcm_substream *substream,
			   struct pci_dev *pci)
{
	if (dev->table.area != NULL) {
		snd_dma_free_pages(&dev->table);
		dev->table.area = NULL;
	}

	kfree(dev->idx_table);
	dev->idx_table = NULL;

	return 0;
}
/*
* Basic I/O
*/
/* Raw read of the AC'97 access register. */
static inline unsigned int snd_via82xx_codec_xread(struct via82xx_modem *chip)
{
	unsigned int reg = inl(VIAREG(chip, AC97));

	return reg;
}
/* Raw write of the AC'97 access register. */
static inline void snd_via82xx_codec_xwrite(struct via82xx_modem *chip, unsigned int val)
{
	unsigned long port = VIAREG(chip, AC97);

	outl(val, port);
}
/* Poll the AC'97 busy bit for up to ~1ms.  Returns the low 16 data
 * bits of the access register once idle, or -EIO on timeout.
 */
static int snd_via82xx_codec_ready(struct via82xx_modem *chip, int secondary)
{
	int t;

	for (t = 0; t < 1000; t++) {
		unsigned int reg;

		udelay(1);
		reg = snd_via82xx_codec_xread(chip);
		if (!(reg & VIA_REG_AC97_BUSY))
			return reg & 0xffff;
	}
	snd_printk(KERN_ERR "codec_ready: codec %i is not ready [0x%x]\n",
		   secondary, snd_via82xx_codec_xread(chip));
	return -EIO;
}
/* Wait up to ~1ms for the codec's "valid" bit to be set with the busy
 * bit clear.  Returns the low 16 data bits on success, -EIO on timeout.
 */
static int snd_via82xx_codec_valid(struct via82xx_modem *chip, int secondary)
{
	unsigned int valid_bit = secondary ? VIA_REG_AC97_SECONDARY_VALID :
		VIA_REG_AC97_PRIMARY_VALID;
	int t;

	for (t = 0; t < 1000; t++) {
		unsigned int reg = snd_via82xx_codec_xread(chip);

		if ((reg & (VIA_REG_AC97_BUSY | valid_bit)) == valid_bit)
			return reg & 0xffff;
		udelay(1);
	}
	return -EIO;
}
/* ac97 bus "wait" callback: let any in-flight codec access drain, then
 * sleep long enough for the codec to settle.  The ready-poll's return
 * value is deliberately ignored (the long msleep covers the timeout
 * case); the previous code stored it in an unused local.
 */
static void snd_via82xx_codec_wait(struct snd_ac97 *ac97)
{
	struct via82xx_modem *chip = ac97->private_data;

	snd_via82xx_codec_ready(chip, ac97->num);
	/* here we need to wait fairly for long time.. */
	msleep(500);
}
/* Write an AC'97 codec register through the controller's command
 * interface; GPIO status writes go straight to the GPI register.
 */
static void snd_via82xx_codec_write(struct snd_ac97 *ac97,
				    unsigned short reg,
				    unsigned short val)
{
	struct via82xx_modem *chip = ac97->private_data;
	unsigned int cmd;

	if (reg == AC97_GPIO_STATUS) {
		outl(val, VIAREG(chip, GPI_STATUS));
		return;
	}

	/* Compose codec-id / register / data into one command word. */
	cmd = (ac97->num ? VIA_REG_AC97_CODEC_ID_SECONDARY :
	       VIA_REG_AC97_CODEC_ID_PRIMARY) << VIA_REG_AC97_CODEC_ID_SHIFT;
	cmd |= reg << VIA_REG_AC97_CMD_SHIFT;
	cmd |= val << VIA_REG_AC97_DATA_SHIFT;

	snd_via82xx_codec_xwrite(chip, cmd);
	snd_via82xx_codec_ready(chip, ac97->num);
}
/* Read an AC'97 codec register: issue the read command, wait for the
 * matching "valid" bit, and fetch the data.  The whole command is
 * retried up to four times; 0xffff is returned on persistent failure.
 */
static unsigned short snd_via82xx_codec_read(struct snd_ac97 *ac97, unsigned short reg)
{
	struct via82xx_modem *chip = ac97->private_data;
	unsigned int xval, val = 0xffff;
	int again = 0;

	/* Build the read command: codec id, valid bit, READ, register. */
	xval = ac97->num << VIA_REG_AC97_CODEC_ID_SHIFT;
	xval |= ac97->num ? VIA_REG_AC97_SECONDARY_VALID : VIA_REG_AC97_PRIMARY_VALID;
	xval |= VIA_REG_AC97_READ;
	xval |= (reg & 0x7f) << VIA_REG_AC97_CMD_SHIFT;
	while (1) {
		if (again++ > 3) {
			snd_printk(KERN_ERR "codec_read: codec %i is not valid [0x%x]\n",
				   ac97->num, snd_via82xx_codec_xread(chip));
			return 0xffff;
		}
		snd_via82xx_codec_xwrite(chip, xval);
		udelay (20);
		if (snd_via82xx_codec_valid(chip, ac97->num) >= 0) {
			/* small settle delay before fetching the data */
			udelay(25);
			val = snd_via82xx_codec_xread(chip);
			break;
		}
	}
	return val & 0xffff;
}
/* Put one SGD channel into a known idle state: terminate any transfer,
 * pulse the (undocumented) reset bit, then disable and ack interrupts.
 */
static void snd_via82xx_channel_reset(struct via82xx_modem *chip, struct viadev *viadev)
{
	outb(VIA_REG_CTRL_PAUSE | VIA_REG_CTRL_TERMINATE | VIA_REG_CTRL_RESET,
	     VIADEV_REG(viadev, OFFSET_CONTROL));
	/* read back -- presumably to flush the posted write before the
	 * delay; TODO confirm
	 */
	inb(VIADEV_REG(viadev, OFFSET_CONTROL));
	udelay(50);
	/* disable interrupts */
	outb(0x00, VIADEV_REG(viadev, OFFSET_CONTROL));
	/* clear interrupts */
	outb(0x03, VIADEV_REG(viadev, OFFSET_STATUS));
	outb(0x00, VIADEV_REG(viadev, OFFSET_TYPE)); /* for via686 */
	// outl(0, VIADEV_REG(viadev, OFFSET_CURR_PTR));
	viadev->lastpos = 0;
}
/*
* Interrupt handler
*/
/* Interrupt handler: check the shared SGD status shadow first, then
 * handle and ack the per-stream status flags, signalling a period
 * elapsed for each running substream.
 */
static irqreturn_t snd_via82xx_interrupt(int irq, void *dev_id)
{
	struct via82xx_modem *chip = dev_id;
	unsigned int status;
	unsigned int i;

	status = inl(VIAREG(chip, SGD_SHADOW));
	/* Not ours: none of the modem channels raised an interrupt. */
	if (! (status & chip->intr_mask)) {
		return IRQ_NONE;
	}
// _skip_sgd:

	/* check status for each stream */
	spin_lock(&chip->reg_lock);
	for (i = 0; i < chip->num_devs; i++) {
		struct viadev *viadev = &chip->devs[i];
		unsigned char c_status = inb(VIADEV_REG(viadev, OFFSET_STATUS));
		c_status &= (VIA_REG_STAT_EOL|VIA_REG_STAT_FLAG|VIA_REG_STAT_STOPPED);
		if (! c_status)
			continue;
		if (viadev->substream && viadev->running) {
			/* Drop the lock across the period callback --
			 * NOTE(review): presumably because it can re-enter
			 * our PCM callbacks which take reg_lock; confirm.
			 */
			spin_unlock(&chip->reg_lock);
			snd_pcm_period_elapsed(viadev->substream);
			spin_lock(&chip->reg_lock);
		}
		outb(c_status, VIADEV_REG(viadev, OFFSET_STATUS)); /* ack */
	}
	spin_unlock(&chip->reg_lock);
	return IRQ_HANDLED;
}
/*
* PCM callbacks
*/
/*
* trigger callback
*/
/* PCM trigger callback: translate the ALSA trigger command into a
 * channel control register value and update the running state.  A STOP
 * additionally resets the channel.  Returns 0 or -EINVAL for an
 * unsupported command.
 */
static int snd_via82xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	unsigned char ctrl;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		ctrl = VIA_REG_CTRL_START;
		viadev->running = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		ctrl = VIA_REG_CTRL_TERMINATE;
		viadev->running = 0;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ctrl = VIA_REG_CTRL_PAUSE;
		viadev->running = 0;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/* writing 0 releases the pause bit */
		ctrl = 0;
		viadev->running = 1;
		break;
	default:
		return -EINVAL;
	}

	outb(ctrl, VIADEV_REG(viadev, OFFSET_CONTROL));
	if (cmd == SNDRV_PCM_TRIGGER_STOP)
		snd_via82xx_channel_reset(chip, viadev);
	return 0;
}
/*
* pointer callbacks
*/
/*
* calculate the linear position at the given sg-buffer index and the rest count
*/
#define check_invalid_pos(viadev,pos) \
((pos) < viadev->lastpos && ((pos) >= viadev->bufsize2 ||\
viadev->lastpos < viadev->bufsize2))
/* Convert the (descriptor index, remaining count) pair reported by the
 * SGD engine into a linear byte position inside the ring buffer.  The
 * hardware occasionally reports bogus values, so the result is sanity
 * checked against the last known position and patched up if needed.
 */
static inline unsigned int calc_linear_pos(struct viadev *viadev, unsigned int idx,
					   unsigned int count)
{
	unsigned int size, res;

	size = viadev->idx_table[idx].size;
	res = viadev->idx_table[idx].offset + size - count;

	/* check the validity of the calculated position */
	if (size < count) {
		snd_printd(KERN_ERR "invalid via82xx_cur_ptr (size = %d, count = %d)\n",
			   (int)size, (int)count);
		res = viadev->lastpos;
	} else if (check_invalid_pos(viadev, res)) {
#ifdef POINTER_DEBUG
		printk(KERN_DEBUG "fail: idx = %i/%i, lastpos = 0x%x, "
		       "bufsize2 = 0x%x, offsize = 0x%x, size = 0x%x, "
		       "count = 0x%x\n", idx, viadev->tbl_entries, viadev->lastpos,
		       viadev->bufsize2, viadev->idx_table[idx].offset,
		       viadev->idx_table[idx].size, count);
#endif
		/* NOTE(review): this branch looks unreachable -- the
		 * "size < count" case is already handled above.
		 */
		if (count && size < count) {
			snd_printd(KERN_ERR "invalid via82xx_cur_ptr, "
				   "using last valid pointer\n");
			res = viadev->lastpos;
		} else {
			if (! count)
				/* bogus count 0 on the DMA boundary? */
				res = viadev->idx_table[idx].offset;
			else
				/* count register returns full size
				 * when end of buffer is reached
				 */
				res = viadev->idx_table[idx].offset + size;
			if (check_invalid_pos(viadev, res)) {
				snd_printd(KERN_ERR "invalid via82xx_cur_ptr (2), "
					   "using last valid pointer\n");
				res = viadev->lastpos;
			}
		}
	}
	viadev->lastpos = res; /* remember the last position */
	/* wrap back into the buffer */
	if (res >= viadev->bufsize)
		res -= viadev->bufsize;
	return res;
}
/*
* get the current pointer on via686
*/
/*
 * get the current pointer on via686
 *
 * The 686 has no current-index register, so the sg-table index is derived
 * from the CURR_PTR address register and combined with the remaining-byte
 * counter via calc_linear_pos().
 */
static snd_pcm_uframes_t snd_via686_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	unsigned int idx, ptr, count, res;

	if (snd_BUG_ON(!viadev->tbl_entries))
		return 0;
	/* inactive channel: report position 0 */
	if (!(inb(VIADEV_REG(viadev, OFFSET_STATUS)) & VIA_REG_STAT_ACTIVE))
		return 0;

	spin_lock(&chip->reg_lock);
	count = inl(VIADEV_REG(viadev, OFFSET_CURR_COUNT)) & 0xffffff;
	/* The via686a does not have the current index register,
	 * so we need to calculate the index from CURR_PTR.
	 */
	ptr = inl(VIADEV_REG(viadev, OFFSET_CURR_PTR));
	if (ptr <= (unsigned int)viadev->table.addr)
		idx = 0;
	else /* CURR_PTR holds the address + 8 */
		idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) %
			viadev->tbl_entries;
	res = calc_linear_pos(viadev, idx, count);
	spin_unlock(&chip->reg_lock);

	return bytes_to_frames(substream->runtime, res);
}
/*
* hw_params callback:
* allocate the buffer and build up the buffer description table
*/
/*
 * hw_params callback: allocate the DMA buffer, build the scatter-gather
 * descriptor table for it, and program the modem codec's line rate/level
 * registers from the requested sample rate.
 */
static int snd_via82xx_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	int err;

	err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
	if (err < 0)
		return err;
	/* build the per-period descriptor table over the new buffer */
	err = build_via_table(viadev, substream, chip->pci,
			      params_periods(hw_params),
			      params_period_bytes(hw_params));
	if (err < 0)
		return err;
	snd_ac97_write(chip->ac97, AC97_LINE1_RATE, params_rate(hw_params));
	snd_ac97_write(chip->ac97, AC97_LINE1_LEVEL, 0);
	return 0;
}
/*
* hw_free callback:
* clean up the buffer description table and release the buffer
*/
/*
 * hw_free callback: tear down the descriptor table, then give the PCM
 * pages back to the ALSA allocator.
 */
static int snd_via82xx_hw_free(struct snd_pcm_substream *substream)
{
	struct viadev *dev = substream->runtime->private_data;
	struct via82xx_modem *modem = snd_pcm_substream_chip(substream);

	clean_via_table(dev, substream, modem->pci);
	snd_pcm_lib_free_pages(substream);
	return 0;
}
/*
* set up the table pointer
*/
/*
 * set up the table pointer: write the DMA address of the sg descriptor
 * table into the channel's TABLE_PTR register, bracketed by codec-ready
 * waits with a short delay in between.
 */
static void snd_via82xx_set_table_ptr(struct via82xx_modem *chip, struct viadev *viadev)
{
	snd_via82xx_codec_ready(chip, chip->ac97_secondary);
	outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR));
	udelay(20);
	snd_via82xx_codec_ready(chip, chip->ac97_secondary);
}
/*
* prepare callback for playback and capture
*/
/*
 * prepare callback for playback and capture: reset the channel, program
 * the descriptor table pointer (ordering matters: it must follow the
 * reset) and enable autostart plus EOL/FLAG interrupts for the channel.
 */
static int snd_via82xx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;

	snd_via82xx_channel_reset(chip, viadev);
	/* this must be set after channel_reset */
	snd_via82xx_set_table_ptr(chip, viadev);
	outb(VIA_REG_TYPE_AUTOSTART|VIA_REG_TYPE_INT_EOL|VIA_REG_TYPE_INT_FLAG,
	     VIADEV_REG(viadev, OFFSET_TYPE));
	return 0;
}
/*
* pcm hardware definition, identical for both playback and capture
*/
/*
 * pcm hardware definition, identical for both playback and capture:
 * mono, U8/S16LE, 8-16 kHz (KNOT rates are constrained to a fixed list
 * in the open callback), up to 128 KB buffers in at least two periods.
 */
static struct snd_pcm_hardware snd_via82xx_hw =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* SNDRV_PCM_INFO_RESUME | */
				 SNDRV_PCM_INFO_PAUSE),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT,
	.rate_min =		8000,
	.rate_max =		16000,
	.channels_min =		1,
	.channels_max =		1,
	.buffer_bytes_max =	128 * 1024,
	.period_bytes_min =	32,
	.period_bytes_max =	128 * 1024,
	.periods_min =		2,
	.periods_max =		VIA_TABLE_SIZE / 2,
	.fifo_size =		0,
};
/*
* open callback skeleton
*/
/*
 * Common open helper: install the hardware description, restrict the rate
 * to the discrete list supported by the modem codec, force integer
 * periods, and attach the viadev to the runtime.
 */
static int snd_via82xx_modem_pcm_open(struct via82xx_modem *chip, struct viadev *viadev,
				      struct snd_pcm_substream *substream)
{
	static unsigned int rates[] = { 8000, 9600, 12000, 16000 };
	static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
		.count = ARRAY_SIZE(rates),
		.list = rates,
		.mask = 0,
	};
	struct snd_pcm_runtime *runtime = substream->runtime;
	int ret;

	runtime->hw = snd_via82xx_hw;
	ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
					 &hw_constraints_rates);
	if (ret < 0)
		return ret;
	/* we may remove following constaint when we modify table entries
	   in interrupt */
	ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;
	runtime->private_data = viadev;
	viadev->substream = substream;
	return 0;
}
/*
* open callback for playback
*/
/*
 * open callback for playback: pick the playback viadev for this substream
 * and delegate to the common open helper.
 */
static int snd_via82xx_playback_open(struct snd_pcm_substream *substream)
{
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = &chip->devs[chip->playback_devno + substream->number];

	return snd_via82xx_modem_pcm_open(chip, viadev, substream);
}
/*
* open callback for capture
*/
/*
 * open callback for capture: pick the capture viadev (indexed by the pcm
 * device number) and delegate to the common open helper.
 */
static int snd_via82xx_capture_open(struct snd_pcm_substream *substream)
{
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = &chip->devs[chip->capture_devno + substream->pcm->device];

	return snd_via82xx_modem_pcm_open(chip, viadev, substream);
}
/*
* close callback
*/
/*
 * close callback: detach the substream from its viadev.
 */
static int snd_via82xx_pcm_close(struct snd_pcm_substream *substream)
{
	struct viadev *dev = substream->runtime->private_data;

	dev->substream = NULL;
	return 0;
}
/* via686 playback callbacks */
static struct snd_pcm_ops snd_via686_playback_ops = {
	.open =		snd_via82xx_playback_open,
	.close =	snd_via82xx_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_via82xx_hw_params,
	.hw_free =	snd_via82xx_hw_free,
	.prepare =	snd_via82xx_pcm_prepare,
	.trigger =	snd_via82xx_pcm_trigger,
	.pointer =	snd_via686_pcm_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};

/* via686 capture callbacks -- identical to playback except for .open */
static struct snd_pcm_ops snd_via686_capture_ops = {
	.open =		snd_via82xx_capture_open,
	.close =	snd_via82xx_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_via82xx_hw_params,
	.hw_free =	snd_via82xx_hw_free,
	.prepare =	snd_via82xx_pcm_prepare,
	.trigger =	snd_via82xx_pcm_trigger,
	.pointer =	snd_via686_pcm_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};
static void init_viadev(struct via82xx_modem *chip, int idx, unsigned int reg_offset,
int direction)
{
chip->devs[idx].reg_offset = reg_offset;
chip->devs[idx].direction = direction;
chip->devs[idx].port = chip->port + reg_offset;
}
/*
* create a pcm instance for via686a/b
*/
/*
 * create a pcm instance for via686a/b: one playback (modem out) and one
 * capture (modem in) substream, with sg-buffer preallocation.
 */
static int snd_via686_pcm_new(struct via82xx_modem *chip)
{
	struct snd_pcm *pcm;
	int ret;

	chip->playback_devno = 0;
	chip->capture_devno = 1;
	chip->num_devs = 2;
	chip->intr_mask = 0x330000; /* FLAGS | EOL for MR, MW */

	ret = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm);
	if (ret < 0)
		return ret;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_via686_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via686_capture_ops);
	pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
	pcm->private_data = chip;
	strcpy(pcm->name, chip->card->shortname);
	chip->pcms[0] = pcm;

	init_viadev(chip, 0, VIA_REG_MO_STATUS, 0);
	init_viadev(chip, 1, VIA_REG_MI_STATUS, 1);

	ret = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
						    snd_dma_pci_data(chip->pci),
						    64*1024, 128*1024);
	if (ret < 0)
		return ret;
	return 0;
}
/*
* Mixer part
*/
/* ac97 bus private_free hook: drop the cached bus pointer on teardown */
static void snd_via82xx_mixer_free_ac97_bus(struct snd_ac97_bus *bus)
{
	struct via82xx_modem *chip = bus->private_data;
	chip->ac97_bus = NULL;
}

/* ac97 codec private_free hook: drop the cached codec pointer on teardown */
static void snd_via82xx_mixer_free_ac97(struct snd_ac97 *ac97)
{
	struct via82xx_modem *chip = ac97->private_data;
	chip->ac97 = NULL;
}
/*
 * Create the AC'97 bus and the modem codec mixer.  The codec is marked
 * SKIP_AUDIO (modem-only) with power saving enabled; its codec number is
 * the secondary slot detected during chip init.
 */
static int snd_via82xx_mixer_new(struct via82xx_modem *chip)
{
	static struct snd_ac97_bus_ops ops = {
		.write = snd_via82xx_codec_write,
		.read = snd_via82xx_codec_read,
		.wait = snd_via82xx_codec_wait,
	};
	struct snd_ac97_template ac97;
	int ret;

	ret = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus);
	if (ret < 0)
		return ret;
	chip->ac97_bus->private_free = snd_via82xx_mixer_free_ac97_bus;
	chip->ac97_bus->clock = chip->ac97_clock;

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = chip;
	ac97.private_free = snd_via82xx_mixer_free_ac97;
	ac97.pci = chip->pci;
	ac97.scaps = AC97_SCAP_SKIP_AUDIO | AC97_SCAP_POWER_SAVE;
	ac97.num = chip->ac97_secondary;

	ret = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97);
	if (ret < 0)
		return ret;
	return 0;
}
/*
* proc interface
*/
/*
 * proc read callback: dump the card name and the register block
 * (0x00-0x9c, one dword per line).
 */
static void snd_via82xx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
{
	struct via82xx_modem *chip = entry->private_data;
	int reg;

	snd_iprintf(buffer, "%s\n\n", chip->card->longname);
	for (reg = 0; reg < 0xa0; reg += 4)
		snd_iprintf(buffer, "%02x: %08x\n", reg, inl(chip->port + reg));
}
/*
 * Register the "via82xx" proc entry; silently skipped if the entry
 * cannot be created.
 */
static void snd_via82xx_proc_init(struct via82xx_modem *chip)
{
	struct snd_info_entry *entry;

	if (snd_card_proc_new(chip->card, "via82xx", &entry) == 0)
		snd_info_set_text_ops(entry, chip, snd_via82xx_proc_read);
}
/*
*
*/
/*
 * Hardware init: bring the MC97 controller and the AC-link out of reset,
 * wait (up to 750 ms) for the primary codec to report ready, probe for a
 * secondary codec, and mask all GPI interrupts.  Always returns 0; a
 * codec that never becomes ready is only logged.
 */
static int snd_via82xx_chip_init(struct via82xx_modem *chip)
{
	unsigned int val;
	unsigned long end_time;
	unsigned char pval;

	/* enable the MC97 controller if it is not initialized yet */
	pci_read_config_byte(chip->pci, VIA_MC97_CTRL, &pval);
	if((pval & VIA_MC97_CTRL_INIT) != VIA_MC97_CTRL_INIT) {
		pci_write_config_byte(chip->pci, 0x44, pval|VIA_MC97_CTRL_INIT);
		udelay(100);
	}

	pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval);
	if (! (pval & VIA_ACLINK_C00_READY)) { /* codec not ready? */
		/* deassert ACLink reset, force SYNC */
		pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL,
				      VIA_ACLINK_CTRL_ENABLE |
				      VIA_ACLINK_CTRL_RESET |
				      VIA_ACLINK_CTRL_SYNC);
		udelay(100);
#if 1 /* FIXME: should we do full reset here for all chip models? */
		pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, 0x00);
		udelay(100);
#else
		/* deassert ACLink reset, force SYNC (warm AC'97 reset) */
		pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL,
				      VIA_ACLINK_CTRL_RESET|VIA_ACLINK_CTRL_SYNC);
		udelay(2);
#endif
		/* ACLink on, deassert ACLink reset, VSR, SGD data out */
		pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT);
		udelay(100);
	}

	/* make sure the full set of init bits ended up applied */
	pci_read_config_byte(chip->pci, VIA_ACLINK_CTRL, &pval);
	if ((pval & VIA_ACLINK_CTRL_INIT) != VIA_ACLINK_CTRL_INIT) {
		/* ACLink on, deassert ACLink reset, VSR, SGD data out */
		pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT);
		udelay(100);
	}

	/* wait until codec ready */
	end_time = jiffies + msecs_to_jiffies(750);
	do {
		pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval);
		if (pval & VIA_ACLINK_C00_READY) /* primary codec ready */
			break;
		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY)
		snd_printk(KERN_ERR "AC'97 codec is not ready [0x%x]\n", val);

	/* probe the secondary codec by issuing a read with the
	 * secondary-valid bit set.  NOTE(review): the write is issued
	 * twice here, before and after computing the timeout -- kept
	 * as-is; presumably a belt-and-braces retry, confirm before
	 * removing. */
	snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ |
				 VIA_REG_AC97_SECONDARY_VALID |
				 (VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT));
	end_time = jiffies + msecs_to_jiffies(750);
	snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ |
				 VIA_REG_AC97_SECONDARY_VALID |
				 (VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT));
	do {
		if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_SECONDARY_VALID) {
			chip->ac97_secondary = 1;
			goto __ac97_ok2;
		}
		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));
	/* This is ok, the most of motherboards have only one codec */

 __ac97_ok2:
	/* route FM trap to IRQ, disable FM trap */
	// pci_write_config_byte(chip->pci, VIA_FM_NMI_CTRL, 0);
	/* disable all GPI interrupts */
	outl(0, VIAREG(chip, GPI_INTR));
	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
/*
 * Suspend: announce D3hot, stop both PCM devices, quiesce every SGD
 * channel, drain in-flight interrupts, suspend the codec and power the
 * PCI device down.
 */
static int snd_via82xx_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	struct snd_card *card = dev_get_drvdata(dev);
	struct via82xx_modem *chip = card->private_data;
	int i;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	for (i = 0; i < 2; i++)
		snd_pcm_suspend_all(chip->pcms[i]);
	for (i = 0; i < chip->num_devs; i++)
		snd_via82xx_channel_reset(chip, &chip->devs[i]);
	synchronize_irq(chip->irq);
	snd_ac97_suspend(chip->ac97);
	/* NOTE(review): the device is disabled before its config state is
	 * saved; the usual order is save-then-disable -- confirm against
	 * the PCI PM documentation before reordering */
	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}
/*
 * Resume: power the PCI device back up, re-run the chip init, resume the
 * codec, reset every SGD channel and announce D0.  If the device cannot
 * be re-enabled the whole card is disconnected.
 */
static int snd_via82xx_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	struct snd_card *card = dev_get_drvdata(dev);
	struct via82xx_modem *chip = card->private_data;
	int i;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	if (pci_enable_device(pci) < 0) {
		printk(KERN_ERR "via82xx-modem: pci_enable_device failed, "
		       "disabling device\n");
		snd_card_disconnect(card);
		return -EIO;
	}
	pci_set_master(pci);

	snd_via82xx_chip_init(chip);
	snd_ac97_resume(chip->ac97);

	/* leave every channel in the reset state */
	for (i = 0; i < chip->num_devs; i++)
		snd_via82xx_channel_reset(chip, &chip->devs[i]);

	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
static SIMPLE_DEV_PM_OPS(snd_via82xx_pm, snd_via82xx_suspend, snd_via82xx_resume);
#define SND_VIA82XX_PM_OPS &snd_via82xx_pm
#else
#define SND_VIA82XX_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
/*
 * Release everything owned by the chip instance.  Safe to call from any
 * error path of snd_via82xx_create(): chip->irq == -1 means the irq (and
 * the channel state) was never set up, so only the regions, the PCI
 * device and the structure itself are released.
 */
static int snd_via82xx_free(struct via82xx_modem *chip)
{
	unsigned int i;

	if (chip->irq >= 0) {
		/* quiesce every channel before giving the irq back */
		for (i = 0; i < chip->num_devs; i++)
			snd_via82xx_channel_reset(chip, &chip->devs[i]);
		free_irq(chip->irq, chip);
	}
	pci_release_regions(chip->pci);
	pci_disable_device(chip->pci);
	kfree(chip);
	return 0;
}
/* snd_device dev_free hook: forwards to snd_via82xx_free() */
static int snd_via82xx_dev_free(struct snd_device *device)
{
	struct via82xx_modem *chip = device->device_data;
	return snd_via82xx_free(chip);
}
/*
 * Allocate and initialize a via82xx_modem chip instance: enable the PCI
 * device, allocate the structure, claim the I/O regions and the shared
 * irq, run the hardware init, and register the chip as an ALSA low-level
 * device so that it is ultimately freed through snd_via82xx_dev_free().
 * Each error path releases exactly what was acquired up to that point.
 *
 * NOTE(review): the chip_type and revision parameters are currently
 * unused in this function body.
 */
static int snd_via82xx_create(struct snd_card *card,
			      struct pci_dev *pci,
			      int chip_type,
			      int revision,
			      unsigned int ac97_clock,
			      struct via82xx_modem **r_via)
{
	struct via82xx_modem *chip;
	int err;
	static struct snd_device_ops ops = {
		.dev_free =	snd_via82xx_dev_free,
	};

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	spin_lock_init(&chip->reg_lock);
	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;	/* -1 = "irq not requested yet" for snd_via82xx_free() */

	if ((err = pci_request_regions(pci, card->driver)) < 0) {
		/* regions not held yet: cannot use snd_via82xx_free() here */
		kfree(chip);
		pci_disable_device(pci);
		return err;
	}
	chip->port = pci_resource_start(pci, 0);
	if (request_irq(pci->irq, snd_via82xx_interrupt, IRQF_SHARED,
			KBUILD_MODNAME, chip)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_via82xx_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;

	/* accept only plausible AC'97 clock overrides */
	if (ac97_clock >= 8000 && ac97_clock <= 48000)
		chip->ac97_clock = ac97_clock;
	synchronize_irq(chip->irq);

	if ((err = snd_via82xx_chip_init(chip)) < 0) {
		snd_via82xx_free(chip);
		return err;
	}

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_via82xx_free(chip);
		return err;
	}

	/* The 8233 ac97 controller does not implement the master bit
	 * in the pci command register. IMHO this is a violation of the PCI spec.
	 * We call pci_set_master here because it does not hurt. */
	pci_set_master(pci);

	snd_card_set_dev(card, &pci->dev);

	*r_via = chip;
	return 0;
}
/*
 * PCI probe: create the card, the chip, the mixer and the PCM device,
 * then register the card.  'index', 'id' and 'ac97_clock' are presumably
 * module parameters declared above this excerpt -- TODO confirm.
 */
static int snd_via82xx_probe(struct pci_dev *pci,
			     const struct pci_device_id *pci_id)
{
	struct snd_card *card;
	struct via82xx_modem *chip;
	int chip_type = 0, card_type;
	unsigned int i;
	int err;

	err = snd_card_create(index, id, THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	card_type = pci_id->driver_data;
	switch (card_type) {
	case TYPE_CARD_VIA82XX_MODEM:
		strcpy(card->driver, "VIA82XX-MODEM");
		sprintf(card->shortname, "VIA 82XX modem");
		break;
	default:
		snd_printk(KERN_ERR "invalid card type %d\n", card_type);
		err = -EINVAL;
		goto __error;
	}

	if ((err = snd_via82xx_create(card, pci, chip_type, pci->revision,
				      ac97_clock, &chip)) < 0)
		goto __error;
	card->private_data = chip;
	if ((err = snd_via82xx_mixer_new(chip)) < 0)
		goto __error;

	if ((err = snd_via686_pcm_new(chip)) < 0 )
		goto __error;

	/* disable interrupts */
	for (i = 0; i < chip->num_devs; i++)
		snd_via82xx_channel_reset(chip, &chip->devs[i]);

	sprintf(card->longname, "%s at 0x%lx, irq %d",
		card->shortname, chip->port, chip->irq);

	snd_via82xx_proc_init(chip);

	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}
	pci_set_drvdata(pci, card);
	return 0;

 __error:
	snd_card_free(card);
	return err;
}
/*
 * PCI remove: release the card and clear the driver data pointer.
 */
static void snd_via82xx_remove(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);

	snd_card_free(card);
	pci_set_drvdata(pci, NULL);
}
/* PCI driver glue; SND_VIA82XX_PM_OPS expands to NULL without CONFIG_PM_SLEEP */
static struct pci_driver via82xx_modem_driver = {
	.name = KBUILD_MODNAME,
	.id_table = snd_via82xx_modem_ids,
	.probe = snd_via82xx_probe,
	.remove = snd_via82xx_remove,
	.driver = {
		.pm = SND_VIA82XX_PM_OPS,
	},
};
module_pci_driver(via82xx_modem_driver);
| gpl-2.0 |
imasaru/kernel_zte_cymbalT | sound/pci/cs46xx/cs46xx.c | 2240 | 5035 | /*
* The driver for the Cirrus Logic's Sound Fusion CS46XX based soundcards
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
NOTES:
- sometimes the sound is metallic and sibilant, unloading and
reloading the module may solve this.
*/
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/module.h>
#include <sound/core.h>
#include "cs46xx.h"
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Cirrus Logic Sound Fusion CS46XX");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Cirrus Logic,Sound Fusion (CS4280)},"
"{Cirrus Logic,Sound Fusion (CS4610)},"
"{Cirrus Logic,Sound Fusion (CS4612)},"
"{Cirrus Logic,Sound Fusion (CS4615)},"
"{Cirrus Logic,Sound Fusion (CS4622)},"
"{Cirrus Logic,Sound Fusion (CS4624)},"
"{Cirrus Logic,Sound Fusion (CS4630)}}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */
static bool external_amp[SNDRV_CARDS];
static bool thinkpad[SNDRV_CARDS];
static bool mmap_valid[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the CS46xx soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the CS46xx soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable CS46xx soundcard.");
module_param_array(external_amp, bool, NULL, 0444);
MODULE_PARM_DESC(external_amp, "Force to enable external amplifer.");
module_param_array(thinkpad, bool, NULL, 0444);
MODULE_PARM_DESC(thinkpad, "Force to enable Thinkpad's CLKRUN control.");
module_param_array(mmap_valid, bool, NULL, 0444);
MODULE_PARM_DESC(mmap_valid, "Support OSS mmap.");
static DEFINE_PCI_DEVICE_TABLE(snd_cs46xx_ids) = {
{ PCI_VDEVICE(CIRRUS, 0x6001), 0, }, /* CS4280 */
{ PCI_VDEVICE(CIRRUS, 0x6003), 0, }, /* CS4612 */
{ PCI_VDEVICE(CIRRUS, 0x6004), 0, }, /* CS4615 */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, snd_cs46xx_ids);
/*
 * PCI probe: create the card and chip, register the PCM devices, mixer,
 * MIDI and DSP, then register the card.  The static 'dev' counter walks
 * the module-parameter arrays (index/id/enable/...) one slot per probed
 * device.
 *
 * Error handling uses a single goto-cleanup label instead of repeating
 * "snd_card_free(card); return err;" at every call site; behavior is
 * unchanged.
 */
static int snd_card_cs46xx_probe(struct pci_dev *pci,
				 const struct pci_device_id *pci_id)
{
	static int dev;
	struct snd_card *card;
	struct snd_cs46xx *chip;
	int err;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0)
		return err;
	err = snd_cs46xx_create(card, pci,
				external_amp[dev], thinkpad[dev], &chip);
	if (err < 0)
		goto error;
	card->private_data = chip;
	chip->accept_valid = mmap_valid[dev];
	err = snd_cs46xx_pcm(chip, 0, NULL);
	if (err < 0)
		goto error;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	err = snd_cs46xx_pcm_rear(chip, 1, NULL);
	if (err < 0)
		goto error;
	err = snd_cs46xx_pcm_iec958(chip, 2, NULL);
	if (err < 0)
		goto error;
#endif
	err = snd_cs46xx_mixer(chip, 2);
	if (err < 0)
		goto error;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	if (chip->nr_ac97_codecs == 2) {
		/* center/LFE PCM only exists with a second codec */
		err = snd_cs46xx_pcm_center_lfe(chip, 3, NULL);
		if (err < 0)
			goto error;
	}
#endif
	err = snd_cs46xx_midi(chip, 0, NULL);
	if (err < 0)
		goto error;
	err = snd_cs46xx_start_dsp(chip);
	if (err < 0)
		goto error;

	snd_cs46xx_gameport(chip);

	strcpy(card->driver, "CS46xx");
	strcpy(card->shortname, "Sound Fusion CS46xx");
	sprintf(card->longname, "%s at 0x%lx/0x%lx, irq %i",
		card->shortname,
		chip->ba0_addr,
		chip->ba1_addr,
		chip->irq);

	err = snd_card_register(card);
	if (err < 0)
		goto error;

	pci_set_drvdata(pci, card);
	dev++;
	return 0;

error:
	/* snd_card_free() also tears down everything attached so far */
	snd_card_free(card);
	return err;
}
/* PCI remove: release the card and clear the driver data pointer. */
static void snd_card_cs46xx_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
	pci_set_drvdata(pci, NULL);
}
/* PCI driver glue; PM ops are only wired up under CONFIG_PM_SLEEP */
static struct pci_driver cs46xx_driver = {
	.name = KBUILD_MODNAME,
	.id_table = snd_cs46xx_ids,
	.probe = snd_card_cs46xx_probe,
	.remove = snd_card_cs46xx_remove,
#ifdef CONFIG_PM_SLEEP
	.driver = {
		.pm = &snd_cs46xx_pm,
	},
#endif
};
| gpl-2.0 |
imrehg/vab820-kernel-bsp | drivers/usb/gadget/f_uvc.c | 2752 | 17625 | /*
* uvc_gadget.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include "uvc.h"
unsigned int uvc_gadget_trace_param;
/* --------------------------------------------------------------------------
* Function descriptors
*/
/* string IDs are assigned dynamically */
#define UVC_STRING_ASSOCIATION_IDX 0
#define UVC_STRING_CONTROL_IDX 1
#define UVC_STRING_STREAMING_IDX 2
/* en-US USB string table; the string IDs are assigned dynamically */
static struct usb_string uvc_en_us_strings[] = {
	[UVC_STRING_ASSOCIATION_IDX].s = "UVC Camera",
	[UVC_STRING_CONTROL_IDX].s = "Video Control",
	[UVC_STRING_STREAMING_IDX].s = "Video Streaming",
	{ }
};

static struct usb_gadget_strings uvc_stringtab = {
	.language = 0x0409,	/* en-us */
	.strings = uvc_en_us_strings,
};

/* NULL-terminated list of per-language string tables */
static struct usb_gadget_strings *uvc_function_strings[] = {
	&uvc_stringtab,
	NULL,
};
#define UVC_INTF_VIDEO_CONTROL 0
#define UVC_INTF_VIDEO_STREAMING 1
/* Interface Association Descriptor: groups the VideoControl and
 * VideoStreaming interfaces into one UVC function. */
static struct usb_interface_assoc_descriptor uvc_iad __initdata = {
	.bLength		= sizeof(uvc_iad),
	.bDescriptorType	= USB_DT_INTERFACE_ASSOCIATION,
	.bFirstInterface	= 0,
	.bInterfaceCount	= 2,
	.bFunctionClass		= USB_CLASS_VIDEO,
	.bFunctionSubClass	= UVC_SC_VIDEO_INTERFACE_COLLECTION,
	.bFunctionProtocol	= 0x00,
	.iFunction		= 0,
};

/* VideoControl interface: one (interrupt) endpoint */
static struct usb_interface_descriptor uvc_control_intf __initdata = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= UVC_INTF_VIDEO_CONTROL,
	.bAlternateSetting	= 0,
	.bNumEndpoints		= 1,
	.bInterfaceClass	= USB_CLASS_VIDEO,
	.bInterfaceSubClass	= UVC_SC_VIDEOCONTROL,
	.bInterfaceProtocol	= 0x00,
	.iInterface		= 0,
};

/* interrupt IN endpoint of the VideoControl interface */
static struct usb_endpoint_descriptor uvc_control_ep __initdata = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize		= cpu_to_le16(16),
	.bInterval		= 8,
};

/* class-specific companion descriptor for the control endpoint */
static struct uvc_control_endpoint_descriptor uvc_control_cs_ep __initdata = {
	.bLength		= UVC_DT_CONTROL_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_CS_ENDPOINT,
	.bDescriptorSubType	= UVC_EP_INTERRUPT,
	.wMaxTransferSize	= cpu_to_le16(16),
};

/* VideoStreaming alt 0: zero-bandwidth setting, no endpoints */
static struct usb_interface_descriptor uvc_streaming_intf_alt0 __initdata = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= UVC_INTF_VIDEO_STREAMING,
	.bAlternateSetting	= 0,
	.bNumEndpoints		= 0,
	.bInterfaceClass	= USB_CLASS_VIDEO,
	.bInterfaceSubClass	= UVC_SC_VIDEOSTREAMING,
	.bInterfaceProtocol	= 0x00,
	.iInterface		= 0,
};

/* VideoStreaming alt 1: one isochronous IN endpoint carrying video data */
static struct usb_interface_descriptor uvc_streaming_intf_alt1 __initdata = {
	.bLength		= USB_DT_INTERFACE_SIZE,
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= UVC_INTF_VIDEO_STREAMING,
	.bAlternateSetting	= 1,
	.bNumEndpoints		= 1,
	.bInterfaceClass	= USB_CLASS_VIDEO,
	.bInterfaceSubClass	= UVC_SC_VIDEOSTREAMING,
	.bInterfaceProtocol	= 0x00,
	.iInterface		= 0,
};

/* isochronous IN video endpoint -- deliberately not __initdata, since it
 * is also passed to usb_ep_enable() at run time in uvc_function_set_alt() */
static struct usb_endpoint_descriptor uvc_streaming_ep = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_IN,
	.bmAttributes		= USB_ENDPOINT_XFER_ISOC,
	.wMaxPacketSize		= cpu_to_le16(512),
	.bInterval		= 1,
};

/* standard streaming-interface descriptor lists for full and high speed;
 * NOTE(review): both lists are currently identical */
static const struct usb_descriptor_header * const uvc_fs_streaming[] = {
	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
	(struct usb_descriptor_header *) &uvc_streaming_ep,
	NULL,
};

static const struct usb_descriptor_header * const uvc_hs_streaming[] = {
	(struct usb_descriptor_header *) &uvc_streaming_intf_alt1,
	(struct usb_descriptor_header *) &uvc_streaming_ep,
	NULL,
};
/* --------------------------------------------------------------------------
* Control requests
*/
/*
 * ep0 completion handler: when this completion finishes a host-to-device
 * data stage flagged by event_setup_out, forward the received bytes to
 * user space as a UVC_EVENT_DATA v4l2 event.
 */
static void
uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_device *uvc = req->context;
	struct v4l2_event v4l2_event;
	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;

	if (uvc->event_setup_out) {
		uvc->event_setup_out = 0;

		memset(&v4l2_event, 0, sizeof(v4l2_event));
		v4l2_event.type = UVC_EVENT_DATA;
		uvc_event->data.length = req->actual;
		memcpy(&uvc_event->data.data, req->buf, req->actual);
		v4l2_event_queue(uvc->vdev, &v4l2_event);
	}
}
/*
 * ep0 setup handler: class-specific requests are forwarded to user space
 * as UVC_EVENT_SETUP v4l2 events; non-class requests and requests larger
 * than UVC_MAX_REQUEST_SIZE are rejected with -EINVAL (stalled).
 */
static int
uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct uvc_device *uvc = to_uvc(f);
	struct v4l2_event v4l2_event;
	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;

	/* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
	 *	ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
	 *	le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
	 */
	if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
		INFO(f->config->cdev, "invalid request type\n");
		return -EINVAL;
	}

	/* Stall too big requests. */
	if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE)
		return -EINVAL;

	memset(&v4l2_event, 0, sizeof(v4l2_event));
	v4l2_event.type = UVC_EVENT_SETUP;
	memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
	v4l2_event_queue(uvc->vdev, &v4l2_event);

	return 0;
}
/*
 * get_alt: the control interface only has alt 0; the streaming interface
 * reports alt 1 while streaming, alt 0 otherwise.
 */
static int
uvc_function_get_alt(struct usb_function *f, unsigned interface)
{
	struct uvc_device *uvc = to_uvc(f);

	INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);

	if (interface == uvc->control_intf)
		return 0;
	if (interface != uvc->streaming_intf)
		return -EINVAL;
	return uvc->state == UVC_STATE_STREAMING ? 1 : 0;
}
/*
 * set_alt: alternate-setting selection drives the UVC state machine.
 * Control interface: only alt 0 is valid; the first selection after a
 * disconnect emits UVC_EVENT_CONNECT.  Streaming interface: alt 0 stops
 * streaming (UVC_EVENT_STREAMOFF), alt 1 enables the video endpoint and
 * starts streaming (UVC_EVENT_STREAMON).
 */
static int
uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
{
	struct uvc_device *uvc = to_uvc(f);
	struct v4l2_event v4l2_event;
	struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;

	INFO(f->config->cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);

	if (interface == uvc->control_intf) {
		if (alt)
			return -EINVAL;

		if (uvc->state == UVC_STATE_DISCONNECTED) {
			memset(&v4l2_event, 0, sizeof(v4l2_event));
			v4l2_event.type = UVC_EVENT_CONNECT;
			uvc_event->speed = f->config->cdev->gadget->speed;
			v4l2_event_queue(uvc->vdev, &v4l2_event);
			uvc->state = UVC_STATE_CONNECTED;
		}
		return 0;
	}

	if (interface != uvc->streaming_intf)
		return -EINVAL;

	/* TODO
	if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep))
		return alt ? -EINVAL : 0;
	*/
	switch (alt) {
	case 0:
		/* ignore re-selection of the zero-bandwidth setting */
		if (uvc->state != UVC_STATE_STREAMING)
			return 0;

		if (uvc->video.ep)
			usb_ep_disable(uvc->video.ep);

		memset(&v4l2_event, 0, sizeof(v4l2_event));
		v4l2_event.type = UVC_EVENT_STREAMOFF;
		v4l2_event_queue(uvc->vdev, &v4l2_event);

		uvc->state = UVC_STATE_CONNECTED;
		break;
	case 1:
		/* only valid coming from the connected (idle) state */
		if (uvc->state != UVC_STATE_CONNECTED)
			return 0;

		if (uvc->video.ep)
			usb_ep_enable(uvc->video.ep, &uvc_streaming_ep);

		memset(&v4l2_event, 0, sizeof(v4l2_event));
		v4l2_event.type = UVC_EVENT_STREAMON;
		v4l2_event_queue(uvc->vdev, &v4l2_event);

		uvc->state = UVC_STATE_STREAMING;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
uvc_function_disable(struct usb_function *f)
{
struct uvc_device *uvc = to_uvc(f);
struct v4l2_event v4l2_event;
INFO(f->config->cdev, "uvc_function_disable\n");
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_DISCONNECT;
v4l2_event_queue(uvc->vdev, &v4l2_event);
uvc->state = UVC_STATE_DISCONNECTED;
}
/* --------------------------------------------------------------------------
* Connection / disconnection
*/
/*
 * Activate the function (soft-connect); failures are only logged.
 */
void
uvc_function_connect(struct uvc_device *uvc)
{
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	int ret;

	ret = usb_function_activate(&uvc->func);
	if (ret < 0)
		INFO(cdev, "UVC connect failed with %d\n", ret);
}
/*
 * Deactivate the function (soft-disconnect); failures are only logged.
 */
void
uvc_function_disconnect(struct uvc_device *uvc)
{
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	int ret;

	ret = usb_function_deactivate(&uvc->func);
	if (ret < 0)
		INFO(cdev, "UVC disconnect failed with %d\n", ret);
}
/* --------------------------------------------------------------------------
* USB probe and disconnect
*/
/*
 * Allocate and register the v4l2 video device that exposes this gadget
 * function to user space.  Returns 0 on success or a negative errno;
 * ownership of the video_device passes to the v4l2 core (released via
 * video_device_release).
 */
static int
uvc_register_video(struct uvc_device *uvc)
{
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct video_device *video;

	/* TODO reference counting. */
	video = video_device_alloc();
	if (video == NULL)
		return -ENOMEM;

	video->parent = &cdev->gadget->dev;
	video->minor = -1;
	video->fops = &uvc_v4l2_fops;
	video->release = video_device_release;
	/* snprintf guarantees NUL termination; the original strncpy left
	 * the name unterminated when the gadget name filled the buffer */
	snprintf(video->name, sizeof(video->name), "%s", cdev->gadget->name);

	uvc->vdev = video;
	video_set_drvdata(video, uvc);

	return video_register_device(video, VFL_TYPE_GRABBER, -1);
}
/*
 * Copy one USB descriptor into the memory block at 'mem', append a pointer
 * to the copy to the 'dst' descriptor-pointer array, and advance both
 * cursors.  The do/while(0) wrapper makes the macro a single statement so
 * it is safe in unbraced if/else bodies; the original definition carried a
 * stray ';' after "while (0)", which broke exactly that guarantee.
 */
#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \
	do { \
		memcpy(mem, desc, (desc)->bLength); \
		*(dst)++ = mem; \
		mem += (desc)->bLength; \
	} while (0)
/*
 * Copy every descriptor from the NULL-terminated 'src' array into 'mem',
 * recording a pointer to each copy in 'dst' and advancing both cursors.
 */
#define UVC_COPY_DESCRIPTORS(mem, dst, src) \
	do { \
		const struct usb_descriptor_header * const *__src; \
		for (__src = src; *__src; ++__src) { \
			memcpy(mem, *__src, (*__src)->bLength); \
			*dst++ = mem; \
			mem += (*__src)->bLength; \
		} \
	} while (0)
/*
 * uvc_copy_descriptors - build a flat descriptor array for one USB speed
 * @uvc: the UVC function
 * @speed: USB_SPEED_FULL or USB_SPEED_HIGH, selects the streaming set
 *
 * Returns a NULL-terminated array of descriptor pointers whose backing
 * storage (pointer array + descriptor copies) lives in a single
 * allocation, so the caller releases everything with one kfree().
 * Returns NULL on allocation failure.
 */
static struct usb_descriptor_header ** __init
uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
{
	struct uvc_input_header_descriptor *uvc_streaming_header;
	struct uvc_header_descriptor *uvc_control_header;
	const struct uvc_descriptor_header * const *uvc_streaming_cls;
	const struct usb_descriptor_header * const *uvc_streaming_std;
	const struct usb_descriptor_header * const *src;
	struct usb_descriptor_header **dst;
	struct usb_descriptor_header **hdr;
	unsigned int control_size;
	unsigned int streaming_size;
	unsigned int n_desc;
	unsigned int bytes;
	void *mem;

	/* Pick the class and standard streaming descriptor sets for @speed. */
	uvc_streaming_cls = (speed == USB_SPEED_FULL)
		? uvc->desc.fs_streaming : uvc->desc.hs_streaming;
	uvc_streaming_std = (speed == USB_SPEED_FULL)
		? uvc_fs_streaming : uvc_hs_streaming;

	/* Descriptors layout
	 *
	 * uvc_iad
	 * uvc_control_intf
	 * Class-specific UVC control descriptors
	 * uvc_control_ep
	 * uvc_control_cs_ep
	 * uvc_streaming_intf_alt0
	 * Class-specific UVC streaming descriptors
	 * uvc_{fs|hs}_streaming
	 */

	/* Count descriptors and compute their size. */
	control_size = 0;
	streaming_size = 0;
	bytes = uvc_iad.bLength + uvc_control_intf.bLength
	      + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
	      + uvc_streaming_intf_alt0.bLength;
	n_desc = 5;

	for (src = (const struct usb_descriptor_header**)uvc->desc.control; *src; ++src) {
		control_size += (*src)->bLength;
		bytes += (*src)->bLength;
		n_desc++;
	}
	for (src = (const struct usb_descriptor_header**)uvc_streaming_cls; *src; ++src) {
		streaming_size += (*src)->bLength;
		bytes += (*src)->bLength;
		n_desc++;
	}
	for (src = uvc_streaming_std; *src; ++src) {
		bytes += (*src)->bLength;
		n_desc++;
	}

	/* One allocation: pointer array (with NULL sentinel) then the data. */
	mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL);
	if (mem == NULL)
		return NULL;

	hdr = mem;
	dst = mem;
	mem += (n_desc + 1) * sizeof(*src);

	/* Copy the descriptors. */
	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_iad);
	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_intf);

	/* Patch the control header *copy* with the computed totals. */
	uvc_control_header = mem;
	UVC_COPY_DESCRIPTORS(mem, dst,
		(const struct usb_descriptor_header**)uvc->desc.control);
	uvc_control_header->wTotalLength = cpu_to_le16(control_size);
	uvc_control_header->bInCollection = 1;
	uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;

	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep);
	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
	UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);

	/* Patch the streaming header copy likewise. */
	uvc_streaming_header = mem;
	UVC_COPY_DESCRIPTORS(mem, dst,
		(const struct usb_descriptor_header**)uvc_streaming_cls);
	uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size);
	uvc_streaming_header->bEndpointAddress = uvc_streaming_ep.bEndpointAddress;

	UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std);

	*dst = NULL;
	return hdr;
}
/*
 * uvc_function_unbind - release everything acquired by uvc_function_bind
 *
 * Also used as the error-unwind path of uvc_function_bind(), so each
 * teardown step must tolerate partially initialised state.
 */
static void
uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct uvc_device *uvc = to_uvc(f);

	INFO(cdev, "uvc_function_unbind\n");

	if (uvc->vdev) {
		/* A minor of -1 means the node was never registered. */
		if (uvc->vdev->minor == -1)
			video_device_release(uvc->vdev);
		else
			video_unregister_device(uvc->vdev);
		uvc->vdev = NULL;
	}

	/* Release the endpoints claimed by usb_ep_autoconfig(). */
	if (uvc->control_ep)
		uvc->control_ep->driver_data = NULL;
	if (uvc->video.ep)
		uvc->video.ep->driver_data = NULL;

	if (uvc->control_req) {
		usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
		kfree(uvc->control_buf);
	}

	/* kfree(NULL) is a no-op, so unallocated arrays are fine here. */
	kfree(f->descriptors);
	kfree(f->hs_descriptors);
	kfree(uvc);
}
/*
 * uvc_function_bind - allocate and initialise all function resources
 *
 * Claims the control/streaming endpoints, allocates interface IDs, builds
 * the FS and HS descriptor arrays, preallocates the ep0 request and
 * registers the V4L2 device.  On any failure the partially initialised
 * state is released through uvc_function_unbind().
 */
static int __init
uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct uvc_device *uvc = to_uvc(f);
	struct usb_ep *ep;
	int ret = -EINVAL;

	INFO(cdev, "uvc_function_bind\n");

	/* Allocate endpoints. */
	ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
	if (!ep) {
		INFO(cdev, "Unable to allocate control EP\n");
		goto error;
	}
	uvc->control_ep = ep;
	ep->driver_data = uvc;

	ep = usb_ep_autoconfig(cdev->gadget, &uvc_streaming_ep);
	if (!ep) {
		INFO(cdev, "Unable to allocate streaming EP\n");
		goto error;
	}
	uvc->video.ep = ep;
	ep->driver_data = uvc;

	/* Allocate interface IDs. */
	if ((ret = usb_interface_id(c, f)) < 0)
		goto error;
	uvc_iad.bFirstInterface = ret;
	uvc_control_intf.bInterfaceNumber = ret;
	uvc->control_intf = ret;

	if ((ret = usb_interface_id(c, f)) < 0)
		goto error;
	uvc_streaming_intf_alt0.bInterfaceNumber = ret;
	uvc_streaming_intf_alt1.bInterfaceNumber = ret;
	uvc->streaming_intf = ret;

	/*
	 * Copy descriptors.  Fix: uvc_copy_descriptors() returns NULL on
	 * allocation failure; the original never checked, leading to a
	 * NULL dereference when the arrays were later walked.
	 */
	f->descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
	f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
	if (f->descriptors == NULL || f->hs_descriptors == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Preallocate control endpoint request. */
	uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
	uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL);
	if (uvc->control_req == NULL || uvc->control_buf == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	uvc->control_req->buf = uvc->control_buf;
	uvc->control_req->complete = uvc_function_ep0_complete;
	uvc->control_req->context = uvc;

	/* Avoid letting this gadget enumerate until the userspace server is
	 * active.
	 */
	if ((ret = usb_function_deactivate(f)) < 0)
		goto error;

	/* Initialise video. */
	ret = uvc_video_init(&uvc->video);
	if (ret < 0)
		goto error;

	/* Register a V4L2 device. */
	ret = uvc_register_video(uvc);
	if (ret < 0) {
		printk(KERN_INFO "Unable to register video device\n");
		goto error;
	}

	return 0;

error:
	uvc_function_unbind(c, f);
	return ret;
}
/* --------------------------------------------------------------------------
* USB gadget function
*/
/**
* uvc_bind_config - add a UVC function to a configuration
* @c: the configuration to support the UVC instance
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
*
* Caller must have called @uvc_setup(). Caller is also responsible for
* calling @uvc_cleanup() before module unload.
*/
int __init
uvc_bind_config(struct usb_configuration *c,
		const struct uvc_descriptor_header * const *control,
		const struct uvc_descriptor_header * const *fs_streaming,
		const struct uvc_descriptor_header * const *hs_streaming)
{
	struct uvc_device *uvc;
	int ret = 0;

	/* TODO Check if the USB device controller supports the required
	 * features.
	 */
	if (!gadget_is_dualspeed(c->cdev->gadget))
		return -EINVAL;

	uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
	if (uvc == NULL)
		return -ENOMEM;

	uvc->state = UVC_STATE_DISCONNECTED;

	/*
	 * Validate the descriptors.  Fix: the original jumped to the error
	 * label with ret still 0, so invalid descriptor sets were reported
	 * as success to the caller.
	 */
	ret = -EINVAL;
	if (control == NULL || control[0] == NULL ||
	    control[0]->bDescriptorSubType != UVC_VC_HEADER)
		goto error;

	if (fs_streaming == NULL || fs_streaming[0] == NULL ||
	    fs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
		goto error;

	if (hs_streaming == NULL || hs_streaming[0] == NULL ||
	    hs_streaming[0]->bDescriptorSubType != UVC_VS_INPUT_HEADER)
		goto error;

	uvc->desc.control = control;
	uvc->desc.fs_streaming = fs_streaming;
	uvc->desc.hs_streaming = hs_streaming;

	/* Allocate string descriptor numbers. */
	if ((ret = usb_string_id(c->cdev)) < 0)
		goto error;
	uvc_en_us_strings[UVC_STRING_ASSOCIATION_IDX].id = ret;
	uvc_iad.iFunction = ret;

	if ((ret = usb_string_id(c->cdev)) < 0)
		goto error;
	uvc_en_us_strings[UVC_STRING_CONTROL_IDX].id = ret;
	uvc_control_intf.iInterface = ret;

	if ((ret = usb_string_id(c->cdev)) < 0)
		goto error;
	uvc_en_us_strings[UVC_STRING_STREAMING_IDX].id = ret;
	uvc_streaming_intf_alt0.iInterface = ret;
	uvc_streaming_intf_alt1.iInterface = ret;

	/* Register the function. */
	uvc->func.name = "uvc";
	uvc->func.strings = uvc_function_strings;
	uvc->func.bind = uvc_function_bind;
	uvc->func.unbind = uvc_function_unbind;
	uvc->func.get_alt = uvc_function_get_alt;
	uvc->func.set_alt = uvc_function_set_alt;
	uvc->func.disable = uvc_function_disable;
	uvc->func.setup = uvc_function_setup;

	ret = usb_add_function(c, &uvc->func);
	if (ret)
		kfree(uvc);

	/*
	 * Fix: propagate usb_add_function()'s status.  The original
	 * returned 0 unconditionally here, so the caller believed the
	 * function was registered even though uvc had just been freed.
	 */
	return ret;

error:
	kfree(uvc);
	return ret;
}
/* Debug trace level bitmask, settable at module load time and via sysfs. */
module_param_named(trace, uvc_gadget_trace_param, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(trace, "Trace level bitmask");
| gpl-2.0 |
Excito/community-b3-kernel | crypto/pcrypt.c | 3264 | 15018 | /*
* pcrypt - Parallel crypto wrapper.
*
* Copyright (C) 2009 secunet Security Networks AG
* Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>
/*
 * Per-direction (encrypt/decrypt) pcrypt state: the padata instance that
 * distributes work, its backing workqueue, and the RCU-protected mask of
 * CPUs eligible to run serial completion callbacks.
 */
struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be
	 * equal to serial cpumask of corresponding padata instance,
	 * so it is updated when padata notifies us about serial
	 * cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on kernel configuration (particularly on
	 * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
	 * cpumask_var_t may be either a pointer to the struct cpumask
	 * or a variable allocated on the stack. Thus we can not safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask which makes possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;

	/* Receives padata serial-cpumask change notifications. */
	struct notifier_block nblock;
};

/* One padata instance per direction, plus the sysfs kset they live in. */
static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_spawn spawn;	/* the wrapped algorithm */
	unsigned int tfm_count;		/* round-robins callback CPUs */
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;	/* wrapped AEAD transform */
	unsigned int cb_cpu;		/* CPU for serial completion */
};
/*
 * Submit a padata job, remapping *cb_cpu onto the current callback
 * cpumask when it is no longer a member of it.
 */
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	struct pcrypt_cpumask *cpumask;
	unsigned int target_cpu;

	target_cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference(pcrypt->cb_cpumask);
	if (!cpumask_test_cpu(target_cpu, cpumask->mask) &&
	    cpumask_weight(cpumask->mask)) {
		/* Map the stale CPU number onto the current mask. */
		unsigned int idx = target_cpu % cpumask_weight(cpumask->mask);
		unsigned int cpu = cpumask_first(cpumask->mask);

		while (idx--)
			cpu = cpumask_next(cpu, cpumask->mask);

		target_cpu = cpu;
		*cb_cpu = target_cpu;
	}
	rcu_read_unlock_bh();

	return padata_do_parallel(pcrypt->pinst, padata, target_cpu);
}
static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx;

	/* Key handling is delegated entirely to the child transform. */
	ctx = crypto_aead_ctx(parent);
	return crypto_aead_setkey(ctx->child, key, keylen);
}
static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx;

	/* Forward the authentication tag size to the child transform. */
	ctx = crypto_aead_ctx(parent);
	return crypto_aead_setauthsize(ctx->child, authsize);
}
static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq;
	struct aead_request *orig;

	/* Runs in serialization order: complete the original request. */
	preq = pcrypt_padata_request(padata);
	orig = pcrypt_request_ctx(preq);
	aead_request_complete(orig->base.data, padata->info);
}
static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq;
	struct aead_givcrypt_request *greq;

	/* Serialization callback for IV-generating encrypt requests. */
	preq = pcrypt_padata_request(padata);
	greq = pcrypt_request_ctx(preq);
	aead_request_complete(greq->areq.base.data, padata->info);
}
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *orig = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(orig);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	/* Remember the child's status for the serial completion step. */
	padata->info = err;
	orig->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}
static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	int ret;

	/* Parallel worker: perform the actual encryption. */
	ret = crypto_aead_encrypt(creq);
	padata->info = ret;
	if (ret == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}
static int pcrypt_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	u32 flags = aead_request_flags(req);
	int err;

	/* Arm the padata job: encrypt in parallel, complete in order. */
	memset(padata, 0, sizeof(struct padata_priv));
	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	/* Mirror the caller's request onto the child transform. */
	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_assoc(creq, req->assoc, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	return err ? err : -EINPROGRESS;
}
static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	int ret;

	/* Parallel worker: perform the actual decryption. */
	ret = crypto_aead_decrypt(creq);
	padata->info = ret;
	if (ret == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}
static int pcrypt_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	u32 flags = aead_request_flags(req);
	int err;

	/* Arm the padata job: decrypt in parallel, complete in order. */
	memset(padata, 0, sizeof(struct padata_priv));
	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	/* Mirror the caller's request onto the child transform. */
	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_assoc(creq, req->assoc, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	return err ? err : -EINPROGRESS;
}
static void pcrypt_aead_givenc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
	int ret;

	/* Parallel worker: IV-generating encryption on the child tfm. */
	ret = crypto_aead_givencrypt(creq);
	padata->info = ret;
	if (ret == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}
static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
int err;
struct aead_request *areq = &req->areq;
struct pcrypt_request *preq = aead_request_ctx(areq);
struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
struct padata_priv *padata = pcrypt_request_padata(preq);
struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(areq);
memset(padata, 0, sizeof(struct padata_priv));
padata->parallel = pcrypt_aead_givenc;
padata->serial = pcrypt_aead_giv_serial;
aead_givcrypt_set_tfm(creq, ctx->child);
aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
pcrypt_aead_done, areq);
aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
areq->cryptlen, areq->iv);
aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
aead_givcrypt_set_giv(creq, req->giv, req->seq);
err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
if (!err)
return -EINPROGRESS;
return err;
}
/*
 * pcrypt_aead_init_tfm - set up one pcrypt AEAD transform
 *
 * Assigns a callback CPU by round-robining tfm_count over the active
 * CPUs, then instantiates the wrapped (child) AEAD transform.
 */
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
	int cpu, cpu_index;
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_aead *cipher;

	/* Spread successive transforms across the active CPUs. */
	ictx->tfm_count++;

	cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);

	ctx->cb_cpu = cpumask_first(cpu_active_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);

	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	/* Request context: pcrypt bookkeeping, the (larger) givcrypt
	 * request for the child, plus the child's own context. */
	tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
		+ sizeof(struct aead_givcrypt_request)
		+ crypto_aead_reqsize(cipher);

	return 0;
}
static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
	struct pcrypt_aead_ctx *pctx;

	/* Release the wrapped AEAD transform. */
	pctx = crypto_tfm_ctx(tfm);
	crypto_free_aead(pctx->child);
}
/*
 * Allocate a crypto instance wrapping @alg, with the "pcrypt(...)"
 * driver name and a spawn referencing the underlying algorithm.
 * Returns an ERR_PTR on failure.
 */
static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
	struct crypto_instance *inst;
	struct pcrypt_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) {
		err = -ENAMETOOLONG;
		goto err_free_inst;
	}

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto err_free_inst;

	/* Outrank the plain algorithm so pcrypt instances win lookups. */
	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return inst;

err_free_inst:
	kfree(inst);
	return ERR_PTR(err);
}
/*
 * pcrypt_alloc_aead - build a pcrypt template instance around an AEAD
 *
 * Looks up the underlying algorithm from the template parameters and
 * wires all AEAD entry points of the new instance through the pcrypt
 * wrappers.  Returns an ERR_PTR on failure.
 */
static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
						 u32 type, u32 mask)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = pcrypt_alloc_instance(alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_aead_type;

	/* Geometry is inherited unchanged from the wrapped algorithm. */
	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

	inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.cra_init = pcrypt_aead_init_tfm;
	inst->alg.cra_exit = pcrypt_aead_exit_tfm;

	inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
	inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
	inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
	inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}
static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	/* Only AEAD algorithms can be wrapped by pcrypt. */
	if ((algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_AEAD)
		return pcrypt_alloc_aead(tb, algt->type, algt->mask);

	return ERR_PTR(-EINVAL);
}
static void pcrypt_free(struct crypto_instance *inst)
{
	struct pcrypt_instance_ctx *ictx;

	/* Drop the reference on the wrapped algorithm, then the instance. */
	ictx = crypto_instance_ctx(inst);
	crypto_drop_spawn(&ictx->spawn);
	kfree(inst);
}
/*
 * pcrypt_cpumask_change_notify - padata cpumask notifier callback
 *
 * Rebuilds the RCU-protected callback cpumask whenever padata reports a
 * serial-cpumask change.  Readers (pcrypt_do_parallel) run under
 * rcu_read_lock_bh(), hence the synchronize_rcu_bh() before the old mask
 * is freed.
 */
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	/* Only serial-cpumask changes matter for the callback mask. */
	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	/* Publish the new mask, wait out readers, then free the old one. */
	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}
static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	/* Expose the padata instance under the shared "pcrypt" kset. */
	pinst->kobj.kset = pcrypt_kset;

	ret = kobject_add(&pinst->kobj, NULL, name);
	if (ret)
		return ret;

	kobject_uevent(&pinst->kobj, KOBJ_ADD);
	return 0;
}
/*
 * pcrypt_init_padata - set up one direction's padata machinery
 *
 * Creates the workqueue, padata instance, callback cpumask, cpumask
 * notifier and sysfs entry.  On failure the already-acquired resources
 * are unwound in reverse order through the goto chain.
 */
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	/* Hold off CPU hotplug while the cpumasks are being captured. */
	get_online_cpus();

	pcrypt->wq = alloc_workqueue(name,
				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	/* Initially allow callbacks on every active CPU. */
	cpumask_and(mask->mask, cpu_possible_mask, cpu_active_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}
/*
 * pcrypt_fini_padata - tear down one direction's padata machinery
 *
 * NOTE(review): the callback cpumask is freed *before* padata_stop();
 * this assumes no requests can still be flowing through
 * pcrypt_do_parallel() at this point — confirm against the callers'
 * shutdown ordering.
 */
static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}
/* Template hooks: "pcrypt(<alg>)" instances are built by pcrypt_alloc(). */
static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.alloc = pcrypt_alloc,
	.free = pcrypt_free,
	.module = THIS_MODULE,
};
/*
 * pcrypt_init - module entry point
 *
 * Sets up the sysfs kset and both padata directions, then registers the
 * "pcrypt" template.  Fix: the original returned
 * crypto_register_template()'s status directly, leaking the kset, both
 * padata instances and their workqueues when registration failed; unwind
 * them instead.
 */
static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	err = crypto_register_template(&pcrypt_tmpl);
	if (err)
		goto err_deinit_pdecrypt;

	return 0;

err_deinit_pdecrypt:
	pcrypt_fini_padata(&pdecrypt);
err_deinit_pencrypt:
	pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}
/*
 * pcrypt_exit - module exit point
 *
 * Fix: unregister the template *first* so no new pcrypt instances (and
 * no new requests through them) can be created while the padata
 * machinery below is being torn down; the original freed padata before
 * unregistering the template.
 */
static void __exit pcrypt_exit(void)
{
	crypto_unregister_template(&pcrypt_tmpl);

	pcrypt_fini_padata(&pencrypt);
	pcrypt_fini_padata(&pdecrypt);

	kset_unregister(pcrypt_kset);
}
/* Module boilerplate. */
module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
| gpl-2.0 |
BOOTMGR/GT-I9070_kernel | sound/isa/sb/sb8_main.c | 4032 | 18599 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Uros Bizjak <uros@kss-loka.si>
*
* Routines for control of 8-bit SoundBlaster cards and clones
* Please note: I don't have access to old SB8 soundcards.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* --
*
* Thu Apr 29 20:36:17 BST 1999 George David Morrison <gdm@gedamo.demon.co.uk>
* DSP can't respond to commands whilst in "high speed" mode. Caused
* glitching during playback. Fixed.
*
* Wed Jul 12 22:02:55 CEST 2000 Uros Bizjak <uros@kss-loka.si>
* Cleaned up and rewrote lowlevel routines.
*/
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/sb.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Uros Bizjak <uros@kss-loka.si>");
MODULE_DESCRIPTION("Routines for control of 8-bit SoundBlaster cards and clones");
MODULE_LICENSE("GPL");
/* Base clock of the SB8 time-constant generator (1 MHz). */
#define SB8_CLOCK 1000000
/* Rounded clock divisor (time-constant denominator) for rate v. */
#define SB8_DEN(v) ((SB8_CLOCK + (v) / 2) / (v))
/* Actual rate produced by the divisor chosen for rate v. */
#define SB8_RATE(v) (SB8_CLOCK / SB8_DEN(v))

/* Full divisor range (1..256) for mono rate constraints. */
static struct snd_ratnum clock = {
	.num = SB8_CLOCK,
	.den_min = 1,
	.den_max = 256,
	.den_step = 1,
};

static struct snd_pcm_hw_constraint_ratnums hw_constraints_clock = {
	.nrats = 1,
	.rats = &clock,
};

/* Stereo is only possible at the two fixed SB Pro rates (~22050/~11025). */
static struct snd_ratnum stereo_clocks[] = {
	{
		.num = SB8_CLOCK,
		.den_min = SB8_DEN(22050),
		.den_max = SB8_DEN(22050),
		.den_step = 1,
	},
	{
		.num = SB8_CLOCK,
		.den_min = SB8_DEN(11025),
		.den_max = SB8_DEN(11025),
		.den_step = 1,
	}
};
/* hw rule: when more than one channel is requested, restrict the rate
 * interval to the two fixed stereo clock rates. */
static int snd_sb8_hw_constraint_rate_channels(struct snd_pcm_hw_params *params,
					       struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	unsigned int num, den;
	int err;

	/* Mono streams keep the full rate range. */
	if (c->min <= 1)
		return 0;

	num = 0;
	den = 0;
	err = snd_interval_ratnum(hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE),
				  2, stereo_clocks, &num, &den);
	if (err >= 0 && den) {
		params->rate_num = num;
		params->rate_den = den;
	}
	return err;
}
static int snd_sb8_hw_constraint_channels_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval *r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
if (r->min > SB8_RATE(22050) || r->max <= SB8_RATE(11025)) {
struct snd_interval t = { .min = 1, .max = 1 };
return snd_interval_refine(hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS), &t);
}
return 0;
}
/*
 * snd_sb8_playback_prepare - program the DSP for the next playback stream
 *
 * Chooses the DSP transfer command from the chip model, rate and channel
 * count, switches the mixer to stereo where needed, programs the time
 * constant and block size, and arms the ISA DMA channel.
 */
static int snd_sb8_playback_prepare(struct snd_pcm_substream *substream)
{
	unsigned long flags;
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int mixreg, rate, size, count;
	unsigned char format;
	unsigned char stereo = runtime->channels > 1;
	int dma;

	rate = runtime->rate;
	switch (chip->hardware) {
	case SB_HW_JAZZ16:
		/* Jazz16: 16-bit uses the second DMA channel; playback
		 * and capture cannot both run 16-bit at once. */
		if (runtime->format == SNDRV_PCM_FORMAT_S16_LE) {
			if (chip->mode & SB_MODE_CAPTURE_16)
				return -EBUSY;
			else
				chip->mode |= SB_MODE_PLAYBACK_16;
		}
		chip->playback_format = SB_DSP_LO_OUTPUT_AUTO;
		break;
	case SB_HW_PRO:
		if (runtime->channels > 1) {
			/* Stereo works only at the two fixed Pro rates. */
			if (snd_BUG_ON(rate != SB8_RATE(11025) &&
				       rate != SB8_RATE(22050)))
				return -EINVAL;
			chip->playback_format = SB_DSP_HI_OUTPUT_AUTO;
			break;
		}
		/* fallthru */
	case SB_HW_201:
		if (rate > 23000) {
			chip->playback_format = SB_DSP_HI_OUTPUT_AUTO;
			break;
		}
		/* fallthru */
	case SB_HW_20:
		chip->playback_format = SB_DSP_LO_OUTPUT_AUTO;
		break;
	case SB_HW_10:
		chip->playback_format = SB_DSP_OUTPUT;
		break;
	default:
		return -EINVAL;
	}
	if (chip->mode & SB_MODE_PLAYBACK_16) {
		format = stereo ? SB_DSP_STEREO_16BIT : SB_DSP_MONO_16BIT;
		dma = chip->dma16;
	} else {
		format = stereo ? SB_DSP_STEREO_8BIT : SB_DSP_MONO_8BIT;
		chip->mode |= SB_MODE_PLAYBACK_8;
		dma = chip->dma8;
	}
	size = chip->p_dma_size = snd_pcm_lib_buffer_bytes(substream);
	count = chip->p_period_size = snd_pcm_lib_period_bytes(substream);
	spin_lock_irqsave(&chip->reg_lock, flags);
	snd_sbdsp_command(chip, SB_DSP_SPEAKER_ON);
	if (chip->hardware == SB_HW_JAZZ16)
		snd_sbdsp_command(chip, format);
	else if (stereo) {
		/* set playback stereo mode */
		spin_lock(&chip->mixer_lock);
		mixreg = snd_sbmixer_read(chip, SB_DSP_STEREO_SW);
		snd_sbmixer_write(chip, SB_DSP_STEREO_SW, mixreg | 0x02);
		spin_unlock(&chip->mixer_lock);

		/* Soundblaster hardware programming reference guide, 3-23 */
		snd_sbdsp_command(chip, SB_DSP_DMA8_EXIT);
		runtime->dma_area[0] = 0x80;
		snd_dma_program(dma, runtime->dma_addr, 1, DMA_MODE_WRITE);
		/* force interrupt */
		snd_sbdsp_command(chip, SB_DSP_OUTPUT);
		snd_sbdsp_command(chip, 0);
		snd_sbdsp_command(chip, 0);
	}
	snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE);
	if (stereo) {
		/* Stereo: the divisor covers both channels, so halve it. */
		snd_sbdsp_command(chip, 256 - runtime->rate_den / 2);
		spin_lock(&chip->mixer_lock);
		/* save output filter status and turn it off */
		mixreg = snd_sbmixer_read(chip, SB_DSP_PLAYBACK_FILT);
		snd_sbmixer_write(chip, SB_DSP_PLAYBACK_FILT, mixreg | 0x20);
		spin_unlock(&chip->mixer_lock);
		/* just use force_mode16 for temporary storage... */
		chip->force_mode16 = mixreg;
	} else {
		snd_sbdsp_command(chip, 256 - runtime->rate_den);
	}
	if (chip->playback_format != SB_DSP_OUTPUT) {
		/* Auto-init modes get the block size now, in samples. */
		if (chip->mode & SB_MODE_PLAYBACK_16)
			count /= 2;
		count--;
		snd_sbdsp_command(chip, SB_DSP_BLOCK_SIZE);
		snd_sbdsp_command(chip, count & 0xff);
		snd_sbdsp_command(chip, count >> 8);
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	snd_dma_program(dma, runtime->dma_addr,
			size, DMA_MODE_WRITE | DMA_AUTOINIT);
	return 0;
}
/*
 * snd_sb8_playback_trigger - start or stop the prepared playback stream
 *
 * Only START and STOP are handled; other commands return success.
 */
static int snd_sb8_playback_trigger(struct snd_pcm_substream *substream,
				    int cmd)
{
	unsigned long flags;
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	unsigned int count;

	spin_lock_irqsave(&chip->reg_lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		snd_sbdsp_command(chip, chip->playback_format);
		if (chip->playback_format == SB_DSP_OUTPUT) {
			/* Single-cycle mode: block size follows the command;
			 * the IRQ handler restarts us every period. */
			count = chip->p_period_size - 1;
			snd_sbdsp_command(chip, count & 0xff);
			snd_sbdsp_command(chip, count >> 8);
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		if (chip->playback_format == SB_DSP_HI_OUTPUT_AUTO) {
			struct snd_pcm_runtime *runtime = substream->runtime;
			/* The DSP cannot take commands in high-speed mode
			 * (see header comment); reset it instead. */
			snd_sbdsp_reset(chip);
			if (runtime->channels > 1) {
				spin_lock(&chip->mixer_lock);
				/* restore output filter and set hardware to mono mode */
				snd_sbmixer_write(chip, SB_DSP_STEREO_SW, chip->force_mode16 & ~0x02);
				spin_unlock(&chip->mixer_lock);
			}
		} else {
			snd_sbdsp_command(chip, SB_DSP_DMA8_OFF);
		}
		snd_sbdsp_command(chip, SB_DSP_SPEAKER_OFF);
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}
static int snd_sb8_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}
static int snd_sb8_hw_free(struct snd_pcm_substream *substream)
{
	/* Release the stream's DMA buffer. */
	snd_pcm_lib_free_pages(substream);
	return 0;
}
static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream)
{
unsigned long flags;
struct snd_sb *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int mixreg, rate, size, count;
unsigned char format;
unsigned char stereo = runtime->channels > 1;
int dma;
rate = runtime->rate;
switch (chip->hardware) {
case SB_HW_JAZZ16:
if (runtime->format == SNDRV_PCM_FORMAT_S16_LE) {
if (chip->mode & SB_MODE_PLAYBACK_16)
return -EBUSY;
else
chip->mode |= SB_MODE_CAPTURE_16;
}
chip->capture_format = SB_DSP_LO_INPUT_AUTO;
break;
case SB_HW_PRO:
if (runtime->channels > 1) {
if (snd_BUG_ON(rate != SB8_RATE(11025) &&
rate != SB8_RATE(22050)))
return -EINVAL;
chip->capture_format = SB_DSP_HI_INPUT_AUTO;
break;
}
chip->capture_format = (rate > 23000) ? SB_DSP_HI_INPUT_AUTO : SB_DSP_LO_INPUT_AUTO;
break;
case SB_HW_201:
if (rate > 13000) {
chip->capture_format = SB_DSP_HI_INPUT_AUTO;
break;
}
/* fallthru */
case SB_HW_20:
chip->capture_format = SB_DSP_LO_INPUT_AUTO;
break;
case SB_HW_10:
chip->capture_format = SB_DSP_INPUT;
break;
default:
return -EINVAL;
}
if (chip->mode & SB_MODE_CAPTURE_16) {
format = stereo ? SB_DSP_STEREO_16BIT : SB_DSP_MONO_16BIT;
dma = chip->dma16;
} else {
format = stereo ? SB_DSP_STEREO_8BIT : SB_DSP_MONO_8BIT;
chip->mode |= SB_MODE_CAPTURE_8;
dma = chip->dma8;
}
size = chip->c_dma_size = snd_pcm_lib_buffer_bytes(substream);
count = chip->c_period_size = snd_pcm_lib_period_bytes(substream);
spin_lock_irqsave(&chip->reg_lock, flags);
snd_sbdsp_command(chip, SB_DSP_SPEAKER_OFF);
if (chip->hardware == SB_HW_JAZZ16)
snd_sbdsp_command(chip, format);
else if (stereo)
snd_sbdsp_command(chip, SB_DSP_STEREO_8BIT);
snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE);
if (stereo) {
snd_sbdsp_command(chip, 256 - runtime->rate_den / 2);
spin_lock(&chip->mixer_lock);
/* save input filter status and turn it off */
mixreg = snd_sbmixer_read(chip, SB_DSP_CAPTURE_FILT);
snd_sbmixer_write(chip, SB_DSP_CAPTURE_FILT, mixreg | 0x20);
spin_unlock(&chip->mixer_lock);
/* just use force_mode16 for temporary storate... */
chip->force_mode16 = mixreg;
} else {
snd_sbdsp_command(chip, 256 - runtime->rate_den);
}
if (chip->capture_format != SB_DSP_INPUT) {
if (chip->mode & SB_MODE_PLAYBACK_16)
count /= 2;
count--;
snd_sbdsp_command(chip, SB_DSP_BLOCK_SIZE);
snd_sbdsp_command(chip, count & 0xff);
snd_sbdsp_command(chip, count >> 8);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
snd_dma_program(dma, runtime->dma_addr,
size, DMA_MODE_READ | DMA_AUTOINIT);
return 0;
}
/*
 * snd_sb8_capture_trigger - start or stop the prepared capture stream
 *
 * Only START and STOP are handled; other commands return success.
 */
static int snd_sb8_capture_trigger(struct snd_pcm_substream *substream,
				   int cmd)
{
	unsigned long flags;
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	unsigned int count;

	spin_lock_irqsave(&chip->reg_lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		snd_sbdsp_command(chip, chip->capture_format);
		if (chip->capture_format == SB_DSP_INPUT) {
			/* Single-cycle mode: block size follows the command;
			 * the IRQ handler restarts us every period. */
			count = chip->c_period_size - 1;
			snd_sbdsp_command(chip, count & 0xff);
			snd_sbdsp_command(chip, count >> 8);
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		if (chip->capture_format == SB_DSP_HI_INPUT_AUTO) {
			struct snd_pcm_runtime *runtime = substream->runtime;
			/* The DSP cannot take commands in high-speed mode
			 * (see header comment); reset it instead. */
			snd_sbdsp_reset(chip);
			if (runtime->channels > 1) {
				/* restore input filter status */
				spin_lock(&chip->mixer_lock);
				snd_sbmixer_write(chip, SB_DSP_CAPTURE_FILT, chip->force_mode16);
				spin_unlock(&chip->mixer_lock);
				/* set hardware to mono mode */
				snd_sbdsp_command(chip, SB_DSP_MONO_8BIT);
			}
		} else {
			snd_sbdsp_command(chip, SB_DSP_DMA8_OFF);
		}
		snd_sbdsp_command(chip, SB_DSP_SPEAKER_OFF);
	}
	spin_unlock_irqrestore(&chip->reg_lock, flags);
	return 0;
}
/*
 * 8-bit DSP interrupt handler, called from the card driver's IRQ routine.
 *
 * Acknowledges the 8-bit interrupt, then dispatches on the current
 * chip->mode: single-cycle transfers (SB_DSP_OUTPUT / SB_DSP_INPUT) are
 * re-armed by re-issuing the trigger, and the PCM core is told a period
 * elapsed.  The 16-bit modes are only serviced here on Jazz16 hardware;
 * all other chips break out without touching the substream.
 */
irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
{
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;

	snd_sb_ack_8bit(chip);
	switch (chip->mode) {
	case SB_MODE_PLAYBACK_16:	/* ok.. playback is active */
		if (chip->hardware != SB_HW_JAZZ16)
			break;
		/* fallthru */
	case SB_MODE_PLAYBACK_8:
		substream = chip->playback_substream;
		runtime = substream->runtime;
		if (chip->playback_format == SB_DSP_OUTPUT)
			/* single-cycle mode: restart the next period */
			snd_sb8_playback_trigger(substream, SNDRV_PCM_TRIGGER_START);
		snd_pcm_period_elapsed(substream);
		break;
	case SB_MODE_CAPTURE_16:
		if (chip->hardware != SB_HW_JAZZ16)
			break;
		/* fallthru */
	case SB_MODE_CAPTURE_8:
		substream = chip->capture_substream;
		runtime = substream->runtime;
		if (chip->capture_format == SB_DSP_INPUT)
			/* single-cycle mode: restart the next period */
			snd_sb8_capture_trigger(substream, SNDRV_PCM_TRIGGER_START);
		snd_pcm_period_elapsed(substream);
		break;
	}
	return IRQ_HANDLED;
}
/*
 * Report the current playback position (in frames) by querying the ISA
 * DMA controller.  Returns 0 when no playback mode is active.
 */
static snd_pcm_uframes_t snd_sb8_playback_pointer(struct snd_pcm_substream *substream)
{
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	int channel;
	size_t pos;

	if (chip->mode & SB_MODE_PLAYBACK_8)
		channel = chip->dma8;
	else if (chip->mode & SB_MODE_PLAYBACK_16)
		channel = chip->dma16;
	else
		return 0;

	pos = snd_dma_pointer(channel, chip->p_dma_size);
	return bytes_to_frames(substream->runtime, pos);
}
/*
 * Report the current capture position (in frames) by querying the ISA
 * DMA controller.  Returns 0 when no capture mode is active.
 */
static snd_pcm_uframes_t snd_sb8_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	int channel;
	size_t pos;

	if (chip->mode & SB_MODE_CAPTURE_8)
		channel = chip->dma8;
	else if (chip->mode & SB_MODE_CAPTURE_16)
		channel = chip->dma16;
	else
		return 0;

	pos = snd_dma_pointer(channel, chip->c_dma_size);
	return bytes_to_frames(substream->runtime, pos);
}
/*
*/
/*
 * Baseline playback capabilities: mono unsigned 8-bit only.
 * snd_sb8_open() widens formats, rates and channel count at open time
 * for SB Pro / SB 2.01 / Jazz16 hardware.
 */
static struct snd_pcm_hardware snd_sb8_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_U8,
	.rates =		(SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000 |
				 SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_22050),
	.rate_min =		4000,
	.rate_max =		23000,
	.channels_min =		1,
	.channels_max =		1,
	.buffer_bytes_max =	65536,
	.period_bytes_min =	64,
	.period_bytes_max =	65536,
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};
/*
 * Baseline capture capabilities: mono unsigned 8-bit, lower maximum
 * rate than playback.  snd_sb8_open() widens these per chip type.
 */
static struct snd_pcm_hardware snd_sb8_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID),
	.formats =		SNDRV_PCM_FMTBIT_U8,
	.rates =		(SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000 |
				 SNDRV_PCM_RATE_11025),
	.rate_min =		4000,
	.rate_max =		13000,
	.channels_min =		1,
	.channels_max =		1,
	.buffer_bytes_max =	65536,
	.period_bytes_min =	64,
	.period_bytes_max =	65536,
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};
/*
*
*/
/*
 * Open callback shared by playback and capture.
 *
 * The SB8 DSP is half-duplex, so only one PCM stream may be open at a
 * time; this is enforced with the SB_OPEN_PCM flag under open_lock.
 * The baseline hardware descriptor is then widened per detected chip.
 */
static int snd_sb8_open(struct snd_pcm_substream *substream)
{
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;

	spin_lock_irqsave(&chip->open_lock, flags);
	if (chip->open) {
		/* half-duplex hardware: the other direction is in use */
		spin_unlock_irqrestore(&chip->open_lock, flags);
		return -EAGAIN;
	}
	chip->open |= SB_OPEN_PCM;
	spin_unlock_irqrestore(&chip->open_lock, flags);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		chip->playback_substream = substream;
		runtime->hw = snd_sb8_playback;
	} else {
		chip->capture_substream = substream;
		runtime->hw = snd_sb8_capture;
	}
	switch (chip->hardware) {
	case SB_HW_JAZZ16:
		/* 16-bit samples only when a 16-bit DMA channel is wired */
		if (chip->dma16 == 5 || chip->dma16 == 7)
			runtime->hw.formats |= SNDRV_PCM_FMTBIT_S16_LE;
		runtime->hw.rates |= SNDRV_PCM_RATE_8000_48000;
		runtime->hw.rate_min = 4000;
		runtime->hw.rate_max = 50000;
		runtime->hw.channels_max = 2;
		break;
	case SB_HW_PRO:
		runtime->hw.rate_max = 44100;
		runtime->hw.channels_max = 2;
		/* the two rules below tie channel count and rate together
		 * (see snd_sb8_hw_constraint_*) */
		snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				    snd_sb8_hw_constraint_rate_channels, NULL,
				    SNDRV_PCM_HW_PARAM_CHANNELS,
				    SNDRV_PCM_HW_PARAM_RATE, -1);
		snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
				    snd_sb8_hw_constraint_channels_rate, NULL,
				    SNDRV_PCM_HW_PARAM_RATE, -1);
		break;
	case SB_HW_201:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			runtime->hw.rate_max = 44100;
		} else {
			runtime->hw.rate_max = 15000;
		}
		/* fallthrough */
	default:
		break;
	}
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      &hw_constraints_clock);
	if (chip->dma8 > 3 || chip->dma16 >= 0) {
		/* a 16-bit DMA channel moves whole words: keep sizes even.
		 * NOTE(review): the 128 MB maxima below look suspicious for
		 * ISA DMA -- 128 KB would match the preallocation ceiling in
		 * snd_sb8dsp_pcm(); confirm against upstream sb8_main.c. */
		snd_pcm_hw_constraint_step(runtime, 0,
					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2);
		snd_pcm_hw_constraint_step(runtime, 0,
					   SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 2);
		runtime->hw.buffer_bytes_max = 128 * 1024 * 1024;
		runtime->hw.period_bytes_max = 128 * 1024 * 1024;
	}
	return 0;
}
/*
 * Release the (half-duplex) PCM device: drop the substream pointers,
 * then clear the open flag and this direction's mode bits under
 * open_lock.
 */
static int snd_sb8_close(struct snd_pcm_substream *substream)
{
	struct snd_sb *chip = snd_pcm_substream_chip(substream);
	unsigned long flags;
	int mode_mask;

	mode_mask = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
		SB_MODE_PLAYBACK : SB_MODE_CAPTURE;
	chip->playback_substream = NULL;
	chip->capture_substream = NULL;
	spin_lock_irqsave(&chip->open_lock, flags);
	chip->open &= ~SB_OPEN_PCM;
	chip->mode &= ~mode_mask;
	spin_unlock_irqrestore(&chip->open_lock, flags);
	return 0;
}
/*
* Initialization part
*/
/* Playback callback table handed to the PCM core. */
static struct snd_pcm_ops snd_sb8_playback_ops = {
	.open =			snd_sb8_open,
	.close =		snd_sb8_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_sb8_hw_params,
	.hw_free =		snd_sb8_hw_free,
	.prepare =		snd_sb8_playback_prepare,
	.trigger =		snd_sb8_playback_trigger,
	.pointer =		snd_sb8_playback_pointer,
};
/* Capture callback table handed to the PCM core. */
static struct snd_pcm_ops snd_sb8_capture_ops = {
	.open =			snd_sb8_open,
	.close =		snd_sb8_close,
	.ioctl =		snd_pcm_lib_ioctl,
	.hw_params =		snd_sb8_hw_params,
	.hw_free =		snd_sb8_hw_free,
	.prepare =		snd_sb8_capture_prepare,
	.trigger =		snd_sb8_capture_trigger,
	.pointer =		snd_sb8_capture_pointer,
};
/*
 * Create the SB8 PCM device (one playback plus one capture substream),
 * wire up the callback tables and preallocate ISA DMA buffers.
 * On success *rpcm (if non-NULL) receives the new PCM device.
 */
int snd_sb8dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm)
{
	struct snd_card *card = chip->card;
	struct snd_pcm *pcm;
	size_t max_prealloc;
	int err;

	if (rpcm)
		*rpcm = NULL;
	err = snd_pcm_new(card, "SB8 DSP", device, 1, 1, &pcm);
	if (err < 0)
		return err;
	sprintf(pcm->name, "DSP v%i.%i", chip->version >> 8, chip->version & 0xff);
	pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX;
	pcm->private_data = chip;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb8_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb8_capture_ops);
	/* allow a larger preallocation when a 16-bit-capable DMA channel
	 * is configured */
	max_prealloc = (chip->dma8 > 3 || chip->dma16 >= 0) ?
		128 * 1024 : 64 * 1024;
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_isa_data(),
					      64*1024, max_prealloc);
	if (rpcm)
		*rpcm = pcm;
	return 0;
}
EXPORT_SYMBOL(snd_sb8dsp_pcm);
EXPORT_SYMBOL(snd_sb8dsp_interrupt);
/* sb8_midi.c */
EXPORT_SYMBOL(snd_sb8dsp_midi_interrupt);
EXPORT_SYMBOL(snd_sb8dsp_midi);
/*
* INIT part
*/
/*
 * Empty module init/exit stubs: this file only provides exported
 * helper routines (see the EXPORT_SYMBOL list above) for the SB card
 * drivers, so nothing needs to happen at load or unload time.
 */
static int __init alsa_sb8_init(void)
{
	return 0;
}

static void __exit alsa_sb8_exit(void)
{
}

module_init(alsa_sb8_init)
module_exit(alsa_sb8_exit)
| gpl-2.0 |
threader/Huawei_S7_kernel_2.6.35 | drivers/usb/host/pehci/hal/hal_msm.c | 4544 | 19723 | /*
* Copyright (C) ST-Ericsson AP Pte Ltd 2010
*
* ISP1763 Linux HCD Controller driver : hal
*
* This program is free software; you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by the Free Software Foundation; version
* 2 of the License.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
 * This is the main hardware abstraction layer file. Hardware initialization, interrupt
* processing and read/write routines are handled here.
*
* Author : wired support <wired.support@stericsson.com>
*
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/gpio.h>
#include <mach/board.h>
#include <linux/poll.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
/*--------------------------------------------------------------*
* linux system include files
*--------------------------------------------------------------*/
#include "hal_msm.h"
#include "../hal/hal_intf.h"
#include "../hal/isp1763.h"
/*--------------------------------------------------------------*
* Local variable Definitions
*--------------------------------------------------------------*/
/* One device slot per ISP1763 function; indexed by drv->index
 * (presumably host/device/OTG -- see ISP1763_LAST_DEV in the header). */
struct isp1763_dev isp1763_loc_dev[ISP1763_LAST_DEV];
/*--------------------------------------------------------------*
* Local # Definitions
*--------------------------------------------------------------*/
#define PCI_ACCESS_RETRY_COUNT 20
#define ISP1763_DRIVER_NAME "isp1763_usb"
/*--------------------------------------------------------------*
* Local Function
*--------------------------------------------------------------*/
static int __devexit isp1763_remove(struct platform_device *pdev);
static int __devinit isp1763_probe(struct platform_device *pdev);
/*--------------------------------------------------------------*
* Platform Driver Interface Functions
*--------------------------------------------------------------*/
/*
 * Platform driver glue.  Only .remove is set here; the probe routine is
 * passed separately to platform_driver_probe() in isp1763_module_init().
 */
static struct platform_driver isp1763_usb_driver = {
	.remove = __exit_p(isp1763_remove),
	.driver = {
		   .name = ISP1763_DRIVER_NAME,
		   .owner = THIS_MODULE,
		   },
};
/*--------------------------------------------------------------*
* ISP1763 Read write routine
*--------------------------------------------------------------*/
/*
* EBI2 on 8660 ignores the first bit and shifts the address by
* one bit to the right.
* Hence, shift left all the register addresses before accessing
* them over EBI2.
* This logic applies only for the register read/writes, for
* read/write from ISP memory this conversion is not needed
* as the ISP obtains the memory address from 'memory' register
*/
/* Write a 32 bit Register of isp1763 */
void
isp1763_reg_write32(struct isp1763_dev *dev, u16 reg, u32 data)
{
	/* Write the 32bit to the register address given to us */
	/* EBI2 shifts register addresses right by one bit, so pre-shift
	 * left (see the comment block above). */
	reg <<= 1;
#ifdef DATABUS_WIDTH_16
	/* 16-bit bus: low half-word first; note the high half-word goes
	 * to reg + 4 (matching the read32 path), not reg + 2. */
	writew((u16) data, dev->baseaddress + ((reg)));
	writew((u16) (data >> 16), dev->baseaddress + (((reg + 4))));
#else
	/* 8-bit bus: four byte writes, least-significant byte first */
	writeb((u8) data, dev->baseaddress + (reg));
	writeb((u8) (data >> 8), dev->baseaddress + ((reg + 1)));
	writeb((u8) (data >> 16), dev->baseaddress + ((reg + 2)));
	writeb((u8) (data >> 24), dev->baseaddress + ((reg + 3)));
#endif
}
EXPORT_SYMBOL(isp1763_reg_write32);
/* Read a 32 bit Register of isp1763 */
/*
 * Read a 32-bit register.  The incoming 'data' argument is ignored and
 * overwritten; the assembled value is also returned.  The high half-word
 * lives at reg + 4 on the 16-bit bus (mirrors isp1763_reg_write32).
 */
u32
isp1763_reg_read32(struct isp1763_dev *dev, u16 reg, u32 data)
{
#ifdef DATABUS_WIDTH_16
	u16 wvalue1, wvalue2;
#else
	u8 bval1, bval2, bval3, bval4;
#endif
	data = 0;
	/* pre-shift for the EBI2 address translation (see file comment) */
	reg <<= 1;
#ifdef DATABUS_WIDTH_16
	wvalue1 = readw(dev->baseaddress + ((reg)));
	wvalue2 = readw(dev->baseaddress + (((reg + 4))));
	data |= wvalue2;
	data <<= 16;
	data |= wvalue1;
#else
	/* 8-bit bus: assemble four byte reads, LSB first */
	bval1 = readb(dev->baseaddress + (reg));
	bval2 = readb(dev->baseaddress + (reg + 1));
	bval3 = readb(dev->baseaddress + (reg + 2));
	bval4 = readb(dev->baseaddress + (reg + 3));
	data = 0;
	data |= bval4;
	data <<= 8;
	data |= bval3;
	data <<= 8;
	data |= bval2;
	data <<= 8;
	data |= bval1;
#endif

	return data;
}
EXPORT_SYMBOL(isp1763_reg_read32);
/* Read a 16 bit Register of isp1763 */
/*
 * Read a 16-bit register; 'data' is ignored on entry and the value is
 * returned.  On the 8-bit bus HC_DATA_REG is special-cased: both bytes
 * are read from the same port address (the data port apparently streams
 * sequential bytes -- NOTE(review): confirm with the ISP1763 datasheet).
 */
u16
isp1763_reg_read16(struct isp1763_dev * dev, u16 reg, u16 data)
{
	/* pre-shift for the EBI2 address translation (see file comment) */
	reg <<= 1;
#ifdef DATABUS_WIDTH_16
	data = readw(dev->baseaddress + ((reg)));
#else
	u8 bval1, bval2;
	bval1 = readb(dev->baseaddress + (reg));
	if (reg == HC_DATA_REG){
		bval2 = readb(dev->baseaddress + (reg));
	} else {
		bval2 = readb(dev->baseaddress + ((reg + 1)));
	}
	data = 0;
	data |= bval2;
	data <<= 8;
	data |= bval1;
#endif
	return data;
}
EXPORT_SYMBOL(isp1763_reg_read16);
/* Write a 16 bit Register of isp1763 */
/*
 * Write a 16-bit register.  On the 8-bit bus HC_DATA_REG is
 * special-cased: both bytes go to the same port address, mirroring the
 * read path in isp1763_reg_read16().
 */
void
isp1763_reg_write16(struct isp1763_dev *dev, u16 reg, u16 data)
{
	/* pre-shift for the EBI2 address translation (see file comment) */
	reg <<= 1;
#ifdef DATABUS_WIDTH_16
	writew(data, dev->baseaddress + ((reg)));
#else
	writeb((u8) data, dev->baseaddress + (reg));
	if (reg == HC_DATA_REG){
		writeb((u8) (data >> 8), dev->baseaddress + (reg));
	}else{
		writeb((u8) (data >> 8), dev->baseaddress + ((reg + 1)));
	}
#endif
}
EXPORT_SYMBOL(isp1763_reg_write16);
/* Read a 8 bit Register of isp1763 */
/*
 * Read an 8-bit register.  The incoming 'data' argument is ignored;
 * the register address is pre-shifted for the EBI2 translation.
 */
u8
isp1763_reg_read8(struct isp1763_dev *dev, u16 reg, u8 data)
{
	reg <<= 1;
	data = readb(dev->baseaddress + reg);
	return data;
}
EXPORT_SYMBOL(isp1763_reg_read8);
/* Write a 8 bit Register of isp1763 */
/*
 * Write an 8-bit register; the register address is pre-shifted for the
 * EBI2 translation.
 */
void
isp1763_reg_write8(struct isp1763_dev *dev, u16 reg, u8 data)
{
	reg <<= 1;
	writeb(data, dev->baseaddress + reg);
}
EXPORT_SYMBOL(isp1763_reg_write8);
/*--------------------------------------------------------------*
*
 * Module details: isp1763_mem_read
*
* Memory read using PIO method.
*
* Input: struct isp1763_driver *drv --> Driver structure.
* u32 start_add --> Starting address of memory
* u32 end_add ---> End address
*
* u32 * buffer --> Buffer pointer.
* u32 length ---> Length
* u16 dir ---> Direction ( Inc or Dec)
*
* Output int Length ----> Number of bytes read
*
* Called by: system function
*
*
*--------------------------------------------------------------*/
/* Memory read function PIO */
/* Memory read function PIO */
/*
 * Copies 'length' bytes of controller memory starting at start_add into
 * 'buffer': the address is latched in HC_MEM_READ_REG, then data is
 * streamed 16 bits at a time through HC_DATA_REG (two reads combined
 * into each 32-bit word).  1/2/3-byte tails are stored through the
 * byte / half-word views of the buffer.  end_add and dir are unused.
 * Returns 0 on success.
 */
int
isp1763_mem_read(struct isp1763_dev *dev, u32 start_add,
	u32 end_add, u32 * buffer, u32 length, u16 dir)
{
	u8 *one = (u8 *) buffer;	/* byte view for a 1-byte tail */
	u16 *two = (u16 *) buffer;	/* half-word view for 2/3-byte tails */
	u32 a = (u32) length;		/* bytes remaining */
	u32 w;
	u32 w2;

	if (buffer == 0) {
		printk("Buffer address zero\n");
		return 0;
	}

	isp1763_reg_write16(dev, HC_MEM_READ_REG, start_add);
	/* This delay requirement comes from the ISP1763A programming guide */
	ndelay(100);
last:
	/* fetch one 32-bit word as two 16-bit reads from the data port */
	w = isp1763_reg_read16(dev, HC_DATA_REG, w);
	w2 = isp1763_reg_read16(dev, HC_DATA_REG, w);
	w2 <<= 16;
	w = w | w2;
	if (a == 1) {
		*one = (u8) w;
		return 0;
	}
	if (a == 2) {
		*two = (u16) w;
		return 0;
	}

	if (a == 3) {
		*two = (u16) w;
		two += 1;
		w >>= 16;
		/* NOTE(review): this is a 16-bit store of the third byte,
		 * touching one byte past a 3-byte destination -- verify the
		 * callers always pass word-padded buffers. */
		*two = (u8) (w);
		return 0;
	}
	while (a > 0) {
		*buffer = w;
		a -= 4;
		if (a <= 0) {
			break;
		}
		if (a < 4) {
			/* partial word left: switch to the tail handlers */
			buffer += 1;
			one = (u8 *) buffer;
			two = (u16 *) buffer;
			goto last;
		}
		buffer += 1;
		w = isp1763_reg_read16(dev, HC_DATA_REG, w);
		w2 = isp1763_reg_read16(dev, HC_DATA_REG, w);
		w2 <<= 16;
		w = w | w2;
	}
	/* NOTE(review): 'a' is unsigned, so (a < 0) can never be true here */
	return ((a < 0) || (a == 0)) ? 0 : (-1);
}
EXPORT_SYMBOL(isp1763_mem_read);
/*--------------------------------------------------------------*
*
 * Module details: isp1763_mem_write
*
* Memory write using PIO method.
*
* Input: struct isp1763_driver *drv --> Driver structure.
* u32 start_add --> Starting address of memory
* u32 end_add ---> End address
*
* u32 * buffer --> Buffer pointer.
* u32 length ---> Length
* u16 dir ---> Direction ( Inc or Dec)
*
* Output int Length ----> Number of bytes read
*
* Called by: system function
*
*
*--------------------------------------------------------------*/
/* Memory read function IO */
/* Memory read function IO */
/*
 * Writes 'length' bytes from 'buffer' to controller memory starting at
 * start_add, streaming 16 bits at a time through HC_DATA_REG after
 * latching the address in HC_MEM_READ_REG.  end_add and dir are unused.
 * Returns 0 on success.
 */
int
isp1763_mem_write(struct isp1763_dev *dev,
	u32 start_add, u32 end_add, u32 * buffer, u32 length, u16 dir)
{
	int a = length;			/* bytes remaining (signed) */
	u8 one = (u8) (*buffer);	/* first byte, for a 1-byte write */
	u16 two = (u16) (*buffer);	/* first half-word, for a 2-byte write */

	isp1763_reg_write16(dev, HC_MEM_READ_REG, start_add);
	/* This delay requirement comes from the ISP1763A programming guide */
	ndelay(100);

	if (a == 1) {
		/* NOTE(review): still issues a 16-bit port write; the high
		 * byte is zero */
		isp1763_reg_write16(dev, HC_DATA_REG, one);
		return 0;
	}
	if (a == 2) {
		isp1763_reg_write16(dev, HC_DATA_REG, two);
		return 0;
	}

	while (a > 0) {
		/* low half-word, then (for >= 3 bytes left) the high one */
		isp1763_reg_write16(dev, HC_DATA_REG, (u16) (*buffer));
		if (a >= 3)
			isp1763_reg_write16(dev, HC_DATA_REG,
					    (u16) ((*buffer) >> 16));
		start_add += 4;
		a -= 4;
		if (a <= 0)
			break;
		buffer += 1;

	}

	return ((a < 0) || (a == 0)) ? 0 : (-1);
}
EXPORT_SYMBOL(isp1763_mem_write);
/*--------------------------------------------------------------*
*
 * Module details: isp1763_register_driver
*
* This function is used by top driver (OTG, HCD, DCD) to register
* their communication functions (probe, remove, suspend, resume) using
* the drv data structure.
* This function will call the probe function of the driver if the ISP1763
* corresponding to the driver is enabled
*
* Input: struct isp1763_driver *drv --> Driver structure.
* Output result
* 0= complete
* 1= error.
*
* Called by: system function module_init
*
*
*--------------------------------------------------------------*/
/*
 * Register a client driver (OTG/HCD/DCD) with the HAL.
 *
 * Looks up the device slot selected by drv->index, marks it active and
 * invokes the client's probe callback.  On success (probe returned >= 0)
 * the client is recorded in dev->driver.  Returns the probe result, or
 * -EINVAL if drv is NULL, the slot has no mapped registers, or the
 * client supplied no probe function.
 *
 * Fixes vs. original: "indes" typo in the warning, and pr_debug() must
 * not embed a KERN_* level prefix inside its format string (it already
 * supplies KERN_DEBUG; the embedded marker would be printed verbatim).
 */
int
isp1763_register_driver(struct isp1763_driver *drv)
{
	struct isp1763_dev *dev;
	int result = -EINVAL;

	hal_entry("%s: Entered\n", __FUNCTION__);
	info("isp1763_register_driver(drv=%p)\n", drv);

	if (!drv) {
		return -EINVAL;
	}

	dev = &isp1763_loc_dev[drv->index];
	if (!dev->baseaddress)
		return -EINVAL;

	dev->active = 1;	/* set the driver as active*/

	if (drv->probe) {
		result = drv->probe(dev, drv->id);
	} else {
		printk("%s no probe function for index %d\n", __FUNCTION__,
			(int)drv->index);
	}

	if (result >= 0) {
		pr_debug(__FILE__ ": Registered Driver %s\n",
			 drv->name);
		dev->driver = drv;
	}
	hal_entry("%s: Exit\n", __FUNCTION__);
	return result;
}				/* End of isp1763_register_driver */
EXPORT_SYMBOL(isp1763_register_driver);
/*--------------------------------------------------------------*
*
 * Module details: isp1763_unregister_driver
*
* This function is used by top driver (OTG, HCD, DCD) to de-register
* their communication functions (probe, remove, suspend, resume) using
* the drv data structure.
* This function will check whether the driver is registered or not and
* call the remove function of the driver if registered
*
* Input: struct isp1763_driver *drv --> Driver structure.
* Output result
* 0= complete
* 1= error.
*
* Called by: system function module_init
*
*
*--------------------------------------------------------------*/
/*
 * De-register a client driver previously installed by
 * isp1763_register_driver().  Only acts if the recorded driver for the
 * slot matches the caller; in that case the client's remove callback
 * (if any) is invoked and the slot is cleared.
 *
 * Fixes vs. original: the early return skipped the "Exit" trace on the
 * normal de-registration path, and drv->remove was called without a
 * NULL check even though the register path tolerates a missing probe.
 */
void
isp1763_unregister_driver(struct isp1763_driver *drv)
{
	struct isp1763_dev *dev;

	hal_entry("%s: Entered\n", __FUNCTION__);
	info("isp1763_unregister_driver(drv=%p)\n", drv);
	dev = &isp1763_loc_dev[drv->index];
	if (dev->driver == drv) {
		/* driver registered is same as the requesting driver */
		if (drv->remove)
			drv->remove(dev);
		dev->driver = NULL;
		info(": De-registered Driver %s\n", drv->name);
	}
	hal_entry("%s: Exit\n", __FUNCTION__);
}				/* End of isp1763_unregister_driver */
EXPORT_SYMBOL(isp1763_unregister_driver);
/*--------------------------------------------------------------*
* ISP1763 Platform driver interface routine.
*--------------------------------------------------------------*/
/*--------------------------------------------------------------*
*
 * Module details: isp1763_module_init
*
* This is the module initialization function. It registers to
* driver for a isp1763 platform device. And also resets the
* internal data structures.
*
* Input: void
* Output result
* 0= complete
* 1= error.
*
* Called by: system function module_init
*
*
*
-------------------------------------------------------------------*/
/*
 * Module entry point: clears the device table and registers the
 * platform driver, binding isp1763_probe() to any matching device.
 *
 * Fix vs. original: pr_debug() must not embed KERN_NOTICE inside its
 * format string -- pr_debug already supplies its own level and the
 * embedded marker would be printed as garbage in the log.
 */
static int __init
isp1763_module_init(void)
{
	int result = 0;

	hal_entry("%s: Entered\n", __FUNCTION__);
	pr_debug("+isp1763_module_init\n");
	memset(isp1763_loc_dev, 0, sizeof(isp1763_loc_dev));

	result = platform_driver_probe(&isp1763_usb_driver, isp1763_probe);

	pr_debug("-isp1763_module_init\n");
	hal_entry("%s: Exit\n", __FUNCTION__);
	return result;
}
/*--------------------------------------------------------------*
*
 * Module details: isp1763_module_cleanup
*
* This is the module cleanup function. It de-registers the
* Platform driver and resets the internal data structures.
*
* Input: void
* Output void
*
* Called by: system function module_cleanup
*
*
*
--------------------------------------------------------------*/
/* Module exit point: unregister the platform driver and wipe the
 * per-function device table. */
static void __exit
isp1763_module_cleanup(void)
{
	pr_debug("Hal Module Cleanup\n");
	platform_driver_unregister(&isp1763_usb_driver);

	memset(isp1763_loc_dev, 0, sizeof(isp1763_loc_dev));
}
/*
 * Issue one throw-away memory read at address 0x0400 and discard the
 * result.  Called from probe before the first real register access;
 * NOTE(review): the exact need is undocumented here -- presumably it
 * primes the data port after reset; confirm with the ISP1763 guide.
 */
void dummy_mem_read(struct isp1763_dev *dev)
{
	u32 w = 0;

	isp1763_reg_write16(dev, HC_MEM_READ_REG, 0x0400);
	w = isp1763_reg_read16(dev, HC_DATA_REG, w);
	pr_debug("dummy_read DONE: %x\n", w);
	msleep(10);
}
/*--------------------------------------------------------------*
*
 * Module details: isp1763_probe
*
* probe function of ISP1763
* This function is called from module_init if the corresponding platform
* device is present. This function initializes the information
* for the Host Controller with the assigned resources and tests the register
* access to the controller and do a software reset and makes it ready
* for the driver to play with. It also calls setup_gpio passed from pdata
* to setup GPIOs (e.g. used for IRQ and RST lines).
*
* Input:
* struct platform_device *dev ----> Platform Device structure
* Output void
*
* Called by: system function module_cleanup
*
*
*
--------------------------------------------------------------**/
/*
 * Platform probe: maps the controller's register window, resets the
 * chip, selects the bus width and verifies register access via the
 * scratch register before declaring the device usable.
 *
 * NOTE(review): pdata is dereferenced without a NULL check -- a board
 * without platform data would oops here.
 */
static int __devinit
isp1763_probe(struct platform_device *pdev)
{
	u32 reg_data = 0;
	struct isp1763_dev *loc_dev;
	int status = 1;
	u32 hwmodectrl = 0;
	u16 us_reset_hc = 0;
	u32 chipid = 0;
	struct isp1763_platform_data *pdata = pdev->dev.platform_data;

	hal_entry("%s: Entered\n", __FUNCTION__);
	/* NOTE(review): 'dev' here is stale (the parameter is 'pdev');
	 * this only compiles because hal_init() is likely a no-op macro */
	hal_init(("isp1763_probe(dev=%p)\n", dev));

	loc_dev = &(isp1763_loc_dev[ISP1763_HC]);
	loc_dev->dev = pdev;

	/* Get the Host Controller IO and INT resources */
	loc_dev->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!loc_dev->mem_res) {
		pr_err("%s: failed to get platform resource mem\n", __func__);
		return -ENODEV;
	}

	loc_dev->baseaddress = ioremap_nocache(loc_dev->mem_res->start,
		resource_size(loc_dev->mem_res));
	if (!loc_dev->baseaddress) {
		pr_err("%s: ioremap failed\n", __func__);
		status = -ENOMEM;
		goto put_mem_res;
	}
	pr_info("%s: ioremap done at: %x\n", __func__,
		(int)loc_dev->baseaddress);
	loc_dev->irq = platform_get_irq(pdev, 0);
	if (!loc_dev->irq) {
		pr_err("%s: platform_get_irq failed\n", __func__);
		status = -ENODEV;
		goto free_regs;
	}

	loc_dev->index = ISP1763_HC;	/*zero */
	loc_dev->length = resource_size(loc_dev->mem_res);

	hal_init(("isp1763 HC MEM Base= %p irq = %d\n",
		loc_dev->baseaddress, loc_dev->irq));

	/* Setup GPIOs and issue RESET_N to Controller */
	if (pdata->setup_gpio)
		if (pdata->setup_gpio(1))
			pr_err("%s: Failed to setup GPIOs for isp1763\n",
								 __func__);
	if (pdata->reset_gpio) {
		/* pulse the active-low reset line */
		gpio_set_value(pdata->reset_gpio, 0);
		msleep(10);
		gpio_set_value(pdata->reset_gpio, 1);
	} else {
		pr_err("%s: Failed to issue RESET_N to isp1763\n", __func__);
	}

	dummy_mem_read(loc_dev);

	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
	pr_info("START: chip id:%x\n", chipid);

	/*reset the host controller  */
	pr_debug("RESETTING\n");
	us_reset_hc |= 0x1;
	isp1763_reg_write16(loc_dev, 0xB8, us_reset_hc);
	msleep(20);
	us_reset_hc = 0;
	us_reset_hc |= 0x2;
	isp1763_reg_write16(loc_dev, 0xB8, us_reset_hc);

	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
	pr_info("after HC reset, chipid:%x\n", chipid);

	msleep(20);
	/* select the data bus width to match DATABUS_WIDTH_16 */
	hwmodectrl = isp1763_reg_read16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
	pr_debug("Mode Ctrl Value b4 setting buswidth: %x\n", hwmodectrl);
#ifdef DATABUS_WIDTH_16
	hwmodectrl &= 0xFFEF;	/*enable the 16 bit bus */
#else
	pr_debug("Setting 8-BIT mode\n");
	hwmodectrl |= 0x0010;	/*enable the 8 bit bus */
#endif
	isp1763_reg_write16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
	pr_debug("writing 0x%x to hw mode reg\n", hwmodectrl);

	hwmodectrl = isp1763_reg_read16(loc_dev, HC_HWMODECTRL_REG, hwmodectrl);
	msleep(100);

	pr_debug("Mode Ctrl Value after setting buswidth: %x\n", hwmodectrl);

	chipid = isp1763_reg_read32(loc_dev, DC_CHIPID, chipid);
	pr_debug("after setting HW MODE to 8bit, chipid:%x\n", chipid);

	hal_init(("isp1763 DC MEM Base= %lx irq = %d\n",
		loc_dev->io_base, loc_dev->irq));

	/* scratch-register round trip proves the bus wiring works */
	reg_data = isp1763_reg_read16(loc_dev, HC_SCRATCH_REG, reg_data);
	pr_debug("Scratch register is 0x%x\n", reg_data);
	reg_data = 0xABCD;
	isp1763_reg_write16(loc_dev, HC_SCRATCH_REG, reg_data);
	reg_data = isp1763_reg_read16(loc_dev, HC_SCRATCH_REG, reg_data);
	pr_debug("After write, Scratch register is 0x%x\n", reg_data);

	if (reg_data != 0xABCD) {
		pr_err("%s: Scratch register write mismatch!!\n", __func__);
		status = -ENODEV;
		goto free_gpios;
	}

	/* NOTE(review): the terminator store below indexes one element
	 * past the memcpy'd bytes; verify loc_dev->name is at least
	 * sizeof(ISP1763_DRIVER_NAME) + 1 bytes long. */
	memcpy(loc_dev->name, ISP1763_DRIVER_NAME, sizeof(ISP1763_DRIVER_NAME));
	loc_dev->name[sizeof(ISP1763_DRIVER_NAME)] = 0;

	pr_debug(KERN_NOTICE "-isp1763_pci_probe\n");
	hal_entry("%s: Exit\n", __FUNCTION__);
	return 0;

free_gpios:
	if (pdata->setup_gpio)
		pdata->setup_gpio(0);
free_regs:
	iounmap(loc_dev->baseaddress);
put_mem_res:
	loc_dev->baseaddress = NULL;
	hal_entry("%s: Exit\n", __FUNCTION__);
	return status;
}				/* End of isp1763_probe */
/*--------------------------------------------------------------*
*
* Module details: isp1763_remove
*
* cleanup function of ISP1763
* This functions de-initializes the local variables, frees GPIOs
* and releases memory resource.
*
* Input:
* struct platform_device *dev ----> Platform Device structure
*
* Output void
*
* Called by: system function module_cleanup
*
*
*
--------------------------------------------------------------*/
/*
 * Platform remove: unmap the register window and let the board code
 * release its GPIOs via pdata->setup_gpio(0).
 */
static int __devexit
isp1763_remove(struct platform_device *pdev)
{
	struct isp1763_dev *loc_dev;
	struct isp1763_platform_data *pdata = pdev->dev.platform_data;

	/* NOTE(review): 'dev' is stale here (parameter is 'pdev');
	 * compiles only because hal_init() is likely a no-op macro */
	hal_init(("isp1763_pci_remove(dev=%p)\n", dev));

	loc_dev = &isp1763_loc_dev[ISP1763_HC];
	iounmap(loc_dev->baseaddress);
	loc_dev->baseaddress = NULL;
	if (pdata->setup_gpio)
		return pdata->setup_gpio(0);

	return 0;
}				/* End of isp1763_remove */
/* DRIVER_AUTHOR / DRIVER_DESC are defined elsewhere (presumably in the
 * HAL header included above). */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_init(isp1763_module_init);
module_exit(isp1763_module_cleanup);
| gpl-2.0 |
davidmueller13/Audax_Kernel | drivers/gpu/drm/nouveau/nouveau_fence.c | 4800 | 14614 | /*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"
/* Chipset gates: NV10+ exposes the per-channel sequence register read at
 * offset 0x48 (see nouveau_fence_update); semaphores are used from NV17 on. */
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
/* A point on a channel's timeline, identified by 'sequence'; lives on
 * chan->fence.pending until retired by nouveau_fence_update(). */
struct nouveau_fence {
	struct nouveau_channel *channel;	/* emitting channel (ref held) */
	struct kref refcount;
	struct list_head entry;			/* link in chan->fence.pending */
	uint32_t sequence;			/* position on the channel timeline */
	bool signalled;				/* set once retired */
	void (*work)(void *priv, bool signalled);	/* optional completion callback */
	void *priv;				/* argument for ->work */
};
/* A slot carved out of the global fence heap, used for cross-channel
 * synchronisation on chipsets where USE_SEMA() holds. */
struct nouveau_semaphore {
	struct kref ref;
	struct drm_device *dev;
	struct drm_mm_node *mem;	/* allocation inside dev_priv->fence.heap */
};
/* Convert the opaque sync_obj pointer used by the __nouveau_fence_*
 * entry points back to our fence type. */
static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}
/* kref release: drop the channel reference and free the fence. */
static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	nouveau_channel_ref(NULL, &fence->channel);
	kfree(fence);
}
/*
 * Retire completed fences on a channel.
 *
 * Fetches the last completed sequence number -- from the hardware
 * register at 0x48 on chips where USE_REFCNT() holds, otherwise from
 * the value stored by the software-method IRQ path -- then walks the
 * ordered pending list, marking each fence up to that sequence as
 * signalled, running its work callback and dropping the list's
 * reference.  Takes chan->fence.lock internally; callers must not
 * already hold it.
 */
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *tmp, *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	/* Fetch the last sequence if the channel is still up and running */
	if (likely(!list_empty(&chan->fence.pending))) {
		if (USE_REFCNT(dev))
			sequence = nvchan_rd32(chan, 0x48);
		else
			sequence = atomic_read(&chan->fence.last_sequence_irq);

		/* nothing new completed since last time: skip the walk */
		if (chan->fence.sequence_ack == sequence)
			goto out;
		chan->fence.sequence_ack = sequence;
	}

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		/* list is in emission order; stop at the first unacked one */
		if (fence->sequence > chan->fence.sequence_ack)
			break;

		fence->signalled = true;
		list_del(&fence->entry);
		if (fence->work)
			fence->work(fence->priv, true);

		kref_put(&fence->refcount, nouveau_fence_del);
	}

out:
	spin_unlock(&chan->fence.lock);
}
/*
 * Allocate a fence for 'chan', optionally emitting it immediately.
 * On emit failure the fence is unref'd (nouveau_fence_unref presumably
 * clears the pointer, so *pfence ends up NULL -- confirm in the header)
 * and the error is returned.
 */
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	nouveau_channel_ref(chan, &fence->channel);

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref(&fence);
	*pfence = fence;
	return ret;
}
/* Return a new (unlocked) reference to the fence's channel, or NULL
 * when no fence is given. */
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	if (!fence)
		return NULL;
	return nouveau_channel_get_unlocked(fence->channel);
}
/*
 * Assign the next sequence number to the fence, queue it on the
 * channel's pending list (taking an extra ref owned by the list), and
 * emit the GPU command that reports the sequence once reached:
 * NV10_SUBCHAN_REF_CNT on USE_REFCNT() chips (NVC0+ via BEGIN_NVC0),
 * or software method 0x0150 on older hardware.
 */
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	/* sequence counter about to wrap onto the unacked tail: try to
	 * retire completed fences first, and BUG if that didn't help */
	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	/* extra reference owned by the pending list */
	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	if (USE_REFCNT(dev)) {
		if (dev_priv->card_type < NV_C0)
			BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		else
			BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
	} else {
		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
	}
	OUT_RING (chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}
/*
 * Attach a completion callback to a fence.  If the fence has already
 * signalled, the callback runs immediately (under fence.lock);
 * otherwise it is stored and invoked later by nouveau_fence_update().
 * Only one callback per fence is allowed (BUG_ON enforces this).
 */
void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*work)(void *priv, bool signalled),
		   void *priv)
{
	BUG_ON(fence->work);

	spin_lock(&fence->channel->fence.lock);

	if (fence->signalled) {
		work(priv, true);
	} else {
		fence->work = work;
		fence->priv = priv;
	}

	spin_unlock(&fence->channel->fence.lock);
}
/* Drop one reference on a sync object and clear the caller's pointer. */
void
__nouveau_fence_unref(void **sync_obj)
{
	struct nouveau_fence *fence;

	fence = nouveau_fence(*sync_obj);
	*sync_obj = NULL;

	if (fence)
		kref_put(&fence->refcount, nouveau_fence_del);
}
/* Take an extra reference on a sync object and return it unchanged. */
void *
__nouveau_fence_ref(void *sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);

	kref_get(&fence->refcount);
	return sync_obj;
}
/* Poll a fence: fast path on the cached flag, otherwise retire
 * completed fences on the channel and re-check. */
bool
__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);
	struct nouveau_channel *chan = fence->channel;

	if (fence->signalled)
		return true;

	nouveau_fence_update(chan);
	return fence->signalled;
}
/*
 * Wait for a fence with a 3 second timeout.
 *
 * When 'lazy' is set, polls with an exponentially growing hrtimer sleep
 * starting at 1 us (NSEC_PER_MSEC / 1000) and capped at 1 ms; otherwise
 * busy-polls.  'intr' makes the wait interruptible (-ERESTARTSYS on a
 * pending signal); -EBUSY on timeout, 0 once the fence signals.
 */
int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
	int ret = 0;

	while (1) {
		if (__nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE
			: TASK_UNINTERRUPTIBLE);
		if (lazy) {
			/* exponential backoff: 1us, 2us, ... capped at 1ms */
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}
/*
 * Carve a semaphore slot out of the global fence heap (4 bytes before
 * chipset 0x84, 16 bytes from 0x84 on) and zero its words.  Returns
 * NULL when the chipset has no semaphore support (!USE_SEMA) or on any
 * allocation failure.  Uses the atomic drm_mm API so only fence.lock
 * (a plain spinlock) is needed around the heap.
 */
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_semaphore *sema;
	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
	int ret, i;

	if (!USE_SEMA(dev))
		return NULL;

	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
	if (!sema)
		goto fail;

	/* pre-populate drm_mm's node cache so the locked path can't sleep */
	ret = drm_mm_pre_get(&dev_priv->fence.heap);
	if (ret)
		goto fail;

	spin_lock(&dev_priv->fence.lock);
	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
	if (sema->mem)
		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
	spin_unlock(&dev_priv->fence.lock);

	if (!sema->mem)
		goto fail;

	kref_init(&sema->ref);
	sema->dev = dev;
	/* clear the semaphore words in the backing BO */
	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

	return sema;
fail:
	kfree(sema);
	return NULL;
}
/* kref release handler: return the semaphore's slot to the heap and free it. */
static void
semaphore_free(struct kref *ref)
{
	struct nouveau_semaphore *sema;
	struct drm_nouveau_private *dev_priv;

	sema = container_of(ref, struct nouveau_semaphore, ref);
	dev_priv = sema->dev->dev_private;

	spin_lock(&dev_priv->fence.lock);
	drm_mm_put_block(sema->mem);
	spin_unlock(&dev_priv->fence.lock);

	kfree(sema);
}
/*
 * Fence-work callback: drop the semaphore reference once the GPU is done
 * with it.  If the fence never genuinely signalled (channel teardown),
 * write 1 into the semaphore slot by hand so any waiter is released.
 */
static void
semaphore_work(void *priv, bool signalled)
{
	struct nouveau_semaphore *sema = priv;
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	if (!signalled)
		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

	kref_put(&sema->ref, semaphore_free);
}
/*
 * Emit commands on @chan that make it block until @sema is released.
 *
 * Three generations are handled: chipsets < 0x84 use the NV11-style DMA
 * semaphore bound to the NvSema handle; < 0xc0 use the 64-bit NV84
 * semaphore-address methods with an ACQUIRE_EQ trigger; NVC0+ emit the
 * same methods via BEGIN_NVC0.  A fence with attached work keeps @sema
 * alive until the commands have executed.
 */
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
/* GPU virtual address of the semaphore slot within the channel's view */
u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 4);
if (ret)
return ret;
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
OUT_RING (chan, offset);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 1); /* ACQUIRE_EQ */
} else {
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
}
/* Delay semaphore destruction until its work is done */
ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
/*
 * Emit commands on @chan that release @sema, waking any channel blocked
 * in semaphore_acquire() on the same slot.  Chipset branches mirror
 * semaphore_acquire(): NV11-style release, NV84 64-bit methods with a
 * RELEASE trigger, and the NVC0+ equivalent.  A fence with attached
 * work keeps @sema alive until the commands have executed.
 */
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
/* GPU virtual address of the semaphore slot within the channel's view */
u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
OUT_RING (chan, offset);
BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 2); /* RELEASE */
} else {
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
/*
 * Make @wchan wait until @fence (emitted on another channel) signals.
 *
 * Fast path: nothing to do when the fence has no channel, already
 * belongs to @wchan, or has already signalled.  Otherwise a hardware
 * semaphore is acquired on @wchan and released from the fence's
 * channel.  Falls back to a CPU-side wait when semaphores are
 * unavailable, or when the other channel's mutex cannot be taken
 * immediately (taking it blocking here could invert lock order).
 */
int
nouveau_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *wchan)
{
struct nouveau_channel *chan = nouveau_fence_channel(fence);
struct drm_device *dev = wchan->dev;
struct nouveau_semaphore *sema;
int ret = 0;
if (likely(!chan || chan == wchan ||
nouveau_fence_signalled(fence)))
goto out;
sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
ret = nouveau_fence_wait(fence, true, false);
goto out;
}
/* try to take chan's mutex, if we can't take it right away
* we have to fallback to software sync to prevent locking
* order issues
*/
if (!mutex_trylock(&chan->mutex)) {
ret = nouveau_fence_wait(fence, true, false);
goto out_unref;
}
/* Make wchan wait until it gets signalled */
ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
kref_put(&sema->ref, semaphore_free);
out:
/* nouveau_fence_channel() took a channel reference; drop it */
if (chan)
nouveau_channel_put_unlocked(&chan);
return ret;
}
/*
 * Sync-object flush hook: nouveau fences need no flushing, so this
 * always reports success.
 */
int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
return 0;
}
/*
 * Per-channel fence setup.
 *
 * Pre-NVC0 cards get an NV_SW software object bound on subchannel
 * NvSubSw for sync purposes.  When semaphores are usable the shared
 * fence buffer is made visible to the channel: chipsets < 0x84 via a
 * DMA object inserted into RAMHT under the NvSema handle, later cards
 * by mapping the fence BO into the channel's VM.  Finally the channel's
 * completed-sequence counter is reset.
 */
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret;
if (dev_priv->card_type < NV_C0) {
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
/* bind the software object to its subchannel */
BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
OUT_RING (chan, NvSw);
FIRE_RING (chan);
}
/* Setup area of memory shared between all channels for x-chan sync */
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
mem->start << PAGE_SHIFT,
mem->size, NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VRAM, &obj);
if (ret)
return ret;
ret = nouveau_ramht_insert(chan, NvSema, obj);
/* RAMHT holds its own reference now; drop ours */
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
} else
if (USE_SEMA(dev)) {
/* map fence bo into channel's vm */
ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
&chan->fence.vma);
if (ret)
return ret;
}
atomic_set(&chan->fence.last_sequence_irq, 0);
return 0;
}
/*
 * Per-channel fence teardown: force-complete every pending fence, run
 * any deferred work (signalled == false tells the callback the fence is
 * being torn down rather than genuinely completed), drop the pending
 * list's references, and unmap the shared fence BO from the channel's
 * VM.
 */
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *tmp, *fence;
spin_lock(&chan->fence.lock);
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
if (unlikely(fence->work))
fence->work(fence->priv, false);
kref_put(&fence->refcount, nouveau_fence_del);
}
spin_unlock(&chan->fence.lock);
nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}
/*
 * Device-wide fence setup.  When semaphores are usable, allocate, pin
 * and CPU-map a small VRAM buffer shared by all channels for
 * cross-channel sync (4KiB on chipsets < 0x84, 16KiB later), then set
 * up a drm_mm heap and spinlock over it.
 */
int
nouveau_fence_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
int ret;
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
if (ret)
goto fail;
ret = nouveau_bo_map(dev_priv->fence.bo);
if (ret)
goto fail;
ret = drm_mm_init(&dev_priv->fence.heap, 0,
dev_priv->fence.bo->bo.mem.size);
if (ret)
goto fail;
spin_lock_init(&dev_priv->fence.lock);
}
return 0;
fail:
/* unmap then drop the BO reference; releases the pin with it */
nouveau_bo_unmap(dev_priv->fence.bo);
nouveau_bo_ref(NULL, &dev_priv->fence.bo);
return ret;
}
/*
 * Device-wide fence teardown: tear down the semaphore heap, then unmap,
 * unpin and release the shared fence buffer (reverse of
 * nouveau_fence_init()).
 */
void
nouveau_fence_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (USE_SEMA(dev)) {
drm_mm_takedown(&dev_priv->fence.heap);
nouveau_bo_unmap(dev_priv->fence.bo);
nouveau_bo_unpin(dev_priv->fence.bo);
nouveau_bo_ref(NULL, &dev_priv->fence.bo);
}
}
| gpl-2.0 |
nychitman1/android_kernel_asus_flo | drivers/net/ethernet/cadence/at91_ether.c | 4800 | 37829 | /*
* Ethernet driver for the Atmel AT91RM9200 (Thunder)
*
* Copyright (C) 2003 SAN People (Pty) Ltd
*
* Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
* Initial version by Rick Bronson 01/11/2003
*
* Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
* (Polaroid Corporation)
*
* Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mach-types.h>
#include <mach/at91rm9200_emac.h>
#include <asm/gpio.h>
#include <mach/board.h>
#include "at91_ether.h"
#define DRV_NAME "at91_ether"
#define DRV_VERSION "1.0"
#define LINK_POLL_INTERVAL (HZ)
/* ..................................................................... */
/*
* Read from a EMAC register.
*/
static inline unsigned long at91_emac_read(unsigned int reg)
{
void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
return __raw_readl(emac_base + reg);
}
/*
* Write to a EMAC register.
*/
static inline void at91_emac_write(unsigned int reg, unsigned long value)
{
void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC;
__raw_writel(value, emac_base + reg);
}
/* ........................... PHY INTERFACE ........................... */
/*
* Enable the MDIO bit in MAC control register
* When not called from an interrupt-handler, access to the PHY must be
* protected by a spinlock.
*/
/*
 * Set the MPE bit in the EMAC control register, enabling the MDIO
 * management port.  Outside interrupt context, callers must hold the
 * PHY spinlock.
 */
static void enable_mdi(void)
{
	unsigned long ctl = at91_emac_read(AT91_EMAC_CTL);

	at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE);
}
/*
* Disable the MDIO bit in the MAC control register
*/
/*
 * Clear the MPE bit in the EMAC control register, disabling the MDIO
 * management port.
 */
static void disable_mdi(void)
{
	unsigned long ctl = at91_emac_read(AT91_EMAC_CTL);

	at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE);
}
/*
* Wait until the PHY operation is complete.
*/
/*
 * Spin until the EMAC management interface reports idle, giving up
 * after roughly two jiffies.
 */
static inline void at91_phy_wait(void) {
unsigned long timeout = jiffies + 2;
while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
if (time_after(jiffies, timeout)) {
/* NOTE(review): "MIO" is likely a typo for "MDIO"; message also lacks a KERN_ level */
printk("at91_ether: MIO timeout\n");
break;
}
cpu_relax();
}
}
/*
* Write value to the a PHY register
* Note: MDI interface is assumed to already have been enabled.
*/
/*
 * Write @value to PHY register @address on the PHY at @phy_addr.
 * Builds an 802.3 clause-22 write frame in the MAN register:
 * PHY address in bits 27:23, register in bits 22:18, data in the
 * AT91_EMAC_DATA field.
 * Note: MDI interface is assumed to already have been enabled.
 */
static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value)
{
at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
| ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
/* Wait until IDLE bit in Network Status register is cleared */
at91_phy_wait();
}
/*
* Read value stored in a PHY register.
* Note: MDI interface is assumed to already have been enabled.
*/
/*
 * Read PHY register @address on the PHY at @phy_addr into *@value.
 * Builds an 802.3 clause-22 read frame (same field layout as
 * write_phy()) and fetches the result from the MAN data field.
 * Note: MDI interface is assumed to already have been enabled.
 */
static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value)
{
at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
| ((phy_addr & 0x1f) << 23) | (address << 18));
/* Wait until IDLE bit in Network Status register is cleared */
at91_phy_wait();
*value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA;
}
/* ........................... PHY MANAGEMENT .......................... */
/*
* Access the PHY to determine the current link speed and mode, and update the
* MAC accordingly.
* If no link or auto-negotiation is busy, then no changes are made.
*/
static void update_linkspeed(struct net_device *dev, int silent)
{
struct at91_private *lp = netdev_priv(dev);
unsigned int bmsr, bmcr, lpa, mac_cfg;
unsigned int speed, duplex;
if (!mii_link_ok(&lp->mii)) { /* no link */
netif_carrier_off(dev);
if (!silent)
printk(KERN_INFO "%s: Link down.\n", dev->name);
return;
}
/* Link up, or auto-negotiation still in progress */
read_phy(lp->phy_address, MII_BMSR, &bmsr);
read_phy(lp->phy_address, MII_BMCR, &bmcr);
if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
if (!(bmsr & BMSR_ANEGCOMPLETE))
return; /* Do nothing - another interrupt generated when negotiation complete */
/* derive speed/duplex from the negotiated link-partner ability */
read_phy(lp->phy_address, MII_LPA, &lpa);
if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
else speed = SPEED_10;
if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
else duplex = DUPLEX_HALF;
} else {
/* autonegotiation disabled: use the forced BMCR settings */
speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
}
/* Update the MAC: program speed (SPD) and duplex (FD) bits in EMAC_CFG */
mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
if (speed == SPEED_100) {
if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
else /* 100 Half Duplex */
mac_cfg |= AT91_EMAC_SPD;
} else {
if (duplex == DUPLEX_FULL) /* 10 Full Duplex */
mac_cfg |= AT91_EMAC_FD;
else {} /* 10 Half Duplex */
}
at91_emac_write(AT91_EMAC_CFG, mac_cfg);
if (!silent)
printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
netif_carrier_on(dev);
}
/*
* Handle interrupts from the PHY
*/
static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct at91_private *lp = netdev_priv(dev);
unsigned int phy;
/*
* This hander is triggered on both edges, but the PHY chips expect
* level-triggering. We therefore have to check if the PHY actually has
* an IRQ pending.
*/
enable_mdi();
/* reading each PHY's interrupt-status register also acks the interrupt;
* bail out early if the relevant status bit is not set */
if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
if (!(phy & (1 << 0)))
goto done;
}
else if (lp->phy_type == MII_LXT971A_ID) {
read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
if (!(phy & (1 << 2)))
goto done;
}
else if (lp->phy_type == MII_BCM5221_ID) {
read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
if (!(phy & (1 << 0)))
goto done;
}
else if (lp->phy_type == MII_KS8721_ID) {
read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
if (!(phy & ((1 << 2) | 1)))
goto done;
}
else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy);
if (!(phy & ((1 << 2) | 1)))
goto done;
}
else if (lp->phy_type == MII_DP83848_ID) {
read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
if (!(phy & (1 << 7)))
goto done;
}
/* a real link-state change: reprogram the MAC for the new speed/duplex */
update_linkspeed(dev, 0);
done:
disable_mdi();
return IRQ_HANDLED;
}
/*
* Initialize and enable the PHY interrupt for link-state changes
*/
static void enable_phyirq(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
unsigned int dsintr, irq_number;
int status;
if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
/*
* PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
* or board does not have it connected.
*/
mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
return;
}
irq_number = lp->board_data.phy_irq_pin;
status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
if (status) {
printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
return;
}
/* program the PHY's interrupt-enable register(s) under the PHY lock */
spin_lock_irq(&lp->lock);
enable_mdi();
if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
}
else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
}
else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
dsintr = (1 << 15) | ( 1 << 14);
write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
}
else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
dsintr = (1 << 10) | ( 1 << 8);
write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
}
else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
dsintr = dsintr | 0x500; /* set bits 8, 10 */
write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
}
else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
dsintr = dsintr | 0x3c; /* set bits 2..5 */
write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
dsintr = dsintr | 0x3; /* set bits 0,1 */
write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
}
disable_mdi();
spin_unlock_irq(&lp->lock);
}
/*
* Disable the PHY interrupt
*/
static void disable_phyirq(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
unsigned int dsintr;
unsigned int irq_number;
if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
/* no IRQ pin: we were polling instead; stop the poll timer */
del_timer_sync(&lp->check_timer);
return;
}
/* mask the PHY's interrupt sources under the PHY lock */
spin_lock_irq(&lp->lock);
enable_mdi();
if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr);
dsintr = dsintr | 0xf00; /* set bits 8..11 */
write_phy(lp->phy_address, MII_DSINTR_REG, dsintr);
}
else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr);
dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
write_phy(lp->phy_address, MII_ISINTE_REG, dsintr);
}
else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr);
/* NOTE(review): the value just read is discarded and the register is
* overwritten with ~(1 << 14) rather than read-modify-write — verify
* against the BCM5221 datasheet */
dsintr = ~(1 << 14);
write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr);
}
else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
read_phy(lp->phy_address, MII_TPISTATUS, &dsintr);
/* NOTE(review): same pattern as above — the read result is unused */
dsintr = ~((1 << 10) | (1 << 8));
write_phy(lp->phy_address, MII_TPISTATUS, dsintr);
}
else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr);
dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr);
}
else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr);
dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
write_phy(lp->phy_address, MII_DPMICR_REG, dsintr);
read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr);
dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
write_phy(lp->phy_address, MII_DPMISR_REG, dsintr);
}
disable_mdi();
spin_unlock_irq(&lp->lock);
irq_number = lp->board_data.phy_irq_pin;
free_irq(irq_number, dev); /* Free interrupt handler */
}
/*
* Perform a software reset of the PHY.
*/
#if 0
/* Compiled out: currently unused. */
static void reset_phy(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
unsigned int bmcr;
spin_lock_irq(&lp->lock);
enable_mdi();
/* Perform PHY reset */
write_phy(lp->phy_address, MII_BMCR, BMCR_RESET);
/* Wait until PHY reset is complete */
/* NOTE(review): this loops while BMCR_RESET is *clear*; a PHY's reset
* bit self-clears on completion, so the polarity looks inverted —
* confirm before ever re-enabling this code */
do {
read_phy(lp->phy_address, MII_BMCR, &bmcr);
} while (!(bmcr & BMCR_RESET));
disable_mdi();
spin_unlock_irq(&lp->lock);
}
#endif
/*
 * Periodic link poll for boards whose PHY IRQ line is not wired up;
 * re-arms itself every LINK_POLL_INTERVAL.
 */
static void at91ether_check_link(unsigned long dev_id)
{
	struct net_device *netdev = (struct net_device *) dev_id;
	struct at91_private *priv = netdev_priv(netdev);

	enable_mdi();
	update_linkspeed(netdev, 1);
	disable_mdi();

	mod_timer(&priv->check_timer, jiffies + LINK_POLL_INTERVAL);
}
/* ......................... ADDRESS MANAGEMENT ........................ */
/*
* NOTE: Your bootloader must always set the MAC address correctly before
* booting into Linux.
*
* - It must always set the MAC address after reset, even if it doesn't
* happen to access the Ethernet while it's booting. Some versions of
* U-Boot on the AT91RM9200-DK do not do this.
*
* - Likewise it must store the addresses in the correct byte order.
* MicroMonitor (uMon) on the CSB337 does this incorrectly (and
* continues to do so, for bug-compatibility).
*/
/*
 * Unpack a MAC address from the EMAC specific-address register pair
 * (@hi holds the top two bytes, @lo the bottom four) into
 * dev->dev_addr.  Returns 1 if the address is valid, 0 otherwise.
 */
static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
{
	char addr[6];
	int i;

	if (machine_is_csb337()) {
		/* The CSB337 bootloader stores the MAC byte-reversed */
		for (i = 0; i < 4; i++)
			addr[5 - i] = (lo >> (8 * i)) & 0xff;
		addr[1] = hi & 0xff;
		addr[0] = (hi >> 8) & 0xff;
	} else {
		for (i = 0; i < 4; i++)
			addr[i] = (lo >> (8 * i)) & 0xff;
		addr[4] = hi & 0xff;
		addr[5] = (hi >> 8) & 0xff;
	}

	if (!is_valid_ether_addr(addr))
		return 0;

	memcpy(dev->dev_addr, addr, 6);
	return 1;
}
/*
* Set the ethernet MAC address in dev->dev_addr
*/
static void __init get_mac_address(struct net_device *dev)
{
/* Try each of the four EMAC specific-address register pairs in turn,
* stopping at the first one holding a valid address. */
/* Check Specific-Address 1 */
if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L)))
return;
/* Check Specific-Address 2 */
if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L)))
return;
/* Check Specific-Address 3 */
if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L)))
return;
/* Check Specific-Address 4 */
if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L)))
return;
printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
}
/*
* Program the hardware MAC address from dev->dev_addr.
*/
static void update_mac_address(struct net_device *dev)
{
/* pack dev->dev_addr little-endian into specific-address 1 */
at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
/* clear specific-address 2 so it cannot match */
at91_emac_write(AT91_EMAC_SA2L, 0);
at91_emac_write(AT91_EMAC_SA2H, 0);
}
/*
* Store the new hardware address in dev->dev_addr, and update the MAC.
*/
/*
 * ndo_set_mac_address handler: validate and store the new address in
 * dev->dev_addr, then program it into the EMAC specific-address
 * registers.  Returns -EADDRNOTAVAIL for an invalid address.
 */
static int set_mac_address(struct net_device *dev, void* addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
	update_mac_address(dev);

	printk("%s: Setting MAC address to %pM\n", dev->name,
		dev->dev_addr);

	return 0;
}
/*
 * Return bit @bitnr (LSB-first across the address bytes) of @addr as
 * 0 or 1.  Helper for hash_get_index().
 *
 * Fixed declaration-specifier order: "static inline int" rather than
 * the old "static int inline" (storage-class/function specifiers
 * before the type, per kernel coding style / checkpatch); the body is
 * the equivalent branchless form.
 */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}
/*
* The hash address register is 64 bits long and takes up two locations in the memory map.
* The least significant bits are stored in EMAC_HSL and the most significant
* bits in EMAC_HSH.
*
* The unicast hash enable and the multicast hash enable bits in the network configuration
* register enable the reception of hash matched frames. The destination address is
* reduced to a 6 bit index into the 64 bit hash register using the following hash function.
* The hash function is an exclusive or of every sixth bit of the destination address.
* hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
* hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
* hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
* hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
* hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
* hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
* da[0] represents the least significant bit of the first byte received, that is, the multicast/
* unicast indicator, and da[47] represents the most significant bit of the last byte
* received.
* If the hash index points to a bit that is set in the hash register then the frame will be
* matched according to whether the frame is multicast or unicast.
* A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
* the hash index points to a bit set in the hash register.
* A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
* hash index points to a bit set in the hash register.
* To receive all multicast frames, the hash register should be set with all ones and the
* multicast hash enable bit should be set in the network configuration register.
*/
/*
* Return the hash index value for the specified address.
*/
/*
 * Compute the 6-bit EMAC hash-filter index for @addr: each output bit j
 * is the XOR of every sixth address bit starting at bit j (see the
 * hash-function description above).
 */
static int hash_get_index(__u8 *addr)
{
	int index = 0;
	int j;

	for (j = 0; j < 6; j++) {
		int bitval = 0;
		int i;

		for (i = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		index |= bitval << j;
	}

	return index;
}
/*
* Add multicast addresses to the internal multicast-hash table.
*/
static void at91ether_sethashtable(struct net_device *dev)
{
struct netdev_hw_addr *ha;
unsigned long mc_filter[2];
unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
/* set one bit in the 64-bit filter per multicast address */
netdev_for_each_mc_addr(ha, dev) {
bitnr = hash_get_index(ha->addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
/* 64-bit hash filter: low word in HSL, high word in HSH */
at91_emac_write(AT91_EMAC_HSL, mc_filter[0]);
at91_emac_write(AT91_EMAC_HSH, mc_filter[1]);
}
/*
* Enable/Disable promiscuous and multicast modes.
*/
/*
 * ndo_set_rx_mode handler: program promiscuous and multicast filtering
 * in EMAC_CFG and the hash registers.
 *
 * Fix: the original "else if (dev->flags & (~IFF_PROMISC))" and
 * "else if (dev->flags & (~IFF_ALLMULTI))" conditions were tautological
 * flag masks (true whenever any other flag is set, i.e. always for a
 * running interface); the intended logic is a plain else, as used by
 * the macb driver for the same hardware family.
 */
static void at91ether_set_multicast_list(struct net_device *dev)
{
	unsigned long cfg;

	cfg = at91_emac_read(AT91_EMAC_CFG);

	if (dev->flags & IFF_PROMISC)		/* Enable promiscuous mode */
		cfg |= AT91_EMAC_CAF;
	else					/* Disable promiscuous mode */
		cfg &= ~AT91_EMAC_CAF;

	if (dev->flags & IFF_ALLMULTI) {	/* Enable all multicast mode */
		at91_emac_write(AT91_EMAC_HSH, -1);
		at91_emac_write(AT91_EMAC_HSL, -1);
		cfg |= AT91_EMAC_MTI;
	} else if (!netdev_mc_empty(dev)) {	/* Enable specific multicasts */
		at91ether_sethashtable(dev);
		cfg |= AT91_EMAC_MTI;
	} else {				/* Disable all multicast mode */
		at91_emac_write(AT91_EMAC_HSH, 0);
		at91_emac_write(AT91_EMAC_HSL, 0);
		cfg &= ~AT91_EMAC_MTI;
	}

	at91_emac_write(AT91_EMAC_CFG, cfg);
}
/* ......................... ETHTOOL SUPPORT ........................... */
/* mii_if_info read hook: fetch one PHY register (MDI already enabled). */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	unsigned int val;

	read_phy(phy_id, location, &val);
	return val;
}
/* mii_if_info write hook: store one PHY register (MDI already enabled). */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	write_phy(phy_id, location, value);
}
/* ethtool get_settings: query link parameters via the generic MII layer,
 * serialising PHY access with the management port enabled. */
static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct at91_private *lp = netdev_priv(dev);
int ret;
spin_lock_irq(&lp->lock);
enable_mdi();
ret = mii_ethtool_gset(&lp->mii, cmd);
disable_mdi();
spin_unlock_irq(&lp->lock);
if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
cmd->supported = SUPPORTED_FIBRE;
cmd->port = PORT_FIBRE;
}
return ret;
}
/* ethtool set_settings: forward to the generic MII layer under the PHY lock. */
static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct at91_private *priv = netdev_priv(dev);
	int err;

	spin_lock_irq(&priv->lock);
	enable_mdi();
	err = mii_ethtool_sset(&priv->mii, cmd);
	disable_mdi();
	spin_unlock_irq(&priv->lock);

	return err;
}
/* ethtool nway_reset: restart autonegotiation under the PHY lock. */
static int at91ether_nwayreset(struct net_device *dev)
{
	struct at91_private *priv = netdev_priv(dev);
	int err;

	spin_lock_irq(&priv->lock);
	enable_mdi();
	err = mii_nway_restart(&priv->mii);
	disable_mdi();
	spin_unlock_irq(&priv->lock);

	return err;
}
/* ethtool get_drvinfo: report driver name, version and bus location. */
static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
}
/* ethtool entry points: link settings, driver info, autoneg restart,
 * and the generic carrier-based link query. */
static const struct ethtool_ops at91ether_ethtool_ops = {
.get_settings = at91ether_get_settings,
.set_settings = at91ether_set_settings,
.get_drvinfo = at91ether_get_drvinfo,
.nway_reset = at91ether_nwayreset,
.get_link = ethtool_op_get_link,
};
/*
 * ndo_do_ioctl handler: service MII ioctls via generic_mii_ioctl();
 * only valid while the interface is running.
 */
static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct at91_private *priv = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&priv->lock);
	enable_mdi();
	err = generic_mii_ioctl(&priv->mii, if_mii(rq), cmd, NULL);
	disable_mdi();
	spin_unlock_irq(&priv->lock);

	return err;
}
/* ................................ MAC ................................ */
/*
* Initialize and start the Receiver and Transmit subsystems
*/
static void at91ether_start(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
struct recv_desc_bufs *dlist, *dlist_phys;
int i;
unsigned long ctl;
dlist = lp->dlist;
dlist_phys = lp->dlist_phys;
/* point each descriptor at the bus address of its receive buffer
* (addresses are computed from the dlist's physical mirror) */
for (i = 0; i < MAX_RX_DESCR; i++) {
dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0];
dlist->descriptors[i].size = 0;
}
/* Set the Wrap bit on the last descriptor */
dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP;
/* Reset buffer index */
lp->rxBuffIndex = 0;
/* Program address of descriptor list in Rx Buffer Queue register */
at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys);
/* Enable Receive and Transmit */
ctl = at91_emac_read(AT91_EMAC_CTL);
at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
}
/*
* Open the ethernet interface
*/
static int at91ether_open(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
unsigned long ctl;
if (!is_valid_ether_addr(dev->dev_addr))
return -EADDRNOTAVAIL;
clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
/* Clear internal statistics */
ctl = at91_emac_read(AT91_EMAC_CTL);
at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
/* Update the MAC address (incase user has changed it) */
update_mac_address(dev);
/* Enable PHY interrupt (or start the poll timer as fallback) */
enable_phyirq(dev);
/* Enable MAC interrupts */
at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
| AT91_EMAC_ROVR | AT91_EMAC_ABT);
/* Determine current link speed */
spin_lock_irq(&lp->lock);
enable_mdi();
update_linkspeed(dev, 0);
disable_mdi();
spin_unlock_irq(&lp->lock);
/* set up RX descriptors and enable the MAC, then let traffic flow */
at91ether_start(dev);
netif_start_queue(dev);
return 0;
}
/*
* Close the interface
*/
static int at91ether_close(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
unsigned long ctl;
/* Disable Receiver and Transmitter */
ctl = at91_emac_read(AT91_EMAC_CTL);
at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
/* Disable PHY interrupt (or stop the poll timer) */
disable_phyirq(dev);
/* Disable MAC interrupts */
at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
| AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
| AT91_EMAC_ROVR | AT91_EMAC_ABT);
netif_stop_queue(dev);
clk_disable(lp->ether_clk); /* Disable Peripheral clock */
return 0;
}
/*
* Transmit packet.
*/
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
/* BNQ set means the single transmit buffer slot is free */
if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
/* only one in-flight packet: stop the queue until TX completes */
netif_stop_queue(dev);
/* Store packet information (to free when Tx completed) */
lp->skb = skb;
lp->skb_length = skb->len;
/* NOTE(review): dma_map_single() result is not checked with
* dma_mapping_error(); tx_bytes is also counted before completion */
lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
dev->stats.tx_bytes += skb->len;
/* Set address of the data in the Transmit Address register */
at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr);
/* Set length of the packet in the Transmit Control register */
at91_emac_write(AT91_EMAC_TCR, skb->len);
} else {
printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
on this skb, he also reports -ENETDOWN and printk's, so either
we free and return(0) or don't free and return 1 */
}
return NETDEV_TX_OK;
}
/*
* Update the current statistics from the internal statistics registers.
*/
/*
 * Fold the EMAC's hardware statistics counters into dev->stats.
 * Values are accumulated with "+=", which presumes the hardware
 * counters are clear-on-read — TODO confirm against the AT91RM9200
 * datasheet.  Only done while the interface (and hence the peripheral
 * clock) is up.
 */
static struct net_device_stats *at91ether_stats(struct net_device *dev)
{
int ale, lenerr, seqe, lcol, ecol;
if (netif_running(dev)) {
dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */
ale = at91_emac_read(AT91_EMAC_ALE);
dev->stats.rx_frame_errors += ale; /* Alignment errors */
lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF);
dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
seqe = at91_emac_read(AT91_EMAC_SEQE);
dev->stats.rx_crc_errors += seqe; /* CRC error */
dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */
dev->stats.rx_errors += (ale + lenerr + seqe
+ at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB));
dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */
dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */
dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */
dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */
lcol = at91_emac_read(AT91_EMAC_LCOL);
ecol = at91_emac_read(AT91_EMAC_ECOL);
dev->stats.tx_window_errors += lcol; /* Late collisions */
dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol);
}
return &dev->stats;
}
/*
 * Extract received frame from buffer descriptors and sent to upper layers.
 * (Called from interrupt context)
 *
 * Walks the receive descriptor ring starting at lp->rxBuffIndex while the
 * DONE (ownership) bit is set, copies each frame into a freshly allocated
 * skb (2-byte reserve keeps the IP header aligned), hands it to netif_rx(),
 * then returns the descriptor to the hardware and advances/wraps the index.
 * An allocation failure drops the frame but still recycles the descriptor.
 */
static void at91ether_rx(struct net_device *dev)
{
struct at91_private *lp = netdev_priv(dev);
struct recv_desc_bufs *dlist;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
dlist = lp->dlist;
while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
p_recv = dlist->recv_buf[lp->rxBuffIndex];
pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb != NULL) {
skb_reserve(skb, 2);
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_bytes += pktlen;
netif_rx(skb);
}
else {
dev->stats.rx_dropped += 1;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
}
if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST)
dev->stats.multicast++;
dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */
if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */
lp->rxBuffIndex = 0;
else
lp->rxBuffIndex++;
}
}
/*
* MAC interrupt handler
*/
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct at91_private *lp = netdev_priv(dev);
unsigned long intstatus, ctl;
/* MAC Interrupt Status register indicates what interrupts are pending.
It is automatically cleared once read. */
intstatus = at91_emac_read(AT91_EMAC_ISR);
if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
at91ether_rx(dev);
if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */
/* The TCOM bit is set even if the transmission failed. */
if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY))
dev->stats.tx_errors += 1;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
lp->skb = NULL;
dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
}
netif_wake_queue(dev);
}
/* Work-around for Errata #11 */
if (intstatus & AT91_EMAC_RBNA) {
ctl = at91_emac_read(AT91_EMAC_CTL);
at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
}
if (intstatus & AT91_EMAC_ROVR)
printk("%s: ROVR error\n", dev->name);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: invoke the MAC interrupt handler with local
 * interrupts disabled, exactly as if the hardware IRQ had fired.
 */
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(irq_state);
}
#endif
/* net_device callbacks; installed on the device in at91ether_setup(). */
static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
.ndo_get_stats = at91ether_stats,
.ndo_set_rx_mode = at91ether_set_multicast_list,
.ndo_set_mac_address = set_mac_address,
.ndo_do_ioctl = at91ether_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
#endif
};
/*
 * Initialize the ethernet interface.
 *
 * @phy_type:    PHY ID detected by at91ether_probe()
 * @phy_address: MDIO address the PHY answered at
 * @pdev:        owning platform device (supplies macb_platform_data)
 * @ether_clk:   EMAC peripheral clock (ownership kept by the caller)
 *
 * Allocates the net_device and DMA receive descriptors, programs the MAC,
 * performs PHY-specific setup, registers the interface and reports the
 * detected PHY.  Returns 0 on success or a negative errno (all resources
 * acquired here are released on every failure path).
 *
 * Fix vs. original: the Davicom fiber-mode test compared lp->phy_type,
 * which is only assigned further down (alloc_etherdev() zeroes the priv
 * area), so DM9161A fiber detection could never trigger.  Both Davicom
 * checks now consistently use the phy_type argument.
 */
static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
			struct platform_device *pdev, struct clk *ether_clk)
{
	struct macb_platform_data *board_data = pdev->dev.platform_data;
	struct net_device *dev;
	struct at91_private *lp;
	unsigned int val;
	int res;

	dev = alloc_etherdev(sizeof(struct at91_private));
	if (!dev)
		return -ENOMEM;

	dev->base_addr = AT91_VA_BASE_EMAC;
	dev->irq = AT91RM9200_ID_EMAC;

	/* Install the interrupt handler */
	if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
		free_netdev(dev);
		return -EBUSY;
	}

	/* Allocate memory for DMA Receive descriptors */
	lp = netdev_priv(dev);
	lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
	if (lp->dlist == NULL) {
		free_irq(dev->irq, dev);
		free_netdev(dev);
		return -ENOMEM;
	}

	lp->board_data = *board_data;
	lp->ether_clk = ether_clk;
	platform_set_drvdata(pdev, dev);

	spin_lock_init(&lp->lock);

	ether_setup(dev);
	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &at91ether_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	get_mac_address(dev);		/* Get ethernet address and store it in dev->dev_addr */
	update_mac_address(dev);	/* Program ethernet address into MAC */

	at91_emac_write(AT91_EMAC_CTL, 0);

	if (lp->board_data.is_rmii)
		at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
	else
		at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);

	/* Perform PHY-specific initialization */
	spin_lock_irq(&lp->lock);
	enable_mdi();
	if ((phy_type == MII_DM9161_ID) || (phy_type == MII_DM9161A_ID)) {
		read_phy(phy_address, MII_DSCR_REG, &val);
		if ((val & (1 << 10)) == 0)	/* DSCR bit 10 is 0 -- fiber mode */
			lp->phy_media = PORT_FIBRE;
	} else if (machine_is_csb337()) {
		/* mix link activity status into LED2 link state */
		write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22);
	} else if (machine_is_ecbat91())
		write_phy(phy_address, MII_LEDCTRL_REG, 0x156A);
	disable_mdi();
	spin_unlock_irq(&lp->lock);

	lp->mii.dev = dev;		/* Support for ethtool */
	lp->mii.mdio_read = mdio_read;
	lp->mii.mdio_write = mdio_write;
	lp->mii.phy_id = phy_address;
	lp->mii.phy_id_mask = 0x1f;
	lp->mii.reg_num_mask = 0x1f;

	lp->phy_type = phy_type;	/* Type of PHY connected */
	lp->phy_address = phy_address;	/* MDI address of PHY */

	/* Register the network interface */
	res = register_netdev(dev);
	if (res) {
		free_irq(dev->irq, dev);
		free_netdev(dev);
		dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
		return res;
	}

	/* Determine current link speed */
	spin_lock_irq(&lp->lock);
	enable_mdi();
	update_linkspeed(dev, 0);
	disable_mdi();
	spin_unlock_irq(&lp->lock);
	netif_carrier_off(dev);		/* will be enabled in open() */

	/* If board has no PHY IRQ, use a timer to poll the PHY */
	if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
		init_timer(&lp->check_timer);
		lp->check_timer.data = (unsigned long)dev;
		lp->check_timer.function = at91ether_check_link;
	} else if (lp->board_data.phy_irq_pin >= 32)
		gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");

	/* Display ethernet banner */
	printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
	       dev->name, (uint) dev->base_addr, dev->irq,
	       at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
	       at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
	       dev->dev_addr);
	if ((phy_type == MII_DM9161_ID) || (phy_type == MII_DM9161A_ID))
		printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
	else if (phy_type == MII_LXT971A_ID)
		printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
	else if (phy_type == MII_RTL8201_ID)
		printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
	else if (phy_type == MII_BCM5221_ID)
		printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
	else if (phy_type == MII_DP83847_ID)
		printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
	else if (phy_type == MII_DP83848_ID)
		printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
	else if (phy_type == MII_AC101L_ID)
		printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
	else if (phy_type == MII_KS8721_ID)
		printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
	else if (phy_type == MII_T78Q21x3_ID)
		printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
	else if (phy_type == MII_LAN83C185_ID)
		printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);

	return 0;
}
/*
 * Detect MAC and PHY and perform initialization.
 *
 * Scans all 32 MDIO addresses for a PHY with a recognized ID and, on the
 * first match, finishes bringing the driver up via at91ether_setup().
 *
 * Returns 0 on success, -ENODEV when the clock is missing or no supported
 * PHY answers, or the error code propagated from at91ether_setup().
 *
 * Fix vs. original: "detected" was initialized to -1, so a fruitless scan
 * made probe return -EPERM; -ENODEV is the meaningful errno here.
 */
static int __init at91ether_probe(struct platform_device *pdev)
{
	unsigned int phyid1, phyid2;
	int detected = -ENODEV;
	unsigned long phy_id;
	unsigned short phy_address = 0;
	struct clk *ether_clk;

	ether_clk = clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(ether_clk)) {
		printk(KERN_ERR "at91_ether: no clock defined\n");
		return -ENODEV;
	}
	clk_enable(ether_clk);		/* Enable Peripheral clock */

	while ((detected != 0) && (phy_address < 32)) {
		/* Read the PHY ID registers */
		enable_mdi();
		read_phy(phy_address, MII_PHYSID1, &phyid1);
		read_phy(phy_address, MII_PHYSID2, &phyid2);
		disable_mdi();

		/* Low nibble of PHYSID2 is the silicon revision: mask it off */
		phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
		switch (phy_id) {
			case MII_DM9161_ID:		/* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
			case MII_DM9161A_ID:		/* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
			case MII_LXT971A_ID:		/* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
			case MII_RTL8201_ID:		/* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
			case MII_BCM5221_ID:		/* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
			case MII_DP83847_ID:		/* National Semiconductor DP83847:  */
			case MII_DP83848_ID:		/* National Semiconductor DP83848:  */
			case MII_AC101L_ID:		/* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
			case MII_KS8721_ID:		/* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
			case MII_T78Q21x3_ID:		/* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
			case MII_LAN83C185_ID:		/* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
				detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
				break;
		}
		phy_address++;
	}

	clk_disable(ether_clk);		/* Disable Peripheral clock */
	return detected;
}
/*
 * Tear down the interface: release the PHY IRQ GPIO (if one was requested
 * in at91ether_setup()), unregister the netdev, free the MAC IRQ, the DMA
 * descriptor memory, the clock reference and finally the net_device.
 */
static int __devexit at91ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(dev);
if (gpio_is_valid(lp->board_data.phy_irq_pin) &&
lp->board_data.phy_irq_pin >= 32)
gpio_free(lp->board_data.phy_irq_pin);
unregister_netdev(dev);
free_irq(dev->irq, dev);
dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
clk_put(lp->ether_clk);
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend: if the interface is up, mask the PHY IRQ, detach the device
 * from the stack and gate the EMAC clock.  Mirrored by at91ether_resume().
 */
static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
int phy_irq = lp->board_data.phy_irq_pin;
disable_irq(phy_irq);
}
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
clk_disable(lp->ether_clk);
}
return 0;
}
/*
 * Resume: re-enable the EMAC clock, reattach the device, restart the TX
 * queue and unmask the PHY IRQ -- the exact reverse of at91ether_suspend().
 */
static int at91ether_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
clk_enable(lp->ether_clk);
netif_device_attach(net_dev);
netif_start_queue(net_dev);
if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
int phy_irq = lp->board_data.phy_irq_pin;
enable_irq(phy_irq);
}
}
return 0;
}
#else
#define at91ether_suspend NULL
#define at91ether_resume NULL
#endif
/*
 * Platform driver glue.  Note there is no .probe member: the driver is
 * registered with platform_driver_probe() in at91ether_init(), which binds
 * the __init probe function exactly once at boot.
 */
static struct platform_driver at91ether_driver = {
.remove = __devexit_p(at91ether_remove),
.suspend = at91ether_suspend,
.resume = at91ether_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
/* Module entry: one-shot probe registration (probe code is __init). */
static int __init at91ether_init(void)
{
return platform_driver_probe(&at91ether_driver, at91ether_probe);
}
/* Module exit: unregister the platform driver. */
static void __exit at91ether_exit(void)
{
platform_driver_unregister(&at91ether_driver);
}
module_init(at91ether_init)
module_exit(at91ether_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
MODULE_AUTHOR("Andrew Victor");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
Abhinav1997/android_kernel_lge_gee | arch/arm/mach-kirkwood/irq.c | 5056 | 1586 | /*
* arch/arm/mach-kirkwood/irq.c
*
* Kirkwood IRQ handling.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <mach/bridge-regs.h>
#include <plat/irq.h>
#include "common.h"
/*
 * Chained handler for the summary GPIO interrupts.  Each summary IRQ
 * covers a bank of 8 GPIO lines; (irq - base) << 3 yields the index of
 * the first pin in that bank, and the Orion GPIO layer demultiplexes the
 * individual pin interrupts from there.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
BUG_ON(irq < IRQ_KIRKWOOD_GPIO_LOW_0_7);
BUG_ON(irq > IRQ_KIRKWOOD_GPIO_HIGH_16_23);
orion_gpio_irq_handler((irq - IRQ_KIRKWOOD_GPIO_LOW_0_7) << 3);
}
/*
 * Board IRQ initialization: set up the two 32-bit main interrupt banks,
 * then register gpiolib/irq support for GPIOs 0-31 (low bank) and 32-49
 * (high bank), chaining each 8-pin summary interrupt to gpio_irq_handler().
 */
void __init kirkwood_init_irq(void)
{
orion_irq_init(0, (void __iomem *)(IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF));
orion_irq_init(32, (void __iomem *)(IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF));
/*
* Initialize gpiolib for GPIOs 0-49.
*/
orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0,
IRQ_KIRKWOOD_GPIO_START);
irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler);
irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler);
irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler);
irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler);
orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0,
IRQ_KIRKWOOD_GPIO_START + 32);
irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler);
irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler);
irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23,
gpio_irq_handler);
}
| gpl-2.0 |
UISS-Dev-Team/android_kernel_huawei_c8813 | drivers/of/of_spi.c | 5056 | 2422 | /*
* SPI OF support routines
* Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* Support routines for deriving SPI device attachments from the device
* tree.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/of_irq.h>
#include <linux/of_spi.h>
/**
 * of_register_spi_devices - Register child devices onto the SPI bus
 * @master: Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a
 * 'reg' property.  A child that lacks a resolvable modalias, a valid "reg"
 * or a valid "spi-max-frequency" property is skipped with an error message;
 * the scan continues with the remaining children.
 *
 * Fix vs. original: the allocation-failure branch called spi_dev_put() on
 * the NULL pointer it had just tested -- a harmless but pointless call,
 * now removed.
 */
void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	const __be32 *prop;
	int rc;
	int len;

	if (!master->dev.of_node)
		return;

	for_each_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address (chip select) */
		prop = of_get_property(nc, "reg", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'reg' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = be32_to_cpup(prop);

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;

		/* Device speed */
		prop = of_get_property(nc, "spi-max-frequency", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = be32_to_cpup(prop);

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		request_module(spi->modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}
	}
}
EXPORT_SYMBOL(of_register_spi_devices);
| gpl-2.0 |
NamelessRom/android_kernel_samsung_latona | drivers/video/backlight/kb3886_bl.c | 8128 | 4950 | /*
* Backlight Driver for the KB3886 Backlight
*
* Copyright (c) 2007-2008 Claudio Nieder
*
* Based on corgi_bl.c by Richard Purdie and kb3886 driver by Robert Woerle
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#define KB3886_PARENT 0x64
#define KB3886_IO 0x60
#define KB3886_ADC_DAC_PWM 0xC4
#define KB3886_PWM0_WRITE 0x81
#define KB3886_PWM0_READ 0x41
static DEFINE_MUTEX(bl_mutex);
/*
 * Program backlight brightness into the KB3886 embedded controller.
 *
 * Only the low 8 bits of @intensity are used.  The write sequence is:
 * select the ADC/DAC/PWM function on the command port, select PWM0 write
 * on the I/O port, then write the value -- with 10ms settling delays, all
 * serialized by bl_mutex.
 */
static void kb3886_bl_set_intensity(int intensity)
{
mutex_lock(&bl_mutex);
intensity = intensity&0xff;
outb(KB3886_ADC_DAC_PWM, KB3886_PARENT);
msleep(10);
outb(KB3886_PWM0_WRITE, KB3886_IO);
msleep(10);
outb(intensity, KB3886_IO);
mutex_unlock(&bl_mutex);
}
/* Per-machine backlight parameters and the hook used to program them. */
struct kb3886bl_machinfo {
int max_intensity;
int default_intensity;
int limit_mask;
void (*set_bl_intensity)(int intensity);
};
/* The single supported configuration: 8-bit PWM, default level 0xa0. */
static struct kb3886bl_machinfo kb3886_bl_machinfo = {
.max_intensity = 0xff,
.default_intensity = 0xa0,
.limit_mask = 0x7f,
.set_bl_intensity = kb3886_bl_set_intensity,
};
/* Platform device registered from kb3886_init(); carries the machinfo. */
static struct platform_device kb3886bl_device = {
.name = "kb3886-bl",
.dev = {
.platform_data = &kb3886_bl_machinfo,
},
.id = -1,
};
static struct platform_device *devices[] __initdata = {
&kb3886bl_device,
};
/*
 * Back to driver
 */
/* Last intensity actually programmed into the hardware. */
static int kb3886bl_intensity;
static struct backlight_device *kb3886_backlight_device;
static struct kb3886bl_machinfo *bl_machinfo;
/* Driver state flags; only KB3886BL_SUSPENDED is defined. */
static unsigned long kb3886bl_flags;
#define KB3886BL_SUSPENDED 0x01
/* DMI match: this driver only loads on the Sahara Touch-iT T201. */
static struct dmi_system_id __initdata kb3886bl_device_table[] = {
{
.ident = "Sahara Touch-iT",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SDV"),
DMI_MATCH(DMI_PRODUCT_NAME, "iTouch T201"),
},
},
{ }
};
/*
 * Push the requested brightness to the hardware (.update_status).
 * The lamp is forced off whenever the device is blanked, fb-blanked or
 * suspended; otherwise props.brightness is programmed as-is.
 */
static int kb3886bl_send_intensity(struct backlight_device *bd)
{
	int level = bd->props.brightness;

	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.fb_blank != FB_BLANK_UNBLANK ||
	    (kb3886bl_flags & KB3886BL_SUSPENDED))
		level = 0;

	bl_machinfo->set_bl_intensity(level);
	kb3886bl_intensity = level;
	return 0;
}
#ifdef CONFIG_PM
/* Suspend: mark suspended so update_status drives the lamp to 0. */
static int kb3886bl_suspend(struct platform_device *pdev, pm_message_t state)
{
struct backlight_device *bd = platform_get_drvdata(pdev);
kb3886bl_flags |= KB3886BL_SUSPENDED;
backlight_update_status(bd);
return 0;
}
/* Resume: clear the flag and restore the previous brightness. */
static int kb3886bl_resume(struct platform_device *pdev)
{
struct backlight_device *bd = platform_get_drvdata(pdev);
kb3886bl_flags &= ~KB3886BL_SUSPENDED;
backlight_update_status(bd);
return 0;
}
#else
#define kb3886bl_suspend NULL
#define kb3886bl_resume NULL
#endif
/* Report the last programmed brightness (.get_brightness). */
static int kb3886bl_get_intensity(struct backlight_device *bd)
{
return kb3886bl_intensity;
}
/* backlight core callbacks. */
static const struct backlight_ops kb3886bl_ops = {
.get_brightness = kb3886bl_get_intensity,
.update_status = kb3886bl_send_intensity,
};
/*
 * Bind to the platform device: register a backlight device using the
 * machinfo from platform_data, then drive the default brightness.
 * A zero limit_mask is normalized to "no limit" (-1).
 */
static int kb3886bl_probe(struct platform_device *pdev)
{
struct backlight_properties props;
struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data;
bl_machinfo = machinfo;
if (!machinfo->limit_mask)
machinfo->limit_mask = -1;
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = machinfo->max_intensity;
kb3886_backlight_device = backlight_device_register("kb3886-bl",
&pdev->dev, NULL,
&kb3886bl_ops,
&props);
if (IS_ERR(kb3886_backlight_device))
return PTR_ERR(kb3886_backlight_device);
platform_set_drvdata(pdev, kb3886_backlight_device);
kb3886_backlight_device->props.power = FB_BLANK_UNBLANK;
kb3886_backlight_device->props.brightness = machinfo->default_intensity;
backlight_update_status(kb3886_backlight_device);
return 0;
}
/* Unbind: unregister the backlight device created in probe. */
static int kb3886bl_remove(struct platform_device *pdev)
{
struct backlight_device *bd = platform_get_drvdata(pdev);
backlight_device_unregister(bd);
return 0;
}
/* Platform driver; name must match kb3886bl_device above to bind. */
static struct platform_driver kb3886bl_driver = {
.probe = kb3886bl_probe,
.remove = kb3886bl_remove,
.suspend = kb3886bl_suspend,
.resume = kb3886bl_resume,
.driver = {
.name = "kb3886-bl",
},
};
/*
 * Module entry: bail out on non-matching hardware (DMI check), register
 * the platform device, then the driver.
 *
 * Fix vs. original: the return value of platform_add_devices() was
 * ignored; a registration failure is now propagated instead of going on
 * to register a driver with no device.
 */
static int __init kb3886_init(void)
{
	int ret;

	if (!dmi_check_system(kb3886bl_device_table))
		return -ENODEV;

	ret = platform_add_devices(devices, ARRAY_SIZE(devices));
	if (ret)
		return ret;

	return platform_driver_register(&kb3886bl_driver);
}
/*
 * Module exit.
 * NOTE(review): the platform device added in kb3886_init() is not
 * unregistered here -- verify whether that is intentional for this
 * DMI-bound, effectively non-unloadable configuration.
 */
static void __exit kb3886_exit(void)
{
platform_driver_unregister(&kb3886bl_driver);
}
module_init(kb3886_init);
module_exit(kb3886_exit);
MODULE_AUTHOR("Claudio Nieder <private@claudio.ch>");
MODULE_DESCRIPTION("Tabletkiosk Sahara Touch-iT Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dmi:*:svnSDV:pniTouchT201:*");
| gpl-2.0 |
ASaifM/orinoco | arch/avr32/mm/dma-coherent.c | 8896 | 3716 | /*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
/*
 * Make a virtual address range coherent for a DMA transfer in the given
 * direction: invalidate for device-to-memory, clean (writeback) for
 * memory-to-device, full flush for bidirectional.  Addresses in the
 * uncached P2 segment need no maintenance.  An invalid direction is a bug.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
/*
* No need to sync an uncached area
*/
if (PXSEG(vaddr) == P2SEG)
return;
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
invalidate_dcache_region(vaddr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
clean_dcache_region(vaddr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
flush_dcache_region(vaddr, size);
break;
default:
BUG();
}
}
EXPORT_SYMBOL(dma_cache_sync);
/*
 * Allocate page-aligned memory for a DMA buffer.
 *
 * Allocates a whole power-of-two order, splits it into individual pages,
 * invalidates the D-cache over the region (stale hits against the aliased
 * cached mapping would otherwise be possible), stores the bus address in
 * *handle, and frees the tail pages beyond the rounded-up size.  Returns
 * the first page, or NULL on allocation failure.
 */
static struct page *__dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp)
{
struct page *page, *free, *end;
int order;
/* Following is a work-around (a.k.a. hack) to prevent pages
* with __GFP_COMP being passed to split_page() which cannot
* handle them. The real problem is that this flag probably
* should be 0 on AVR32 as it is not supported on this
* platform--see CONFIG_HUGETLB_PAGE. */
gfp &= ~(__GFP_COMP);
size = PAGE_ALIGN(size);
order = get_order(size);
page = alloc_pages(gfp, order);
if (!page)
return NULL;
split_page(page, order);
/*
* When accessing physical memory with valid cache data, we
* get a cache hit even if the virtual memory region is marked
* as uncached.
*
* Since the memory is newly allocated, there is no point in
* doing a writeback. If the previous owner cares, he should
* have flushed the cache before releasing the memory.
*/
invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);
*handle = page_to_bus(page);
free = page + (size >> PAGE_SHIFT);
end = page + (1 << order);
/*
* Free any unused pages
*/
while (free < end) {
__free_page(free);
free++;
}
return page;
}
/*
 * Release a buffer obtained from __dma_alloc(): give back each of the
 * individual pages covering the page-aligned size.
 */
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *last = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	for (; page < last; page++)
		__free_page(page);
}
/*
 * Allocate a coherent DMA buffer and return its address in the uncached
 * (P2) segment, or NULL on failure.  The bus address is stored in *handle
 * by __dma_alloc().
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page = __dma_alloc(dev, size, handle, gfp);

	if (!page)
		return NULL;

	return phys_to_uncached(page_to_phys(page));
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Free a buffer from dma_alloc_coherent(): translate the uncached address
 * back to its cached alias, validate it, and release the pages.
 */
void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle)
{
void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
struct page *page;
pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
cpu_addr, (unsigned long)handle, (unsigned)size);
BUG_ON(!virt_addr_valid(addr));
page = virt_to_page(addr);
__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
/*
 * Allocate a DMA buffer and map it into P3 with write-combining enabled
 * (via __ioremap with _PAGE_BUFFER).  Both *handle and the return value
 * refer to the same physical pages; returns NULL on allocation failure.
 */
void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
dma_addr_t phys;
page = __dma_alloc(dev, size, handle, gfp);
if (!page)
return NULL;
phys = page_to_phys(page);
*handle = phys;
/* Now, map the page into P3 with write-combining turned on */
return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
/*
 * Free a buffer from dma_alloc_writecombine(): tear down the ioremap
 * mapping, then release the underlying pages located via the bus handle.
 */
void dma_free_writecombine(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle)
{
struct page *page;
iounmap(cpu_addr);
page = phys_to_page(handle);
__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
| gpl-2.0 |
xtrymind/android_kernel_msm | net/rds/af_rds.c | 9920 | 14485 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/poll.h>
#include <net/sock.h>
#include "rds.h"
/*
 * Look up a name in a string table; an out-of-range index or a NULL
 * table slot maps to the literal "unknown".
 */
char *rds_str_array(char **array, size_t elements, size_t index)
{
	return (index < elements && array[index]) ? array[index] : "unknown";
}
EXPORT_SYMBOL(rds_str_array);
/* this is just used for stats gathering :/ */
static DEFINE_SPINLOCK(rds_sock_lock);
static unsigned long rds_sock_count;
static LIST_HEAD(rds_sock_list);
DECLARE_WAIT_QUEUE_HEAD(rds_poll_waitq);
/*
 * This is called as the final descriptor referencing this socket is closed.
 * We have to unbind the socket so that another socket can be bound to the
 * address it was using.
 *
 * We have to be careful about racing with the incoming path. sock_orphan()
 * sets SOCK_DEAD and we use that as an indicator to the rx path that new
 * messages shouldn't be queued.
 *
 * Teardown order: orphan, drain receive queue, drop congestion tracking,
 * unbind (then synchronize_rcu -- the bind hash is RCU-protected), drop
 * pending sends/RDMA keys/notifications, unlink from the global socket
 * list, and release the transport and the sock reference.
 */
static int rds_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct rds_sock *rs;
if (!sk)
goto out;
rs = rds_sk_to_rs(sk);
sock_orphan(sk);
/* Note - rds_clear_recv_queue grabs rs_recv_lock, so
* that ensures the recv path has completed messing
* with the socket. */
rds_clear_recv_queue(rs);
rds_cong_remove_socket(rs);
/*
* the binding lookup hash uses rcu, we need to
* make sure we sychronize_rcu before we free our
* entry
*/
rds_remove_bound(rs);
synchronize_rcu();
rds_send_drop_to(rs, NULL);
rds_rdma_drop_keys(rs);
rds_notify_queue_get(rs, NULL);
spin_lock_bh(&rds_sock_lock);
list_del_init(&rs->rs_item);
rds_sock_count--;
spin_unlock_bh(&rds_sock_lock);
rds_trans_put(rs->rs_transport);
sock->sk = NULL;
sock_put(sk);
out:
return 0;
}
/*
 * Careful not to race with rds_release -> sock_orphan which clears sk_sleep.
 * _bh() isn't OK here, we're called from interrupt handlers. It's probably OK
 * to wake the waitqueue after sk_sleep is clear as we hold a sock ref, but
 * this seems more conservative.
 * NB - normally, one would use sk_callback_lock for this, but we can
 * get here from interrupts, whereas the network code grabs sk_callback_lock
 * with _lock_bh only - so relying on sk_callback_lock introduces livelocks.
 */
/* Wake any sleeper on the socket, under rs_recv_lock (see note above). */
void rds_wake_sk_sleep(struct rds_sock *rs)
{
unsigned long flags;
read_lock_irqsave(&rs->rs_recv_lock, flags);
__rds_wake_sk_sleep(rds_rs_to_sk(rs));
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
}
/*
 * .getname handler: fill @uaddr with the peer address (@peer != 0) or the
 * locally bound address.  Peer lookup fails with -ENOTCONN while the
 * socket is unconnected; the reads are deliberately unlocked ("racey,
 * don't care" below).
 */
static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
/* racey, don't care */
if (peer) {
if (!rs->rs_conn_addr)
return -ENOTCONN;
sin->sin_port = rs->rs_conn_port;
sin->sin_addr.s_addr = rs->rs_conn_addr;
} else {
sin->sin_port = rs->rs_bound_port;
sin->sin_addr.s_addr = rs->rs_bound_addr;
}
sin->sin_family = AF_INET;
*uaddr_len = sizeof(*sin);
return 0;
}
/*
 * RDS' poll is without a doubt the least intuitive part of the interface,
 * as POLLIN and POLLOUT do not behave entirely as you would expect from
 * a network protocol.
 *
 * POLLIN is asserted if
 *  - there is data on the receive queue.
 *  - to signal that a previously congested destination may have become
 *    uncongested
 *  - A notification has been queued to the socket (this can be a congestion
 *    update, or a RDMA completion).
 *
 * POLLOUT is asserted if there is room on the send queue. This does not mean
 * however, that the next sendmsg() call will succeed. If the application tries
 * to send to a congested destination, the system call may still fail (and
 * return ENOBUFS).
 */
static unsigned int rds_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
unsigned int mask = 0;
unsigned long flags;
poll_wait(file, sk_sleep(sk), wait);
/* congested sockets also wait on the global poll queue */
if (rs->rs_seen_congestion)
poll_wait(file, &rds_poll_waitq, wait);
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!rs->rs_cong_monitor) {
/* When a congestion map was updated, we signal POLLIN for
* "historical" reasons. Applications can also poll for
* WRBAND instead. */
if (rds_cong_updated_since(&rs->rs_cong_track))
mask |= (POLLIN | POLLRDNORM | POLLWRBAND);
} else {
spin_lock(&rs->rs_lock);
if (rs->rs_cong_notify)
mask |= (POLLIN | POLLRDNORM);
spin_unlock(&rs->rs_lock);
}
if (!list_empty(&rs->rs_recv_queue) ||
!list_empty(&rs->rs_notify_queue))
mask |= (POLLIN | POLLRDNORM);
if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
mask |= (POLLOUT | POLLWRNORM);
read_unlock_irqrestore(&rs->rs_recv_lock, flags);
/* clear state any time we wake a seen-congested socket */
if (mask)
rs->rs_seen_congestion = 0;
return mask;
}
/* RDS implements no socket ioctls. */
static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
return -ENOIOCTLCMD;
}
/*
 * RDS_CANCEL_SENT_TO setsockopt: drop queued sends destined for the
 * sockaddr_in copied from userspace.  Requires the socket to be bound.
 *
 * NOTE(review): @len is a signed int compared against an unsigned sizeof;
 * a negative len would pass the check.  copy_from_user() still copies a
 * fixed sizeof(sin), so this looks benign -- confirm the caller always
 * passes a non-negative optlen.
 */
static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
int len)
{
struct sockaddr_in sin;
int ret = 0;
/* racing with another thread binding seems ok here */
if (rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
goto out;
}
if (len < sizeof(struct sockaddr_in)) {
ret = -EINVAL;
goto out;
}
if (copy_from_user(&sin, optval, sizeof(sin))) {
ret = -EFAULT;
goto out;
}
rds_send_drop_to(rs, &sin);
out:
return ret;
}
/*
 * Copy an int-sized boolean option from userspace and normalize it to
 * 0/1 in *optvar.  Returns 0, -EINVAL on short optlen or -EFAULT.
 */
static int rds_set_bool_option(unsigned char *optvar, char __user *optval,
int optlen)
{
int value;
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(value, (int __user *) optval))
return -EFAULT;
*optvar = !!value;
return 0;
}
/*
 * RDS_CONG_MONITOR setsockopt: toggle congestion monitoring.  Enabling
 * adds the socket to the congestion tracking list; disabling removes it
 * and clears any pending congestion mask/notification state.
 */
static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
int optlen)
{
int ret;
ret = rds_set_bool_option(&rs->rs_cong_monitor, optval, optlen);
if (ret == 0) {
if (rs->rs_cong_monitor) {
rds_cong_add_socket(rs);
} else {
rds_cong_remove_socket(rs);
rs->rs_cong_mask = 0;
rs->rs_cong_notify = 0;
}
}
return ret;
}
/*
 * setsockopt() dispatcher for SOL_RDS options.  Each option is handled
 * by a dedicated helper; anything else is -ENOPROTOOPT.
 */
static int rds_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct rds_sock *rs = rds_sk_to_rs(sock->sk);

	if (level != SOL_RDS)
		return -ENOPROTOOPT;

	switch (optname) {
	case RDS_CANCEL_SENT_TO:
		return rds_cancel_sent_to(rs, optval, optlen);
	case RDS_GET_MR:
		return rds_get_mr(rs, optval, optlen);
	case RDS_GET_MR_FOR_DEST:
		return rds_get_mr_for_dest(rs, optval, optlen);
	case RDS_FREE_MR:
		return rds_free_mr(rs, optval, optlen);
	case RDS_RECVERR:
		return rds_set_bool_option(&rs->rs_recverr, optval, optlen);
	case RDS_CONG_MONITOR:
		return rds_cong_monitor(rs, optval, optlen);
	default:
		return -ENOPROTOOPT;
	}
}
/*
 * getsockopt() handler for SOL_RDS options.
 *
 * RDS_INFO_* options are forwarded to the info subsystem; RDS_RECVERR
 * copies the flag out as an int and writes back the length.  Unknown
 * options fall through the default case with ret still -ENOPROTOOPT.
 */
static int rds_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct rds_sock *rs = rds_sk_to_rs(sock->sk);
/* ret stays -ENOPROTOOPT unless a case below overwrites it */
int ret = -ENOPROTOOPT, len;
if (level != SOL_RDS)
goto out;
if (get_user(len, optlen)) {
ret = -EFAULT;
goto out;
}
switch (optname) {
case RDS_INFO_FIRST ... RDS_INFO_LAST:
ret = rds_info_getsockopt(sock, optname, optval,
optlen);
break;
case RDS_RECVERR:
if (len < sizeof(int))
ret = -EINVAL;
else
if (put_user(rs->rs_recverr, (int __user *) optval) ||
put_user(sizeof(int), optlen))
ret = -EFAULT;
else
ret = 0;
break;
default:
break;
}
out:
return ret;
}
/*
 * connect() for RDS: datagram-style, so this only records the default
 * destination address/port on the socket.  IPv4 only; a wildcard
 * address is rejected with -EDESTADDRREQ.
 */
static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	int ret;

	lock_sock(sk);

	if (addr_len != sizeof(struct sockaddr_in)) {
		ret = -EINVAL;
	} else if (usin->sin_family != AF_INET) {
		ret = -EAFNOSUPPORT;
	} else if (usin->sin_addr.s_addr == htonl(INADDR_ANY)) {
		ret = -EDESTADDRREQ;
	} else {
		rs->rs_conn_addr = usin->sin_addr.s_addr;
		rs->rs_conn_port = usin->sin_port;
		ret = 0;
	}

	release_sock(sk);
	return ret;
}
/* Protocol descriptor: sk_alloc() sizes each socket as a rds_sock. */
static struct proto rds_proto = {
.name = "RDS",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rds_sock),
};
/*
 * Socket operations for AF_RDS.  RDS is datagram-oriented, so the
 * connection-style ops (accept/listen/socketpair/shutdown) are the
 * sock_no_* stubs.
 */
static const struct proto_ops rds_proto_ops = {
.family = AF_RDS,
.owner = THIS_MODULE,
.release = rds_release,
.bind = rds_bind,
.connect = rds_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = rds_getname,
.poll = rds_poll,
.ioctl = rds_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = rds_setsockopt,
.getsockopt = rds_getsockopt,
.sendmsg = rds_sendmsg,
.recvmsg = rds_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
/*
 * Common socket-initialization tail shared by socket creation paths.
 * Initializes all per-socket state, then links the socket onto the
 * global rds_sock_list under rds_sock_lock (bh-safe since the list is
 * also walked from info callbacks).  Always returns 0.
 */
static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
{
struct rds_sock *rs;
sock_init_data(sock, sk);
sock->ops = &rds_proto_ops;
sk->sk_protocol = protocol;
rs = rds_sk_to_rs(sk);
spin_lock_init(&rs->rs_lock);
rwlock_init(&rs->rs_recv_lock);
INIT_LIST_HEAD(&rs->rs_send_queue);
INIT_LIST_HEAD(&rs->rs_recv_queue);
INIT_LIST_HEAD(&rs->rs_notify_queue);
INIT_LIST_HEAD(&rs->rs_cong_list);
spin_lock_init(&rs->rs_rdma_lock);
rs->rs_rdma_keys = RB_ROOT;
/* publish the fully initialized socket on the global list */
spin_lock_bh(&rds_sock_lock);
list_add_tail(&rs->rs_item, &rds_sock_list);
rds_sock_count++;
spin_unlock_bh(&rds_sock_lock);
return 0;
}
static int rds_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
if (sock->type != SOCK_SEQPACKET || protocol)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto);
if (!sk)
return -ENOMEM;
return __rds_create(sock, sk, protocol);
}
/* Take a reference on the sock backing @rs. */
void rds_sock_addref(struct rds_sock *rs)
{
sock_hold(rds_rs_to_sk(rs));
}
/* Drop a reference taken with rds_sock_addref(). */
void rds_sock_put(struct rds_sock *rs)
{
sock_put(rds_rs_to_sk(rs));
}
/* Registered with sock_register() so socket(AF_RDS, ...) reaches rds_create. */
static const struct net_proto_family rds_family_ops = {
.family = AF_RDS,
.create = rds_create,
.owner = THIS_MODULE,
};
/*
 * RDS_INFO_RECV_MESSAGES callback: report every queued incoming message
 * across all sockets.  Counts everything but only copies out the first
 * @len entries; lens->nr carries the true total so userspace can retry
 * with a bigger buffer.
 */
static void rds_sock_inc_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_sock *rs;
struct rds_incoming *inc;
unsigned int total = 0;
/* convert the byte length into an entry count */
len /= sizeof(struct rds_info_message);
spin_lock_bh(&rds_sock_lock);
list_for_each_entry(rs, &rds_sock_list, rs_item) {
/* rs_recv_lock nests inside rds_sock_lock here */
read_lock(&rs->rs_recv_lock);
/* XXX too lazy to maintain counts.. */
list_for_each_entry(inc, &rs->rs_recv_queue, i_item) {
total++;
if (total <= len)
rds_inc_info_copy(inc, iter, inc->i_saddr,
rs->rs_bound_addr, 1);
}
read_unlock(&rs->rs_recv_lock);
}
spin_unlock_bh(&rds_sock_lock);
lens->nr = total;
lens->each = sizeof(struct rds_info_message);
}
/*
 * RDS_INFO_SOCKETS callback: copy one rds_info_socket record per open
 * RDS socket.  If the supplied buffer cannot hold all sockets, nothing
 * is copied; lens->nr still reports the count so userspace can retry.
 *
 * NOTE(review): sinfo is a stack struct whose fields are assigned
 * individually before rds_info_copy() copies sizeof(sinfo) bytes; if
 * struct rds_info_socket has padding, those bytes are copied
 * uninitialized to userspace -- confirm and memset() if so.
 */
static void rds_sock_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
struct rds_info_socket sinfo;
struct rds_sock *rs;
/* convert the byte length into an entry count */
len /= sizeof(struct rds_info_socket);
spin_lock_bh(&rds_sock_lock);
if (len < rds_sock_count)
goto out;
list_for_each_entry(rs, &rds_sock_list, rs_item) {
sinfo.sndbuf = rds_sk_sndbuf(rs);
sinfo.rcvbuf = rds_sk_rcvbuf(rs);
sinfo.bound_addr = rs->rs_bound_addr;
sinfo.connected_addr = rs->rs_conn_addr;
sinfo.bound_port = rs->rs_bound_port;
sinfo.connected_port = rs->rs_conn_port;
sinfo.inum = sock_i_ino(rds_rs_to_sk(rs));
rds_info_copy(iter, &sinfo, sizeof(sinfo));
}
out:
lens->nr = rds_sock_count;
lens->each = sizeof(struct rds_info_socket);
spin_unlock_bh(&rds_sock_lock);
}
/*
 * Module unload: unregister the socket family first so no new sockets
 * can be created, then tear down the subsystems, and finally remove
 * the info callbacks registered in rds_init().
 */
static void rds_exit(void)
{
sock_unregister(rds_family_ops.family);
proto_unregister(&rds_proto);
rds_conn_exit();
rds_cong_exit();
rds_sysctl_exit();
rds_threads_exit();
rds_stats_exit();
rds_page_exit();
rds_info_deregister_func(RDS_INFO_SOCKETS, rds_sock_info);
rds_info_deregister_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
}
module_exit(rds_exit);
/*
 * Module init: bring up the RDS subsystems in order, registering the
 * protocol and socket family last.  Failures unwind through the label
 * chain in reverse order of initialization.
 *
 * NOTE(review): the out_conn label also calls rds_cong_exit() and
 * rds_page_exit() although no matching rds_cong_init()/rds_page_init()
 * appears here -- presumably they are set up inside rds_conn_init();
 * confirm against the rest of the file.
 */
static int rds_init(void)
{
int ret;
ret = rds_conn_init();
if (ret)
goto out;
ret = rds_threads_init();
if (ret)
goto out_conn;
ret = rds_sysctl_init();
if (ret)
goto out_threads;
ret = rds_stats_init();
if (ret)
goto out_sysctl;
ret = proto_register(&rds_proto, 1);
if (ret)
goto out_stats;
ret = sock_register(&rds_family_ops);
if (ret)
goto out_proto;
rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
goto out;
out_proto:
proto_unregister(&rds_proto);
out_stats:
rds_stats_exit();
out_sysctl:
rds_sysctl_exit();
out_threads:
rds_threads_exit();
out_conn:
rds_conn_exit();
rds_cong_exit();
rds_page_exit();
out:
return ret;
}
module_init(rds_init);
/* Module identification for modinfo and AF_RDS protocol autoloading. */
#define DRV_VERSION "4.0"
#define DRV_RELDATE "Feb 12, 2009"
MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: Reliable Datagram Sockets"
" v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_NETPROTO(PF_RDS);
| gpl-2.0 |
pekaka/N900-Exynos-kernel-4.3 | arch/x86/kernel/resource.c | 9920 | 1050 | #include <linux/ioport.h>
#include <asm/e820.h>
/*
 * Shrink @res so it no longer overlaps the conflict range
 * [@start, @end].  If the conflict splits the resource in two,
 * keep whichever remainder is larger.
 */
static void resource_clip(struct resource *res, resource_size_t start,
			  resource_size_t end)
{
	resource_size_t below = 0, above = 0;

	/* Disjoint ranges: nothing to clip. */
	if (res->end < start || res->start > end)
		return;

	if (res->start < start)
		below = start - res->start;	/* size of the piece below */
	if (res->end > end)
		above = res->end - end;		/* size of the piece above */

	/* Keep the area above or below the conflict, whichever is larger */
	if (below > above)
		res->end = start - 1;
	else
		res->start = end + 1;
}
/*
 * Clip @avail against every region in the firmware-provided e820 map
 * so allocations never land on memory the BIOS reported as in use.
 */
static void remove_e820_regions(struct resource *avail)
{
int i;
struct e820entry *entry;
for (i = 0; i < e820.nr_map; i++) {
entry = &e820.map[i];
resource_clip(avail, entry->addr,
entry->addr + entry->size - 1);
}
}
/*
 * x86 hook for the resource allocator: trim ranges the firmware owns
 * out of a candidate memory resource before it is handed out.
 */
void arch_remove_reservations(struct resource *avail)
{
/* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
if (avail->flags & IORESOURCE_MEM) {
if (avail->start < BIOS_END)
avail->start = BIOS_END;
resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
remove_e820_regions(avail);
}
}
| gpl-2.0 |
Shaky156/Shield-Tablet-CPU2.5Ghz-GPU060Mhz-Overclock | arch/powerpc/boot/cuboot-taishan.c | 14016 | 1361 | /*
* Old U-boot compatibility for Taishan
*
* Author: Hugh Blemings <hugh@au.ibm.com>
*
* Copyright 2007 Hugh Blemings, IBM Corporation.
* Based on cuboot-ebony.c which is:
* Copyright 2007 David Gibson, IBM Corporation.
* Based on cuboot-83xx.c, which is:
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "cuboot.h"
#include "reg.h"
#include "dcr.h"
#include "4xx.h"
#define TARGET_4xx
#define TARGET_44x
#define TARGET_440GX
#include "ppcboot.h"
static bd_t bd;
BSS_STACK(4096);
/*
 * Device-tree fixups for the Taishan board: fill in clock rates,
 * memory size, MAC addresses and EBC ranges that old U-Boot does not
 * provide in the flattened tree.
 */
static void taishan_fixups(void)
{
/* FIXME: sysclk should be derived by reading the FPGA
registers */
unsigned long sysclk = 33000000;
ibm440gx_fixup_clocks(sysclk, 6 * 1843200, 25000000);
ibm4xx_sdram_fixup_memsize();
/* MAC addresses come from the U-Boot board-info block */
dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr);
ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
}
/*
 * Bootwrapper entry point: capture the U-Boot board info (CUBOOT_INIT
 * copies it into bd), install the board fixups, and set up the device
 * tree and serial console.  r3-r7 are the register values passed by
 * old U-Boot.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
{
CUBOOT_INIT();
platform_ops.fixups = taishan_fixups;
fdt_init(_dtb_start);
serial_console_init();
}
| gpl-2.0 |
mali1/NST-kernel | drivers/infiniband/hw/cxgb3/iwch_provider.c | 193 | 37903 | /*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"
/*
 * The following verbs are not supported by the cxgb3 iWARP device;
 * each returns -ENOSYS so the IB core reports them as unimplemented.
 */
/* Port attribute modification: unsupported. */
static int iwch_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
struct ib_port_modify *props)
{
return -ENOSYS;
}
/* Address handles are a UD concept; iWARP has none. */
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
return ERR_PTR(-ENOSYS);
}
static int iwch_ah_destroy(struct ib_ah *ah)
{
return -ENOSYS;
}
/* Multicast is not supported on iWARP. */
static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
return -ENOSYS;
}
static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
return -ENOSYS;
}
/* No MAD (management datagram) processing on iWARP. */
static int iwch_process_mad(struct ib_device *ibdev,
int mad_flags,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
return -ENOSYS;
}
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
struct iwch_dev *rhp = to_iwch_dev(context->device);
struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
struct iwch_mm_entry *mm, *tmp;
PDBG("%s context %p\n", __func__, context);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext);
return 0;
}
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct iwch_ucontext *context;
struct iwch_dev *rhp = to_iwch_dev(ibdev);
PDBG("%s ibdev %p\n", __func__, ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return ERR_PTR(-ENOMEM);
cxio_init_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
return &context->ibucontext;
}
/*
 * Destroy a completion queue.  The CQ is first removed from the idr so
 * event handlers can no longer find it, then we drop our reference and
 * wait for any in-flight users before destroying the hardware CQ.
 */
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
struct iwch_cq *chp;
PDBG("%s ib_cq %p\n", __func__, ib_cq);
chp = to_iwch_cq(ib_cq);
remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
atomic_dec(&chp->refcnt);
/* wait for concurrent event handlers to release their references */
wait_event(chp->wait, !atomic_read(&chp->refcnt));
cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
kfree(chp);
return 0;
}
/*
 * Create a completion queue.
 *
 * For user CQs (ib_context != NULL) on T3B+ parts, the userspace
 * read-pointer address is read from udata so the kernel can mirror it
 * when arming.  The response carries the cqid, size and an mmap key
 * the user library uses to map the CQ memory.
 */
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
struct ib_ucontext *ib_context,
struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_cq *chp;
struct iwch_create_cq_resp uresp;
struct iwch_create_cq_req ureq;
struct iwch_ucontext *ucontext = NULL;
PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
rhp = to_iwch_dev(ibdev);
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
return ERR_PTR(-ENOMEM);
if (ib_context) {
ucontext = to_iwch_ucontext(ib_context);
if (!t3a_device(rhp)) {
if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
kfree(chp);
return ERR_PTR(-EFAULT);
}
chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
}
}
if (t3a_device(rhp)) {
/*
* T3A: Add some fluff to handle extra CQEs inserted
* for various errors.
* Additional CQE possibilities:
* TERMINATE,
* incoming RDMA WRITE Failures
* incoming RDMA READ REQUEST FAILUREs
* NOTE: We cannot ensure the CQ won't overflow.
*/
entries += 16;
}
/* hardware requires a power-of-two CQ depth */
entries = roundup_pow_of_two(entries);
chp->cq.size_log2 = ilog2(entries);
if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
kfree(chp);
return ERR_PTR(-ENOMEM);
}
chp->rhp = rhp;
chp->ibcq.cqe = 1 << chp->cq.size_log2;
spin_lock_init(&chp->lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
if (ucontext) {
struct iwch_mm_entry *mm;
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm) {
iwch_destroy_cq(&chp->ibcq);
return ERR_PTR(-ENOMEM);
}
uresp.cqid = chp->cq.cqid;
uresp.size_log2 = chp->cq.size_log2;
/* hand out a unique mmap key for this CQ's memory */
spin_lock(&ucontext->mmap_lock);
uresp.key = ucontext->key;
ucontext->key += PAGE_SIZE;
spin_unlock(&ucontext->mmap_lock);
if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
kfree(mm);
iwch_destroy_cq(&chp->ibcq);
return ERR_PTR(-EFAULT);
}
mm->key = uresp.key;
mm->addr = virt_to_phys(chp->cq.queue);
mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
sizeof (struct t3_cqe));
insert_mmap(ucontext, mm);
}
PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
chp->cq.cqid, chp, (1 << chp->cq.size_log2),
(unsigned long long) chp->cq.dma_addr);
return &chp->ibcq;
}
/*
 * CQ resize.  The real implementation is compiled out ("notyet"):
 * it would quiesce all QPs on the CQ, create a larger CQ, copy the
 * CQEs across, update the HW context, destroy the old queue memory
 * and resume the QPs.  Until then the verb reports -ENOSYS.
 */
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
struct iwch_cq *chp = to_iwch_cq(cq);
struct t3_cq oldcq, newcq;
int ret;
PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);
/* We don't downsize... */
if (cqe <= cq->cqe)
return 0;
/* create new t3_cq with new size */
cqe = roundup_pow_of_two(cqe+1);
newcq.size_log2 = ilog2(cqe);
/* Dont allow resize to less than the current wce count */
if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
return -ENOMEM;
}
/* Quiesce all QPs using this CQ */
ret = iwch_quiesce_qps(chp);
if (ret) {
return ret;
}
ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
if (ret) {
return ret;
}
/* copy CQEs */
memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
sizeof(struct t3_cqe));
/* old iwch_qp gets new t3_cq but keeps old cqid */
oldcq = chp->cq;
chp->cq = newcq;
chp->cq.cqid = oldcq.cqid;
/* resize new t3_cq to update the HW context */
ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
if (ret) {
chp->cq = oldcq;
return ret;
}
chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;
/* destroy old t3_cq */
oldcq.cqid = newcq.cqid;
ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
if (ret) {
printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
__func__, ret);
}
/* add user hooks here */
/* resume qps */
ret = iwch_resume_qps(chp);
return ret;
#else
return -ENOSYS;
#endif
}
/*
 * Arm a CQ for notification (solicited-only or any event).  For user
 * CQs the current read pointer is fetched from the userspace mirror
 * before issuing the arm so the hardware compares against the right
 * index.
 */
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct iwch_dev *rhp;
struct iwch_cq *chp;
enum t3_cq_opcode cq_op;
int err;
unsigned long flag;
u32 rptr;
chp = to_iwch_cq(ibcq);
rhp = chp->rhp;
if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
cq_op = CQ_ARM_SE;
else
cq_op = CQ_ARM_AN;
if (chp->user_rptr_addr) {
/* must read the user rptr before taking the lock */
if (get_user(rptr, chp->user_rptr_addr))
return -EFAULT;
spin_lock_irqsave(&chp->lock, flag);
chp->cq.rptr = rptr;
} else
spin_lock_irqsave(&chp->lock, flag);
PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
spin_unlock_irqrestore(&chp->lock, flag);
if (err < 0)
printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
chp->cq.cqid);
/* a positive return means CQEs are pending; suppress unless asked */
if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
err = 0;
return err;
}
/*
 * mmap() handler for user verbs.  The offset is a key previously
 * handed to userspace (CQ/QP create); it is looked up (and consumed)
 * in the per-context mmap list.  Doorbell-register mappings are made
 * write-only and uncached; queue memory is mapped normally.
 */
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
int len = vma->vm_end - vma->vm_start;
u32 key = vma->vm_pgoff << PAGE_SHIFT;
struct cxio_rdev *rdev_p;
int ret = 0;
struct iwch_mm_entry *mm;
struct iwch_ucontext *ucontext;
u64 addr;
PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
key, len);
if (vma->vm_start & (PAGE_SIZE-1)) {
return -EINVAL;
}
rdev_p = &(to_iwch_dev(context->device)->rdev);
ucontext = to_iwch_ucontext(context);
/* each key is single-use: the entry is removed on lookup */
mm = remove_mmap(ucontext, key, len);
if (!mm)
return -EINVAL;
addr = mm->addr;
kfree(mm);
if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
(addr < (rdev_p->rnic_info.udbell_physbase +
rdev_p->rnic_info.udbell_len))) {
/*
* Map T3 DB register.
*/
if (vma->vm_flags & VM_READ) {
return -EPERM;
}
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
vma->vm_flags &= ~VM_MAYREAD;
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
} else {
/*
* Map WQ or CQ contig dma memory...
*/
ret = remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
}
return ret;
}
/*
 * Free a protection domain: return its id to the adapter's PD pool
 * and release the host structure.
 */
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;

	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct iwch_pd *php;
u32 pdid;
struct iwch_dev *rhp;
PDBG("%s ibdev %p\n", __func__, ibdev);
rhp = (struct iwch_dev *) ibdev;
pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
if (!pdid)
return ERR_PTR(-EINVAL);
php = kzalloc(sizeof(*php), GFP_KERNEL);
if (!php) {
cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
return ERR_PTR(-ENOMEM);
}
php->pdid = pdid;
php->rhp = rhp;
if (context) {
if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
iwch_deallocate_pd(&php->ibpd);
return ERR_PTR(-EFAULT);
}
}
PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
return &php->ibpd;
}
/*
 * Deregister a memory region: tear down the hardware stag and PBL,
 * drop the idr handle, release any pinned user memory, and free the
 * host structure.  Fails with -EINVAL while memory windows are still
 * bound (usecnt != 0).
 */
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
struct iwch_dev *rhp;
struct iwch_mr *mhp;
u32 mmid;
PDBG("%s ib_mr %p\n", __func__, ib_mr);
/* There can be no memory windows */
if (atomic_read(&ib_mr->usecnt))
return -EINVAL;
mhp = to_iwch_mr(ib_mr);
rhp = mhp->rhp;
mmid = mhp->attr.stag >> 8;
cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
mhp->attr.pbl_addr);
iwch_free_pbl(mhp);
remove_handle(rhp, &rhp->mmidr, mmid);
if (mhp->kva)
kfree((void *) (unsigned long) mhp->kva);
/* user MRs also release their pinned pages */
if (mhp->umem)
ib_umem_release(mhp->umem);
PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
kfree(mhp);
return 0;
}
/*
 * Register a set of physical buffer segments as a memory region.
 *
 * Builds a page list from @buffer_list, allocates a PBL in adapter
 * memory, writes the page list into it, and registers the resulting
 * stag with the hardware.  Returns the new ib_mr or an ERR_PTR.
 *
 * Fix: when iwch_alloc_pbl() itself fails there is no PBL to release,
 * so bail out through the plain "err" path instead of "err_pbl"
 * (which calls iwch_free_pbl() on a PBL that was never allocated).
 * This matches the error handling in iwch_reg_user_mr().
 */
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;	/* no PBL was allocated; don't free one */
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;	/* HW encodes 4KB pages as 0 */

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
/*
 * Re-register an existing physical MR, changing its PD, access flags
 * and/or translation as selected by @mr_rereg_mask.  Changes are
 * staged in a local copy (mh) and only written back to *mhp after the
 * hardware update succeeds.
 *
 * NOTE(review): when IB_MR_REREG_TRANS is not set, "shift" is 0 and
 * "npages" is passed to iwch_reregister_mem() uninitialized -- confirm
 * that the callee ignores them in that case.
 */
static int iwch_reregister_phys_mem(struct ib_mr *mr,
int mr_rereg_mask,
struct ib_pd *pd,
struct ib_phys_buf *buffer_list,
int num_phys_buf,
int acc, u64 * iova_start)
{
struct iwch_mr mh, *mhp;
struct iwch_pd *php;
struct iwch_dev *rhp;
__be64 *page_list = NULL;
int shift = 0;
u64 total_size;
int npages;
int ret;
PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
/* There can be no memory windows */
if (atomic_read(&mr->usecnt))
return -EINVAL;
mhp = to_iwch_mr(mr);
rhp = mhp->rhp;
php = to_iwch_pd(mr->pd);
/* make sure we are on the same adapter */
if (rhp != php->rhp)
return -EINVAL;
/* work on a scratch copy so failure leaves *mhp untouched */
memcpy(&mh, mhp, sizeof *mhp);
if (mr_rereg_mask & IB_MR_REREG_PD)
php = to_iwch_pd(pd);
if (mr_rereg_mask & IB_MR_REREG_ACCESS)
mh.attr.perms = iwch_ib_to_tpt_access(acc);
if (mr_rereg_mask & IB_MR_REREG_TRANS) {
ret = build_phys_page_list(buffer_list, num_phys_buf,
iova_start,
&total_size, &npages,
&shift, &page_list);
if (ret)
return ret;
}
ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
kfree(page_list);
if (ret) {
return ret;
}
/* hardware update succeeded; commit the new attributes */
if (mr_rereg_mask & IB_MR_REREG_PD)
mhp->attr.pdid = php->pdid;
if (mr_rereg_mask & IB_MR_REREG_ACCESS)
mhp->attr.perms = iwch_ib_to_tpt_access(acc);
if (mr_rereg_mask & IB_MR_REREG_TRANS) {
mhp->attr.zbva = 0;
mhp->attr.va_fbo = *iova_start;
mhp->attr.page_size = shift - 12;
mhp->attr.len = (u32) total_size;
mhp->attr.pbl_size = npages;
}
return 0;
}
/*
 * Register a userspace memory region.
 *
 * Pins the user pages via ib_umem_get(), writes their DMA addresses
 * into a freshly allocated PBL one kernel page of entries at a time,
 * then registers the stag with the adapter.  On non-T3A parts the PBL
 * address is copied back to userspace for fastreg use.
 *
 * Fix: on ib_copy_to_udata() failure the old code did
 * "iwch_dereg_mr(); ...; goto err;", but iwch_dereg_mr() already
 * releases the umem and frees mhp, so the err path then released and
 * freed both a second time (use-after-free / double free).  Return
 * directly after iwch_dereg_mr() instead.
 */
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* pin the user pages and build the scatter list */
	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	/* count pages so we know how large a PBL to allocate */
	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	/* stage PBL entries through one kernel page at a time */
	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;	/* HW encodes 4KB pages as 0 */
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				 rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			/*
			 * iwch_dereg_mr() releases the umem and frees
			 * mhp; do NOT fall through to the err path,
			 * which would free both again.
			 */
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
struct ib_phys_buf bl;
u64 kva;
struct ib_mr *ibmr;
PDBG("%s ib_pd %p\n", __func__, pd);
/*
* T3 only supports 32 bits of size.
*/
bl.size = 0xffffffff;
bl.addr = 0;
kva = 0;
ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
return ibmr;
}
/*
 * Allocate a memory window: reserve a window stag from the adapter and
 * publish the window in the mmid idr so completions can find it.
 */
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
struct iwch_mw *mhp;
u32 mmid;
u32 stag = 0;
int ret;
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
if (ret) {
kfree(mhp);
return ERR_PTR(ret);
}
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.type = TPT_MW;
mhp->attr.stag = stag;
/* low 8 bits of the stag are a key; the rest is the mmid */
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
insert_handle(rhp, &rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
}
/*
 * Free a memory window: release the hardware window stag, remove the
 * mmid handle, and free the host structure.
 */
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_mw *mhp = to_iwch_mw(mw);
	struct iwch_dev *rhp = mhp->rhp;
	u32 mmid = mw->rkey >> 8;

	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	/* only the pointer *value* is logged here, never dereferenced */
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}
/*
 * Allocate a fast-register MR shell: a PBL of @pbl_depth entries plus
 * a non-shared stag.  The actual translation is filled in later by
 * fastreg work requests.
 */
static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
struct iwch_mr *mhp;
u32 mmid;
u32 stag = 0;
int ret;
php = to_iwch_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
mhp->rhp = rhp;
ret = iwch_alloc_pbl(mhp, pbl_depth);
if (ret) {
kfree(mhp);
return ERR_PTR(ret);
}
mhp->attr.pbl_size = pbl_depth;
ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
mhp->attr.pbl_size, mhp->attr.pbl_addr);
if (ret) {
iwch_free_pbl(mhp);
kfree(mhp);
return ERR_PTR(ret);
}
mhp->attr.pdid = php->pdid;
mhp->attr.type = TPT_NON_SHARED_MR;
mhp->attr.stag = stag;
mhp->attr.state = 1;
/* low 8 bits of the stag are a key; the rest is the mmid */
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
insert_handle(rhp, &rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmr);
}
static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
struct ib_device *device,
int page_list_len)
{
struct ib_fast_reg_page_list *page_list;
page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
GFP_KERNEL);
if (!page_list)
return ERR_PTR(-ENOMEM);
page_list->page_list = (u64 *)(page_list + 1);
page_list->max_page_list_len = page_list_len;
return page_list;
}
/* Free a fastreg page list; header and array are one allocation. */
static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
kfree(page_list);
}
/*
 * Destroy a queue pair.  The QP is moved to ERROR to flush it, then we
 * wait for the connection endpoint to detach and for all references to
 * drop before destroying the hardware WQ.
 */
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
struct iwch_dev *rhp;
struct iwch_qp *qhp;
struct iwch_qp_attributes attrs;
struct iwch_ucontext *ucontext;
qhp = to_iwch_qp(ib_qp);
rhp = qhp->rhp;
attrs.next_state = IWCH_QP_STATE_ERROR;
iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
/* wait for the CM endpoint to let go of the QP */
wait_event(qhp->wait, !qhp->ep);
remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
: NULL;
cxio_destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
ib_qp, qhp->wq.qpid, qhp);
kfree(qhp);
return 0;
}
/*
 * Create an RC queue pair.
 *
 * Sizes the RQ/SQ/WQ (all power-of-two as the queue macros assume),
 * creates the hardware WQ, initializes the software QP state, and for
 * user QPs hands back two mmap keys: one for the WQ memory and one for
 * the doorbell page.
 *
 * NOTE(review): the udata branch dereferences ucontext, which is only
 * non-NULL when pd->uobject is set -- presumably udata != NULL implies
 * a user PD here; confirm against the uverbs call path.
 */
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_qp *qhp;
struct iwch_pd *php;
struct iwch_cq *schp;
struct iwch_cq *rchp;
struct iwch_create_qp_resp uresp;
int wqsize, sqsize, rqsize;
struct iwch_ucontext *ucontext;
PDBG("%s ib_pd %p\n", __func__, pd);
if (attrs->qp_type != IB_QPT_RC)
return ERR_PTR(-EINVAL);
php = to_iwch_pd(pd);
rhp = php->rhp;
schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
if (!schp || !rchp)
return ERR_PTR(-EINVAL);
/* The RQT size must be # of entries + 1 rounded up to a power of two */
rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
if (rqsize == attrs->cap.max_recv_wr)
rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);
/* T3 doesn't support RQT depth < 16 */
if (rqsize < 16)
rqsize = 16;
if (rqsize > T3_MAX_RQ_SIZE)
return ERR_PTR(-EINVAL);
if (attrs->cap.max_inline_data > T3_MAX_INLINE)
return ERR_PTR(-EINVAL);
/*
* NOTE: The SQ and total WQ sizes don't need to be
* a power of two. However, all the code assumes
* they are. EG: Q_FREECNT() and friends.
*/
sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
wqsize = roundup_pow_of_two(rqsize + sqsize);
/*
* Kernel users need more wq space for fastreg WRs which can take
* 2 WR fragments.
*/
ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
wqsize = roundup_pow_of_two(rqsize +
roundup_pow_of_two(attrs->cap.max_send_wr * 2));
PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
wqsize, sqsize, rqsize);
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
return ERR_PTR(-ENOMEM);
qhp->wq.size_log2 = ilog2(wqsize);
qhp->wq.rq_size_log2 = ilog2(rqsize);
qhp->wq.sq_size_log2 = ilog2(sqsize);
if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
kfree(qhp);
return ERR_PTR(-ENOMEM);
}
/* report the actual capabilities back to the caller */
attrs->cap.max_recv_wr = rqsize - 1;
attrs->cap.max_send_wr = sqsize;
attrs->cap.max_inline_data = T3_MAX_INLINE;
qhp->rhp = rhp;
qhp->attr.pd = php->pdid;
qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
qhp->attr.state = IWCH_QP_STATE_IDLE;
qhp->attr.next_state = IWCH_QP_STATE_IDLE;
/*
* XXX - These don't get passed in from the openib user
* at create time. The CM sets them via a QP modify.
* Need to fix... I think the CM should
*/
qhp->attr.enable_rdma_read = 1;
qhp->attr.enable_rdma_write = 1;
qhp->attr.enable_bind = 1;
qhp->attr.max_ord = 1;
qhp->attr.max_ird = 1;
spin_lock_init(&qhp->lock);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);
if (udata) {
struct iwch_mm_entry *mm1, *mm2;
mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
if (!mm1) {
iwch_destroy_qp(&qhp->ibqp);
return ERR_PTR(-ENOMEM);
}
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
if (!mm2) {
kfree(mm1);
iwch_destroy_qp(&qhp->ibqp);
return ERR_PTR(-ENOMEM);
}
uresp.qpid = qhp->wq.qpid;
uresp.size_log2 = qhp->wq.size_log2;
uresp.sq_size_log2 = qhp->wq.sq_size_log2;
uresp.rq_size_log2 = qhp->wq.rq_size_log2;
/* hand out two mmap keys: WQ memory and doorbell page */
spin_lock(&ucontext->mmap_lock);
uresp.key = ucontext->key;
ucontext->key += PAGE_SIZE;
uresp.db_key = ucontext->key;
ucontext->key += PAGE_SIZE;
spin_unlock(&ucontext->mmap_lock);
if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
kfree(mm1);
kfree(mm2);
iwch_destroy_qp(&qhp->ibqp);
return ERR_PTR(-EFAULT);
}
mm1->key = uresp.key;
mm1->addr = virt_to_phys(qhp->wq.queue);
mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
insert_mmap(ucontext, mm1);
mm2->key = uresp.db_key;
mm2->addr = qhp->wq.udb & PAGE_MASK;
mm2->len = PAGE_SIZE;
insert_mmap(ucontext, mm2);
}
qhp->ibqp.qp_num = qhp->wq.qpid;
init_timer(&(qhp->timer));
PDBG("%s sq_num_entries %d, rq_num_entries %d "
"qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
__func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
1 << qhp->wq.size_log2, qhp->wq.rq_addr);
return &qhp->ibqp;
}
/*
 * Translate an ib_qp_attr modify request into iwch QP attributes and
 * hand it to iwch_modify_qp().  The RTR state is meaningless for
 * iWARP, so an IB_QPS_RTR transition request is silently dropped.
 */
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask = 0;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Nothing left to change?  Then we are done. */
	if (!attr_mask)
		return 0;

	qhp = to_iwch_qp(ibqp);
	memset(&attrs, 0, sizeof(attrs));

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = !!(attr->qp_access_flags &
				    IB_ACCESS_REMOTE_READ);
	attrs.enable_rdma_write = !!(attr->qp_access_flags &
				     IB_ACCESS_REMOTE_WRITE);
	attrs.enable_bind = !!(attr->qp_access_flags & IB_ACCESS_MW_BIND);

	if (attr_mask & IB_QP_STATE)
		mask |= IWCH_QP_ATTR_NEXT_STATE;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		mask |= IWCH_QP_ATTR_ENABLE_RDMA_READ |
			IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			IWCH_QP_ATTR_ENABLE_RDMA_BIND;

	return iwch_modify_qp(qhp->rhp, qhp, mask, &attrs, 0);
}
void iwch_qp_add_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
atomic_inc(&(to_iwch_qp(qp)->refcnt));
}
void iwch_qp_rem_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
wake_up(&(to_iwch_qp(qp)->wait));
}
/* Look up a QP by number for the iWARP CM. */
static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	struct iwch_dev *rhp = to_iwch_dev(dev);

	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(rhp, qpn);
}
/* iWARP has no P_Key table; always report a single pkey of 0. */
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);

	*pkey = 0;
	return 0;
}
/*
 * Report the port "GID": zero-filled except for the first 6 bytes,
 * which carry the MAC address of the underlying LLD netdev for the
 * requested port.  Only ports 1 and 2 are valid.
 */
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	BUG_ON(port == 0 || port > 2);
	memset(gid->raw, 0, sizeof(gid->raw));
	memcpy(gid->raw, dev->rdev.port_info.lldevs[port - 1]->dev_addr, 6);
	return 0;
}
/*
 * Pack the LLD firmware version string into a u64 with 16-bit
 * major/minor/micro fields.  The first character of info.fw_version
 * is skipped before parsing the dot-separated fields.
 */
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj = 0, fw_min = 0, fw_mic = 0;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	/*
	 * Guard each strsep() result: a malformed version string with
	 * fewer than three dot-separated fields would otherwise pass
	 * NULL to sscanf().  Fields that fail to parse stay 0.
	 */
	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	if (cp)
		sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	if (cp)
		sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	if (cp)
		sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}
/*
 * Report device capabilities and identity to the IB core.  Values
 * come from the RNIC attributes cached at probe time; the system
 * image GUID is derived from the LLD netdev MAC address.
 */
static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);

	memset(props, 0, sizeof(*props));
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;
	return 0;
}
/*
 * Report static port attributes.  cxgb3 ports are always reported
 * ACTIVE with fixed MTU/width/speed values; there is no dynamic port
 * state tracking here.
 */
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);

	memset(props, 0, sizeof(*props));
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = IB_MTU_2048;
	props->state = IB_PORT_ACTIVE;
	props->port_cap_flags = IB_PORT_CM_SUP |
				IB_PORT_SNMP_TUNNEL_SUP |
				IB_PORT_REINIT_SUP |
				IB_PORT_DEVICE_MGMT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;
	return 0;
}
/* sysfs "hw_rev": the adapter hardware revision (t3cdev type). */
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev =
		container_of(dev, struct iwch_dev, ibdev.dev);

	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
/* sysfs "fw_ver": firmware version string as reported by the LLD. */
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev =
		container_of(dev, struct iwch_dev, ibdev.dev);
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	struct ethtool_drvinfo info;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}
/* sysfs "hca_type": the LLD driver name from ethtool drvinfo. */
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev =
		container_of(dev, struct iwch_dev, ibdev.dev);
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	struct ethtool_drvinfo info;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}
/* sysfs "board_id": PCI vendor/device id pair of the adapter. */
static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev =
		container_of(dev, struct iwch_dev, ibdev.dev);

	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}
static int iwch_get_mib(struct ib_device *ibdev,
union rdma_protocol_stats *stats)
{
struct iwch_dev *dev;
struct tp_mib_stats m;
int ret;
PDBG("%s ibdev %p\n", __func__, ibdev);
dev = to_iwch_dev(ibdev);
ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
if (ret)
return -ENOSYS;
memset(stats, 0, sizeof *stats);
stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
m.ipInReceive_lo;
stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
m.ipInHdrErrors_lo;
stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
m.ipInAddrErrors_lo;
stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
m.ipInUnknownProtos_lo;
stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
m.ipInDiscards_lo;
stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
m.ipInDelivers_lo;
stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
m.ipOutRequests_lo;
stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
m.ipOutDiscards_lo;
stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
m.ipOutNoRoutes_lo;
stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
stats->iw.ipReasmFails = (u64) m.ipReasmFails;
stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
m.tcpInSegs_lo;
stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
m.tcpOutSegs_lo;
stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
m.tcpRetransSeg_lo;
stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
m.tcpInErrs_lo;
stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
return 0;
}
/* Read-only sysfs attributes exposed under the ib device node. */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
/* Created/removed as a group by iwch_register_device()/iwch_unregister_device(). */
static struct device_attribute *iwch_class_attributes[] = {
&dev_attr_hw_rev,
&dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
};
/*
 * Register the iwch device with the IB core.
 *
 * Fills in the ib_device ops and capability fields, allocates the
 * iWARP CM verbs table, registers with the core and creates the
 * sysfs attribute files.  Returns 0 on success or a negative errno;
 * on failure everything acquired here is released again (including
 * the iwcm table, which was previously leaked on the error paths).
 */
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;
	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	/* Don't leak the iwcm table allocated above. */
	kfree(dev->ibdev.iwcm);
	return ret;
}
/*
 * Tear down the ib_device registration: remove the sysfs attribute
 * files, unregister from the IB core, and free the iw_cm_verbs table
 * allocated in iwch_register_device() (previously leaked on unload).
 */
void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}
| gpl-2.0 |
Tesla-Redux-Devices/kernel_samsung_hlte | sound/soc/codecs/audience/escore-slim.c | 449 | 23706 | /*
* escore-slim.c -- Slimbus interface for Audience earSmart chips
*
* Copyright 2011 Audience, Inc.
*
* Author: Greg Clemson <gclemson@audience.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "escore.h"
#include "escore-slim.h"
/* Forward declarations for the local SLIMbus channel helpers below. */
static void escore_alloc_slim_rx_chan(struct slim_device *sbdev);
static void escore_alloc_slim_tx_chan(struct slim_device *sbdev);
static int escore_cfg_slim_rx(struct slim_device *sbdev, unsigned int *ch_num,
unsigned int ch_cnt, unsigned int rate);
static int escore_cfg_slim_tx(struct slim_device *sbdev, unsigned int *ch_num,
unsigned int ch_cnt, unsigned int rate);
static int escore_close_slim_rx(struct slim_device *sbdev, unsigned int *ch_num,
unsigned int ch_cnt);
static int escore_close_slim_tx(struct slim_device *sbdev, unsigned int *ch_num,
unsigned int ch_cnt);
/* Channel-number -> local port index translators (-1 when unmapped). */
static int escore_rx_ch_num_to_idx(struct escore_priv *escore, int ch_num);
static int escore_tx_ch_num_to_idx(struct escore_priv *escore, int ch_num);
/*
 * Record, for every local RX (sink) port, its channel number, slave
 * port handle and channel handle so later configure/close calls can
 * look them up by channel number.
 */
static void escore_alloc_slim_rx_chan(struct slim_device *sbdev)
{
	struct escore_priv *priv = slim_get_devicedata(sbdev);
	struct escore_slim_ch *rx = priv->slim_rx;
	int port;

	dev_dbg(&sbdev->dev, "%s()\n", __func__);

	for (port = 0; port < priv->slim_rx_ports; port++) {
		rx[port].ch_num = priv->slim_rx_port_to_ch_map[port];
		slim_get_slaveport(sbdev->laddr, port, &rx[port].sph,
				   SLIM_SINK);
		slim_query_ch(sbdev, rx[port].ch_num, &rx[port].ch_h);
	}
}
/*
 * Record, for every local TX (source) port, its channel number, slave
 * port handle and channel handle.  TX slave port ids start at 10.
 */
static void escore_alloc_slim_tx_chan(struct slim_device *sbdev)
{
	struct escore_priv *priv = slim_get_devicedata(sbdev);
	struct escore_slim_ch *tx = priv->slim_tx;
	int i;

	dev_dbg(&sbdev->dev, "%s()\n", __func__);

	for (i = 0; i < priv->slim_tx_ports; i++) {
		int port_id = i + 10;	/* TX slave ports are offset by 10 */

		tx[i].ch_num = priv->slim_tx_port_to_ch_map[i];
		slim_get_slaveport(sbdev->laddr, port_id, &tx[i].sph,
				   SLIM_SRC);
		slim_query_ch(sbdev, tx[i].ch_num, &tx[i].ch_h);
	}
}
/*
 * Translate a SLIMbus RX channel number into the local RX port index.
 * Returns -1 when the channel is not mapped to any port.
 */
static int escore_rx_ch_num_to_idx(struct escore_priv *escore, int ch_num)
{
	int i;

	pr_debug("%s(ch_num = %d)\n", __func__, ch_num);
	for (i = 0; i < escore->slim_rx_ports; i++)
		if (escore->slim_rx_port_to_ch_map[i] == ch_num)
			return i;
	return -1;
}
/*
 * Translate a SLIMbus TX channel number into the local TX port index.
 * Returns -1 when the channel is not mapped to any port.
 */
static int escore_tx_ch_num_to_idx(struct escore_priv *escore, int ch_num)
{
	int i;

	pr_debug("%s(ch_num = %d)\n", __func__, ch_num);
	for (i = 0; i < escore->slim_tx_ports; i++)
		if (escore->slim_tx_port_to_ch_map[i] == ch_num)
			return i;
	return -1;
}
static int escore_cfg_slim_rx(struct slim_device *sbdev, unsigned int *ch_num,
unsigned int ch_cnt, unsigned int rate)
{
struct escore_priv *escore_priv = slim_get_devicedata(sbdev);
struct escore_slim_ch *rx = escore_priv->slim_rx;
u16 grph;
u32 *sph;
u16 *ch_h;
struct slim_ch prop;
int i;
int idx;
int rc = 0;
dev_dbg(&sbdev->dev, "%s(ch_cnt = %d, rate = %d)\n", __func__,
ch_cnt, rate);
sph = kmalloc(sizeof(u32)*escore_priv->slim_rx_ports, GFP_KERNEL);
if (!sph)
return -ENOMEM;
ch_h = kmalloc(sizeof(u32)*escore_priv->slim_rx_ports, GFP_KERNEL);
if (!ch_h) {
kfree(sph);
return -ENOMEM;
}
for (i = 0; i < ch_cnt; i++) {
idx = escore_rx_ch_num_to_idx(escore_priv, ch_num[i]);
ch_h[i] = rx[idx].ch_h;
sph[i] = rx[idx].sph;
}
prop.prot = SLIM_AUTO_ISO;
prop.baser = SLIM_RATE_4000HZ;
prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
prop.ratem = (rate/4000);
prop.sampleszbits = 16;
rc = slim_define_ch(sbdev, &prop, ch_h, ch_cnt, true, &grph);
if (rc < 0) {
dev_err(&sbdev->dev, "%s(): slim_define_ch() failed: %d\n",
__func__, rc);
goto slim_define_ch_error;
}
for (i = 0; i < ch_cnt; i++) {
rc = slim_connect_sink(sbdev, &sph[i], 1, ch_h[i]);
if (rc < 0) {
dev_err(&sbdev->dev,
"%s(): slim_connect_sink() failed: %d\n",
__func__, rc);
goto slim_connect_sink_error;
}
}
rc = slim_control_ch(sbdev, grph, SLIM_CH_ACTIVATE, true);
if (rc < 0) {
dev_err(&sbdev->dev,
"%s(): slim_control_ch() failed: %d\n",
__func__, rc);
goto slim_control_ch_error;
}
for (i = 0; i < ch_cnt; i++) {
idx = escore_rx_ch_num_to_idx(escore_priv, ch_num[i]);
rx[idx].grph = grph;
}
kfree(sph);
kfree(ch_h);
return rc;
slim_control_ch_error:
slim_connect_sink_error:
escore_close_slim_rx(sbdev, ch_num, ch_cnt);
slim_define_ch_error:
kfree(sph);
kfree(ch_h);
return rc;
}
static int escore_cfg_slim_tx(struct slim_device *sbdev, unsigned int *ch_num,
unsigned int ch_cnt, unsigned int rate)
{
struct escore_priv *escore_priv = slim_get_devicedata(sbdev);
struct escore_slim_ch *tx = escore_priv->slim_tx;
u16 grph;
u32 *sph;
u16 *ch_h;
struct slim_ch prop;
int i;
int idx;
int rc;
dev_dbg(&sbdev->dev, "%s(ch_cnt = %d, rate = %d)\n", __func__,
ch_cnt, rate);
sph = kmalloc(sizeof(u32)*escore_priv->slim_rx_ports, GFP_KERNEL);
if (!sph)
return -ENOMEM;
ch_h = kmalloc(sizeof(u32)*escore_priv->slim_rx_ports, GFP_KERNEL);
if (!ch_h) {
kfree(sph);
return -ENOMEM;
}
for (i = 0; i < ch_cnt; i++) {
idx = escore_tx_ch_num_to_idx(escore_priv, ch_num[i]);
ch_h[i] = tx[idx].ch_h;
sph[i] = tx[idx].sph;
}
prop.prot = SLIM_AUTO_ISO;
prop.baser = SLIM_RATE_4000HZ;
prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
prop.auxf = SLIM_CH_AUXF_NOT_APPLICABLE;
prop.ratem = (rate/4000);
prop.sampleszbits = 16;
rc = slim_define_ch(sbdev, &prop, ch_h, ch_cnt, true, &grph);
if (rc < 0) {
dev_err(&sbdev->dev, "%s(): slim_define_ch() failed: %d\n",
__func__, rc);
goto slim_define_ch_error;
}
for (i = 0; i < ch_cnt; i++) {
rc = slim_connect_src(sbdev, sph[i], ch_h[i]);
if (rc < 0) {
dev_err(&sbdev->dev,
"%s(): slim_connect_src() failed: %d\n",
__func__, rc);
dev_err(&sbdev->dev,
"%s(): ch_num[0] = %d\n",
__func__, ch_num[0]);
goto slim_connect_src_error;
}
}
rc = slim_control_ch(sbdev, grph, SLIM_CH_ACTIVATE, true);
if (rc < 0) {
dev_err(&sbdev->dev,
"%s(): slim_control_ch() failed: %d\n",
__func__, rc);
goto slim_control_ch_error;
}
for (i = 0; i < ch_cnt; i++) {
idx = escore_tx_ch_num_to_idx(escore_priv, ch_num[i]);
tx[idx].grph = grph;
}
kfree(sph);
kfree(ch_h);
return rc;
slim_control_ch_error:
slim_connect_src_error:
escore_close_slim_tx(sbdev, ch_num, ch_cnt);
slim_define_ch_error:
kfree(sph);
kfree(ch_h);
return rc;
}
/*
 * Deactivate and disconnect a previously configured group of RX
 * channels: remove the channel group recorded at configure time,
 * clear the stored group handles, then disconnect the slave ports.
 */
static int escore_close_slim_rx(struct slim_device *sbdev, unsigned int *ch_num,
				unsigned int ch_cnt)
{
	struct escore_priv *priv = slim_get_devicedata(sbdev);
	struct escore_slim_ch *rx = priv->slim_rx;
	u32 *port_handles;
	u16 group = 0;
	int rc;
	int i;
	int idx;

	port_handles = kmalloc(sizeof(u32) * priv->slim_rx_ports, GFP_KERNEL);
	if (!port_handles)
		return -ENOMEM;

	dev_dbg(&sbdev->dev, "%s(ch_cnt = %d)\n", __func__, ch_cnt);

	for (i = 0; i < ch_cnt; i++) {
		idx = escore_rx_ch_num_to_idx(priv, ch_num[i]);
		port_handles[i] = rx[idx].sph;
		group = rx[idx].grph;
	}

	rc = slim_control_ch(sbdev, group, SLIM_CH_REMOVE, true);
	if (rc < 0) {
		dev_err(&sbdev->dev,
			"%s(): slim_control_ch() failed: %d\n",
			__func__, rc);
		goto out_free;
	}

	for (i = 0; i < ch_cnt; i++) {
		idx = escore_rx_ch_num_to_idx(priv, ch_num[i]);
		rx[idx].grph = 0;
	}

	rc = slim_disconnect_ports(sbdev, port_handles, ch_cnt);
	if (rc < 0)
		dev_err(&sbdev->dev,
			"%s(): slim_disconnect_ports() failed: %d\n",
			__func__, rc);
out_free:
	kfree(port_handles);
	return rc;
}
/*
 * Deactivate and disconnect a previously configured group of TX
 * channels: remove the channel group recorded at configure time,
 * clear the stored group handles, then disconnect the slave ports.
 *
 * Fix: the error message for a failed slim_control_ch() previously
 * named slim_connect_sink(), which is not called here.
 */
static int escore_close_slim_tx(struct slim_device *sbdev, unsigned int *ch_num,
				unsigned int ch_cnt)
{
	struct escore_priv *escore_priv = slim_get_devicedata(sbdev);
	struct escore_slim_ch *tx = escore_priv->slim_tx;
	u16 grph = 0;
	u32 *sph;
	int i;
	int idx;
	int rc;

	sph = kmalloc(sizeof(u32) * escore_priv->slim_tx_ports, GFP_KERNEL);
	if (!sph)
		return -ENOMEM;

	dev_dbg(&sbdev->dev, "%s(ch_cnt = %d)\n", __func__, ch_cnt);

	for (i = 0; i < ch_cnt; i++) {
		idx = escore_tx_ch_num_to_idx(escore_priv, ch_num[i]);
		sph[i] = tx[idx].sph;
		grph = tx[idx].grph;
	}

	rc = slim_control_ch(sbdev, grph, SLIM_CH_REMOVE, true);
	if (rc < 0) {
		dev_err(&sbdev->dev,
			"%s(): slim_control_ch() failed: %d\n",
			__func__, rc);
		goto slim_control_ch_error;
	}
	for (i = 0; i < ch_cnt; i++) {
		idx = escore_tx_ch_num_to_idx(escore_priv, ch_num[i]);
		tx[idx].grph = 0;
	}

	rc = slim_disconnect_ports(sbdev, sph, ch_cnt);
	if (rc < 0)
		dev_err(&sbdev->dev,
			"%s(): slim_disconnect_ports() failed: %d\n",
			__func__, rc);
slim_control_ch_error:
	kfree(sph);
	return rc;
}
/*
 * Proxy to the platform's remote RX channel-configure hook, if one
 * was registered.  Returns the hook's status, or 0 when absent.
 */
int escore_remote_cfg_slim_rx(int dai_id)
{
	struct escore_priv *escore = &escore_priv;

	dev_dbg(escore->dev, "%s(dai_id = %d)\n", __func__, dai_id);
	if (!escore->remote_cfg_slim_rx)
		return 0;
	return escore->remote_cfg_slim_rx(dai_id);
}
EXPORT_SYMBOL_GPL(escore_remote_cfg_slim_rx);
/*
 * Proxy to the platform's remote TX channel-configure hook, if one
 * was registered.  Returns the hook's status, or 0 when absent.
 */
int escore_remote_cfg_slim_tx(int dai_id)
{
	struct escore_priv *escore = &escore_priv;

	dev_dbg(escore->dev, "%s(dai_id = %d)\n", __func__, dai_id);
	if (!escore->remote_cfg_slim_tx)
		return 0;
	return escore->remote_cfg_slim_tx(dai_id);
}
EXPORT_SYMBOL_GPL(escore_remote_cfg_slim_tx);
/*
 * Proxy to the platform's remote RX channel-close hook, if one was
 * registered.  Returns the hook's status, or 0 when absent.
 */
int escore_remote_close_slim_rx(int dai_id)
{
	struct escore_priv *escore = &escore_priv;

	dev_dbg(escore->dev, "%s(dai_id = %d)\n", __func__, dai_id);
	if (!escore->remote_close_slim_rx)
		return 0;
	return escore->remote_close_slim_rx(dai_id);
}
EXPORT_SYMBOL_GPL(escore_remote_close_slim_rx);
/*
 * Proxy to the platform's remote TX channel-close hook, if one was
 * registered.  Returns the hook's status, or 0 when absent.
 */
int escore_remote_close_slim_tx(int dai_id)
{
	struct escore_priv *escore = &escore_priv;

	dev_dbg(escore->dev, "%s(dai_id = %d)\n", __func__, dai_id);
	if (!escore->remote_close_slim_tx)
		return 0;
	return escore->remote_close_slim_tx(dai_id);
}
EXPORT_SYMBOL_GPL(escore_remote_close_slim_tx);
/*
 * One-time SLIMbus slave setup: build the RX and TX port/channel
 * lookup tables once the device is addressable.
 */
void escore_init_slim_slave(struct slim_device *sbdev)
{
	dev_dbg(&sbdev->dev, "%s()\n", __func__);

	escore_alloc_slim_rx_chan(sbdev);
	escore_alloc_slim_tx_chan(sbdev);
}
/*
 * Read @len bytes from the chip's response value element over
 * SLIMbus.  Returns 0 on success or the slimbus error code.
 */
int escore_slim_read(struct escore_priv *escore, void *buf, int len)
{
	struct slim_device *sbdev = escore->gen0_client;
	struct slim_ele_access msg = {
		.start_offset = ES_READ_VE_OFFSET,
		.num_bytes = ES_READ_VE_WIDTH,
		.comp = NULL,	/* synchronous access, no completion */
	};
	int rc;

	/* (Removed an unused on-stack completion declaration.) */
	rc = slim_request_val_element(sbdev, &msg, buf, len);
	if (rc != 0)
		dev_err(&sbdev->dev, "%s: read failed rc=%d\n",
			__func__, rc);
	return rc;
}
/*
 * Write @len bytes to the chip's command value element, in chunks of
 * at most ES_WRITE_VE_WIDTH bytes.  Returns 0 on success or the
 * first slimbus error encountered.
 */
int escore_slim_write(struct escore_priv *escore, const void *buf, int len)
{
	struct slim_device *sbdev = escore->gen0_client;
	struct slim_ele_access msg = {
		.start_offset = ES_WRITE_VE_OFFSET,
		.num_bytes = ES_WRITE_VE_WIDTH,
		.comp = NULL,
	};
	int rc = 0;
	int done = 0;

	BUG_ON(len < 0);

	while (done < len) {
		int chunk = min(len - done, ES_WRITE_VE_WIDTH);

		/* A short final chunk is valid as long as the caller
		 * expects the most significant bytes of the VE value
		 * written to be zero.
		 */
		if (chunk < ES_WRITE_VE_WIDTH)
			dev_dbg(&sbdev->dev,
				"write smaller than VE size %d < %d\n",
				chunk, ES_WRITE_VE_WIDTH);
		rc = slim_change_val_element(sbdev, &msg, buf + done, chunk);
		if (rc) {
			dev_err(&sbdev->dev, "%s: failed ret=%d\n",
				__func__, rc);
			break;
		}
		done += chunk;
	}
	return rc;
}
/*
 * Send a 32-bit command to the chip.  Unless @sr ("suppress
 * response") is set, poll the 32-bit response after the chip's
 * maximum response time and store it in *@resp.
 *
 * Fix: the response debug print now runs only after a successful
 * read; previously it dereferenced *resp even when the read failed
 * and *resp had never been written.
 */
int escore_slim_cmd(struct escore_priv *escore, u32 cmd, int sr, u32 *resp)
{
	int err;
	u32 rv;

	pr_debug("escore: cmd=0x%08x sr=%i\n", cmd, sr);

	cmd = cpu_to_le32(cmd);
	err = escore_slim_write(escore, &cmd, sizeof(cmd));
	if (err || sr)
		return err;

	/* The response must be actively read. Maximum response time
	 * is 20ms.
	 */
	msleep(20);
	err = escore_slim_read(escore, &rv, sizeof(rv));
	if (!err) {
		*resp = le32_to_cpu(rv);
		pr_debug("core: %s resp=0x%08x\n", __func__, *resp);
	}
	return err;
}
/* DAI format hook: nothing to configure for the SLIMbus interface. */
static int escore_slim_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	return 0;
}
/* Channel-map set hook: a no-op here; the maps are fixed per device. */
int escore_slim_set_channel_map(struct snd_soc_dai *dai,
				unsigned int tx_num, unsigned int *tx_slot,
				unsigned int rx_num, unsigned int *rx_slot)
{
	return 0;
}
EXPORT_SYMBOL_GPL(escore_slim_set_channel_map);
/* Channel-map get hook: a no-op here; nothing is reported back. */
int escore_slim_get_channel_map(struct snd_soc_dai *dai,
				unsigned int *tx_num, unsigned int *tx_slot,
				unsigned int *rx_num, unsigned int *rx_slot)
{
	return 0;
}
EXPORT_SYMBOL_GPL(escore_slim_get_channel_map);
/* Tristate hook: not applicable to SLIMbus; always succeeds. */
static int escore_slim_set_tristate(struct snd_soc_dai *dai, int tristate)
{
	return 0;
}
/* Digital mute hook: muting is not handled at this layer. */
static int escore_slim_port_mute(struct snd_soc_dai *dai, int mute)
{
	return 0;
}
/*
 * DAI startup: keep the codec's parent device powered for the
 * duration of the stream via runtime PM.
 *
 * Fix: the debug print previously dereferenced codec->dev before the
 * NULL checks that the rest of the function applies.
 */
int escore_slim_startup(struct snd_pcm_substream *substream,
			struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;

	if (codec && codec->dev)
		dev_dbg(codec->dev, "%s() dai->name = %s, dai->id = %d\n",
			__func__, dai->name, dai->id);
	if (codec && codec->dev && codec->dev->parent)
		pm_runtime_get_sync(codec->dev->parent);
	return 0;
}
EXPORT_SYMBOL_GPL(escore_slim_startup);
/*
 * DAI shutdown: release the runtime-PM reference taken in
 * escore_slim_startup(), marking the device busy first so autosuspend
 * timing starts from now.
 *
 * Fix: the debug print previously dereferenced codec->dev before the
 * NULL checks that the rest of the function applies.
 */
void escore_slim_shutdown(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;

	if (codec && codec->dev)
		dev_dbg(codec->dev, "%s() dai->name = %s, dai->id = %d\n",
			__func__, dai->name, dai->id);
	if (codec && codec->dev && codec->dev->parent) {
		pm_runtime_mark_last_busy(codec->dev->parent);
		pm_runtime_put(codec->dev->parent);
	}
}
EXPORT_SYMBOL_GPL(escore_slim_shutdown);
/*
 * DAI hw_params: validate and record the channel count and sample
 * rate for this DAI, then, when the host owns local channel setup,
 * configure the SLIMbus channels in the direction reported by the
 * channel_dir() callback.
 */
int escore_slim_hw_params(struct snd_pcm_substream *substream,
			  struct snd_pcm_hw_params *params,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct escore_priv *escore = snd_soc_codec_get_drvdata(codec);
	int id = dai->id;
	int channels;
	int rate;
	int ret = 0;
	int dir;

	dev_dbg(codec->dev, "%s() dai->name = %s, dai->id = %d\n", __func__,
		dai->name, dai->id);

	/* 1..6 channels are supported. */
	channels = params_channels(params);
	if (channels < 1 || channels > 6) {
		dev_err(codec->dev,
			"%s(): unsupported number of channels, %d\n",
			__func__, channels);
		return -EINVAL;
	}
	escore->dai[id - 1].ch_tot = channels;

	rate = params_rate(params);
	switch (rate) {
	case 8000:
	case 16000:
	case 24000:
	case 32000:
	case 48000:
		escore->dai[id - 1].rate = rate;
		break;
	default:
		dev_err(codec->dev,
			"%s(): unsupported rate, %d\n",
			__func__, rate);
		return -EINVAL;
	}

	if (!escore->local_slim_ch_cfg || !escore->channel_dir)
		return 0;

	dir = escore->channel_dir(id);
	if (dir == ES_SLIM_CH_RX) {
		ret = escore_cfg_slim_rx(escore->gen0_client,
					 escore->dai[id - 1].ch_num,
					 escore->dai[id - 1].ch_tot,
					 escore->dai[id - 1].rate);
		escore->dai[id - 1].ch_act = 1;
	} else if (dir == ES_SLIM_CH_TX) {
		ret = escore_cfg_slim_tx(escore->gen0_client,
					 escore->dai[id - 1].ch_num,
					 escore->dai[id - 1].ch_tot,
					 escore->dai[id - 1].rate);
		escore->dai[id - 1].ch_act = 1;
	} else {
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(escore_slim_hw_params);
/*
 * DAI hw_free: when the host owns local channel setup and channels
 * were configured for this DAI, tear them down in the direction
 * reported by the channel_dir() callback.
 */
static int escore_slim_hw_free(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec = dai->codec;
	struct escore_priv *escore = snd_soc_codec_get_drvdata(codec);
	int id = dai->id;
	int ret = 0;
	int dir;

	if (!escore->local_slim_ch_cfg || !escore->channel_dir)
		return 0;

	dir = escore->channel_dir(id);
	if (dir == ES_SLIM_CH_RX && escore->dai[id - 1].ch_tot) {
		ret = escore_close_slim_rx(escore->gen0_client,
					   escore->dai[id - 1].ch_num,
					   escore->dai[id - 1].ch_tot);
		escore->dai[id - 1].ch_act = 0;
	} else if (dir == ES_SLIM_CH_TX && escore->dai[id - 1].ch_tot) {
		ret = escore_close_slim_tx(escore->gen0_client,
					   escore->dai[id - 1].ch_num,
					   escore->dai[id - 1].ch_tot);
		escore->dai[id - 1].ch_act = 0;
	} else {
		return -EINVAL;
	}
	return 0;
}
/* DAI prepare hook: nothing to do before the stream starts. */
static int escore_slim_prepare(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	return 0;
}
/* DAI trigger hook: start/stop is handled elsewhere; always succeeds. */
int escore_slim_trigger(struct snd_pcm_substream *substream,
			int cmd, struct snd_soc_dai *dai)
{
	return 0;
}
EXPORT_SYMBOL_GPL(escore_slim_trigger);
/* snd_soc_dai_ops table shared by all escore SLIMbus DAIs. */
struct snd_soc_dai_ops escore_slim_port_dai_ops = {
.set_fmt = escore_slim_set_dai_fmt,
.set_channel_map = escore_slim_set_channel_map,
.get_channel_map = escore_slim_get_channel_map,
.set_tristate = escore_slim_set_tristate,
.digital_mute = escore_slim_port_mute,
.startup = escore_slim_startup,
.shutdown = escore_slim_shutdown,
.hw_params = escore_slim_hw_params,
.hw_free = escore_slim_hw_free,
.prepare = escore_slim_prepare,
.trigger = escore_slim_trigger,
};
/*
 * Put the chip into firmware-download mode: send the SLIMbus boot
 * command and verify the expected acknowledge pattern.
 *
 * Returns the transfer status on write/read failure, -EIO when the
 * ack pattern does not match, or the final read status on success.
 */
int escore_slim_boot_setup(struct escore_priv *escore)
{
	u32 cmd = ES_SLIM_BOOT_CMD;
	u32 ack;
	char msg[4];
	int rc;

	pr_debug("%s()\n", __func__);
	pr_debug("%s(): write ES_BOOT_CMD = 0x%08x\n", __func__, cmd);

	cpu_to_le32s(&cmd);
	memcpy(msg, (char *)&cmd, 4);
	rc = escore->dev_write(escore, msg, 4);
	if (rc < 0) {
		pr_err("%s(): firmware load failed boot write\n", __func__);
		goto out;
	}

	/* Give the chip time to latch the command before polling. */
	usleep_range(1000, 1000);

	memset(msg, 0, 4);
	rc = escore->dev_read(escore, msg, 4);
	if (rc < 0) {
		pr_err("%s(): firmware load failed boot ack\n", __func__);
		goto out;
	}

	memcpy((char *)&ack, msg, 4);
	cpu_to_le32s(&ack);
	pr_debug("%s(): boot_ack = 0x%08x\n", __func__, ack);
	if (ack != ES_SLIM_BOOT_ACK) {
		pr_err("%s(): firmware load failed boot ack pattern", __func__);
		rc = -EIO;
	}
out:
	return rc;
}
static int escore_slim_device_up(struct slim_device *sbdev);
static int escore_fw_thread(void *priv)
{
struct escore_priv *escore = (struct escore_priv *)priv;
do {
slim_get_logical_addr(escore->gen0_client,
escore->gen0_client->e_addr,
6, &(escore->gen0_client->laddr));
usleep_range(1000, 2000);
} while (escore->gen0_client->laddr == 0xf0);
dev_dbg(&escore->gen0_client->dev, "%s(): gen0_client LA = %d\n",
__func__, escore->gen0_client->laddr);
do {
slim_get_logical_addr(escore->intf_client,
escore->intf_client->e_addr,
6, &(escore->intf_client->laddr));
usleep_range(1000, 2000);
} while (escore->intf_client->laddr == 0xf0);
dev_dbg(&escore->intf_client->dev, "%s(): intf_client LA = %d\n",
__func__, escore->intf_client->laddr);
escore_slim_device_up(escore->gen0_client);
return 0;
}
/*
 * SLIMbus probe.  Two client devices are enumerated for the chip
 * (interface + generic); this probe is called once per client and
 * only proceeds to full initialization after both have appeared.
 * When ES_DEVICE_UP is not defined, a helper thread polls for the
 * logical addresses and triggers firmware download.
 */
static int escore_slim_probe(struct slim_device *sbdev)
{
struct esxxx_platform_data *pdata = sbdev->dev.platform_data;
int rc;
struct task_struct *thread = NULL;
dev_dbg(&sbdev->dev, "%s(): sbdev->name = %s\n", __func__, sbdev->name);
/* Remember whichever of the two client devices this call is for. */
if (strncmp(sbdev->name, "earSmart-codec-intf",
SLIMBUS_NAME_SIZE) == 0) {
dev_dbg(&sbdev->dev, "%s(): interface device probe\n",
__func__);
escore_priv.intf_client = sbdev;
}
if (strncmp(sbdev->name, "earSmart-codec-gen0",
SLIMBUS_NAME_SIZE) == 0) {
dev_dbg(&sbdev->dev, "%s(): generic device probe\n",
__func__);
escore_priv.gen0_client = sbdev;
}
/* Wait for the second client before doing any real setup. */
if (escore_priv.intf_client == NULL ||
escore_priv.gen0_client == NULL) {
dev_dbg(&sbdev->dev, "%s() incomplete initialization\n",
__func__);
return 0;
}
if (pdata == NULL) {
dev_err(&sbdev->dev, "%s(): pdata is NULL", __func__);
rc = -EIO;
goto pdata_error;
}
slim_set_clientdata(sbdev, &escore_priv);
/* Install the SLIMbus transport ops into the core driver state. */
escore_priv.intf = ES_SLIM_INTF;
escore_priv.dev_read = escore_slim_read;
escore_priv.dev_write = escore_slim_write;
escore_priv.cmd = escore_slim_cmd;
escore_priv.boot_setup = escore_slim_boot_setup;
escore_priv.dev = &escore_priv.gen0_client->dev;
rc = escore_priv.probe(&escore_priv.gen0_client->dev);
if (rc) {
dev_err(&sbdev->dev, "%s(): escore_core_probe() failed %d\n",
__func__, rc);
goto escore_core_probe_error;
}
#if !defined(ES_DEVICE_UP)
/* No device_up callback from the bus: poll for enumeration and
 * start the firmware download from a helper thread. */
thread = kthread_run(escore_fw_thread, &escore_priv, "audience thread");
if (IS_ERR(thread)) {
dev_err(&sbdev->dev, "%s(): can't create firmware thread:%p\n",
__func__, thread);
return -ENOMEM;
}
#endif
return 0;
escore_core_probe_error:
pdata_error:
dev_dbg(&sbdev->dev, "%s(): exit with error\n", __func__);
return rc;
}
/*
 * SLIMbus remove: release the board GPIOs and unregister the codec.
 *
 * Fix: escore_slim_probe() bails out (rc = -EIO) when platform_data is
 * NULL, yet this function dereferenced pdata unconditionally, which
 * would oops on remove of a device that never had platform data.  Guard
 * the GPIO teardown behind a NULL check.
 */
static int escore_slim_remove(struct slim_device *sbdev)
{
	struct esxxx_platform_data *pdata = sbdev->dev.platform_data;

	dev_dbg(&sbdev->dev, "%s(): sbdev->name = %s\n", __func__, sbdev->name);

	if (pdata) {
		gpio_free(pdata->reset_gpio);
		gpio_free(pdata->wakeup_gpio);
		gpio_free(pdata->gpioa_gpio);
	}

	snd_soc_unregister_codec(&sbdev->dev);

	return 0;
}
/*
 * Register the earSmart codec with ASoC after firmware download and
 * run the interface-specific setup hook.
 *
 * Fix: corrected the misspelled "regsiter" in the debug message.
 *
 * Returns the snd_soc_register_codec() result.
 */
static int register_snd_soc(struct escore_priv *escore_priv)
{
	int rc;
	struct slim_device *sbdev = escore_priv->gen0_client;

	escore_init_slim_slave(sbdev);

	dev_dbg(&sbdev->dev, "%s(): name = %s\n", __func__, sbdev->name);
	rc = snd_soc_register_codec(&sbdev->dev,
			escore_priv->soc_codec_dev_escore,
			escore_priv->escore_dai, escore_priv->escore_dai_nr);
	dev_dbg(&sbdev->dev, "%s(): rc = snd_soc_register_codec() = %d\n",
		__func__, rc);

	/* Interface-specific post-registration setup (channel wiring). */
	escore_priv->slim_setup(escore_priv);

	return rc;
}
/*
 * Firmware download worker: boot the chip, drop the firmware image,
 * then register the codec with ASoC.
 *
 * Fix: the return value of escore->bootup() was assigned to rc and then
 * silently overwritten by the register_snd_soc() result; a failed boot
 * went completely unreported.  Log it.  (The overall flow — releasing
 * the firmware and registering the codec regardless — is preserved,
 * since the caller BUG()s on a non-zero return.)
 */
int fw_download(void *arg)
{
	struct escore_priv *escore = (struct escore_priv *)arg;
	int rc;

	rc = escore->bootup(escore);
	if (rc)
		pr_err("%s(): bootup failed, rc = %d\n", __func__, rc);
	release_firmware(escore->fw);

	rc = register_snd_soc(escore);
	pr_debug("%s(): register_snd_soc rc=%d\n", __func__, rc);

	/* Balance the module reference taken by whoever queued us. */
	module_put(THIS_MODULE);
	return 0;
}
/*
 * Called when a SLIMbus client reports "up"; only the generic (gen0)
 * client should trigger the firmware download.
 *
 * Fix: the interface-client check compared against "escore-codec-intf",
 * but escore_slim_probe() and the id table register the client as
 * "earSmart-codec-intf", so the early return could never fire and the
 * download ran for the interface device as well.  Match the real name.
 */
static int escore_slim_device_up(struct slim_device *sbdev)
{
	struct escore_priv *priv;
	int rc;

	dev_dbg(&sbdev->dev, "%s: name=%s\n", __func__, sbdev->name);
	dev_dbg(&sbdev->dev, "%s: laddr=%d\n", __func__, sbdev->laddr);

	/* Start the firmware download in the workqueue context. */
	priv = slim_get_devicedata(sbdev);

	if (strncmp(sbdev->name, "earSmart-codec-intf",
		    SLIMBUS_NAME_SIZE) == 0)
		return 0;

	rc = fw_download(priv);
	BUG_ON(rc != 0);

	return rc;
}
/*
 * Power-management callbacks.  All five are trace-only stubs: the
 * device needs no explicit action here, but keeping the hooks gives a
 * debug breadcrumb for PM transitions.
 */
static int escore_slim_suspend(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
	return 0;
}

static int escore_slim_resume(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
	return 0;
}

static int escore_slim_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
	return 0;
}

static int escore_slim_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
	return 0;
}

static int escore_slim_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);
	return 0;
}
/* System-sleep and runtime PM ops (all stubs above). */
static const struct dev_pm_ops escore_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		escore_slim_suspend,
		escore_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		escore_slim_runtime_suspend,
		escore_slim_runtime_resume,
		escore_slim_runtime_idle
	)
};

/* Both logical clients plus the parent name must appear here so the
 * SLIMbus core probes each of them against this driver. */
static const struct slim_device_id escore_slim_id[] = {
	{ "earSmart-codec", 0 },
	{ "earSmart-codec-intf", 0 },
	{ "earSmart-codec-gen0", 0 },
	{ }
};
MODULE_DEVICE_TABLE(slim, escore_slim_id);

struct slim_driver escore_slim_driver = {
	.driver = {
		.name = "earSmart-codec",
		.owner = THIS_MODULE,
		.pm = &escore_slim_dev_pm_ops,
	},
	.probe = escore_slim_probe,
	.remove = escore_slim_remove,
#if defined(ES_DEVICE_UP)
	/* With device_up support, firmware download happens on bus-up
	 * notification instead of from the probe-time kthread. */
	.device_up = escore_slim_device_up,
#endif
	.id_table = escore_slim_id,
};
/*
 * Register the earSmart SLIMbus driver with the bus core.
 *
 * Fixes: declare the parameter list as (void) — an empty () is an
 * old-style unprototyped declaration in C — and terminate the debug
 * message with a newline so it doesn't merge with the next log line.
 *
 * Returns 0 on success, a negative errno from slim_driver_register().
 */
int escore_slimbus_init(void)
{
	int rc;

	rc = slim_driver_register(&escore_slim_driver);
	if (!rc)
		pr_debug("%s() registered as SLIMBUS\n", __func__);

	return rc;
}
MODULE_DESCRIPTION("ASoC ES driver");
MODULE_AUTHOR("Greg Clemson <gclemson@audience.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:escore-codec");
| gpl-2.0 |
Hybridmax/G92X_Mystery_Kernel | drivers/gpio/gpio-twl4030.c | 1729 | 15298 | /*
* Access to GPIOs on TWL4030/TPS659x0 chips
*
* Copyright (C) 2006-2007 Texas Instruments, Inc.
* Copyright (C) 2006 MontaVista Software, Inc.
*
* Code re-arranged and cleaned up by:
* Syed Mohammed Khasim <x0khasim@ti.com>
*
* Initial Code:
* Andy Lowe / Nishanth Menon
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/i2c/twl.h>
/*
* The GPIO "subchip" supports 18 GPIOs which can be configured as
* inputs or outputs, with pullups or pulldowns on each pin. Each
* GPIO can trigger interrupts on either or both edges.
*
* GPIO interrupts can be fed to either of two IRQ lines; this is
* intended to support multiple hosts.
*
* There are also two LED pins used sometimes as output-only GPIOs.
*/
/* genirq interfaces are not available to modules */
#ifdef MODULE
#define is_module() true
#else
#define is_module() false
#endif
/* GPIO_CTRL Fields */
#define MASK_GPIO_CTRL_GPIO0CD1 BIT(0)
#define MASK_GPIO_CTRL_GPIO1CD2 BIT(1)
#define MASK_GPIO_CTRL_GPIO_ON BIT(2)
/* Mask for GPIO registers when aggregated into a 32-bit integer */
#define GPIO_32_MASK 0x0003ffff
/* Per-chip driver state; one instance per probed TWL4030 GPIO block. */
struct gpio_twl4030_priv {
	struct gpio_chip gpio_chip;
	struct mutex mutex;		/* serializes all register access and cache updates */
	int irq_base;			/* first Linux IRQ for GPIO0, 0 if IRQs unavailable */

	/* Bitfields for state caching (bit N == GPIO N; LED lines use
	 * bits TWL4030_GPIO_MAX and above) */
	unsigned int usage_count;	/* which lines are currently requested */
	unsigned int direction;		/* 1 = output, 0 = input */
	unsigned int out_state;		/* last value written to each output */
};

/*----------------------------------------------------------------------*/

/* Recover the driver state from the embedded gpio_chip. */
static inline struct gpio_twl4030_priv *to_gpio_twl4030(struct gpio_chip *chip)
{
	return container_of(chip, struct gpio_twl4030_priv, gpio_chip);
}
/*
* To configure TWL4030 GPIO module registers
*/
/*
 * Write one byte to a TWL4030 GPIO module register over I2C.
 * Returns 0 on success, a negative errno on failure.
 */
static inline int gpio_twl4030_write(u8 address, u8 data)
{
	return twl_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
}
/*----------------------------------------------------------------------*/
/*
* LED register offsets from TWL_MODULE_LED base
* PWMs A and B are dedicated to LEDs A and B, respectively.
*/
#define TWL4030_LED_LEDEN_REG 0x00
#define TWL4030_PWMAON_REG 0x01
#define TWL4030_PWMAOFF_REG 0x02
#define TWL4030_PWMBON_REG 0x03
#define TWL4030_PWMBOFF_REG 0x04
/* LEDEN bits */
#define LEDEN_LEDAON BIT(0)
#define LEDEN_LEDBON BIT(1)
#define LEDEN_LEDAEXT BIT(2)
#define LEDEN_LEDBEXT BIT(3)
#define LEDEN_LEDAPWM BIT(4)
#define LEDEN_LEDBPWM BIT(5)
#define LEDEN_PWM_LENGTHA BIT(6)
#define LEDEN_PWM_LENGTHB BIT(7)
#define PWMxON_LENGTH BIT(7)
/*----------------------------------------------------------------------*/
/*
* To read a TWL4030 GPIO module register
*/
/*
 * Read one TWL4030 GPIO module register over I2C.
 * Returns the register value (0..255) on success, or a negative errno
 * when the I2C transfer fails.
 */
static inline int gpio_twl4030_read(u8 address)
{
	u8 value;
	int status;

	status = twl_i2c_read_u8(TWL4030_MODULE_GPIO, &value, address);
	if (status < 0)
		return status;

	return value;
}
/*----------------------------------------------------------------------*/
/* Software copy of the LEDEN register; avoids a read-modify-write I2C
 * round-trip on every LED update.  Populated in twl_request(). */
static u8 cached_leden;

/* The LED lines are open drain outputs ... a FET pulls to GND, so an
 * external pullup is needed. We could also expose the integrated PWM
 * as a LED brightness control; we initialize it as "always on".
 */
static void twl4030_led_set_value(int led, int value)
{
	u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
	int status;

	/* LED B uses the same bit layout shifted up by one. */
	if (led)
		mask <<= 1;

	/* Open-drain sense is inverted: clearing the enable bits lets the
	 * external pullup raise the line ("on" from the GPIO caller's
	 * point of view). */
	if (value)
		cached_leden &= ~mask;
	else
		cached_leden |= mask;
	status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
				  TWL4030_LED_LEDEN_REG);
}
/*
 * Set the direction of one GPIO line via the GPIODATADIR banks
 * (read-modify-write, 8 lines per bank).
 * Returns 0 on success, a negative errno on I2C failure.
 */
static int twl4030_set_gpio_direction(int gpio, int is_input)
{
	u8 bank = gpio >> 3;
	u8 bit = BIT(gpio & 0x7);
	u8 reg_addr = REG_GPIODATADIR1 + bank;
	int cur;

	cur = gpio_twl4030_read(reg_addr);
	if (cur < 0)
		return cur;

	return gpio_twl4030_write(reg_addr,
				  is_input ? (cur & ~bit) : (cur | bit));
}
/*
 * Drive one GPIO output high or low.  The hardware provides separate
 * set/clear banks, so no read-modify-write is needed.
 * Returns 0 on success, a negative errno on I2C failure.
 */
static int twl4030_set_gpio_dataout(int gpio, int enable)
{
	u8 bank = gpio >> 3;
	u8 bit = BIT(gpio & 0x7);
	u8 reg_addr;

	reg_addr = enable ? (REG_SETGPIODATAOUT1 + bank)
			  : (REG_CLEARGPIODATAOUT1 + bank);

	return gpio_twl4030_write(reg_addr, bit);
}
/*
 * Read the input level of one GPIO line.
 * Returns 0 or 1 on success, or a negative errno on I2C failure
 * (negative values pass through the `ret > 0` guard unchanged).
 */
static int twl4030_get_gpio_datain(int gpio)
{
	u8 d_bnk = gpio >> 3;		/* DATAIN bank: 8 lines per register */
	u8 d_off = gpio & 0x7;		/* bit position within the bank */
	u8 base = 0;
	int ret = 0;

	base = REG_GPIODATAIN1 + d_bnk;
	ret = gpio_twl4030_read(base);
	/* ret == 0 needs no shift (every bit is already 0); errors are
	 * returned as-is. */
	if (ret > 0)
		ret = (ret >> d_off) & 0x1;
	return ret;
}
/*----------------------------------------------------------------------*/
/*
 * gpio_chip .request hook.  For the two LED lines (offsets >=
 * TWL4030_GPIO_MAX) this programs the PWM to always-drive and parks
 * the LED off; for ordinary GPIOs it powers the GPIO module on at
 * first use.  On success the line is marked used in usage_count.
 *
 * Fix: the LED branch used to decrement `offset` in place, so the
 * usage_count bookkeeping at `done:` recorded BIT(0)/BIT(1) — i.e.
 * GPIO0/GPIO1 — instead of the LED lines.  That both leaked a bogus
 * "in use" mark onto real GPIOs and made twl_get() reject the LEDs
 * with -EPERM.  Use a separate local for the LED index instead.
 */
static int twl_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
	int status = 0;

	mutex_lock(&priv->mutex);

	/* Support the two LED outputs as output-only GPIOs. */
	if (offset >= TWL4030_GPIO_MAX) {
		unsigned led = offset - TWL4030_GPIO_MAX;
		u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT
				| LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA;
		u8 reg = TWL4030_PWMAON_REG;

		/* LED B: same layout, one bit / one register further on. */
		if (led) {
			ledclr_mask <<= 1;
			reg = TWL4030_PWMBON_REG;
		}

		/* initialize PWM to always-drive */
		/* Configure PWM OFF register first */
		status = twl_i2c_write_u8(TWL4030_MODULE_LED, 0x7f, reg + 1);
		if (status < 0)
			goto done;

		/* Followed by PWM ON register */
		status = twl_i2c_write_u8(TWL4030_MODULE_LED, 0x7f, reg);
		if (status < 0)
			goto done;

		/* init LED to not-driven (high) */
		status = twl_i2c_read_u8(TWL4030_MODULE_LED, &cached_leden,
					 TWL4030_LED_LEDEN_REG);
		if (status < 0)
			goto done;
		cached_leden &= ~ledclr_mask;
		status = twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
					  TWL4030_LED_LEDEN_REG);
		goto done;
	}

	/* on first use, turn GPIO module "on" */
	if (!priv->usage_count) {
		struct twl4030_gpio_platform_data *pdata;
		u8 value = MASK_GPIO_CTRL_GPIO_ON;

		/* optionally have the first two GPIOs switch vMMC1
		 * and vMMC2 power supplies based on card presence.
		 */
		pdata = chip->dev->platform_data;
		if (pdata)
			value |= pdata->mmc_cd & 0x03;

		status = gpio_twl4030_write(REG_GPIO_CTRL, value);
	}
done:
	if (!status)
		priv->usage_count |= BIT(offset);

	mutex_unlock(&priv->mutex);
	return status;
}
/*
 * gpio_chip .free hook.  LED lines are parked "not driven" (value 1 ==
 * open-drain released); ordinary GPIOs clear their usage bit and the
 * GPIO module is powered off when the last one is released.
 */
static void twl_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);

	mutex_lock(&priv->mutex);
	if (offset >= TWL4030_GPIO_MAX) {
		/* NOTE(review): the LED path never clears its usage_count
		 * bit, so the module power-off below can be skipped after
		 * an LED was ever requested — confirm whether intended. */
		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
		goto out;
	}

	priv->usage_count &= ~BIT(offset);

	/* on last use, switch off GPIO module */
	if (!priv->usage_count)
		gpio_twl4030_write(REG_GPIO_CTRL, 0x0);

out:
	mutex_unlock(&priv->mutex);
}
/*
 * gpio_chip .direction_input hook.  Only the real GPIO lines can be
 * inputs; the two LED lines are output-only and return -EINVAL.
 * The cached direction bit is cleared (input) on success.
 */
static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
	int status;

	mutex_lock(&priv->mutex);

	status = (offset < TWL4030_GPIO_MAX)
		? twl4030_set_gpio_direction(offset, 1)
		: -EINVAL;	/* LED outputs can't be set as input */

	if (!status)
		priv->direction &= ~BIT(offset);

	mutex_unlock(&priv->mutex);

	return status;
}
/*
 * gpio_chip .get hook.  Outputs report the cached out_state; inputs
 * are read from hardware.  Returns 0/1, or -EPERM when the line was
 * never requested.
 */
static int twl_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
	int ret;
	int status = 0;

	mutex_lock(&priv->mutex);
	if (!(priv->usage_count & BIT(offset))) {
		ret = -EPERM;
		goto out;
	}

	if (priv->direction & BIT(offset))
		status = priv->out_state & BIT(offset);
	else
		status = twl4030_get_gpio_datain(offset);

	/* NOTE(review): a negative I2C error from datain is collapsed to
	 * 0 here rather than propagated — confirm whether intended. */
	ret = (status <= 0) ? 0 : 1;
out:
	mutex_unlock(&priv->mutex);
	return ret;
}
/*
 * gpio_chip .set hook.  Routes the write to either the GPIO dataout
 * banks or the LED register, then mirrors the value into out_state so
 * twl_get() can answer for outputs without touching the hardware.
 */
static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);

	mutex_lock(&priv->mutex);

	if (offset >= TWL4030_GPIO_MAX)
		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
	else
		twl4030_set_gpio_dataout(offset, value);

	if (value)
		priv->out_state |= BIT(offset);
	else
		priv->out_state &= ~BIT(offset);

	mutex_unlock(&priv->mutex);
}
/*
 * gpio_chip .direction_output hook.  Programs the direction register
 * for real GPIOs, marks the line as an output in the cache, then
 * drives the requested value.  Returns 0 or a negative errno.
 */
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
	int ret = 0;

	mutex_lock(&priv->mutex);
	if (offset < TWL4030_GPIO_MAX) {
		ret = twl4030_set_gpio_direction(offset, 0);
		if (ret) {
			mutex_unlock(&priv->mutex);
			return ret;
		}
	}

	/*
	 *  LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
	 */
	priv->direction |= BIT(offset);
	mutex_unlock(&priv->mutex);

	/* twl_set() takes the mutex itself, hence the unlock above. */
	twl_set(chip, offset, value);

	return ret;
}
/*
 * gpio_chip .to_irq hook.  Only real GPIO lines have interrupts, and
 * only when an irq_base was allocated at probe time; otherwise -EINVAL.
 */
static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);

	if (!priv->irq_base || offset >= TWL4030_GPIO_MAX)
		return -EINVAL;

	return priv->irq_base + offset;
}
/* Template gpio_chip copied into each probed instance; base/ngpio/dev
 * are filled in by gpio_twl4030_probe().  can_sleep is set because all
 * accessors go over I2C. */
static struct gpio_chip template_chip = {
	.label			= "twl4030",
	.owner			= THIS_MODULE,
	.request		= twl_request,
	.free			= twl_free,
	.direction_input	= twl_direction_in,
	.get			= twl_get,
	.direction_output	= twl_direction_out,
	.set			= twl_set,
	.to_irq			= twl_to_irq,
	.can_sleep		= 1,
};
/*----------------------------------------------------------------------*/
/*
 * Program pullup/pulldown for all 18 GPIOs.  `ups` and `downs` are
 * bitmasks indexed by GPIO number; each GPIO takes two bits in the
 * GPIOPUPDCTR registers (bit j = pulldown enable, bit j+1 = pullup
 * enable, four GPIOs per byte, five bytes total).  A GPIO set in
 * neither mask gets both bits clear (no pull).
 */
static int gpio_twl4030_pulls(u32 ups, u32 downs)
{
	u8 message[5];
	unsigned i, gpio_bit;

	/* For most pins, a pulldown was enabled by default.
	 * We should have data that's specific to this board.
	 */
	for (gpio_bit = 1, i = 0; i < 5; i++) {
		u8 bit_mask;
		unsigned j;

		/* Pack four GPIOs (2 bits each) into one register byte. */
		for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) {
			if (ups & gpio_bit)
				bit_mask |= 1 << (j + 1);
			else if (downs & gpio_bit)
				bit_mask |= 1 << (j + 0);
		}
		message[i] = bit_mask;
	}

	return twl_i2c_write(TWL4030_MODULE_GPIO, message,
			     REG_GPIOPUPDCTR1, 5);
}
/*
 * Program the three GPIO_DEBEN debounce-enable registers from an
 * 18-bit GPIO mask; the low two bits of the first register are forced
 * by the MMC card-detect selection.
 */
static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
{
	u8 message[3];

	/* 30 msec of debouncing is always used for MMC card detect,
	 * and is optional for everything else.
	 */
	message[0] = (debounce & 0xff) | (mmc_cd & 0x03);
	message[1] = (debounce >> 8) & 0xff;
	message[2] = (debounce >> 16) & 0x03;

	return twl_i2c_write(TWL4030_MODULE_GPIO, message,
			     REG_GPIO_DEBEN1, 3);
}
static int gpio_twl4030_remove(struct platform_device *pdev);
/*
 * Build platform data from device-tree properties.  Returns a
 * devm-allocated struct (freed automatically on driver detach) or
 * NULL on allocation failure.  Missing DT properties leave the
 * corresponding zero-initialized fields untouched.
 */
static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev)
{
	struct twl4030_gpio_platform_data *omap_twl_info;

	omap_twl_info = devm_kzalloc(dev, sizeof(*omap_twl_info), GFP_KERNEL);
	if (!omap_twl_info)
		return NULL;

	omap_twl_info->use_leds = of_property_read_bool(dev->of_node,
			"ti,use-leds");

	of_property_read_u32(dev->of_node, "ti,debounce",
			     &omap_twl_info->debounce);
	/* NOTE(review): this cast writes a full u32 through &mmc_cd; if
	 * mmc_cd is narrower than 32 bits the read can clobber adjacent
	 * fields — confirm the field type in the platform-data header. */
	of_property_read_u32(dev->of_node, "ti,mmc-cd",
			     (u32 *)&omap_twl_info->mmc_cd);
	of_property_read_u32(dev->of_node, "ti,pullups",
			     &omap_twl_info->pullups);
	of_property_read_u32(dev->of_node, "ti,pulldowns",
			     &omap_twl_info->pulldowns);

	return omap_twl_info;
}
/*
 * Platform probe: allocate driver state, set up the 18 GPIO IRQs
 * (skipped when built as a module), build platform data from DT when
 * present, program pulls and debounce, and register the gpio_chip
 * (plus the two LED lines when use_leds is set).
 */
static int gpio_twl4030_probe(struct platform_device *pdev)
{
	struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
	struct device_node *node = pdev->dev.of_node;
	struct gpio_twl4030_priv *priv;
	int ret, irq_base;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct gpio_twl4030_priv),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* maybe setup IRQs */
	if (is_module()) {
		dev_err(&pdev->dev, "can't dispatch IRQs from modules\n");
		goto no_irqs;
	}

	irq_base = irq_alloc_descs(-1, 0, TWL4030_GPIO_MAX, 0);
	if (irq_base < 0) {
		dev_err(&pdev->dev, "Failed to alloc irq_descs\n");
		return irq_base;
	}

	irq_domain_add_legacy(node, TWL4030_GPIO_MAX, irq_base, 0,
			      &irq_domain_simple_ops, NULL);

	/* NOTE(review): error returns below this point leave the irq
	 * descs/domain allocated — confirm whether teardown is needed. */
	ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base);
	if (ret < 0)
		return ret;

	priv->irq_base = irq_base;

no_irqs:
	priv->gpio_chip = template_chip;
	priv->gpio_chip.base = -1;
	priv->gpio_chip.ngpio = TWL4030_GPIO_MAX;
	priv->gpio_chip.dev = &pdev->dev;

	mutex_init(&priv->mutex);

	/* DT-built pdata overrides any board-file platform data. */
	if (node)
		pdata = of_gpio_twl4030(&pdev->dev);

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform data is missing\n");
		return -ENXIO;
	}

	/*
	 * NOTE:  boards may waste power if they don't set pullups
	 * and pulldowns correctly ... default for non-ULPI pins is
	 * pulldown, and some other pins may have external pullups
	 * or pulldowns.  Careful!
	 */
	ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns);
	if (ret)
		dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n",
			pdata->pullups, pdata->pulldowns, ret);

	ret = gpio_twl4030_debounce(pdata->debounce, pdata->mmc_cd);
	if (ret)
		dev_dbg(&pdev->dev, "debounce %.03x %.01x --> %d\n",
			pdata->debounce, pdata->mmc_cd, ret);

	/*
	 * NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE,
	 * is (still) clear if use_leds is set.
	 */
	if (pdata->use_leds)
		priv->gpio_chip.ngpio += 2;

	ret = gpiochip_add(&priv->gpio_chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
		/* zero ngpio so remove() skips the chip teardown */
		priv->gpio_chip.ngpio = 0;
		gpio_twl4030_remove(pdev);
		goto out;
	}

	platform_set_drvdata(pdev, priv);

	if (pdata && pdata->setup) {
		int status;

		status = pdata->setup(&pdev->dev, priv->gpio_chip.base,
				      TWL4030_GPIO_MAX);
		if (status)
			dev_dbg(&pdev->dev, "setup --> %d\n", status);
	}

out:
	return ret;
}
/* Cannot use __exit as gpio_twl4030_probe() calls us on its error path */
static int gpio_twl4030_remove(struct platform_device *pdev)
{
	struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
	struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
	int status;

	/* Give the board a chance to undo its setup() hook first. */
	if (pdata && pdata->teardown) {
		status = pdata->teardown(&pdev->dev, priv->gpio_chip.base,
					 TWL4030_GPIO_MAX);
		if (status) {
			dev_dbg(&pdev->dev, "teardown --> %d\n", status);
			return status;
		}
	}

	status = gpiochip_remove(&priv->gpio_chip);
	if (status < 0)
		return status;

	/* Built-in: the IRQs set up at probe cannot be torn down yet,
	 * so refuse removal with a loud warning. */
	if (is_module())
		return 0;

	/* REVISIT no support yet for deregistering all the IRQs */
	WARN_ON(1);
	return -EIO;
}
/* Device-tree match table for the TWL4030 GPIO sub-device. */
static const struct of_device_id twl_gpio_match[] = {
	{ .compatible = "ti,twl4030-gpio", },
	{ },
};
MODULE_DEVICE_TABLE(of, twl_gpio_match);

/* Note:  this hardware lives inside an I2C-based multi-function device. */
MODULE_ALIAS("platform:twl4030_gpio");

static struct platform_driver gpio_twl4030_driver = {
	.driver = {
		.name	= "twl4030_gpio",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(twl_gpio_match),
	},
	.probe		= gpio_twl4030_probe,
	.remove		= gpio_twl4030_remove,
};

static int __init gpio_twl4030_init(void)
{
	return platform_driver_register(&gpio_twl4030_driver);
}
/* subsys_initcall: GPIOs must exist before the drivers that use them. */
subsys_initcall(gpio_twl4030_init);

static void __exit gpio_twl4030_exit(void)
{
	platform_driver_unregister(&gpio_twl4030_driver);
}
module_exit(gpio_twl4030_exit);
MODULE_AUTHOR("Texas Instruments, Inc.");
MODULE_DESCRIPTION("GPIO interface for TWL4030");
MODULE_LICENSE("GPL");
| gpl-2.0 |
qhuang00/terasic_MTL | arch/arm/mach-pxa/palmz72.c | 1985 | 10497 | /*
* Hardware definitions for Palm Zire72
*
* Authors:
* Vladimir "Farcaller" Pouzanov <farcaller@gmail.com>
* Sergey Lapin <slapin@ossfans.org>
* Alex Osborne <bobofdoom@gmail.com>
* Jan Herman <2hp@seznam.cz>
*
* Rewrite for mainline:
* Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* (find more info at www.hackndev.com)
*
*/
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/gpio.h>
#include <linux/wm97xx.h>
#include <linux/power_supply.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/i2c-gpio.h>
#include <asm/mach-types.h>
#include <asm/suspend.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/pxa27x.h>
#include <mach/audio.h>
#include <mach/palmz72.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/irda-pxaficp.h>
#include <linux/platform_data/keypad-pxa27x.h>
#include <mach/udc.h>
#include <linux/platform_data/asoc-palm27x.h>
#include <mach/palm27x.h>
#include <mach/pm.h>
#include <linux/platform_data/camera-pxa.h>
#include <media/soc_camera.h>
#include "generic.h"
#include "devices.h"
/******************************************************************************
* Pin configuration
******************************************************************************/
/* PXA27x MFP (multi-function pin) configuration for the Palm Zire 72;
 * consumed once at init by pxa2xx_mfp_config() and then discarded
 * (__initdata). */
static unsigned long palmz72_pin_config[] __initdata = {
	/* MMC */
	GPIO32_MMC_CLK,
	GPIO92_MMC_DAT_0,
	GPIO109_MMC_DAT_1,
	GPIO110_MMC_DAT_2,
	GPIO111_MMC_DAT_3,
	GPIO112_MMC_CMD,
	GPIO14_GPIO,	/* SD detect */
	GPIO115_GPIO,	/* SD RO */
	GPIO98_GPIO,	/* SD power */

	/* AC97 */
	GPIO28_AC97_BITCLK,
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,
	GPIO89_AC97_SYSCLK,
	GPIO113_AC97_nRESET,

	/* IrDA */
	GPIO49_GPIO,	/* ir disable */
	GPIO46_FICP_RXD,
	GPIO47_FICP_TXD,

	/* PWM */
	GPIO16_PWM0_OUT,

	/* USB */
	GPIO15_GPIO,	/* usb detect */
	GPIO95_GPIO,	/* usb pullup */

	/* Matrix keypad */
	GPIO100_KP_MKIN_0	| WAKEUP_ON_LEVEL_HIGH,
	GPIO101_KP_MKIN_1	| WAKEUP_ON_LEVEL_HIGH,
	GPIO102_KP_MKIN_2	| WAKEUP_ON_LEVEL_HIGH,
	GPIO97_KP_MKIN_3	| WAKEUP_ON_LEVEL_HIGH,
	GPIO103_KP_MKOUT_0,
	GPIO104_KP_MKOUT_1,
	GPIO105_KP_MKOUT_2,

	/* LCD */
	GPIOxx_LCD_TFT_16BPP,
	GPIO20_GPIO,	/* bl power */
	GPIO21_GPIO,	/* LCD border switch */
	GPIO22_GPIO,	/* LCD border color */
	GPIO96_GPIO,	/* lcd power */

	/* PXA Camera */
	GPIO81_CIF_DD_0,
	GPIO48_CIF_DD_5,
	GPIO50_CIF_DD_3,
	GPIO51_CIF_DD_2,
	GPIO52_CIF_DD_4,
	GPIO53_CIF_MCLK,
	GPIO54_CIF_PCLK,
	GPIO55_CIF_DD_1,
	GPIO84_CIF_FV,
	GPIO85_CIF_LV,
	GPIO93_CIF_DD_6,
	GPIO108_CIF_DD_7,
	GPIO56_GPIO,	/* OV9640 Powerdown */
	GPIO57_GPIO,	/* OV9640 Reset */
	GPIO91_GPIO,	/* OV9640 Power */

	/* I2C */
	GPIO117_GPIO,	/* I2C_SCL */
	GPIO118_GPIO,	/* I2C_SDA */

	/* Misc. */
	GPIO0_GPIO	| WAKEUP_ON_LEVEL_HIGH,	/* power detect */
	GPIO88_GPIO,	/* green led */
	GPIO27_GPIO,	/* WM9712 IRQ */
};
/******************************************************************************
* GPIO keyboard
******************************************************************************/
#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
/* 4x3 matrix keymap: hardware buttons on the Zire 72. */
static const unsigned int palmz72_matrix_keys[] = {
	KEY(0, 0, KEY_POWER),
	KEY(0, 1, KEY_F1),
	KEY(0, 2, KEY_ENTER),

	KEY(1, 0, KEY_F2),
	KEY(1, 1, KEY_F3),
	KEY(1, 2, KEY_F4),

	KEY(2, 0, KEY_UP),
	KEY(2, 2, KEY_DOWN),

	KEY(3, 0, KEY_RIGHT),
	KEY(3, 2, KEY_LEFT),
};

/* NOTE(review): "almz72" looks like a typo for "palmz72"; the name is
 * file-local and used consistently, so it is harmless. */
static struct matrix_keymap_data almz72_matrix_keymap_data = {
	.keymap			= palmz72_matrix_keys,
	.keymap_size		= ARRAY_SIZE(palmz72_matrix_keys),
};

static struct pxa27x_keypad_platform_data palmz72_keypad_platform_data = {
	.matrix_key_rows	= 4,
	.matrix_key_cols	= 3,
	.matrix_keymap_data	= &almz72_matrix_keymap_data,

	.debounce_interval	= 30,	/* ms */
};

static void __init palmz72_kpc_init(void)
{
	pxa_set_keypad_info(&palmz72_keypad_platform_data);
}
#else
/* Keypad support compiled out: no-op stub. */
static inline void palmz72_kpc_init(void) {}
#endif
/******************************************************************************
* LEDs
******************************************************************************/
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
/* Single green LED driven through the leds-gpio driver. */
static struct gpio_led gpio_leds[] = {
	{
		.name			= "palmz72:green:led",
		.default_trigger	= "none",
		.gpio			= GPIO_NR_PALMZ72_LED_GREEN,
	},
};

static struct gpio_led_platform_data gpio_led_info = {
	.leds		= gpio_leds,
	.num_leds	= ARRAY_SIZE(gpio_leds),
};

static struct platform_device palmz72_leds = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &gpio_led_info,
	}
};

static void __init palmz72_leds_init(void)
{
	platform_device_register(&palmz72_leds);
}
#else
/* LED support compiled out: no-op stub. */
static inline void palmz72_leds_init(void) {}
#endif
#ifdef CONFIG_PM

/* We have some black magic here
 * PalmOS ROM on recover expects special struct physical address
 * to be transferred via PSPR. Using this struct PalmOS restores
 * its state after sleep. As for Linux, we need to setup it the
 * same way. More than that, PalmOS ROM changes some values in memory.
 * For now only one location is found, which needs special treatment.
 * Thanks to Alex Osborne, Andrzej Zaborowski, and lots of other people
 * for reading backtraces for me :)
 */
/* Virtual address of the one RAM word the PalmOS ROM clobbers. */
#define PALMZ72_SAVE_DWORD ((unsigned long *)0xc0000050)

static struct palmz72_resume_info palmz72_resume_info = {
	.magic0 = 0xb4e6,
	.magic1 = 1,

	/* reset state, MMU off etc */
	.arm_control = 0,
	.aux_control = 0,
	.ttb = 0,
	.domain_access = 0,
	.process_id = 0,
};

/* Saved copy of *PALMZ72_SAVE_DWORD across suspend. */
static unsigned long store_ptr;

/* syscore_ops for Palm Zire 72 PM */
static int palmz72_pm_suspend(void)
{
	/* setup the resume_info struct for the original bootloader */
	palmz72_resume_info.resume_addr = (u32) cpu_resume;

	/* Storing memory touched by ROM */
	store_ptr = *PALMZ72_SAVE_DWORD;

	/* Setting PSPR to a proper value */
	PSPR = virt_to_phys(&palmz72_resume_info);

	return 0;
}

static void palmz72_pm_resume(void)
{
	/* Restore the word the PalmOS ROM overwrote during sleep. */
	*PALMZ72_SAVE_DWORD = store_ptr;
}

static struct syscore_ops palmz72_pm_syscore_ops = {
	.suspend = palmz72_pm_suspend,
	.resume = palmz72_pm_resume,
};

static int __init palmz72_pm_init(void)
{
	/* This file may be built into a multi-machine kernel; only hook
	 * the syscore ops when actually running on a Zire 72. */
	if (machine_is_palmz72()) {
		register_syscore_ops(&palmz72_pm_syscore_ops);
		return 0;
	}
	return -ENODEV;
}
device_initcall(palmz72_pm_init);
#endif
/******************************************************************************
* SoC Camera
******************************************************************************/
#if defined(CONFIG_SOC_CAMERA_OV9640) || \
	defined(CONFIG_SOC_CAMERA_OV9640_MODULE)
/* PXA quick-capture interface config for the OV9640 sensor. */
static struct pxacamera_platform_data palmz72_pxacamera_platform_data = {
	.flags		= PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
			PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
	.mclk_10khz	= 2600,	/* 26 MHz sensor master clock */
};

/* Board I2C devices. */
static struct i2c_board_info palmz72_i2c_device[] = {
	{
		I2C_BOARD_INFO("ov9640", 0x30),
	}
};
/* Sensor power hook: PWDN is active high, so drive it low to power the
 * module up; allow 50 ms for the rail to settle.  Always succeeds. */
static int palmz72_camera_power(struct device *dev, int power)
{
	gpio_set_value(GPIO_NR_PALMZ72_CAM_PWDN, power ? 0 : 1);
	mdelay(50);
	return 0;
}
/* Sensor reset hook: pulse the RESET line high for 50 ms, then hold it
 * low and wait another 50 ms before use.  Always succeeds. */
static int palmz72_camera_reset(struct device *dev)
{
	gpio_set_value(GPIO_NR_PALMZ72_CAM_RESET, 1);
	mdelay(50);
	gpio_set_value(GPIO_NR_PALMZ72_CAM_RESET, 0);
	mdelay(50);
	return 0;
}
/* soc-camera glue binding the OV9640 to CIF bus 0 via bit-banged I2C. */
static struct soc_camera_link palmz72_iclink = {
	.bus_id		= 0, /* Match id in pxa27x_device_camera in device.c */
	.board_info	= &palmz72_i2c_device[0],
	.i2c_adapter_id	= 0,
	.module_name	= "ov96xx",
	.power		= &palmz72_camera_power,
	.reset		= &palmz72_camera_reset,
	.flags		= SOCAM_DATAWIDTH_8,
};

/* Bit-banged I2C on the sensor's SCL/SDA pins (no hardware i2c-pxa). */
static struct i2c_gpio_platform_data palmz72_i2c_bus_data = {
	.sda_pin	= 118,
	.scl_pin	= 117,
	.udelay		= 10,
	.timeout	= 100,
};

static struct platform_device palmz72_i2c_bus_device = {
	.name		= "i2c-gpio",
	.id		= 0, /* we use this as a replacement for i2c-pxa */
	.dev = {
		.platform_data = &palmz72_i2c_bus_data,
	}
};

static struct platform_device palmz72_camera = {
	.name	= "soc-camera-pdrv",
	.id	= -1,
	.dev	= {
		.platform_data	= &palmz72_iclink,
	},
};

/* Here we request the camera GPIOs and configure them. We power up the camera
 * module, deassert the reset pin, but put it into powerdown (low to no power
 * consumption) mode. This allows us to later bring the module up fast. */
static struct gpio palmz72_camera_gpios[] = {
	{ GPIO_NR_PALMZ72_CAM_POWER,	GPIOF_INIT_HIGH,"Camera DVDD" },
	{ GPIO_NR_PALMZ72_CAM_RESET,	GPIOF_INIT_LOW,	"Camera RESET" },
	{ GPIO_NR_PALMZ72_CAM_PWDN,	GPIOF_INIT_LOW,	"Camera PWDN" },
};
/*
 * Request the camera GPIOs once to latch their initial levels (power
 * on, reset deasserted, powered down), then release them immediately —
 * the soc-camera hooks re-drive them later.  Errors are only logged.
 */
static inline void __init palmz72_cam_gpio_init(void)
{
	if (gpio_request_array(ARRAY_AND_SIZE(palmz72_camera_gpios)))
		printk(KERN_ERR "Camera GPIO init failed!\n");
	else
		gpio_free_array(ARRAY_AND_SIZE(palmz72_camera_gpios));
}
/* Register the whole camera stack: GPIO preconditioning, PXA capture
 * interface, bit-banged I2C bus, and the soc-camera device. */
static void __init palmz72_camera_init(void)
{
	palmz72_cam_gpio_init();
	pxa_set_camera_info(&palmz72_pxacamera_platform_data);
	platform_device_register(&palmz72_i2c_bus_device);
	platform_device_register(&palmz72_camera);
}
#else
/* Camera support compiled out: no-op stub. */
static inline void palmz72_camera_init(void) {}
#endif
/******************************************************************************
* Machine init
******************************************************************************/
/* Board init: configure pins, then bring up each subsystem through the
 * shared palm27x helpers and the local section initializers. */
static void __init palmz72_init(void)
{
	pxa2xx_mfp_config(ARRAY_AND_SIZE(palmz72_pin_config));
	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	palm27x_mmc_init(GPIO_NR_PALMZ72_SD_DETECT_N, GPIO_NR_PALMZ72_SD_RO,
			GPIO_NR_PALMZ72_SD_POWER_N, 1);
	palm27x_lcd_init(-1, &palm_320x320_lcd_mode);
	palm27x_udc_init(GPIO_NR_PALMZ72_USB_DETECT_N,
			GPIO_NR_PALMZ72_USB_PULLUP, 0);
	palm27x_irda_init(GPIO_NR_PALMZ72_IR_DISABLE);
	palm27x_ac97_init(PALMZ72_BAT_MIN_VOLTAGE, PALMZ72_BAT_MAX_VOLTAGE,
			-1, 113);
	palm27x_pwm_init(-1, -1);
	palm27x_power_init(-1, -1);
	palm27x_pmic_init();
	palmz72_kpc_init();
	palmz72_leds_init();
	palmz72_camera_init();
}
/* Machine descriptor: ties the board init above to the PXA27x core
 * map/irq/timer infrastructure. */
MACHINE_START(PALMZ72, "Palm Zire72")
	.atag_offset	= 0x100,
	.map_io		= pxa27x_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa27x_init_irq,
	.handle_irq	= pxa27x_handle_irq,
	.init_time	= pxa_timer_init,
	.init_machine	= palmz72_init,
	.restart	= pxa_restart,
MACHINE_END
| gpl-2.0 |
placiano/NBKernel_Lollipop | tools/perf/ui/gtk/annotate.c | 2241 | 5800 | #include "gtk.h"
#include "util/debug.h"
#include "util/annotate.h"
#include "util/evsel.h"
#include "ui/helpline.h"
/* Column indices for the annotation GtkTreeView; must stay in step
 * with col_names[] below. */
enum {
	ANN_COL__PERCENT,
	ANN_COL__OFFSET,
	ANN_COL__LINE,

	MAX_ANN_COLS
};

/* Column header labels, indexed by the enum above. */
static const char *const col_names[] = {
	"Overhead",
	"Offset",
	"Line"
};
/*
 * Format the per-address overhead percentage for one disasm line into
 * buf as Pango markup (optionally color-wrapped by percentage band).
 * Returns the number of characters written; leaves buf empty and
 * returns 0 for non-instruction lines or addresses with no samples.
 */
static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
				 struct disasm_line *dl, int evidx)
{
	struct sym_hist *symhist;
	double percent = 0.0;
	const char *markup;
	int ret = 0;

	strcpy(buf, "");

	/* offset == -1 marks a source/comment line, not an instruction */
	if (dl->offset == (s64) -1)
		return 0;

	symhist = annotation__histogram(symbol__annotation(sym), evidx);
	if (!symbol_conf.event_group && !symhist->addr[dl->offset])
		return 0;

	percent = 100.0 * symhist->addr[dl->offset] / symhist->sum;

	markup = perf_gtk__get_percent_color(percent);
	if (markup)
		ret += scnprintf(buf, size, "%s", markup);
	ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent);
	if (markup)
		ret += scnprintf(buf + ret, size - ret, "</span>");

	return ret;
}
/*
 * Format the objdump-style absolute address of one disasm line into
 * buf (hex, no prefix).  Returns the number of characters written;
 * leaves buf empty and returns 0 for non-instruction lines.
 */
static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym,
				struct map *map, struct disasm_line *dl)
{
	u64 base;

	buf[0] = '\0';

	/* offset == -1 marks a source/comment line, not an instruction */
	if (dl->offset == (s64) -1)
		return 0;

	base = map__rip_2objdump(map, sym->start);
	return scnprintf(buf, size, "%"PRIx64, base + dl->offset);
}
/*
 * Format the disassembly/source text of one line into buf, escaped for
 * Pango markup; non-instruction lines are wrapped in gray.  Returns
 * the number of characters written, or 0 when escaping fails.
 */
static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
{
	int ret = 0;
	char *line = g_markup_escape_text(dl->line, -1);
	const char *markup = "<span fgcolor='gray'>";

	strcpy(buf, "");

	if (!line)
		return 0;

	/* instruction lines are rendered in the default color */
	if (dl->offset != (s64) -1)
		markup = NULL;

	if (markup)
		ret += scnprintf(buf, size, "%s", markup);
	ret += scnprintf(buf + ret, size - ret, "%s", line);
	if (markup)
		ret += scnprintf(buf + ret, size - ret, "</span>");

	/* g_markup_escape_text() allocated; release our copy */
	g_free(line);

	return ret;
}
/*
 * Build a three-column GtkTreeView (overhead / offset / line) from the
 * symbol's disassembly, attach it to `window`, and free the disasm
 * lines afterwards (the list store keeps its own string copies).
 * Always returns 0.
 */
static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
				struct map *map, struct perf_evsel *evsel,
				struct hist_browser_timer *hbt __maybe_unused)
{
	struct disasm_line *pos, *n;
	struct annotation *notes;
	GType col_types[MAX_ANN_COLS];
	GtkCellRenderer *renderer;
	GtkListStore *store;
	GtkWidget *view;
	int i;
	char s[512];

	notes = symbol__annotation(sym);

	/* every column holds pre-formatted markup text */
	for (i = 0; i < MAX_ANN_COLS; i++) {
		col_types[i] = G_TYPE_STRING;
	}
	store = gtk_list_store_newv(MAX_ANN_COLS, col_types);

	view = gtk_tree_view_new();
	renderer = gtk_cell_renderer_text_new();

	for (i = 0; i < MAX_ANN_COLS; i++) {
		gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
					-1, col_names[i], renderer, "markup",
					i, NULL);
	}

	gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
	/* the view now owns the model; drop our reference */
	g_object_unref(GTK_TREE_MODEL(store));

	list_for_each_entry(pos, &notes->src->source, node) {
		GtkTreeIter iter;
		int ret = 0;

		gtk_list_store_append(store, &iter);

		/* grouped events get one percentage per group member */
		if (perf_evsel__is_group_event(evsel)) {
			for (i = 0; i < evsel->nr_members; i++) {
				ret += perf_gtk__get_percent(s + ret,
							     sizeof(s) - ret,
							     sym, pos,
							     evsel->idx + i);
				ret += scnprintf(s + ret, sizeof(s) - ret, " ");
			}
		} else {
			ret = perf_gtk__get_percent(s, sizeof(s), sym, pos,
						    evsel->idx);
		}

		if (ret)
			gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
		if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos))
			gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
		if (perf_gtk__get_line(s, sizeof(s), pos))
			gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);
	}

	gtk_container_add(GTK_CONTAINER(window), view);

	/* the store holds copies of the strings; free the disasm lines */
	list_for_each_entry_safe(pos, n, &notes->src->source, node) {
		list_del(&pos->node);
		disasm_line__free(pos);
	}

	return 0;
}
/*
 * Annotate one symbol in the GTK UI.  Reuses the active perf GTK
 * context's window/notebook when one exists; otherwise builds a fresh
 * top-level window with notebook, info bar and status bar.  Each
 * annotated symbol becomes a scrolled notebook tab.
 * Returns 0 on success, -1 on annotation or context failure.
 */
int symbol__gtk_annotate(struct symbol *sym, struct map *map,
			 struct perf_evsel *evsel,
			 struct hist_browser_timer *hbt)
{
	GtkWidget *window;
	GtkWidget *notebook;
	GtkWidget *scrolled_window;
	GtkWidget *tab_label;

	/* a prior failed objdump run already warned for this DSO */
	if (map->dso->annotate_warned)
		return -1;

	if (symbol__annotate(sym, map, 0) < 0) {
		ui__error("%s", ui_helpline__current);
		return -1;
	}

	if (perf_gtk__is_active_context(pgctx)) {
		window = pgctx->main_window;
		notebook = pgctx->notebook;
	} else {
		GtkWidget *vbox;
		GtkWidget *infobar;
		GtkWidget *statbar;

		/* route fatal signals through the GTK error handler */
		signal(SIGSEGV, perf_gtk__signal);
		signal(SIGFPE, perf_gtk__signal);
		signal(SIGINT, perf_gtk__signal);
		signal(SIGQUIT, perf_gtk__signal);
		signal(SIGTERM, perf_gtk__signal);

		window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
		gtk_window_set_title(GTK_WINDOW(window), "perf annotate");

		g_signal_connect(window, "delete_event", gtk_main_quit, NULL);

		pgctx = perf_gtk__activate_context(window);
		if (!pgctx)
			return -1;

		vbox = gtk_vbox_new(FALSE, 0);

		notebook = gtk_notebook_new();
		pgctx->notebook = notebook;

		gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);

		infobar = perf_gtk__setup_info_bar();
		if (infobar) {
			gtk_box_pack_start(GTK_BOX(vbox), infobar,
					   FALSE, FALSE, 0);
		}

		statbar = perf_gtk__setup_statusbar();
		gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);

		gtk_container_add(GTK_CONTAINER(window), vbox);
	}

	scrolled_window = gtk_scrolled_window_new(NULL, NULL);
	tab_label = gtk_label_new(sym->name);

	gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
				       GTK_POLICY_AUTOMATIC,
				       GTK_POLICY_AUTOMATIC);

	gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window,
				 tab_label);

	perf_gtk__annotate_symbol(scrolled_window, sym, map, evsel, hbt);
	return 0;
}
/*
 * Show the annotation window built by symbol__gtk_annotate() and run the
 * GTK main loop until the user closes it; then tear down the context.
 * No-op when no GTK context is active.
 */
void perf_gtk__show_annotations(void)
{
	GtkWidget *window;

	if (!perf_gtk__is_active_context(pgctx))
		return;

	window = pgctx->main_window;
	gtk_widget_show_all(window);

	perf_gtk__resize_window(window);
	gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
	/* blocks until gtk_main_quit() fires from the delete_event handler */
	gtk_main();

	perf_gtk__deactivate_context(&pgctx);
}
| gpl-2.0 |
geekboxzone/mmallow_kernel | drivers/staging/rts5139/rts51x_scsi.c | 2241 | 52578 | /* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
* Maintainer:
* Edwin Rong (edwin_rong@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include "debug.h"
#include "rts51x.h"
#include "rts51x_chip.h"
#include "rts51x_scsi.h"
#include "rts51x_card.h"
#include "rts51x_transport.h"
#include "sd_cprm.h"
#include "ms_mg.h"
#include "trace.h"
/*
 * Decode the SCSI opcode of @srb to a human-readable name and print it to
 * the debug log.  Unrecognized opcodes additionally get a hex dump of the
 * first bytes of the CDB.  Debug aid only; never modifies the command.
 *
 * Cleanup: `what' only ever points at string literals, so it is declared
 * const char * — this also removes the ~90 redundant (char *) casts the
 * original needed to silence the literal-to-char* conversion.
 */
void rts51x_scsi_show_command(struct scsi_cmnd *srb)
{
	const char *what;
	int i, unknown_cmd = 0;

	switch (srb->cmnd[0]) {
	case TEST_UNIT_READY:		what = "TEST_UNIT_READY"; break;
	case REZERO_UNIT:		what = "REZERO_UNIT"; break;
	case REQUEST_SENSE:		what = "REQUEST_SENSE"; break;
	case FORMAT_UNIT:		what = "FORMAT_UNIT"; break;
	case READ_BLOCK_LIMITS:		what = "READ_BLOCK_LIMITS"; break;
	case 0x07:			what = "REASSIGN_BLOCKS"; break;
	case READ_6:			what = "READ_6"; break;
	case WRITE_6:			what = "WRITE_6"; break;
	case SEEK_6:			what = "SEEK_6"; break;
	case READ_REVERSE:		what = "READ_REVERSE"; break;
	case WRITE_FILEMARKS:		what = "WRITE_FILEMARKS"; break;
	case SPACE:			what = "SPACE"; break;
	case INQUIRY:			what = "INQUIRY"; break;
	case RECOVER_BUFFERED_DATA:	what = "RECOVER_BUFFERED_DATA"; break;
	case MODE_SELECT:		what = "MODE_SELECT"; break;
	case RESERVE:			what = "RESERVE"; break;
	case RELEASE:			what = "RELEASE"; break;
	case COPY:			what = "COPY"; break;
	case ERASE:			what = "ERASE"; break;
	case MODE_SENSE:		what = "MODE_SENSE"; break;
	case START_STOP:		what = "START_STOP"; break;
	case RECEIVE_DIAGNOSTIC:	what = "RECEIVE_DIAGNOSTIC"; break;
	case SEND_DIAGNOSTIC:		what = "SEND_DIAGNOSTIC"; break;
	case ALLOW_MEDIUM_REMOVAL:	what = "ALLOW_MEDIUM_REMOVAL"; break;
	case SET_WINDOW:		what = "SET_WINDOW"; break;
	case READ_CAPACITY:		what = "READ_CAPACITY"; break;
	case READ_10:			what = "READ_10"; break;
	case WRITE_10:			what = "WRITE_10"; break;
	case SEEK_10:			what = "SEEK_10"; break;
	case WRITE_VERIFY:		what = "WRITE_VERIFY"; break;
	case VERIFY:			what = "VERIFY"; break;
	case SEARCH_HIGH:		what = "SEARCH_HIGH"; break;
	case SEARCH_EQUAL:		what = "SEARCH_EQUAL"; break;
	case SEARCH_LOW:		what = "SEARCH_LOW"; break;
	case SET_LIMITS:		what = "SET_LIMITS"; break;
	case READ_POSITION:		what = "READ_POSITION"; break;
	case SYNCHRONIZE_CACHE:		what = "SYNCHRONIZE_CACHE"; break;
	case LOCK_UNLOCK_CACHE:		what = "LOCK_UNLOCK_CACHE"; break;
	case READ_DEFECT_DATA:		what = "READ_DEFECT_DATA"; break;
	case MEDIUM_SCAN:		what = "MEDIUM_SCAN"; break;
	case COMPARE:			what = "COMPARE"; break;
	case COPY_VERIFY:		what = "COPY_VERIFY"; break;
	case WRITE_BUFFER:		what = "WRITE_BUFFER"; break;
	case READ_BUFFER:		what = "READ_BUFFER"; break;
	case UPDATE_BLOCK:		what = "UPDATE_BLOCK"; break;
	case READ_LONG:			what = "READ_LONG"; break;
	case WRITE_LONG:		what = "WRITE_LONG"; break;
	case CHANGE_DEFINITION:		what = "CHANGE_DEFINITION"; break;
	case WRITE_SAME:		what = "WRITE_SAME"; break;
	case GPCMD_READ_SUBCHANNEL:	what = "READ SUBCHANNEL"; break;
	case READ_TOC:			what = "READ_TOC"; break;
	case GPCMD_READ_HEADER:		what = "READ HEADER"; break;
	case GPCMD_PLAY_AUDIO_10:	what = "PLAY AUDIO (10)"; break;
	case GPCMD_PLAY_AUDIO_MSF:	what = "PLAY AUDIO MSF"; break;
	case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
		what = "GET EVENT/STATUS NOTIFICATION"; break;
	case GPCMD_PAUSE_RESUME:	what = "PAUSE/RESUME"; break;
	case LOG_SELECT:		what = "LOG_SELECT"; break;
	case LOG_SENSE:			what = "LOG_SENSE"; break;
	case GPCMD_STOP_PLAY_SCAN:	what = "STOP PLAY/SCAN"; break;
	case GPCMD_READ_DISC_INFO:	what = "READ DISC INFORMATION"; break;
	case GPCMD_READ_TRACK_RZONE_INFO:
		what = "READ TRACK INFORMATION"; break;
	case GPCMD_RESERVE_RZONE_TRACK:	what = "RESERVE TRACK"; break;
	case GPCMD_SEND_OPC:		what = "SEND OPC"; break;
	case MODE_SELECT_10:		what = "MODE_SELECT_10"; break;
	case GPCMD_REPAIR_RZONE_TRACK:	what = "REPAIR TRACK"; break;
	case 0x59:			what = "READ MASTER CUE"; break;
	case MODE_SENSE_10:		what = "MODE_SENSE_10"; break;
	case GPCMD_CLOSE_TRACK:		what = "CLOSE TRACK/SESSION"; break;
	case 0x5C:			what = "READ BUFFER CAPACITY"; break;
	case 0x5D:			what = "SEND CUE SHEET"; break;
	case GPCMD_BLANK:		what = "BLANK"; break;
	case REPORT_LUNS:		what = "REPORT LUNS"; break;
	case MOVE_MEDIUM:		what = "MOVE_MEDIUM or PLAY AUDIO (12)"; break;
	case READ_12:			what = "READ_12"; break;
	case WRITE_12:			what = "WRITE_12"; break;
	case WRITE_VERIFY_12:		what = "WRITE_VERIFY_12"; break;
	case SEARCH_HIGH_12:		what = "SEARCH_HIGH_12"; break;
	case SEARCH_EQUAL_12:		what = "SEARCH_EQUAL_12"; break;
	case SEARCH_LOW_12:		what = "SEARCH_LOW_12"; break;
	case SEND_VOLUME_TAG:		what = "SEND_VOLUME_TAG"; break;
	case READ_ELEMENT_STATUS:	what = "READ_ELEMENT_STATUS"; break;
	case GPCMD_READ_CD_MSF:		what = "READ CD MSF"; break;
	case GPCMD_SCAN:		what = "SCAN"; break;
	case GPCMD_SET_SPEED:		what = "SET CD SPEED"; break;
	case GPCMD_MECHANISM_STATUS:	what = "MECHANISM STATUS"; break;
	case GPCMD_READ_CD:		what = "READ CD"; break;
	case 0xE1:			what = "WRITE CONTINUE"; break;
	case WRITE_LONG_2:		what = "WRITE_LONG_2"; break;
	case VENDOR_CMND:		what = "Realtek's vendor command"; break;
	default:
		what = "(unknown command)";
		unknown_cmd = 1;
		break;
	}

	/* TEST UNIT READY is polled constantly; keep it out of the log */
	if (srb->cmnd[0] != TEST_UNIT_READY)
		RTS51X_DEBUGP("Command %s (%d bytes)\n", what, srb->cmd_len);

	if (unknown_cmd) {
		RTS51X_DEBUGP("");
		for (i = 0; i < srb->cmd_len && i < 16; i++)
			RTS51X_DEBUGPN(" %02x", srb->cmnd[i]);
		RTS51X_DEBUGPN("\n");
	}
}
/*
 * Translate a driver-internal sense classification into a concrete
 * fixed-format sense record (sense key / ASC / ASCQ) stored for @lun,
 * to be reported by the next REQUEST SENSE from the host.
 */
void rts51x_set_sense_type(struct rts51x_chip *chip, unsigned int lun, int sense_type)
{
	switch (sense_type) {
	case SENSE_TYPE_MEDIA_CHANGE:
		/* UNIT ATTENTION, ASC 0x28: medium may have changed */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x06, 0, 0x28, 0, 0, 0);
		break;
	case SENSE_TYPE_MEDIA_NOT_PRESENT:
		/* NOT READY, ASC 0x3A: medium not present */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x3A, 0, 0, 0);
		break;
	case SENSE_TYPE_MEDIA_LBA_OVER_RANGE:
		/* ILLEGAL REQUEST, ASC 0x21: LBA out of range */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x21, 0, 0, 0);
		break;
	case SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT:
		/* ILLEGAL REQUEST, ASC 0x25: LUN not supported */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x25, 0, 0, 0);
		break;
	case SENSE_TYPE_MEDIA_WRITE_PROTECT:
		/* DATA PROTECT, ASC 0x27: write protected */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x07, 0, 0x27, 0, 0, 0);
		break;
	case SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR:
		/* MEDIUM ERROR, ASC 0x11: unrecovered read error */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x11, 0, 0, 0);
		break;
	case SENSE_TYPE_MEDIA_WRITE_ERR:
		/* MEDIUM ERROR, ASC 0x0C: write error */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x02, 0, 0);
		break;
	case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
		/* invalid field in CDB; also fills the sense-key specific
		 * field pointer (byte 1 of the CDB flagged) */
		rts51x_set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
			       ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
		break;
	case SENSE_TYPE_FORMAT_CMD_FAILED:
		/* MEDIUM ERROR, ASC 0x31: format command failed */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x31, 0x01, 0, 0);
		break;
#ifdef SUPPORT_MAGIC_GATE
	case SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB:
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x02, 0, 0);
		break;
	case SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN:
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x05, 0, 0x6F, 0x00, 0, 0);
		break;
	case SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM:
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x30, 0x00, 0, 0);
		break;
	case SENSE_TYPE_MG_WRITE_ERR:
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0x03, 0, 0x0C, 0x00, 0, 0);
		break;
#endif
	case SENSE_TYPE_NO_SENSE:
	default:
		/* NO SENSE: everything cleared */
		rts51x_set_sense_data(chip, lun, CUR_ERR, 0, 0, 0, 0, 0, 0);
		break;
	}
}
/*
 * Fill the per-LUN fixed-format sense record.
 *
 * @err_code:	response code (CUR_ERR / DEF_ERR)
 * @sense_key:	SCSI sense key
 * @info:	32-bit information field (big-endian in the record)
 * @asc/@ascq:	additional sense code / qualifier
 * @sns_key_info0:	sense-key specific byte 15 flags (C/D, BPV, bit
 *			pointer); 0 means "no sense-key specific data"
 * @sns_key_info1:	16-bit field pointer for bytes 16-17
 *
 * FIX: the original computed sns_key_info[1] as (sns_key_info1 & 0xf0) >> 8,
 * which is always 0 (masking the low byte's high nibble, then shifting it
 * out), and truncated sns_key_info[2] to the low nibble.  The field pointer
 * is a 16-bit big-endian value: store its MSB and LSB.  For the only value
 * the driver passes (1) the stored bytes are unchanged.
 */
void rts51x_set_sense_data(struct rts51x_chip *chip, unsigned int lun, u8 err_code,
		u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
		u16 sns_key_info1)
{
	struct sense_data_t *sense = &(chip->sense_buffer[lun]);

	sense->err_code = err_code;
	sense->sense_key = sense_key;
	sense->info[0] = (u8) (info >> 24);
	sense->info[1] = (u8) (info >> 16);
	sense->info[2] = (u8) (info >> 8);
	sense->info[3] = (u8) info;

	sense->ad_sense_len = sizeof(struct sense_data_t) - 8;
	sense->asc = asc;
	sense->ascq = ascq;

	if (sns_key_info0 != 0) {
		sense->sns_key_info[0] = SKSV | sns_key_info0;
		sense->sns_key_info[1] = (u8) (sns_key_info1 >> 8);
		sense->sns_key_info[2] = (u8) (sns_key_info1 & 0xff);
	}
}
/*
 * Handle TEST UNIT READY: re-probe the card slots, then report not-ready
 * when no card is present, or UNIT ATTENTION once after a media change.
 */
static int test_unit_ready(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	int result = TRANSPORT_GOOD;

	rts51x_init_cards(chip);

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		result = TRANSPORT_FAILED;
	} else if (!check_lun_mc(chip, lun)) {
		/* first TUR after a media change: latch the flag and
		 * report the change exactly once */
		set_lun_mc(chip, lun);
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		result = TRANSPORT_FAILED;
	}

	return result;
}
/*
 * Vendor-specific tail (bytes 36..55) appended to the INQUIRY response when
 * the MemoryStick Pro formatter is exposed; advertises MG/MS capabilities.
 */
unsigned char formatter_inquiry_str[20] = {
	'M', 'E', 'M', 'O', 'R', 'Y', 'S', 'T', 'I', 'C', 'K',
	'-', 'M', 'G',		/* Byte[47:49] */
	0x0B,			/* Byte[50]: MG, MS, MSPro, MSXC */
	0x00,			/* Byte[51]: Category Specific Commands */
	0x00,			/* Byte[52]: Access Control and feature */
	0x20, 0x20, 0x20,	/* Byte[53:55] */
};
/*
 * Handle INQUIRY: return an 8-byte standard header plus vendor/product
 * strings; when the MS formatter applies, extend the response to 56 bytes
 * with formatter_inquiry_str.
 *
 * FIX: rts51x_set_xfer_buf() copies scsi_bufflen() bytes out of `buf', but
 * only `sendbytes' of them were filled in — the vmalloc'd tail leaked
 * uninitialized kernel heap to the host.  Zero the buffer first.
 */
static int inquiry(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	char *inquiry_default = (char *)"Generic-xD/SD/M.S.      1.00 ";
	char *inquiry_string;
	unsigned char sendbytes;
	unsigned char *buf;
	u8 card = rts51x_get_lun_card(chip, lun);
	int pro_formatter_flag = 0;
	unsigned char inquiry_buf[] = {
		QULIFIRE | DRCT_ACCESS_DEV,
		RMB_DISC | 0x0D,
		0x00,
		0x01,
		0x1f,
		0x02,
		0,
		REL_ADR | WBUS_32 | WBUS_16 | SYNC | LINKED | CMD_QUE | SFT_RE,
	};

	inquiry_string = inquiry_default;

	buf = vmalloc(scsi_bufflen(srb));
	if (buf == NULL)
		TRACE_RET(chip, TRANSPORT_ERROR);
	memset(buf, 0, scsi_bufflen(srb));

	/* expose the formatter when the LUN may host a MS card and either
	 * no card or a MS card is inserted */
	if (MS_FORMATTER_ENABLED(chip) && (get_lun2card(chip, lun) & MS_CARD)) {
		if (!card || (card == MS_CARD))
			pro_formatter_flag = 1;
	}

	if (pro_formatter_flag) {
		if (scsi_bufflen(srb) < 56)
			sendbytes = (unsigned char)(scsi_bufflen(srb));
		else
			sendbytes = 56;
	} else {
		if (scsi_bufflen(srb) < 36)
			sendbytes = (unsigned char)(scsi_bufflen(srb));
		else
			sendbytes = 36;
	}

	if (sendbytes > 8) {
		memcpy(buf, inquiry_buf, 8);
		memcpy(buf + 8, inquiry_string, sendbytes - 8);
		if (pro_formatter_flag)
			buf[4] = 0x33;	/* Additional Length */
	} else {
		memcpy(buf, inquiry_buf, sendbytes);
	}

	if (pro_formatter_flag) {
		if (sendbytes > 36)
			memcpy(buf + 36, formatter_inquiry_str, sendbytes - 36);
	}

	scsi_set_resid(srb, 0);
	rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
	vfree(buf);

	return TRANSPORT_GOOD;
}
/*
 * Handle START STOP UNIT.  With the IMMED bit set we acknowledge right
 * away; otherwise STOP is a no-op, UNLOAD ejects a present card, and
 * START/LOAD fail unless a card is ready.
 */
static int start_stop_unit(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);

	scsi_set_resid(srb, scsi_bufflen(srb));

	if (srb->cmnd[1] == 1)	/* IMMED: report success immediately */
		return TRANSPORT_GOOD;

	switch (srb->cmnd[0x4]) {
	case STOP_MEDIUM:
		/* Media disabled: nothing to do */
		return TRANSPORT_GOOD;

	case UNLOAD_MEDIUM:
		/* eject the card if one is actually present */
		if (check_card_ready(chip, lun))
			rts51x_eject_card(chip, lun);
		return TRANSPORT_GOOD;

	case MAKE_MEDIUM_READY:
	case LOAD_MEDIUM:
		if (check_card_ready(chip, lun))
			return TRANSPORT_GOOD;
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	TRACE_RET(chip, TRANSPORT_ERROR);
}
/*
 * Handle PREVENT ALLOW MEDIUM REMOVAL.  The reader cannot lock its media,
 * so PREVENT=1 is rejected as an invalid CDB field; ALLOW succeeds.
 */
static int allow_medium_removal(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	int locking = srb->cmnd[4] & 0x1;

	scsi_set_resid(srb, 0);

	if (!locking)
		return TRANSPORT_GOOD;

	rts51x_set_sense_type(chip, SCSI_LUN(srb),
			      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
	TRACE_RET(chip, TRANSPORT_FAILED);
}
/*
 * Build a MODE SENSE(6)/(10) response for a MemoryStick LUN into @buf.
 *
 * Layout: mode-parameter header (4 bytes for MODE SENSE, 8 for
 * MODE SENSE(10)), then vendor page 0x20, then up to 96 bytes of the raw
 * MSPro attribute/system information.  Every write past the header is
 * guarded by a data_size check so a short host buffer is never overrun.
 */
static void ms_mode_sense(struct rts51x_chip *chip, u8 cmd,
			  int lun, u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int sys_info_offset;
	int data_size = buf_len;
	int support_format = 0;
	int i = 0;

	if (cmd == MODE_SENSE) {
		/* 4-byte header + page 0x20 starts the payload; attribute
		 * data begins at offset 8 */
		sys_info_offset = 8;
		if (data_size > 0x68)
			data_size = 0x68;
		buf[i++] = 0x67;	/* Mode Data Length */
	} else {
		/* MODE SENSE(10): 8-byte header, attribute data at 12 */
		sys_info_offset = 12;
		if (data_size > 0x6C)
			data_size = 0x6C;
		buf[i++] = 0x00;	/* Mode Data Length (MSB) */
		buf[i++] = 0x6A;	/* Mode Data Length (LSB) */
	}

	/* Medium Type Code */
	if (check_card_ready(chip, lun)) {
		if (CHK_MSXC(ms_card)) {
			support_format = 1;
			buf[i++] = 0x40;	/* MS XC */
		} else if (CHK_MSPRO(ms_card)) {
			support_format = 1;
			buf[i++] = 0x20;	/* MS Pro */
		} else {
			buf[i++] = 0x10;	/* legacy MS */
		}

		/* WP */
		if (check_card_wp(chip, lun))
			buf[i++] = 0x80;
		else
			buf[i++] = 0x00;
	} else {
		buf[i++] = 0x00;	/* MediaType */
		buf[i++] = 0x00;	/* WP */
	}

	buf[i++] = 0x00;	/* Reserved */

	if (cmd == MODE_SENSE_10) {
		buf[i++] = 0x00;	/* Reserved */
		buf[i++] = 0x00;	/* Block descriptor length(MSB) */
		buf[i++] = 0x00;	/* Block descriptor length(LSB) */

		/* The Following Data is the content of "Page 0x20" */
		if (data_size >= 9)
			buf[i++] = 0x20;	/* Page Code */
		if (data_size >= 10)
			buf[i++] = 0x62;	/* Page Length */
		if (data_size >= 11)
			buf[i++] = 0x00;	/* No Access Control */
		if (data_size >= 12) {
			if (support_format)
				buf[i++] = 0xC0;	/* SF, SGM */
			else
				buf[i++] = 0x00;
		}
	} else {
		/* The Following Data is the content of "Page 0x20" */
		if (data_size >= 5)
			buf[i++] = 0x20;	/* Page Code */
		if (data_size >= 6)
			buf[i++] = 0x62;	/* Page Length */
		if (data_size >= 7)
			buf[i++] = 0x00;	/* No Access Control */
		if (data_size >= 8) {
			if (support_format)
				buf[i++] = 0xC0;	/* SF, SGM */
			else
				buf[i++] = 0x00;
		}
	}

	if (data_size > sys_info_offset) {
		/* 96 Bytes Attribute Data */
		int len = data_size - sys_info_offset;
		len = (len < 96) ? len : 96;

		memcpy(buf + sys_info_offset, ms_card->raw_sys_info, len);
	}
}
/*
 * Handle MODE SENSE(6)/(10).  Pages 0x3F/0x20 on a MS-capable LUN get the
 * full ms_mode_sense() payload; other accepted pages get a minimal header.
 *
 * FIX: in the MODE SENSE(6) minimal-header branch the "not write
 * protected" case wrote buf[3] instead of buf[2], leaving byte 2 (the
 * device-specific parameter, where the WP bit lives) uninitialized kmalloc
 * memory.  The MODE SENSE(10) branch below always used buf[3] correctly.
 */
static int mode_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	unsigned int dataSize;
	int status;
	int pro_formatter_flag;
	unsigned char pageCode, *buf;
	u8 card = rts51x_get_lun_card(chip, lun);

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		scsi_set_resid(srb, scsi_bufflen(srb));
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	pro_formatter_flag = 0;
	dataSize = 8;
	/* In Combo mode, device responses ModeSense command as a MS LUN
	 * when no card is inserted */
	if ((get_lun2card(chip, lun) & MS_CARD)) {
		if (!card || (card == MS_CARD)) {
			dataSize = 108;
			if (chip->option.rts51x_mspro_formatter_enable)
				pro_formatter_flag = 1;
		}
	}

	buf = kmalloc(dataSize, GFP_KERNEL);
	if (buf == NULL)
		TRACE_RET(chip, TRANSPORT_ERROR);

	pageCode = srb->cmnd[2] & 0x3f;

	if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
	    (pageCode == 0x00) ||
	    (pro_formatter_flag && (pageCode == 0x20))) {
		if (srb->cmnd[0] == MODE_SENSE) {
			if ((pageCode == 0x3F) || (pageCode == 0x20)) {
				ms_mode_sense(chip, srb->cmnd[0], lun, buf,
					      dataSize);
			} else {
				/* minimal 4-byte MODE SENSE(6) header */
				dataSize = 4;
				buf[0] = 0x03;	/* Mode Data Length */
				buf[1] = 0x00;	/* Medium Type Code */
				if (check_card_wp(chip, lun))
					buf[2] = 0x80;	/* WP bit */
				else
					buf[2] = 0x00;	/* was buf[3]: bug */
			}
		} else {
			if ((pageCode == 0x3F) || (pageCode == 0x20)) {
				ms_mode_sense(chip, srb->cmnd[0], lun, buf,
					      dataSize);
			} else {
				/* minimal 8-byte MODE SENSE(10) header */
				dataSize = 8;
				buf[0] = 0x00;
				buf[1] = 0x06;	/* Mode Data Length (LSB) */
				buf[2] = 0x00;
				if (check_card_wp(chip, lun))
					buf[3] = 0x80;	/* WP bit */
				else
					buf[3] = 0x00;
				buf[4] = 0x00;
				buf[5] = 0x00;
				buf[6] = 0x00;
				buf[7] = 0x00;
			}
		}
		status = TRANSPORT_GOOD;
	} else {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		scsi_set_resid(srb, scsi_bufflen(srb));
		status = TRANSPORT_FAILED;
	}

	if (status == TRANSPORT_GOOD) {
		unsigned int len = min(scsi_bufflen(srb), dataSize);
		rts51x_set_xfer_buf(buf, len, srb);
		scsi_set_resid(srb, scsi_bufflen(srb) - len);
	}
	kfree(buf);

	return status;
}
static int request_sense(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
struct sense_data_t *sense;
unsigned int lun = SCSI_LUN(srb);
struct ms_info *ms_card = &(chip->ms_card);
unsigned char *tmp, *buf;
sense = &(chip->sense_buffer[lun]);
if ((rts51x_get_lun_card(chip, lun) == MS_CARD)
&& PRO_UNDER_FORMATTING(ms_card)) {
rts51x_mspro_format_sense(chip, lun);
}
buf = vmalloc(scsi_bufflen(srb));
if (buf == NULL)
TRACE_RET(chip, TRANSPORT_ERROR);
tmp = (unsigned char *)sense;
memcpy(buf, tmp, scsi_bufflen(srb));
rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
vfree(buf);
scsi_set_resid(srb, 0);
/* Reset Sense Data */
rts51x_set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
return TRANSPORT_GOOD;
}
/*
 * Handle READ/WRITE (6- and 10-byte CDBs, plus Realtek's vendor PP_READ10/
 * PP_WRITE10 variant): parse the start LBA and sector count, range-check
 * against the LUN capacity, enforce write protection, and hand the transfer
 * to rts51x_card_rw().
 */
static int read_write(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u32 start_sec;
	u16 sec_cnt;

	if (!check_card_ready(chip, lun) || (chip->capacity[lun] == 0)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (!check_lun_mc(chip, lun)) {
		/* first access after a media change: report UNIT ATTENTION */
		set_lun_mc(chip, lun);
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);

	/* decode LBA and count from whichever CDB form was sent */
	if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
		start_sec =
		    ((u32) srb->cmnd[2] << 24) |
		    ((u32) srb->cmnd[3] << 16) |
		    ((u32) srb->cmnd[4] << 8) |
		    ((u32) srb->cmnd[5]);
		sec_cnt = ((u16) (srb->cmnd[7]) << 8) | srb->cmnd[8];
	} else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
		/* 6-byte CDB: 21-bit LBA, 8-bit count */
		start_sec = ((u32) (srb->cmnd[1] & 0x1F) << 16) |
		    ((u32) srb->cmnd[2] << 8) | ((u32) srb->cmnd[3]);
		sec_cnt = srb->cmnd[4];
	} else if ((srb->cmnd[0] == VENDOR_CMND) &&
		   (srb->cmnd[1] == SCSI_APP_CMD) &&
		   ((srb->cmnd[2] == PP_READ10) ||
		    (srb->cmnd[2] == PP_WRITE10))) {
		/* vendor pass-through variant: fields shifted by 2 bytes */
		start_sec = ((u32) srb->cmnd[4] << 24) |
		    ((u32) srb->cmnd[5] << 16) |
		    ((u32) srb->cmnd[6] << 8) |
		    ((u32) srb->cmnd[7]);
		sec_cnt = ((u16) (srb->cmnd[9]) << 8) | srb->cmnd[10];
	} else {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if ((start_sec > chip->capacity[lun]) ||
	    ((start_sec + sec_cnt) > chip->capacity[lun])) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (sec_cnt == 0) {
		/* zero-length transfer: trivially successful */
		scsi_set_resid(srb, 0);
		return TRANSPORT_GOOD;
	}

	if ((srb->sc_data_direction == DMA_TO_DEVICE)
	    && check_card_wp(chip, lun)) {
		RTS51X_DEBUGP("Write protected card!\n");
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_WRITE_PROTECT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	retval = rts51x_card_rw(srb, chip, start_sec, sec_cnt);
	if (retval != STATUS_SUCCESS) {
		/* pick the sense matching the transfer direction */
		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			rts51x_set_sense_type(chip, lun,
					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_WRITE_ERR);
		}
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
/*
 * Handle READ FORMAT CAPACITIES: return the capacity list — one current/
 * maximum descriptor, plus a second formattable-media descriptor when the
 * MSPro formatter is exposed on this LUN.  Unlike most commands this is
 * also answered with "no medium" descriptors when no card is present, so
 * formatting tools can query the slot.
 */
static int read_format_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned char *buf;
	unsigned int lun = SCSI_LUN(srb);
	unsigned int buf_len;
	u8 card = rts51x_get_lun_card(chip, lun);
	int desc_cnt;
	int i = 0;

	if (!check_card_ready(chip, lun)) {
		if (!chip->option.rts51x_mspro_formatter_enable) {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_NOT_PRESENT);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	/* 12 bytes = header + one descriptor; 0x14 = header + two */
	buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;

	buf = kmalloc(buf_len, GFP_KERNEL);
	if (buf == NULL)
		TRACE_RET(chip, TRANSPORT_ERROR);

	buf[i++] = 0;
	buf[i++] = 0;
	buf[i++] = 0;

	/* Capacity List Length */
	if ((buf_len > 12) && chip->option.rts51x_mspro_formatter_enable &&
	    (chip->lun2card[lun] & MS_CARD) && (!card || (card == MS_CARD))) {
		buf[i++] = 0x10;	/* two 8-byte descriptors */
		desc_cnt = 2;
	} else {
		buf[i++] = 0x08;	/* one 8-byte descriptor */
		desc_cnt = 1;
	}

	while (desc_cnt) {
		if (check_card_ready(chip, lun)) {
			buf[i++] = (unsigned char)((chip->capacity[lun]) >> 24);
			buf[i++] = (unsigned char)((chip->capacity[lun]) >> 16);
			buf[i++] = (unsigned char)((chip->capacity[lun]) >> 8);
			buf[i++] = (unsigned char)(chip->capacity[lun]);

			if (desc_cnt == 2)
				/* Byte[8]: Descriptor Type: Formatted medium */
				buf[i++] = 2;
			else
				buf[i++] = 0;	/* Byte[16] */
		} else {
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;
			buf[i++] = 0xFF;

			if (desc_cnt == 2)
				/* Byte[8]: Descriptor Type: No medium */
				buf[i++] = 3;
			else
				buf[i++] = 0;	/*Byte[16] */
		}

		/* block length: 0x000200 = 512 bytes */
		buf[i++] = 0x00;
		buf[i++] = 0x02;
		buf[i++] = 0x00;

		desc_cnt--;
	}

	buf_len = min(scsi_bufflen(srb), buf_len);
	rts51x_set_xfer_buf(buf, buf_len, srb);
	kfree(buf);

	scsi_set_resid(srb, scsi_bufflen(srb) - buf_len);

	return TRANSPORT_GOOD;
}
/*
 * Handle READ CAPACITY(10): return the last LBA (capacity - 1) and a fixed
 * 512-byte block length, both big-endian.
 *
 * FIX: the original passed scsi_bufflen() as the transfer length, copying
 * past the end of the 8-byte kmalloc'd response whenever the host buffer
 * was larger — a kernel heap overread.  Clamp the transfer to 8 bytes and
 * report the remainder as residue.
 */
static int read_capacity(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned char *buf;
	unsigned int lun = SCSI_LUN(srb);
	unsigned int len;

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (!check_lun_mc(chip, lun)) {
		/* first access after a media change: report UNIT ATTENTION */
		set_lun_mc(chip, lun);
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_CHANGE);
		return TRANSPORT_FAILED;
	}

	buf = kmalloc(8, GFP_KERNEL);
	if (buf == NULL)
		TRACE_RET(chip, TRANSPORT_ERROR);

	/* returned address is the LAST LBA, i.e. capacity - 1 */
	buf[0] = (unsigned char)((chip->capacity[lun] - 1) >> 24);
	buf[1] = (unsigned char)((chip->capacity[lun] - 1) >> 16);
	buf[2] = (unsigned char)((chip->capacity[lun] - 1) >> 8);
	buf[3] = (unsigned char)(chip->capacity[lun] - 1);

	/* block length: 512 bytes */
	buf[4] = 0x00;
	buf[5] = 0x00;
	buf[6] = 0x02;
	buf[7] = 0x00;

	len = min(scsi_bufflen(srb), (unsigned int)8);
	rts51x_set_xfer_buf(buf, len, srb);
	kfree(buf);

	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	return TRANSPORT_GOOD;
}
/*
 * Vendor GET_DEV_STATUS: fill a 32-byte status block via rts51x_pp_status()
 * and return as much of it as the host buffer can hold.
 */
static int get_dev_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	u8 status[32] = { 0 };
	unsigned int lun = SCSI_LUN(srb);
	unsigned int copied;

	rts51x_pp_status(chip, lun, status, 32);

	copied = min(scsi_bufflen(srb), (unsigned int)sizeof(status));
	rts51x_set_xfer_buf(status, copied, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - copied);

	return TRANSPORT_GOOD;
}
/*
 * Vendor READ_STATUS: fill a 16-byte chip status block via
 * rts51x_read_status() and return as much as the host buffer can hold.
 */
static int read_status(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	u8 rts51x_status[16];
	unsigned int lun = SCSI_LUN(srb);
	unsigned int copied;

	rts51x_read_status(chip, lun, rts51x_status, 16);

	copied = min(scsi_bufflen(srb), (unsigned int)sizeof(rts51x_status));
	rts51x_set_xfer_buf(rts51x_status, copied, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - copied);

	return TRANSPORT_GOOD;
}
/*
 * Vendor READ_MEM: read `len' chip registers starting at `addr' via EP0 and
 * return them to the host.  Addresses below 0xe000 are filtered (silently
 * reported as success without touching hardware).
 *
 * FIX: the original clamped `len' to the host buffer only AFTER reading the
 * registers, and a zero length made vmalloc(0) return NULL, which was then
 * misreported as TRANSPORT_ERROR.  Clamp first and treat len == 0 as a
 * successful empty transfer.
 */
static int read_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	unsigned short addr, len, i;
	int retval;
	u8 *buf;

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);

	addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
	len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];

	if (addr < 0xe000) {
		RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
		return TRANSPORT_GOOD;
	}

	len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
	if (len == 0) {
		scsi_set_resid(srb, scsi_bufflen(srb));
		return TRANSPORT_GOOD;
	}

	buf = vmalloc(len);
	if (!buf)
		TRACE_RET(chip, TRANSPORT_ERROR);

	for (i = 0; i < len; i++) {
		retval = rts51x_ep0_read_register(chip, addr + i, buf + i);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			rts51x_set_sense_type(chip, lun,
					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	rts51x_set_xfer_buf(buf, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	vfree(buf);

	return TRANSPORT_GOOD;
}
/*
 * Vendor WRITE_MEM: write `len' bytes from the host buffer into chip
 * registers starting at `addr' via EP0.  Addresses below 0xe000 are
 * filtered (reported as success without touching hardware).
 *
 * FIX: a zero length (empty host buffer or zero count in the CDB) made
 * vmalloc(0) return NULL, which was misreported as TRANSPORT_ERROR.
 * Treat len == 0 as a successful empty transfer.
 */
static int write_mem(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	unsigned short addr, len, i;
	int retval;
	u8 *buf;

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);

	addr = ((u16) srb->cmnd[2] << 8) | srb->cmnd[3];
	len = ((u16) srb->cmnd[4] << 8) | srb->cmnd[5];

	if (addr < 0xe000) {
		RTS51X_DEBUGP("filter!addr=0x%x\n", addr);
		return TRANSPORT_GOOD;
	}

	len = (unsigned short)min(scsi_bufflen(srb), (unsigned int)len);
	if (len == 0) {
		scsi_set_resid(srb, scsi_bufflen(srb));
		return TRANSPORT_GOOD;
	}

	buf = vmalloc(len);
	if (!buf)
		TRACE_RET(chip, TRANSPORT_ERROR);

	rts51x_get_xfer_buf(buf, len, srb);

	for (i = 0; i < len; i++) {
		retval =
		    rts51x_ep0_write_register(chip, addr + i, 0xFF, buf[i]);
		if (retval != STATUS_SUCCESS) {
			vfree(buf);
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	vfree(buf);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	return TRANSPORT_GOOD;
}
/*
 * Vendor GET_SD_CSD: return the raw CSD register of the inserted SD card.
 * Fails with "medium not present" when no card is ready and with an
 * unrecovered-read sense when the LUN's card is not SD.
 *
 * NOTE(review): the transfer copies scsi_bufflen() bytes out of
 * sd_card->raw_csd; this assumes the host never requests more than the
 * raw_csd array holds (its size is declared elsewhere) — verify, otherwise
 * this overreads like the read_capacity/get_card_bus_width paths did.
 */
static int get_sd_csd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	struct sd_info *sd_card = &(chip->sd_card);
	unsigned int lun = SCSI_LUN(srb);

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (rts51x_get_lun_card(chip, lun) != SD_CARD) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	rts51x_set_xfer_buf(sd_card->raw_csd, scsi_bufflen(srb), srb);

	return TRANSPORT_GOOD;
}
/*
 * Vendor READ_PHY: read `cnt' PHY registers starting at cmnd[5] and return
 * them to the host.  A zero count is a successful no-op.
 */
static int read_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	u8 reg_addr, cnt, off;
	u8 *data;
	int ret;

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);

	reg_addr = srb->cmnd[5];
	cnt = srb->cmnd[7];
	if (!cnt)
		return TRANSPORT_GOOD;

	data = vmalloc(cnt);
	if (!data)
		TRACE_RET(chip, TRANSPORT_ERROR);

	for (off = 0; off < cnt; off++) {
		ret = rts51x_read_phy_register(chip, reg_addr + off,
					       data + off);
		if (ret != STATUS_SUCCESS) {
			vfree(data);
			rts51x_set_sense_type(chip, SCSI_LUN(srb),
					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	/* never hand back more than the host buffer can take */
	cnt = min(scsi_bufflen(srb), (unsigned int)cnt);
	rts51x_set_xfer_buf(data, cnt, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - cnt);
	vfree(data);

	return TRANSPORT_GOOD;
}
/*
 * Vendor WRITE_PHY: write bytes from the host buffer into `cnt' consecutive
 * PHY registers starting at cmnd[5].  A zero count is a successful no-op.
 */
static int write_phy_register(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	u8 reg_addr, cnt, off;
	u8 *data;
	int ret;

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);

	reg_addr = srb->cmnd[5];
	cnt = srb->cmnd[7];
	if (!cnt)
		return TRANSPORT_GOOD;

	/* never read more than the host actually supplied */
	cnt = min(scsi_bufflen(srb), (unsigned int)cnt);

	data = vmalloc(cnt);
	if (data == NULL)
		TRACE_RET(chip, TRANSPORT_ERROR);

	rts51x_get_xfer_buf(data, cnt, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - cnt);

	for (off = 0; off < cnt; off++) {
		ret = rts51x_write_phy_register(chip, reg_addr + off,
						data[off]);
		if (ret != STATUS_SUCCESS) {
			vfree(data);
			rts51x_set_sense_type(chip, SCSI_LUN(srb),
					      SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	vfree(data);

	return TRANSPORT_GOOD;
}
/*
 * Vendor GET_BUS_WIDTH: return the 1-byte bus width of the inserted SD/MS
 * card.
 *
 * FIX: the original passed scsi_bufflen() as the transfer length while the
 * source is a single byte on the stack — any host buffer larger than one
 * byte caused a stack overread.  Clamp the transfer to sizeof(bus_width).
 */
static int get_card_bus_width(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	unsigned int len;
	u8 card, bus_width;

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	card = rts51x_get_lun_card(chip, lun);
	if ((card == SD_CARD) || (card == MS_CARD)) {
		bus_width = chip->card_bus_width[lun];
	} else {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	len = min(scsi_bufflen(srb), (unsigned int)sizeof(bus_width));
	rts51x_set_xfer_buf(&bus_width, len, srb);
	scsi_set_resid(srb, scsi_bufflen(srb) - len);

	return TRANSPORT_GOOD;
}
#ifdef _MSG_TRACE
/*
 * Vendor TRACE_MSG: dump the driver's in-memory trace ring to the host.
 * cmnd[2] != 0 additionally clears the ring after dumping.  Fails when the
 * host buffer is too small for the fixed-size trace payload.
 */
static int trace_msg_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned char *buf = NULL;
	u8 clear;
	unsigned int buf_len;

	/* 4-byte header + one fixed-size record per trace slot */
	buf_len =
	    4 +
	    ((2 + MSG_FUNC_LEN + MSG_FILE_LEN + TIME_VAL_LEN) * TRACE_ITEM_CNT);

	if ((scsi_bufflen(srb) < buf_len) || (scsi_sglist(srb) == NULL)) {
		rts51x_set_sense_type(chip, SCSI_LUN(srb),
				      SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	clear = srb->cmnd[2];

	buf = vmalloc(scsi_bufflen(srb));
	if (buf == NULL)
		TRACE_RET(chip, TRANSPORT_ERROR);

	rts51x_trace_msg(chip, buf, clear);

	rts51x_set_xfer_buf(buf, scsi_bufflen(srb), srb);
	vfree(buf);

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#endif
/*
 * Vendor SUIT_CMD batch-command engine: builds and runs a register batch.
 *   INIT_BATCHCMD  - reset the command buffer
 *   ADD_BATCHCMD   - append one read/write/check entry (type in cmnd[4])
 *   SEND_BATCHCMD  - execute the batch; with STAGE_R also fetch a response
 *   GET_BATCHRSP   - return one byte of the last response buffer
 */
static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);
	int retval = STATUS_SUCCESS;
	u8 cmd_type, mask, value, idx, mode, len;
	u16 addr;
	u32 timeout;

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);

	switch (srb->cmnd[3]) {
	case INIT_BATCHCMD:
		rts51x_init_cmd(chip);
		break;

	case ADD_BATCHCMD:
		cmd_type = srb->cmnd[4];
		if (cmd_type > 2) {
			rts51x_set_sense_type(chip, lun,
					SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		addr = (srb->cmnd[5] << 8) | srb->cmnd[6];
		mask = srb->cmnd[7];
		value = srb->cmnd[8];
		rts51x_add_cmd(chip, cmd_type, addr, mask, value);
		break;

	case SEND_BATCHCMD:
		mode = srb->cmnd[4];
		len = srb->cmnd[5];
		/* CDB-supplied timeout applies to the response stage only;
		 * the send itself uses a fixed 1000ms */
		timeout = ((u32) srb->cmnd[6] << 24) |
			  ((u32) srb->cmnd[7] << 16) |
			  ((u32) srb->cmnd[8] << 8) |
			  ((u32) srb->cmnd[9]);
		retval = rts51x_send_cmd(chip, mode, 1000);
		if (retval != STATUS_SUCCESS) {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_WRITE_ERR);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		if (mode & STAGE_R) {
			retval = rts51x_get_rsp(chip, len, timeout);
			if (retval != STATUS_SUCCESS) {
				rts51x_set_sense_type(chip, lun,
					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
				TRACE_RET(chip, TRANSPORT_FAILED);
			}
		}
		break;

	case GET_BATCHRSP:
		idx = srb->cmnd[4];
		value = chip->rsp_buf[idx];
		if (scsi_bufflen(srb) < 1) {
			rts51x_set_sense_type(chip, lun,
					SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		rts51x_set_xfer_buf(&value, 1, srb);
		scsi_set_resid(srb, 0);
		break;

	default:
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (retval != STATUS_SUCCESS) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return TRANSPORT_GOOD;
}
/*
 * Dispatch the SUIT_CMD sub-opcode (cmnd[3]); every batch-command variant
 * goes to rw_mem_cmd_buf(), anything else is a transport error.
 */
static int suit_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	switch (srb->cmnd[3]) {
	case INIT_BATCHCMD:
	case ADD_BATCHCMD:
	case SEND_BATCHCMD:
	case GET_BATCHRSP:
		return rw_mem_cmd_buf(srb, chip);
	default:
		return TRANSPORT_ERROR;
	}
}
/*
 * Dispatch SCSI_APP_CMD sub-commands (cmnd[2]) of the vendor opcode:
 * pass-through read/write, batch suite, PHY register access and device
 * status.  Unknown sub-commands fail with an invalid-CDB-field sense.
 */
static int app_cmd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	int result;

	switch (srb->cmnd[2]) {
	case PP_READ10:
	case PP_WRITE10:
		result = read_write(srb, chip);
		break;

	case SUIT_CMD:
		result = suit_cmd(srb, chip);
		break;

	case READ_PHY:
		result = read_phy_register(srb, chip);
		break;

	case WRITE_PHY:
		result = write_phy_register(srb, chip);
		break;

	case GET_DEV_STATUS:
		result = get_dev_status(srb, chip);
		break;

	default:
		rts51x_set_sense_type(chip, SCSI_LUN(srb),
				      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	return result;
}
/*
 * vendor_cmnd - dispatch a Realtek vendor opcode (CDB byte 1).
 *
 * Unknown opcodes set INVALID CDB FIELD sense data and fail the
 * transport.
 */
static int vendor_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	switch (srb->cmnd[1]) {
	case READ_STATUS:
		return read_status(srb, chip);
	case READ_MEM:
		return read_mem(srb, chip);
	case WRITE_MEM:
		return write_mem(srb, chip);
	case GET_BUS_WIDTH:
		return get_card_bus_width(srb, chip);
	case GET_SD_CSD:
		return get_sd_csd(srb, chip);
#ifdef _MSG_TRACE
	case TRACE_MSG:
		return trace_msg_cmd(srb, chip);
#endif
	case SCSI_APP_CMD:
		return app_cmd(srb, chip);
	default:
		break;
	}

	rts51x_set_sense_type(chip, SCSI_LUN(srb),
			      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
	TRACE_RET(chip, TRANSPORT_FAILED);
}
/*
 * ms_format_cmnd - format a Memory Stick Pro card.
 *
 * Validates the "MGfmt" ASCII signature in CDB bytes 3..7, the card
 * state (present, writable, MS Pro), then runs the on-card format.
 * Bit 0 of CDB byte 8 selects a full format; otherwise a quick format
 * is performed.
 */
static int ms_format_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int retval, quick_format;

	if (rts51x_get_lun_card(chip, lun) != MS_CARD) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Bytes 3..7 must be 0x4D 0x47 0x66 0x6D 0x74 ("MGfmt"). */
	if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
	    (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
	    (srb->cmnd[7] != 0x74)) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Bit set means "full format"; clear means "quick format". */
	quick_format = !(srb->cmnd[8] & 0x01);

	if (!(chip->card_ready & MS_CARD)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (chip->card_wp & MS_CARD) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_WRITE_PROTECT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (!CHK_MSPRO(ms_card)) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);

	retval = rts51x_mspro_format(srb, chip, MS_SHORT_DATA_LEN,
				     quick_format);
	if (retval != STATUS_SUCCESS) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#ifdef SUPPORT_PCGL_1P18
/*
 * get_ms_information - answer the PCGL "GET Memory Stick Media
 * Information" command.
 *
 * Validates the CDB (byte 2 == 0xB0, "MSID" signature in bytes 4..7),
 * checks that the media is an MS Pro card and that the requested
 * device-information ID is compatible with it, then builds the
 * response: a 12-byte header followed by either the raw system
 * information (IDs 0x10/0x13) or the raw model name.
 *
 * Returns STATUS_SUCCESS, or TRANSPORT_FAILED/TRANSPORT_ERROR with
 * sense data set.
 */
static int get_ms_information(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	u8 dev_info_id, data_len;
	u8 *buf;
	unsigned int buf_len;
	int i;

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if ((rts51x_get_lun_card(chip, lun) != MS_CARD)) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	/* Bytes 4..7 must spell the ASCII signature "MSID". */
	if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
	    (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
	    (srb->cmnd[7] != 0x44)) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	dev_info_id = srb->cmnd[3];
	/* ID 0x10 is invalid on MSXC media, 0x13 on non-MSXC;
	 * non-Pro media supports none of them. */
	if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
	    (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
	    !CHK_MSPRO(ms_card)) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	if (dev_info_id == 0x15)
		buf_len = data_len = 0x3A;
	else
		buf_len = data_len = 0x6A;

	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf)
		TRACE_RET(chip, TRANSPORT_ERROR);

	i = 0;
	/* GET Memory Stick Media Information Response Header */
	buf[i++] = 0x00;	/* Data length MSB */
	buf[i++] = data_len;	/* Data length LSB */
	/* Device Information Type Code */
	if (CHK_MSXC(ms_card))
		buf[i++] = 0x03;
	else
		buf[i++] = 0x02;
	/* SGM bit */
	buf[i++] = 0x01;
	/* Reserved */
	buf[i++] = 0x00;
	buf[i++] = 0x00;
	buf[i++] = 0x00;
	/* Number of Device Information */
	buf[i++] = 0x01;
	/* Device Information Body
	 * Device Information ID Number */
	buf[i++] = dev_info_id;
	/* Device Information Length */
	if (dev_info_id == 0x15)
		data_len = 0x31;
	else
		data_len = 0x61;
	buf[i++] = 0x00;	/* Data length MSB */
	buf[i++] = data_len;	/* Data length LSB */
	/* Valid Bit */
	buf[i++] = 0x80;

	/*
	 * BUG FIX: the 12-byte header plus a full payload (96 bytes of
	 * system info for IDs 0x10/0x13, or 48 bytes of model name for
	 * ID 0x15) is 2 bytes larger than buf_len, so the original
	 * unclamped memcpy() wrote past the end of the allocation.
	 * Only buf_len bytes are ever transferred to the host below,
	 * so clamping the copy length removes the overflow without
	 * changing what the host sees.
	 */
	if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
		/* System Information */
		unsigned int copy_len = 96;

		if (copy_len > buf_len - i)
			copy_len = buf_len - i;
		memcpy(buf + i, ms_card->raw_sys_info, copy_len);
	} else {
		/* Model Name */
		unsigned int copy_len = 48;

		if (copy_len > buf_len - i)
			copy_len = buf_len - i;
		memcpy(buf + i, ms_card->raw_model_name, copy_len);
	}

	rts51x_set_xfer_buf(buf, buf_len, srb);

	if (dev_info_id == 0x15)
		scsi_set_resid(srb, scsi_bufflen(srb) - 0x3C);
	else
		scsi_set_resid(srb, scsi_bufflen(srb) - 0x6C);

	kfree(buf);
	return STATUS_SUCCESS;
}
#endif
static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
int retval = TRANSPORT_ERROR;
if (srb->cmnd[2] == MS_FORMAT)
retval = ms_format_cmnd(srb, chip);
#ifdef SUPPORT_PCGL_1P18
else if (srb->cmnd[2] == GET_MS_INFORMATION)
retval = get_ms_information(srb, chip);
#endif
return retval;
}
#ifdef SUPPORT_CPRM
/*
 * sd_extention_cmnd - dispatch a CPRM SD pass-through opcode.
 *
 * Wakes the device, flushes pending SD work, verifies that a ready SD
 * card backs the LUN, then routes on the opcode itself (CDB byte 0).
 */
static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	unsigned int lun = SCSI_LUN(srb);

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);
	rts51x_sd_cleanup_work(chip);

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if ((rts51x_get_lun_card(chip, lun) != SD_CARD)) {
		rts51x_set_sense_type(chip, lun,
				      SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	switch (srb->cmnd[0]) {
	case SD_PASS_THRU_MODE:
		return rts51x_sd_pass_thru_mode(srb, chip);
	case SD_EXECUTE_NO_DATA:
		return rts51x_sd_execute_no_data(srb, chip);
	case SD_EXECUTE_READ:
		return rts51x_sd_execute_read_data(srb, chip);
	case SD_EXECUTE_WRITE:
		return rts51x_sd_execute_write_data(srb, chip);
	case SD_GET_RSP:
		return rts51x_sd_get_cmd_rsp(srb, chip);
	case SD_HW_RST:
		return rts51x_sd_hw_rst(srb, chip);
	default:
		break;
	}

	rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
	TRACE_RET(chip, TRANSPORT_FAILED);
}
#endif
#ifdef SUPPORT_MAGIC_GATE
/*
 * mg_report_key - handle a MagicGate REPORT KEY command for an MS Pro
 * card.
 *
 * After waking the device and validating card state and CDB framing,
 * dispatches on the key-format class (low six bits of CDB byte 10).
 * Each sub-case insists on the exact allocation length encoded in CDB
 * bytes 8/9 matching the actual buffer length before touching the card.
 *
 * Returns TRANSPORT_GOOD on success; TRANSPORT_FAILED with sense data
 * set otherwise.
 */
static int mg_report_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u8 key_format;

	/* Bring the reader out of power-saving and flush deferred MS work. */
	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);
	rts51x_ms_cleanup_work(chip);

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if ((rts51x_get_lun_card(chip, lun) != MS_CARD)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	/* Only the MS Pro MagicGate key class is supported. */
	if (srb->cmnd[7] != KC_MG_R_PRO) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (!CHK_MSPRO(ms_card)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Key-format class: low six bits of CDB byte 10. */
	key_format = srb->cmnd[10] & 0x3F;
	switch (key_format) {
	case KF_GET_LOC_EKB:
		/* Local EKB: buffer must be exactly 0x41C bytes. */
		if ((scsi_bufflen(srb) == 0x41C) &&
		    (srb->cmnd[8] == 0x04) && (srb->cmnd[9] == 0x1C)) {
			retval = rts51x_mg_get_local_EKB(srb, chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, TRANSPORT_FAILED);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;
	case KF_RSP_CHG:
		/* Challenge response: buffer must be exactly 0x24 bytes. */
		if ((scsi_bufflen(srb) == 0x24) &&
		    (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x24)) {
			retval = rts51x_mg_get_rsp_chg(srb, chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, TRANSPORT_FAILED);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;
	case KF_GET_ICV:
		/* NOTE(review): mg_entry_num is recorded before the
		 * (srb->cmnd[5] < 32) range check below validates it;
		 * a rejected CDB still leaves the value stored. */
		ms_card->mg_entry_num = srb->cmnd[5];
		if ((scsi_bufflen(srb) == 0x404) &&
		    (srb->cmnd[8] == 0x04) &&
		    (srb->cmnd[9] == 0x04) &&
		    (srb->cmnd[2] == 0x00) &&
		    (srb->cmnd[3] == 0x00) &&
		    (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
			retval = rts51x_mg_get_ICV(srb, chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, TRANSPORT_FAILED);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;
	default:
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
/*
 * mg_send_key - handle a MagicGate SEND KEY command for an MS Pro card.
 *
 * Mirrors mg_report_key() for the host-to-card direction: wakes the
 * device, checks that a ready, writable MS Pro card backs the LUN,
 * validates CDB framing, then dispatches on the key-format class.
 * Every sub-case requires the buffer length to match the allocation
 * length in CDB bytes 8/9 exactly.
 *
 * Returns TRANSPORT_GOOD on success; TRANSPORT_FAILED with sense data
 * set otherwise.
 */
static int mg_send_key(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int retval;
	u8 key_format;

	rts51x_prepare_run(chip);
	RTS51X_SET_STAT(chip, STAT_RUN);
	rts51x_ms_cleanup_work(chip);

	if (!check_card_ready(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	/* SEND KEY writes to the card, so a write-protected card fails. */
	if (check_card_wp(chip, lun)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_PROTECT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if ((rts51x_get_lun_card(chip, lun) != MS_CARD)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LUN_NOT_SUPPORT);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (srb->cmnd[7] != KC_MG_R_PRO) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}
	if (!CHK_MSPRO(ms_card)) {
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	/* Key-format class: low six bits of CDB byte 10. */
	key_format = srb->cmnd[10] & 0x3F;
	switch (key_format) {
	case KF_SET_LEAF_ID:
		/* Leaf ID: payload must be exactly 0x0C bytes. */
		if ((scsi_bufflen(srb) == 0x0C) &&
		    (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
			retval = rts51x_mg_set_leaf_id(srb, chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, TRANSPORT_FAILED);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;
	case KF_CHG_HOST:
		/* Host challenge: payload must be exactly 0x0C bytes. */
		if ((scsi_bufflen(srb) == 0x0C) &&
		    (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
			retval = rts51x_mg_chg(srb, chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, TRANSPORT_FAILED);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;
	case KF_RSP_HOST:
		/* Host response: payload must be exactly 0x0C bytes. */
		if ((scsi_bufflen(srb) == 0x0C) &&
		    (srb->cmnd[8] == 0x00) && (srb->cmnd[9] == 0x0C)) {
			retval = rts51x_mg_rsp(srb, chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, TRANSPORT_FAILED);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;
	case KF_SET_ICV:
		/* NOTE(review): as in mg_report_key(), mg_entry_num is
		 * stored before the (srb->cmnd[5] < 32) check validates it. */
		ms_card->mg_entry_num = srb->cmnd[5];
		if ((scsi_bufflen(srb) == 0x404) &&
		    (srb->cmnd[8] == 0x04) &&
		    (srb->cmnd[9] == 0x04) &&
		    (srb->cmnd[2] == 0x00) &&
		    (srb->cmnd[3] == 0x00) &&
		    (srb->cmnd[4] == 0x00) && (srb->cmnd[5] < 32)) {
			retval = rts51x_mg_set_ICV(srb, chip);
			if (retval != STATUS_SUCCESS)
				TRACE_RET(chip, TRANSPORT_FAILED);
		} else {
			rts51x_set_sense_type(chip, lun,
					      SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
		break;
	default:
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		TRACE_RET(chip, TRANSPORT_FAILED);
	}

	scsi_set_resid(srb, 0);
	return TRANSPORT_GOOD;
}
#endif
/*
 * rts51x_scsi_handler - top-level SCSI command dispatcher for the reader.
 *
 * While a Memory Stick format is in progress, everything except
 * REQUEST SENSE and INQUIRY is rejected with "Logical Unit Not Ready,
 * Format in Progress" sense data carrying the current progress value.
 * Otherwise the opcode in CDB byte 0 is routed to the matching handler.
 *
 * Returns a TRANSPORT_* status; on TRANSPORT_FAILED the sense data has
 * already been set by the handler.
 */
int rts51x_scsi_handler(struct scsi_cmnd *srb, struct rts51x_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	unsigned int lun = SCSI_LUN(srb);
	int result = TRANSPORT_GOOD;

	if ((rts51x_get_lun_card(chip, lun) == MS_CARD) &&
	    (ms_card->format_status == FORMAT_IN_PROGRESS)) {
		if ((srb->cmnd[0] != REQUEST_SENSE)
		    && (srb->cmnd[0] != INQUIRY)) {
			/* Logical Unit Not Ready Format in Progress */
			rts51x_set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
					      0, (u16) (ms_card->progress));
			TRACE_RET(chip, TRANSPORT_FAILED);
		}
	}

	switch (srb->cmnd[0]) {
	case READ_10:
	case WRITE_10:
	case READ_6:
	case WRITE_6:
		result = read_write(srb, chip);
		break;
	case TEST_UNIT_READY:
		result = test_unit_ready(srb, chip);
		break;
	case INQUIRY:
		result = inquiry(srb, chip);
		break;
	case READ_CAPACITY:
		result = read_capacity(srb, chip);
		break;
	case START_STOP:
		result = start_stop_unit(srb, chip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		result = allow_medium_removal(srb, chip);
		break;
	case REQUEST_SENSE:
		result = request_sense(srb, chip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		result = mode_sense(srb, chip);
		break;
	case 0x23:
		/* READ FORMAT CAPACITIES has no symbolic constant here. */
		result = read_format_capacity(srb, chip);
		break;
	case VENDOR_CMND:
		result = vendor_cmnd(srb, chip);
		break;
	case MS_SP_CMND:
		result = ms_sp_cmnd(srb, chip);
		break;
#ifdef SUPPORT_CPRM
	case SD_PASS_THRU_MODE:
	case SD_EXECUTE_NO_DATA:
	case SD_EXECUTE_READ:
	case SD_EXECUTE_WRITE:
	case SD_GET_RSP:
	case SD_HW_RST:
		result = sd_extention_cmnd(srb, chip);
		break;
#endif
#ifdef SUPPORT_MAGIC_GATE
	case CMD_MSPRO_MG_RKEY:
		result = mg_report_key(srb, chip);
		break;
	case CMD_MSPRO_MG_SKEY:
		result = mg_send_key(srb, chip);
		break;
#endif
	case FORMAT_UNIT:
	case MODE_SELECT:
	case VERIFY:
		/* Accepted but intentionally treated as no-ops. */
		result = TRANSPORT_GOOD;
		break;
	default:
		rts51x_set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
		result = TRANSPORT_FAILED;
	}

	return result;
}
/***********************************************************************
* Host functions
***********************************************************************/
/*
 * slave_alloc - per-device setup called when the midlayer creates a
 * scsi_device for us.
 */
int slave_alloc(struct scsi_device *sdev)
{
	/*
	 * Clamp INQUIRY responses to the standard 36 bytes; the extra
	 * data is unused and many devices choke when asked for more or
	 * less than that.
	 */
	sdev->inquiry_len = 36;
	return 0;
}
/*
 * slave_configure - finish per-device setup once INQUIRY data is known.
 */
int slave_configure(struct scsi_device *sdev)
{
	/*
	 * Every scatter-gather buffer except the last must be a multiple
	 * of the bulk maxpacket size (512 for high speed), or a data
	 * packet comes up short and ends the transfer early.  Using
	 * 512-byte DMA alignment on the queue guarantees this, because
	 * apart from the first and last segments, scatter-gather buffers
	 * follow page boundaries.
	 */
	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));

	/*
	 * Force at least SCSI-2 so the midlayer issues 10-byte commands
	 * to devices that report level 0/1 but actually require them
	 * (ATAPI devices behind bridges, broken INQUIRY data, ...).
	 * /dev/sg users still see the raw value; /proc and sysfs may
	 * show either, depending on where they read it from.
	 */
	if (sdev->scsi_level < SCSI_2) {
		sdev->scsi_level = SCSI_2;
		sdev->sdev_target->scsi_level = SCSI_2;
	}

	return 0;
}
/***********************************************************************
* /proc/scsi/ functions
***********************************************************************/
/* we use this macro to help us write into the buffer */
#undef SPRINTF
#define SPRINTF(args...) seq_printf(m, ##args)
/*
 * write_info - /proc write handler; input is accepted and discarded.
 */
static int write_info(struct Scsi_Host *host, char *buffer, int length)
{
	/* Claim the whole write so the caller never retries. */
	return length;
}
/*
 * show_info - /proc read handler: identify controller, vendor, product
 * and driver build.
 */
static int show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, " Host scsi%d: %s\n", host->host_no, RTS51X_NAME);
	seq_printf(m, " Vendor: Realtek Corp.\n");
	seq_printf(m, " Product: RTS51xx USB Card Reader\n");
	seq_printf(m, " Version: %s\n", DRIVER_VERSION);
	seq_printf(m, " Build: %s\n", __TIME__);
	return 0;
}
/* queue a command */
/* This is always called with scsi_lock(host) held */
/*
 * queuecommand_lck - accept one SCSI command from the midlayer.
 *
 * Runs with the host lock held (wrapped by DEF_SCSI_QCMD below).
 * chip->srb acts as a single-slot queue: a non-NULL value means the
 * control thread is still working on a previous command, so we push
 * back with SCSI_MLQUEUE_HOST_BUSY.  During disconnect the command is
 * completed immediately with DID_NO_CONNECT.
 */
int queuecommand_lck(struct scsi_cmnd *srb, void (*done) (struct scsi_cmnd *))
{
	struct rts51x_chip *chip = host_to_rts51x(srb->device->host);

	/* check for state-transition errors */
	if (chip->srb != NULL) {
		RTS51X_DEBUGP("Error in %s: chip->srb = %p\n",
			      __func__, chip->srb);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* fail the command if we are disconnecting */
	if (test_bit(FLIDX_DISCONNECTING, &chip->usb->dflags)) {
		RTS51X_DEBUGP("Fail command during disconnect\n");
		srb->result = DID_NO_CONNECT << 16;
		done(srb);
		return 0;
	}

	/* enqueue the command and wake up the control thread */
	srb->scsi_done = done;
	chip->srb = srb;
	complete(&chip->usb->cmnd_ready);

	return 0;
}
DEF_SCSI_QCMD(queuecommand)
/***********************************************************************
* Error handling functions
***********************************************************************/
/* Command timeout and abort */
/*
 * command_abort - eh_abort_handler: abort the in-flight command.
 *
 * Returns SUCCESS once the control thread signals completion of the
 * aborted command, or FAILED if the command is no longer active.
 */
int command_abort(struct scsi_cmnd *srb)
{
	struct rts51x_chip *chip = host_to_rts51x(srb->device->host);

	RTS51X_DEBUGP("%s called\n", __func__);

	/* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
	 * bits are protected by the host lock. */
	scsi_lock(rts51x_to_host(chip));

	/* Is this command still active? */
	if (chip->srb != srb) {
		scsi_unlock(rts51x_to_host(chip));
		RTS51X_DEBUGP("-- nothing to abort\n");
		return FAILED;
	}

	/* Set the TIMED_OUT bit.  Also set the ABORTING bit, but only if
	 * a device reset isn't already in progress (to avoid interfering
	 * with the reset).  Note that we must retain the host lock while
	 * calling usb_stor_stop_transport(); otherwise it might interfere
	 * with an auto-reset that begins as soon as we release the lock. */
	set_bit(FLIDX_TIMED_OUT, &chip->usb->dflags);
	if (!test_bit(FLIDX_RESETTING, &chip->usb->dflags)) {
		set_bit(FLIDX_ABORTING, &chip->usb->dflags);
		/* rts51x_stop_transport(us); */
	}
	scsi_unlock(rts51x_to_host(chip));

	/* Wait for the aborted command to finish */
	wait_for_completion(&chip->usb->notify);

	return SUCCESS;
}
/* This invokes the transport reset mechanism to reset the state of the
* device */
/*
 * device_reset - eh_device_reset_handler stub.
 *
 * No transport-level reset is implemented, so this always reports
 * SUCCESS after logging the call.
 */
static int device_reset(struct scsi_cmnd *srb)
{
	RTS51X_DEBUGP("%s called\n", __func__);
	return SUCCESS;
}
/* Simulate a SCSI bus reset by resetting the device's USB port. */
/*
 * bus_reset - eh_bus_reset_handler stub.
 *
 * A real implementation would reset the device's USB port; currently
 * this only logs the call and reports SUCCESS.
 */
int bus_reset(struct scsi_cmnd *srb)
{
	RTS51X_DEBUGP("%s called\n", __func__);
	return SUCCESS;
}
/*
 * rts5139_info - fixed adapter description shown by the SCSI midlayer.
 */
static const char *rts5139_info(struct Scsi_Host *host)
{
	return "SCSI emulation for RTS5139 USB card reader";
}
/*
 * SCSI host template for the RTS51xx reader.  The device is a single
 * emulated HBA that processes one command at a time (can_queue == 1,
 * cmd_per_lun == 1); error handling is delegated to the handlers above.
 */
struct scsi_host_template rts51x_host_template = {
	/* basic userland interface stuff */
	.name = RTS51X_NAME,
	.proc_name = RTS51X_NAME,
	.show_info = show_info,
	.write_info = write_info,
	.info = rts5139_info,
	/* command interface -- queued only */
	.queuecommand = queuecommand,
	/* error and abort handlers */
	.eh_abort_handler = command_abort,
	.eh_device_reset_handler = device_reset,
	.eh_bus_reset_handler = bus_reset,
	/* queue commands only, only one command per LUN */
	.can_queue = 1,
	.cmd_per_lun = 1,
	/* unknown initiator id */
	.this_id = -1,
	.slave_alloc = slave_alloc,
	.slave_configure = slave_configure,
	/* lots of sg segments can be handled */
	.sg_tablesize = SG_ALL,
	/* limit the total size of a transfer to 120 KB */
	.max_sectors = 240,
	/* merge commands... this seems to help performance, but
	 * periodically someone should test to see which setting is more
	 * optimal.
	 */
	.use_clustering = 1,
	/* emulated HBA */
	.emulated = 1,
	/* we do our own delay after a device or bus reset */
	.skip_settle_delay = 1,
	/* sysfs device attributes */
	/* .sdev_attrs = sysfs_device_attr_list, */
	/* module management */
	.module = THIS_MODULE
};
| gpl-2.0 |
kozmikkick/KozmiKG2 | arch/powerpc/platforms/85xx/p4080_ds.c | 4545 | 2211 | /*
* P4080 DS Setup
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* Copyright 2009 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <asm/ehv_pic.h>
#include "corenet_ds.h"
/*
* Called very early, device-tree isn't unflattened
*/
/*
 * p4080_ds_probe - identify the P4080 DS board.
 *
 * Called very early, before the device tree is unflattened, so only
 * flat-DT accessors may be used.  Returns 1 when the root node is
 * compatible with "fsl,P4080DS" (bare metal) or "fsl,P4080DS-hv"
 * (running under the Freescale hypervisor); in the hypervisor case the
 * interrupt/reset callbacks are redirected to the ehv_pic / hypervisor
 * services first.
 */
static int __init p4080_ds_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
#ifdef CONFIG_SMP
	extern struct smp_ops_t smp_85xx_ops;
#endif

	if (of_flat_dt_is_compatible(root, "fsl,P4080DS"))
		return 1;

	/* Check if we're running under the Freescale hypervisor */
	if (of_flat_dt_is_compatible(root, "fsl,P4080DS-hv")) {
		ppc_md.init_IRQ = ehv_pic_init;
		ppc_md.get_irq = ehv_pic_get_irq;
		ppc_md.restart = fsl_hv_restart;
		ppc_md.power_off = fsl_hv_halt;
		ppc_md.halt = fsl_hv_halt;
#ifdef CONFIG_SMP
		/*
		 * Disable the timebase sync operations because we can't write
		 * to the timebase registers under the hypervisor.
		 */
		smp_85xx_ops.give_timebase = NULL;
		smp_85xx_ops.take_timebase = NULL;
#endif
		return 1;
	}

	return 0;
}
/* Machine description: glues the probe above to the shared CoreNet DS
 * setup/IRQ code and the standard 85xx restart/idle helpers. */
define_machine(p4080_ds) {
	.name = "P4080 DS",
	.probe = p4080_ds_probe,
	.setup_arch = corenet_ds_setup_arch,
	.init_IRQ = corenet_ds_pic_init,
#ifdef CONFIG_PCI
	.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
	.get_irq = mpic_get_coreint_irq,
	.restart = fsl_rstcr_restart,
	.calibrate_decr = generic_calibrate_decr,
	.progress = udbg_progress,
	.power_save = e500_idle,
};
machine_device_initcall(p4080_ds, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier);
#endif
| gpl-2.0 |
DirtyUnicorns/android_kernel_samsung_ks01lte | drivers/net/ethernet/i825xx/3c527.c | 4801 | 43116 | /* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
*
* (c) Copyright 1998 Red Hat Software Inc
* Written by Alan Cox.
* Further debugging by Carl Drougge.
* Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
* Heavily modified by Richard Procter <rnp@paradise.net.nz>
*
* Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
* (for the MCA stuff) written by Wim Dumon.
*
* Thanks to 3Com for making this possible by providing me with the
* documentation.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#define DRV_NAME "3c527"
#define DRV_VERSION "0.7-SMP"
#define DRV_RELDATE "2003/09/21"
static const char *version =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
/**
* DOC: Traps for the unwary
*
* The diagram (Figure 1-1) and the POS summary disagree with the
* "Interrupt Level" section in the manual.
*
* The manual contradicts itself when describing the minimum number
* buffers in the 'configure lists' command.
* My card accepts a buffer config of 4/4.
*
* Setting the SAV BP bit does not save bad packets, but
* only enables RX on-card stats collection.
*
* The documentation in places seems to miss things. In actual fact
* I've always eventually found everything is documented, it just
* requires careful study.
*
* DOC: Theory Of Operation
*
* The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
* amount of on board intelligence that housekeeps a somewhat dumber
* Intel NIC. For performance we want to keep the transmit queue deep
* as the card can transmit packets while fetching others from main
* memory by bus master DMA. Transmission and reception are driven by
* circular buffer queues.
*
* The mailboxes can be used for controlling how the card traverses
* its buffer rings, but are used only for initial setup in this
* implementation. The exec mailbox allows a variety of commands to
* be executed. Each command must complete before the next is
* executed. Primarily we use the exec mailbox for controlling the
* multicast lists. We have to do a certain amount of interesting
* hoop jumping as the multicast list changes can occur in interrupt
* state when the card has an exec command pending. We defer such
* events until the command completion interrupt.
*
* A copy break scheme (taken from 3c59x.c) is employed whereby
* received frames exceeding a configurable length are passed
* directly to the higher networking layers without incuring a copy,
* in what amounts to a time/space trade-off.
*
* The card also keeps a large amount of statistical information
* on-board. In a perfect world, these could be used safely at no
* cost. However, lacking information to the contrary, processing
* them without races would involve so much extra complexity as to
* make it unworthwhile to do so. In the end, a hybrid SW/HW
* implementation was made necessary --- see mc32_update_stats().
*
* DOC: Notes
*
* It should be possible to use two or more cards, but at this stage
* only by loading two copies of the same module.
*
* The on-board 82586 NIC has trouble receiving multiple
* back-to-back frames and so is likely to drop packets from fast
* senders.
**/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/mca-legacy.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/ethtool.h>
#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/dma.h>
#include "3c527.h"
MODULE_LICENSE("GPL");
/*
* The name of the card. Is used for messages and in the requests for
* io regions, irqs and dma channels
*/
static const char* cardname = DRV_NAME;
/* use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 2
#endif
static unsigned int mc32_debug = NET_DEBUG;
/* The number of low I/O ports used by the ethercard. */
#define MC32_IO_EXTENT 8
/* As implemented, values must be a power-of-2 -- 4/8/16/32 */
#define TX_RING_LEN 32 /* Typically the card supports 37 */
#define RX_RING_LEN 8 /* " " " */
/* Copy break point, see above for details.
* Setting to > 1512 effectively disables this feature. */
#define RX_COPYBREAK 200 /* Value from 3c59x.c */
/* Issue the 82586 workaround command - this is for "busy lans", but
* basically means for all lans now days - has a performance (latency)
* cost, but best set. */
static const int WORKAROUND_82586=1;
/* Pointers to buffers and their on-card records */
/* One host-side ring slot: the card-visible descriptor plus the host
 * sk_buff currently attached to it. */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* on-card descriptor record */
	struct sk_buff *skb;		/* host buffer for this slot */
};
/* Information that needs to be kept for each board. */
struct mc32_local
{
int slot;
u32 base;
volatile struct mc32_mailbox *rx_box;
volatile struct mc32_mailbox *tx_box;
volatile struct mc32_mailbox *exec_box;
volatile struct mc32_stats *stats; /* Start of on-card statistics */
u16 tx_chain; /* Transmit list start offset */
u16 rx_chain; /* Receive list start offset */
u16 tx_len; /* Transmit list count */
u16 rx_len; /* Receive list count */
u16 xceiver_desired_state; /* HALTED or RUNNING */
u16 cmd_nonblocking; /* Thread is uninterested in command result */
u16 mc_reload_wait; /* A multicast load request is pending */
u32 mc_list_valid; /* True when the mclist is set */
struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */
struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */
atomic_t tx_count; /* buffers left */
atomic_t tx_ring_head; /* index to tx en-queue end */
u16 tx_ring_tail; /* index to tx de-queue end */
u16 rx_ring_tail; /* index to rx de-queue end */
struct semaphore cmd_mutex; /* Serialises issuing of execute commands */
struct completion execution_cmd; /* Card has completed an execute command */
struct completion xceiver_cmd; /* Card has completed a tx or rx command */
};
/* The station (ethernet) address prefix, used for a sanity check. */
#define SA_ADDR0 0x02
#define SA_ADDR1 0x60
#define SA_ADDR2 0xAC
/* MCA POS adapter id / human-readable name pair. */
struct mca_adapters_t {
	unsigned int id;	/* POS adapter id to probe for */
	char *name;		/* name registered with the MCA core */
};

/* Supported adapters; the zero id terminates the probe loop. */
static const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }
};
/* Macros for ring index manipulations */
/* Advance an Rx ring index with wraparound; RX_RING_LEN is a power of
 * two, so masking is equivalent to modulo.  (The stray ';' after the
 * body was an extra file-scope semicolon, invalid in ISO C.) */
static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); }
/* Step an Rx ring index backwards with wraparound (power-of-two mask).
 * (The stray ';' after the body was an extra file-scope semicolon,
 * invalid in ISO C.) */
static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); }
/* Advance a Tx ring index with wraparound (power-of-two mask).
 * (The stray ';' after the body was an extra file-scope semicolon,
 * invalid in ISO C.) */
static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); }
/* Index to functions, as function prototypes. */
static int mc32_probe1(struct net_device *dev, int ioaddr);
static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
static int mc32_open(struct net_device *dev);
static void mc32_timeout(struct net_device *dev);
static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t mc32_interrupt(int irq, void *dev_id);
static int mc32_close(struct net_device *dev);
static struct net_device_stats *mc32_get_stats(struct net_device *dev);
static void mc32_set_multicast_list(struct net_device *dev);
static void mc32_reset_multicast_list(struct net_device *dev);
static const struct ethtool_ops netdev_ethtool_ops;
/*
 * cleanup_card - undo everything mc32_probe1() claimed for this device:
 * return the MCA slot, drop the IRQ and release the I/O region.
 */
static void cleanup_card(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);

	mca_mark_as_unused(lp->slot);
	mca_set_adapter_name(lp->slot, NULL);
	free_irq(dev->irq, dev);
	release_region(dev->base_addr, MC32_IO_EXTENT);
}
/**
* mc32_probe - Search for supported boards
* @unit: interface number to use
*
* Because MCA bus is a real bus and we can scan for cards we could do a
* single scan for all boards here. Right now we use the passed in device
* structure and scan for only one board. This needs fixing for modules
* in particular.
*/
struct net_device *__init mc32_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
	static int current_mca_slot = -1;
	int i;
	int err;

	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* A non-negative unit pins the interface name (ethN). */
	if (unit >= 0)
		sprintf(dev->name, "eth%d", unit);

	/* Do not check any supplied i/o locations.
	   POS registers usually don't fail :) */

	/* MCA cards have POS registers.
	   Autodetecting MCA cards is extremely simple.
	   Just search for the card. */

	for(i = 0; (mc32_adapters[i].name != NULL); i++) {
		current_mca_slot =
			mca_find_unused_adapter(mc32_adapters[i].id, 0);

		if(current_mca_slot != MCA_NOTFOUND) {
			if(!mc32_probe1(dev, current_mca_slot))
			{
				/* Probe succeeded: claim the slot, then try
				 * to register; on failure release everything
				 * probe1 acquired before freeing the netdev. */
				mca_set_adapter_name(current_mca_slot,
						mc32_adapters[i].name);
				mca_mark_as_used(current_mca_slot);
				err = register_netdev(dev);
				if (err) {
					cleanup_card(dev);
					free_netdev(dev);
					dev = ERR_PTR(err);
				}
				return dev;
			}
		}
	}
	/* No supported adapter found in any slot. */
	free_netdev(dev);
	return ERR_PTR(-ENODEV);
}
/* net_device callbacks; MTU and MAC handling use the generic ethernet
 * helpers, everything else is driver-specific. */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= mc32_open,
	.ndo_stop		= mc32_close,
	.ndo_start_xmit		= mc32_send_packet,
	.ndo_get_stats		= mc32_get_stats,
	.ndo_set_rx_mode	= mc32_set_multicast_list,
	.ndo_tx_timeout		= mc32_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/**
 * mc32_probe1 - Check a given slot for a board and test the card
 * @dev: Device structure to fill in
 * @slot: The MCA bus slot being used by this card
 *
 * Decode the slot data and configure the card structures. Having done this we
 * can reset the card and configure it. The card does a full self test cycle
 * in firmware so we have to wait for it to return and post us either a
 * failure case or some addresses we use to find the board internals.
 *
 * Returns 0 on success, or a negative errno.  On failure all resources
 * acquired here (I/O region, IRQ) are released again.
 *
 * Fix versus the previous revision: the request_irq() failure branch
 * called release_region() inline and then jumped to err_exit_ports,
 * which released the very same region a second time.  The inline call
 * has been removed; the error label now does the single release.
 */
static int __init mc32_probe1(struct net_device *dev, int slot)
{
	static unsigned version_printed;
	int i, err;
	u8 POS;
	u32 base;
	struct mc32_local *lp = netdev_priv(dev);
	static const u16 mca_io_bases[] = {
		0x7280,0x7290,
		0x7680,0x7690,
		0x7A80,0x7A90,
		0x7E80,0x7E90
	};
	static const u32 mca_mem_bases[] = {
		0x00C0000,
		0x00C4000,
		0x00C8000,
		0x00CC000,
		0x00D0000,
		0x00D4000,
		0x00D8000,
		0x00DC000
	};
	/* Self-test failure strings, indexed by the (non-zero) code the
	 * firmware posts in the first mailbox byte after reset. */
	static const char * const failures[] = {
		"Processor instruction",
		"Processor data bus",
		"Processor data bus",
		"Processor data bus",
		"Adapter bus",
		"ROM checksum",
		"Base RAM",
		"Extended RAM",
		"82586 internal loopback",
		"82586 initialisation failure",
		"Adapter list configuration error"
	};

	/* Time to play MCA games */

	if (mc32_debug && version_printed++ == 0)
		pr_debug("%s", version);

	pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);

	/* POS register 2: bit 0 = card enabled, bits 1-3 select the I/O
	 * base, bits 4-6 select the shared memory base. */
	POS = mca_read_stored_pos(slot, 2);

	if(!(POS&1))
	{
		pr_cont("disabled.\n");
		return -ENODEV;
	}

	/* Fill in the 'dev' fields. */
	dev->base_addr = mca_io_bases[(POS>>1)&7];
	dev->mem_start = mca_mem_bases[(POS>>4)&7];

	POS = mca_read_stored_pos(slot, 4);
	if(!(POS&1))
	{
		pr_cont("memory window disabled.\n");
		return -ENODEV;
	}

	/* POS register 5: bits 4-5 encode the window size, bits 2-3 the IRQ */
	POS = mca_read_stored_pos(slot, 5);

	i=(POS>>4)&3;
	if(i==3)
	{
		pr_cont("invalid memory window.\n");
		return -ENODEV;
	}

	/* Window size: 16K, 32K or 48K */
	i*=16384;
	i+=16384;

	dev->mem_end=dev->mem_start + i;

	dev->irq = ((POS>>2)&3)+9;

	if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
	{
		pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
		return -EBUSY;
	}

	pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
		dev->base_addr, dev->irq, dev->mem_start, i/1024);


	/* We ought to set the cache line size here.. */


	/*
	 *	Go PROM browsing
	 */

	/* Retrieve and print the ethernet address. */
	for (i = 0; i < 6; i++)
	{
		mca_write_pos(slot, 6, i+12);
		mca_write_pos(slot, 7, 0);

		dev->dev_addr[i] = mca_read_pos(slot,3);
	}

	pr_info("%s: Address %pM ", dev->name, dev->dev_addr);

	mca_write_pos(slot, 6, 0);
	mca_write_pos(slot, 7, 0);

	/* Medium selection is reported in bit 1 of POS register 4 */
	POS = mca_read_stored_pos(slot, 4);

	if(POS&2)
		pr_cont(": BNC port selected.\n");
	else
		pr_cont(": AUI port selected.\n");

	/* Hard-reset the card with interrupts masked, then release reset */
	POS=inb(dev->base_addr+HOST_CTRL);
	POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
	POS&=~HOST_CTRL_INTE;
	outb(POS, dev->base_addr+HOST_CTRL);
	/* Reset adapter */
	udelay(100);
	/* Reset off */
	POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
	outb(POS, dev->base_addr+HOST_CTRL);

	udelay(300);

	/*
	 *	Grab the IRQ
	 */

	err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
	if (err) {
		pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
		/* err_exit_ports releases the I/O region exactly once;
		 * the old inline release_region() here caused a double
		 * release of dev->base_addr. */
		goto err_exit_ports;
	}

	memset(lp, 0, sizeof(struct mc32_local));
	lp->slot = slot;

	i=0;

	/* Poll for the firmware self-test result; 0xFF means "still busy".
	 * Give up after roughly a second (1000 x 1ms). */
	base = inb(dev->base_addr);

	while(base == 0xFF)
	{
		i++;
		if(i == 1000)
		{
			pr_err("%s: failed to boot adapter.\n", dev->name);
			err = -ENODEV;
			goto err_exit_irq;
		}
		udelay(1000);
		if(inb(dev->base_addr+2)&(1<<5))
			base = inb(dev->base_addr);
	}

	if(base>0)
	{
		/* Non-zero code = self-test failure (see failures[] above) */
		if(base < 0x0C)
			pr_err("%s: %s%s.\n", dev->name, failures[base-1],
				base<0x0A?" test failure":"");
		else
			pr_err("%s: unknown failure %d.\n", dev->name, base);
		err = -ENODEV;
		goto err_exit_irq;
	}

	/* Read the 32-bit exec mailbox offset one byte at a time, waiting
	 * for the "data ready" bit (bit 5 of port +2) before each read. */
	base=0;
	for(i=0;i<4;i++)
	{
		int n=0;

		while(!(inb(dev->base_addr+2)&(1<<5)))
		{
			n++;
			udelay(50);
			if(n>100)
			{
				pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
				err = -ENODEV;
				goto err_exit_irq;
			}
		}

		base|=(inb(dev->base_addr)<<(8*i));
	}

	lp->exec_box=isa_bus_to_virt(dev->mem_start+base);

	base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];

	lp->base = dev->mem_start+base;

	/* The exec mailbox publishes the offsets of the other mailboxes
	 * and the on-card statistics block. */
	lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
	lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);

	lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);

	/*
	 *	Descriptor chains (card relative)
	 */

	lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
	lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
	lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
	lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */

	/* cmd_mutex starts locked; mc32_open() releases it once the card
	 * is allowed to accept commands. */
	sema_init(&lp->cmd_mutex, 0);
	init_completion(&lp->execution_cmd);
	init_completion(&lp->xceiver_cmd);

	pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
		dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);

	dev->netdev_ops		= &netdev_ops;
	dev->watchdog_timeo	= HZ*5;	/* Board does all the work */
	dev->ethtool_ops	= &netdev_ethtool_ops;

	return 0;

err_exit_irq:
	free_irq(dev->irq, dev);
err_exit_ports:
	release_region(dev->base_addr, MC32_IO_EXTENT);
	return err;
}
/**
 * mc32_ready_poll - wait until we can feed it a command
 * @dev: The device to wait for
 *
 * Busy-wait until the card raises HOST_STATUS_CRR, i.e. the command
 * register is free to take another command.  This says nothing about
 * completion of previously issued commands and normally returns almost
 * immediately.
 */
static inline void mc32_ready_poll(struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	for (;;) {
		if (inb(ioaddr + HOST_STATUS) & HOST_STATUS_CRR)
			break;
	}
}
/**
 * mc32_command_nowait - send a command non blocking
 * @dev: The 3c527 to issue the command to
 * @cmd: The command word to write to the mailbox
 * @data: A data block if the command expects one
 * @len: Length of the data block
 *
 * Send a command from interrupt state. If there is a command
 * currently being executed then we return an error of -1. It
 * simply isn't viable to wait around as commands may be
 * slow. This can theoretically be starved on SMP, but it's hard
 * to see a realistic situation. We do not wait for the command
 * to complete --- we rely on the interrupt handler to tidy up
 * after us.
 *
 * Returns 0 if the command was queued, -1 if the exec mailbox was
 * already owned by someone else.
 */
static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = -1;

	/* Non-blocking claim of the exec mailbox */
	if (down_trylock(&lp->cmd_mutex) == 0)
	{
		/* Flag so the interrupt handler knows it must release
		 * cmd_mutex itself (no thread is sleeping on a completion) */
		lp->cmd_nonblocking=1;
		lp->exec_box->mbox=0;
		lp->exec_box->mbox=cmd;
		memcpy((void *)lp->exec_box->data, data, len);
		barrier();	/* the memcpy forgot the volatile so be sure */

		/* Send the command */
		mc32_ready_poll(dev);
		/* Kick the card; it will interrupt us when the exec
		 * command has been processed */
		outb(1<<6, ioaddr+HOST_CMD);

		ret = 0;

		/* Interrupt handler will signal mutex on completion */
	}

	return ret;
}
/**
 * mc32_command - send a command and sleep until completion
 * @dev: The 3c527 card to issue the command to
 * @cmd: The command word to write to the mailbox
 * @data: A data block if the command expects one
 * @len: Length of the data block
 *
 * Sends exec commands in a user context. This permits us to wait around
 * for the replies and also to wait for the command buffer to complete
 * from a previous command before we execute our command. After our
 * command completes we will attempt any pending multicast reload
 * we blocked off by hogging the exec buffer.
 *
 * You feed the card a command, you wait, it interrupts you get a
 * reply. All well and good. The complication arises because you use
 * commands for filter list changes which come in at bh level from things
 * like IPV6 group stuff.
 *
 * Returns 0 on success, -1 if the card flagged the command as failed.
 * May sleep; must not be called from interrupt context.
 */
static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = 0;

	down(&lp->cmd_mutex);

	/*
	 *     My Turn
	 */

	/* Blocking mode: the interrupt handler completes execution_cmd
	 * rather than releasing the mutex for us */
	lp->cmd_nonblocking=0;
	lp->exec_box->mbox=0;
	lp->exec_box->mbox=cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	mc32_ready_poll(dev);
	outb(1<<6, ioaddr+HOST_CMD);

	/* Sleep until the interrupt handler reports completion */
	wait_for_completion(&lp->execution_cmd);

	/* Bit 13 set in the returned mailbox word indicates the card
	 * rejected or failed the command */
	if(lp->exec_box->mbox&(1<<13))
		ret = -1;

	up(&lp->cmd_mutex);

	/*
	 *	A multicast set got blocked - try it now
	 */

	if(lp->mc_reload_wait)
	{
		mc32_reset_multicast_list(dev);
	}

	return ret;
}
/**
 * mc32_start_transceiver - tell board to restart tx/rx
 * @dev: The 3c527 card to issue the command to
 *
 * This may be called from the interrupt state, where it is used
 * to restart the rx ring if the card runs out of rx buffers.
 *
 * We must first check if it's ok to (re)start the transceiver. See
 * mc32_close for details.  No completion is signalled for these
 * start commands, so we do not wait for anything here.
 */
static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->xceiver_desired_state==HALTED)
		return;

	/* Give the card the offset to the post-EOL-bit RX descriptor */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);   /* card ignores this on RX restart */

	/* We are not interrupted on start completion */
}
/**
 * mc32_halt_transceiver - tell board to stop tx/rx
 * @dev: The 3c527 card to issue the command to
 *
 * We issue the commands to halt the card's transceiver. In fact,
 * after some experimenting we now simply tell the card to
 * suspend. When issuing aborts occasionally odd things happened.
 *
 * We then sleep until the card has notified us that both rx and
 * tx have been suspended.  Each suspend command is acknowledged
 * through xceiver_cmd by the interrupt handler.  May sleep.
 */
static void mc32_halt_transceiver(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Suspend RX and wait for the card's halt/abort acknowledgement */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);

	/* Then the same for TX */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);
}
/**
 * mc32_load_rx_ring - load the ring of receive buffers
 * @dev: 3c527 to build the ring for
 *
 * This initialises the on-card and driver datastructures to
 * the point where mc32_start_transceiver() can be called.
 *
 * The card sets up the receive ring for us. We are required to use the
 * ring it provides, although the size of the ring is configurable.
 *
 * We allocate an sk_buff for each ring entry in turn and
 * initialise its house-keeping info. At the same time, we read
 * each 'next' pointer in our rx_ring array. This reduces slow
 * shared-memory reads and makes it easy to access predecessor
 * descriptors.
 *
 * We then set the end-of-list bit for the last entry so that the
 * card will know when it has run out of buffers.
 *
 * Returns 0 on success or -ENOBUFS if an skb allocation fails.  On
 * failure all previously allocated buffers are freed and the ring
 * entries are cleared back to NULL, so a subsequent
 * mc32_flush_rx_ring() (run via mc32_close() on the error path) does
 * not free them a second time.  The old unwind loop left the freed
 * skb pointers in place, causing exactly that double free.
 */
static int mc32_load_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int i;
	u16 rx_base;
	volatile struct skb_header *p;

	rx_base=lp->rx_chain;

	for(i=0; i<RX_RING_LEN; i++) {
		lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
		if (lp->rx_ring[i].skb==NULL) {
			/* Unwind: free what we allocated so far and clear
			 * the pointers so the flush path sees NULLs. */
			for (i--; i >= 0; i--) {
				kfree_skb(lp->rx_ring[i].skb);
				lp->rx_ring[i].skb = NULL;
				lp->rx_ring[i].p = NULL;
			}
			return -ENOBUFS;
		}
		/* 18-byte headroom; presumably for card/IP alignment
		 * requirements -- TODO confirm against 3c527 docs */
		skb_reserve(lp->rx_ring[i].skb, 18);

		/* Fill in the on-card descriptor for this buffer */
		p=isa_bus_to_virt(lp->base+rx_base);
		p->control=0;
		p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
		p->status=0;
		p->length=1532;
		lp->rx_ring[i].p=p;
		rx_base=p->next;
	}

	/* Mark the final descriptor so the card knows where the ring ends */
	lp->rx_ring[i-1].p->control |= CONTROL_EOL;

	lp->rx_ring_tail=0;

	return 0;
}
/**
 * mc32_flush_rx_ring - free the ring of receive buffers
 * @dev: 3c527 whose rx ring should be flushed
 *
 * Release every receive buffer and clear the driver's bookkeeping
 * pointers.  Safe to call before mc32_load_rx_ring() has run (e.g. on
 * an error path in mc32_open()) provided each skb pointer is either a
 * valid skb or NULL.
 */
static void mc32_flush_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int slot;

	for (slot = 0; slot < RX_RING_LEN; slot++) {
		struct sk_buff *skb = lp->rx_ring[slot].skb;

		if (skb != NULL) {
			dev_kfree_skb(skb);
			lp->rx_ring[slot].skb = NULL;
		}
		lp->rx_ring[slot].p = NULL;
	}
}
/**
 * mc32_load_tx_ring - load transmit ring
 * @dev: The 3c527 card to issue the command to
 *
 * Set up the host-side transmit bookkeeping.  The card reports the
 * first descriptor offset in the tx mailbox; we walk the on-card
 * chain once, caching each descriptor's virtual address in tx_ring[]
 * so later code avoids slow shared-memory 'next' reads.
 *
 * tx_count starts at TX_RING_LEN-1 so that tx_ring_head can never
 * "lap" tx_ring_tail (see mc32_tx_ring for why head==tail must mean
 * an empty queue).
 */
static void mc32_load_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 next_desc;
	int slot;

	next_desc = lp->tx_box->data[0];

	for (slot = 0; slot < TX_RING_LEN; slot++) {
		volatile struct skb_header *desc =
			isa_bus_to_virt(lp->base + next_desc);

		lp->tx_ring[slot].p = desc;
		lp->tx_ring[slot].skb = NULL;
		next_desc = desc->next;
	}

	atomic_set(&lp->tx_count, TX_RING_LEN-1);
	atomic_set(&lp->tx_ring_head, 0);
	lp->tx_ring_tail = 0;
}
/**
 * mc32_flush_tx_ring - free transmit ring
 * @dev: 3c527 whose tx ring should be flushed
 *
 * Drop any sk_buffs still parked on the transmit ring, then reset the
 * ring house-keeping counters to an empty state.  Each skb pointer
 * must be either a valid skb or NULL.
 */
static void mc32_flush_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int slot;

	for (slot = 0; slot < TX_RING_LEN; slot++) {
		struct sk_buff *skb = lp->tx_ring[slot].skb;

		if (skb != NULL) {
			dev_kfree_skb(skb);
			lp->tx_ring[slot].skb = NULL;
		}
	}

	atomic_set(&lp->tx_count, 0);
	atomic_set(&lp->tx_ring_head, 0);
	lp->tx_ring_tail = 0;
}
/**
 * mc32_open - handle 'up' of card
 * @dev: device to open
 *
 * The user is trying to bring the card into ready state. This requires
 * a brief dialogue with the card. Firstly we enable interrupts and then
 * 'indications'. Without these enabled the card doesn't bother telling
 * us what it has done. This had me puzzled for a week.
 *
 * We configure the number of card descriptors, then load the network
 * address and multicast filters. Turn on the workaround mode. This
 * works around a bug in the 82586 - it asks the firmware to do
 * so. It has a performance (latency) hit but is needed on busy
 * [read most] lans. We load the ring with buffers then we kick it
 * all off.
 *
 * Returns 0 on success, -ENOBUFS on failure (card rejected our buffer
 * configuration, or rx buffer allocation failed).
 *
 * Fix versus the previous revision: 'one' was declared u8 but passed
 * to mc32_command() with a length of 2, so the command copied one
 * byte of unrelated stack data to the card.  It is now u16, matching
 * the identical "indications on" command in mc32_close().
 */
static int mc32_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct mc32_local *lp = netdev_priv(dev);
	u16 one=1;	/* 16-bit: mc32_command() copies 2 bytes of it */
	u8 regs;
	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};

	/*
	 *	Interrupts enabled
	 */

	regs=inb(ioaddr+HOST_CTRL);
	regs|=HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/*
	 *	Allow ourselves to issue commands
	 *	(cmd_mutex was initialised locked in mc32_probe1)
	 */

	up(&lp->cmd_mutex);


	/*
	 *	Send the indications on command
	 */

	mc32_command(dev, 4, &one, 2);

	/*
	 *	Poke it to make sure it's really dead.
	 */

	mc32_halt_transceiver(dev);
	mc32_flush_tx_ring(dev);

	/*
	 *	Ask card to set up on-card descriptors to our spec
	 */

	if(mc32_command(dev, 8, descnumbuffs, 4)) {
		pr_info("%s: %s rejected our buffer configuration!\n",
		        dev->name, cardname);
		mc32_close(dev);
		return -ENOBUFS;
	}

	/* Report new configuration */
	mc32_command(dev, 6, NULL, 0);

	/* Re-read the descriptor chain info; the card may have moved it
	 * after the reconfiguration above */
	lp->tx_chain 		= lp->exec_box->data[8];   /* Transmit list start offset */
	lp->rx_chain 		= lp->exec_box->data[10];  /* Receive list start offset */
	lp->tx_len 		= lp->exec_box->data[9];   /* Transmit list count */
	lp->rx_len 		= lp->exec_box->data[11];  /* Receive list count */

	/* Set Network Address */
	mc32_command(dev, 1, dev->dev_addr, 6);

	/* Set the filters */
	mc32_set_multicast_list(dev);

	if (WORKAROUND_82586) {
		u16 zero_word=0;
		mc32_command(dev, 0x0D, &zero_word, 2);   /* 82586 bug workaround on */
	}

	mc32_load_tx_ring(dev);

	if(mc32_load_rx_ring(dev))
	{
		mc32_close(dev);
		return -ENOBUFS;
	}

	lp->xceiver_desired_state = RUNNING;

	/* And finally, set the ball rolling... */
	mc32_start_transceiver(dev);

	netif_start_queue(dev);

	return 0;
}
/**
 * mc32_timeout - handle a timeout from the network layer
 * @dev: 3c527 that timed out
 *
 * Handle a timeout on transmit from the 3c527. This normally means
 * bad things as the hardware handles cable timeouts and mess for
 * us.  We just log it and re-enable the queue; no card reset is
 * attempted here.
 */
static void mc32_timeout(struct net_device *dev)
{
	pr_warning("%s: transmit timed out?\n", dev->name);
	/* Try to restart the adaptor. */
	netif_wake_queue(dev);
}
/**
 * mc32_send_packet - queue a frame for transmit
 * @skb: buffer to transmit
 * @dev: 3c527 to send it out of
 *
 * Transmit a buffer. This normally means throwing the buffer onto
 * the transmit queue as the queue is quite large. If the queue is
 * full then we set tx_busy and return. Once the interrupt handler
 * gets messages telling it to reclaim transmit queue entries, we will
 * clear tx_busy and the kernel will start calling this again.
 *
 * We do not disable interrupts or acquire any locks; this can
 * run concurrently with mc32_tx_ring(), and the function itself
 * is serialised at a higher layer. However, similarly for the
 * card itself, we must ensure that we update tx_ring_head only
 * after we've established a valid packet on the tx ring (and
 * before we let the card "see" it, to prevent it racing with the
 * irq handler).
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full.
 */
static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	/* Ring full: ask the stack to retry later */
	if(atomic_read(&lp->tx_count)==0) {
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return NETDEV_TX_OK;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length	= unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	/* New descriptor ends the list until we unhook it below */
	np->control	= CONTROL_EOP | CONTROL_EOL;
	/* Make sure the descriptor writes above are visible before we
	 * publish the new head and let the card advance past p */
	wmb();

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	/* Clearing EOL on the predecessor hands the new frame to the card */
	p->control     &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
/**
 * mc32_update_stats - pull off the on board statistics
 * @dev: 3c527 to service
 *
 *
 * Query and reset the on-card stats. There's the small possibility
 * of a race here, which would result in an underestimation of
 * actual errors. As such, we'd prefer to keep all our stats
 * collection in software. As a rule, we do. However it can't be
 * used for rx errors and collisions as, by default, the card discards
 * bad rx packets.
 *
 * Setting the SAV BP in the rx filter command supposedly
 * stops this behaviour. However, testing shows that it only seems to
 * enable the collation of on-card rx statistics --- the driver
 * never sees an RX descriptor with an error status set.
 *
 * Each chained "a += b += c" below adds the card counter into the
 * matching dev->stats field AND accumulates the same value into
 * rx_errors; the card counter is then zeroed so the next pass only
 * sees the delta.
 */
static void mc32_update_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct mc32_stats *st = lp->stats;

	u32 rx_errors=0;

	rx_errors+=dev->stats.rx_crc_errors   +=st->rx_crc_errors;
	st->rx_crc_errors=0;
	rx_errors+=dev->stats.rx_fifo_errors  +=st->rx_overrun_errors;
	st->rx_overrun_errors=0;
	rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
	st->rx_alignment_errors=0;
	rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
	st->rx_tooshort_errors=0;
	rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
	st->rx_outofresource_errors=0;
	dev->stats.rx_errors=rx_errors;

	/* Number of packets which saw one collision */
	dev->stats.collisions+=st->dataC[10];
	st->dataC[10]=0;

	/* Number of packets which saw 2--15 collisions */
	dev->stats.collisions+=st->dataC[11];
	st->dataC[11]=0;
}
/**
 * mc32_rx_ring - process the receive ring
 * @dev: 3c527 that needs its receive ring processing
 *
 *
 * We have received one or more indications from the card that a
 * receive has completed. The buffer ring thus contains dirty
 * entries. We walk the ring by iterating over the circular rx_ring
 * array, starting at the next dirty buffer (which happens to be the
 * one we finished up at last time around).
 *
 * For each completed packet, we will either copy it and pass it up
 * the stack or, if the packet is near MTU sized, we allocate
 * another buffer and flip the old one up the stack.
 *
 * We must succeed in keeping a buffer on the ring. If necessary we
 * will toss a received packet rather than lose a ring entry. Once
 * the first uncompleted descriptor is found, we move the
 * End-Of-List bit to include the buffers just processed.
 *
 * At most 48 descriptors are handled per call, bounding the time
 * spent here (we are called from the interrupt handler).
 */
static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		/* Descriptor status bit 7: card has finished with it */
		if(!(p->status & (1<<7))) { /* Not COMPLETED */
			break;
		}
		/* Bit 6: frame received without error */
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK) &&
			    ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
			{
				/* Flip the filled skb up the stack and put a
				 * fresh buffer into the descriptor */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame: copy out, keep the ring buffer */
				skb = netdev_alloc_skb(dev, length + 2);

				if(skb==NULL) {
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Re-arm the descriptor for the card */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
        while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
/**
 * mc32_tx_ring - process completed transmits
 * @dev: 3c527 that needs its transmit ring processing
 *
 *
 * This operates in a similar fashion to mc32_rx_ring. We iterate
 * over the transmit ring. For each descriptor which has been
 * processed by the card, we free its associated buffer and note
 * any errors. This continues until the transmit ring is emptied
 * or we reach a descriptor that hasn't yet been processed by the
 * card.
 *
 * Runs from the interrupt handler, concurrently with
 * mc32_send_packet() (see the ordering notes there).
 */
static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		/* Status bit 7: card has processed this descriptor */
		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		dev->stats.tx_packets++;
		/* Bit 6 clear = transmission failed; the low nibble
		 * carries the failure code */
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			switch(np->status&0x0F)
			{
				case 1:
					dev->stats.tx_aborted_errors++;
					break; /* Max collisions */
				case 2:
					dev->stats.tx_fifo_errors++;
					break;
				case 3:
					dev->stats.tx_carrier_errors++;
					break;
				case 4:
					dev->stats.tx_window_errors++;
					break; /* CTS Lost */
				case 5:
					dev->stats.tx_aborted_errors++;
					break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}

}
/**
 * mc32_interrupt - handle an interrupt from a 3c527
 * @irq: Interrupt number
 * @dev_id: 3c527 that requires servicing
 *
 *
 * An interrupt is raised whenever the 3c527 writes to the command
 * register. This register contains the message it wishes to send us
 * packed into a single byte field. We keep reading status entries
 * until we have processed all the control items, but simply count
 * transmit and receive reports. When all reports are in we empty the
 * transceiver rings as appropriate. This saves the overhead of
 * multiple command requests.
 *
 * Because MCA is level-triggered, we shouldn't miss indications.
 * Therefore, we needn't ask the card to suspend interrupts within
 * this handler. The card receives an implicit acknowledgment of the
 * current interrupt when we read the command register.
 *
 * The status byte layout (decoded below): bits 0-2 tx report,
 * bits 3-5 rx report, bit 6 exec command done, bit 7 stats counter
 * overflow.
 */
static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See whats cooking */

	/* boguscount bounds the loop in case the card jams the
	 * status line */
	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		status=inb(ioaddr+HOST_CMD);

		pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);

		/* TX report (bits 0-2) */
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2:	/* TX ok */
				tx_event = 1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* RX report (originally bits 3-5) */
		switch(status&7)
		{
			case 0:
				break;
			case 2:	/* RX */
				rx_event=1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				dev->stats.rx_dropped++;
				mc32_rx_ring(dev);
				mc32_start_transceiver(dev);
				break;
			default:
				pr_notice("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;
		/* Exec command completion (originally bit 6) */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		/* Statistics counter overflow (originally bit 7) */
		if(status&2)
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}


	/*
	 *	Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
/**
 * mc32_close - user configuring the 3c527 down
 * @dev: 3c527 card to shut down
 *
 * The 3c527 is a bus mastering device. We must be careful how we
 * shut it down. It may also be running shared interrupt so we have
 * to be sure to silence it properly
 *
 * We indicate that the card is closing to the rest of the
 * driver. Otherwise, it is possible that the card may run out
 * of receive buffers and restart the transceiver while we're
 * trying to close it.
 *
 * We abort any receive and transmits going on and then wait until
 * any pending exec commands have completed in other code threads.
 * In theory we can't get here while that is true, in practice I am
 * paranoid
 *
 * We turn off the interrupt enable for the board to be sure it can't
 * intefere with other devices.
 *
 * Always returns 0.  Leaves cmd_mutex held (locked), mirroring the
 * probe-time state; mc32_open() releases it again.
 */
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Stop mc32_start_transceiver() restarting rx behind our back */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 *	Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* One last grab of the card stats before interrupts stop */
	mc32_update_stats(dev);

	return 0;
}
/**
 * mc32_get_stats - hand back stats to network layer
 * @dev: The 3c527 card to handle
 *
 * We've collected all the stats we can in software already. Now
 * it's time to update those kept on-card and return the lot.
 *
 * Returns a pointer to the device's embedded statistics block.
 */
static struct net_device_stats *mc32_get_stats(struct net_device *dev)
{
	mc32_update_stats(dev);
	return &dev->stats;
}
/**
 * do_mc32_set_multicast_list - attempt to update multicasts
 * @dev: 3c527 device to load the list on
 * @retry: indicates this is not the first call.
 *
 *
 * Actually set or clear the multicast filter for this adaptor. The
 * locking issues are handled by this routine. We have to track
 * state as it may take multiple calls to get the command sequence
 * completed. We just keep trying to schedule the loads until we
 * manage to process them all.
 *
 * num_addrs == -1	Promiscuous mode, receive all packets
 *
 * num_addrs == 0	Normal mode, clear multicast list
 *
 * num_addrs > 0	Multicast mode, receive normal and MC packets,
 *			and do best-effort filtering.
 *
 * See mc32_update_stats() regards setting the SAV BP bit.
 *
 * Commands are issued non-blocking; if the exec mailbox is busy we
 * set mc_reload_wait and the interrupt/command paths retry us later
 * with retry=1 (which preserves mc_list_valid so the address block
 * is not re-sent needlessly).
 */
static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

	/* More than 10 addresses exceeds the card's filter capacity
	 * (see the 62-byte block below), so fall back to promiscuous */
	if ((dev->flags&IFF_PROMISC) ||
	    (dev->flags&IFF_ALLMULTI) ||
	    netdev_mc_count(dev) > 10)
		/* Enable promiscuous mode */
		filt |= 1;
	else if (!netdev_mc_empty(dev))
	{
		unsigned char block[62];	/* 2-byte count + 10 x 6-byte addresses */
		unsigned char *bp;
		struct netdev_hw_addr *ha;

		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
			/* block[0..1]: address count as a 16-bit field
			 * (low byte first — presumably little-endian as
			 * the card expects; count is <= 10 so the high
			 * byte is always 0) */
			block[1]=0;
			block[0]=netdev_mc_count(dev);
			bp=block+2;

			netdev_for_each_mc_addr(ha, dev) {
				memcpy(bp, ha->addr, 6);
				bp+=6;
			}
			if(mc32_command_nowait(dev, 2, block,
					       2+6*netdev_mc_count(dev))==-1)
			{
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}

	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
	{
		lp->mc_reload_wait = 1;
	}
	else {
		lp->mc_reload_wait = 0;
	}
}
/**
 * mc32_set_multicast_list - queue multicast list update
 * @dev: The 3c527 to use
 *
 * Commence loading the multicast list. This is called when the kernel
 * changes the lists. It will override any pending list we are trying to
 * load (retry=0 invalidates any cached list).
 */
static void mc32_set_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev,0);
}
/**
 * mc32_reset_multicast_list - reset multicast list
 * @dev: The 3c527 to use
 *
 * Attempt the next step in loading the multicast lists. If this attempt
 * fails to complete then it will be scheduled and this function called
 * again later from elsewhere (retry=1 keeps the cached address list).
 */
static void mc32_reset_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev,1);
}
/*
 * netdev_get_drvinfo - report driver identification via ethtool
 *
 * The ethtool_drvinfo fields are fixed-size character arrays, so use
 * bounded copies (strlcpy/snprintf) instead of the previous unbounded
 * strcpy()/sprintf(), which would overflow if the source strings ever
 * grew past the field sizes.
 */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "MCA 0x%lx", dev->base_addr);
}
/* ethtool hook: report the driver-wide debug level (mc32_debug). */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return mc32_debug;
}
/* ethtool hook: set the driver-wide debug level (shared by all cards). */
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	mc32_debug = level;
}
/* Minimal ethtool support: identification and message level only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
#ifdef MODULE
static struct net_device *this_device;
/**
 * init_module - entry point
 *
 * Probe for a single 3c527 card (it really ought to walk every slot;
 * loading the module multiple times is the current workaround for
 * machines with several cards).  Returns 0 on success or the negative
 * errno encoded in the probe result.
 */
int __init init_module(void)
{
	this_device = mc32_probe(-1);
	return IS_ERR(this_device) ? PTR_ERR(this_device) : 0;
}
/**
 * cleanup_module - free resources for an unload
 *
 * Unloading time. We release the MCA bus resources and the interrupt
 * at which point everything is ready to unload. The card must be stopped
 * at this point or we would not have been called. When we unload we
 * leave the card stopped but not totally shut down. When the card is
 * initialized it must be rebooted or the rings reloaded before any
 * transmit operations are allowed to start scribbling into memory.
 */
void __exit cleanup_module(void)
{
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}
#endif /* MODULE */
| gpl-2.0 |
NoelMacwan/SXDNickiCM11-OLD | drivers/mmc/host/mxcmmc.c | 4801 | 25641 | /*
* linux/drivers/mmc/host/mxcmmc.c - Freescale i.MX MMCI driver
*
* This is a driver for the SDHC controller found in Freescale MX2/MX3
* SoCs. It is basically the same hardware as found on MX1 (imxmmc.c).
* Unlike the hardware found on MX1, this hardware just works and does
* not need all the quirks found in imxmmc.c, hence the separate driver.
*
* Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
* Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
*
* derived from pxamci.c by Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/types.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <mach/mmc.h>
#include <mach/dma.h>
#include <mach/hardware.h>
#define DRIVER_NAME "mxc-mmc"
#define MMC_REG_STR_STP_CLK 0x00
#define MMC_REG_STATUS 0x04
#define MMC_REG_CLK_RATE 0x08
#define MMC_REG_CMD_DAT_CONT 0x0C
#define MMC_REG_RES_TO 0x10
#define MMC_REG_READ_TO 0x14
#define MMC_REG_BLK_LEN 0x18
#define MMC_REG_NOB 0x1C
#define MMC_REG_REV_NO 0x20
#define MMC_REG_INT_CNTR 0x24
#define MMC_REG_CMD 0x28
#define MMC_REG_ARG 0x2C
#define MMC_REG_RES_FIFO 0x34
#define MMC_REG_BUFFER_ACCESS 0x38
#define STR_STP_CLK_RESET (1 << 3)
#define STR_STP_CLK_START_CLK (1 << 1)
#define STR_STP_CLK_STOP_CLK (1 << 0)
#define STATUS_CARD_INSERTION (1 << 31)
#define STATUS_CARD_REMOVAL (1 << 30)
#define STATUS_YBUF_EMPTY (1 << 29)
#define STATUS_XBUF_EMPTY (1 << 28)
#define STATUS_YBUF_FULL (1 << 27)
#define STATUS_XBUF_FULL (1 << 26)
#define STATUS_BUF_UND_RUN (1 << 25)
#define STATUS_BUF_OVFL (1 << 24)
#define STATUS_SDIO_INT_ACTIVE (1 << 14)
#define STATUS_END_CMD_RESP (1 << 13)
#define STATUS_WRITE_OP_DONE (1 << 12)
#define STATUS_DATA_TRANS_DONE (1 << 11)
#define STATUS_READ_OP_DONE (1 << 11)
#define STATUS_WR_CRC_ERROR_CODE_MASK (3 << 10)
#define STATUS_CARD_BUS_CLK_RUN (1 << 8)
#define STATUS_BUF_READ_RDY (1 << 7)
#define STATUS_BUF_WRITE_RDY (1 << 6)
#define STATUS_RESP_CRC_ERR (1 << 5)
#define STATUS_CRC_READ_ERR (1 << 3)
#define STATUS_CRC_WRITE_ERR (1 << 2)
#define STATUS_TIME_OUT_RESP (1 << 1)
#define STATUS_TIME_OUT_READ (1 << 0)
#define STATUS_ERR_MASK 0x2f
#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1 << 12)
#define CMD_DAT_CONT_STOP_READWAIT (1 << 11)
#define CMD_DAT_CONT_START_READWAIT (1 << 10)
#define CMD_DAT_CONT_BUS_WIDTH_4 (2 << 8)
#define CMD_DAT_CONT_INIT (1 << 7)
#define CMD_DAT_CONT_WRITE (1 << 4)
#define CMD_DAT_CONT_DATA_ENABLE (1 << 3)
#define CMD_DAT_CONT_RESPONSE_48BIT_CRC (1 << 0)
#define CMD_DAT_CONT_RESPONSE_136BIT (2 << 0)
#define CMD_DAT_CONT_RESPONSE_48BIT (3 << 0)
#define INT_SDIO_INT_WKP_EN (1 << 18)
#define INT_CARD_INSERTION_WKP_EN (1 << 17)
#define INT_CARD_REMOVAL_WKP_EN (1 << 16)
#define INT_CARD_INSERTION_EN (1 << 15)
#define INT_CARD_REMOVAL_EN (1 << 14)
#define INT_SDIO_IRQ_EN (1 << 13)
#define INT_DAT0_EN (1 << 12)
#define INT_BUF_READ_EN (1 << 4)
#define INT_BUF_WRITE_EN (1 << 3)
#define INT_END_CMD_RES_EN (1 << 2)
#define INT_WRITE_OP_DONE_EN (1 << 1)
#define INT_READ_OP_EN (1 << 0)
/* Per-controller driver state, stored in the mmc_host private area. */
struct mxcmci_host {
	struct mmc_host		*mmc;
	struct resource		*res;		/* MMIO register window */
	void __iomem		*base;		/* ioremapped register base */
	int			irq;		/* SDHC controller interrupt */
	int			detect_irq;	/* card-detect interrupt (board) */
	struct dma_chan		*dma;		/* NULL when running PIO only */
	struct dma_async_tx_descriptor *desc;	/* in-flight slave_sg descriptor */
	int			do_dma;		/* use DMA for current request */
	int			default_irq_mask; /* card insert/remove bits, or 0 */
	int			use_sdio;	/* SDIO IRQ forwarding enabled */
	unsigned int		power_mode;	/* last MMC_POWER_* applied */
	struct imxmmc_platform_data *pdata;

	struct mmc_request	*req;		/* current request, NULL if idle */
	struct mmc_command	*cmd;		/* command awaiting completion */
	struct mmc_data		*data;		/* data phase of current request */

	unsigned int		datasize;	/* total bytes of current transfer */
	unsigned int		dma_dir;	/* DMA_TO_DEVICE / DMA_FROM_DEVICE */

	u16			rev_no;		/* controller revision register */
	unsigned int		cmdat;		/* sticky CMD_DAT_CONT bits */

	struct clk		*clk;		/* ipg/per clock for the SDHC */
	int			clock;		/* last programmed bus clock (Hz) */

	struct work_struct	datawork;	/* PIO transfer done in process ctx */
	spinlock_t		lock;		/* protects use_sdio + INT_CNTR */

	struct regulator	*vcc;		/* optional vmmc supply */

	int			burstlen;	/* DMA burst length in words */
	int			dmareq;		/* DMA request line number */
	struct dma_slave_config	dma_slave_config;
	struct imx_dma_data	dma_data;
};
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
/*
 * Determine the advertised OCR voltage mask.  Prefer a "vmmc" regulator
 * when one is available; otherwise fall back to platform data, and as a
 * last resort advertise 3.2V-3.4V.
 */
static inline void mxcmci_init_ocr(struct mxcmci_host *host)
{
	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");

	if (IS_ERR(host->vcc)) {
		host->vcc = NULL;	/* no regulator: use fallbacks below */
	} else {
		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
		if (host->pdata && host->pdata->ocr_avail)
			dev_warn(mmc_dev(host->mmc),
				"pdata->ocr_avail will not be used\n");
	}

	if (host->vcc == NULL) {
		/* fall-back to platform data */
		if (host->pdata && host->pdata->ocr_avail)
			host->mmc->ocr_avail = host->pdata->ocr_avail;
		else
			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	}
}
/*
 * Apply a power state change: drive the vmmc regulator (when present) and
 * then give the board callback a chance to switch any external supply.
 * @vdd is an MMC_VDD_* bit index as passed in via mmc_ios.
 */
static inline void mxcmci_set_power(struct mxcmci_host *host,
				    unsigned char power_mode,
				    unsigned int vdd)
{
	if (host->vcc) {
		if (power_mode == MMC_POWER_UP)
			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
		else if (power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
	}
	/* board hook runs for every power mode, not only UP/OFF */
	if (host->pdata && host->pdata->setpower)
		host->pdata->setpower(mmc_dev(host->mmc), vdd);
}
/* Nonzero when the current request is being transferred by DMA. */
static inline int mxcmci_use_dma(struct mxcmci_host *host)
{
	return host->do_dma;
}
/*
 * Soft-reset the SDHC controller.  The register write sequence (assert
 * RESET, RESET+START_CLK, then eight START_CLK writes) follows the
 * original driver; presumably mandated by the i.MX reference manual --
 * do not reorder or shorten without checking the datasheet.
 */
static void mxcmci_softreset(struct mxcmci_host *host)
{
	int i;

	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");

	/* reset sequence */
	writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK);
	writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK,
			host->base + MMC_REG_STR_STP_CLK);

	for (i = 0; i < 8; i++)
		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);

	/* maximum response timeout */
	writew(0xff, host->base + MMC_REG_RES_TO);
}
static int mxcmci_setup_dma(struct mmc_host *mmc);
/*
 * Program block count/length for @data and, when DMA is enabled, map the
 * scatterlist and submit a slave_sg descriptor.  Falls back to PIO
 * (clearing host->do_dma) when any sg entry is not 32-bit aligned or when
 * no descriptor can be obtained.  Returns 0 on success (including the PIO
 * fallback) or a negative errno.
 */
static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasize = nob * blksz;
	struct scatterlist *sg;
	enum dma_transfer_direction slave_dirn;
	int i, nents;

	/* stream transfers have no block count; use the maximum */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	writew(nob, host->base + MMC_REG_NOB);
	writew(blksz, host->base + MMC_REG_BLK_LEN);
	host->datasize = datasize;

	if (!mxcmci_use_dma(host))
		return 0;

	/* the controller's FIFO is word based: unaligned chunks force PIO */
	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3) {
			host->do_dma = 0;
			return 0;
		}
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
	}

	nents = dma_map_sg(host->dma->device->dev, data->sg,
				data->sg_len,  host->dma_dir);
	if (nents != data->sg_len)
		return -EINVAL;

	host->desc = dmaengine_prep_slave_sg(host->dma,
		data->sg, data->sg_len, slave_dirn,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!host->desc) {
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
		host->do_dma = 0;
		return 0; /* Fall back to PIO */
	}
	/* make sure buffer contents are visible before the DMA starts */
	wmb();

	dmaengine_submit(host->desc);
	dma_async_issue_pending(host->dma);

	return 0;
}
/*
 * Issue @cmd on the bus.  Translates the MMC response type into
 * CMD_DAT_CONT bits, programs the interrupt mask, and writes opcode,
 * argument and control word (the last write starts the command).
 * Returns 0 on success or -EINVAL for an unsupported response type.
 */
static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,
		unsigned int cmdat)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT_CRC;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_136BIT;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_48BIT;
		break;
	case MMC_RSP_NONE:
		break;
	default:
		dev_err(mmc_dev(host->mmc), "unhandled response type 0x%x\n",
				mmc_resp_type(cmd));
		cmd->error = -EINVAL;
		return -EINVAL;
	}

	/*
	 * NOTE(review): this assignment discards the default_irq_mask
	 * initializer above, so card insert/remove interrupts are masked
	 * while a command is in flight -- confirm that is intentional
	 * (they are restored in mxcmci_finish_request()).
	 */
	int_cntr = INT_END_CMD_RES_EN;

	if (mxcmci_use_dma(host))
		int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN;

	/* host->lock serializes INT_CNTR updates against the SDIO path */
	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	writew(cmd->opcode, host->base + MMC_REG_CMD);
	writel(cmd->arg, host->base + MMC_REG_ARG);
	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);

	return 0;
}
/*
 * Complete @req: restore the default interrupt mask (plus the SDIO IRQ
 * bit when enabled), clear the in-flight state and hand the request back
 * to the MMC core.
 */
static void mxcmci_finish_request(struct mxcmci_host *host,
		struct mmc_request *req)
{
	u32 int_cntr = host->default_irq_mask;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->use_sdio)
		int_cntr |= INT_SDIO_IRQ_EN;
	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);

	host->req = NULL;
	host->cmd = NULL;
	host->data = NULL;
	mmc_request_done(host->mmc, req);
}
/*
 * Finish the data phase: tear down DMA, translate the hardware status
 * word @stat into data->error (CRC, timeout or generic I/O error) and
 * record bytes_xfered on success.  Returns the resulting error code
 * (0 on success).
 */
static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (mxcmci_use_dma(host)) {
		dmaengine_terminate_all(host->dma);
		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,
				host->dma_dir);
	}

	if (stat & STATUS_ERR_MASK) {
		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n",
				stat);
		if (stat & STATUS_CRC_READ_ERR) {
			dev_err(mmc_dev(host->mmc), "%s: -EILSEQ\n", __func__);
			data->error = -EILSEQ;
		} else if (stat & STATUS_CRC_WRITE_ERR) {
			/* two-bit write CRC error code at bits 10:9 */
			u32 err_code = (stat >> 9) & 0x3;
			if (err_code == 2) { /* No CRC response */
				dev_err(mmc_dev(host->mmc),
					"%s: No CRC -ETIMEDOUT\n", __func__);
				data->error = -ETIMEDOUT;
			} else {
				dev_err(mmc_dev(host->mmc),
					"%s: -EILSEQ\n", __func__);
				data->error = -EILSEQ;
			}
		} else if (stat & STATUS_TIME_OUT_READ) {
			dev_err(mmc_dev(host->mmc),
				"%s: read -ETIMEDOUT\n", __func__);
			data->error = -ETIMEDOUT;
		} else {
			dev_err(mmc_dev(host->mmc), "%s: -EIO\n", __func__);
			data->error = -EIO;
		}
	} else {
		data->bytes_xfered = host->datasize;
	}

	data_error = data->error;

	host->data = NULL;

	return data_error;
}
/*
 * Fetch the command response from the 16-bit RES_FIFO.  A 136-bit (R2)
 * response occupies eight FIFO reads assembled into resp[0..3]; a short
 * response occupies three reads from which the 38 relevant bits are
 * packed into resp[0].  Also latches timeout/CRC status into cmd->error.
 */
static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 a, b, c;

	if (!cmd)
		return;

	if (stat & STATUS_TIME_OUT_RESP) {
		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
		cmd->error = -ETIMEDOUT;
	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
		cmd->error = -EILSEQ;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			for (i = 0; i < 4; i++) {
				a = readw(host->base + MMC_REG_RES_FIFO);
				b = readw(host->base + MMC_REG_RES_FIFO);
				cmd->resp[i] = a << 16 | b;
			}
		} else {
			a = readw(host->base + MMC_REG_RES_FIFO);
			b = readw(host->base + MMC_REG_RES_FIFO);
			c = readw(host->base + MMC_REG_RES_FIFO);
			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
		}
	}
}
/*
 * Busy-wait (up to one second) for any bit in @mask to appear in the
 * STATUS register.  Returns 0 when a requested bit is set, the raw status
 * word if an error bit appears first, or STATUS_TIME_OUT_READ after
 * resetting the controller on timeout.
 */
static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
{
	u32 stat;
	unsigned long timeout = jiffies + HZ;

	do {
		stat = readl(host->base + MMC_REG_STATUS);
		if (stat & STATUS_ERR_MASK)
			return stat;
		if (time_after(jiffies, timeout)) {
			/* controller wedged: reset and re-program the clock */
			mxcmci_softreset(host);
			mxcmci_set_clk_rate(host, host->clock);
			return STATUS_TIME_OUT_READ;
		}
		if (stat & mask)
			return 0;
		cpu_relax();
	} while (1);
}
/*
 * PIO read: drain @bytes from the 32-bit data FIFO into @_buf, polling
 * for buffer-ready before each word.  A trailing partial word is read
 * whole and copied byte-wise.  Returns 0 on success or a nonzero status
 * from mxcmci_poll_status().
 */
static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		*buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS);
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host,
				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);
		if (stat)
			return stat;
		tmp = readl(host->base + MMC_REG_BUFFER_ACCESS);
		memcpy(b, &tmp, bytes);
	}

	return 0;
}
/*
 * PIO write: push @bytes from @_buf into the 32-bit data FIFO, polling
 * for write-ready before each word.  A trailing partial word is padded
 * via a temporary.  The final poll ensures the FIFO has accepted the
 * last word before returning.  Returns 0 on success or a nonzero status.
 */
static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
{
	unsigned int stat;
	u32 *buf = _buf;

	while (bytes > 3) {
		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;
		writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS);
		bytes -= 4;
	}

	if (bytes) {
		u8 *b = (u8 *)buf;
		u32 tmp;

		stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
		if (stat)
			return stat;

		memcpy(&tmp, b, bytes);
		writel(tmp, host->base + MMC_REG_BUFFER_ACCESS);
	}

	stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
	if (stat)
		return stat;

	return 0;
}
/*
 * Run the whole data phase of the current request in PIO mode, walking
 * the scatterlist segment by segment.  For writes, additionally waits for
 * WRITE_OP_DONE so the card has committed the data.  Returns 0 on success
 * or the first nonzero poll status.
 */
static int mxcmci_transfer_data(struct mxcmci_host *host)
{
	struct mmc_data *data = host->req->data;
	struct scatterlist *sg;
	int stat, i;

	host->data = data;
	host->datasize = 0;

	if (data->flags & MMC_DATA_READ) {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			stat = mxcmci_pull(host, sg_virt(sg), sg->length);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
	} else {
		for_each_sg(data->sg, sg, data->sg_len, i) {
			stat = mxcmci_push(host, sg_virt(sg), sg->length);
			if (stat)
				return stat;
			host->datasize += sg->length;
		}
		stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
		if (stat)
			return stat;
	}
	return 0;
}
/*
 * Workqueue handler for PIO transfers: moves the data in process context
 * (mxcmci_pull/push poll and may sleep-spin), acknowledges the done bits,
 * then either issues the stop command or completes the request.
 */
static void mxcmci_datawork(struct work_struct *work)
{
	struct mxcmci_host *host = container_of(work, struct mxcmci_host,
						  datawork);
	int datastat = mxcmci_transfer_data(host);

	writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
		host->base + MMC_REG_STATUS);
	mxcmci_finish_data(host, datastat);

	if (host->req->stop) {
		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
			mxcmci_finish_request(host, host->req);
			return;
		}
	} else {
		mxcmci_finish_request(host, host->req);
	}
}
/*
 * DMA-path data completion (called from the IRQ handler).  Finishes the
 * data phase, reads the response, and either queues the stop command or
 * completes the request.  A spurious call with no data phase is ignored.
 *
 * NOTE(review): host->req is dereferenced without a NULL check; the
 * !data early-return presumably guarantees a request is in flight --
 * confirm against the IRQ status handling.
 */
static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	int data_error;

	if (!data)
		return;

	data_error = mxcmci_finish_data(host, stat);

	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (host->req->stop) {
		if (mxcmci_start_cmd(host, host->req->stop, 0)) {
			mxcmci_finish_request(host, host->req);
			return;
		}
	} else {
		mxcmci_finish_request(host, host->req);
	}
}
/*
 * Command-complete handler: read the response, and when there is no data
 * phase finish the request immediately.  PIO data is deferred to the
 * workqueue; DMA data completion arrives via a later interrupt.
 */
static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)
{
	mxcmci_read_response(host, stat);
	host->cmd = NULL;

	if (!host->data && host->req) {
		mxcmci_finish_request(host, host->req);
		return;
	}

	/* For the DMA case the DMA engine handles the data transfer
	 * automatically. For non DMA we have to do it ourselves.
	 * Don't do it in interrupt context though.
	 */
	if (!mxcmci_use_dma(host) && host->data)
		schedule_work(&host->datawork);

}
/*
 * Main interrupt handler.  Reads and acknowledges status bits (SDIO and
 * the DMA completion bits are acked separately below so they are not
 * lost), then dispatches to SDIO notification, command completion, DMA
 * data completion and card-detect handling as indicated by @stat.
 */
static irqreturn_t mxcmci_irq(int irq, void *devid)
{
	struct mxcmci_host *host = devid;
	unsigned long flags;
	bool sdio_irq;
	u32 stat;

	stat = readl(host->base + MMC_REG_STATUS);
	/* ack everything except the bits handled explicitly further down */
	writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE |
			STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS);

	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat);

	spin_lock_irqsave(&host->lock, flags);
	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mxcmci_use_dma(host) &&
	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE)))
		writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE,
			host->base + MMC_REG_STATUS);

	if (sdio_irq) {
		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS);
		mmc_signal_sdio_irq(host->mmc);
	}

	if (stat & STATUS_END_CMD_RESP)
		mxcmci_cmd_done(host, stat);

	if (mxcmci_use_dma(host) &&
		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE)))
		mxcmci_data_done(host, stat);

	/* default_irq_mask is nonzero only with DAT3 card detection */
	if (host->default_irq_mask &&
		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}
/*
 * mmc_host_ops.request: start a new request.  Each request optimistically
 * re-enables DMA when a channel exists (mxcmci_setup_data may fall back
 * to PIO), programs the data phase, and issues the command.  Failures are
 * completed immediately via mxcmci_finish_request().
 */
static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned int cmdat = host->cmdat;
	int error;

	WARN_ON(host->req != NULL);

	host->req = req;
	/* the 80-clock INIT sequence is only sent once after power-on */
	host->cmdat &= ~CMD_DAT_CONT_INIT;

	if (host->dma)
		host->do_dma = 1;

	if (req->data) {
		error = mxcmci_setup_data(host, req->data);
		if (error) {
			req->cmd->error = error;
			goto out;
		}

		cmdat |= CMD_DAT_CONT_DATA_ENABLE;

		if (req->data->flags & MMC_DATA_WRITE)
			cmdat |= CMD_DAT_CONT_WRITE;
	}

	error = mxcmci_start_cmd(host, req->cmd, cmdat);

out:
	if (error)
		mxcmci_finish_request(host, req);
}
/*
 * Program the CLK_RATE register for a target bus clock of @clk_ios Hz.
 * Searches prescaler (powers of two up to 0x800) and divider (1..0xF)
 * combinations for the first that does not exceed the requested rate:
 * output = clk_in / ((divider + 1) * prescaler * 2), prescaler 0 meaning
 * bypass.
 */
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
	unsigned int divider;
	int prescaler = 0;
	unsigned int clk_in = clk_get_rate(host->clk);

	while (prescaler <= 0x800) {
		for (divider = 1; divider <= 0xF; divider++) {
			int x;

			x = (clk_in / (divider + 1));

			if (prescaler)
				x /= (prescaler * 2);

			if (x <= clk_ios)
				break;
		}
		/* divider stayed in range: this prescaler works */
		if (divider < 0x10)
			break;

		if (prescaler == 0)
			prescaler = 1;
		else
			prescaler <<= 1;
	}

	writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE);

	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",
			prescaler, divider, clk_in, clk_ios);
}
/*
 * (Re)configure the slave DMA channel: both directions target the data
 * FIFO at BUFFER_ACCESS with 32-bit accesses and the current burst
 * length.  Returns the dmaengine_slave_config() result.
 */
static int mxcmci_setup_dma(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	struct dma_slave_config *config = &host->dma_slave_config;

	config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
	config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS;
	config->dst_addr_width = 4;	/* FIFO is 32 bits wide */
	config->src_addr_width = 4;
	config->dst_maxburst = host->burstlen;
	config->src_maxburst = host->burstlen;
	config->device_fc = false;
	return dmaengine_slave_config(host->dma, config);
}
/*
 * mmc_host_ops.set_ios: apply bus width (which also selects the DMA burst
 * length in words), power mode and clock settings.  A failed DMA
 * reconfiguration permanently falls back to PIO for this host.
 */
static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	int burstlen, ret;

	/*
	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0)
	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
	 */
	if (ios->bus_width == MMC_BUS_WIDTH_4)
		burstlen = 16;
	else
		burstlen = 4;

	if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
		host->burstlen = burstlen;
		ret = mxcmci_setup_dma(mmc);
		if (ret) {
			dev_err(mmc_dev(host->mmc),
				"failed to config DMA channel. Falling back to PIO\n");
			dma_release_channel(host->dma);
			host->do_dma = 0;
			host->dma = NULL;
		}
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
	else
		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;

	if (host->power_mode != ios->power_mode) {
		mxcmci_set_power(host, ios->power_mode, ios->vdd);
		host->power_mode = ios->power_mode;

		/* schedule the 80-clock INIT sequence for the next command */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMD_DAT_CONT_INIT;
	}

	if (ios->clock) {
		mxcmci_set_clk_rate(host, ios->clock);
		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
	} else {
		writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
	}

	host->clock = ios->clock;
}
/*
 * Board-level card-detect interrupt (registered via pdata->init).
 * Schedules a rescan with a 250 ms debounce delay.
 */
static irqreturn_t mxcmci_detect_irq(int irq, void *data)
{
	struct mmc_host *mmc = data;

	dev_dbg(mmc_dev(mmc), "%s\n", __func__);

	mmc_detect_change(mmc, msecs_to_jiffies(250));
	return IRQ_HANDLED;
}
/*
 * mmc_host_ops.get_ro: report the write-protect state.  Returns 1 for
 * read-only, 0 for read-write, or -ENOSYS when the board provides no
 * detection callback so the MMC core can decide on its own.
 */
static int mxcmci_get_ro(struct mmc_host *mmc)
{
	struct mxcmci_host *host = mmc_priv(mmc);

	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	if (!host->pdata || !host->pdata->get_ro)
		return -ENOSYS;

	return host->pdata->get_ro(mmc_dev(mmc)) ? 1 : 0;
}
/*
 * mmc_host_ops.enable_sdio_irq: toggle forwarding of the card's SDIO
 * interrupt.  use_sdio and the INT_CNTR update are done under host->lock
 * so they stay consistent with the masks written in start_cmd/finish.
 */
static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mxcmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 int_cntr;

	spin_lock_irqsave(&host->lock, flags);
	host->use_sdio = enable;
	int_cntr = readl(host->base + MMC_REG_INT_CNTR);

	if (enable)
		int_cntr |= INT_SDIO_IRQ_EN;
	else
		int_cntr &= ~INT_SDIO_IRQ_EN;

	writel(int_cntr, host->base + MMC_REG_INT_CNTR);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * mmc_host_ops.init_card: per-card capability fixup, re-evaluated on
 * every card insertion since caps are toggled both ways.
 */
static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)
{
	/*
	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of
	 * multi-block transfers when connected SDIO peripheral doesn't
	 * drive the BUSY line as required by the specs.
	 * One way to prevent this is to only allow 1-bit transfers.
	 */

	if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO)
		host->caps &= ~MMC_CAP_4_BIT_DATA;
	else
		host->caps |= MMC_CAP_4_BIT_DATA;
}
/*
 * dma_request_channel() filter: accept only general-purpose i.MX DMA
 * channels, attaching our request/priority data to the one we take.
 */
static bool filter(struct dma_chan *chan, void *param)
{
	struct mxcmci_host *host = param;

	if (imx_dma_is_general_purpose(chan)) {
		chan->private = &host->dma_data;
		return true;
	}

	return false;
}
/* Host operations; card detection is interrupt driven (no get_cd hook). */
static const struct mmc_host_ops mxcmci_ops = {
	.request		= mxcmci_request,
	.set_ios		= mxcmci_set_ios,
	.get_ro			= mxcmci_get_ro,
	.enable_sdio_irq	= mxcmci_enable_sdio_irq,
	.init_card		= mxcmci_init_card,
};
/*
 * Probe: map registers, allocate the mmc_host, verify the controller
 * revision, optionally acquire a DMA channel, and register with the MMC
 * core.  Resources are released in reverse order on the error paths.
 */
static int mxcmci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct mxcmci_host *host = NULL;
	struct resource *iores, *r;
	int ret = 0, irq;
	dma_cap_mask_t mask;

	pr_info("i.MX SDHC driver\n");

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!iores || irq < 0)
		return -EINVAL;

	r = request_mem_region(iores->start, resource_size(iores), pdev->name);
	if (!r)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto out_release_mem;
	}

	mmc->ops = &mxcmci_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;

	/* MMC core transfer sizes tunable parameters */
	mmc->max_segs = 64;
	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 65535;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->base = ioremap(r->start, resource_size(r));
	if (!host->base) {
		ret = -ENOMEM;
		goto out_free;
	}

	host->mmc = mmc;
	host->pdata = pdev->dev.platform_data;
	spin_lock_init(&host->lock);

	mxcmci_init_ocr(host);

	/* enable controller card-detect interrupts only for DAT3 detection */
	if (host->pdata && host->pdata->dat3_card_detect)
		host->default_irq_mask =
			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;
	else
		host->default_irq_mask = 0;

	host->res = r;
	host->irq = irq;

	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto out_iounmap;
	}
	clk_enable(host->clk);

	mxcmci_softreset(host);

	/* this driver only supports the 0x400 silicon revision */
	host->rev_no = readw(host->base + MMC_REG_REV_NO);
	if (host->rev_no != 0x400) {
		ret = -ENODEV;
		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
			host->rev_no);
		goto out_clk_put;
	}

	mmc->f_min = clk_get_rate(host->clk) >> 16;
	mmc->f_max = clk_get_rate(host->clk) >> 1;

	/* recommended in data sheet */
	writew(0x2db4, host->base + MMC_REG_READ_TO);

	writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR);

	/* DMA is optional: continue in PIO mode when unavailable */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (r) {
		host->dmareq = r->start;
		host->dma_data.peripheral_type = IMX_DMATYPE_SDHC;
		host->dma_data.priority = DMA_PRIO_LOW;
		host->dma_data.dma_request = host->dmareq;
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		host->dma = dma_request_channel(mask, filter, host);
		if (host->dma)
			mmc->max_seg_size = dma_get_max_seg_size(
					host->dma->device->dev);
	}

	if (!host->dma)
		dev_info(mmc_dev(host->mmc), "dma not available. Using PIO\n");

	INIT_WORK(&host->datawork, mxcmci_datawork);

	ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host);
	if (ret)
		goto out_free_dma;

	platform_set_drvdata(pdev, mmc);

	if (host->pdata && host->pdata->init) {
		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,
				host->mmc);
		if (ret)
			goto out_free_irq;
	}

	mmc_add_host(mmc);

	return 0;

out_free_irq:
	free_irq(host->irq, host);
out_free_dma:
	if (host->dma)
		dma_release_channel(host->dma);
out_clk_put:
	clk_disable(host->clk);
	clk_put(host->clk);
out_iounmap:
	iounmap(host->base);
out_free:
	mmc_free_host(mmc);
out_release_mem:
	release_mem_region(iores->start, resource_size(iores));
	return ret;
}
/*
 * Remove: unregister from the MMC core first so no new requests arrive,
 * then release resources in the reverse order of probe.
 */
static int mxcmci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxcmci_host *host = mmc_priv(mmc);

	platform_set_drvdata(pdev, NULL);

	mmc_remove_host(mmc);

	if (host->vcc)
		regulator_put(host->vcc);

	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&pdev->dev, mmc);

	free_irq(host->irq, host);
	iounmap(host->base);

	if (host->dma)
		dma_release_channel(host->dma);

	clk_disable(host->clk);
	clk_put(host->clk);

	release_mem_region(host->res->start, resource_size(host->res));

	mmc_free_host(mmc);

	return 0;
}
#ifdef CONFIG_PM
/*
 * PM suspend: quiesce the MMC core, then gate the controller clock.
 *
 * NOTE(review): the "if (mmc)" guard is inconsistent -- host->clk is
 * dereferenced unconditionally, so a NULL mmc would already be fatal.
 * drvdata is always set in probe, so mmc presumably cannot be NULL here.
 */
static int mxcmci_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);
	clk_disable(host->clk);

	return ret;
}
/*
 * PM resume: ungate the controller clock before letting the MMC core
 * touch the hardware again (reverse order of suspend).
 */
static int mxcmci_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct mxcmci_host *host = mmc_priv(mmc);
	int ret = 0;

	clk_enable(host->clk);
	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
/* Legacy suspend/resume only; no runtime PM support. */
static const struct dev_pm_ops mxcmci_pm_ops = {
	.suspend	= mxcmci_suspend,
	.resume		= mxcmci_resume,
};
#endif
/* Platform driver glue; PM callbacks are compiled in only with CONFIG_PM. */
static struct platform_driver mxcmci_driver = {
	.probe		= mxcmci_probe,
	.remove		= mxcmci_remove,
	.driver		= {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &mxcmci_pm_ops,
#endif
	}
};

module_platform_driver(mxcmci_driver);

MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-mmc");
| gpl-2.0 |
barome/HD_mako | lib/raid6/algos.c | 5057 | 3744 | /* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 53 Temple Place Ste 330,
* Boston MA 02111-1307, USA; either version 2 of the License, or
* (at your option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* raid6/algos.c
*
* Algorithm list and algorithm selection for RAID-6
*/
#include <linux/raid/pq.h>
#include <linux/module.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/gfp.h>
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
#endif
/* The implementation selected at boot; filled in by raid6_select_algo(). */
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);

/*
 * Candidate syndrome implementations.  All are benchmarked at boot;
 * SIMD variants are only included when the architecture provides them
 * (and each also has a runtime ->valid() check).  NULL-terminated.
 */
const struct raid6_calls * const raid6_algos[] = {
	&raid6_intx1,
	&raid6_intx2,
	&raid6_intx4,
	&raid6_intx8,
#if defined(__ia64__)
	&raid6_intx16,
	&raid6_intx32,
#endif
#if defined(__i386__) && !defined(__arch_um__)
	&raid6_mmxx1,
	&raid6_mmxx2,
	&raid6_sse1x1,
	&raid6_sse1x2,
	&raid6_sse2x1,
	&raid6_sse2x2,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
	&raid6_sse2x1,
	&raid6_sse2x2,
	&raid6_sse2x4,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_altivec1,
	&raid6_altivec2,
	&raid6_altivec4,
	&raid6_altivec8,
#endif
	NULL
};
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2 4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2 9
#define time_before(x, y) ((x) < (y))
#endif
/* Try to pick the best algorithm */
/* This code uses the gfmul table as convenient data set to abuse */
/* Try to pick the best algorithm */
/* This code uses the gfmul table as convenient data set to abuse */

/*
 * Benchmark every valid candidate in raid6_algos[] against the read-only
 * gfmul tables and install the fastest (preference wins over raw speed)
 * into raid6_call.  Each candidate runs for 2^RAID6_TIME_JIFFIES_LG2
 * jiffies with preemption disabled, counting gen_syndrome() iterations.
 * Returns 0 on success, -ENOMEM or -EINVAL (no algorithm) on failure.
 */
int __init raid6_select_algo(void)
{
	const struct raid6_calls * const * algo;
	const struct raid6_calls * best;
	char *syndromes;
	void *dptrs[(65536/PAGE_SIZE)+2];
	int i, disks;
	unsigned long perf, bestperf;
	int bestprefer;
	unsigned long j0, j1;

	disks = (65536/PAGE_SIZE)+2;
	/* data "disks" alias pages of the static gfmul table */
	for ( i = 0 ; i < disks-2 ; i++ ) {
		dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
	}

	/* Normal code - use a 2-page allocation to avoid D$ conflict */
	syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);

	if ( !syndromes ) {
		printk("raid6: Yikes!  No memory available.\n");
		return -ENOMEM;
	}

	/* the last two "disks" receive the P and Q syndromes */
	dptrs[disks-2] = syndromes;
	dptrs[disks-1] = syndromes + PAGE_SIZE;

	bestperf = 0;  bestprefer = 0;  best = NULL;

	for ( algo = raid6_algos ; *algo ; algo++ ) {
		if ( !(*algo)->valid || (*algo)->valid() ) {
			perf = 0;

			preempt_disable();
			/* wait for a fresh jiffy so all runs time equally */
			j0 = jiffies;
			while ( (j1 = jiffies) == j0 )
				cpu_relax();
			while (time_before(jiffies,
					    j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
				(*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
				perf++;
			}
			preempt_enable();

			if ( (*algo)->prefer > bestprefer ||
			     ((*algo)->prefer == bestprefer &&
			      perf > bestperf) ) {
				best = *algo;
				bestprefer = best->prefer;
				bestperf = perf;
			}
			printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
			       (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
		}
	}

	if (best) {
		printk("raid6: using algorithm %s (%ld MB/s)\n",
		       best->name,
		       (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
		raid6_call = *best;
	} else
		printk("raid6: Yikes!  No algorithm found!\n");

	free_pages((unsigned long)syndromes, 1);

	return best ? 0 : -EINVAL;
}
/* Module exit hook: nothing to release, raid6_call is static state. */
static void raid6_exit(void)
{
	/* intentionally empty */
}
subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
| gpl-2.0 |
antmicro/linux-tk1 | arch/powerpc/platforms/cell/celleb_setup.c | 7617 | 6020 | /*
* Celleb setup code
*
* (C) Copyright 2006-2007 TOSHIBA CORPORATION
*
* This code is based on arch/powerpc/platforms/cell/setup.c:
* Copyright (C) 1995 Linus Torvalds
* Adapted from 'alpha' version by Gary Thomas
* Modified by Cort Dougan (cort@cs.nmt.edu)
* Modified by PPC64 Team, IBM Corp
* Modified by Cell Team, IBM Deutschland Entwicklung GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#undef DEBUG
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/of_platform.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/cell-regs.h>
#include "beat_interrupt.h"
#include "beat_wrapper.h"
#include "beat.h"
#include "celleb_pci.h"
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"
/* Machine string shown in /proc/cpuinfo; overridable on the command line. */
static char celleb_machine_type[128] = "Celleb";

/*
 * /proc/cpuinfo "machine" line: machine type plus the device-tree model.
 */
static void celleb_show_cpuinfo(struct seq_file *m)
{
	struct device_node *root;
	const char *model = "";

	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	/* using "CHRP" is to trick anaconda into installing FCx into Celleb */
	seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model);
	of_node_put(root);
}
/*
 * Early-param handler allowing the boot command line to override the
 * machine string reported in /proc/cpuinfo.
 */
static int __init celleb_machine_type_hack(char *ptr)
{
	strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type));
	return 0;
}

__setup("celleb_machine_type_hack=", celleb_machine_type_hack);
/*
 * ppc_md.progress hook: log a boot progress code and message.
 * Tolerates a NULL message pointer.
 */
static void celleb_progress(char *s, unsigned short hex)
{
	const char *msg = s ? s : "";

	printk("*** %04x : %s\n", hex, msg);
}
/* Arch setup shared by the Beat and native variants. */
static void __init celleb_setup_arch_common(void)
{
	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
}
/* Device-tree bus types probed for southbridge platform devices. */
static struct of_device_id celleb_bus_ids[] __initdata = {
	{ .type = "scc", },
	{ .type = "ioif", },	/* old style */
	{},
};
/* Register OF platform devices for the southbridge I/O buses. */
static int __init celleb_publish_devices(void)
{
	/* Publish OF platform devices for southbridge IOs */
	of_platform_bus_probe(NULL, celleb_bus_ids, NULL);

	return 0;
}
/* runs for both machine variants */
machine_device_initcall(celleb_beat, celleb_publish_devices);
machine_device_initcall(celleb_native, celleb_publish_devices);
/*
* functions for Celleb-Beat
*/
/* Arch setup when running under the Beat hypervisor: hv-backed SPU ops. */
static void __init celleb_setup_arch_beat(void)
{
#ifdef CONFIG_SPU_BASE
	spu_priv1_ops		= &spu_priv1_beat_ops;
	spu_management_ops	= &spu_management_of_ops;
#endif

	celleb_setup_arch_common();
}
/*
 * Match the flat device tree against the Beat hypervisor variant.
 * Returns 1 (and sets firmware features / hash-table ops) on a match.
 */
static int __init celleb_probe_beat(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (!of_flat_dt_is_compatible(root, "Beat"))
		return 0;

	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
		| FW_FEATURE_BEAT | FW_FEATURE_LPAR;
	hpte_init_beat_v3();

	return 1;
}
/*
* functions for Celleb-native
*/
/* Native variant: bring up the Cell IIC and Spider interrupt controllers. */
static void __init celleb_init_IRQ_native(void)
{
	iic_init_IRQ();
	spider_init_IRQ();
}
/*
 * Arch setup when running on bare metal: MMIO-backed SPU ops, Cell
 * register map, RAS, SMP and pervasive units, then the common setup.
 */
static void __init celleb_setup_arch_native(void)
{
#ifdef CONFIG_SPU_BASE
	spu_priv1_ops		= &spu_priv1_mmio_ops;
	spu_management_ops	= &spu_management_of_ops;
#endif

	cbe_regs_init();

#ifdef CONFIG_CBE_RAS
	cbe_ras_init();
#endif

#ifdef CONFIG_SMP
	smp_init_cell();
#endif

	cbe_pervasive_init();

	/* XXX: nvram initialization should be added */

	celleb_setup_arch_common();
}
/*
 * Match the flat device tree against the bare-metal Celleb variant:
 * must be a TOSHIBA,Celleb machine that is NOT running under Beat.
 */
static int __init celleb_probe_native(void)
{
	unsigned long root = of_get_flat_dt_root();

	if (of_flat_dt_is_compatible(root, "Beat") ||
	    !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
		return 0;

	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
	hpte_init_native();

	return 1;
}
/*
* machine definitions
*/
/* Machine description: Celleb under the Beat hypervisor. */
define_machine(celleb_beat) {
	.name			= "Cell Reference Set (Beat)",
	.probe			= celleb_probe_beat,
	.setup_arch		= celleb_setup_arch_beat,
	.show_cpuinfo		= celleb_show_cpuinfo,
	.restart		= beat_restart,
	.power_off		= beat_power_off,
	.halt			= beat_halt,
	.get_rtc_time		= beat_get_rtc_time,
	.set_rtc_time		= beat_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= celleb_progress,
	.power_save		= beat_power_save,
	.nvram_size		= beat_nvram_get_size,
	.nvram_read		= beat_nvram_read,
	.nvram_write		= beat_nvram_write,
	.set_dabr		= beat_set_xdabr,
	.init_IRQ		= beatic_init_IRQ,
	.get_irq		= beatic_get_irq,
	.pci_probe_mode 	= celleb_pci_probe_mode,
	.pci_setup_phb		= celleb_setup_phb,
#ifdef CONFIG_KEXEC
	.kexec_cpu_down		= beat_kexec_cpu_down,
#endif
};
/* Machine description: bare-metal Celleb, using RTAS for platform services. */
define_machine(celleb_native) {
	.name			= "Cell Reference Set (native)",
	.probe			= celleb_probe_native,
	.setup_arch		= celleb_setup_arch_native,
	.show_cpuinfo		= celleb_show_cpuinfo,
	.restart		= rtas_restart,
	.power_off		= rtas_power_off,
	.halt			= rtas_halt,
	.get_boot_time		= rtas_get_boot_time,
	.get_rtc_time		= rtas_get_rtc_time,
	.set_rtc_time		= rtas_set_rtc_time,
	.calibrate_decr		= generic_calibrate_decr,
	.progress		= celleb_progress,
	.pci_probe_mode 	= celleb_pci_probe_mode,
	.pci_setup_phb		= celleb_setup_phb,
	.init_IRQ		= celleb_init_IRQ_native,
};
| gpl-2.0 |
shane87/linux_holiday-ics | drivers/media/video/ivtv/ivtv-vbi.c | 8129 | 14874 | /*
Vertical Blank Interval support functions
Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ivtv-driver.h"
#include "ivtv-i2c.h"
#include "ivtv-ioctl.h"
#include "ivtv-queue.h"
#include "ivtv-cards.h"
#include "ivtv-vbi.h"
/* Program the SAA7127 output encoder with the current VPS payload.
 * enabled == 0 requests line 0, which stops VPS insertion.
 * No-op on cards without video output capability. */
static void ivtv_set_vps(struct ivtv *itv, int enabled)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_VPS;
data.field = 0;
/* VPS is transmitted on line 16; line 0 disables it */
data.line = enabled ? 16 : 0;
/* map the 5-byte stored payload onto the sparse VPS byte positions */
data.data[2] = itv->vbi.vps_payload.data[0];
data.data[8] = itv->vbi.vps_payload.data[1];
data.data[9] = itv->vbi.vps_payload.data[2];
data.data[10] = itv->vbi.vps_payload.data[3];
data.data[11] = itv->vbi.vps_payload.data[4];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
/* Send a closed-caption byte pair for each field to the SAA7127 encoder.
 * mode bit 0 enables the odd field, bit 1 the even field (line 0 = off).
 * No-op on cards without video output capability. */
static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
data.line = (mode & 1) ? 21 : 0;
data.data[0] = cc->odd[0];
data.data[1] = cc->odd[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
data.field = 1;
data.line = (mode & 2) ? 21 : 0;
data.data[0] = cc->even[0];
data.data[1] = cc->even[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
/* Program the Wide Screen Signalling (line 23) output on the SAA7127.
 * For 50 Hz output the signal is forced on (4x3 full format) even when
 * disabling was requested; see the comment below for the rationale.
 * No-op on cards without video output capability. */
static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
/* When using a 50 Hz system, always turn on the
wide screen signal with 4x3 ratio as the default.
Turning this signal on and off can confuse certain
TVs. As far as I can tell there is no reason not to
transmit this signal. */
if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
enabled = 1;
mode = 0x08; /* 4x3 full format */
}
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
data.line = enabled ? 23 : 0;
/* 14-bit WSS word, low byte first */
data.data[0] = mode & 0xff;
data.data[1] = (mode >> 8) & 0xff;
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
/* Return 1 when byte c contains an odd number of set bits (i.e. the byte
 * carries valid odd parity), 0 otherwise. */
static int odd_parity(u8 c)
{
	int parity = 0;

	while (c) {
		parity ^= 1;
		c &= c - 1;	/* clear the lowest set bit */
	}
	return parity;
}
/* Inspect one sliced VBI line destined for output. CC line 21 data is
 * collected into *cc (and *found_cc set); VPS (line 16) and WSS (line 23)
 * payloads are cached and, when they changed, the matching UPDATE flag is
 * raised so ivtv_vbi_work_handler() pushes them to the encoder. */
static void ivtv_write_vbi_line(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *d,
struct vbi_cc *cc, int *found_cc)
{
struct vbi_info *vi = &itv->vbi;
if (d->id == V4L2_SLICED_CAPTION_525 && d->line == 21) {
if (d->field) {
cc->even[0] = d->data[0];
cc->even[1] = d->data[1];
} else {
cc->odd[0] = d->data[0];
cc->odd[1] = d->data[1];
}
*found_cc = 1;
} else if (d->id == V4L2_SLICED_VPS && d->line == 16 && d->field == 0) {
struct vbi_vps vps;
/* pick the five meaningful VPS bytes out of the sparse layout */
vps.data[0] = d->data[2];
vps.data[1] = d->data[8];
vps.data[2] = d->data[9];
vps.data[3] = d->data[10];
vps.data[4] = d->data[11];
/* only schedule a hardware update when the payload changed */
if (memcmp(&vps, &vi->vps_payload, sizeof(vps))) {
vi->vps_payload = vps;
set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags);
}
} else if (d->id == V4L2_SLICED_WSS_625 &&
d->line == 23 && d->field == 0) {
int wss = d->data[0] | d->data[1] << 8;
if (vi->wss_payload != wss) {
vi->wss_payload = wss;
set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags);
}
}
}
/* Append one CC byte-pair set to the pending-output queue and flag the
 * work handler. Silently dropped when the queue is full (best effort). */
static void ivtv_write_vbi_cc_lines(struct ivtv *itv, const struct vbi_cc *cc)
{
struct vbi_info *vi = &itv->vbi;
if (vi->cc_payload_idx < ARRAY_SIZE(vi->cc_payload)) {
memcpy(&vi->cc_payload[vi->cc_payload_idx], cc,
sizeof(struct vbi_cc));
vi->cc_payload_idx++;
set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
}
}
static void ivtv_write_vbi(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
for (i = 0; i < cnt; i++)
ivtv_write_vbi_line(itv, sliced + i, &cc, &found_cc);
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
}
/* Same as ivtv_write_vbi() but for a userspace array: copy each element in
 * before processing. Returns the full byte count on success or -EFAULT if
 * any copy_from_user() fails (lines already processed are not undone). */
ssize_t
ivtv_write_vbi_from_user(struct ivtv *itv,
const struct v4l2_sliced_vbi_data __user *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
struct v4l2_sliced_vbi_data d;
ssize_t ret = cnt * sizeof(struct v4l2_sliced_vbi_data);
for (i = 0; i < cnt; i++) {
if (copy_from_user(&d, sliced + i,
sizeof(struct v4l2_sliced_vbi_data))) {
ret = -EFAULT;
break;
}
ivtv_write_vbi_line(itv, &d, &cc, &found_cc);
}
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
return ret;
}
/*
 * Pack the sliced VBI lines captured for the current frame into a private
 * MPEG-2 PES packet (stream id 0xbd) stored in sliced_mpeg_data[] so it can
 * be inserted into the encoder stream.  Payload starts with a magic: "itv0"
 * plus a 64-bit little-endian line mask, or "ITV0" (no mask) when all 36
 * lines are present.  The PTS is spliced into the PES header.
 *
 * Fix: the "ITV0" branch shifts the already-written line payload down by
 * 8 bytes within the same buffer; source and destination overlap, so this
 * must use memmove() — memcpy() on overlapping regions is undefined
 * behavior.
 */
static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
{
	int line = 0;
	int i;
	u32 linemask[2] = { 0, 0 };
	unsigned short size;
	static const u8 mpeg_hdr_data[] = {
		0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66,
		0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff,
		0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80,
		0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff
	};
	const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */
	int idx = itv->vbi.frame % IVTV_VBI_FRAMES;
	u8 *dst = &itv->vbi.sliced_mpeg_data[idx][0];

	for (i = 0; i < lines; i++) {
		int f, l;

		if (itv->vbi.sliced_data[i].id == 0)
			continue; /* skip empty slots */
		l = itv->vbi.sliced_data[i].line - 6;
		f = itv->vbi.sliced_data[i].field;
		if (f)
			l += 18;
		if (l < 32)
			linemask[0] |= (1 << l);
		else
			linemask[1] |= (1 << (l - 32));
		/* each line: 1 service byte + 42 payload bytes */
		dst[sd + 12 + line * 43] =
			ivtv_service2vbi(itv->vbi.sliced_data[i].id);
		memcpy(dst + sd + 12 + line * 43 + 1,
		       itv->vbi.sliced_data[i].data, 42);
		line++;
	}
	memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
	if (line == 36) {
		/* All lines are used, so there is no space for the linemask
		   (the max size of the VBI data is 36 * 43 + 4 bytes).
		   So in this case we use the magic number 'ITV0'. */
		memcpy(dst + sd, "ITV0", 4);
		/* overlapping shift-down by 8 bytes: must be memmove */
		memmove(dst + sd + 4, dst + sd + 12, line * 43);
		size = 4 + ((43 * line + 3) & ~3);
	} else {
		memcpy(dst + sd, "itv0", 4);
		cpu_to_le32s(&linemask[0]);
		cpu_to_le32s(&linemask[1]);
		memcpy(dst + sd + 4, &linemask[0], 8);
		size = 12 + ((43 * line + 3) & ~3);
	}
	/* patch PES packet length and the 33-bit PTS into the header */
	dst[4 + 16] = (size + 10) >> 8;
	dst[5 + 16] = (size + 10) & 0xff;
	dst[9 + 16] = 0x21 | ((pts_stamp >> 29) & 0x6);
	dst[10 + 16] = (pts_stamp >> 22) & 0xff;
	dst[11 + 16] = 1 | ((pts_stamp >> 14) & 0xff);
	dst[12 + 16] = (pts_stamp >> 7) & 0xff;
	dst[13 + 16] = 1 | ((pts_stamp & 0x7f) << 1);
	itv->vbi.sliced_mpeg_size[idx] = sd + size;
}
/* Decode an embedded "itv0"/"ITV0" VBI packet (the format produced by
 * copy_vbi_data()) back into sliced_dec_data[]. CC lines failing odd-parity
 * are dropped; unused trailing slots are zeroed. Returns the number of
 * bytes written to sliced_dec_data (always 36 entries' worth). */
static int ivtv_convert_ivtv_vbi(struct ivtv *itv, u8 *p)
{
u32 linemask[2];
int i, l, id2;
int line = 0;
if (!memcmp(p, "itv0", 4)) {
memcpy(linemask, p + 4, 8);
p += 12;
} else if (!memcmp(p, "ITV0", 4)) {
/* 'ITV0' means all 36 lines are present */
linemask[0] = 0xffffffff;
linemask[1] = 0xf;
p += 4;
} else {
/* unknown VBI data, convert to empty VBI frame */
linemask[0] = linemask[1] = 0;
}
for (i = 0; i < 36; i++) {
int err = 0;
if (i < 32 && !(linemask[0] & (1 << i)))
continue;
if (i >= 32 && !(linemask[1] & (1 << (i - 32))))
continue;
/* low nibble of the service byte selects the sliced type */
id2 = *p & 0xf;
switch (id2) {
case IVTV_SLICED_TYPE_TELETEXT_B:
id2 = V4L2_SLICED_TELETEXT_B;
break;
case IVTV_SLICED_TYPE_CAPTION_525:
id2 = V4L2_SLICED_CAPTION_525;
/* reject CC bytes with bad odd parity */
err = !odd_parity(p[1]) || !odd_parity(p[2]);
break;
case IVTV_SLICED_TYPE_VPS:
id2 = V4L2_SLICED_VPS;
break;
case IVTV_SLICED_TYPE_WSS_625:
id2 = V4L2_SLICED_WSS_625;
break;
default:
id2 = 0;
break;
}
if (err == 0) {
/* slots 0-17 = first field, 18-35 = second; lines start at 6 */
l = (i < 18) ? i + 6 : i - 18 + 6;
itv->vbi.sliced_dec_data[line].line = l;
itv->vbi.sliced_dec_data[line].field = i >= 18;
itv->vbi.sliced_dec_data[line].id = id2;
memcpy(itv->vbi.sliced_dec_data[line].data, p + 1, 42);
line++;
}
p += 43;
}
/* zero-fill the remaining slots so the caller always gets 36 entries */
while (line < 36) {
itv->vbi.sliced_dec_data[line].id = 0;
itv->vbi.sliced_dec_data[line].line = 0;
itv->vbi.sliced_dec_data[line].field = 0;
line++;
}
return line * sizeof(itv->vbi.sliced_dec_data[0]);
}
/* Compress raw VBI format, removes leading SAV codes and surplus space after the
field.
Returns new compressed size. */
static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
{
u32 line_size = itv->vbi.raw_decoder_line_size;
u32 lines = itv->vbi.count;
u8 sav1 = itv->vbi.raw_decoder_sav_odd_field;
u8 sav2 = itv->vbi.raw_decoder_sav_even_field;
u8 *q = buf;
u8 *p;
int i;
for (i = 0; i < lines; i++) {
p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || (p[3] != sav1 && p[3] != sav2)) {
break;
}
/* strip the 4-byte SAV prefix, compacting lines in place */
memcpy(q, p + 4, line_size - 4);
q += line_size - 4;
}
/* NOTE(review): the full size is returned even when the loop broke early,
so trailing lines may contain stale data — looks intentional for
compatibility, but confirm before changing. */
return lines * (line_size - 4);
}
/* Compressed VBI format, all found sliced blocks put next to one another
Returns new compressed size */
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
struct v4l2_decode_vbi_line vbi;
int i;
/* bitmask of VBI line numbers already stored; used for de-duplication */
unsigned lines = 0;
/* find the first valid line */
for (i = 0; i < size; i++, buf++) {
if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav)
break;
}
size -= i;
if (size < line_size) {
return line;
}
for (i = 0; i < size / line_size; i++) {
u8 *p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || p[3] != sav) {
continue;
}
/* let the video decoder subdev identify the sliced service */
vbi.p = p + 4;
v4l2_subdev_call(itv->sd_video, vbi, decode_vbi_line, &vbi);
/* keep only the first occurrence of each line number */
if (vbi.type && !(lines & (1 << vbi.line))) {
lines |= 1 << vbi.line;
itv->vbi.sliced_data[line].id = vbi.type;
itv->vbi.sliced_data[line].field = vbi.is_second_field;
itv->vbi.sliced_data[line].line = vbi.line;
memcpy(itv->vbi.sliced_data[line].data, vbi.p, 42);
line++;
}
}
return line;
}
/* Post-process a VBI buffer in place. Three cases:
 * - encoder raw VBI: compress, and tag the second field with the frame
 * number for legacy software;
 * - encoder sliced VBI: compress both fields into sliced_data[], optionally
 * build the embedded MPEG VBI packet;
 * - decoder sliced VBI (re-inserted from an MPEG stream): byte-swap,
 * decode the itv0/ITV0 packet and push the lines to the output encoder. */
void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf,
u64 pts_stamp, int streamtype)
{
u8 *p = (u8 *) buf->buf;
u32 size = buf->bytesused;
int y;
/* Raw VBI data */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI && ivtv_raw_vbi(itv)) {
u8 type;
ivtv_buf_swap(buf);
/* SAV code of the first line identifies which field this is */
type = p[3];
size = buf->bytesused = compress_raw_buf(itv, p, size);
/* second field of the frame? */
if (type == itv->vbi.raw_decoder_sav_even_field) {
/* Dirty hack needed for backwards
compatibility of old VBI software. */
p += size - 4;
memcpy(p, &itv->vbi.frame, 4);
itv->vbi.frame++;
}
return;
}
/* Sliced VBI data with data insertion */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI) {
int lines;
ivtv_buf_swap(buf);
/* first field */
lines = compress_sliced_buf(itv, 0, p, size / 2,
itv->vbi.sliced_decoder_sav_odd_field);
/* second field */
/* experimentation shows that the second half does not always begin
at the exact address. So start a bit earlier (hence 32). */
lines = compress_sliced_buf(itv, lines, p + size / 2 - 32, size / 2 + 32,
itv->vbi.sliced_decoder_sav_even_field);
/* always return at least one empty line */
if (lines == 0) {
itv->vbi.sliced_data[0].id = 0;
itv->vbi.sliced_data[0].line = 0;
itv->vbi.sliced_data[0].field = 0;
lines = 1;
}
buf->bytesused = size = lines * sizeof(itv->vbi.sliced_data[0]);
memcpy(p, &itv->vbi.sliced_data[0], size);
if (itv->vbi.insert_mpeg) {
copy_vbi_data(itv, lines, pts_stamp);
}
itv->vbi.frame++;
return;
}
/* Sliced VBI re-inserted from an MPEG stream */
if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) {
/* If the size is not 4-byte aligned, then the starting address
for the swapping is also shifted. After swapping the data the
real start address of the VBI data is exactly 4 bytes after the
original start. It's a bit fiddly but it works like a charm.
Non-4-byte alignment happens when an lseek is done on the input
mpeg file to a non-4-byte aligned position. So on arrival here
the VBI data is also non-4-byte aligned. */
int offset = size & 3;
int cnt;
if (offset) {
p += 4 - offset;
}
/* Swap Buffer */
for (y = 0; y < size; y += 4) {
swab32s((u32 *)(p + y));
}
cnt = ivtv_convert_ivtv_vbi(itv, p + offset);
memcpy(buf->buf, itv->vbi.sliced_dec_data, cnt);
buf->bytesused = cnt;
ivtv_write_vbi(itv, itv->vbi.sliced_dec_data,
cnt / sizeof(itv->vbi.sliced_dec_data[0]));
return;
}
}
void ivtv_disable_cc(struct ivtv *itv)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 0, &cc);
itv->vbi.cc_payload_idx = 0;
}
/* Deferred-work handler that keeps the VBI output in sync. In passthrough
 * mode it polls the capture decoder for WSS (50 Hz) or CC (60 Hz) and
 * mirrors it to the output, tolerating up to 4 missed reads before forcing
 * a default. Otherwise it flushes any pending WSS/CC/VPS updates that
 * ivtv_write_vbi_line() flagged. */
void ivtv_vbi_work_handler(struct ivtv *itv)
{
struct vbi_info *vi = &itv->vbi;
struct v4l2_sliced_vbi_data data;
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
/* Lock */
if (itv->output_mode == OUT_PASSTHROUGH) {
if (itv->is_50hz) {
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
ivtv_set_wss(itv, 1, data.data[0] & 0xf);
vi->wss_missing_cnt = 0;
} else if (vi->wss_missing_cnt == 4) {
/* input lost for several frames: fall back to a default */
ivtv_set_wss(itv, 1, 0x8); /* 4x3 full format */
} else {
vi->wss_missing_cnt++;
}
}
else {
int mode = 0;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 1;
cc.odd[0] = data.data[0];
cc.odd[1] = data.data[1];
}
data.field = 1;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 2;
cc.even[0] = data.data[0];
cc.even[1] = data.data[1];
}
if (mode) {
vi->cc_missing_cnt = 0;
ivtv_set_cc(itv, mode, &cc);
} else if (vi->cc_missing_cnt == 4) {
/* no captions seen for a while: disable CC output */
ivtv_set_cc(itv, 0, &cc);
} else {
vi->cc_missing_cnt++;
}
}
return;
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags)) {
ivtv_set_wss(itv, 1, vi->wss_payload & 0xf);
}
if (test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags)) {
if (vi->cc_payload_idx == 0) {
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 3, &cc);
}
/* emit at most one queued CC pair per invocation, skipping filler */
while (vi->cc_payload_idx) {
cc = vi->cc_payload[0];
memcpy(vi->cc_payload, vi->cc_payload + 1,
sizeof(vi->cc_payload) - sizeof(vi->cc_payload[0]));
vi->cc_payload_idx--;
if (vi->cc_payload_idx && cc.odd[0] == 0x80 && cc.odd[1] == 0x80)
continue;
ivtv_set_cc(itv, 3, &cc);
break;
}
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags)) {
ivtv_set_vps(itv, 1);
}
}
| gpl-2.0 |
omasab/linux | drivers/media/video/ivtv/ivtv-vbi.c | 8129 | 14874 | /*
Vertical Blank Interval support functions
Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ivtv-driver.h"
#include "ivtv-i2c.h"
#include "ivtv-ioctl.h"
#include "ivtv-queue.h"
#include "ivtv-cards.h"
#include "ivtv-vbi.h"
/* Program the SAA7127 output encoder with the current VPS payload.
 * enabled == 0 requests line 0, which stops VPS insertion.
 * No-op on cards without video output capability. */
static void ivtv_set_vps(struct ivtv *itv, int enabled)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_VPS;
data.field = 0;
/* VPS is transmitted on line 16; line 0 disables it */
data.line = enabled ? 16 : 0;
/* map the 5-byte stored payload onto the sparse VPS byte positions */
data.data[2] = itv->vbi.vps_payload.data[0];
data.data[8] = itv->vbi.vps_payload.data[1];
data.data[9] = itv->vbi.vps_payload.data[2];
data.data[10] = itv->vbi.vps_payload.data[3];
data.data[11] = itv->vbi.vps_payload.data[4];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
/* Send a closed-caption byte pair for each field to the SAA7127 encoder.
 * mode bit 0 enables the odd field, bit 1 the even field (line 0 = off).
 * No-op on cards without video output capability. */
static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
data.line = (mode & 1) ? 21 : 0;
data.data[0] = cc->odd[0];
data.data[1] = cc->odd[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
data.field = 1;
data.line = (mode & 2) ? 21 : 0;
data.data[0] = cc->even[0];
data.data[1] = cc->even[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
/* Program the Wide Screen Signalling (line 23) output on the SAA7127.
 * For 50 Hz output the signal is forced on (4x3 full format) even when
 * disabling was requested; see the comment below for the rationale.
 * No-op on cards without video output capability. */
static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
/* When using a 50 Hz system, always turn on the
wide screen signal with 4x3 ratio as the default.
Turning this signal on and off can confuse certain
TVs. As far as I can tell there is no reason not to
transmit this signal. */
if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
enabled = 1;
mode = 0x08; /* 4x3 full format */
}
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
data.line = enabled ? 23 : 0;
/* 14-bit WSS word, low byte first */
data.data[0] = mode & 0xff;
data.data[1] = (mode >> 8) & 0xff;
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
/* Return 1 when byte c contains an odd number of set bits (i.e. the byte
 * carries valid odd parity), 0 otherwise. */
static int odd_parity(u8 c)
{
	int parity = 0;

	while (c) {
		parity ^= 1;
		c &= c - 1;	/* clear the lowest set bit */
	}
	return parity;
}
/* Inspect one sliced VBI line destined for output. CC line 21 data is
 * collected into *cc (and *found_cc set); VPS (line 16) and WSS (line 23)
 * payloads are cached and, when they changed, the matching UPDATE flag is
 * raised so ivtv_vbi_work_handler() pushes them to the encoder. */
static void ivtv_write_vbi_line(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *d,
struct vbi_cc *cc, int *found_cc)
{
struct vbi_info *vi = &itv->vbi;
if (d->id == V4L2_SLICED_CAPTION_525 && d->line == 21) {
if (d->field) {
cc->even[0] = d->data[0];
cc->even[1] = d->data[1];
} else {
cc->odd[0] = d->data[0];
cc->odd[1] = d->data[1];
}
*found_cc = 1;
} else if (d->id == V4L2_SLICED_VPS && d->line == 16 && d->field == 0) {
struct vbi_vps vps;
/* pick the five meaningful VPS bytes out of the sparse layout */
vps.data[0] = d->data[2];
vps.data[1] = d->data[8];
vps.data[2] = d->data[9];
vps.data[3] = d->data[10];
vps.data[4] = d->data[11];
/* only schedule a hardware update when the payload changed */
if (memcmp(&vps, &vi->vps_payload, sizeof(vps))) {
vi->vps_payload = vps;
set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags);
}
} else if (d->id == V4L2_SLICED_WSS_625 &&
d->line == 23 && d->field == 0) {
int wss = d->data[0] | d->data[1] << 8;
if (vi->wss_payload != wss) {
vi->wss_payload = wss;
set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags);
}
}
}
/* Append one CC byte-pair set to the pending-output queue and flag the
 * work handler. Silently dropped when the queue is full (best effort). */
static void ivtv_write_vbi_cc_lines(struct ivtv *itv, const struct vbi_cc *cc)
{
struct vbi_info *vi = &itv->vbi;
if (vi->cc_payload_idx < ARRAY_SIZE(vi->cc_payload)) {
memcpy(&vi->cc_payload[vi->cc_payload_idx], cc,
sizeof(struct vbi_cc));
vi->cc_payload_idx++;
set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
}
}
static void ivtv_write_vbi(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
for (i = 0; i < cnt; i++)
ivtv_write_vbi_line(itv, sliced + i, &cc, &found_cc);
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
}
/* Same as ivtv_write_vbi() but for a userspace array: copy each element in
 * before processing. Returns the full byte count on success or -EFAULT if
 * any copy_from_user() fails (lines already processed are not undone). */
ssize_t
ivtv_write_vbi_from_user(struct ivtv *itv,
const struct v4l2_sliced_vbi_data __user *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
struct v4l2_sliced_vbi_data d;
ssize_t ret = cnt * sizeof(struct v4l2_sliced_vbi_data);
for (i = 0; i < cnt; i++) {
if (copy_from_user(&d, sliced + i,
sizeof(struct v4l2_sliced_vbi_data))) {
ret = -EFAULT;
break;
}
ivtv_write_vbi_line(itv, &d, &cc, &found_cc);
}
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
return ret;
}
/*
 * Pack the sliced VBI lines captured for the current frame into a private
 * MPEG-2 PES packet (stream id 0xbd) stored in sliced_mpeg_data[] so it can
 * be inserted into the encoder stream.  Payload starts with a magic: "itv0"
 * plus a 64-bit little-endian line mask, or "ITV0" (no mask) when all 36
 * lines are present.  The PTS is spliced into the PES header.
 *
 * Fix: the "ITV0" branch shifts the already-written line payload down by
 * 8 bytes within the same buffer; source and destination overlap, so this
 * must use memmove() — memcpy() on overlapping regions is undefined
 * behavior.
 */
static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
{
	int line = 0;
	int i;
	u32 linemask[2] = { 0, 0 };
	unsigned short size;
	static const u8 mpeg_hdr_data[] = {
		0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66,
		0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff,
		0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80,
		0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff
	};
	const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */
	int idx = itv->vbi.frame % IVTV_VBI_FRAMES;
	u8 *dst = &itv->vbi.sliced_mpeg_data[idx][0];

	for (i = 0; i < lines; i++) {
		int f, l;

		if (itv->vbi.sliced_data[i].id == 0)
			continue; /* skip empty slots */
		l = itv->vbi.sliced_data[i].line - 6;
		f = itv->vbi.sliced_data[i].field;
		if (f)
			l += 18;
		if (l < 32)
			linemask[0] |= (1 << l);
		else
			linemask[1] |= (1 << (l - 32));
		/* each line: 1 service byte + 42 payload bytes */
		dst[sd + 12 + line * 43] =
			ivtv_service2vbi(itv->vbi.sliced_data[i].id);
		memcpy(dst + sd + 12 + line * 43 + 1,
		       itv->vbi.sliced_data[i].data, 42);
		line++;
	}
	memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
	if (line == 36) {
		/* All lines are used, so there is no space for the linemask
		   (the max size of the VBI data is 36 * 43 + 4 bytes).
		   So in this case we use the magic number 'ITV0'. */
		memcpy(dst + sd, "ITV0", 4);
		/* overlapping shift-down by 8 bytes: must be memmove */
		memmove(dst + sd + 4, dst + sd + 12, line * 43);
		size = 4 + ((43 * line + 3) & ~3);
	} else {
		memcpy(dst + sd, "itv0", 4);
		cpu_to_le32s(&linemask[0]);
		cpu_to_le32s(&linemask[1]);
		memcpy(dst + sd + 4, &linemask[0], 8);
		size = 12 + ((43 * line + 3) & ~3);
	}
	/* patch PES packet length and the 33-bit PTS into the header */
	dst[4 + 16] = (size + 10) >> 8;
	dst[5 + 16] = (size + 10) & 0xff;
	dst[9 + 16] = 0x21 | ((pts_stamp >> 29) & 0x6);
	dst[10 + 16] = (pts_stamp >> 22) & 0xff;
	dst[11 + 16] = 1 | ((pts_stamp >> 14) & 0xff);
	dst[12 + 16] = (pts_stamp >> 7) & 0xff;
	dst[13 + 16] = 1 | ((pts_stamp & 0x7f) << 1);
	itv->vbi.sliced_mpeg_size[idx] = sd + size;
}
/* Decode an embedded "itv0"/"ITV0" VBI packet (the format produced by
 * copy_vbi_data()) back into sliced_dec_data[]. CC lines failing odd-parity
 * are dropped; unused trailing slots are zeroed. Returns the number of
 * bytes written to sliced_dec_data (always 36 entries' worth). */
static int ivtv_convert_ivtv_vbi(struct ivtv *itv, u8 *p)
{
u32 linemask[2];
int i, l, id2;
int line = 0;
if (!memcmp(p, "itv0", 4)) {
memcpy(linemask, p + 4, 8);
p += 12;
} else if (!memcmp(p, "ITV0", 4)) {
/* 'ITV0' means all 36 lines are present */
linemask[0] = 0xffffffff;
linemask[1] = 0xf;
p += 4;
} else {
/* unknown VBI data, convert to empty VBI frame */
linemask[0] = linemask[1] = 0;
}
for (i = 0; i < 36; i++) {
int err = 0;
if (i < 32 && !(linemask[0] & (1 << i)))
continue;
if (i >= 32 && !(linemask[1] & (1 << (i - 32))))
continue;
/* low nibble of the service byte selects the sliced type */
id2 = *p & 0xf;
switch (id2) {
case IVTV_SLICED_TYPE_TELETEXT_B:
id2 = V4L2_SLICED_TELETEXT_B;
break;
case IVTV_SLICED_TYPE_CAPTION_525:
id2 = V4L2_SLICED_CAPTION_525;
/* reject CC bytes with bad odd parity */
err = !odd_parity(p[1]) || !odd_parity(p[2]);
break;
case IVTV_SLICED_TYPE_VPS:
id2 = V4L2_SLICED_VPS;
break;
case IVTV_SLICED_TYPE_WSS_625:
id2 = V4L2_SLICED_WSS_625;
break;
default:
id2 = 0;
break;
}
if (err == 0) {
/* slots 0-17 = first field, 18-35 = second; lines start at 6 */
l = (i < 18) ? i + 6 : i - 18 + 6;
itv->vbi.sliced_dec_data[line].line = l;
itv->vbi.sliced_dec_data[line].field = i >= 18;
itv->vbi.sliced_dec_data[line].id = id2;
memcpy(itv->vbi.sliced_dec_data[line].data, p + 1, 42);
line++;
}
p += 43;
}
/* zero-fill the remaining slots so the caller always gets 36 entries */
while (line < 36) {
itv->vbi.sliced_dec_data[line].id = 0;
itv->vbi.sliced_dec_data[line].line = 0;
itv->vbi.sliced_dec_data[line].field = 0;
line++;
}
return line * sizeof(itv->vbi.sliced_dec_data[0]);
}
/* Compress raw VBI format, removes leading SAV codes and surplus space after the
field.
Returns new compressed size. */
static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
{
u32 line_size = itv->vbi.raw_decoder_line_size;
u32 lines = itv->vbi.count;
u8 sav1 = itv->vbi.raw_decoder_sav_odd_field;
u8 sav2 = itv->vbi.raw_decoder_sav_even_field;
u8 *q = buf;
u8 *p;
int i;
for (i = 0; i < lines; i++) {
p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || (p[3] != sav1 && p[3] != sav2)) {
break;
}
/* strip the 4-byte SAV prefix, compacting lines in place */
memcpy(q, p + 4, line_size - 4);
q += line_size - 4;
}
/* NOTE(review): the full size is returned even when the loop broke early,
so trailing lines may contain stale data — looks intentional for
compatibility, but confirm before changing. */
return lines * (line_size - 4);
}
/* Compressed VBI format, all found sliced blocks put next to one another
Returns new compressed size */
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
struct v4l2_decode_vbi_line vbi;
int i;
/* bitmask of VBI line numbers already stored; used for de-duplication */
unsigned lines = 0;
/* find the first valid line */
for (i = 0; i < size; i++, buf++) {
if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav)
break;
}
size -= i;
if (size < line_size) {
return line;
}
for (i = 0; i < size / line_size; i++) {
u8 *p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || p[3] != sav) {
continue;
}
/* let the video decoder subdev identify the sliced service */
vbi.p = p + 4;
v4l2_subdev_call(itv->sd_video, vbi, decode_vbi_line, &vbi);
/* keep only the first occurrence of each line number */
if (vbi.type && !(lines & (1 << vbi.line))) {
lines |= 1 << vbi.line;
itv->vbi.sliced_data[line].id = vbi.type;
itv->vbi.sliced_data[line].field = vbi.is_second_field;
itv->vbi.sliced_data[line].line = vbi.line;
memcpy(itv->vbi.sliced_data[line].data, vbi.p, 42);
line++;
}
}
return line;
}
/* Post-process a VBI buffer in place. Three cases:
 * - encoder raw VBI: compress, and tag the second field with the frame
 * number for legacy software;
 * - encoder sliced VBI: compress both fields into sliced_data[], optionally
 * build the embedded MPEG VBI packet;
 * - decoder sliced VBI (re-inserted from an MPEG stream): byte-swap,
 * decode the itv0/ITV0 packet and push the lines to the output encoder. */
void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf,
u64 pts_stamp, int streamtype)
{
u8 *p = (u8 *) buf->buf;
u32 size = buf->bytesused;
int y;
/* Raw VBI data */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI && ivtv_raw_vbi(itv)) {
u8 type;
ivtv_buf_swap(buf);
/* SAV code of the first line identifies which field this is */
type = p[3];
size = buf->bytesused = compress_raw_buf(itv, p, size);
/* second field of the frame? */
if (type == itv->vbi.raw_decoder_sav_even_field) {
/* Dirty hack needed for backwards
compatibility of old VBI software. */
p += size - 4;
memcpy(p, &itv->vbi.frame, 4);
itv->vbi.frame++;
}
return;
}
/* Sliced VBI data with data insertion */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI) {
int lines;
ivtv_buf_swap(buf);
/* first field */
lines = compress_sliced_buf(itv, 0, p, size / 2,
itv->vbi.sliced_decoder_sav_odd_field);
/* second field */
/* experimentation shows that the second half does not always begin
at the exact address. So start a bit earlier (hence 32). */
lines = compress_sliced_buf(itv, lines, p + size / 2 - 32, size / 2 + 32,
itv->vbi.sliced_decoder_sav_even_field);
/* always return at least one empty line */
if (lines == 0) {
itv->vbi.sliced_data[0].id = 0;
itv->vbi.sliced_data[0].line = 0;
itv->vbi.sliced_data[0].field = 0;
lines = 1;
}
buf->bytesused = size = lines * sizeof(itv->vbi.sliced_data[0]);
memcpy(p, &itv->vbi.sliced_data[0], size);
if (itv->vbi.insert_mpeg) {
copy_vbi_data(itv, lines, pts_stamp);
}
itv->vbi.frame++;
return;
}
/* Sliced VBI re-inserted from an MPEG stream */
if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) {
/* If the size is not 4-byte aligned, then the starting address
for the swapping is also shifted. After swapping the data the
real start address of the VBI data is exactly 4 bytes after the
original start. It's a bit fiddly but it works like a charm.
Non-4-byte alignment happens when an lseek is done on the input
mpeg file to a non-4-byte aligned position. So on arrival here
the VBI data is also non-4-byte aligned. */
int offset = size & 3;
int cnt;
if (offset) {
p += 4 - offset;
}
/* Swap Buffer */
for (y = 0; y < size; y += 4) {
swab32s((u32 *)(p + y));
}
cnt = ivtv_convert_ivtv_vbi(itv, p + offset);
memcpy(buf->buf, itv->vbi.sliced_dec_data, cnt);
buf->bytesused = cnt;
ivtv_write_vbi(itv, itv->vbi.sliced_dec_data,
cnt / sizeof(itv->vbi.sliced_dec_data[0]));
return;
}
}
void ivtv_disable_cc(struct ivtv *itv)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 0, &cc);
itv->vbi.cc_payload_idx = 0;
}
/* Deferred-work handler that keeps the VBI output in sync. In passthrough
 * mode it polls the capture decoder for WSS (50 Hz) or CC (60 Hz) and
 * mirrors it to the output, tolerating up to 4 missed reads before forcing
 * a default. Otherwise it flushes any pending WSS/CC/VPS updates that
 * ivtv_write_vbi_line() flagged. */
void ivtv_vbi_work_handler(struct ivtv *itv)
{
struct vbi_info *vi = &itv->vbi;
struct v4l2_sliced_vbi_data data;
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
/* Lock */
if (itv->output_mode == OUT_PASSTHROUGH) {
if (itv->is_50hz) {
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
ivtv_set_wss(itv, 1, data.data[0] & 0xf);
vi->wss_missing_cnt = 0;
} else if (vi->wss_missing_cnt == 4) {
/* input lost for several frames: fall back to a default */
ivtv_set_wss(itv, 1, 0x8); /* 4x3 full format */
} else {
vi->wss_missing_cnt++;
}
}
else {
int mode = 0;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 1;
cc.odd[0] = data.data[0];
cc.odd[1] = data.data[1];
}
data.field = 1;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 2;
cc.even[0] = data.data[0];
cc.even[1] = data.data[1];
}
if (mode) {
vi->cc_missing_cnt = 0;
ivtv_set_cc(itv, mode, &cc);
} else if (vi->cc_missing_cnt == 4) {
/* no captions seen for a while: disable CC output */
ivtv_set_cc(itv, 0, &cc);
} else {
vi->cc_missing_cnt++;
}
}
return;
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags)) {
ivtv_set_wss(itv, 1, vi->wss_payload & 0xf);
}
if (test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags)) {
if (vi->cc_payload_idx == 0) {
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 3, &cc);
}
/* emit at most one queued CC pair per invocation, skipping filler */
while (vi->cc_payload_idx) {
cc = vi->cc_payload[0];
memcpy(vi->cc_payload, vi->cc_payload + 1,
sizeof(vi->cc_payload) - sizeof(vi->cc_payload[0]));
vi->cc_payload_idx--;
if (vi->cc_payload_idx && cc.odd[0] == 0x80 && cc.odd[1] == 0x80)
continue;
ivtv_set_cc(itv, 3, &cc);
break;
}
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags)) {
ivtv_set_vps(itv, 1);
}
}
| gpl-2.0 |
NooNameR/qsd8x50-bravo- | drivers/block/paride/friq.c | 15553 | 6365 | /*
friq.c (c) 1998 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License
friq.c is a low-level protocol driver for the Freecom "IQ"
parallel port IDE adapter. Early versions of this adapter
use the 'frpw' protocol.
Freecom uses this adapter in a battery powered external
CD-ROM drive. It is also used in LS-120 drives by
Maxell and Panasonic, and other devices.
The battery powered drive requires software support to
control the power to the drive. This module enables the
drive power when the high level driver (pcd) is loaded
and disables it when the module is unloaded. Note, if
the friq module is built in to the kernel, the power
will never be switched off, so other means should be
used to conserve battery power.
*/
/* Changes:
1.01 GRG 1998.12.20 Added support for soft power switch
*/
#define FRIQ_VERSION "1.01"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>
#include "paride.h"
/* Emit the Freecom IQ command prologue on the data port, then command byte x
   (each byte written twice).
   NOTE(review): multi-statement macro not wrapped in do { } while (0);
   currently safe because every use site is a bare statement — confirm
   before using it inside an unbraced if/else. */
#define CMD(x) w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\
w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x);
/* Reassemble a byte from two nibble reads: high nibble of h, low nibble
   taken from the high nibble of l. */
#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0))
/* cont = 0 - access the IDE register file
cont = 1 - access the IDE command set
*/
/* Address-space offset added to the register number: 0x08 selects the IDE
   register file, 0x10 the ATAPI command set. */
static int cont_map[2] = { 0x08, 0x10 };
/* Read one IDE register through the adapter in nibble mode: the byte comes
 * back as two 4-bit status reads recombined by j44(). */
static int friq_read_regr( PIA *pi, int cont, int regr )
{ int h,l,r;
r = regr + cont_map[cont];
CMD(r);
w2(6); l = r1();
w2(4); h = r1();
w2(4);
return j44(l,h);
}
/* Write one IDE register: select it with CMD, put the value on the data
 * lines, then strobe the control port to latch it. */
static void friq_write_regr( PIA *pi, int cont, int regr, int val)
{ int r;
r = regr + cont_map[cont];
CMD(r);
w0(val);
w2(5);w2(7);w2(5);w2(4);
}
/* Bulk read of 'count' bytes from register 'regr', using the transfer
 * method selected by pi->mode: 0 = 4-bit nibble, 1 = 8-bit with toggling
 * strobe, 2/3/4 = EPP 8/16/32-bit. The EPP modes read the final bytes
 * individually after switching the control port. */
static void friq_read_block_int( PIA *pi, char * buf, int count, int regr )
{ int h, l, k, ph;
switch(pi->mode) {
case 0: CMD(regr);
for (k=0;k<count;k++) {
w2(6); l = r1();
w2(4); h = r1();
buf[k] = j44(l,h);
}
w2(4);
break;
case 1: ph = 2;
CMD(regr+0xc0);
w0(0xff);
for (k=0;k<count;k++) {
w2(0xa4 + ph);
buf[k] = r0();
/* alternate the strobe bit each byte */
ph = 2 - ph;
}
w2(0xac); w2(0xa4); w2(4);
break;
case 2: CMD(regr+0x80);
for (k=0;k<count-2;k++) buf[k] = r4();
w2(0xac); w2(0xa4);
buf[count-2] = r4();
buf[count-1] = r4();
w2(4);
break;
case 3: CMD(regr+0x80);
for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
w2(0xac); w2(0xa4);
buf[count-2] = r4();
buf[count-1] = r4();
w2(4);
break;
case 4: CMD(regr+0x80);
for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
buf[count-4] = r4();
buf[count-3] = r4();
w2(0xac); w2(0xa4);
buf[count-2] = r4();
buf[count-1] = r4();
w2(4);
break;
}
}
/* Standard paride read_block hook: bulk read from the data register (0x08). */
static void friq_read_block( PIA *pi, char * buf, int count)
{ friq_read_block_int(pi,buf,count,0x08);
}
/* Bulk write of 'count' bytes to the data register, per pi->mode:
 * 0/1 = byte-at-a-time with strobes, 2/3/4 = EPP 8/16/32-bit. */
static void friq_write_block( PIA *pi, char * buf, int count )
{ int k;
switch(pi->mode) {
case 0:
case 1: CMD(8); w2(5);
for (k=0;k<count;k++) {
w0(buf[k]);
w2(7);w2(5);
}
w2(4);
break;
case 2: CMD(0xc8); w2(5);
for (k=0;k<count;k++) w4(buf[k]);
w2(4);
break;
case 3: CMD(0xc8); w2(5);
for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
w2(4);
break;
case 4: CMD(0xc8); w2(5);
for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
w2(4);
break;
}
}
/* Claim the parallel port: save current data/control register values so
 * friq_disconnect() can restore them. */
static void friq_connect ( PIA *pi )
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(4);
}
/* Release the parallel port: send the adapter disconnect command and
 * restore the registers saved by friq_connect(). */
static void friq_disconnect ( PIA *pi )
{ CMD(0x20);
w0(pi->saved_r0);
w2(pi->saved_r2);
}
/* Probe whether a Freecom IQ adapter responds at this port/mode: powers the
 * drive on (CMD 0x3d), does register write/read-back tests against both
 * drive selects, then a 512-byte block read whose first 128 bytes must be
 * a counting pattern. Returns nonzero on failure. */
static int friq_test_proto( PIA *pi, char * scratch, int verbose )
{ int j, k, r;
int e[2] = {0,0};
pi->saved_r0 = r0();
w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */
udelay(500);
w0(pi->saved_r0);
friq_connect(pi);
for (j=0;j<2;j++) {
friq_write_regr(pi,0,6,0xa0+j*0x10);
for (k=0;k<256;k++) {
/* write complementary patterns and verify read-back */
friq_write_regr(pi,0,2,k^0xaa);
friq_write_regr(pi,0,3,k^0x55);
if (friq_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
}
}
friq_disconnect(pi);
friq_connect(pi);
friq_read_block_int(pi,scratch,512,0x10);
r = 0;
for (k=0;k<128;k++) if (scratch[k] != k) r++;
friq_disconnect(pi);
if (verbose) {
printk("%s: friq: port 0x%x, mode %d, test=(%d,%d,%d)\n",
pi->device,pi->port,pi->mode,e[0],e[1],r);
}
/* fail if the block test failed, or both drive selects mis-read */
return (r || (e[0] && e[1]));
}
/* Log the detected adapter, mark the drive as powered (pi->private = 1 so
 * friq_release_proto() knows to power it off), and disable the adapter's
 * sleep timer. */
static void friq_log_adapter( PIA *pi, char * scratch, int verbose )
{ char *mode_string[6] = {"4-bit","8-bit",
"EPP-8","EPP-16","EPP-32"};
printk("%s: friq %s, Freecom IQ ASIC-2 adapter at 0x%x, ", pi->device,
FRIQ_VERSION,pi->port);
printk("mode %d (%s), delay %d\n",pi->mode,
mode_string[pi->mode],pi->delay);
pi->private = 1;
friq_connect(pi);
CMD(0x9e); /* disable sleep timer */
friq_disconnect(pi);
}
/* Counterpart of friq_log_adapter(): if we powered the drive on, switch the
 * power off again when the protocol is released (module unload). */
static void friq_release_proto( PIA *pi)
{
if (pi->private) { /* turn off the power */
friq_connect(pi);
CMD(0x1d); CMD(0x1e);
friq_disconnect(pi);
pi->private = 0;
}
}
/* paride protocol descriptor: modes 0-4 (EPP starting at mode 2),
 * single unit, default delay 1. */
static struct pi_protocol friq = {
.owner = THIS_MODULE,
.name = "friq",
.max_mode = 5,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = friq_write_regr,
.read_regr = friq_read_regr,
.write_block = friq_write_block,
.read_block = friq_read_block,
.connect = friq_connect,
.disconnect = friq_disconnect,
.test_proto = friq_test_proto,
.log_adapter = friq_log_adapter,
.release_proto = friq_release_proto,
};
/* Module entry point: register the protocol with the paride core. */
static int __init friq_init(void)
{
	return paride_register(&friq);
}
/* Module exit point: unregister the protocol. */
static void __exit friq_exit(void)
{
	paride_unregister(&friq);
}
MODULE_LICENSE("GPL");
module_init(friq_init)
module_exit(friq_exit)
| gpl-2.0 |
Dees-Troy/android_kernel_goldfish | fs/nls/nls_base.c | 194 | 14131 | /*
* linux/fs/nls/nls_base.c
*
* Native language support--charsets and unicode translations.
* By Gordon Chaffee 1996, 1997
*
* Unicode based case conversion 1999 by Wolfram Pienkoss
*
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
/* Singly-linked list of registered NLS tables.  The built-in "default"
 * identity table (defined below) is always the final entry. */
static struct nls_table default_table;
static struct nls_table *tables = &default_table;
static DEFINE_SPINLOCK(nls_lock);	/* protects the list above */
/*
* Sample implementation from Unicode home page.
* http://www.stonehand.com/unicode/standard/fss-utf.html
*/
/* One row per UTF-8 sequence length (1..6 bytes). */
struct utf8_table {
	int     cmask;	/* mask applied to the lead byte */
	int     cval;	/* expected value of (lead & cmask) for this length */
	int     shift;	/* total bits carried by the continuation bytes */
	long    lmask;	/* mask selecting the valid code-point bits */
	long    lval;	/* smallest code point needing this length (overlong check) */
};
/* Lead-byte patterns and code-point ranges; terminated by cmask == 0. */
static const struct utf8_table utf8_table[] =
{
    {0x80,  0x00,   0*6,    0x7F,           0,         /* 1 byte sequence */},
    {0xE0,  0xC0,   1*6,    0x7FF,          0x80,      /* 2 byte sequence */},
    {0xF0,  0xE0,   2*6,    0xFFFF,         0x800,     /* 3 byte sequence */},
    {0xF8,  0xF0,   3*6,    0x1FFFFF,       0x10000,   /* 4 byte sequence */},
    {0xFC,  0xF8,   4*6,    0x3FFFFFF,      0x200000,  /* 5 byte sequence */},
    {0xFE,  0xFC,   5*6,    0x7FFFFFFF,     0x4000000, /* 6 byte sequence */},
    {0,						       /* end of table    */}
};
/*
 * Decode one UTF-8 sequence from @s (at most @n bytes) into *@p.
 * Returns the number of bytes consumed, or -1 for a malformed,
 * truncated, or overlong sequence.
 */
int
utf8_mbtowc(wchar_t *p, const __u8 *s, int n)
{
	const struct utf8_table *t;
	int lead, cont, len;
	long code;

	lead = *s;
	code = lead;
	len = 0;
	for (t = utf8_table; t->cmask; t++) {
		len++;
		if ((lead & t->cmask) == t->cval) {
			code &= t->lmask;
			if (code < t->lval)	/* reject overlong encodings */
				return -1;
			*p = code;
			return len;
		}
		if (n <= len)			/* input exhausted mid-sequence */
			return -1;
		s++;
		cont = (*s ^ 0x80) & 0xFF;
		if (cont & 0xC0)		/* not a 10xxxxxx continuation */
			return -1;
		code = (code << 6) | cont;
	}
	return -1;
}
/*
 * Convert a NUL-terminated UTF-8 string (reading at most @n bytes) to
 * 16-bit wide characters.  Undecodable bytes are skipped.  Returns the
 * number of wide characters produced.
 */
int
utf8_mbstowcs(wchar_t *pwcs, const __u8 *s, int n)
{
	__u16 *out;
	const __u8 *in;
	int len;

	out = pwcs;
	in = s;
	while (*in && n > 0) {
		if (!(*in & 0x80)) {
			/* plain ASCII byte: copy through */
			*out++ = *in++;
			n--;
			continue;
		}
		len = utf8_mbtowc(out, in, n);
		if (len == -1) {
			/* Ignore character and move on */
			in++;
			n--;
		} else {
			out++;
			in += len;
			n -= len;
		}
	}
	return out - pwcs;
}
/*
 * Encode wide character @wc as UTF-8 into @s, writing at most @maxlen
 * bytes.  Returns the number of bytes written, 0 when @s is NULL, or
 * -1 when the character does not fit.
 */
int
utf8_wctomb(__u8 *s, wchar_t wc, int maxlen)
{
	const struct utf8_table *t;
	long code = wc;
	int shift, count = 0;

	if (!s)
		return 0;

	for (t = utf8_table; t->cmask && maxlen; t++, maxlen--) {
		count++;
		if (code > t->lmask)
			continue;	/* needs a longer sequence */
		/* emit the lead byte, then continuation bytes high-to-low */
		shift = t->shift;
		*s = t->cval | (code >> shift);
		while (shift > 0) {
			shift -= 6;
			s++;
			*s = 0x80 | ((code >> shift) & 0x3F);
		}
		return count;
	}
	return -1;
}
/*
 * Convert a NUL-terminated wide-character string to UTF-8, writing at
 * most @maxlen bytes into @s.  Unrepresentable characters are skipped.
 * Returns the number of bytes written (no NUL terminator is added).
 */
int
utf8_wcstombs(__u8 *s, const wchar_t *pwcs, int maxlen)
{
	const __u16 *ip;
	__u8 *op;
	int size;

	op = s;
	ip = pwcs;
	while (*ip && maxlen > 0) {
		if (*ip > 0x7f) {
			size = utf8_wctomb(op, *ip, maxlen);
			if (size == -1) {
				/* Ignore character and move on */
				maxlen--;
			} else {
				op += size;
				maxlen -= size;
			}
		} else {
			*op++ = (__u8) *ip;
			/* Fix: the ASCII path consumed output space without
			 * decrementing maxlen, so a run of ASCII characters
			 * could write past the caller's buffer bound. */
			maxlen--;
		}
		ip++;
	}
	return (op - s);
}
/*
 * Add @nls to the head of the table list.  Returns -EBUSY if the table
 * is already linked (non-NULL next) or already present in the list.
 */
int register_nls(struct nls_table *nls)
{
	struct nls_table **cur;

	if (nls->next)
		return -EBUSY;

	spin_lock(&nls_lock);
	for (cur = &tables; *cur; cur = &(*cur)->next) {
		if (nls == *cur) {
			spin_unlock(&nls_lock);
			return -EBUSY;
		}
	}
	nls->next = tables;
	tables = nls;
	spin_unlock(&nls_lock);
	return 0;
}
/*
 * Unlink @nls from the table list.  Returns 0 on success or -EINVAL
 * if the table was not registered.
 */
int unregister_nls(struct nls_table *nls)
{
	struct nls_table **cur;
	int err = -EINVAL;

	spin_lock(&nls_lock);
	for (cur = &tables; *cur; cur = &(*cur)->next) {
		if (nls == *cur) {
			*cur = nls->next;
			err = 0;
			break;
		}
	}
	spin_unlock(&nls_lock);
	return err;
}
/*
 * Look up a registered table by charset name or alias, taking a module
 * reference on success.  Returns NULL when not found or when the
 * owning module is going away.
 */
static struct nls_table *find_nls(char *charset)
{
	struct nls_table *cur;

	spin_lock(&nls_lock);
	for (cur = tables; cur; cur = cur->next) {
		if (!strcmp(cur->charset, charset) ||
		    (cur->alias && !strcmp(cur->alias, charset)))
			break;
	}
	if (cur && !try_module_get(cur->owner))
		cur = NULL;
	spin_unlock(&nls_lock);
	return cur;
}
/*
 * Look up an NLS table by charset name or alias, auto-loading the
 * "nls_<charset>" module on first use if it is not yet registered.
 * Caller must balance with unload_nls().
 */
struct nls_table *load_nls(char *charset)
{
	return try_then_request_module(find_nls(charset), "nls_%s", charset);
}
/* Drop the module reference taken by load_nls()/find_nls(). */
void unload_nls(struct nls_table *nls)
{
	module_put(nls->owner);
}
/* Identity charset->Unicode mapping for the built-in default table:
 * byte 0xNN maps to code point U+00NN. */
static const wchar_t charset2uni[256] = {
	/* 0x00*/
	0x0000, 0x0001, 0x0002, 0x0003,
	0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b,
	0x000c, 0x000d, 0x000e, 0x000f,
	/* 0x10*/
	0x0010, 0x0011, 0x0012, 0x0013,
	0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b,
	0x001c, 0x001d, 0x001e, 0x001f,
	/* 0x20*/
	0x0020, 0x0021, 0x0022, 0x0023,
	0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b,
	0x002c, 0x002d, 0x002e, 0x002f,
	/* 0x30*/
	0x0030, 0x0031, 0x0032, 0x0033,
	0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b,
	0x003c, 0x003d, 0x003e, 0x003f,
	/* 0x40*/
	0x0040, 0x0041, 0x0042, 0x0043,
	0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b,
	0x004c, 0x004d, 0x004e, 0x004f,
	/* 0x50*/
	0x0050, 0x0051, 0x0052, 0x0053,
	0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b,
	0x005c, 0x005d, 0x005e, 0x005f,
	/* 0x60*/
	0x0060, 0x0061, 0x0062, 0x0063,
	0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b,
	0x006c, 0x006d, 0x006e, 0x006f,
	/* 0x70*/
	0x0070, 0x0071, 0x0072, 0x0073,
	0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b,
	0x007c, 0x007d, 0x007e, 0x007f,
	/* 0x80*/
	0x0080, 0x0081, 0x0082, 0x0083,
	0x0084, 0x0085, 0x0086, 0x0087,
	0x0088, 0x0089, 0x008a, 0x008b,
	0x008c, 0x008d, 0x008e, 0x008f,
	/* 0x90*/
	0x0090, 0x0091, 0x0092, 0x0093,
	0x0094, 0x0095, 0x0096, 0x0097,
	0x0098, 0x0099, 0x009a, 0x009b,
	0x009c, 0x009d, 0x009e, 0x009f,
	/* 0xa0*/
	0x00a0, 0x00a1, 0x00a2, 0x00a3,
	0x00a4, 0x00a5, 0x00a6, 0x00a7,
	0x00a8, 0x00a9, 0x00aa, 0x00ab,
	0x00ac, 0x00ad, 0x00ae, 0x00af,
	/* 0xb0*/
	0x00b0, 0x00b1, 0x00b2, 0x00b3,
	0x00b4, 0x00b5, 0x00b6, 0x00b7,
	0x00b8, 0x00b9, 0x00ba, 0x00bb,
	0x00bc, 0x00bd, 0x00be, 0x00bf,
	/* 0xc0*/
	0x00c0, 0x00c1, 0x00c2, 0x00c3,
	0x00c4, 0x00c5, 0x00c6, 0x00c7,
	0x00c8, 0x00c9, 0x00ca, 0x00cb,
	0x00cc, 0x00cd, 0x00ce, 0x00cf,
	/* 0xd0*/
	0x00d0, 0x00d1, 0x00d2, 0x00d3,
	0x00d4, 0x00d5, 0x00d6, 0x00d7,
	0x00d8, 0x00d9, 0x00da, 0x00db,
	0x00dc, 0x00dd, 0x00de, 0x00df,
	/* 0xe0*/
	0x00e0, 0x00e1, 0x00e2, 0x00e3,
	0x00e4, 0x00e5, 0x00e6, 0x00e7,
	0x00e8, 0x00e9, 0x00ea, 0x00eb,
	0x00ec, 0x00ed, 0x00ee, 0x00ef,
	/* 0xf0*/
	0x00f0, 0x00f1, 0x00f2, 0x00f3,
	0x00f4, 0x00f5, 0x00f6, 0x00f7,
	0x00f8, 0x00f9, 0x00fa, 0x00fb,
	0x00fc, 0x00fd, 0x00fe, 0x00ff,
};
/* Identity Unicode->charset mapping for code points U+0000..U+00FF. */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
/* Per-high-byte page table; only page 0x00 exists, so all code points
 * above U+00FF map to NULL (untranslatable). */
static const unsigned char *const page_uni2charset[256] = {
	page00
};
/* ASCII-only lower-casing table: identity except 0x41-0x5a ('A'-'Z')
 * map to 0x61-0x7a ('a'-'z'). */
static const unsigned char charset2lower[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
/* ASCII-only upper-casing table: identity except 0x61-0x7a ('a'-'z')
 * map to 0x41-0x5a ('A'-'Z'). */
static const unsigned char charset2upper[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * Default-table charset -> Unicode conversion.  Always stores the
 * looked-up value in *@uni; returns 1 on success or -EINVAL when the
 * byte maps to U+0000 (unmapped).
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	wchar_t code = charset2uni[*rawstring];

	*uni = code;
	return (code == 0x0000) ? -EINVAL : 1;
}
/* Built-in identity (Latin-1-like) table; permanent tail of the list. */
static struct nls_table default_table = {
	.charset	= "default",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
};
/* Returns a simple default translation table */
struct nls_table *load_nls_default(void)
{
struct nls_table *default_nls;
default_nls = load_nls(CONFIG_NLS_DEFAULT);
if (default_nls != NULL)
return default_nls;
else
return &default_table;
}
EXPORT_SYMBOL(register_nls);
EXPORT_SYMBOL(unregister_nls);
EXPORT_SYMBOL(unload_nls);
EXPORT_SYMBOL(load_nls);
EXPORT_SYMBOL(load_nls_default);
EXPORT_SYMBOL(utf8_mbtowc);
EXPORT_SYMBOL(utf8_mbstowcs);
EXPORT_SYMBOL(utf8_wctomb);
EXPORT_SYMBOL(utf8_wcstombs);
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
j-r0dd/motus_kernel | kernel/sched_idletask.c | 194 | 2966 | /*
* idle-task scheduling class.
*
* (NOTE: these are not related to SCHED_IDLE tasks which are
* handled in sched_fair.c)
*/
#ifdef CONFIG_SMP
/* Idle tasks always stay on their own CPU. */
static int select_task_rq_idle(struct task_struct *p, int sync)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
/*
* Idle tasks are unconditionally rescheduled:
*/
/* Any runnable task preempts the idle task unconditionally. */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
{
	resched_task(rq->idle);
}
/* There is exactly one candidate: the per-CPU idle task. */
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);	/* count idle entries */
	return rq->idle;
}
/*
* It is not legal to sleep in the idle task - print a warning
* message if some code attempts to do it:
*/
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	/* Drop the rq lock around printk/dump_stack, then re-take it so
	 * the caller's locking expectations still hold. */
	spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	spin_lock_irq(&rq->lock);
}
/* Nothing to account for when the idle task is switched out. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
#ifdef CONFIG_SMP
/* Idle tasks carry no load, so nothing can be pulled from this class. */
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return 0;
}
/* Idle tasks are never migrated, so there is never a task to move. */
static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
#endif
/* No per-tick accounting for the idle task. */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
/* No state to refresh when the idle task becomes rq->curr. */
static void set_curr_task_idle(struct rq *rq)
{
}
/* Called when @p is switched into this scheduling class. */
static void switched_to_idle(struct rq *rq, struct task_struct *p,
			     int running)
{
	/* Can this actually happen?? */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}
/* Called when the priority of an idle-class task changes. */
static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/* This can happen for hot plug CPUS */

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
/* Scheduling-class operations for the per-CPU idle tasks. */
static const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,

	.load_balance		= load_balance_idle,
	.move_one_task		= move_one_task_idle,
#endif

	.set_curr_task          = set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,

	/* no .task_new for idle tasks */
};
| gpl-2.0 |
XCage15/linux-1 | security/commoncap.c | 194 | 28791 | /* Common capabilities, needed by capability.o.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/capability.h>
#include <linux/audit.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/ptrace.h>
#include <linux/xattr.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/personality.h>
/*
* If a non-root user executes a setuid-root binary in
* !secure(SECURE_NOROOT) mode, then we raise capabilities.
* However if fE is also set, then the intent is for only
* the file capabilities to be applied, and the setuid-root
* bit is left on either to change the uid (plausible) or
* to get full privilege on a kernel without file capabilities
* support. So in that case we do not raise capabilities.
*
* Warn if that happens, once per boot.
*/
/*
 * Warn (once per boot) about a binary that mixes setuid-root with
 * effective file capabilities; see the comment above for why such
 * binaries do not get full capability raising.
 */
static void warn_setuid_and_fcaps_mixed(const char *fname)
{
	static int warned;

	if (warned)
		return;

	printk(KERN_INFO "warning: `%s' has both setuid-root and"
	       " effective capabilities. Therefore not raising all"
	       " capabilities.\n", fname);
	warned = 1;
}
/* Capability hook for netlink sends: always permitted. */
int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
/**
* cap_capable - Determine whether a task has a particular effective capability
* @cred: The credentials to use
* @ns: The user namespace in which we need the capability
* @cap: The capability to check for
* @audit: Whether to write an audit message or not
*
* Determine whether the nominated task has the specified capability amongst
* its effective set, returning 0 if it does, -ve if it does not.
*
* NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
* and has_capability() functions. That is, it has the reverse semantics:
* cap_has_capability() returns 0 when a task has a capability, but the
* kernel's capable() and has_capability() returns 1 for this case.
*/
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
		int cap, int audit)
{
	struct user_namespace *ns = targ_ns;

	/* See if cred has the capability in the target user namespace
	 * by examining the target user namespace and all of the target
	 * user namespace's parents.
	 */
	for (;;) {
		/* Do we have the necessary capabilities? */
		if (ns == cred->user_ns)
			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;

		/* Have we tried all of the parent namespaces?
		 * init_user_ns is the root of the hierarchy, so stop here. */
		if (ns == &init_user_ns)
			return -EPERM;

		/*
		 * The owner of the user namespace in the parent of the
		 * user namespace has all caps.
		 */
		if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
			return 0;

		/*
		 * If you have a capability in a parent user ns, then you have
		 * it over all children user namespaces as well.
		 */
		ns = ns->parent;
	}

	/* We never get here */
}
/**
* cap_settime - Determine whether the current process may set the system clock
* @ts: The time to set
* @tz: The timezone to set
*
* Determine whether the current process may set the system clock and timezone
* information, returning 0 if permission granted, -ve if denied.
*/
/*
 * Setting the system clock/timezone requires CAP_SYS_TIME; return 0
 * when permitted, -EPERM otherwise.
 */
int cap_settime(const struct timespec *ts, const struct timezone *tz)
{
	return capable(CAP_SYS_TIME) ? 0 : -EPERM;
}
/**
* cap_ptrace_access_check - Determine whether the current process may access
* another
* @child: The process to be accessed
* @mode: The mode of attachment.
*
* If we are in the same or an ancestor user_ns and have all the target
* task's capabilities, then ptrace access is allowed.
* If we have the ptrace capability to the target user_ns, then ptrace
* access is allowed.
* Else denied.
*
* Determine whether a process may access another, returning 0 if permission
* granted, -ve if denied.
*/
int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
	int ret = 0;
	const struct cred *cred, *child_cred;

	/* RCU protects the cred pointers while we compare them. */
	rcu_read_lock();
	cred = current_cred();
	child_cred = __task_cred(child);
	/* same user_ns and a superset of the child's permitted caps */
	if (cred->user_ns == child_cred->user_ns &&
	    cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
		goto out;
	/* or CAP_SYS_PTRACE over the child's user_ns */
	if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
		goto out;
	ret = -EPERM;
out:
	rcu_read_unlock();
	return ret;
}
/**
* cap_ptrace_traceme - Determine whether another process may trace the current
* @parent: The task proposed to be the tracer
*
* If parent is in the same or an ancestor user_ns and has all current's
* capabilities, then ptrace access is allowed.
* If parent has the ptrace capability to current's user_ns, then ptrace
* access is allowed.
* Else denied.
*
* Determine whether the nominated task is permitted to trace the current
* process, returning 0 if permission is granted, -ve if denied.
*/
int cap_ptrace_traceme(struct task_struct *parent)
{
	int ret = 0;
	const struct cred *cred, *child_cred;

	/* RCU protects the cred pointers while we compare them. */
	rcu_read_lock();
	cred = __task_cred(parent);
	child_cred = current_cred();
	/* parent in same user_ns with a superset of our permitted caps */
	if (cred->user_ns == child_cred->user_ns &&
	    cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
		goto out;
	/* or parent holds CAP_SYS_PTRACE over our user_ns */
	if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE))
		goto out;
	ret = -EPERM;
out:
	rcu_read_unlock();
	return ret;
}
/**
* cap_capget - Retrieve a task's capability sets
* @target: The task from which to retrieve the capability sets
* @effective: The place to record the effective set
* @inheritable: The place to record the inheritable set
* @permitted: The place to record the permitted set
*
* This function retrieves the capabilities of the nominated task and returns
* them to the caller.
*/
int cap_capget(struct task_struct *target, kernel_cap_t *effective,
	       kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
	const struct cred *cred;

	/* Derived from kernel/capability.c:sys_capget. */
	rcu_read_lock();	/* cred is only stable under RCU */
	cred = __task_cred(target);
	*effective   = cred->cap_effective;
	*inheritable = cred->cap_inheritable;
	*permitted   = cred->cap_permitted;
	rcu_read_unlock();
	return 0;
}
/*
* Determine whether the inheritable capabilities are limited to the old
* permitted set. Returns 1 if they are limited, 0 if they are not.
*/
/*
 * Determine whether the inheritable capabilities are limited to the old
 * permitted set: they are capped unless the current task holds
 * CAP_SETPCAP (cap_capable() returns 0 when the cap is held).
 */
static inline int cap_inh_is_capped(void)
{
	return cap_capable(current_cred(), current_cred()->user_ns,
			   CAP_SETPCAP, SECURITY_CAP_AUDIT) != 0;
}
/**
* cap_capset - Validate and apply proposed changes to current's capabilities
* @new: The proposed new credentials; alterations should be made here
* @old: The current task's current credentials
* @effective: A pointer to the proposed new effective capabilities set
* @inheritable: A pointer to the proposed new inheritable capabilities set
* @permitted: A pointer to the proposed new permitted capabilities set
*
* This function validates and applies a proposed mass change to the current
* process's capability sets. The changes are made to the proposed new
* credentials, and assuming no error, will be committed by the caller of LSM.
*/
int cap_capset(struct cred *new,
	       const struct cred *old,
	       const kernel_cap_t *effective,
	       const kernel_cap_t *inheritable,
	       const kernel_cap_t *permitted)
{
	/* new pI must stay within old pI | pP (unless CAP_SETPCAP held) */
	if (cap_inh_is_capped() &&
	    !cap_issubset(*inheritable,
			  cap_combine(old->cap_inheritable,
				      old->cap_permitted)))
		/* incapable of using this inheritable set */
		return -EPERM;

	if (!cap_issubset(*inheritable,
			  cap_combine(old->cap_inheritable,
				      old->cap_bset)))
		/* no new pI capabilities outside bounding set */
		return -EPERM;

	/* verify restrictions on target's new Permitted set */
	if (!cap_issubset(*permitted, old->cap_permitted))
		return -EPERM;

	/* verify the _new_Effective_ is a subset of the _new_Permitted_ */
	if (!cap_issubset(*effective, *permitted))
		return -EPERM;

	/* all checks passed: commit to the proposed credentials */
	new->cap_effective   = *effective;
	new->cap_inheritable = *inheritable;
	new->cap_permitted   = *permitted;
	return 0;
}
/*
* Clear proposed capability sets for execve().
*/
/*
 * Clear proposed capability sets for execve(): empty the proposed
 * permitted set and drop the effective flag.
 */
static inline void bprm_clear_caps(struct linux_binprm *bprm)
{
	cap_clear(bprm->cred->cap_permitted);
	bprm->cap_effective = false;
}
/**
* cap_inode_need_killpriv - Determine if inode change affects privileges
* @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV
*
* Determine if an inode having a change applied that's marked ATTR_KILL_PRIV
* affects the security markings on that inode, and if it is, should
* inode_killpriv() be invoked or the change rejected?
*
* Returns 0 if granted; +ve if granted, but inode_killpriv() is required; and
* -ve to deny the change.
*/
/*
 * Report whether @dentry carries a security.capability xattr that an
 * ATTR_KILL_PRIV change must erase.  Returns 1 when the xattr exists
 * (inode_killpriv() required), 0 otherwise.
 */
int cap_inode_need_killpriv(struct dentry *dentry)
{
	struct inode *inode = d_backing_inode(dentry);
	int error;

	if (!inode->i_op->getxattr)
		return 0;

	/* size-probe call: a positive length means the xattr is present */
	error = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, NULL, 0);
	return error > 0;
}
/**
* cap_inode_killpriv - Erase the security markings on an inode
* @dentry: The inode/dentry to alter
*
* Erase the privilege-enhancing security markings on an inode.
*
* Returns 0 if successful, -ve on error.
*/
int cap_inode_killpriv(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
if (!inode->i_op->removexattr)
return 0;
return inode->i_op->removexattr(dentry, XATTR_NAME_CAPS);
}
/*
* Calculate the new process capability sets from the capability sets attached
* to a file.
*/
/*
 * Calculate the new process capability sets from the capability sets
 * attached to a file.  Sets *@effective when the file's effective bit
 * is on and *@has_cap when any capability revision bits are present.
 */
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
					  struct linux_binprm *bprm,
					  bool *effective,
					  bool *has_cap)
{
	struct cred *new = bprm->cred;
	unsigned i;
	int ret = 0;

	if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
		*effective = true;

	if (caps->magic_etc & VFS_CAP_REVISION_MASK)
		*has_cap = true;

	CAP_FOR_EACH_U32(i) {
		__u32 permitted = caps->permitted.cap[i];
		__u32 inheritable = caps->inheritable.cap[i];

		/*
		 * pP' = (X & fP) | (pI & fI)
		 */
		new->cap_permitted.cap[i] =
			(new->cap_bset.cap[i] & permitted) |
			(new->cap_inheritable.cap[i] & inheritable);

		if (permitted & ~new->cap_permitted.cap[i])
			/* insufficient to execute correctly */
			ret = -EPERM;
	}

	/*
	 * For legacy apps, with no internal support for recognizing they
	 * do not have enough capabilities, we return an error if they are
	 * missing some "forced" (aka file-permitted) capabilities.
	 */
	return *effective ? ret : 0;
}
/*
* Extract the on-exec-apply capability sets for an executable file.
*/
/*
 * Extract the on-exec-apply capability sets for an executable file
 * from its security.capability xattr into *@cpu_caps (host byte
 * order).  Returns 0 on success, -ENODATA when there is no xattr (or
 * the fs does not support them), -EINVAL on a malformed xattr.
 */
int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps)
{
	struct inode *inode = d_backing_inode(dentry);
	__u32 magic_etc;
	unsigned tocopy, i;
	int size;
	struct vfs_cap_data caps;

	memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));

	if (!inode || !inode->i_op->getxattr)
		return -ENODATA;

	size = inode->i_op->getxattr((struct dentry *)dentry, XATTR_NAME_CAPS, &caps,
				     XATTR_CAPS_SZ);
	if (size == -ENODATA || size == -EOPNOTSUPP)
		/* no data, that's ok */
		return -ENODATA;
	if (size < 0)
		return size;

	if (size < sizeof(magic_etc))
		return -EINVAL;

	/* xattr data is little-endian on disk */
	cpu_caps->magic_etc = magic_etc = le32_to_cpu(caps.magic_etc);

	switch (magic_etc & VFS_CAP_REVISION_MASK) {
	case VFS_CAP_REVISION_1:
		if (size != XATTR_CAPS_SZ_1)
			return -EINVAL;
		tocopy = VFS_CAP_U32_1;
		break;
	case VFS_CAP_REVISION_2:
		if (size != XATTR_CAPS_SZ_2)
			return -EINVAL;
		tocopy = VFS_CAP_U32_2;
		break;
	default:
		return -EINVAL;
	}

	CAP_FOR_EACH_U32(i) {
		if (i >= tocopy)
			break;
		cpu_caps->permitted.cap[i] = le32_to_cpu(caps.data[i].permitted);
		cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable);
	}

	/* mask off bits beyond the last defined capability */
	cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;
	cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK;

	return 0;
}
/*
* Attempt to get the on-exec apply capability sets for an executable file from
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
/*
 * Attempt to get the on-exec apply capability sets for an executable file from
 * its xattrs and, if present, apply them to the proposed credentials being
 * constructed by execve().  File caps are skipped (not an error) when
 * globally disabled or when the mount is nosuid.
 */
static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
{
	int rc = 0;
	struct cpu_vfs_cap_data vcaps;

	bprm_clear_caps(bprm);

	if (!file_caps_enabled)
		return 0;

	if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
		return 0;

	rc = get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps);
	if (rc < 0) {
		if (rc == -EINVAL)
			printk(KERN_NOTICE "%s: get_vfs_caps_from_disk returned %d for %s\n",
			       __func__, rc, bprm->filename);
		else if (rc == -ENODATA)
			rc = 0;	/* no file caps at all: not an error */
		goto out;
	}

	rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
	if (rc == -EINVAL)
		printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
		       __func__, rc, bprm->filename);

out:
	/* on any failure, leave the proposed caps empty */
	if (rc)
		bprm_clear_caps(bprm);

	return rc;
}
/**
* cap_bprm_set_creds - Set up the proposed credentials for execve().
* @bprm: The execution parameters, including the proposed creds
*
* Set up the proposed credentials for a new execution context being
* constructed by execve(). The proposed creds in @bprm->cred is altered,
* which won't take effect immediately. Returns 0 if successful, -ve on error.
*/
int cap_bprm_set_creds(struct linux_binprm *bprm)
{
	const struct cred *old = current_cred();
	struct cred *new = bprm->cred;
	bool effective, has_cap = false;
	int ret;
	kuid_t root_uid;

	effective = false;
	/* start from the file's capability xattr, if any */
	ret = get_file_caps(bprm, &effective, &has_cap);
	if (ret < 0)
		return ret;

	root_uid = make_kuid(new->user_ns, 0);

	if (!issecure(SECURE_NOROOT)) {
		/*
		 * If the legacy file capability is set, then don't set privs
		 * for a setuid root binary run by a non-root user.  Do set it
		 * for a root user just to cause least surprise to an admin.
		 */
		if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) {
			warn_setuid_and_fcaps_mixed(bprm->filename);
			goto skip;
		}
		/*
		 * To support inheritance of root-permissions and suid-root
		 * executables under compatibility mode, we override the
		 * capability sets for the file.
		 *
		 * If only the real uid is 0, we do not set the effective bit.
		 */
		if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) {
			/* pP' = (cap_bset & ~0) | (pI & ~0) */
			new->cap_permitted = cap_combine(old->cap_bset,
							 old->cap_inheritable);
		}
		if (uid_eq(new->euid, root_uid))
			effective = true;
	}
skip:

	/* if we have fs caps, clear dangerous personality flags */
	if (!cap_issubset(new->cap_permitted, old->cap_permitted))
		bprm->per_clear |= PER_CLEAR_ON_SETID;


	/* Don't let someone trace a set[ug]id/setpcap binary with the revised
	 * credentials unless they have the appropriate permit.
	 *
	 * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
	 */
	if ((!uid_eq(new->euid, old->uid) ||
	     !gid_eq(new->egid, old->gid) ||
	     !cap_issubset(new->cap_permitted, old->cap_permitted)) &&
	    bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
		/* downgrade; they get no more than they had, and maybe less */
		if (!capable(CAP_SETUID) ||
		    (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
			new->euid = new->uid;
			new->egid = new->gid;
		}
		new->cap_permitted = cap_intersect(new->cap_permitted,
						   old->cap_permitted);
	}

	/* saved and fs ids follow the (possibly downgraded) effective ids */
	new->suid = new->fsuid = new->euid;
	new->sgid = new->fsgid = new->egid;

	if (effective)
		new->cap_effective = new->cap_permitted;
	else
		cap_clear(new->cap_effective);
	bprm->cap_effective = effective;

	/*
	 * Audit candidate if current->cap_effective is set
	 *
	 * We do not bother to audit if 3 things are true:
	 *   1) cap_effective has all caps
	 *   2) we are root
	 *   3) root is supposed to have all caps (SECURE_NOROOT)
	 * Since this is just a normal root execing a process.
	 *
	 * Number 1 above might fail if you don't have a full bset, but I think
	 * that is interesting information to audit.
	 */
	if (!cap_isclear(new->cap_effective)) {
		if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
		    !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
		    issecure(SECURE_NOROOT)) {
			ret = audit_log_bprm_fcaps(bprm, new, old);
			if (ret < 0)
				return ret;
		}
	}

	/* KEEP_CAPS does not survive an exec */
	new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
	return 0;
}
/**
 * cap_bprm_secureexec - Determine whether a secure execution is required
 * @bprm: The execution parameters
 *
 * Determine whether a secure execution is required, return 1 if it is, and 0
 * if it is not.
 *
 * The credentials have been committed by this point, and so are no longer
 * available through @bprm->cred.
 */
int cap_bprm_secureexec(struct linux_binprm *bprm)
{
	const struct cred *cred = current_cred();
	kuid_t root_uid = make_kuid(cred->user_ns, 0);

	/* A non-root caller that gained effective caps on exec, or that
	 * still holds any permitted caps, must be run securely. */
	if (!uid_eq(cred->uid, root_uid) &&
	    (bprm->cap_effective || !cap_isclear(cred->cap_permitted)))
		return 1;

	/* Otherwise a secure exec is needed only if exec changed our ids. */
	if (!uid_eq(cred->euid, cred->uid))
		return 1;
	return !gid_eq(cred->egid, cred->gid);
}
/**
 * cap_inode_setxattr - Determine whether an xattr may be altered
 * @dentry: The inode/dentry being altered
 * @name: The name of the xattr to be changed
 * @value: The value that the xattr will be changed to
 * @size: The size of value
 * @flags: The replacement flag
 *
 * Determine whether an xattr may be altered or set on an inode, returning 0 if
 * permission is granted, -ve if denied.
 *
 * This is used to make sure security xattrs don't get updated or set by those
 * who aren't privileged to do so.
 */
int cap_inode_setxattr(struct dentry *dentry, const char *name,
		       const void *value, size_t size, int flags)
{
	/* Writing the file-capability xattr needs CAP_SETFCAP. */
	if (strcmp(name, XATTR_NAME_CAPS) == 0)
		return capable(CAP_SETFCAP) ? 0 : -EPERM;

	/* Any other security.* attribute needs CAP_SYS_ADMIN. */
	if (strncmp(name, XATTR_SECURITY_PREFIX,
		    sizeof(XATTR_SECURITY_PREFIX) - 1) == 0 &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
/**
 * cap_inode_removexattr - Determine whether an xattr may be removed
 * @dentry: The inode/dentry being altered
 * @name: The name of the xattr to be changed
 *
 * Determine whether an xattr may be removed from an inode, returning 0 if
 * permission is granted, -ve if denied.
 *
 * This is used to make sure security xattrs don't get removed by those who
 * aren't privileged to remove them.
 */
int cap_inode_removexattr(struct dentry *dentry, const char *name)
{
	/* Removing the file-capability xattr needs CAP_SETFCAP. */
	if (strcmp(name, XATTR_NAME_CAPS) == 0)
		return capable(CAP_SETFCAP) ? 0 : -EPERM;

	/* Any other security.* attribute needs CAP_SYS_ADMIN. */
	if (strncmp(name, XATTR_SECURITY_PREFIX,
		    sizeof(XATTR_SECURITY_PREFIX) - 1) == 0 &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
/*
* cap_emulate_setxuid() fixes the effective / permitted capabilities of
* a process after a call to setuid, setreuid, or setresuid.
*
* 1) When set*uiding _from_ one of {r,e,s}uid == 0 _to_ all of
* {r,e,s}uid != 0, the permitted and effective capabilities are
* cleared.
*
* 2) When set*uiding _from_ euid == 0 _to_ euid != 0, the effective
* capabilities of the process are cleared.
*
* 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective
* capabilities are set to the permitted capabilities.
*
* fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should
* never happen.
*
* -astor
*
* cevans - New behaviour, Oct '99
* A process may, via prctl(), elect to keep its capabilities when it
* calls setuid() and switches away from uid==0. Both permitted and
* effective sets will be retained.
* Without this change, it was impossible for a daemon to drop only some
* of its privilege. The call to setuid(!=0) would drop all privileges!
* Keeping uid 0 is not an option because uid 0 owns too many vital
* files..
* Thanks to Olaf Kirch and Peter Benie for spotting this.
*/
static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
{
kuid_t root_uid = make_kuid(old->user_ns, 0);
if ((uid_eq(old->uid, root_uid) ||
uid_eq(old->euid, root_uid) ||
uid_eq(old->suid, root_uid)) &&
(!uid_eq(new->uid, root_uid) &&
!uid_eq(new->euid, root_uid) &&
!uid_eq(new->suid, root_uid)) &&
!issecure(SECURE_KEEP_CAPS)) {
cap_clear(new->cap_permitted);
cap_clear(new->cap_effective);
}
if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
cap_clear(new->cap_effective);
if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid))
new->cap_effective = new->cap_permitted;
}
/**
 * cap_task_fix_setuid - Fix up the results of setuid() call
 * @new: The proposed credentials
 * @old: The current task's current credentials
 * @flags: Indications of what has changed (LSM_SETID_*)
 *
 * Fix up the results of setuid() call before the credential changes are
 * actually applied, returning 0 to grant the changes, -ve to deny them.
 */
int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
{
	switch (flags) {
	case LSM_SETID_RE:
	case LSM_SETID_ID:
	case LSM_SETID_RES:
		/* juggle the capabilities to follow [RES]UID changes unless
		 * otherwise suppressed */
		if (!issecure(SECURE_NO_SETUID_FIXUP))
			cap_emulate_setxuid(new, old);
		break;

	case LSM_SETID_FS:
		/* juggle the capabilities to follow FSUID changes, unless
		 * otherwise suppressed
		 *
		 * FIXME - is fsuser used for all CAP_FS_MASK capabilities?
		 *          if not, we might be a bit too harsh here.
		 */
		if (!issecure(SECURE_NO_SETUID_FIXUP)) {
			kuid_t root_uid = make_kuid(old->user_ns, 0);

			/* fsuid leaving root: drop fs-related caps from the
			 * effective set. */
			if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid))
				new->cap_effective =
					cap_drop_fs_set(new->cap_effective);

			/* fsuid regaining root: re-raise the fs-related caps
			 * from the permitted set. */
			if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid))
				new->cap_effective =
					cap_raise_fs_set(new->cap_effective,
							 new->cap_permitted);
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
* Rationale: code calling task_setscheduler, task_setioprio, and
* task_setnice, assumes that
* . if capable(cap_sys_nice), then those actions should be allowed
* . if not capable(cap_sys_nice), but acting on your own processes,
* then those actions should be allowed
* This is insufficient now since you can call code without suid, but
* yet with increased caps.
* So we check for increased caps on the target process.
*/
static int cap_safe_nice(struct task_struct *p)
{
	int ret;

	/* __task_cred() requires the RCU read lock. */
	rcu_read_lock();
	if (cap_issubset(__task_cred(p)->cap_permitted,
			 current_cred()->cap_permitted) ||
	    ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
		ret = 0;
	else
		ret = -EPERM;
	rcu_read_unlock();

	return ret;
}
/**
 * cap_task_setscheduler - Determine if scheduler policy change is permitted
 * @p: The task to affect
 *
 * Determine if the requested scheduler policy change is permitted for the
 * specified task, returning 0 if permission is granted, -ve if denied.
 */
int cap_task_setscheduler(struct task_struct *p)
{
	return cap_safe_nice(p);
}
/**
 * cap_task_setioprio - Determine if I/O priority change is permitted
 * @p: The task to affect
 * @ioprio: The I/O priority to set
 *
 * Determine if the requested I/O priority change is permitted for the
 * specified task, returning 0 if permission is granted, -ve if denied.
 */
int cap_task_setioprio(struct task_struct *p, int ioprio)
{
	return cap_safe_nice(p);
}
/**
 * cap_task_setnice - Determine if task priority change is permitted
 * @p: The task to affect
 * @nice: The nice value to set
 *
 * Determine if the requested task priority change is permitted for the
 * specified task, returning 0 if permission is granted, -ve if denied.
 */
int cap_task_setnice(struct task_struct *p, int nice)
{
	return cap_safe_nice(p);
}
/*
* Implement PR_CAPBSET_DROP. Attempt to remove the specified capability from
* the current task's bounding set. Returns 0 on success, -ve on error.
*/
static int cap_prctl_drop(unsigned long cap)
{
struct cred *new;
if (!ns_capable(current_user_ns(), CAP_SETPCAP))
return -EPERM;
if (!cap_valid(cap))
return -EINVAL;
new = prepare_creds();
if (!new)
return -ENOMEM;
cap_lower(new->cap_bset, cap);
return commit_creds(new);
}
/**
 * cap_task_prctl - Implement process control functions for this security module
 * @option: The process control function requested
 * @arg2, @arg3, @arg4, @arg5: The argument data for this function
 *
 * Allow process control functions (sys_prctl()) to alter capabilities; may
 * also deny access to other functions not otherwise implemented here.
 *
 * Returns 0 or +ve on success, -ENOSYS if this function is not implemented
 * here, other -ve on error.  If -ENOSYS is returned, sys_prctl() and other LSM
 * modules will consider performing the function.
 */
int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
		   unsigned long arg4, unsigned long arg5)
{
	const struct cred *old = current_cred();
	struct cred *new;

	switch (option) {
	case PR_CAPBSET_READ:
		if (!cap_valid(arg2))
			return -EINVAL;
		return !!cap_raised(old->cap_bset, arg2);

	case PR_CAPBSET_DROP:
		return cap_prctl_drop(arg2);

	/*
	 * The next four prctl's remain to assist with transitioning a
	 * system from legacy UID=0 based privilege (when filesystem
	 * capabilities are not in use) to a system using filesystem
	 * capabilities only - as the POSIX.1e draft intended.
	 *
	 * Note:
	 *
	 *  PR_SET_SECUREBITS =
	 *      issecure_mask(SECURE_KEEP_CAPS_LOCKED)
	 *    | issecure_mask(SECURE_NOROOT)
	 *    | issecure_mask(SECURE_NOROOT_LOCKED)
	 *    | issecure_mask(SECURE_NO_SETUID_FIXUP)
	 *    | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
	 *
	 * will ensure that the current process and all of its
	 * children will be locked into a pure
	 * capability-based-privilege environment.
	 */
	case PR_SET_SECUREBITS:
		/* Shifting SECURE_ALL_LOCKS right by one aligns each lock
		 * bit with the flag bit it protects. */
		if ((((old->securebits & SECURE_ALL_LOCKS) >> 1)
		     & (old->securebits ^ arg2))			/*[1]*/
		    || ((old->securebits & SECURE_ALL_LOCKS & ~arg2))	/*[2]*/
		    || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS))	/*[3]*/
		    || (cap_capable(current_cred(),
				    current_cred()->user_ns, CAP_SETPCAP,
				    SECURITY_CAP_AUDIT) != 0)		/*[4]*/
			/*
			 * [1] no changing of bits that are locked
			 * [2] no unlocking of locks
			 * [3] no setting of unsupported bits
			 * [4] doing anything requires privilege (go read about
			 *     the "sendmail capabilities bug")
			 */
		    )
			/* cannot change a locked bit */
			return -EPERM;

		new = prepare_creds();
		if (!new)
			return -ENOMEM;
		new->securebits = arg2;
		return commit_creds(new);

	case PR_GET_SECUREBITS:
		return old->securebits;

	case PR_GET_KEEPCAPS:
		return !!issecure(SECURE_KEEP_CAPS);

	case PR_SET_KEEPCAPS:
		if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
			return -EINVAL;
		if (issecure(SECURE_KEEP_CAPS_LOCKED))
			return -EPERM;

		new = prepare_creds();
		if (!new)
			return -ENOMEM;
		if (arg2)
			new->securebits |= issecure_mask(SECURE_KEEP_CAPS);
		else
			new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
		return commit_creds(new);

	default:
		/* No functionality available - continue with default */
		return -ENOSYS;
	}
}
/**
 * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
 * @mm: The VM space in which the new mapping is to be made
 * @pages: The size of the mapping
 *
 * Determine whether the allocation of a new virtual mapping by the current
 * task is permitted, returning 0 if permission is granted, -ve if not.
 */
int cap_vm_enough_memory(struct mm_struct *mm, long pages)
{
	int cap_sys_admin;

	/* Probe for CAP_SYS_ADMIN without generating an audit record;
	 * privileged tasks get a more generous overcommit allowance. */
	cap_sys_admin = (cap_capable(current_cred(), &init_user_ns,
				     CAP_SYS_ADMIN,
				     SECURITY_CAP_NOAUDIT) == 0);
	return __vm_enough_memory(mm, pages, cap_sys_admin);
}
/*
 * cap_mmap_addr - check if able to map given addr
 * @addr: address attempting to be mapped
 *
 * If the process is attempting to map memory below dac_mmap_min_addr they need
 * CAP_SYS_RAWIO.  The other parameters to this function are unused by the
 * capability security module.  Returns 0 if this mapping should be allowed
 * -EPERM if not.
 */
int cap_mmap_addr(unsigned long addr)
{
	int ret;

	/* Mappings at or above the floor are always fine. */
	if (addr >= dac_mmap_min_addr)
		return 0;

	ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO,
			  SECURITY_CAP_AUDIT);
	/* set PF_SUPERPRIV if it turns out we allow the low mmap */
	if (ret == 0)
		current->flags |= PF_SUPERPRIV;
	return ret;
}
/*
 * cap_mmap_file - Determine whether a file mapping is permitted
 *
 * The capability module places no restriction on mapping files; address
 * range checks are handled separately in cap_mmap_addr().  Always returns 0.
 */
int cap_mmap_file(struct file *file, unsigned long reqprot,
		  unsigned long prot, unsigned long flags)
{
	return 0;
}
/*
* Copyright (C) 2005, 2006
* Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
* This file is part of exofs.
*
* exofs is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation. Since it is based on ext2, and the only
* valid version of GPL for the Linux kernel is version 2, the only valid
* version of GPL for exofs is version 2.
*
* exofs is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with exofs; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <linux/lcm.h>
#include "ore_raid.h"
MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
MODULE_LICENSE("GPL");
/* ore_verify_layout does a couple of things:
 * 1. Given a minimum number of needed parameters fixes up the rest of the
 *    members to be operational for the ore. The needed parameters are those
 *    that are defined by the pnfs-objects layout STD.
 * 2. Check to see if the current ore code actually supports these parameters
 *    for example stripe_unit must be a multiple of the system PAGE_SIZE,
 *    and etc...
 * 3. Cache some heavily used calculations that will be needed by users.
 */
enum { BIO_MAX_PAGES_KMALLOC =
(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),};
/*
 * Validate a minimal set of layout parameters and derive the remaining
 * members (parity, group_count/width/depth, max_io_length) needed by the
 * ore at runtime.
 *
 * Returns 0 on success, -EINVAL if the layout is malformed or unsupported.
 */
int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
{
	u64 stripe_length;

	switch (layout->raid_algorithm) {
	case PNFS_OSD_RAID_0:
		layout->parity = 0;
		break;
	case PNFS_OSD_RAID_5:
		layout->parity = 1;
		break;
	case PNFS_OSD_RAID_PQ:
	case PNFS_OSD_RAID_4:
	default:
		ORE_ERR("Only RAID_0/5 for now\n");
		return -EINVAL;
	}
	if (0 != (layout->stripe_unit & ~PAGE_MASK)) {
		ORE_ERR("Stripe Unit(0x%llx)"
			  " must be Multples of PAGE_SIZE(0x%lx)\n",
			  _LLU(layout->stripe_unit), PAGE_SIZE);
		return -EINVAL;
	}
	if (layout->group_width) {
		if (!layout->group_depth) {
			ORE_ERR("group_depth == 0 && group_width != 0\n");
			return -EINVAL;
		}
		if (total_comps < (layout->group_width * layout->mirrors_p1)) {
			ORE_ERR("Data Map wrong, "
				"numdevs=%d < group_width=%d * mirrors=%d\n",
				total_comps, layout->group_width,
				layout->mirrors_p1);
			return -EINVAL;
		}
		layout->group_count = total_comps / layout->mirrors_p1 /
						layout->group_width;
	} else {
		if (layout->group_depth) {
			printk(KERN_NOTICE "Warning: group_depth ignored "
				"group_width == 0 && group_depth == %lld\n",
				_LLU(layout->group_depth));
		}
		layout->group_width = total_comps / layout->mirrors_p1;
		layout->group_depth = -1;
		layout->group_count = 1;
	}

	stripe_length = (u64)layout->group_width * layout->stripe_unit;
	if (stripe_length >= (1ULL << 32)) {
		ORE_ERR("Stripe_length(0x%llx) >= 32bit is not supported\n",
			_LLU(stripe_length));
		return -EINVAL;
	}

	layout->max_io_length =
		(BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
					(layout->group_width - layout->parity);
	if (layout->parity) {
		/* Round max_io_length down to whole data stripes.
		 * NOTE: renamed from "stripe_length" — the original inner
		 * declaration shadowed the outer u64 stripe_length above.
		 */
		unsigned data_stripe_size =
			(layout->group_width - layout->parity) *
			layout->stripe_unit;

		layout->max_io_length /= data_stripe_size;
		layout->max_io_length *= data_stripe_size;
	}
	return 0;
}
EXPORT_SYMBOL(ore_verify_layout);
/* Return the OSD credential for component @index.  The mask with
 * oc->single_comp presumably collapses all indices onto one component
 * when a single-component table is in use — TODO confirm against the
 * ore_components definition. */
static u8 *_ios_cred(struct ore_io_state *ios, unsigned index)
{
	return ios->oc->comps[index & ios->oc->single_comp].cred;
}
/* Return the OSD object id for component @index (same single_comp
 * folding as _ios_cred()). */
static struct osd_obj_id *_ios_obj(struct ore_io_state *ios, unsigned index)
{
	return &ios->oc->comps[index & ios->oc->single_comp].obj;
}
/* Return the osd_dev backing component @index of this io_state. */
static struct osd_dev *_ios_od(struct ore_io_state *ios, unsigned index)
{
	ORE_DBGMSG2("oc->first_dev=%d oc->numdevs=%d i=%d oc->ods=%p\n",
		    ios->oc->first_dev, ios->oc->numdevs, index,
		    ios->oc->ods);

	return ore_comp_dev(ios->oc, index);
}
/* Allocate and zero an ore_io_state plus its trailing per-device state
 * and the optional sg-list / parity-page arrays (sized via VLA-in-struct).
 *
 * When everything fits in one page it is a single kzalloc; otherwise the
 * ios+per_dev part and the sglist/pages part are allocated separately and
 * ios->extra_part_alloc marks that the extra part must be freed on its own.
 *
 * On success *pios holds the new state and 0 is returned; on allocation
 * failure *pios is NULL and -ENOMEM is returned.
 */
int _ore_get_io_state(struct ore_layout *layout,
		      struct ore_components *oc, unsigned numdevs,
		      unsigned sgs_per_dev, unsigned num_par_pages,
		      struct ore_io_state **pios)
{
	struct ore_io_state *ios;
	struct page **pages;
	struct osd_sg_entry *sgilist;
	/* Variable-length trailing arrays; sglist and pages are mutually
	 * exclusive (reads use sglist, writes use parity pages), hence
	 * the union. */
	struct __alloc_all_io_state {
		struct ore_io_state ios;
		struct ore_per_dev_state per_dev[numdevs];
		union {
			struct osd_sg_entry sglist[sgs_per_dev * numdevs];
			struct page *pages[num_par_pages];
		};
	} *_aios;

	if (likely(sizeof(*_aios) <= PAGE_SIZE)) {
		/* Common case: one page-sized allocation holds it all. */
		_aios = kzalloc(sizeof(*_aios), GFP_KERNEL);
		if (unlikely(!_aios)) {
			ORE_DBGMSG("Failed kzalloc bytes=%zd\n",
				   sizeof(*_aios));
			*pios = NULL;
			return -ENOMEM;
		}
		pages = num_par_pages ? _aios->pages : NULL;
		sgilist = sgs_per_dev ? _aios->sglist : NULL;
		ios = &_aios->ios;
	} else {
		/* Too big for one page: split into two allocations. */
		struct __alloc_small_io_state {
			struct ore_io_state ios;
			struct ore_per_dev_state per_dev[numdevs];
		} *_aio_small;
		union __extra_part {
			struct osd_sg_entry sglist[sgs_per_dev * numdevs];
			struct page *pages[num_par_pages];
		} *extra_part;

		_aio_small = kzalloc(sizeof(*_aio_small), GFP_KERNEL);
		if (unlikely(!_aio_small)) {
			ORE_DBGMSG("Failed alloc first part bytes=%zd\n",
				   sizeof(*_aio_small));
			*pios = NULL;
			return -ENOMEM;
		}
		extra_part = kzalloc(sizeof(*extra_part), GFP_KERNEL);
		if (unlikely(!extra_part)) {
			ORE_DBGMSG("Failed alloc second part bytes=%zd\n",
				   sizeof(*extra_part));
			kfree(_aio_small);
			*pios = NULL;
			return -ENOMEM;
		}

		pages = num_par_pages ? extra_part->pages : NULL;
		sgilist = sgs_per_dev ? extra_part->sglist : NULL;
		/* In this case the per_dev[0].sgilist holds the pointer to
		 * be freed
		 */
		ios = &_aio_small->ios;
		ios->extra_part_alloc = true;
	}

	if (pages) {
		ios->parity_pages = pages;
		ios->max_par_pages = num_par_pages;
	}
	if (sgilist) {
		unsigned d;

		/* Carve the shared sglist into equal per-device slices. */
		for (d = 0; d < numdevs; ++d) {
			ios->per_dev[d].sglist = sgilist;
			sgilist += sgs_per_dev;
		}
		ios->sgs_per_dev = sgs_per_dev;
	}

	ios->layout = layout;
	ios->oc = oc;
	*pios = ios;
	return 0;
}
/* Allocate an io_state for only a single group of devices
 *
 * If a user needs to call ore_read/write() this version must be used because
 * it allocates extra stuff for striping and raid.
 * The ore might decide to only IO less than @length bytes due to alignments
 * and constraints as follows:
 * - The IO cannot cross group boundary.
 * - In raid5/6 The end of the IO must align at end of a stripe eg.
 *   (@offset + @length) % strip_size == 0. Or the complete range is within a
 *   single stripe.
 * - Memory condition only permitted a shorter IO. (A user can use @length=~0
 *   And check the returned ios->length for max_io_size.)
 *
 * The caller must check returned ios->length (and/or ios->nr_pages) and
 * re-issue these pages that fall outside of ios->length
 */
int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
		     bool is_reading, u64 offset, u64 length,
		     struct ore_io_state **pios)
{
	struct ore_io_state *ios;
	unsigned numdevs = layout->group_width * layout->mirrors_p1;
	unsigned sgs_per_dev = 0, max_par_pages = 0;
	int ret;

	if (layout->parity && length) {
		/* Size the extra raid bookkeeping: sg-lists for reads,
		 * parity-page arrays for writes. */
		unsigned data_devs = layout->group_width - layout->parity;
		unsigned stripe_size = layout->stripe_unit * data_devs;
		unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
		u32 remainder;
		u64 num_stripes;
		u64 num_raid_units;

		num_stripes = div_u64_rem(length, stripe_size, &remainder);
		if (remainder)
			++num_stripes;

		num_raid_units = num_stripes * layout->parity;

		if (is_reading) {
			/* For reads add per_dev sglist array */
			/* TODO: Raid 6 we need twice more. Actually:
			 * num_stripes / LCMdP(W,P);
			 * if (W%P != 0) num_stripes *= parity;
			 */

			/* first/last seg is split */
			num_raid_units += layout->group_width;
			sgs_per_dev = div_u64(num_raid_units, data_devs) + 2;
		} else {
			/* For Writes add parity pages array. */
			max_par_pages = num_raid_units * pages_in_unit *
						sizeof(struct page *);
		}
	}

	ret = _ore_get_io_state(layout, oc, numdevs, sgs_per_dev, max_par_pages,
				pios);
	if (unlikely(ret))
		return ret;

	ios = *pios;
	ios->reading = is_reading;
	ios->offset = offset;

	if (length) {
		ore_calc_stripe_info(layout, offset, length, &ios->si);
		ios->length = ios->si.length;
		ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
				 ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
		if (layout->parity)
			_ore_post_alloc_raid_stuff(ios);
	}

	return 0;
}
EXPORT_SYMBOL(ore_get_rw_state);
/* Allocate an io_state for all the devices in the comps array
 *
 * This version of io_state allocation is used mostly by create/remove
 * and trunc where we currently need all the devices. The only wasteful
 * bit is the read/write_attributes with no IO. Those sites should
 * be converted to use ore_get_rw_state() with length=0
 */
int ore_get_io_state(struct ore_layout *layout, struct ore_components *oc,
		     struct ore_io_state **pios)
{
	/* No striping/raid extras needed: sgs_per_dev = num_par_pages = 0. */
	return _ore_get_io_state(layout, oc, oc->numdevs, 0, 0, pios);
}
EXPORT_SYMBOL(ore_get_io_state);
/* Release an io_state: end each device's OSD request, drop each bio,
 * free the raid scratch data and finally the state itself.  NULL-safe. */
void ore_put_io_state(struct ore_io_state *ios)
{
	unsigned i;

	if (!ios)
		return;

	for (i = 0; i < ios->numdevs; i++) {
		struct ore_per_dev_state *pd = &ios->per_dev[i];

		if (pd->or)
			osd_end_request(pd->or);
		if (pd->bio)
			bio_put(pd->bio);
	}

	_ore_free_raid_stuff(ios);
	kfree(ios);
}
EXPORT_SYMBOL(ore_put_io_state);
/* done-callback used for synchronous execution: wake the on-stack waiter
 * installed by ore_io_execute(). */
static void _sync_done(struct ore_io_state *ios, void *p)
{
	struct completion *waiting = p;

	complete(waiting);
}
/* kref release function: the last per-device request has completed, so
 * invoke the user's done() callback. */
static void _last_io(struct kref *kref)
{
	struct ore_io_state *ios = container_of(
					kref, struct ore_io_state, kref);

	ios->done(ios, ios->private);
}
/* Async completion of one osd_request: drop its reference on the
 * io_state; _last_io fires when the count reaches zero. */
static void _done_io(struct osd_request *or, void *p)
{
	struct ore_io_state *ios = p;

	kref_put(&ios->kref, _last_io);
}
/* Finalize and dispatch all per-device OSD requests of @ios.
 *
 * If ios->done is NULL the call is synchronous: a temporary completion is
 * installed as the done callback, we wait for it, and return the result of
 * ore_check_io().  Otherwise the requests run asynchronously and 0 is
 * returned once they have all been dispatched.
 */
int ore_io_execute(struct ore_io_state *ios)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	bool sync = (ios->done == NULL);
	int i, ret;

	if (sync) {
		ios->done = _sync_done;
		ios->private = &wait;
	}

	/* First pass: finalize every request before launching any. */
	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		ret = osd_finalize_request(or, 0, _ios_cred(ios, i), NULL);
		if (unlikely(ret)) {
			ORE_DBGMSG("Failed to osd_finalize_request() => %d\n",
				     ret);
			return ret;
		}
	}

	/* kref starts at 1; each in-flight request holds one more ref. */
	kref_init(&ios->kref);

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_request *or = ios->per_dev[i].or;
		if (unlikely(!or))
			continue;

		kref_get(&ios->kref);
		osd_execute_request_async(or, _done_io, ios);
	}

	/* Drop the initial ref; done() runs once every request completed. */
	kref_put(&ios->kref, _last_io);
	ret = 0;

	if (sync) {
		wait_for_completion(&wait);
		ret = ore_check_io(ios, NULL);
	}
	return ret;
}
/* Zero-fill every segment of @bio — used when a read started past the
 * end of the object and must read back as zeros. */
static void _clear_bio(struct bio *bio)
{
	struct bio_vec *bv;
	unsigned i;

	bio_for_each_segment_all(bv, bio, i) {
		unsigned this_count = bv->bv_len;

		if (likely(PAGE_SIZE == this_count))
			clear_highpage(bv->bv_page);
		else
			zero_user(bv->bv_page, bv->bv_offset, this_count);
	}
}
/* Decode the sense data of every per-device request and return the linux
 * errno of the highest-priority OSD error seen (0 if none).  A read that
 * started past end-of-object is recovered by zero-filling its bio.  If
 * @on_dev_error is supplied it is invoked for each failing device so the
 * caller can react (e.g. mark the device bad). */
int ore_check_io(struct ore_io_state *ios, ore_on_dev_error on_dev_error)
{
	enum osd_err_priority acumulated_osd_err = 0;
	int acumulated_lin_err = 0;
	int i;

	for (i = 0; i < ios->numdevs; i++) {
		struct osd_sense_info osi;
		struct ore_per_dev_state *per_dev = &ios->per_dev[i];
		struct osd_request *or = per_dev->or;
		int ret;

		if (unlikely(!or))
			continue;

		ret = osd_req_decode_sense(or, &osi);
		if (likely(!ret))
			continue;

		if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
			/* start read offset passed end of file */
			_clear_bio(per_dev->bio);
			ORE_DBGMSG("start read offset passed end of file "
				"offset=0x%llx, length=0x%llx\n",
				_LLU(per_dev->offset),
				_LLU(per_dev->length));

			continue; /* we recovered */
		}

		if (on_dev_error) {
			/* Report how far the transfer got before failing. */
			u64 residual = ios->reading ?
					or->in.residual : or->out.residual;
			u64 offset = (ios->offset + ios->length) - residual;
			unsigned dev = per_dev->dev - ios->oc->first_dev;
			struct ore_dev *od = ios->oc->ods[dev];

			on_dev_error(ios, od, dev, osi.osd_err_pri,
				     offset, residual);
		}
		/* Keep the error of the highest priority seen so far. */
		if (osi.osd_err_pri >= acumulated_osd_err) {
			acumulated_osd_err = osi.osd_err_pri;
			acumulated_lin_err = ret;
		}
	}

	return acumulated_lin_err;
}
EXPORT_SYMBOL(ore_check_io);
/*
 * L - logical offset into the file
 *
 * D - number of Data devices
 *     D = group_width - parity
 *
 * U - The number of bytes in a stripe within a group
 *     U = stripe_unit * D
 *
 * T - The number of bytes striped within a group of component objects
 *     (before advancing to the next group)
 *     T = U * group_depth
 *
 * S - The number of bytes striped across all component objects
 *     before the pattern repeats
 *     S = T * group_count
 *
 * M - The "major" (i.e., across all components) cycle number
 *     M = L / S
 *
 * G - Counts the groups from the beginning of the major cycle
 *     G = (L - (M * S)) / T [or (L % S) / T]
 *
 * H - The byte offset within the group
 *     H = (L - (M * S)) % T [or (L % S) % T]
 *
 * N - The "minor" (i.e., across the group) stripe number
 *     N = H / U
 *
 * C - The component index corresponding to L
 *
 *     C = (H - (N * U)) / stripe_unit + G * D
 *     [or (L % U) / stripe_unit + G * D]
 *
 * O - The component offset corresponding to L
 *     O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
 *
 * LCMdP - Parity cycle: Lowest Common Multiple of group_width, parity
 *          divide by parity
 *     LCMdP = lcm(group_width, parity) / parity
 *
 * R - The parity Rotation stripe
 *     (Note parity cycle always starts at a group's boundary)
 *     R = N % LCMdP
 *
 * I = the first parity device index
 *     I = (group_width + group_width - R*parity - parity) % group_width
 *
 * Craid - The component index Rotated
 *     Craid = (group_width + C - R*parity) % group_width
 *     (We add the group_width to avoid negative numbers modulo math)
 */
void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
			  u64 length, struct ore_striping_info *si)
{
	u32 stripe_unit = layout->stripe_unit;
	u32 group_width = layout->group_width;
	u64 group_depth = layout->group_depth;
	u32 parity = layout->parity;

	u32 D = group_width - parity;
	u32 U = D * stripe_unit;
	u64 T = U * group_depth;
	u64 S = T * layout->group_count;
	u64 M = div64_u64(file_offset, S);

	/*
	G = (L - (M * S)) / T
	H = (L - (M * S)) % T
	*/
	u64 LmodS = file_offset - M * S;
	u32 G = div64_u64(LmodS, T);
	u64 H = LmodS - G * T;

	u32 N = div_u64(H, U);
	u32 Nlast;

	/* "H - (N * U)" is just "H % U" so it's bound to u32 */
	u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;

	div_u64_rem(file_offset, stripe_unit, &si->unit_off);

	si->obj_offset = si->unit_off + (N * stripe_unit) +
				  (M * group_depth * stripe_unit);

	if (parity) {
		/* Rotate data and parity devices per stripe (see the
		 * R / I / Craid formulas in the comment above). */
		u32 LCMdP = lcm(group_width, parity) / parity;
		/* R = N % LCMdP; */
		u32 RxP = (N % LCMdP) * parity;
		u32 first_dev = C - C % group_width;

		si->par_dev = (group_width + group_width - parity - RxP) %
			      group_width + first_dev;
		si->dev = (group_width + C - RxP) % group_width + first_dev;
		si->bytes_in_stripe = U;
		si->first_stripe_start = M * S + G * T + N * U;
	} else {
		/* Make the math correct see _prepare_one_group */
		si->par_dev = group_width;
		si->dev = C;
	}

	si->dev *= layout->mirrors_p1;
	si->par_dev *= layout->mirrors_p1;
	si->offset = file_offset;
	/* Clamp to the remainder of this group, then to the caller's length. */
	si->length = T - H;
	if (si->length > length)
		si->length = length;
	Nlast = div_u64(H + si->length + U - 1, U);
	si->maxdevUnits = Nlast - N;
	si->M = M;
}
EXPORT_SYMBOL(ore_calc_stripe_info);
/* Append @cur_len bytes from @pages (starting at page *cur_pg, offset
 * @pgbase within that first page) to @per_dev's bio, allocating the bio
 * on first use.  *cur_pg and per_dev->length advance on success only.
 */
int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
			 unsigned pgbase, struct page **pages,
			 struct ore_per_dev_state *per_dev, int cur_len)
{
	unsigned pg = *cur_pg;
	struct request_queue *q =
			osd_request_queue(_ios_od(ios, per_dev->dev));
	unsigned len = cur_len;
	int ret;

	if (per_dev->bio == NULL) {
		/* Size the bio for everything this device will receive. */
		unsigned bio_size;

		if (!ios->reading) {
			bio_size = ios->si.maxdevUnits;
		} else {
			/* Reads skip parity units, so scale down by the
			 * data/total device ratio (+1 for split segments). */
			bio_size = (ios->si.maxdevUnits + 1) *
			     (ios->layout->group_width - ios->layout->parity) /
			     ios->layout->group_width;
		}
		bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);

		per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
		if (unlikely(!per_dev->bio)) {
			ORE_DBGMSG("Failed to allocate BIO size=%u\n",
				   bio_size);
			ret = -ENOMEM;
			goto out;
		}
	}

	while (cur_len > 0) {
		unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
		unsigned added_len;

		cur_len -= pglen;

		added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
					    pglen, pgbase);
		if (unlikely(pglen != added_len)) {
			/* If bi_vcnt == bi_max then this is a SW BUG */
			ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
				   "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
				   per_dev->bio->bi_vcnt,
				   per_dev->bio->bi_max_vecs,
				   BIO_MAX_PAGES_KMALLOC, cur_len);
			ret = -ENOMEM;
			goto out;
		}
		_add_stripe_page(ios->sp2d, &ios->si, pages[pg]);

		pgbase = 0;
		++pg;
	}
	BUG_ON(cur_len);

	per_dev->length += len;
	*cur_pg = pg;
	ret = 0;
out:	/* we fail the complete unit on an error eg don't advance
	 * per_dev->length and cur_pg. This means that we might have a bigger
	 * bio than the CDB requested length (per_dev->length). That's fine
	 * only the opposite is fatal.
	 */
	return ret;
}
/* Walk ios->pages and distribute them stripe-unit by stripe-unit onto the
 * per-device states of the current group, inserting parity units at each
 * stripe boundary when raid is in use.  Fills ios->numdevs and advances
 * ios->pages_consumed; returns 0 or the first error from the unit adders.
 */
static int _prepare_for_striping(struct ore_io_state *ios)
{
	struct ore_striping_info *si = &ios->si;
	unsigned stripe_unit = ios->layout->stripe_unit;
	unsigned mirrors_p1 = ios->layout->mirrors_p1;
	unsigned group_width = ios->layout->group_width;
	unsigned devs_in_group = group_width * mirrors_p1;
	unsigned dev = si->dev;
	unsigned first_dev = dev - (dev % devs_in_group);
	unsigned dev_order;
	unsigned cur_pg = ios->pages_consumed;
	u64 length = ios->length;
	int ret = 0;

	if (!ios->pages) {
		/* No data pages (e.g. attribute-only IO): just the mirrors. */
		ios->numdevs = ios->layout->mirrors_p1;
		return 0;
	}

	BUG_ON(length > si->length);

	dev_order = _dev_order(devs_in_group, mirrors_p1, si->par_dev, dev);
	si->cur_comp = dev_order;
	si->cur_pg = si->unit_off / PAGE_SIZE;

	while (length) {
		unsigned comp = dev - first_dev;
		struct ore_per_dev_state *per_dev = &ios->per_dev[comp];
		unsigned cur_len, page_off = 0;

		if (!per_dev->length) {
			/* First unit landing on this device: set its
			 * object offset relative to the starting device. */
			per_dev->dev = dev;
			if (dev == si->dev) {
				WARN_ON(dev == si->par_dev);
				per_dev->offset = si->obj_offset;
				cur_len = stripe_unit - si->unit_off;
				page_off = si->unit_off & ~PAGE_MASK;
				BUG_ON(page_off && (page_off != ios->pgbase));
			} else {
				if (si->cur_comp > dev_order)
					per_dev->offset =
						si->obj_offset - si->unit_off;
				else /* si->cur_comp < dev_order */
					per_dev->offset =
						si->obj_offset + stripe_unit -
								   si->unit_off;
				cur_len = stripe_unit;
			}
		} else {
			cur_len = stripe_unit;
		}
		if (cur_len >= length)
			cur_len = length;

		ret = _ore_add_stripe_unit(ios, &cur_pg, page_off, ios->pages,
					   per_dev, cur_len);
		if (unlikely(ret))
			goto out;

		/* Advance to the next data device (skipping mirrors),
		 * wrapping within the group. */
		dev += mirrors_p1;
		dev = (dev % devs_in_group) + first_dev;

		length -= cur_len;

		si->cur_comp = (si->cur_comp + 1) % group_width;
		if (unlikely((dev == si->par_dev) || (!length && ios->sp2d))) {
			if (!length && ios->sp2d) {
				/* If we are writing and this is the very last
				 * stripe. then operate on parity dev.
				 */
				dev = si->par_dev;
			}
			if (ios->sp2d)
				/* In writes cur_len just means if it's the
				 * last one. See _ore_add_parity_unit.
				 */
				cur_len = length;
			per_dev = &ios->per_dev[dev - first_dev];
			if (!per_dev->length) {
				/* Only/always the parity unit of the first
				 * stripe will be empty. So this is a chance to
				 * initialize the per_dev info.
				 */
				per_dev->dev = dev;
				per_dev->offset = si->obj_offset - si->unit_off;
			}

			ret = _ore_add_parity_unit(ios, si, per_dev, cur_len);
			if (unlikely(ret))
				goto out;

			/* Rotate next par_dev backwards with wrapping */
			si->par_dev = (devs_in_group + si->par_dev -
				       ios->layout->parity * mirrors_p1) %
				      devs_in_group + first_dev;
			/* Next stripe, start fresh */
			si->cur_comp = 0;
			si->cur_pg = 0;
		}
	}
out:
	ios->numdevs = devs_in_group;
	ios->pages_consumed = cur_pg;
	return ret;
}
/* Issue a create-object request on every component device of @ios->oc,
 * then execute the batch.  Returns 0 on success or a -ve error. */
int ore_create(struct ore_io_state *ios)
{
	int i;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *req =
			osd_start_request(_ios_od(ios, i), GFP_KERNEL);

		if (unlikely(!req)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}

		ios->per_dev[i].or = req;
		ios->numdevs++;
		osd_req_create_object(req, _ios_obj(ios, i));
	}

	return ore_io_execute(ios);
}
EXPORT_SYMBOL(ore_create);
/* Issue a remove-object request on every component device of @ios->oc,
 * then execute the batch.  Returns 0 on success or a -ve error. */
int ore_remove(struct ore_io_state *ios)
{
	int i;

	for (i = 0; i < ios->oc->numdevs; i++) {
		struct osd_request *req =
			osd_start_request(_ios_od(ios, i), GFP_KERNEL);

		if (unlikely(!req)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}

		ios->per_dev[i].or = req;
		ios->numdevs++;
		osd_req_remove_object(req, _ios_obj(ios, i));
	}

	return ore_io_execute(ios);
}
EXPORT_SYMBOL(ore_remove);
/* Queue write (or set-attributes) requests for component @cur_comp and
 * all of its mirrors.  The first (master) device owns the bio; each
 * mirror gets a clone of it pointing at the same pages.
 */
static int _write_mirror(struct ore_io_state *ios, int cur_comp)
{
	struct ore_per_dev_state *master_dev = &ios->per_dev[cur_comp];
	unsigned dev = ios->per_dev[cur_comp].dev;
	unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
	int ret = 0;

	if (ios->pages && !master_dev->length)
		return 0; /* Just an empty slot */

	for (; cur_comp < last_comp; ++cur_comp, ++dev) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
		struct osd_request *or;

		or = osd_start_request(_ios_od(ios, dev), GFP_KERNEL);
		if (unlikely(!or)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			ret = -ENOMEM;
			goto out;
		}
		per_dev->or = or;

		if (ios->pages) {
			struct bio *bio;

			if (per_dev != master_dev) {
				/* Mirror: clone the master's bio so both
				 * point at the same data pages. */
				bio = bio_clone_kmalloc(master_dev->bio,
							GFP_KERNEL);
				if (unlikely(!bio)) {
					ORE_DBGMSG(
					      "Failed to allocate BIO size=%u\n",
					      master_dev->bio->bi_max_vecs);
					ret = -ENOMEM;
					goto out;
				}

				bio->bi_bdev = NULL;
				bio->bi_next = NULL;
				per_dev->offset = master_dev->offset;
				per_dev->length = master_dev->length;
				per_dev->bio = bio;
				per_dev->dev = dev;
			} else {
				bio = master_dev->bio;
				/* FIXME: bio_set_dir() */
				bio->bi_rw |= REQ_WRITE;
			}

			osd_req_write(or, _ios_obj(ios, cur_comp),
				      per_dev->offset, bio, per_dev->length);
			ORE_DBGMSG("write(0x%llx) offset=0x%llx "
				   "length=0x%llx dev=%d\n",
				   _LLU(_ios_obj(ios, cur_comp)->id),
				   _LLU(per_dev->offset),
				   _LLU(per_dev->length), dev);
		} else if (ios->kern_buff) {
			/* Small kernel-buffer write path. */
			per_dev->offset = ios->si.obj_offset;
			per_dev->dev = ios->si.dev + dev;

			/* no cross device without page array */
			BUG_ON((ios->layout->group_width > 1) &&
			       (ios->si.unit_off + ios->length >
				ios->layout->stripe_unit));

			ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
						 per_dev->offset,
						 ios->kern_buff, ios->length);
			if (unlikely(ret))
				goto out;
			ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
				      "length=0x%llx dev=%d\n",
				     _LLU(_ios_obj(ios, cur_comp)->id),
				     _LLU(per_dev->offset),
				     _LLU(ios->length), per_dev->dev);
		} else {
			/* Attribute-only request, no data transfer. */
			osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
			ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
				     _LLU(_ios_obj(ios, cur_comp)->id),
				     ios->out_attr_len, dev);
		}

		if (ios->out_attr)
			osd_req_add_set_attr_list(or, ios->out_attr,
						  ios->out_attr_len);

		if (ios->in_attr)
			osd_req_add_get_attr_list(or, ios->in_attr,
						  ios->in_attr_len);
	}

out:
	return ret;
}
/*
 * Prepare the striping layout for the io_state, queue one write per
 * mirror-set and execute everything.  Returns 0 or a negative errno.
 */
int ore_write(struct ore_io_state *ios)
{
	int ret;
	int comp;

	if (unlikely(ios->sp2d && !ios->r4w)) {
		/* RAID writes need the caller to supply a pages-lock
		 * interface (r4w); refuse to proceed without one.
		 */
		WARN_ON_ONCE(1);
		return -ENOTSUPP;
	}

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	for (comp = 0; comp < ios->numdevs; comp += ios->layout->mirrors_p1) {
		ret = _write_mirror(ios, comp);
		if (unlikely(ret))
			return ret;
	}

	return ore_io_execute(ios);
}
EXPORT_SYMBOL(ore_write);
/*
 * Queue the read (or get-attributes) request for component @cur_comp.
 * With a page list present this issues either a scatter-gather read
 * (per_dev->cur_sg != 0, the RAID case) or a plain linear read;
 * without pages it queues a get-attributes command instead.
 *
 * NOTE(review): first_dev mixes the object id into the device choice
 * (id % mirrors_p1) — presumably to spread reads over the mirrors;
 * confirm against ore_calc_stripe_info() users.
 */
int _ore_read_mirror(struct ore_io_state *ios, unsigned cur_comp)
{
	struct osd_request *or;
	struct ore_per_dev_state *per_dev = &ios->per_dev[cur_comp];
	struct osd_obj_id *obj = _ios_obj(ios, cur_comp);
	unsigned first_dev = (unsigned)obj->id;

	if (ios->pages && !per_dev->length)
		return 0; /* Just an empty slot */

	first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
	or = osd_start_request(_ios_od(ios, first_dev), GFP_KERNEL);
	if (unlikely(!or)) {
		ORE_ERR("%s: osd_start_request failed\n", __func__);
		return -ENOMEM;
	}
	per_dev->or = or;

	if (ios->pages) {
		if (per_dev->cur_sg) {
			/* finalize the last sg_entry */
			_ore_add_sg_seg(per_dev, 0, false);
			if (unlikely(!per_dev->cur_sg))
				return 0; /* Skip parity only device */

			osd_req_read_sg(or, obj, per_dev->bio,
					per_dev->sglist, per_dev->cur_sg);
		} else {
			/* The no raid case */
			osd_req_read(or, obj, per_dev->offset,
				     per_dev->bio, per_dev->length);
		}

		ORE_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
			   " dev=%d sg_len=%d\n", _LLU(obj->id),
			   _LLU(per_dev->offset), _LLU(per_dev->length),
			   first_dev, per_dev->cur_sg);
	} else {
		BUG_ON(ios->kern_buff);

		osd_req_get_attributes(or, obj);
		ORE_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
			      _LLU(obj->id),
			      ios->in_attr_len, first_dev);
	}

	if (ios->out_attr)
		osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);

	if (ios->in_attr)
		osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);

	return 0;
}
/*
 * Prepare the striping layout, queue one read per mirror-set and
 * execute everything.  Returns 0 or a negative errno.
 */
int ore_read(struct ore_io_state *ios)
{
	int comp;
	int ret;

	ret = _prepare_for_striping(ios);
	if (unlikely(ret))
		return ret;

	/* One request head per group of mirrors_p1 components */
	for (comp = 0; comp < ios->numdevs; comp += ios->layout->mirrors_p1) {
		ret = _ore_read_mirror(ios, comp);
		if (unlikely(ret))
			return ret;
	}

	return ore_io_execute(ios);
}
EXPORT_SYMBOL(ore_read);
/*
 * Scan the decoded get-attr list of the first device's request for the
 * (attr_page, attr_id) pair the caller asked for.  On a match the
 * length and value pointer are copied into @attr and 0 is returned;
 * -EIO when the attribute is not present.
 */
int extract_attr_from_ios(struct ore_io_state *ios, struct osd_attr *attr)
{
	struct osd_attr cur_attr = {.attr_page = 0}; /* start with zeros */
	void *iter = NULL;
	int nelem;

	for (;;) {
		nelem = 1;
		osd_req_decode_get_attr_list(ios->per_dev[0].or,
					     &cur_attr, &nelem, &iter);
		if (cur_attr.attr_page == attr->attr_page &&
		    cur_attr.attr_id == attr->attr_id) {
			attr->len = cur_attr.len;
			attr->val_ptr = cur_attr.val_ptr;
			return 0;
		}
		if (!iter)
			return -EIO;
	}
}
EXPORT_SYMBOL(extract_attr_from_ios);
/*
 * Queue a set-attributes request carrying @attr on component @cur_comp
 * and on each of its mirrors.  Returns 0 or -ENOMEM.
 */
static int _truncate_mirrors(struct ore_io_state *ios, unsigned cur_comp,
			     struct osd_attr *attr)
{
	int comp;
	int last = cur_comp + ios->layout->mirrors_p1;

	for (comp = cur_comp; comp < last; ++comp) {
		struct ore_per_dev_state *per_dev = &ios->per_dev[comp];
		struct osd_request *req;

		req = osd_start_request(_ios_od(ios, comp), GFP_KERNEL);
		if (unlikely(!req)) {
			ORE_ERR("%s: osd_start_request failed\n", __func__);
			return -ENOMEM;
		}
		per_dev->or = req;

		osd_req_set_attributes(req, _ios_obj(ios, comp));
		osd_req_add_set_attr_list(req, attr, 1);
	}

	return 0;
}
/* Bookkeeping derived from the truncation offset, used by ore_truncate() */
struct _trunc_info {
	struct ore_striping_info si;	/* striping info at the truncate point */
	u64 prev_group_obj_off;		/* object size for devs in earlier groups */
	u64 next_group_obj_off;		/* object size for devs in later groups */
	unsigned first_group_dev;	/* first dev of the group holding the point */
	unsigned nex_group_dev;		/* one past the last dev of that group */
};
/*
 * Compute the striping info at @file_offset and derive the per-group
 * object sizes that ore_truncate() assigns to devices outside the group
 * that holds the truncate point.
 */
static void _calc_trunk_info(struct ore_layout *layout, u64 file_offset,
			     struct _trunc_info *ti)
{
	unsigned stripe_unit = layout->stripe_unit;

	ore_calc_stripe_info(layout, file_offset, 0, &ti->si);

	ti->prev_group_obj_off = ti->si.M * stripe_unit;
	/* NOTE(review): when M == 0 later groups truncate to zero; confirm
	 * M's semantics against ore_calc_stripe_info() before changing. */
	ti->next_group_obj_off = ti->si.M ? (ti->si.M - 1) * stripe_unit : 0;

	ti->first_group_dev = ti->si.dev - (ti->si.dev % layout->group_width);
	ti->nex_group_dev = ti->first_group_dev + layout->group_width;
}
/*
 * Truncate the striped file described by @layout/@oc to @size bytes.
 * A per-object logical-length attribute is computed for every device
 * (and applied to all its mirrors), then all requests are executed.
 * Returns 0 on success or a negative errno.
 */
int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
		 u64 size)
{
	struct ore_io_state *ios;
	struct exofs_trunc_attr {
		struct osd_attr attr;
		__be64 newsize;
	} *size_attrs;
	struct _trunc_info ti;
	int i, ret;

	ret = ore_get_io_state(layout, oc, &ios);
	if (unlikely(ret))
		return ret;

	_calc_trunk_info(ios->layout, size, &ti);

	/* One attribute per device; each newsize must stay alive until
	 * ore_io_execute() below, hence the array. */
	size_attrs = kcalloc(ios->oc->numdevs, sizeof(*size_attrs),
			     GFP_KERNEL);
	if (unlikely(!size_attrs)) {
		ret = -ENOMEM;
		goto out;
	}

	ios->numdevs = ios->oc->numdevs;

	for (i = 0; i < ios->numdevs; ++i) {
		struct exofs_trunc_attr *size_attr = &size_attrs[i];
		u64 obj_size;

		if (i < ti.first_group_dev)
			/* device in a group before the truncate point */
			obj_size = ti.prev_group_obj_off;
		else if (i >= ti.nex_group_dev)
			/* device in a group after the truncate point */
			obj_size = ti.next_group_obj_off;
		else if (i < ti.si.dev) /* dev within this group */
			/* earlier in the last stripe: keeps a full unit */
			obj_size = ti.si.obj_offset +
				      ios->layout->stripe_unit - ti.si.unit_off;
		else if (i == ti.si.dev)
			/* the device holding the exact truncate offset */
			obj_size = ti.si.obj_offset;
		else /* i > ti.dev */
			/* later in the last stripe: not reached yet */
			obj_size = ti.si.obj_offset - ti.si.unit_off;

		size_attr->newsize = cpu_to_be64(obj_size);
		size_attr->attr = g_attr_logical_length;
		size_attr->attr.val_ptr = &size_attr->newsize;

		ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
			     _LLU(oc->comps->obj.id), _LLU(obj_size), i);
		ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
					&size_attr->attr);
		if (unlikely(ret))
			goto out;
	}
	ret = ore_io_execute(ios);

out:
	kfree(size_attrs);
	ore_put_io_state(ios);
	return ret;
}
EXPORT_SYMBOL(ore_truncate);
/* Template for the 8-byte OSD object "logical length" attribute, shared
 * by the size get/set paths (see ore_truncate()). */
const struct osd_attr g_attr_logical_length = ATTR_DEF(
	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
EXPORT_SYMBOL(g_attr_logical_length);
| gpl-2.0 |
vvanpo/linux | tools/usb/usbip/libsrc/usbip_common.c | 1730 | 6713 | /*
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
#include <stdio.h>
#include <string.h>

#include <libudev.h>

#include "usbip_common.h"
#include "names.h"
#undef PROGNAME
#define PROGNAME "libusbip"
int usbip_use_syslog;
int usbip_use_stderr;
int usbip_use_debug;
extern struct udev *udev_context;
/* Maps kernel USB_SPEED_* values to the sysfs "speed" attribute string
 * and a human readable description; NULL-terminated. */
struct speed_string {
	int num;
	char *speed;
	char *desc;
};

static const struct speed_string speed_strings[] = {
	{ USB_SPEED_UNKNOWN, "unknown", "Unknown Speed"},
	{ USB_SPEED_LOW, "1.5", "Low Speed(1.5Mbps)" },
	{ USB_SPEED_FULL, "12", "Full Speed(12Mbps)" },
	{ USB_SPEED_HIGH, "480", "High Speed(480Mbps)" },
	{ USB_SPEED_WIRELESS, "53.3-480", "Wireless"},
	{ USB_SPEED_SUPER, "5000", "Super Speed(5000Mbps)" },
	{ 0, NULL, NULL }
};

/* Maps stub (SDEV_*) and vhci (VDEV_*) port status codes to a
 * description; NULL-terminated. */
struct portst_string {
	int num;
	char *desc;
};

static struct portst_string portst_strings[] = {
	{ SDEV_ST_AVAILABLE, "Device Available" },
	{ SDEV_ST_USED, "Device in Use" },
	{ SDEV_ST_ERROR, "Device Error"},
	{ VDEV_ST_NULL, "Port Available"},
	{ VDEV_ST_NOTASSIGNED, "Port Initializing"},
	{ VDEV_ST_USED, "Port in Use"},
	{ VDEV_ST_ERROR, "Port Error"},
	{ 0, NULL}
};
/* Translate a port/device status code into a human readable string. */
const char *usbip_status_string(int32_t status)
{
	const struct portst_string *p;

	for (p = portst_strings; p->desc != NULL; p++) {
		if (p->num == status)
			return p->desc;
	}

	return "Unknown Status";
}
const char *usbip_speed_string(int num)
{
for (int i = 0; speed_strings[i].speed != NULL; i++)
if (speed_strings[i].num == num)
return speed_strings[i].desc;
return "Unknown Speed";
}
/* Dump one integer member of the local "udev"/"uinf" struct via dbg();
 * to_string() stringizes the member name for the label. */
#define DBG_UDEV_INTEGER(name)\
	dbg("%-20s = %x", to_string(name), (int) udev->name)

#define DBG_UINF_INTEGER(name)\
	dbg("%-20s = %x", to_string(name), (int) uinf->name)
/* Log the class/subclass/protocol triple of @uinf for debugging. */
void dump_usb_interface(struct usbip_usb_interface *uinf)
{
	char class_str[100];

	usbip_names_get_class(class_str, sizeof(class_str),
			      uinf->bInterfaceClass,
			      uinf->bInterfaceSubClass,
			      uinf->bInterfaceProtocol);

	dbg("%-20s = %s", "Interface(C/SC/P)", class_str);
}
/* Dump every cached descriptor field of @udev via dbg() for debugging. */
void dump_usb_device(struct usbip_usb_device *udev)
{
	char buff[100];

	dbg("%-20s = %s", "path", udev->path);
	dbg("%-20s = %s", "busid", udev->busid);

	usbip_names_get_class(buff, sizeof(buff),
			udev->bDeviceClass,
			udev->bDeviceSubClass,
			udev->bDeviceProtocol);
	dbg("%-20s = %s", "Device(C/SC/P)", buff);

	DBG_UDEV_INTEGER(bcdDevice);

	usbip_names_get_product(buff, sizeof(buff),
			udev->idVendor,
			udev->idProduct);
	dbg("%-20s = %s", "Vendor/Product", buff);

	DBG_UDEV_INTEGER(bNumConfigurations);
	DBG_UDEV_INTEGER(bNumInterfaces);

	dbg("%-20s = %s", "speed",
			usbip_speed_string(udev->speed));

	DBG_UDEV_INTEGER(busnum);
	DBG_UDEV_INTEGER(devnum);
}
/*
 * Read sysfs attribute @name of @dev and parse it with the sscanf
 * @format.  Returns the parsed value, or 0 when the attribute is
 * missing or unparsable.
 */
int read_attr_value(struct udev_device *dev, const char *name,
		    const char *format)
{
	int num = 0;
	const char *attr;

	attr = udev_device_get_sysattr_value(dev, name);
	if (!attr) {
		err("udev_device_get_sysattr_value failed");
		return num;
	}

	/* The client chooses the device configuration
	 * when attaching it so right after being bound
	 * to usbip-host on the server the device will
	 * have no configuration.
	 * Therefore, attributes such as bConfigurationValue
	 * and bNumInterfaces will not exist and sscanf will
	 * fail. Check for these cases and don't treat them
	 * as errors.
	 */
	if (sscanf(attr, format, &num) < 1 &&
	    strcmp(name, "bConfigurationValue") &&
	    strcmp(name, "bNumInterfaces"))
		err("sscanf failed for attribute %s", name);

	return num;
}
int read_attr_speed(struct udev_device *dev)
{
const char *speed;
speed = udev_device_get_sysattr_value(dev, "speed");
if (!speed) {
err("udev_device_get_sysattr_value failed");
goto err;
}
for (int i = 0; speed_strings[i].speed != NULL; i++) {
if (!strcmp(speed, speed_strings[i].speed))
return speed_strings[i].num;
}
err:
return USB_SPEED_UNKNOWN;
}
/* Read sysfs attribute "name" of dev using sscanf "format" and store the
 * result into object->name, cast to "type". */
#define READ_ATTR(object, type, dev, name, format) \
	do { \
		(object)->name = (type) read_attr_value(dev, to_string(name), \
							format); \
	} while (0)
/*
 * Populate @udev from the sysfs attributes of udev device @sdev.
 * Always returns 0; unreadable attributes are reported via err() by the
 * READ_ATTR()/read_attr_value() helpers and left as 0.
 */
int read_usb_device(struct udev_device *sdev, struct usbip_usb_device *udev)
{
	uint32_t busnum, devnum;
	const char *path, *name;

	READ_ATTR(udev, uint8_t, sdev, bDeviceClass, "%02x\n");
	READ_ATTR(udev, uint8_t, sdev, bDeviceSubClass, "%02x\n");
	READ_ATTR(udev, uint8_t, sdev, bDeviceProtocol, "%02x\n");

	READ_ATTR(udev, uint16_t, sdev, idVendor, "%04x\n");
	READ_ATTR(udev, uint16_t, sdev, idProduct, "%04x\n");
	READ_ATTR(udev, uint16_t, sdev, bcdDevice, "%04x\n");

	READ_ATTR(udev, uint8_t, sdev, bConfigurationValue, "%02x\n");
	READ_ATTR(udev, uint8_t, sdev, bNumConfigurations, "%02x\n");
	READ_ATTR(udev, uint8_t, sdev, bNumInterfaces, "%02x\n");

	READ_ATTR(udev, uint8_t, sdev, devnum, "%d\n");
	udev->speed = read_attr_speed(sdev);

	path = udev_device_get_syspath(sdev);
	name = udev_device_get_sysname(sdev);

	/*
	 * strncpy() does not NUL-terminate when the source fills the
	 * destination, so copy one byte less and terminate explicitly;
	 * later "%s" users would otherwise run off the buffer end.
	 */
	strncpy(udev->path, path, SYSFS_PATH_MAX - 1);
	udev->path[SYSFS_PATH_MAX - 1] = '\0';
	strncpy(udev->busid, name, SYSFS_BUS_ID_SIZE - 1);
	udev->busid[SYSFS_BUS_ID_SIZE - 1] = '\0';

	sscanf(name, "%u-%u", &busnum, &devnum);
	udev->busnum = busnum;

	return 0;
}
/*
 * Populate @uinf with the class/subclass/protocol of interface @i of
 * device @udev by looking up its sysfs node.  Returns 0 on success,
 * -1 when the interface's udev device cannot be found.
 */
int read_usb_interface(struct usbip_usb_device *udev, int i,
		       struct usbip_usb_interface *uinf)
{
	char busid[SYSFS_BUS_ID_SIZE];
	struct udev_device *sif;

	/* snprintf: a long busid must not overflow the fixed buffer */
	snprintf(busid, sizeof(busid), "%s:%d.%d",
		 udev->busid, udev->bConfigurationValue, i);

	sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
	if (!sif) {
		err("udev_device_new_from_subsystem_sysname %s failed", busid);
		return -1;
	}

	READ_ATTR(uinf, uint8_t, sif, bInterfaceClass, "%02x\n");
	READ_ATTR(uinf, uint8_t, sif, bInterfaceSubClass, "%02x\n");
	READ_ATTR(uinf, uint8_t, sif, bInterfaceProtocol, "%02x\n");

	/* Drop the reference taken by udev_device_new_*; the attribute
	 * values were copied into uinf above, nothing points into sif. */
	udev_device_unref(sif);

	return 0;
}
/* Thin wrapper: load the USB ID name database via names_init(). */
int usbip_names_init(char *f)
{
	return names_init(f);
}

/* Thin wrapper: release the USB ID name database via names_free(). */
void usbip_names_free(void)
{
	names_free();
}
/*
 * Format "vendor : product (vvvv:pppp)" into @buff, falling back to
 * "unknown vendor"/"unknown product" when the IDs are not in the
 * names database.
 */
void usbip_names_get_product(char *buff, size_t size, uint16_t vendor,
			     uint16_t product)
{
	const char *prod = names_product(vendor, product);
	const char *vend = names_vendor(vendor);

	if (!prod)
		prod = "unknown product";
	if (!vend)
		vend = "unknown vendor";

	snprintf(buff, size, "%s : %s (%04x:%04x)", vend, prod, vendor, product);
}
/*
 * Format a "class / subclass / protocol (cc/ss/pp)" description into
 * @buff.  A 0/0/0 triple means the class is defined per interface and
 * gets a dedicated message.
 */
void usbip_names_get_class(char *buff, size_t size, uint8_t class,
			   uint8_t subclass, uint8_t protocol)
{
	const char *c, *s, *p;

	if (class == 0 && subclass == 0 && protocol == 0) {
		snprintf(buff, size, "(Defined at Interface level) (%02x/%02x/%02x)", class, subclass, protocol);
		return;
	}

	p = names_protocol(class, subclass, protocol);
	s = names_subclass(class, subclass);
	c = names_class(class);

	if (!p)
		p = "unknown protocol";
	if (!s)
		s = "unknown subclass";
	if (!c)
		c = "unknown class";

	snprintf(buff, size, "%s / %s / %s (%02x/%02x/%02x)", c, s, p, class, subclass, protocol);
}
| gpl-2.0 |
varunchitre15/thunderzap_sprout | drivers/hwmon/w83791d.c | 2242 | 51539 | /*
* w83791d.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
*
* Copyright (C) 2006-2007 Charles Spirakis <bezaur@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* Supports following chips:
*
* Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83791d 10 5 5 3 0x71 0x5ca3 yes no
*
* The w83791d chip appears to be part way between the 83781d and the
* 83792d. Thus, this file is derived from both the w83792d.c and
* w83781d.c files.
*
* The w83791g chip is the same as the w83791d but lead-free.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
/* Channel counts provided by the W83791D monitoring chip */
#define NUMBER_OF_VIN 10
#define NUMBER_OF_FANIN 5
#define NUMBER_OF_TEMPIN 3
#define NUMBER_OF_PWM 5

/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
						I2C_CLIENT_END };

/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients,
	"List of subclient addresses: {bus, clientaddr, subclientaddr1, subclientaddr2}");

/* Module option: force a hardware chip reset at probe time */
static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to one to force a hardware chip reset");

/* Module option: run the extra software initialization sequence */
static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force extra software initialization");
/* The W83791D registers */
static const u8 W83791D_REG_IN[NUMBER_OF_VIN] = {
0x20, /* VCOREA in DataSheet */
0x21, /* VINR0 in DataSheet */
0x22, /* +3.3VIN in DataSheet */
0x23, /* VDD5V in DataSheet */
0x24, /* +12VIN in DataSheet */
0x25, /* -12VIN in DataSheet */
0x26, /* -5VIN in DataSheet */
0xB0, /* 5VSB in DataSheet */
0xB1, /* VBAT in DataSheet */
0xB2 /* VINR1 in DataSheet */
};
static const u8 W83791D_REG_IN_MAX[NUMBER_OF_VIN] = {
0x2B, /* VCOREA High Limit in DataSheet */
0x2D, /* VINR0 High Limit in DataSheet */
0x2F, /* +3.3VIN High Limit in DataSheet */
0x31, /* VDD5V High Limit in DataSheet */
0x33, /* +12VIN High Limit in DataSheet */
0x35, /* -12VIN High Limit in DataSheet */
0x37, /* -5VIN High Limit in DataSheet */
0xB4, /* 5VSB High Limit in DataSheet */
0xB6, /* VBAT High Limit in DataSheet */
0xB8 /* VINR1 High Limit in DataSheet */
};
static const u8 W83791D_REG_IN_MIN[NUMBER_OF_VIN] = {
0x2C, /* VCOREA Low Limit in DataSheet */
0x2E, /* VINR0 Low Limit in DataSheet */
0x30, /* +3.3VIN Low Limit in DataSheet */
0x32, /* VDD5V Low Limit in DataSheet */
0x34, /* +12VIN Low Limit in DataSheet */
0x36, /* -12VIN Low Limit in DataSheet */
0x38, /* -5VIN Low Limit in DataSheet */
0xB5, /* 5VSB Low Limit in DataSheet */
0xB7, /* VBAT Low Limit in DataSheet */
0xB9 /* VINR1 Low Limit in DataSheet */
};
static const u8 W83791D_REG_FAN[NUMBER_OF_FANIN] = {
0x28, /* FAN 1 Count in DataSheet */
0x29, /* FAN 2 Count in DataSheet */
0x2A, /* FAN 3 Count in DataSheet */
0xBA, /* FAN 4 Count in DataSheet */
0xBB, /* FAN 5 Count in DataSheet */
};
static const u8 W83791D_REG_FAN_MIN[NUMBER_OF_FANIN] = {
0x3B, /* FAN 1 Count Low Limit in DataSheet */
0x3C, /* FAN 2 Count Low Limit in DataSheet */
0x3D, /* FAN 3 Count Low Limit in DataSheet */
0xBC, /* FAN 4 Count Low Limit in DataSheet */
0xBD, /* FAN 5 Count Low Limit in DataSheet */
};
static const u8 W83791D_REG_PWM[NUMBER_OF_PWM] = {
0x81, /* PWM 1 duty cycle register in DataSheet */
0x83, /* PWM 2 duty cycle register in DataSheet */
0x94, /* PWM 3 duty cycle register in DataSheet */
0xA0, /* PWM 4 duty cycle register in DataSheet */
0xA1, /* PWM 5 duty cycle register in DataSheet */
};
static const u8 W83791D_REG_TEMP_TARGET[3] = {
0x85, /* PWM 1 target temperature for temp 1 */
0x86, /* PWM 2 target temperature for temp 2 */
0x96, /* PWM 3 target temperature for temp 3 */
};
static const u8 W83791D_REG_TEMP_TOL[2] = {
0x87, /* PWM 1/2 temperature tolerance */
0x97, /* PWM 3 temperature tolerance */
};
static const u8 W83791D_REG_FAN_CFG[2] = {
0x84, /* FAN 1/2 configuration */
0x95, /* FAN 3 configuration */
};
static const u8 W83791D_REG_FAN_DIV[3] = {
0x47, /* contains FAN1 and FAN2 Divisor */
0x4b, /* contains FAN3 Divisor */
0x5C, /* contains FAN4 and FAN5 Divisor */
};
#define W83791D_REG_BANK 0x4E
#define W83791D_REG_TEMP2_CONFIG 0xC2
#define W83791D_REG_TEMP3_CONFIG 0xCA
static const u8 W83791D_REG_TEMP1[3] = {
0x27, /* TEMP 1 in DataSheet */
0x39, /* TEMP 1 Over in DataSheet */
0x3A, /* TEMP 1 Hyst in DataSheet */
};
static const u8 W83791D_REG_TEMP_ADD[2][6] = {
{0xC0, /* TEMP 2 in DataSheet */
0xC1, /* TEMP 2(0.5 deg) in DataSheet */
0xC5, /* TEMP 2 Over High part in DataSheet */
0xC6, /* TEMP 2 Over Low part in DataSheet */
0xC3, /* TEMP 2 Thyst High part in DataSheet */
0xC4}, /* TEMP 2 Thyst Low part in DataSheet */
{0xC8, /* TEMP 3 in DataSheet */
0xC9, /* TEMP 3(0.5 deg) in DataSheet */
0xCD, /* TEMP 3 Over High part in DataSheet */
0xCE, /* TEMP 3 Over Low part in DataSheet */
0xCB, /* TEMP 3 Thyst High part in DataSheet */
0xCC} /* TEMP 3 Thyst Low part in DataSheet */
};
#define W83791D_REG_BEEP_CONFIG 0x4D
static const u8 W83791D_REG_BEEP_CTRL[3] = {
0x56, /* BEEP Control Register 1 */
0x57, /* BEEP Control Register 2 */
0xA3, /* BEEP Control Register 3 */
};
#define W83791D_REG_GPIO 0x15
#define W83791D_REG_CONFIG 0x40
#define W83791D_REG_VID_FANDIV 0x47
#define W83791D_REG_DID_VID4 0x49
#define W83791D_REG_WCHIPID 0x58
#define W83791D_REG_CHIPMAN 0x4F
#define W83791D_REG_PIN 0x4B
#define W83791D_REG_I2C_SUBADDR 0x4A
#define W83791D_REG_ALARM1 0xA9 /* realtime status register1 */
#define W83791D_REG_ALARM2 0xAA /* realtime status register2 */
#define W83791D_REG_ALARM3 0xAB /* realtime status register3 */
#define W83791D_REG_VBAT 0x5D
#define W83791D_REG_I2C_ADDR 0x48
/*
* The SMBus locks itself. The Winbond W83791D has a bank select register
* (index 0x4e), but the driver only accesses registers in bank 0. Since
* we don't switch banks, we don't need any special code to handle
* locking access between bank switches
*/
/* Read one chip register over SMBus (bank 0 only, see comment above);
 * returns the byte read or a negative errno. */
static inline int w83791d_read(struct i2c_client *client, u8 reg)
{
	return i2c_smbus_read_byte_data(client, reg);
}

/* Write one chip register over SMBus; returns 0 or a negative errno. */
static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(client, reg, value);
}
/*
* The analog voltage inputs have 16mV LSB. Since the sysfs output is
* in mV as would be measured on the chip input pin, need to just
* multiply/divide by 16 to translate from/to register values.
*/
#define IN_TO_REG(val) (clamp_val((((val) + 8) / 16), 0, 255))
#define IN_FROM_REG(val) ((val) * 16)
/*
 * Convert an RPM value into the 8-bit fan-count register encoding for
 * the given divisor.  0 RPM maps to 255 (count saturated / no limit).
 */
static u8 fan_to_reg(long rpm, int div)
{
	long count;

	if (rpm == 0)
		return 255;

	rpm = clamp_val(rpm, 1, 1000000);
	count = (1350000 + rpm * div / 2) / (rpm * div);
	return clamp_val(count, 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
((val) == 255 ? 0 : \
1350000 / ((val) * (div))))
/* for temp1 which is 8-bit resolution, LSB = 1 degree Celsius */
#define TEMP1_FROM_REG(val) ((val) * 1000)
#define TEMP1_TO_REG(val) ((val) <= -128000 ? -128 : \
(val) >= 127000 ? 127 : \
(val) < 0 ? ((val) - 500) / 1000 : \
((val) + 500) / 1000)
/*
* for temp2 and temp3 which are 9-bit resolution, LSB = 0.5 degree Celsius
* Assumes the top 8 bits are the integral amount and the bottom 8 bits
* are the fractional amount. Since we only have 0.5 degree resolution,
* the bottom 7 bits will always be zero
*/
#define TEMP23_FROM_REG(val) ((val) / 128 * 500)
#define TEMP23_TO_REG(val) ((val) <= -128000 ? 0x8000 : \
(val) >= 127500 ? 0x7F80 : \
(val) < 0 ? ((val) - 250) / 500 * 128 : \
((val) + 250) / 500 * 128)
/* for thermal cruise target temp, 7-bits, LSB = 1 degree Celsius */
#define TARGET_TEMP_TO_REG(val) ((val) < 0 ? 0 : \
(val) >= 127000 ? 127 : \
((val) + 500) / 1000)
/* for thermal cruise temp tolerance, 4-bits, LSB = 1 degree Celsius */
#define TOL_TEMP_TO_REG(val) ((val) >= 15000 ? 15 : \
((val) + 500) / 1000)
#define BEEP_MASK_TO_REG(val) ((val) & 0xffffff)
#define BEEP_MASK_FROM_REG(val) ((val) & 0xffffff)
#define DIV_FROM_REG(val) (1 << (val))
/*
 * Convert a fan divisor value (1..128) into its register encoding
 * (log2 of the divisor).  @nr is unused but kept for the call sites.
 */
static u8 div_to_reg(int nr, long val)
{
	u8 reg = 0;

	/* fan divisors max out at 128 */
	val = clamp_val(val, 1, 128) >> 1;
	while (val != 0 && reg < 7) {
		val >>= 1;
		reg++;
	}
	return reg;
}
/* Per-client driver state: cached register shadows guarded by
 * update_lock and refreshed by w83791d_update_device(). */
struct w83791d_data {
	struct device *hwmon_dev;
	struct mutex update_lock;

	char valid;			/* !=0 if following fields are valid */
	unsigned long last_updated;	/* In jiffies */

	/* array of 2 pointers to subclients */
	struct i2c_client *lm75[2];

	/* volts */
	u8 in[NUMBER_OF_VIN];		/* Register value */
	u8 in_max[NUMBER_OF_VIN];	/* Register value */
	u8 in_min[NUMBER_OF_VIN];	/* Register value */

	/* fans */
	u8 fan[NUMBER_OF_FANIN];	/* Register value */
	u8 fan_min[NUMBER_OF_FANIN];	/* Register value */
	u8 fan_div[NUMBER_OF_FANIN];	/* Register encoding, shifted right */

	/* Temperature sensors */
	s8 temp1[3];		/* current, over, thyst */
	s16 temp_add[2][3];	/* fixed point value. Top 8 bits are the
				 * integral part, bottom 8 bits are the
				 * fractional part. We only use the top
				 * 9 bits as the resolution is only
				 * to the 0.5 degree C...
				 * two sensors with three values
				 * (cur, over, hyst)
				 */

	/* PWMs */
	u8 pwm[5];		/* pwm duty cycle */
	u8 pwm_enable[3];	/* pwm enable status for fan 1-3
				 * (fan 4-5 only support manual mode)
				 */

	u8 temp_target[3];	/* pwm 1-3 target temperature */
	u8 temp_tolerance[3];	/* pwm 1-3 temperature tolerance */

	/* Misc */
	u32 alarms;		/* realtime status register encoding,combined */
	u8 beep_enable;		/* Global beep enable */
	u32 beep_mask;		/* Mask off specific beeps */
	u8 vid;			/* Register encoding, combined */
	u8 vrm;			/* hwmon-vid */
};
/* Forward declarations -- definitions follow the sysfs attribute code */
static int w83791d_probe(struct i2c_client *client,
			const struct i2c_device_id *id);
static int w83791d_detect(struct i2c_client *client,
			  struct i2c_board_info *info);
static int w83791d_remove(struct i2c_client *client);

static int w83791d_read(struct i2c_client *client, u8 reg);
static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
static struct w83791d_data *w83791d_update_device(struct device *dev);

#ifdef DEBUG
static void w83791d_print_debug(struct w83791d_data *data, struct device *dev);
#endif

static void w83791d_init_client(struct i2c_client *client);

/* I2C device ids handled by this driver */
static const struct i2c_device_id w83791d_id[] = {
	{ "w83791d", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, w83791d_id);

/* Driver registration glue: probe/remove/detect plus the scan list */
static struct i2c_driver w83791d_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = "w83791d",
	},
	.probe = w83791d_probe,
	.remove = w83791d_remove,
	.id_table = w83791d_id,
	.detect = w83791d_detect,
	.address_list = normal_i2c,
};
/* following are the sysfs callback functions */
#define show_in_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
struct w83791d_data *data = w83791d_update_device(dev); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", IN_FROM_REG(data->reg[nr])); \
}
show_in_reg(in);
show_in_reg(in_min);
show_in_reg(in_max);
#define store_in_reg(REG, reg) \
static ssize_t store_in_##reg(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
struct i2c_client *client = to_i2c_client(dev); \
struct w83791d_data *data = i2c_get_clientdata(client); \
int nr = sensor_attr->index; \
unsigned long val; \
int err = kstrtoul(buf, 10, &val); \
if (err) \
return err; \
mutex_lock(&data->update_lock); \
data->in_##reg[nr] = IN_TO_REG(val); \
w83791d_write(client, W83791D_REG_IN_##REG[nr], data->in_##reg[nr]); \
mutex_unlock(&data->update_lock); \
\
return count; \
}
store_in_reg(MIN, min);
store_in_reg(MAX, max);
static struct sensor_device_attribute sda_in_input[] = {
SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0),
SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1),
SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2),
SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3),
SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4),
SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5),
SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6),
SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7),
SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8),
SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9),
};
static struct sensor_device_attribute sda_in_min[] = {
SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
};
static struct sensor_device_attribute sda_in_max[] = {
SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
};
static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr =
to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int bitnr = sensor_attr->index;
return sprintf(buf, "%d\n", (data->beep_mask >> bitnr) & 1);
}
/*
 * Sysfs store: enable/disable the beep for one channel.
 *
 * The 24-bit beep mask is spread over three 8-bit control registers;
 * bitnr indexes the combined mask, bytenr selects the register that
 * holds it.  Only the affected register is re-read and written back.
 */
static ssize_t store_beep(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr =
			to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int bitnr = sensor_attr->index;
	int bytenr = bitnr / 8;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* Any non-zero input enables the beep for this channel */
	val = val ? 1 : 0;

	mutex_lock(&data->update_lock);

	/* Refresh the affected byte from hardware before editing it */
	data->beep_mask &= ~(0xff << (bytenr * 8));
	data->beep_mask |= w83791d_read(client, W83791D_REG_BEEP_CTRL[bytenr])
		<< (bytenr * 8);

	data->beep_mask &= ~(1 << bitnr);
	data->beep_mask |= val << bitnr;

	w83791d_write(client, W83791D_REG_BEEP_CTRL[bytenr],
		(data->beep_mask >> (bytenr * 8)) & 0xff);

	mutex_unlock(&data->update_lock);

	return count;
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr =
to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int bitnr = sensor_attr->index;
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
/*
* Note: The bitmask for the beep enable/disable is different than
* the bitmask for the alarm.
*/
static struct sensor_device_attribute sda_in_beep[] = {
SENSOR_ATTR(in0_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 0),
SENSOR_ATTR(in1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 13),
SENSOR_ATTR(in2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 2),
SENSOR_ATTR(in3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 3),
SENSOR_ATTR(in4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 8),
SENSOR_ATTR(in5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 9),
SENSOR_ATTR(in6_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 10),
SENSOR_ATTR(in7_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 16),
SENSOR_ATTR(in8_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 17),
SENSOR_ATTR(in9_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 14),
};
static struct sensor_device_attribute sda_in_alarm[] = {
SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0),
SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1),
SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2),
SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3),
SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8),
SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9),
SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10),
SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 19),
SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 20),
SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 14),
};
#define show_fan_reg(reg) \
static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct sensor_device_attribute *sensor_attr = \
to_sensor_dev_attr(attr); \
struct w83791d_data *data = w83791d_update_device(dev); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}
show_fan_reg(fan);
show_fan_reg(fan_min);
/* Sysfs store: set the fan minimum (RPM), encoded with the current divisor. */
static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = to_sensor_dev_attr(attr)->index;
	unsigned long rpm;
	int err = kstrtoul(buf, 10, &rpm);

	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_min[nr] = fan_to_reg(rpm, DIV_FROM_REG(data->fan_div[nr]));
	w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", DIV_FROM_REG(data->fan_div[nr]));
}
/*
 * Note: we save and restore the fan minimum here, because its value is
 * determined in part by the fan divisor. This follows the principle of
 * least surprise; the user doesn't expect the fan minimum to change just
 * because the divisor changed.
 */
static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	unsigned long min;
	u8 tmp_fan_div;
	u8 fan_div_reg;
	u8 vbat_reg;
	int indx = 0;
	u8 keep_mask = 0;
	u8 new_shift = 0;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* Save fan_min (in RPM, so it survives the divisor change) */
	min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));

	mutex_lock(&data->update_lock);
	data->fan_div[nr] = div_to_reg(nr, val);

	/*
	 * The low divisor bits live in one of three shared FAN_DIV
	 * registers; pick the register index, the bits to preserve,
	 * and the field shift for this fan.
	 */
	switch (nr) {
	case 0:
		indx = 0;
		keep_mask = 0xcf;
		new_shift = 4;
		break;
	case 1:
		indx = 0;
		keep_mask = 0x3f;
		new_shift = 6;
		break;
	case 2:
		indx = 1;
		keep_mask = 0x3f;
		new_shift = 6;
		break;
	case 3:
		indx = 2;
		keep_mask = 0xf8;
		new_shift = 0;
		break;
	case 4:
		indx = 2;
		keep_mask = 0x8f;
		new_shift = 4;
		break;
#ifdef DEBUG
	default:
		dev_warn(dev, "store_fan_div: Unexpected nr seen: %d\n", nr);
		count = -EINVAL;
		goto err_exit;
#endif
	}

	/* Read-modify-write so the other fans' fields are preserved */
	fan_div_reg = w83791d_read(client, W83791D_REG_FAN_DIV[indx])
			& keep_mask;
	tmp_fan_div = (data->fan_div[nr] << new_shift) & ~keep_mask;

	w83791d_write(client, W83791D_REG_FAN_DIV[indx],
				fan_div_reg | tmp_fan_div);

	/* Bit 2 of fans 0-2 is stored in the vbat register (bits 5-7) */
	if (nr < 3) {
		keep_mask = ~(1 << (nr + 5));
		vbat_reg = w83791d_read(client, W83791D_REG_VBAT)
				& keep_mask;
		tmp_fan_div = (data->fan_div[nr] << (3 + nr)) & ~keep_mask;
		w83791d_write(client, W83791D_REG_VBAT,
				vbat_reg | tmp_fan_div);
	}

	/* Restore fan_min using the new divisor */
	data->fan_min[nr] = fan_to_reg(min, DIV_FROM_REG(data->fan_div[nr]));
	w83791d_write(client, W83791D_REG_FAN_MIN[nr], data->fan_min[nr]);

#ifdef DEBUG
err_exit:
#endif
	mutex_unlock(&data->update_lock);
	return count;
}
/*
 * Per-fan sysfs attributes.  The last SENSOR_ATTR argument is the fan
 * index, except for beep/alarm where it is the bit position within the
 * shared 24-bit beep/alarm word.
 */
static struct sensor_device_attribute sda_fan_input[] = {
	SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
	SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
	SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
	SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
	SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
};

static struct sensor_device_attribute sda_fan_min[] = {
	SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO,
			show_fan_min, store_fan_min, 0),
	SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO,
			show_fan_min, store_fan_min, 1),
	SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO,
			show_fan_min, store_fan_min, 2),
	SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO,
			show_fan_min, store_fan_min, 3),
	SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO,
			show_fan_min, store_fan_min, 4),
};

static struct sensor_device_attribute sda_fan_div[] = {
	SENSOR_ATTR(fan1_div, S_IWUSR | S_IRUGO,
			show_fan_div, store_fan_div, 0),
	SENSOR_ATTR(fan2_div, S_IWUSR | S_IRUGO,
			show_fan_div, store_fan_div, 1),
	SENSOR_ATTR(fan3_div, S_IWUSR | S_IRUGO,
			show_fan_div, store_fan_div, 2),
	SENSOR_ATTR(fan4_div, S_IWUSR | S_IRUGO,
			show_fan_div, store_fan_div, 3),
	SENSOR_ATTR(fan5_div, S_IWUSR | S_IRUGO,
			show_fan_div, store_fan_div, 4),
};

static struct sensor_device_attribute sda_fan_beep[] = {
	SENSOR_ATTR(fan1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 6),
	SENSOR_ATTR(fan2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 7),
	SENSOR_ATTR(fan3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 11),
	SENSOR_ATTR(fan4_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 21),
	SENSOR_ATTR(fan5_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 22),
};

static struct sensor_device_attribute sda_fan_alarm[] = {
	SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6),
	SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7),
	SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11),
	SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 21),
	SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 22),
};
/* read/write PWMs */
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", data->pwm[nr]);
}
/*
 * Set the PWM duty cycle for this output; values are clamped to the
 * hardware range 0-255.
 *
 * Fix: propagate the real error code from kstrtoul() instead of
 * collapsing every parse failure to -EINVAL, consistent with
 * store_fan_min()/store_temp1() in this driver.
 */
static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->pwm[nr] = clamp_val(val, 0, 255);
	w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}
/* PWM duty-cycle attributes; final argument is the PWM output index */
static struct sensor_device_attribute sda_pwm[] = {
	SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO,
			show_pwm, store_pwm, 0),
	SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO,
			show_pwm, store_pwm, 1),
	SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO,
			show_pwm, store_pwm, 2),
	SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO,
			show_pwm, store_pwm, 3),
	SENSOR_ATTR(pwm5, S_IWUSR | S_IRUGO,
			show_pwm, store_pwm, 4),
};
static ssize_t show_pwmenable(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%u\n", data->pwm_enable[nr] + 1);
}
/*
 * Set the PWM mode for this output.  Valid sysfs values are 1-3,
 * stored in hardware as 0-2 in a two-bit field of a shared FAN_CFG
 * register.
 *
 * Fix: propagate the real error code from kstrtoul() instead of
 * collapsing parse failures to -EINVAL, consistent with
 * store_fan_min()/store_temp1() in this driver.
 */
static ssize_t store_pwmenable(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	unsigned long val;
	u8 reg_cfg_tmp;
	u8 reg_idx = 0;
	u8 val_shift = 0;
	u8 keep_mask = 0;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;
	if (val < 1 || val > 3)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	data->pwm_enable[nr] = val - 1;

	/* Select the config register and bit field for this output */
	switch (nr) {
	case 0:
		reg_idx = 0;
		val_shift = 2;
		keep_mask = 0xf3;
		break;
	case 1:
		reg_idx = 0;
		val_shift = 4;
		keep_mask = 0xcf;
		break;
	case 2:
		reg_idx = 1;
		val_shift = 2;
		keep_mask = 0xf3;
		break;
	}

	/* Read-modify-write so the other outputs' bits are preserved */
	reg_cfg_tmp = w83791d_read(client, W83791D_REG_FAN_CFG[reg_idx]);
	reg_cfg_tmp = (reg_cfg_tmp & keep_mask) |
			data->pwm_enable[nr] << val_shift;
	w83791d_write(client, W83791D_REG_FAN_CFG[reg_idx], reg_cfg_tmp);
	mutex_unlock(&data->update_lock);

	return count;
}
/* Only PWM outputs 1-3 have a mode field (see store_pwmenable) */
static struct sensor_device_attribute sda_pwmenable[] = {
	SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
			show_pwmenable, store_pwmenable, 0),
	SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
			show_pwmenable, store_pwmenable, 1),
	SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO,
			show_pwmenable, store_pwmenable, 2),
};
/* For Smart Fan I / Thermal Cruise */
static ssize_t show_temp_target(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int nr = sensor_attr->index;
return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp_target[nr]));
}
/*
 * Set the thermal-cruise target temperature for this channel.  Bit 7
 * of the target register is read back and preserved across the write.
 *
 * Fix: propagate the real error code from kstrtol() instead of
 * collapsing every parse failure to -EINVAL, consistent with
 * store_temp1() in this driver.
 */
static ssize_t store_temp_target(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	long val;
	u8 target_mask;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_target[nr] = TARGET_TEMP_TO_REG(val);
	target_mask = w83791d_read(client,
			W83791D_REG_TEMP_TARGET[nr]) & 0x80;
	w83791d_write(client, W83791D_REG_TEMP_TARGET[nr],
			data->temp_target[nr] | target_mask);
	mutex_unlock(&data->update_lock);
	return count;
}
/* Thermal-cruise target temperature per channel */
static struct sensor_device_attribute sda_temp_target[] = {
	SENSOR_ATTR(temp1_target, S_IWUSR | S_IRUGO,
			show_temp_target, store_temp_target, 0),
	SENSOR_ATTR(temp2_target, S_IWUSR | S_IRUGO,
			show_temp_target, store_temp_target, 1),
	SENSOR_ATTR(temp3_target, S_IWUSR | S_IRUGO,
			show_temp_target, store_temp_target, 2),
};
static ssize_t show_temp_tolerance(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct w83791d_data *data = w83791d_update_device(dev);
int nr = sensor_attr->index;
return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp_tolerance[nr]));
}
/*
 * Set the temperature tolerance for this channel.  Tolerances are
 * packed two per register in 4-bit nibbles, so the neighbor's nibble
 * is read back and preserved.
 *
 * Fix: propagate the real error code from kstrtoul() instead of
 * collapsing every parse failure to -EINVAL, consistent with
 * store_fan_min()/store_temp1() in this driver.
 */
static ssize_t store_temp_tolerance(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int nr = sensor_attr->index;
	unsigned long val;
	u8 target_mask;
	u8 reg_idx = 0;
	u8 val_shift = 0;
	u8 keep_mask = 0;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* Select the register and nibble for this channel */
	switch (nr) {
	case 0:
		reg_idx = 0;
		val_shift = 0;
		keep_mask = 0xf0;
		break;
	case 1:
		reg_idx = 0;
		val_shift = 4;
		keep_mask = 0x0f;
		break;
	case 2:
		reg_idx = 1;
		val_shift = 0;
		keep_mask = 0xf0;
		break;
	}

	mutex_lock(&data->update_lock);
	data->temp_tolerance[nr] = TOL_TEMP_TO_REG(val);
	target_mask = w83791d_read(client,
			W83791D_REG_TEMP_TOL[reg_idx]) & keep_mask;
	w83791d_write(client, W83791D_REG_TEMP_TOL[reg_idx],
			(data->temp_tolerance[nr] << val_shift) | target_mask);
	mutex_unlock(&data->update_lock);
	return count;
}
/* Thermal-cruise temperature tolerance per channel */
static struct sensor_device_attribute sda_temp_tolerance[] = {
	SENSOR_ATTR(temp1_tolerance, S_IWUSR | S_IRUGO,
			show_temp_tolerance, store_temp_tolerance, 0),
	SENSOR_ATTR(temp2_tolerance, S_IWUSR | S_IRUGO,
			show_temp_tolerance, store_temp_tolerance, 1),
	SENSOR_ATTR(temp3_tolerance, S_IWUSR | S_IRUGO,
			show_temp_tolerance, store_temp_tolerance, 2),
};
/* read/write the temperature1, includes measured value and limits */
static ssize_t show_temp1(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct w83791d_data *data = w83791d_update_device(dev);
return sprintf(buf, "%d\n", TEMP1_FROM_REG(data->temp1[attr->index]));
}
/* Set a temperature1 limit register (index selects max or max_hyst). */
static ssize_t store_temp1(struct device *dev, struct device_attribute *devattr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int idx = to_sensor_dev_attr(devattr)->index;
	long degrees;
	int ret;

	ret = kstrtol(buf, 10, &degrees);
	if (ret)
		return ret;

	mutex_lock(&data->update_lock);
	data->temp1[idx] = TEMP1_TO_REG(degrees);
	w83791d_write(client, W83791D_REG_TEMP1[idx], data->temp1[idx]);
	mutex_unlock(&data->update_lock);

	return count;
}
/* read/write temperature2-3, includes measured value and limits */

/* nr selects the sensor (temp2/temp3); index selects the value. */
static ssize_t show_temp23(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	struct w83791d_data *data = w83791d_update_device(dev);

	return sprintf(buf, "%d\n",
		TEMP23_FROM_REG(data->temp_add[attr->nr][attr->index]));
}
/*
 * Set a temperature2/3 limit.  'nr' selects the sensor, 'index' the
 * value (1 = max, 2 = max_hyst, per the attribute tables).
 */
static ssize_t store_temp23(struct device *dev,
				struct device_attribute *devattr,
				const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	long val;
	int err;
	int nr = attr->nr;
	int index = attr->index;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_add[nr][index] = TEMP23_TO_REG(val);
	/*
	 * The value is split across two registers: the high byte first,
	 * then the low byte in which only bit 7 is kept -- hence
	 * "& 0x80" rather than "& 0xff".
	 * NOTE(review): presumed to be the 9-bit (half-degree) TEMP23
	 * register format; confirm against the W83791D datasheet.
	 */
	w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2],
				data->temp_add[nr][index] >> 8);
	w83791d_write(client, W83791D_REG_TEMP_ADD[nr][index * 2 + 1],
				data->temp_add[nr][index] & 0x80);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
 * Temperature attributes.  For the _2 variants, the first extra
 * argument is the sensor number (temp2/temp3 handlers), the second
 * selects input (0), max (1) or max_hyst (2).
 */
static struct sensor_device_attribute_2 sda_temp_input[] = {
	SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp1, NULL, 0, 0),
	SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp23, NULL, 0, 0),
	SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp23, NULL, 1, 0),
};

static struct sensor_device_attribute_2 sda_temp_max[] = {
	SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR,
			show_temp1, store_temp1, 0, 1),
	SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR,
			show_temp23, store_temp23, 0, 1),
	SENSOR_ATTR_2(temp3_max, S_IRUGO | S_IWUSR,
			show_temp23, store_temp23, 1, 1),
};

static struct sensor_device_attribute_2 sda_temp_max_hyst[] = {
	SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR,
			show_temp1, store_temp1, 0, 2),
	SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR,
			show_temp23, store_temp23, 0, 2),
	SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR,
			show_temp23, store_temp23, 1, 2),
};

/*
 * Note: The bitmask for the beep enable/disable is different than
 * the bitmask for the alarm.
 */
static struct sensor_device_attribute sda_temp_beep[] = {
	SENSOR_ATTR(temp1_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 4),
	SENSOR_ATTR(temp2_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 5),
	SENSOR_ATTR(temp3_beep, S_IWUSR | S_IRUGO, show_beep, store_beep, 1),
};

static struct sensor_device_attribute sda_temp_alarm[] = {
	SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4),
	SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5),
	SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
};
/* Get realtime status of all sensor items: voltage, temp, fan */
static ssize_t show_alarms_reg(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct w83791d_data *data = w83791d_update_device(dev);

	/* combined 24-bit alarm word assembled in w83791d_update_device() */
	return sprintf(buf, "%u\n", data->alarms);
}

static DEVICE_ATTR(alarms, S_IRUGO, show_alarms_reg, NULL);
/* Beep control */

/* Position of the global beep-enable bit within the 24-bit beep mask */
#define GLOBAL_BEEP_ENABLE_SHIFT	15
#define GLOBAL_BEEP_ENABLE_MASK		(1 << GLOBAL_BEEP_ENABLE_SHIFT)

/* Report the global beep-enable flag (0 = disabled, 1 = enabled). */
static ssize_t show_beep_enable(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct w83791d_data *data = w83791d_update_device(dev);
	return sprintf(buf, "%d\n", data->beep_enable);
}
/* Report the cached beep mask, decoded for userspace. */
static ssize_t show_beep_mask(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct w83791d_data *data = w83791d_update_device(dev);
	int mask = BEEP_MASK_FROM_REG(data->beep_mask);

	return sprintf(buf, "%d\n", mask);
}
/*
 * Set the beep mask.  The current global beep-enable state is forced
 * into its dedicated bit regardless of what the user wrote, and the
 * resulting 24-bit mask is flushed to the three beep control registers.
 */
static ssize_t store_beep_mask(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int i;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);

	/*
	 * The beep_enable state overrides any enabling request from
	 * the masks
	 */
	data->beep_mask = BEEP_MASK_TO_REG(val) & ~GLOBAL_BEEP_ENABLE_MASK;
	data->beep_mask |= (data->beep_enable << GLOBAL_BEEP_ENABLE_SHIFT);

	/* Write the mask out one byte at a time, low byte first */
	val = data->beep_mask;

	for (i = 0; i < 3; i++) {
		w83791d_write(client, W83791D_REG_BEEP_CTRL[i], (val & 0xff));
		val >>= 8;
	}

	mutex_unlock(&data->update_lock);

	return count;
}
/*
 * Turn the global beep on or off (any nonzero input enables it).
 * The cached 24-bit beep mask is kept in sync, and only the single
 * register containing the global bit is written back.
 */
static ssize_t store_beep_enable(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->beep_enable = val ? 1 : 0;

	/* Keep the full mask value in sync with the current enable */
	data->beep_mask &= ~GLOBAL_BEEP_ENABLE_MASK;
	data->beep_mask |= (data->beep_enable << GLOBAL_BEEP_ENABLE_SHIFT);

	/*
	 * The global control is in the second beep control register
	 * so only need to update that register
	 */
	val = (data->beep_mask >> 8) & 0xff;

	w83791d_write(client, W83791D_REG_BEEP_CTRL[1], val);

	mutex_unlock(&data->update_lock);

	return count;
}
/* Global beep controls: master enable plus the per-source mask */
static struct sensor_device_attribute sda_beep_ctrl[] = {
	SENSOR_ATTR(beep_enable, S_IRUGO | S_IWUSR,
			show_beep_enable, store_beep_enable, 0),
	SENSOR_ATTR(beep_mask, S_IRUGO | S_IWUSR,
			show_beep_mask, store_beep_mask, 1)
};
/* cpu voltage regulation information */
static ssize_t show_vid_reg(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct w83791d_data *data = w83791d_update_device(dev);
	int vid = vid_from_reg(data->vid, data->vrm);

	return sprintf(buf, "%d\n", vid);
}

static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
/* Report the VRM version used to decode the CPU VID pins. */
static ssize_t show_vrm_reg(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct w83791d_data *data = dev_get_drvdata(dev);
	int vrm = data->vrm;

	return sprintf(buf, "%d\n", vrm);
}
/*
 * Set the VRM version used to decode the CPU VID pins.
 *
 * Fix: propagate the real error code from kstrtoul() and reject
 * values above 255 -- data->vrm appears to be a single byte
 * (shown with %d, passed to vid_from_reg()), so larger values would
 * previously be truncated silently.
 */
static ssize_t store_vrm_reg(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct w83791d_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	if (val > 255)
		return -EINVAL;

	/*
	 * No lock needed as vrm is internal to the driver
	 * (not read from a chip register) and so is not
	 * updated in w83791d_update_device()
	 */
	data->vrm = val;
	return count;
}

static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
/* Per-channel attribute bundles used to build the attribute lists below */
#define IN_UNIT_ATTRS(X) \
	&sda_in_input[X].dev_attr.attr, \
	&sda_in_min[X].dev_attr.attr, \
	&sda_in_max[X].dev_attr.attr, \
	&sda_in_beep[X].dev_attr.attr, \
	&sda_in_alarm[X].dev_attr.attr

#define FAN_UNIT_ATTRS(X) \
	&sda_fan_input[X].dev_attr.attr, \
	&sda_fan_min[X].dev_attr.attr, \
	&sda_fan_div[X].dev_attr.attr, \
	&sda_fan_beep[X].dev_attr.attr, \
	&sda_fan_alarm[X].dev_attr.attr

#define TEMP_UNIT_ATTRS(X) \
	&sda_temp_input[X].dev_attr.attr, \
	&sda_temp_max[X].dev_attr.attr, \
	&sda_temp_max_hyst[X].dev_attr.attr, \
	&sda_temp_beep[X].dev_attr.attr, \
	&sda_temp_alarm[X].dev_attr.attr
/*
 * Attributes always registered: 10 voltages, fans 1-3, 3 temperature
 * channels, plus the global alarm/beep/VID/PWM entries.  Fan/pwm 4-5
 * live in a separate group (see w83791d_group_fanpwm45).
 */
static struct attribute *w83791d_attributes[] = {
	IN_UNIT_ATTRS(0),
	IN_UNIT_ATTRS(1),
	IN_UNIT_ATTRS(2),
	IN_UNIT_ATTRS(3),
	IN_UNIT_ATTRS(4),
	IN_UNIT_ATTRS(5),
	IN_UNIT_ATTRS(6),
	IN_UNIT_ATTRS(7),
	IN_UNIT_ATTRS(8),
	IN_UNIT_ATTRS(9),
	FAN_UNIT_ATTRS(0),
	FAN_UNIT_ATTRS(1),
	FAN_UNIT_ATTRS(2),
	TEMP_UNIT_ATTRS(0),
	TEMP_UNIT_ATTRS(1),
	TEMP_UNIT_ATTRS(2),
	&dev_attr_alarms.attr,
	&sda_beep_ctrl[0].dev_attr.attr,
	&sda_beep_ctrl[1].dev_attr.attr,
	&dev_attr_cpu0_vid.attr,
	&dev_attr_vrm.attr,
	&sda_pwm[0].dev_attr.attr,
	&sda_pwm[1].dev_attr.attr,
	&sda_pwm[2].dev_attr.attr,
	&sda_pwmenable[0].dev_attr.attr,
	&sda_pwmenable[1].dev_attr.attr,
	&sda_pwmenable[2].dev_attr.attr,
	&sda_temp_target[0].dev_attr.attr,
	&sda_temp_target[1].dev_attr.attr,
	&sda_temp_target[2].dev_attr.attr,
	&sda_temp_tolerance[0].dev_attr.attr,
	&sda_temp_tolerance[1].dev_attr.attr,
	&sda_temp_tolerance[2].dev_attr.attr,
	NULL
};

static const struct attribute_group w83791d_group = {
	.attrs = w83791d_attributes,
};
/*
 * Separate group of attributes for fan/pwm 4-5. Their pins can also be
 * in use for GPIO in which case their sysfs-interface should not be made
 * available
 */
static struct attribute *w83791d_attributes_fanpwm45[] = {
	FAN_UNIT_ATTRS(3),
	FAN_UNIT_ATTRS(4),
	&sda_pwm[3].dev_attr.attr,
	&sda_pwm[4].dev_attr.attr,
	NULL
};

static const struct attribute_group w83791d_group_fanpwm45 = {
	.attrs = w83791d_attributes_fanpwm45,
};
/*
 * Instantiate the LM75-compatible temperature subclients advertised by
 * the chip's I2C_SUBADDR register (addresses 0x48-0x4f), honoring any
 * force_subclients= module override.  Returns 0 on success or a
 * negative errno.
 */
static int w83791d_detect_subclients(struct i2c_client *client)
{
	struct i2c_adapter *adapter = client->adapter;
	struct w83791d_data *data = i2c_get_clientdata(client);
	int address = client->addr;
	int i, id, err;
	u8 val;

	id = i2c_adapter_id(adapter);

	/* Apply a user-requested override of the subclient addresses */
	if (force_subclients[0] == id && force_subclients[1] == address) {
		for (i = 2; i <= 3; i++) {
			if (force_subclients[i] < 0x48 ||
					force_subclients[i] > 0x4f) {
				dev_err(&client->dev,
					"invalid subclient "
					"address %d; must be 0x48-0x4f\n",
					force_subclients[i]);
				err = -ENODEV;
				goto error_sc_0;
			}
		}
		w83791d_write(client, W83791D_REG_I2C_SUBADDR,
					(force_subclients[2] & 0x07) |
					((force_subclients[3] & 0x07) << 4));
	}

	/*
	 * Subclient 0/1 is present when bit 3/7 is clear; the low/high
	 * nibble gives the address offset from 0x48.
	 * NOTE(review): i2c_new_dummy() can return NULL; the result is
	 * not checked here -- later code only dereferences lm75[0] after
	 * a non-NULL test, so this is tolerated.
	 */
	val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
	if (!(val & 0x08))
		data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (val & 0x7));
	if (!(val & 0x80)) {
		if ((data->lm75[0] != NULL) &&
				((val & 0x7) == ((val >> 4) & 0x7))) {
			dev_err(&client->dev,
				"duplicate addresses 0x%x, "
				"use force_subclient\n",
				data->lm75[0]->addr);
			err = -ENODEV;
			goto error_sc_1;
		}
		data->lm75[1] = i2c_new_dummy(adapter,
					      0x48 + ((val >> 4) & 0x7));
	}

	return 0;

/* Undo inits in case of errors */
error_sc_1:
	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
error_sc_0:
	return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int w83791d_detect(struct i2c_client *client,
			struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int val1, val2;
	unsigned short address = client->addr;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/*
	 * Bail out while CONFIG bit 7 is set (presumably the chip is
	 * resetting/initializing -- NOTE(review): confirm on datasheet)
	 */
	if (w83791d_read(client, W83791D_REG_CONFIG) & 0x80)
		return -ENODEV;

	val1 = w83791d_read(client, W83791D_REG_BANK);
	val2 = w83791d_read(client, W83791D_REG_CHIPMAN);
	/* Check for Winbond ID if in bank 0 */
	if (!(val1 & 0x07)) {
		/* yes, bank 0: expect vendor ID low/high byte per bit 7 */
		if ((!(val1 & 0x80) && val2 != 0xa3) ||
				((val1 & 0x80) && val2 != 0x5c)) {
			return -ENODEV;
		}
	}
	/*
	 * If Winbond chip, address of chip and W83791D_REG_I2C_ADDR
	 * should match
	 */
	if (w83791d_read(client, W83791D_REG_I2C_ADDR) != address)
		return -ENODEV;

	/* We want bank 0 and Vendor ID high byte */
	val1 = w83791d_read(client, W83791D_REG_BANK) & 0x78;
	w83791d_write(client, W83791D_REG_BANK, val1 | 0x80);

	/* Verify it is a Winbond w83791d */
	val1 = w83791d_read(client, W83791D_REG_WCHIPID);
	val2 = w83791d_read(client, W83791D_REG_CHIPMAN);
	if (val1 != 0x71 || val2 != 0x5c)
		return -ENODEV;

	strlcpy(info->type, "w83791d", I2C_NAME_SIZE);

	return 0;
}
/*
 * Bind the driver to a detected chip: allocate state, find LM75
 * subclients, initialize the hardware, create the sysfs interface
 * (optionally including fan/pwm 4-5), and register the hwmon device.
 * On failure, everything created so far is unwound in reverse order.
 */
static int w83791d_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct w83791d_data *data;
	struct device *dev = &client->dev;
	int i, err;
	u8 has_fanpwm45;

#ifdef DEBUG
	int val1;
	val1 = w83791d_read(client, W83791D_REG_DID_VID4);
	dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n",
			(val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
#endif

	/* devm allocation: freed automatically on detach, no kfree needed */
	data = devm_kzalloc(&client->dev, sizeof(struct w83791d_data),
			GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);

	err = w83791d_detect_subclients(client);
	if (err)
		return err;

	/* Initialize the chip */
	w83791d_init_client(client);

	/*
	 * If the fan_div is changed, make sure there is a rational
	 * fan_min in place
	 */
	for (i = 0; i < NUMBER_OF_FANIN; i++)
		data->fan_min[i] = w83791d_read(client, W83791D_REG_FAN_MIN[i]);

	/* Register sysfs hooks */
	err = sysfs_create_group(&client->dev.kobj, &w83791d_group);
	if (err)
		goto error3;

	/* Check if pins of fan/pwm 4-5 are in use as GPIO */
	has_fanpwm45 = w83791d_read(client, W83791D_REG_GPIO) & 0x10;
	if (has_fanpwm45) {
		err = sysfs_create_group(&client->dev.kobj,
					&w83791d_group_fanpwm45);
		if (err)
			goto error4;
	}

	/* Everything is ready, now register the working device */
	data->hwmon_dev = hwmon_device_register(dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto error5;
	}

	return 0;

/* Unwind in reverse order of creation */
error5:
	if (has_fanpwm45)
		sysfs_remove_group(&client->dev.kobj, &w83791d_group_fanpwm45);
error4:
	sysfs_remove_group(&client->dev.kobj, &w83791d_group);
error3:
	if (data->lm75[0] != NULL)
		i2c_unregister_device(data->lm75[0]);
	if (data->lm75[1] != NULL)
		i2c_unregister_device(data->lm75[1]);
	return err;
}
/* Tear down the hwmon device, the sysfs group, and any LM75 subclients. */
static int w83791d_remove(struct i2c_client *client)
{
	struct w83791d_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &w83791d_group);

	if (data->lm75[0])
		i2c_unregister_device(data->lm75[0]);
	if (data->lm75[1])
		i2c_unregister_device(data->lm75[1]);

	return 0;
}
/*
 * One-time chip setup controlled by the 'reset' and 'init' module
 * parameters; also seeds data->vrm for VID decoding.
 */
static void w83791d_init_client(struct i2c_client *client)
{
	struct w83791d_data *data = i2c_get_clientdata(client);
	u8 tmp;
	u8 old_beep;

	/*
	 * The difference between reset and init is that reset
	 * does a hard reset of the chip via index 0x40, bit 7,
	 * but init simply forces certain registers to have "sane"
	 * values. The hope is that the BIOS has done the right
	 * thing (which is why the default is reset=0, init=0),
	 * but if not, reset is the hard hammer and init
	 * is the soft mallet both of which are trying to whack
	 * things into place...
	 * NOTE: The data sheet makes a distinction between
	 * "power on defaults" and "reset by MR". As far as I can tell,
	 * the hard reset puts everything into a power-on state so I'm
	 * not sure what "reset by MR" means or how it can happen.
	 */
	if (reset || init) {
		/* keep some BIOS settings when we... */
		old_beep = w83791d_read(client, W83791D_REG_BEEP_CONFIG);

		if (reset) {
			/* ... reset the chip and ... */
			w83791d_write(client, W83791D_REG_CONFIG, 0x80);
		}

		/* ... disable power-on abnormal beep */
		w83791d_write(client, W83791D_REG_BEEP_CONFIG, old_beep | 0x80);

		/* disable the global beep (not done by hard reset) */
		tmp = w83791d_read(client, W83791D_REG_BEEP_CTRL[1]);
		w83791d_write(client, W83791D_REG_BEEP_CTRL[1], tmp & 0xef);

		if (init) {
			/* Make sure monitoring is turned on for add-ons */
			tmp = w83791d_read(client, W83791D_REG_TEMP2_CONFIG);
			if (tmp & 1) {
				w83791d_write(client, W83791D_REG_TEMP2_CONFIG,
					tmp & 0xfe);
			}

			tmp = w83791d_read(client, W83791D_REG_TEMP3_CONFIG);
			if (tmp & 1) {
				w83791d_write(client, W83791D_REG_TEMP3_CONFIG,
					tmp & 0xfe);
			}

			/* Start monitoring */
			tmp = w83791d_read(client, W83791D_REG_CONFIG) & 0xf7;
			w83791d_write(client, W83791D_REG_CONFIG, tmp | 0x01);
		}
	}

	data->vrm = vid_which_vrm();
}
/*
 * Refresh the driver's cached copy of every monitored register and
 * return the cache.  A refresh happens at most every 3 seconds, or
 * when the cache has been invalidated; the whole operation is
 * serialized by data->update_lock.
 */
static struct w83791d_data *w83791d_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct w83791d_data *data = i2c_get_clientdata(client);
	int i, j;
	u8 reg_array_tmp[3];
	u8 vbat_reg;

	mutex_lock(&data->update_lock);
	if (time_after(jiffies, data->last_updated + (HZ * 3))
			|| !data->valid) {
		dev_dbg(dev, "Starting w83791d device update\n");

		/* Update the voltages measured value and limits */
		for (i = 0; i < NUMBER_OF_VIN; i++) {
			data->in[i] = w83791d_read(client,
					W83791D_REG_IN[i]);
			data->in_max[i] = w83791d_read(client,
					W83791D_REG_IN_MAX[i]);
			data->in_min[i] = w83791d_read(client,
					W83791D_REG_IN_MIN[i]);
		}

		/* Update the fan counts and limits */
		for (i = 0; i < NUMBER_OF_FANIN; i++) {
			/* Update the Fan measured value and limits */
			data->fan[i] = w83791d_read(client,
					W83791D_REG_FAN[i]);
			data->fan_min[i] = w83791d_read(client,
					W83791D_REG_FAN_MIN[i]);
		}

		/* Update the fan divisor */
		for (i = 0; i < 3; i++) {
			reg_array_tmp[i] = w83791d_read(client,
					W83791D_REG_FAN_DIV[i]);
		}
		/* Unpack the per-fan divisor fields (see store_fan_div) */
		data->fan_div[0] = (reg_array_tmp[0] >> 4) & 0x03;
		data->fan_div[1] = (reg_array_tmp[0] >> 6) & 0x03;
		data->fan_div[2] = (reg_array_tmp[1] >> 6) & 0x03;
		data->fan_div[3] = reg_array_tmp[2] & 0x07;
		data->fan_div[4] = (reg_array_tmp[2] >> 4) & 0x07;

		/*
		 * The fan divisor for fans 0-2 get bit 2 from
		 * bits 5-7 respectively of vbat register
		 */
		vbat_reg = w83791d_read(client, W83791D_REG_VBAT);
		for (i = 0; i < 3; i++)
			data->fan_div[i] |= (vbat_reg >> (3 + i)) & 0x04;

		/* Update PWM duty cycle */
		for (i = 0; i < NUMBER_OF_PWM; i++) {
			data->pwm[i] = w83791d_read(client,
					W83791D_REG_PWM[i]);
		}

		/* Update PWM enable status */
		for (i = 0; i < 2; i++) {
			reg_array_tmp[i] = w83791d_read(client,
					W83791D_REG_FAN_CFG[i]);
		}
		/* Two-bit mode fields (see store_pwmenable for the layout) */
		data->pwm_enable[0] = (reg_array_tmp[0] >> 2) & 0x03;
		data->pwm_enable[1] = (reg_array_tmp[0] >> 4) & 0x03;
		data->pwm_enable[2] = (reg_array_tmp[1] >> 2) & 0x03;

		/* Update PWM target temperature */
		for (i = 0; i < 3; i++) {
			data->temp_target[i] = w83791d_read(client,
					W83791D_REG_TEMP_TARGET[i]) & 0x7f;
		}

		/* Update PWM temperature tolerance */
		for (i = 0; i < 2; i++) {
			reg_array_tmp[i] = w83791d_read(client,
					W83791D_REG_TEMP_TOL[i]);
		}
		data->temp_tolerance[0] = reg_array_tmp[0] & 0x0f;
		data->temp_tolerance[1] = (reg_array_tmp[0] >> 4) & 0x0f;
		data->temp_tolerance[2] = reg_array_tmp[1] & 0x0f;

		/* Update the first temperature sensor */
		for (i = 0; i < 3; i++) {
			data->temp1[i] = w83791d_read(client,
					W83791D_REG_TEMP1[i]);
		}

		/* Update the rest of the temperature sensors */
		for (i = 0; i < 2; i++) {
			for (j = 0; j < 3; j++) {
				data->temp_add[i][j] =
					(w83791d_read(client,
					W83791D_REG_TEMP_ADD[i][j * 2]) << 8) |
					w83791d_read(client,
					W83791D_REG_TEMP_ADD[i][j * 2 + 1]);
			}
		}

		/* Update the realtime status */
		data->alarms =
			w83791d_read(client, W83791D_REG_ALARM1) +
			(w83791d_read(client, W83791D_REG_ALARM2) << 8) +
			(w83791d_read(client, W83791D_REG_ALARM3) << 16);

		/* Update the beep configuration information */
		data->beep_mask =
			w83791d_read(client, W83791D_REG_BEEP_CTRL[0]) +
			(w83791d_read(client, W83791D_REG_BEEP_CTRL[1]) << 8) +
			(w83791d_read(client, W83791D_REG_BEEP_CTRL[2]) << 16);

		/* Extract global beep enable flag */
		data->beep_enable =
			(data->beep_mask >> GLOBAL_BEEP_ENABLE_SHIFT) & 0x01;

		/* Update the cpu voltage information */
		i = w83791d_read(client, W83791D_REG_VID_FANDIV);
		data->vid = i & 0x0f;
		data->vid |= (w83791d_read(client, W83791D_REG_DID_VID4) & 0x01)
				<< 4;

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

#ifdef DEBUG
	w83791d_print_debug(data, dev);
#endif

	return data;
}
#ifdef DEBUG
/* Dump the entire cached register state to the kernel log (DEBUG builds). */
static void w83791d_print_debug(struct w83791d_data *data, struct device *dev)
{
	int i = 0, j = 0;

	dev_dbg(dev, "======Start of w83791d debug values======\n");
	dev_dbg(dev, "%d set of Voltages: ===>\n", NUMBER_OF_VIN);
	for (i = 0; i < NUMBER_OF_VIN; i++) {
		dev_dbg(dev, "vin[%d] is: 0x%02x\n", i, data->in[i]);
		dev_dbg(dev, "vin[%d] min is: 0x%02x\n", i, data->in_min[i]);
		dev_dbg(dev, "vin[%d] max is: 0x%02x\n", i, data->in_max[i]);
	}
	dev_dbg(dev, "%d set of Fan Counts/Divisors: ===>\n", NUMBER_OF_FANIN);
	for (i = 0; i < NUMBER_OF_FANIN; i++) {
		dev_dbg(dev, "fan[%d] is: 0x%02x\n", i, data->fan[i]);
		dev_dbg(dev, "fan[%d] min is: 0x%02x\n", i, data->fan_min[i]);
		dev_dbg(dev, "fan_div[%d] is: 0x%02x\n", i, data->fan_div[i]);
	}

	/*
	 * temperature math is signed, but only print out the
	 * bits that matter
	 */
	dev_dbg(dev, "%d set of Temperatures: ===>\n", NUMBER_OF_TEMPIN);
	for (i = 0; i < 3; i++)
		dev_dbg(dev, "temp1[%d] is: 0x%02x\n", i, (u8) data->temp1[i]);
	for (i = 0; i < 2; i++) {
		for (j = 0; j < 3; j++) {
			dev_dbg(dev, "temp_add[%d][%d] is: 0x%04x\n", i, j,
				(u16) data->temp_add[i][j]);
		}
	}

	dev_dbg(dev, "Misc Information: ===>\n");
	dev_dbg(dev, "alarm is: 0x%08x\n", data->alarms);
	dev_dbg(dev, "beep_mask is: 0x%08x\n", data->beep_mask);
	dev_dbg(dev, "beep_enable is: %d\n", data->beep_enable);
	dev_dbg(dev, "vid is: 0x%02x\n", data->vid);
	dev_dbg(dev, "vrm is: 0x%02x\n", data->vrm);
	dev_dbg(dev, "=======End of w83791d debug values========\n");
	dev_dbg(dev, "\n");
}
#endif
module_i2c_driver(w83791d_driver);
MODULE_AUTHOR("Charles Spirakis <bezaur@gmail.com>");
MODULE_DESCRIPTION("W83791D driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/lge-kernel-p880 | kernel/tracepoint.c | 2754 | 16716 | /*
* Copyright (C) 2008 Mathieu Desnoyers
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/jump_label.h>
/* Linker-script section markers bracketing the built-in tracepoint table */
extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/*
 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
 * builtin and module tracepoints and the hash table.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU :
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 * Tracepoint entries modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
struct hlist_node hlist;
struct tracepoint_func *funcs;
int refcount; /* Number of times armed. 0 if disarmed. */
char name[0];
};
struct tp_probes {
union {
struct rcu_head rcu;
struct list_head list;
} u;
struct tracepoint_func probes[0];
};
/*
 * Allocate a struct tp_probes large enough for @count tracepoint_func
 * slots and return a pointer to the embedded probes[] flexible array,
 * not to the wrapper itself.  release_probes()/rcu_free_old_probes()
 * later recover the wrapper with container_of().  Returns NULL on
 * allocation failure.
 */
static inline void *allocate_probes(int count)
{
struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
+ sizeof(struct tp_probes), GFP_KERNEL);
return p == NULL ? NULL : p->probes;
}
/* RCU callback: free the tp_probes wrapper once all readers are done. */
static void rcu_free_old_probes(struct rcu_head *head)
{
kfree(container_of(head, struct tp_probes, u.rcu));
}
/*
 * Defer freeing of an old probes array (as returned by the entry
 * add/remove helpers) until after an RCU-sched grace period, so that
 * concurrent tracepoint callers which still hold a reference can finish.
 * @old points at tp_probes::probes[0]; NULL is a no-op.
 */
static inline void release_probes(struct tracepoint_func *old)
{
if (old) {
struct tp_probes *tp_probes = container_of(old,
struct tp_probes, probes[0]);
call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
}
}
static void debug_print_probes(struct tracepoint_entry *entry)
{
int i;
if (!tracepoint_debug || !entry->funcs)
return;
for (i = 0; entry->funcs[i].func; i++)
printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
}
/*
 * Append the (probe, data) pair to @entry's probe array (N -> N+1).
 * Builds a fresh array rather than modifying in place, because the old
 * one may still be in use by tracepoint callers; the caller must free
 * the returned old array via release_probes() after an RCU grace period.
 * Returns ERR_PTR(-EEXIST) if the exact pair is already registered,
 * ERR_PTR(-ENOMEM) on allocation failure.
 * Must be called with tracepoints_mutex held.
 */
static struct tracepoint_func *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
void *probe, void *data)
{
int nr_probes = 0;
struct tracepoint_func *old, *new;
WARN_ON(!probe);
debug_print_probes(entry);
old = entry->funcs;
if (old) {
/* (N -> N+1), (N != 0, 1) probes */
for (nr_probes = 0; old[nr_probes].func; nr_probes++)
if (old[nr_probes].func == probe &&
old[nr_probes].data == data)
return ERR_PTR(-EEXIST);
}
/* + 2 : one for new probe, one for NULL func */
new = allocate_probes(nr_probes + 2);
if (new == NULL)
return ERR_PTR(-ENOMEM);
if (old)
memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
new[nr_probes].func = probe;
new[nr_probes].data = data;
/* NULL func terminates the array for the iteration loops */
new[nr_probes + 1].func = NULL;
entry->refcount = nr_probes + 1;
entry->funcs = new;
debug_print_probes(entry);
return old;
}
/*
 * Remove the (probe, data) pair from @entry's probe array; a NULL @probe
 * removes every probe.  As with add, a new array is built so concurrent
 * readers keep seeing a consistent old one; the caller must free the
 * returned old array via release_probes().  Returns ERR_PTR(-ENOENT) if
 * the entry has no probes, ERR_PTR(-ENOMEM) on allocation failure.
 * Must be called with tracepoints_mutex held.
 */
static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
void *probe, void *data)
{
int nr_probes = 0, nr_del = 0, i;
struct tracepoint_func *old, *new;
old = entry->funcs;
if (!old)
return ERR_PTR(-ENOENT);
debug_print_probes(entry);
/* (N -> M), (N > 1, M >= 0) probes */
for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
if (!probe ||
(old[nr_probes].func == probe &&
old[nr_probes].data == data))
nr_del++;
}
if (nr_probes - nr_del == 0) {
/* N -> 0, (N > 1) */
entry->funcs = NULL;
entry->refcount = 0;
debug_print_probes(entry);
return old;
} else {
int j = 0;
/* N -> M, (N > 1, M > 0) */
/* + 1 for NULL */
new = allocate_probes(nr_probes - nr_del + 1);
if (new == NULL)
return ERR_PTR(-ENOMEM);
/* copy the surviving probes into the new array */
for (i = 0; old[i].func; i++)
if (probe &&
(old[i].func != probe || old[i].data != data))
new[j++] = old[i];
new[nr_probes - nr_del].func = NULL;
entry->refcount = nr_probes - nr_del;
entry->funcs = new;
}
debug_print_probes(entry);
return old;
}
/*
* Get tracepoint if the tracepoint is present in the tracepoint hash table.
* Must be called with tracepoints_mutex held.
* Returns NULL if not present.
*/
static struct tracepoint_entry *get_tracepoint(const char *name)
{
struct hlist_head *head;
struct hlist_node *node;
struct tracepoint_entry *e;
/* Hash on the name, then linear-scan the bucket for an exact match. */
u32 hash = jhash(name, strlen(name), 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(name, e->name))
return e;
}
return NULL;
}
/*
* Add the tracepoint to the tracepoint hash table. Must be called with
* tracepoints_mutex held.
*/
static struct tracepoint_entry *add_tracepoint(const char *name)
{
struct hlist_head *head;
struct hlist_node *node;
struct tracepoint_entry *e;
/* name_len includes the trailing NUL; hash only the characters */
size_t name_len = strlen(name) + 1;
u32 hash = jhash(name, name_len-1, 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(name, e->name)) {
printk(KERN_NOTICE
"tracepoint %s busy\n", name);
return ERR_PTR(-EEXIST); /* Already there */
}
}
/*
 * Using kmalloc here to allocate a variable length element. Could
 * cause some memory fragmentation if overused.
 */
e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
if (!e)
return ERR_PTR(-ENOMEM);
/* name is stored in the flexible array member at the end of the entry */
memcpy(&e->name[0], name, name_len);
e->funcs = NULL;
e->refcount = 0;
hlist_add_head(&e->hlist, head);
return e;
}
/*
* Remove the tracepoint from the tracepoint hash table. Must be called with
* mutex_lock held.
*/
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
/* Unlink from the hash bucket, then free entry + embedded name. */
hlist_del(&e->hlist);
kfree(e);
}
/*
* Sets the probe callback corresponding to one tracepoint.
*/
static void set_tracepoint(struct tracepoint_entry **entry,
struct tracepoint *elem, int active)
{
WARN_ON(strcmp((*entry)->name, elem->name) != 0);
/*
 * Run the tracepoint's registration hook on the disabled->enabled
 * transition, and its unregistration hook on enabled->disabled.
 * jump_label_enabled() reflects the current enabled state.
 */
if (elem->regfunc && !jump_label_enabled(&elem->key) && active)
elem->regfunc();
else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active)
elem->unregfunc();
/*
 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
 * probe callbacks array is consistent before setting a pointer to it.
 * This array is referenced by __DO_TRACE from
 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
 * is used.
 */
rcu_assign_pointer(elem->funcs, (*entry)->funcs);
/* Flip the jump label only on an actual state transition. */
if (active && !jump_label_enabled(&elem->key))
jump_label_inc(&elem->key);
else if (!active && jump_label_enabled(&elem->key))
jump_label_dec(&elem->key);
}
/*
* Disable a tracepoint and its probe callback.
* Note: only waiting an RCU period after setting elem->call to the empty
* function insures that the original callback is not used anymore. This insured
* by preempt_disable around the call site.
*/
static void disable_tracepoint(struct tracepoint *elem)
{
/* Run the unregistration hook if the tracepoint was enabled. */
if (elem->unregfunc && jump_label_enabled(&elem->key))
elem->unregfunc();
if (jump_label_enabled(&elem->key))
jump_label_dec(&elem->key);
/* Publish the empty probe array; readers see NULL after a barrier. */
rcu_assign_pointer(elem->funcs, NULL);
}
/**
* tracepoint_update_probe_range - Update a probe range
* @begin: beginning of the range
* @end: end of the range
*
* Updates the probe callback corresponding to a range of tracepoints.
*/
void tracepoint_update_probe_range(struct tracepoint * const *begin,
struct tracepoint * const *end)
{
struct tracepoint * const *iter;
struct tracepoint_entry *mark_entry;
/* A NULL begin means there is no tracepoint section to update. */
if (!begin)
return;
mutex_lock(&tracepoints_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_tracepoint((*iter)->name);
if (mark_entry) {
/* Enabled iff at least one probe is registered. */
set_tracepoint(&mark_entry, *iter,
!!mark_entry->refcount);
} else {
/* No hash entry: nothing registered, switch it off. */
disable_tracepoint(*iter);
}
}
mutex_unlock(&tracepoints_mutex);
}
/*
* Update probes, removing the faulty probes.
*/
/*
 * Re-sync every tracepoint (core kernel and modules) with the hash
 * table of registered probes.
 */
static void tracepoint_update_probes(void)
{
/* Core kernel tracepoints */
tracepoint_update_probe_range(__start___tracepoints_ptrs,
__stop___tracepoints_ptrs);
/* tracepoints in modules. */
module_update_tracepoints();
}
/*
 * Look up (or create) the hash entry for @name and append (probe, data).
 * On failure the freshly-created, still-empty entry is removed again.
 * Returns the old probe array for deferred freeing, or an ERR_PTR.
 * Must be called with tracepoints_mutex held.
 */
static struct tracepoint_func *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
struct tracepoint_entry *entry;
struct tracepoint_func *old;
entry = get_tracepoint(name);
if (!entry) {
entry = add_tracepoint(name);
if (IS_ERR(entry))
return (struct tracepoint_func *)entry;
}
old = tracepoint_entry_add_probe(entry, probe, data);
/* Don't leak an entry we created if the add itself failed. */
if (IS_ERR(old) && !entry->refcount)
remove_tracepoint(entry);
return old;
}
/**
* tracepoint_probe_register - Connect a probe to a tracepoint
* @name: tracepoint name
* @probe: probe handler
*
* Returns 0 if ok, error value on error.
* The probe address must at least be aligned on the architecture pointer size.
*/
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe, data);
mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
/* Propagate the new probe array to the actual tracepoint sites. */
tracepoint_update_probes(); /* may update entry */
/* Old array is freed after an RCU-sched grace period. */
release_probes(old);
return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
/*
 * Remove (probe, data) from @name's hash entry, dropping the entry
 * entirely when its last probe goes away.  Returns the old probe array
 * for deferred freeing, or an ERR_PTR (-ENOENT if @name is unknown).
 * Must be called with tracepoints_mutex held.
 */
static struct tracepoint_func *
tracepoint_remove_probe(const char *name, void *probe, void *data)
{
struct tracepoint_entry *entry;
struct tracepoint_func *old;
entry = get_tracepoint(name);
if (!entry)
return ERR_PTR(-ENOENT);
old = tracepoint_entry_remove_probe(entry, probe, data);
if (IS_ERR(old))
return old;
if (!entry->refcount)
remove_tracepoint(entry);
return old;
}
/**
* tracepoint_probe_unregister - Disconnect a probe from a tracepoint
* @name: tracepoint name
* @probe: probe function pointer
*
* We do not need to call a synchronize_sched to make sure the probes have
* finished running before doing a module unload, because the module unload
* itself uses stop_machine(), which insures that every preempt disabled section
* have finished.
*/
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe, data);
mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old))
return PTR_ERR(old);
/* Propagate the shrunken probe array to the tracepoint sites. */
tracepoint_update_probes(); /* may update entry */
release_probes(old);
return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
static LIST_HEAD(old_probes);
static int need_update;
/*
 * Queue an old probe array on old_probes so its free can be batched by
 * tracepoint_probe_update_all(), and flag that an update is pending.
 * Must be called with tracepoints_mutex held.
 */
static void tracepoint_add_old_probes(void *old)
{
need_update = 1;
if (old) {
struct tp_probes *tp_probes = container_of(old,
struct tp_probes, probes[0]);
list_add(&tp_probes->u.list, &old_probes);
}
}
/**
* tracepoint_probe_register_noupdate - register a probe but not connect
* @name: tracepoint name
* @probe: probe handler
*
* caller must call tracepoint_probe_update_all()
*/
int tracepoint_probe_register_noupdate(const char *name, void *probe,
void *data)
{
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe, data);
if (IS_ERR(old)) {
mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
}
/* Defer both the site update and the free to update_all(). */
tracepoint_add_old_probes(old);
mutex_unlock(&tracepoints_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
/**
* tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
* @name: tracepoint name
* @probe: probe function pointer
*
* caller must call tracepoint_probe_update_all()
*/
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
void *data)
{
struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe, data);
if (IS_ERR(old)) {
mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old);
}
/* Defer both the site update and the free to update_all(). */
tracepoint_add_old_probes(old);
mutex_unlock(&tracepoints_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);
/**
* tracepoint_probe_update_all - update tracepoints
*/
void tracepoint_probe_update_all(void)
{
LIST_HEAD(release_probes);
struct tp_probes *pos, *next;
mutex_lock(&tracepoints_mutex);
if (!need_update) {
mutex_unlock(&tracepoints_mutex);
return;
}
/* Steal the pending old-probe list so it can be freed outside the lock. */
if (!list_empty(&old_probes))
list_replace_init(&old_probes, &release_probes);
need_update = 0;
mutex_unlock(&tracepoints_mutex);
tracepoint_update_probes();
/* Free each stale probe array after an RCU-sched grace period. */
list_for_each_entry_safe(pos, next, &release_probes, u.list) {
list_del(&pos->u.list);
call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
}
}
EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
/**
* tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
* @tracepoint: current tracepoints (in), next tracepoint (out)
* @begin: beginning of the range
* @end: end of the range
*
* Returns whether a next tracepoint has been found (1) or not (0).
* Will return the first tracepoint in the range if the input tracepoint is
* NULL.
*/
int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
struct tracepoint * const *begin, struct tracepoint * const *end)
{
	/* No current position: start at the first entry of a non-empty range. */
	if (!*tracepoint) {
		if (begin == end)
			return 0;
		*tracepoint = begin;
		return 1;
	}
	/* Otherwise the position is valid only while it stays inside [begin, end). */
	return (*tracepoint >= begin && *tracepoint < end) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
/*
 * Advance @iter to the next valid tracepoint: first the core kernel
 * section, then module sections.  Resets the iterator when exhausted.
 */
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
int found = 0;
/* Core kernel tracepoints */
if (!iter->module) {
found = tracepoint_get_iter_range(&iter->tracepoint,
__start___tracepoints_ptrs,
__stop___tracepoints_ptrs);
if (found)
goto end;
}
/* tracepoints in modules. */
found = module_get_iter_tracepoints(iter);
end:
if (!found)
tracepoint_iter_reset(iter);
}
/* Position @iter on the first tracepoint (seq_file-style iterator start). */
void tracepoint_iter_start(struct tracepoint_iter *iter)
{
tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_start);
/* Advance @iter by one tracepoint, crossing into modules as needed. */
void tracepoint_iter_next(struct tracepoint_iter *iter)
{
iter->tracepoint++;
/*
 * iter->tracepoint may be invalid because we blindly incremented it.
 * Make sure it is valid by marshalling on the tracepoints, getting the
 * tracepoints from following modules if necessary.
 */
tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_next);
/* Iterator teardown hook; nothing to release for tracepoint iteration. */
void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
/* Reset @iter to its initial state (before the core kernel section). */
void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
iter->module = NULL;
iter->tracepoint = NULL;
}
#ifdef CONFIG_MODULES
/*
 * Module notifier: when a module comes or goes, re-sync its tracepoint
 * section with the registered probes (COMING connects, GOING disconnects
 * since no probes will match a disappearing section's entries).
 */
int tracepoint_module_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct module *mod = data;
switch (val) {
case MODULE_STATE_COMING:
case MODULE_STATE_GOING:
tracepoint_update_probe_range(mod->tracepoints_ptrs,
mod->tracepoints_ptrs + mod->num_tracepoints);
break;
}
return 0;
}
struct notifier_block tracepoint_module_nb = {
.notifier_call = tracepoint_module_notify,
.priority = 0,
};
/* Hook the module notifier so module tracepoint sections stay in sync. */
static int init_tracepoints(void)
{
return register_module_notifier(&tracepoint_module_nb);
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;
/*
 * Refcounted enable of syscall tracing: on the 0 -> 1 transition, set
 * TIF_SYSCALL_TRACEPOINT on every user task so the slow syscall path
 * (which fires the tracepoints) is taken.
 */
void syscall_regfunc(void)
{
unsigned long flags;
struct task_struct *g, *t;
if (!sys_tracepoint_refcount) {
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, t) {
/* Skip kernel threads. */
if (t->mm)
set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
}
sys_tracepoint_refcount++;
}
/*
 * Refcounted disable of syscall tracing: on the 1 -> 0 transition,
 * clear TIF_SYSCALL_TRACEPOINT on all tasks again.
 */
void syscall_unregfunc(void)
{
unsigned long flags;
struct task_struct *g, *t;
sys_tracepoint_refcount--;
if (!sys_tracepoint_refcount) {
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, t) {
clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
} while_each_thread(g, t);
read_unlock_irqrestore(&tasklist_lock, flags);
}
}
#endif
| gpl-2.0 |
aatjitra/Note2 | arch/mips/netlogic/xlr/xlr_console.c | 2754 | 1910 | /*
* Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
* reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the NetLogic
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/types.h>
#include <asm/netlogic/xlr/iomap.h>
/*
 * Early-console character output for XLR: busy-wait on UART 0, then
 * write the byte to the data register (offset 0x0).
 * NOTE(review): register 0x5 is presumably the line-status register and
 * the loop waits for transmitter readiness — confirm against the XLR
 * UART documentation.
 */
void prom_putchar(char c)
{
nlm_reg_t *mmio;
mmio = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET);
while (netlogic_read_reg(mmio, 0x5) == 0)
;
netlogic_write_reg(mmio, 0x0, c);
}
| gpl-2.0 |
aapav01/android_kernel_samsung_ms013g_stock | drivers/mtd/nand/alauda.c | 4802 | 16001 | /*
* MTD driver for Alauda chips
*
* Copyright (C) 2007 Joern Engel <joern@logfs.org>
*
* Based on drivers/usb/usb-skeleton.c which is:
* Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
* and on drivers/usb/storage/alauda.c, which is:
* (c) 2005 Daniel Drake <dsd@gentoo.org>
*
* Idea and initial work by Arnd Bergmann <arnd@arndb.de>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_ecc.h>
/* Control commands */
#define ALAUDA_GET_XD_MEDIA_STATUS 0x08
#define ALAUDA_ACK_XD_MEDIA_CHANGE 0x0a
#define ALAUDA_GET_XD_MEDIA_SIG 0x86
/* Common prefix */
#define ALAUDA_BULK_CMD 0x40
/* The two ports */
#define ALAUDA_PORT_XD 0x00
#define ALAUDA_PORT_SM 0x01
/* Bulk commands */
#define ALAUDA_BULK_READ_PAGE 0x84
#define ALAUDA_BULK_READ_OOB 0x85 /* don't use, there's a chip bug */
#define ALAUDA_BULK_READ_BLOCK 0x94
#define ALAUDA_BULK_ERASE_BLOCK 0xa3
#define ALAUDA_BULK_WRITE_PAGE 0xa4
#define ALAUDA_BULK_WRITE_BLOCK 0xb4
#define ALAUDA_BULK_RESET_MEDIA 0xe0
/* Address shifting */
#define PBA_LO(pba) ((pba & 0xF) << 5)
#define PBA_HI(pba) (pba >> 3)
#define PBA_ZONE(pba) (pba >> 11)
#define TIMEOUT HZ
static const struct usb_device_id alauda_table[] = {
{ USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */
{ USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */
{ }
};
MODULE_DEVICE_TABLE(usb, alauda_table);
struct alauda_card {
u8 id; /* id byte */
u8 chipshift; /* 1<<chipshift total size */
u8 pageshift; /* 1<<pageshift page size */
u8 blockshift; /* 1<<blockshift block size */
};
struct alauda {
struct usb_device *dev;
struct usb_interface *interface;
struct mtd_info *mtd;
struct alauda_card *card;
struct mutex card_mutex;
u32 pagemask;
u32 bytemask;
u32 blockmask;
unsigned int write_out;
unsigned int bulk_in;
unsigned int bulk_out;
u8 port;
struct kref kref;
};
static struct alauda_card alauda_card_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 12}, /* 1 MB */
{ 0xe8, 20, 8, 12}, /* 1 MB */
{ 0xec, 20, 8, 12}, /* 1 MB */
{ 0x64, 21, 8, 12}, /* 2 MB */
{ 0xea, 21, 8, 12}, /* 2 MB */
{ 0x6b, 22, 9, 13}, /* 4 MB */
{ 0xe3, 22, 9, 13}, /* 4 MB */
{ 0xe5, 22, 9, 13}, /* 4 MB */
{ 0xe6, 23, 9, 13}, /* 8 MB */
{ 0x73, 24, 9, 14}, /* 16 MB */
{ 0x75, 25, 9, 14}, /* 32 MB */
{ 0x76, 26, 9, 14}, /* 64 MB */
{ 0x79, 27, 9, 14}, /* 128 MB */
{ 0x71, 28, 9, 14}, /* 256 MB */
/* MASK ROM */
{ 0x5d, 21, 9, 13}, /* 2 MB */
{ 0xd5, 22, 9, 13}, /* 4 MB */
{ 0xd6, 23, 9, 13}, /* 8 MB */
{ 0x57, 24, 9, 13}, /* 16 MB */
{ 0x58, 25, 9, 13}, /* 32 MB */
{ }
};
/*
 * Look up the card geometry for the given id byte in the NUL-terminated
 * alauda_card_ids[] table.  Returns NULL for an unknown id.
 */
static struct alauda_card *get_card(u8 id)
{
	struct alauda_card *c = alauda_card_ids;

	while (c->id) {
		if (c->id == id)
			return c;
		c++;
	}
	return NULL;
}
/*
 * Final kref release: unregister the MTD device (if registered), drop
 * the USB device reference, and free the driver state.
 */
static void alauda_delete(struct kref *kref)
{
struct alauda *al = container_of(kref, struct alauda, kref);
if (al->mtd) {
mtd_device_unregister(al->mtd);
kfree(al->mtd);
}
usb_put_dev(al->dev);
kfree(al);
}
/*
 * Read the 2-byte xD media status via a control-IN transfer into @buf.
 * Returns the usb_control_msg() result (bytes transferred or -errno).
 */
static int alauda_get_media_status(struct alauda *al, void *buf)
{
int ret;
mutex_lock(&al->card_mutex);
ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
ALAUDA_GET_XD_MEDIA_STATUS, 0xc0, 0, 1, buf, 2, HZ);
mutex_unlock(&al->card_mutex);
return ret;
}
/*
 * Acknowledge a media-change event with a zero-length control-OUT
 * transfer.  Returns the usb_control_msg() result.
 */
static int alauda_ack_media(struct alauda *al)
{
int ret;
mutex_lock(&al->card_mutex);
ret = usb_control_msg(al->dev, usb_sndctrlpipe(al->dev, 0),
ALAUDA_ACK_XD_MEDIA_CHANGE, 0x40, 0, 1, NULL, 0, HZ);
mutex_unlock(&al->card_mutex);
return ret;
}
/*
 * Read the 4-byte media signature via a control-IN transfer into @buf;
 * byte 1 holds the card id consumed by get_card().  Returns the
 * usb_control_msg() result.
 */
static int alauda_get_media_signatures(struct alauda *al, void *buf)
{
int ret;
mutex_lock(&al->card_mutex);
ret = usb_control_msg(al->dev, usb_rcvctrlpipe(al->dev, 0),
ALAUDA_GET_XD_MEDIA_SIG, 0xc0, 0, 0, buf, 4, HZ);
mutex_unlock(&al->card_mutex);
return ret;
}
/*
 * Send the 9-byte RESET_MEDIA bulk command for this port.
 * Best-effort: the usb_bulk_msg() result is deliberately ignored.
 */
static void alauda_reset(struct alauda *al)
{
u8 command[] = {
ALAUDA_BULK_CMD, ALAUDA_BULK_RESET_MEDIA, 0, 0,
0, 0, 0, 0, al->port
};
mutex_lock(&al->card_mutex);
usb_bulk_msg(al->dev, al->bulk_out, command, 9, NULL, HZ);
mutex_unlock(&al->card_mutex);
}
/*
 * Run software NAND ECC over one 256-byte half-page in @buf, compare
 * against the ECC bytes stored in OOB (@read_ecc), fix single-bit
 * errors in place, and bump the corrected/uncorrected counters.
 */
static void correct_data(void *buf, void *read_ecc,
int *corrected, int *uncorrected)
{
u8 calc_ecc[3];
int err;
nand_calculate_ecc(NULL, buf, calc_ecc);
err = nand_correct_data(NULL, buf, read_ecc, calc_ecc);
if (err) {
/* >0: bitflip repaired; <0: uncorrectable error */
if (err > 0)
(*corrected)++;
else
(*uncorrected)++;
}
}
struct alauda_sg_request {
struct urb *urb[3];
struct completion comp;
};
/*
 * URB completion handler: signal the completion passed as context, if
 * any (only the last URB of each scatter chain carries one).
 */
static void alauda_complete(struct urb *urb)
{
struct completion *comp = urb->context;
if (comp)
complete(comp);
}
/*
 * Read one page plus its 16-byte OOB area via three chained bulk URBs:
 * [0] 9-byte READ_PAGE command out, [1] page data in, [2] OOB in.
 * Only URB [2] carries the completion; waiting on it implies the
 * earlier transfers finished.  On timeout all URBs are killed.
 * Returns 0 on success or a negative errno.
 */
static int __alauda_read_page(struct mtd_info *mtd, loff_t from, void *buf,
void *oob)
{
struct alauda_sg_request sg;
struct alauda *al = mtd->priv;
u32 pba = from >> al->card->blockshift;
u32 page = (from >> al->card->pageshift) & al->pagemask;
u8 command[] = {
ALAUDA_BULK_CMD, ALAUDA_BULK_READ_PAGE, PBA_HI(pba),
PBA_ZONE(pba), 0, PBA_LO(pba) + page, 1, 0, al->port
};
int i, err;
for (i=0; i<3; i++)
sg.urb[i] = NULL;
err = -ENOMEM;
for (i=0; i<3; i++) {
sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
if (!sg.urb[i])
goto out;
}
init_completion(&sg.comp);
usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
alauda_complete, NULL);
usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, mtd->writesize,
alauda_complete, NULL);
usb_fill_bulk_urb(sg.urb[2], al->dev, al->bulk_in, oob, 16,
alauda_complete, &sg.comp);
mutex_lock(&al->card_mutex);
for (i=0; i<3; i++) {
err = usb_submit_urb(sg.urb[i], GFP_NOIO);
if (err)
goto cancel;
}
if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
err = -ETIMEDOUT;
cancel:
/* kill every URB; usb_kill_urb() is safe on unsubmitted ones */
for (i=0; i<3; i++) {
usb_kill_urb(sg.urb[i]);
}
}
mutex_unlock(&al->card_mutex);
out:
/* usb_free_urb(NULL) is a no-op, so partial allocation is fine */
usb_free_urb(sg.urb[0]);
usb_free_urb(sg.urb[1]);
usb_free_urb(sg.urb[2]);
return err;
}
/*
 * Read a page and apply ECC.  Each 256-byte half has its own ECC bytes
 * in OOB: oob+13 covers bytes 0-255, oob+8 covers bytes 256-511.
 * Bitflip statistics are accumulated in @corrected/@uncorrected.
 */
static int alauda_read_page(struct mtd_info *mtd, loff_t from,
void *buf, u8 *oob, int *corrected, int *uncorrected)
{
int err;
err = __alauda_read_page(mtd, from, buf, oob);
if (err)
return err;
correct_data(buf, oob+13, corrected, uncorrected);
correct_data(buf+256, oob+8, corrected, uncorrected);
return 0;
}
/*
 * Write one page plus OOB via three chained bulk URBs, mirroring
 * __alauda_read_page(): [0] WRITE_PAGE command on the command pipe,
 * [1] page data and [2] OOB on the dedicated write pipe.  The
 * completion rides on the final URB.  Returns 0 or a negative errno.
 */
static int alauda_write_page(struct mtd_info *mtd, loff_t to, void *buf,
void *oob)
{
struct alauda_sg_request sg;
struct alauda *al = mtd->priv;
u32 pba = to >> al->card->blockshift;
u32 page = (to >> al->card->pageshift) & al->pagemask;
u8 command[] = {
ALAUDA_BULK_CMD, ALAUDA_BULK_WRITE_PAGE, PBA_HI(pba),
PBA_ZONE(pba), 0, PBA_LO(pba) + page, 32, 0, al->port
};
int i, err;
for (i=0; i<3; i++)
sg.urb[i] = NULL;
err = -ENOMEM;
for (i=0; i<3; i++) {
sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
if (!sg.urb[i])
goto out;
}
init_completion(&sg.comp);
usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
alauda_complete, NULL);
usb_fill_bulk_urb(sg.urb[1], al->dev, al->write_out, buf,mtd->writesize,
alauda_complete, NULL);
usb_fill_bulk_urb(sg.urb[2], al->dev, al->write_out, oob, 16,
alauda_complete, &sg.comp);
mutex_lock(&al->card_mutex);
for (i=0; i<3; i++) {
err = usb_submit_urb(sg.urb[i], GFP_NOIO);
if (err)
goto cancel;
}
if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
err = -ETIMEDOUT;
cancel:
for (i=0; i<3; i++) {
usb_kill_urb(sg.urb[i]);
}
}
mutex_unlock(&al->card_mutex);
out:
usb_free_urb(sg.urb[0]);
usb_free_urb(sg.urb[1]);
usb_free_urb(sg.urb[2]);
return err;
}
/*
 * Erase the block containing @ofs: ERASE_BLOCK command out, then a
 * 2-byte status read in, chained over two URBs with the completion on
 * the second.  Returns 0 or a negative errno.
 */
static int alauda_erase_block(struct mtd_info *mtd, loff_t ofs)
{
struct alauda_sg_request sg;
struct alauda *al = mtd->priv;
u32 pba = ofs >> al->card->blockshift;
u8 command[] = {
ALAUDA_BULK_CMD, ALAUDA_BULK_ERASE_BLOCK, PBA_HI(pba),
PBA_ZONE(pba), 0, PBA_LO(pba), 0x02, 0, al->port
};
u8 buf[2];
int i, err;
for (i=0; i<2; i++)
sg.urb[i] = NULL;
err = -ENOMEM;
for (i=0; i<2; i++) {
sg.urb[i] = usb_alloc_urb(0, GFP_NOIO);
if (!sg.urb[i])
goto out;
}
init_completion(&sg.comp);
usb_fill_bulk_urb(sg.urb[0], al->dev, al->bulk_out, command, 9,
alauda_complete, NULL);
usb_fill_bulk_urb(sg.urb[1], al->dev, al->bulk_in, buf, 2,
alauda_complete, &sg.comp);
mutex_lock(&al->card_mutex);
for (i=0; i<2; i++) {
err = usb_submit_urb(sg.urb[i], GFP_NOIO);
if (err)
goto cancel;
}
if (!wait_for_completion_timeout(&sg.comp, TIMEOUT)) {
err = -ETIMEDOUT;
cancel:
for (i=0; i<2; i++) {
usb_kill_urb(sg.urb[i]);
}
}
mutex_unlock(&al->card_mutex);
out:
usb_free_urb(sg.urb[0]);
usb_free_urb(sg.urb[1]);
return err;
}
/*
 * Read only the OOB area of a page, discarding the page data into a
 * shared scratch buffer (ALAUDA_BULK_READ_OOB is avoided due to a chip
 * bug — see the command defines above).
 * NOTE(review): ignore_buf is static, so concurrent readers would write
 * into the same scratch; presumably serialized by card_mutex inside
 * __alauda_read_page — confirm.
 */
static int alauda_read_oob(struct mtd_info *mtd, loff_t from, void *oob)
{
static u8 ignore_buf[512]; /* write only */
return __alauda_read_page(mtd, from, ignore_buf, oob);
}
/*
 * Bad-block check: read OOB and inspect the block-status byte (oob[5]).
 * Returns 1 if bad, 0 if good, or a negative errno from the OOB read.
 */
static int alauda_isbad(struct mtd_info *mtd, loff_t ofs)
{
u8 oob[16];
int err;
err = alauda_read_oob(mtd, ofs, oob);
if (err)
return err;
/* A block is marked bad if two or more bits are zero */
return hweight8(oob[5]) >= 7 ? 0 : 1;
}
/*
 * Unaligned read path: read whole pages into a bounce buffer and copy
 * out just the requested span.  Returns 0, -EUCLEAN if any bitflips
 * were corrected, -EBADMSG on uncorrectable ECC errors, or a negative
 * errno from the page read.
 */
static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct alauda *al = mtd->priv;
void *bounce_buf;
int err, corrected=0, uncorrected=0;
bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
if (!bounce_buf)
return -ENOMEM;
*retlen = len;
while (len) {
u8 oob[16];
/* offset of the request inside the current page */
size_t byte = from & al->bytemask;
size_t cplen = min(len, mtd->writesize - byte);
err = alauda_read_page(mtd, from, bounce_buf, oob,
&corrected, &uncorrected);
if (err)
goto out;
memcpy(buf, bounce_buf + byte, cplen);
buf += cplen;
from += cplen;
len -= cplen;
}
err = 0;
if (corrected)
err = -EUCLEAN;
if (uncorrected)
err = -EBADMSG;
out:
kfree(bounce_buf);
return err;
}
/*
 * MTD read entry point.  Page-aligned requests are read directly into
 * @buf one page at a time; unaligned ones fall back to the bounce-buffer
 * path.  Same return convention as alauda_bounce_read().
 */
static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct alauda *al = mtd->priv;
int err, corrected=0, uncorrected=0;
if ((from & al->bytemask) || (len & al->bytemask))
return alauda_bounce_read(mtd, from, len, retlen, buf);
*retlen = len;
while (len) {
u8 oob[16];
err = alauda_read_page(mtd, from, buf, oob,
&corrected, &uncorrected);
if (err)
return err;
buf += mtd->writesize;
from += mtd->writesize;
len -= mtd->writesize;
}
err = 0;
if (corrected)
err = -EUCLEAN;
if (uncorrected)
err = -EBADMSG;
return err;
}
/*
 * MTD write entry point.  Requires page-aligned offset and length.
 * Computes ECC for both 256-byte halves into the OOB before each page
 * write, and refuses to write into blocks marked bad (checked once per
 * block, at page 0).
 */
static int alauda_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct alauda *al = mtd->priv;
int err;
if ((to & al->bytemask) || (len & al->bytemask))
return -EINVAL;
*retlen = len;
while (len) {
u32 page = (to >> al->card->pageshift) & al->pagemask;
u8 oob[16] = { 'h', 'e', 'l', 'l', 'o', 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/* don't write to bad blocks */
if (page == 0) {
err = alauda_isbad(mtd, to);
if (err) {
return -EIO;
}
}
/* ECC bytes live at oob[13..15] (first half) and oob[8..10] */
nand_calculate_ecc(mtd, buf, &oob[13]);
nand_calculate_ecc(mtd, buf+256, &oob[8]);
err = alauda_write_page(mtd, to, (void*)buf, oob);
if (err)
return err;
buf += mtd->writesize;
to += mtd->writesize;
len -= mtd->writesize;
}
return 0;
}
/*
 * Erase worker: validates block alignment of offset and length, then
 * erases block by block, skipping (and failing on) blocks marked bad.
 * Returns 0 or a negative errno.
 */
static int __alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct alauda *al = mtd->priv;
u32 ofs = instr->addr;
u32 len = instr->len;
int err;
if ((ofs & al->blockmask) || (len & al->blockmask))
return -EINVAL;
while (len) {
/* don't erase bad blocks */
err = alauda_isbad(mtd, ofs);
if (err > 0)
err = -EIO;
if (err < 0)
return err;
err = alauda_erase_block(mtd, ofs);
if (err < 0)
return err;
ofs += mtd->erasesize;
len -= mtd->erasesize;
}
return 0;
}
/*
 * MTD erase entry point: run the worker, record the final state in
 * @instr, and notify the MTD layer via the erase callback.
 */
static int alauda_erase(struct mtd_info *mtd, struct erase_info *instr)
{
int err;
err = __alauda_erase(mtd, instr);
instr->state = err ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
mtd_erase_callback(instr);
return err;
}
/*
 * Bring up newly-detected media on this port: wait for the ready bit,
 * ack the media change, identify the card from its signature, derive
 * geometry masks, and register a fresh MTD device.
 * Returns 0 on success or a negative errno (the mtd struct is freed on
 * every error path).
 */
static int alauda_init_media(struct alauda *al)
{
u8 buf[4], *b0=buf, *b1=buf+1;
struct alauda_card *card;
struct mtd_info *mtd;
int err;
mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
return -ENOMEM;
/* poll until the status byte reports the media ready bit (0x10) */
for (;;) {
err = alauda_get_media_status(al, buf);
if (err < 0)
goto error;
if (*b0 & 0x10)
break;
msleep(20);
}
err = alauda_ack_media(al);
if (err)
goto error;
msleep(10);
err = alauda_get_media_status(al, buf);
if (err < 0)
goto error;
if (*b0 != 0x14) {
/* media not ready */
err = -EIO;
goto error;
}
err = alauda_get_media_signatures(al, buf);
if (err < 0)
goto error;
/* signature byte 1 identifies the card geometry */
card = get_card(*b1);
if (!card) {
printk(KERN_ERR"Alauda: unknown card id %02x\n", *b1);
err = -EIO;
goto error;
}
printk(KERN_INFO"pagesize=%x\nerasesize=%x\nsize=%xMiB\n",
1<<card->pageshift, 1<<card->blockshift,
1<<(card->chipshift-20));
al->card = card;
/* precompute page/byte/block masks from the shift-based geometry */
al->pagemask = (1 << (card->blockshift - card->pageshift)) - 1;
al->bytemask = (1 << card->pageshift) - 1;
al->blockmask = (1 << card->blockshift) - 1;
mtd->name = "alauda";
mtd->size = 1<<card->chipshift;
mtd->erasesize = 1<<card->blockshift;
mtd->writesize = 1<<card->pageshift;
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
mtd->_read = alauda_read;
mtd->_write = alauda_write;
mtd->_erase = alauda_erase;
mtd->_block_isbad = alauda_isbad;
mtd->priv = al;
mtd->owner = THIS_MODULE;
mtd->ecc_strength = 1;
err = mtd_device_register(mtd, NULL, 0);
if (err) {
err = -ENFILE;
goto error;
}
al->mtd = mtd;
alauda_reset(al); /* no clue whether this is necessary */
return 0;
error:
kfree(mtd);
return err;
}
/*
 * Inspect the media status bytes: fail if the door is open or no media
 * is present, and (re)initialize the media when a change is flagged.
 * Returns 0, -EIO, or the alauda_init_media() result.
 */
static int alauda_check_media(struct alauda *al)
{
u8 buf[2], *b0 = buf, *b1 = buf+1;
int err;
err = alauda_get_media_status(al, buf);
if (err < 0)
return err;
if ((*b1 & 0x01) == 0) {
/* door open */
return -EIO;
}
if ((*b0 & 0x80) || ((*b0 & 0x1F) == 0x10)) {
/* no media ? */
return -EIO;
}
if (*b0 & 0x08) {
/* media change ? */
return alauda_init_media(al);
}
return 0;
}
/*
 * USB probe: allocate state for BOTH card ports (xD and SmartMedia) as
 * one array of two struct alauda, discover the three bulk endpoints
 * (the first bulk-out is the dedicated write pipe), clone the shared
 * fields into the second slot, and kick an initial media check on each
 * port.  Returns 0 or a negative errno.
 */
static int alauda_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct alauda *al;
struct usb_host_interface *iface;
struct usb_endpoint_descriptor *ep,
*ep_in=NULL, *ep_out=NULL, *ep_wr=NULL;
int i, err = -ENOMEM;
al = kzalloc(2*sizeof(*al), GFP_KERNEL);
if (!al)
goto error;
kref_init(&al->kref);
usb_set_intfdata(interface, al);
al->dev = usb_get_dev(interface_to_usbdev(interface));
al->interface = interface;
iface = interface->cur_altsetting;
for (i = 0; i < iface->desc.bNumEndpoints; ++i) {
ep = &iface->endpoint[i].desc;
if (usb_endpoint_is_bulk_in(ep)) {
ep_in = ep;
} else if (usb_endpoint_is_bulk_out(ep)) {
/* first bulk-out endpoint carries page data writes */
if (i==0)
ep_wr = ep;
else
ep_out = ep;
}
}
err = -EIO;
if (!ep_wr || !ep_in || !ep_out)
goto error;
al->write_out = usb_sndbulkpipe(al->dev,
usb_endpoint_num(ep_wr));
al->bulk_in = usb_rcvbulkpipe(al->dev,
usb_endpoint_num(ep_in));
al->bulk_out = usb_sndbulkpipe(al->dev,
usb_endpoint_num(ep_out));
/* second device is identical up to now */
memcpy(al+1, al, sizeof(*al));
mutex_init(&al[0].card_mutex);
mutex_init(&al[1].card_mutex);
al[0].port = ALAUDA_PORT_XD;
al[1].port = ALAUDA_PORT_SM;
dev_info(&interface->dev, "alauda probed\n");
/* best-effort: a port without media just stays unregistered */
alauda_check_media(al);
alauda_check_media(al+1);
return 0;
error:
if (al)
kref_put(&al->kref, alauda_delete);
return err;
}
/*
 * USB disconnect: detach the driver data from the interface and drop
 * the probe-time kref, which frees everything via alauda_delete once
 * the last user is gone.
 */
static void alauda_disconnect(struct usb_interface *interface)
{
struct alauda *al;
al = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
/* FIXME: prevent more I/O from starting */
/* decrement our usage count */
if (al)
kref_put(&al->kref, alauda_delete);
dev_info(&interface->dev, "alauda gone");
}
/* USB driver glue; module_usb_driver() generates the init/exit boilerplate. */
static struct usb_driver alauda_driver = {
	.name =		"alauda",
	.probe =	alauda_probe,
	.disconnect =	alauda_disconnect,
	.id_table =	alauda_table,
};

module_usb_driver(alauda_driver);

MODULE_LICENSE("GPL");
| gpl-2.0 |
XMelancholy/kernel_snda_u8500 | drivers/net/wireless/wl12xx/sdio.c | 4802 | 9377 | /*
* This file is part of wl1271
*
* Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
#include "wl12xx.h"
#include "wl12xx_80211.h"
#include "io.h"
#ifndef SDIO_VENDOR_ID_TI
#define SDIO_VENDOR_ID_TI 0x0097
#endif
#ifndef SDIO_DEVICE_ID_TI_WL1271
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
struct wl12xx_sdio_glue {
struct device *dev;
struct platform_device *core;
};
static const struct sdio_device_id wl1271_devices[] __devinitconst = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1271_devices);
/* Apply a new SDIO block size on behalf of the wl12xx core driver. */
static void wl1271_sdio_set_block_size(struct device *child,
				       unsigned int blksz)
{
	struct wl12xx_sdio_glue *glue;
	struct sdio_func *func;

	glue = dev_get_drvdata(child->parent);
	func = dev_to_sdio_func(glue->dev);

	sdio_claim_host(func);
	sdio_set_block_size(func, blksz);
	sdio_release_host(func);
}
/* Read from the chip over SDIO on behalf of the wl12xx core. */
static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
				 size_t len, bool fixed)
{
	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
	struct sdio_func *func = dev_to_sdio_func(glue->dev);
	int ret;

	sdio_claim_host(func);

	if (likely(addr != HW_ACCESS_ELP_CTRL_REG_ADDR)) {
		/* CMD53 block transfer: fixed or incrementing address. */
		ret = fixed ? sdio_readsb(func, buf, addr, len) :
			      sdio_memcpy_fromio(func, buf, addr, len);

		dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
			addr, len);
	} else {
		/* The ELP control register lives in function 0: CMD52. */
		((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);

		dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
			addr, ((u8 *)buf)[0]);
	}

	sdio_release_host(func);

	if (ret)
		dev_err(child->parent, "sdio read failed (%d)\n", ret);
}
/* Write to the chip over SDIO on behalf of the wl12xx core. */
static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
				  size_t len, bool fixed)
{
	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
	struct sdio_func *func = dev_to_sdio_func(glue->dev);
	int ret;

	sdio_claim_host(func);

	if (likely(addr != HW_ACCESS_ELP_CTRL_REG_ADDR)) {
		dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
			addr, len);

		/* CMD53 block transfer: fixed or incrementing address. */
		ret = fixed ? sdio_writesb(func, addr, buf, len) :
			      sdio_memcpy_toio(func, addr, buf, len);
	} else {
		/* The ELP control register lives in function 0: CMD52. */
		sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);

		dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
			addr, ((u8 *)buf)[0]);
	}

	sdio_release_host(func);

	if (ret)
		dev_err(child->parent, "sdio write failed (%d)\n", ret);
}
/*
 * Power the card up, either via runtime PM or manually.
 *
 * Fixes: (1) pm_runtime_get_sync() increments the usage count even when it
 * fails, so the failure path must drop that count again or the device can
 * never runtime-suspend; (2) the return value of sdio_enable_func() was
 * ignored, silently reporting success for a function that never enabled.
 */
static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
	int ret;
	struct sdio_func *func = dev_to_sdio_func(glue->dev);

	/* If enabled, tell runtime PM not to power off the card */
	if (pm_runtime_enabled(&func->dev)) {
		ret = pm_runtime_get_sync(&func->dev);
		if (ret < 0) {
			/* rebalance the count bumped by get_sync() */
			pm_runtime_put_noidle(&func->dev);
			goto out;
		}
	} else {
		/* Runtime PM is disabled: power up the card manually */
		ret = mmc_power_restore_host(func->card->host);
		if (ret < 0)
			goto out;
	}

	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	sdio_release_host(func);

out:
	return ret;
}
/* Power the card down: disable the function, cut power, then let runtime
 * PM drop its usage count so the core knows the card is off. */
static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
	int ret;
	struct sdio_func *func = dev_to_sdio_func(glue->dev);

	sdio_claim_host(func);
	sdio_disable_func(func);
	sdio_release_host(func);

	/* Power off the card manually, even if runtime PM is enabled. */
	ret = mmc_power_save_host(func->card->host);
	if (ret < 0)
		return ret;

	/* If enabled, let runtime PM know the card is powered off */
	if (pm_runtime_enabled(&func->dev))
		ret = pm_runtime_put_sync(&func->dev);

	return ret;
}
/* Power callback exposed to the wl12xx core via sdio_ops. */
static int wl12xx_sdio_set_power(struct device *child, bool enable)
{
	struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);

	if (!enable)
		return wl12xx_sdio_power_off(glue);

	return wl12xx_sdio_power_on(glue);
}
/* Bus operations handed to the wl12xx core through the platform data. */
static struct wl1271_if_operations sdio_ops = {
	.read		= wl12xx_sdio_raw_read,
	.write		= wl12xx_sdio_raw_write,
	.power		= wl12xx_sdio_set_power,
	.set_block_size = wl1271_sdio_set_block_size,
};
/*
 * Probe the WLAN SDIO function: build the glue object and register the
 * child "wl12xx" platform device that runs the actual core driver.
 */
static int __devinit wl1271_probe(struct sdio_func *func,
				  const struct sdio_device_id *id)
{
	struct wl12xx_platform_data *wlan_data;
	struct wl12xx_sdio_glue *glue;
	struct resource res[1];
	mmc_pm_flag_t mmcflags;
	int ret = -ENOMEM;

	/* We are only able to handle the wlan function */
	if (func->num != 0x02)
		return -ENODEV;

	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&func->dev, "can't allocate glue\n");
		goto out;
	}

	glue->dev = &func->dev;

	/* Grab access to FN0 for ELP reg. */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* Use block mode for transferring over one block size of data */
	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;

	wlan_data = wl12xx_get_platform_data();
	if (IS_ERR(wlan_data)) {
		ret = PTR_ERR(wlan_data);
		dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
		goto out_free_glue;
	}

	/* if sdio can keep power while host is suspended, enable wow */
	mmcflags = sdio_get_host_pm_caps(func);
	dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);

	if (mmcflags & MMC_PM_KEEP_POWER)
		wlan_data->pwr_in_suspend = true;

	wlan_data->ops = &sdio_ops;

	sdio_set_drvdata(func, glue);

	/* Tell PM core that we don't need the card to be powered now */
	pm_runtime_put_noidle(&func->dev);

	/* NOTE(review): the error paths below do not undo the
	 * pm_runtime_put_noidle() above (only wl1271_remove() rebalances
	 * it) — confirm whether a pm_runtime_get_noresume() is needed on
	 * failure. */
	glue->core = platform_device_alloc("wl12xx", -1);
	if (!glue->core) {
		dev_err(glue->dev, "can't allocate platform_device");
		ret = -ENOMEM;
		goto out_free_glue;
	}

	glue->core->dev.parent = &func->dev;

	/* Pass the out-of-band IRQ to the core as a platform resource. */
	memset(res, 0x00, sizeof(res));

	res[0].start = wlan_data->irq;
	res[0].flags = IORESOURCE_IRQ;
	res[0].name = "irq";

	ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
	if (ret) {
		dev_err(glue->dev, "can't add resources\n");
		goto out_dev_put;
	}

	ret = platform_device_add_data(glue->core, wlan_data,
				       sizeof(*wlan_data));
	if (ret) {
		dev_err(glue->dev, "can't add platform data\n");
		goto out_dev_put;
	}

	ret = platform_device_add(glue->core);
	if (ret) {
		dev_err(glue->dev, "can't add platform device\n");
		goto out_dev_put;
	}
	return 0;

out_dev_put:
	platform_device_put(glue->core);

out_free_glue:
	kfree(glue);

out:
	return ret;
}
/* Remove: tear down the core platform device and free the glue. */
static void __devexit wl1271_remove(struct sdio_func *func)
{
	struct wl12xx_sdio_glue *glue;

	glue = sdio_get_drvdata(func);

	/* Rebalance the pm_runtime_put_noidle() done in wl1271_probe(). */
	pm_runtime_get_noresume(&func->dev);

	/* Unregister the child device, then drop our reference to it. */
	platform_device_del(glue->core);
	platform_device_put(glue->core);

	kfree(glue);
}
#ifdef CONFIG_PM
/* System suspend: if wake-on-wireless is active, ask the MMC host to keep
 * the card powered across suspend; otherwise nothing to do. */
static int wl1271_suspend(struct device *dev)
{
	/* Tell MMC/SDIO core it's OK to power down the card
	 * (if it isn't already), but not to remove it completely */
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
	struct wl1271 *wl = platform_get_drvdata(glue->core);
	mmc_pm_flag_t sdio_flags;
	int ret = 0;

	dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
		wl->wow_enabled);

	/* check whether sdio should keep power */
	if (wl->wow_enabled) {
		sdio_flags = sdio_get_host_pm_caps(func);

		/* WoW needs the card powered; fail the suspend if the
		 * host controller cannot guarantee that. */
		if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
			dev_err(dev, "can't keep power while host "
				     "is suspended\n");
			ret = -EINVAL;
			goto out;
		}

		/* keep power while host suspended */
		ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
		if (ret) {
			dev_err(dev, "error while trying to keep power\n");
			goto out;
		}
	}
out:
	return ret;
}
/* System resume: nothing to restore here, just trace the event. */
static int wl1271_resume(struct device *dev)
{
	dev_dbg(dev, "wl1271 resume\n");

	return 0;
}
/* System sleep callbacks (runtime PM is handled separately above). */
static const struct dev_pm_ops wl1271_sdio_pm_ops = {
	.suspend	= wl1271_suspend,
	.resume		= wl1271_resume,
};
#endif
/* SDIO driver glue; PM ops only compiled in when CONFIG_PM is set. */
static struct sdio_driver wl1271_sdio_driver = {
	.name		= "wl1271_sdio",
	.id_table	= wl1271_devices,
	.probe		= wl1271_probe,
	.remove		= __devexit_p(wl1271_remove),
#ifdef CONFIG_PM
	.drv = {
		.pm = &wl1271_sdio_pm_ops,
	},
#endif
};
/* Module init: register with the SDIO core. */
static int __init wl1271_init(void)
{
	return sdio_register_driver(&wl1271_sdio_driver);
}
/* Module exit: unregister from the SDIO core. */
static void __exit wl1271_exit(void)
{
	sdio_unregister_driver(&wl1271_sdio_driver);
}
module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
| gpl-2.0 |
engine95/navelC-990 | drivers/net/wireless/wl12xx/spi.c | 4802 | 10415 | /*
* This file is part of wl1271
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl12xx.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "reg.h"
#define WSPI_CMD_READ 0x40000000
#define WSPI_CMD_WRITE 0x00000000
#define WSPI_CMD_FIXED 0x20000000
#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
#define WSPI_INIT_CMD_CRC_LEN 5
#define WSPI_INIT_CMD_START 0x00
#define WSPI_INIT_CMD_TX 0x40
/* the extra bypass bit is sampled by the TNET as '1' */
#define WSPI_INIT_CMD_BYPASS_BIT 0x80
#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
#define WSPI_INIT_CMD_IOD 0x40
#define WSPI_INIT_CMD_IP 0x20
#define WSPI_INIT_CMD_CS 0x10
#define WSPI_INIT_CMD_WS 0x08
#define WSPI_INIT_CMD_WSPI 0x01
#define WSPI_INIT_CMD_END 0x01
#define WSPI_INIT_CMD_LEN 8
#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
/* HW limitation: maximum possible chunk size is 4095 bytes */
#define WSPI_MAX_CHUNK_SIZE 4092
#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
struct wl12xx_spi_glue {
struct device *dev;
struct platform_device *core;
};
/* Reset the chip's WSPI interface by clocking out a frame of all-ones. */
static void wl12xx_spi_reset(struct device *child)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	struct spi_message m;
	struct spi_transfer t;
	u8 *cmd;

	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
	if (!cmd) {
		dev_err(child->parent,
			"could not allocate cmd for spi reset\n");
		return;
	}

	memset(cmd, 0xff, WSPI_INIT_CMD_LEN);

	spi_message_init(&m);
	memset(&t, 0, sizeof(t));
	t.tx_buf = cmd;
	t.len = WSPI_INIT_CMD_LEN;
	spi_message_add_tail(&t, &m);

	spi_sync(to_spi_device(glue->dev), &m);

	kfree(cmd);
}
/*
 * Send the WSPI init command that configures the chip's SPI interface
 * (fixed-busy length, polarities, CRC). The byte layout is wire-order
 * sensitive — do not reorder the assignments below.
 */
static void wl12xx_spi_init(struct device *child)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
	struct spi_transfer t;
	struct spi_message m;

	cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
	if (!cmd) {
		dev_err(child->parent,
			"could not allocate cmd for spi init\n");
		return;
	}

	memset(crc, 0, sizeof(crc));
	memset(&t, 0, sizeof(t));
	spi_message_init(&m);

	/*
	 * Set WSPI_INIT_COMMAND
	 * the data is being send from the MSB to LSB
	 */
	cmd[2] = 0xff;
	cmd[3] = 0xff;
	cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
	cmd[0] = 0;
	cmd[7] = 0;
	cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
	cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;

	/* Enable/disable fixed-busy depending on the configured length. */
	if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
		cmd[5] |=  WSPI_INIT_CMD_DIS_FIXEDBUSY;
	else
		cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY;

	cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
		| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;

	/* CRC7 is computed over the bytes in transmit (wire) order. */
	crc[0] = cmd[1];
	crc[1] = cmd[0];
	crc[2] = cmd[7];
	crc[3] = cmd[6];
	crc[4] = cmd[5];

	cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1;
	cmd[4] |= WSPI_INIT_CMD_END;

	t.tx_buf = cmd;
	t.len = WSPI_INIT_CMD_LEN;
	spi_message_add_tail(&t, &m);

	spi_sync(to_spi_device(glue->dev), &m);

	kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
/* Poll busy words, one u32 at a time, until the chip reports ready. */
static int wl12xx_spi_read_busy(struct device *child)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	struct wl1271 *wl = dev_get_drvdata(child);
	struct spi_transfer t[1];
	struct spi_message m;
	u32 *busy_buf = wl->buffer_busyword;
	int attempts;

	/*
	 * Read further busy words from SPI until a non-busy word is
	 * encountered, then read the data itself into the buffer.
	 */
	for (attempts = WL1271_BUSY_WORD_TIMEOUT; attempts > 0; attempts--) {
		spi_message_init(&m);
		memset(t, 0, sizeof(t));
		t[0].rx_buf = busy_buf;
		t[0].len = sizeof(u32);
		t[0].cs_change = true;
		spi_message_add_tail(&t[0], &m);
		spi_sync(to_spi_device(glue->dev), &m);

		/* LSB set in the busy word means "not busy any more". */
		if (*busy_buf & 0x1)
			return 0;
	}

	/* The SPI bus is unresponsive, the read failed. */
	dev_err(child->parent, "SPI read busy-word timeout!\n");
	return -ETIMEDOUT;
}
/*
 * Read from the chip over WSPI, splitting large transfers into
 * WSPI_MAX_CHUNK_SIZE chunks (hardware limit). Each chunk is a command
 * word, then busy words, then the data itself.
 */
static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
				size_t len, bool fixed)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	struct wl1271 *wl = dev_get_drvdata(child);
	struct spi_transfer t[2];
	struct spi_message m;
	u32 *busy_buf;
	u32 *cmd;
	u32 chunk_len;

	while (len > 0) {
		chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);

		/* Build the WSPI read command word for this chunk. */
		cmd = &wl->buffer_cmd;
		busy_buf = wl->buffer_busyword;

		*cmd = 0;
		*cmd |= WSPI_CMD_READ;
		*cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
			WSPI_CMD_BYTE_LENGTH;
		*cmd |= addr & WSPI_CMD_BYTE_ADDR;

		if (fixed)
			*cmd |= WSPI_CMD_FIXED;

		spi_message_init(&m);
		memset(t, 0, sizeof(t));

		t[0].tx_buf = cmd;
		t[0].len = 4;
		t[0].cs_change = true;
		spi_message_add_tail(&t[0], &m);

		/* Busy and non busy words read */
		t[1].rx_buf = busy_buf;
		t[1].len = WL1271_BUSY_WORD_LEN;
		t[1].cs_change = true;
		spi_message_add_tail(&t[1], &m);

		spi_sync(to_spi_device(glue->dev), &m);

		/* Still busy after the fixed busy words? Keep polling;
		 * on timeout return zeroed data rather than stale bytes. */
		if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
		    wl12xx_spi_read_busy(child)) {
			memset(buf, 0, chunk_len);
			return;
		}

		spi_message_init(&m);
		memset(t, 0, sizeof(t));

		t[0].rx_buf = buf;
		t[0].len = chunk_len;
		t[0].cs_change = true;
		spi_message_add_tail(&t[0], &m);

		spi_sync(to_spi_device(glue->dev), &m);

		/* Fixed-address (FIFO) reads do not advance the address. */
		if (!fixed)
			addr += chunk_len;
		buf += chunk_len;
		len -= chunk_len;
	}
}
/*
 * Write to the chip over WSPI, splitting the buffer into chunks of at
 * most WSPI_MAX_CHUNK_SIZE bytes, all queued into one SPI message.
 *
 * Fix: WSPI_MAX_NUM_OF_CHUNKS rounds *down*, so when
 * WL1271_AGGR_BUFFER_SIZE is not an exact multiple of WSPI_MAX_CHUNK_SIZE
 * a maximal transfer needs one more chunk than the macro allows and the
 * loop overran the on-stack t[]/commands[] arrays. Size both arrays for
 * that extra chunk (same fix as the later wlcore spi driver).
 */
static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
				 size_t len, bool fixed)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
	struct spi_message m;
	u32 commands[WSPI_MAX_NUM_OF_CHUNKS + 1];
	u32 *cmd;
	u32 chunk_len;
	int i;

	WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);

	spi_message_init(&m);
	memset(t, 0, sizeof(t));

	cmd = &commands[0];
	i = 0;
	while (len > 0) {
		chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);

		/* Build the WSPI write command word for this chunk. */
		*cmd = 0;
		*cmd |= WSPI_CMD_WRITE;
		*cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
			WSPI_CMD_BYTE_LENGTH;
		*cmd |= addr & WSPI_CMD_BYTE_ADDR;

		if (fixed)
			*cmd |= WSPI_CMD_FIXED;

		t[i].tx_buf = cmd;
		t[i].len = sizeof(*cmd);
		spi_message_add_tail(&t[i++], &m);

		t[i].tx_buf = buf;
		t[i].len = chunk_len;
		spi_message_add_tail(&t[i++], &m);

		/* Fixed-address (FIFO) writes do not advance the address. */
		if (!fixed)
			addr += chunk_len;
		buf += chunk_len;
		len -= chunk_len;
		cmd++;
	}

	spi_sync(to_spi_device(glue->dev), &m);
}
/* Bus operations handed to the wl12xx core through the platform data.
 * SPI has no block-size concept, hence the NULL callback. */
static struct wl1271_if_operations spi_ops = {
	.read		= wl12xx_spi_raw_read,
	.write		= wl12xx_spi_raw_write,
	.reset		= wl12xx_spi_reset,
	.init		= wl12xx_spi_init,
	.set_block_size = NULL,
};
/*
 * Probe the SPI device: configure the SPI link, build the glue object
 * and register the child "wl12xx" platform device.
 */
static int __devinit wl1271_probe(struct spi_device *spi)
{
	struct wl12xx_spi_glue *glue;
	struct wl12xx_platform_data *pdata;
	struct resource res[1];
	int ret = -ENOMEM;

	pdata = spi->dev.platform_data;
	if (!pdata) {
		dev_err(&spi->dev, "no platform data\n");
		return -ENODEV;
	}

	pdata->ops = &spi_ops;

	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&spi->dev, "can't allocate glue\n");
		goto out;
	}

	glue->dev = &spi->dev;

	spi_set_drvdata(spi, glue);

	/* This is the only SPI value that we need to set here, the rest
	 * comes from the board-peripherals file */
	spi->bits_per_word = 32;

	ret = spi_setup(spi);
	if (ret < 0) {
		dev_err(glue->dev, "spi_setup failed\n");
		goto out_free_glue;
	}

	glue->core = platform_device_alloc("wl12xx", -1);
	if (!glue->core) {
		dev_err(glue->dev, "can't allocate platform_device\n");
		ret = -ENOMEM;
		goto out_free_glue;
	}

	glue->core->dev.parent = &spi->dev;

	/* Pass the SPI IRQ to the core as a platform resource. */
	memset(res, 0x00, sizeof(res));

	res[0].start = spi->irq;
	res[0].flags = IORESOURCE_IRQ;
	res[0].name = "irq";

	ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
	if (ret) {
		dev_err(glue->dev, "can't add resources\n");
		goto out_dev_put;
	}

	ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
	if (ret) {
		dev_err(glue->dev, "can't add platform data\n");
		goto out_dev_put;
	}

	ret = platform_device_add(glue->core);
	if (ret) {
		dev_err(glue->dev, "can't register platform device\n");
		goto out_dev_put;
	}

	return 0;

out_dev_put:
	platform_device_put(glue->core);

out_free_glue:
	kfree(glue);
out:
	return ret;
}
/* Remove: tear down the core platform device and free the glue. */
static int __devexit wl1271_remove(struct spi_device *spi)
{
	struct wl12xx_spi_glue *glue;

	glue = spi_get_drvdata(spi);

	/* Unregister the child device, then drop our reference to it. */
	platform_device_del(glue->core);
	platform_device_put(glue->core);

	kfree(glue);

	return 0;
}
/* SPI driver glue. */
static struct spi_driver wl1271_spi_driver = {
	.driver = {
		.name		= "wl1271_spi",
		.owner		= THIS_MODULE,
	},

	.probe		= wl1271_probe,
	.remove		= __devexit_p(wl1271_remove),
};
/* Module init: register with the SPI core. */
static int __init wl1271_init(void)
{
	return spi_register_driver(&wl1271_spi_driver);
}

/* Module exit: unregister from the SPI core. */
static void __exit wl1271_exit(void)
{
	spi_unregister_driver(&wl1271_spi_driver);
}
module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
MODULE_ALIAS("spi:wl1271");
| gpl-2.0 |
GalaxyTab4/aosparadox_kernel_samsung_msm8226 | drivers/video/backlight/tosa_bl.c | 4802 | 4182 | /*
* LCD / Backlight control code for Sharp SL-6000x (tosa)
*
* Copyright (c) 2005 Dirk Opfer
* Copyright (c) 2007,2008 Dmitry Baryshkov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/slab.h>
#include <asm/mach/sharpsl_param.h>
#include <mach/tosa.h>
#define COMADJ_DEFAULT 97
#define DAC_CH1 0
#define DAC_CH2 1
/* Per-device state for the tosa backlight. */
struct tosa_bl_data {
	struct i2c_client *i2c;		/* DAC behind this I2C client */
	struct backlight_device *bl;	/* registered backlight device */

	int comadj;			/* contrast value written to DAC ch 1 */
};
/* Push a brightness value out to the DAC, the VR GPIO and the LCD glue.
 * brightness is 0..511: low byte goes to DAC channel 2, bit 8 drives
 * the TOSA_GPIO_BL_C20MA line. */
static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
{
	/* NOTE(review): assumes the i2c client's platform_data is the LCD's
	 * spi_device, as tosa_bl_enable() expects — confirm in board code. */
	struct spi_device *spi = data->i2c->dev.platform_data;

	i2c_smbus_write_byte_data(data->i2c, DAC_CH1, data->comadj);

	/* SetBacklightDuty */
	i2c_smbus_write_byte_data(data->i2c, DAC_CH2, (u8)(brightness & 0xff));

	/* SetBacklightVR */
	gpio_set_value(TOSA_GPIO_BL_C20MA, brightness & 0x100);

	tosa_bl_enable(spi, brightness);
}
/* Apply the current backlight properties to the hardware. */
static int tosa_bl_update_status(struct backlight_device *dev)
{
	struct backlight_properties *props = &dev->props;
	struct tosa_bl_data *data = dev_get_drvdata(&dev->dev);
	int brightness = props->brightness;

	/* Force the backlight off when powered down or fb-blanked. */
	if (max(props->power, props->fb_blank))
		brightness = 0;

	tosa_bl_set_backlight(data, brightness);

	return 0;
}
/* Report the cached brightness property. */
static int tosa_bl_get_brightness(struct backlight_device *dev)
{
	return dev->props.brightness;
}
/* Callbacks registered with the backlight core. */
static const struct backlight_ops bl_ops = {
	.get_brightness		= tosa_bl_get_brightness,
	.update_status		= tosa_bl_update_status,
};
/*
 * Probe: claim the brightness GPIO, register the backlight device and
 * apply the initial brightness.
 *
 * Fix: the gpio_request() failure path logged through &data->bl->dev,
 * but data->bl is still NULL at that point (the backlight device is only
 * registered further down), so the debug print dereferenced a NULL
 * device. Log via the i2c client's device instead.
 */
static int __devinit tosa_bl_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	struct backlight_properties props;
	struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
	int ret = 0;
	if (!data)
		return -ENOMEM;

	/* Use the stored contrast value, falling back to the default. */
	data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;

	ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
	if (ret) {
		dev_dbg(&client->dev, "Unable to request gpio!\n");
		goto err_gpio_bl;
	}
	ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
	if (ret)
		goto err_gpio_dir;

	i2c_set_clientdata(client, data);
	data->i2c = client;

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 512 - 1;
	data->bl = backlight_device_register("tosa-bl", &client->dev, data,
					     &bl_ops, &props);
	if (IS_ERR(data->bl)) {
		ret = PTR_ERR(data->bl);
		goto err_reg;
	}

	/* Initial brightness, then push it to the hardware. */
	data->bl->props.brightness = 69;
	data->bl->props.power = FB_BLANK_UNBLANK;

	backlight_update_status(data->bl);

	return 0;

err_reg:
	data->bl = NULL;
err_gpio_dir:
	gpio_free(TOSA_GPIO_BL_C20MA);
err_gpio_bl:
	kfree(data);
	return ret;
}
/* Remove: unregister the backlight and release the GPIO. */
static int __devexit tosa_bl_remove(struct i2c_client *client)
{
	struct tosa_bl_data *data = i2c_get_clientdata(client);

	backlight_device_unregister(data->bl);
	data->bl = NULL;

	gpio_free(TOSA_GPIO_BL_C20MA);

	kfree(data);

	return 0;
}
#ifdef CONFIG_PM
/* Suspend: blank the backlight while the system sleeps. */
static int tosa_bl_suspend(struct i2c_client *client, pm_message_t pm)
{
	tosa_bl_set_backlight(i2c_get_clientdata(client), 0);

	return 0;
}
/* Resume: re-apply the brightness that was current before suspend. */
static int tosa_bl_resume(struct i2c_client *client)
{
	struct tosa_bl_data *data = i2c_get_clientdata(client);

	backlight_update_status(data->bl);

	return 0;
}
#else
#define tosa_bl_suspend NULL
#define tosa_bl_resume NULL
#endif
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id tosa_bl_id[] = {
	{ "tosa-bl", 0 },
	{ },
};
/* I2C driver glue; module_i2c_driver() generates init/exit boilerplate. */
static struct i2c_driver tosa_bl_driver = {
	.driver = {
		.name		= "tosa-bl",
		.owner		= THIS_MODULE,
	},
	.probe		= tosa_bl_probe,
	.remove		= __devexit_p(tosa_bl_remove),
	.suspend	= tosa_bl_suspend,
	.resume		= tosa_bl_resume,
	.id_table	= tosa_bl_id,
};
module_i2c_driver(tosa_bl_driver);
MODULE_AUTHOR("Dmitry Baryshkov");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA");
| gpl-2.0 |
StelixROM/android_kernel_google_msm | drivers/gpio/gpio-mcp23s08.c | 5058 | 16031 | /*
* MCP23S08 SPI/GPIO gpio expander driver
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
/**
* MCP types supported by driver
*/
#define MCP_TYPE_S08 0
#define MCP_TYPE_S17 1
#define MCP_TYPE_008 2
#define MCP_TYPE_017 3
/* Registers are all 8 bits wide.
*
* The mcp23s17 has twice as many bits, and can be configured to work
* with either 16 bit registers or with two adjacent 8 bit banks.
*/
#define MCP_IODIR 0x00 /* init/reset: all ones */
#define MCP_IPOL 0x01
#define MCP_GPINTEN 0x02
#define MCP_DEFVAL 0x03
#define MCP_INTCON 0x04
#define MCP_IOCON 0x05
# define IOCON_SEQOP (1 << 5)
# define IOCON_HAEN (1 << 3)
# define IOCON_ODR (1 << 2)
# define IOCON_INTPOL (1 << 1)
#define MCP_GPPU 0x06
#define MCP_INTF 0x07
#define MCP_INTCAP 0x08
#define MCP_GPIO 0x09
#define MCP_OLAT 0x0a
struct mcp23s08;
/* Bus accessors, one set per chip variant (SPI 8/16-bit, I2C 8/16-bit). */
struct mcp23s08_ops {
	int	(*read)(struct mcp23s08 *mcp, unsigned reg);
	int	(*write)(struct mcp23s08 *mcp, unsigned reg, unsigned val);
	int	(*read_regs)(struct mcp23s08 *mcp, unsigned reg,
			     u16 *vals, unsigned n);
};
/* Per-chip state; cache[] shadows registers MCP_IODIR..MCP_OLAT. */
struct mcp23s08 {
	u8			addr;	/* SPI slave address byte */

	u16			cache[11];
	/* lock protects the cached values */
	struct mutex		lock;

	struct gpio_chip	chip;

	const struct mcp23s08_ops	*ops;
	void			*data; /* ops specific data */
};
/* A given spi_device can represent up to eight mcp23sxx chips
* sharing the same chipselect but using different addresses
 * (e.g. chips #0 and #3 might be populated, but not #1 or #2).
* Driver data holds all the per-chip data.
*/
/* Per-spi_device data: up to eight chips share one chipselect. */
struct mcp23s08_driver_data {
	unsigned		ngpio;	/* total GPIOs across all chips */
	struct mcp23s08		*mcp[8];	/* indexed by chip address */
	struct mcp23s08		chip[];	/* storage for populated chips */
};
/*----------------------------------------------------------------------*/
#ifdef CONFIG_I2C
/* 8-bit I2C variant: one SMBus byte per register access. */
static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
{
	return i2c_smbus_read_byte_data(mcp->data, reg);
}

static int mcp23008_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
{
	return i2c_smbus_write_byte_data(mcp->data, reg, val);
}
/* Read n consecutive 8-bit registers, one SMBus transfer at a time. */
static int
mcp23008_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		int ret = mcp23008_read(mcp, reg + i);

		if (ret < 0)
			return ret;
		vals[i] = ret;
	}

	return 0;
}
/* 16-bit I2C variant: register addresses are doubled (paired banks). */
static int mcp23017_read(struct mcp23s08 *mcp, unsigned reg)
{
	return i2c_smbus_read_word_data(mcp->data, reg << 1);
}

static int mcp23017_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
{
	return i2c_smbus_write_word_data(mcp->data, reg << 1, val);
}
/* Read n consecutive 16-bit registers, one SMBus word at a time. */
static int
mcp23017_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		int ret = mcp23017_read(mcp, reg + i);

		if (ret < 0)
			return ret;
		vals[i] = ret;
	}

	return 0;
}
/* Accessor tables for the two I2C variants. */
static const struct mcp23s08_ops mcp23008_ops = {
	.read		= mcp23008_read,
	.write		= mcp23008_write,
	.read_regs	= mcp23008_read_regs,
};

static const struct mcp23s08_ops mcp23017_ops = {
	.read		= mcp23017_read,
	.write		= mcp23017_write,
	.read_regs	= mcp23017_read_regs,
};
#endif /* CONFIG_I2C */
/*----------------------------------------------------------------------*/
#ifdef CONFIG_SPI_MASTER
/* 8-bit SPI variant: frame is [addr | read bit][reg], one byte back. */
static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
{
	u8	tx[2], rx[1];
	int	status;

	tx[0] = mcp->addr | 0x01;	/* bit 0 set = read */
	tx[1] = reg;
	status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
	return (status < 0) ? status : rx[0];
}
/* 8-bit SPI write: [addr][reg][value]. */
static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
{
	u8 tx[3] = { mcp->addr, reg, val };

	return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
}
/* Sequential read of n 8-bit registers into a u16 array. */
static int
mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
{
	u8	tx[2], *tmp;
	int	status;

	if ((n + reg) > sizeof mcp->cache)
		return -EINVAL;
	tx[0] = mcp->addr | 0x01;	/* bit 0 set = read */
	tx[1] = reg;

	/* The raw bytes land at the front of the u16 buffer... */
	tmp = (u8 *)vals;
	status = spi_write_then_read(mcp->data, tx, sizeof tx, tmp, n);
	if (status >= 0) {
		/* ...then are widened in place; iterating downwards means
		 * each source byte is read before its slot is overwritten. */
		while (n--)
			vals[n] = tmp[n]; /* expand to 16bit */
	}
	return status;
}
/* 16-bit SPI variant: doubled register address, little-endian data. */
static int mcp23s17_read(struct mcp23s08 *mcp, unsigned reg)
{
	u8	tx[2], rx[2];
	int	status;

	tx[0] = mcp->addr | 0x01;	/* bit 0 set = read */
	tx[1] = reg << 1;
	status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
	return (status < 0) ? status : (rx[0] | (rx[1] << 8));
}
/* 16-bit SPI write: [addr][reg*2][low byte][high byte]. */
static int mcp23s17_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
{
	u8 tx[4] = { mcp->addr, reg << 1, val & 0xff, val >> 8 };

	return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
}
/* Sequential read of n 16-bit registers, converting from wire (LE) order. */
static int
mcp23s17_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
{
	u8	tx[2];
	int	status;

	if ((n + reg) > sizeof mcp->cache)
		return -EINVAL;
	tx[0] = mcp->addr | 0x01;	/* bit 0 set = read */
	tx[1] = reg << 1;

	status = spi_write_then_read(mcp->data, tx, sizeof tx,
				     (u8 *)vals, n * 2);
	if (status >= 0) {
		/* In-place LE-to-CPU fixup of each 16-bit value. */
		while (n--)
			vals[n] = __le16_to_cpu((__le16)vals[n]);
	}

	return status;
}
/* Accessor tables for the two SPI variants. */
static const struct mcp23s08_ops mcp23s08_ops = {
	.read		= mcp23s08_read,
	.write		= mcp23s08_write,
	.read_regs	= mcp23s08_read_regs,
};

static const struct mcp23s08_ops mcp23s17_ops = {
	.read		= mcp23s17_read,
	.write		= mcp23s17_write,
	.read_regs	= mcp23s17_read_regs,
};
#endif /* CONFIG_SPI_MASTER */
/*----------------------------------------------------------------------*/
/* gpiolib callback: configure one pin as an input. */
static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
	unsigned mask = 1 << offset;
	int status;

	mutex_lock(&mcp->lock);
	/* IODIR bit set means input; update the cache, then flush it. */
	mcp->cache[MCP_IODIR] |= mask;
	status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
	mutex_unlock(&mcp->lock);

	return status;
}
/* gpiolib callback: read one pin; errors are reported as level 0. */
static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
{
	struct mcp23s08	*mcp = container_of(chip, struct mcp23s08, chip);
	int status;

	mutex_lock(&mcp->lock);

	/* REVISIT reading this clears any IRQ ... */
	status = mcp->ops->read(mcp, MCP_GPIO);
	if (status < 0)
		status = 0;
	else {
		/* Refresh the cache and extract the requested bit. */
		mcp->cache[MCP_GPIO] = status;
		status = !!(status & (1 << offset));
	}
	mutex_unlock(&mcp->lock);
	return status;
}
/* Update the cached output latch and flush it; caller holds mcp->lock. */
static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, int value)
{
	unsigned olat = mcp->cache[MCP_OLAT];

	olat = value ? (olat | mask) : (olat & ~mask);
	mcp->cache[MCP_OLAT] = olat;

	return mcp->ops->write(mcp, MCP_OLAT, olat);
}
/* gpiolib callback: drive one output pin. */
static void mcp23s08_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);

	mutex_lock(&mcp->lock);
	__mcp23s08_set(mcp, 1 << offset, value);
	mutex_unlock(&mcp->lock);
}
/* gpiolib callback: set the output latch first, then switch the pin to
 * output, so it never glitches to the stale latch value. */
static int
mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct mcp23s08	*mcp = container_of(chip, struct mcp23s08, chip);
	unsigned mask = 1 << offset;
	int status;

	mutex_lock(&mcp->lock);
	status = __mcp23s08_set(mcp, mask, value);
	if (status == 0) {
		/* IODIR bit clear means output. */
		mcp->cache[MCP_IODIR] &= ~mask;
		status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
	}
	mutex_unlock(&mcp->lock);
	return status;
}
/*----------------------------------------------------------------------*/
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
/*
* This shows more info than the generic gpio dump code:
* pullups, deglitching, open drain drive.
*/
static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	struct mcp23s08	*mcp;
	char		bank;
	int		t;
	unsigned	mask;

	mcp = container_of(chip, struct mcp23s08, chip);

	/* NOTE: we only handle one bank for now ... */
	bank = '0' + ((mcp->addr >> 1) & 0x7);

	mutex_lock(&mcp->lock);
	/* Refresh the whole register cache in one burst read. */
	t = mcp->ops->read_regs(mcp, 0, mcp->cache, ARRAY_SIZE(mcp->cache));
	if (t < 0) {
		seq_printf(s, " I/O ERROR %d\n", t);
		goto done;
	}

	for (t = 0, mask = 1; t < chip->ngpio; t++, mask <<= 1) {
		const char	*label;

		label = gpiochip_is_requested(chip, t);
		if (!label)
			continue;

		/* NOTE(review): the GPPU ternary prints "up" when the
		 * pull-up bit is *clear* — this looks inverted relative to
		 * the register's enable sense; confirm against the
		 * MCP23S08 datasheet before relying on this output. */
		seq_printf(s, " gpio-%-3d P%c.%d (%-12s) %s %s %s",
			chip->base + t, bank, t, label,
			(mcp->cache[MCP_IODIR] & mask) ? "in " : "out",
			(mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo",
			(mcp->cache[MCP_GPPU] & mask) ? "  " : "up");
		/* NOTE:  ignoring the irq-related registers */
		seq_printf(s, "\n");
	}
done:
	mutex_unlock(&mcp->lock);
}
#else
#define mcp23s08_dbg_show NULL
#endif
/*----------------------------------------------------------------------*/
/*
 * Common per-chip setup shared by the SPI and I2C probe paths: pick the
 * accessor table for the chip type, sanity-configure IOCON, program the
 * pull-ups, prime the register cache and register with gpiolib.
 */
static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
			      void *data, unsigned addr,
			      unsigned type, unsigned base, unsigned pullups)
{
	int status;

	mutex_init(&mcp->lock);

	mcp->data = data;
	mcp->addr = addr;

	mcp->chip.direction_input = mcp23s08_direction_input;
	mcp->chip.get = mcp23s08_get;
	mcp->chip.direction_output = mcp23s08_direction_output;
	mcp->chip.set = mcp23s08_set;
	mcp->chip.dbg_show = mcp23s08_dbg_show;

	/* Select ops table, pin count and label by chip variant. */
	switch (type) {
#ifdef CONFIG_SPI_MASTER
	case MCP_TYPE_S08:
		mcp->ops = &mcp23s08_ops;
		mcp->chip.ngpio = 8;
		mcp->chip.label = "mcp23s08";
		break;

	case MCP_TYPE_S17:
		mcp->ops = &mcp23s17_ops;
		mcp->chip.ngpio = 16;
		mcp->chip.label = "mcp23s17";
		break;
#endif /* CONFIG_SPI_MASTER */

#ifdef CONFIG_I2C
	case MCP_TYPE_008:
		mcp->ops = &mcp23008_ops;
		mcp->chip.ngpio = 8;
		mcp->chip.label = "mcp23008";
		break;

	case MCP_TYPE_017:
		mcp->ops = &mcp23017_ops;
		mcp->chip.ngpio = 16;
		mcp->chip.label = "mcp23017";
		break;
#endif /* CONFIG_I2C */

	default:
		dev_err(dev, "invalid device type (%d)\n", type);
		return -EINVAL;
	}

	mcp->chip.base = base;
	mcp->chip.can_sleep = 1;
	mcp->chip.dev = dev;
	mcp->chip.owner = THIS_MODULE;

	/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
	 * and MCP_IOCON.HAEN = 1, so we work with all chips.
	 */
	status = mcp->ops->read(mcp, MCP_IOCON);
	if (status < 0)
		goto fail;
	if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN)) {
		/* mcp23s17 has IOCON twice, make sure they are in sync */
		status &= ~(IOCON_SEQOP | (IOCON_SEQOP << 8));
		status |= IOCON_HAEN | (IOCON_HAEN << 8);
		status = mcp->ops->write(mcp, MCP_IOCON, status);
		if (status < 0)
			goto fail;
	}

	/* configure ~100K pullups */
	status = mcp->ops->write(mcp, MCP_GPPU, pullups);
	if (status < 0)
		goto fail;

	/* Prime the full register cache from the hardware. */
	status = mcp->ops->read_regs(mcp, 0, mcp->cache, ARRAY_SIZE(mcp->cache));
	if (status < 0)
		goto fail;

	/* disable inverter on input */
	if (mcp->cache[MCP_IPOL] != 0) {
		mcp->cache[MCP_IPOL] = 0;
		status = mcp->ops->write(mcp, MCP_IPOL, 0);
		if (status < 0)
			goto fail;
	}

	/* disable irqs */
	if (mcp->cache[MCP_GPINTEN] != 0) {
		mcp->cache[MCP_GPINTEN] = 0;
		status = mcp->ops->write(mcp, MCP_GPINTEN, 0);
		if (status < 0)
			goto fail;
	}

	status = gpiochip_add(&mcp->chip);
fail:
	if (status < 0)
		dev_dbg(dev, "can't setup chip %d, --> %d\n",
			addr, status);
	return status;
}
/*----------------------------------------------------------------------*/
#ifdef CONFIG_I2C
/*
 * I2C probe: bind a single MCP23008/MCP23017 described by platform
 * data.  Only chip[0].pullups is meaningful on I2C (one bank per
 * client), and id->driver_data carries the MCP_TYPE_* variant code.
 */
static int __devinit mcp230xx_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	struct mcp23s08_platform_data *pdata = client->dev.platform_data;
	struct mcp23s08 *mcp;
	int status;

	if (!pdata || !gpio_is_valid(pdata->base)) {
		dev_dbg(&client->dev, "invalid or missing platform data\n");
		return -EINVAL;
	}

	mcp = kzalloc(sizeof(*mcp), GFP_KERNEL);
	if (!mcp)
		return -ENOMEM;

	status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
				    id->driver_data, pdata->base,
				    pdata->chip[0].pullups);
	if (status) {
		/* common setup failed; nothing was registered */
		kfree(mcp);
		return status;
	}

	i2c_set_clientdata(client, mcp);
	return 0;
}
/*
 * I2C remove: unregister the gpiochip; the state is freed only when
 * removal succeeds, since gpiochip_remove() can fail while GPIOs are
 * still requested.
 */
static int __devexit mcp230xx_remove(struct i2c_client *client)
{
	struct mcp23s08 *mcp = i2c_get_clientdata(client);
	int status = gpiochip_remove(&mcp->chip);

	if (!status)
		kfree(mcp);
	return status;
}
/* I2C devices this driver binds; driver_data is the MCP_TYPE_* code */
static const struct i2c_device_id mcp230xx_id[] = {
{ "mcp23008", MCP_TYPE_008 },
{ "mcp23017", MCP_TYPE_017 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
/* I2C driver glue for the mcp23008/mcp23017 variants */
static struct i2c_driver mcp230xx_driver = {
.driver = {
.name = "mcp230xx",
.owner = THIS_MODULE,
},
.probe = mcp230xx_probe,
.remove = __devexit_p(mcp230xx_remove),
.id_table = mcp230xx_id,
};
/* register the I2C half of the driver (stubbed out without CONFIG_I2C) */
static int __init mcp23s08_i2c_init(void)
{
return i2c_add_driver(&mcp230xx_driver);
}
/* unregister the I2C half of the driver */
static void mcp23s08_i2c_exit(void)
{
i2c_del_driver(&mcp230xx_driver);
}
#else
/* CONFIG_I2C disabled: no-op stubs keep the init/exit call sites clean */
static int __init mcp23s08_i2c_init(void) { return 0; }
static void mcp23s08_i2c_exit(void) { }
#endif /* CONFIG_I2C */
/*----------------------------------------------------------------------*/
#ifdef CONFIG_SPI_MASTER
/*
 * mcp23s08_probe - SPI probe: one spi_device (one chipselect) may carry
 * several banks distinguished by the chip's hardware-address pins, so
 * this iterates pdata->chip[] and sets up each bank marked present.
 * GPIO numbers are handed out contiguously starting at pdata->base.
 */
static int mcp23s08_probe(struct spi_device *spi)
{
struct mcp23s08_platform_data *pdata;
unsigned addr;
unsigned chips = 0;
struct mcp23s08_driver_data *data;
int status, type;
unsigned base;
type = spi_get_device_id(spi)->driver_data;
pdata = spi->dev.platform_data;
if (!pdata || !gpio_is_valid(pdata->base)) {
dev_dbg(&spi->dev, "invalid or missing platform data\n");
return -EINVAL;
}
/* first pass: count present banks and reject out-of-range addresses */
for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
if (!pdata->chip[addr].is_present)
continue;
chips++;
if ((type == MCP_TYPE_S08) && (addr > 3)) {
dev_err(&spi->dev,
"mcp23s08 only supports address 0..3\n");
return -EINVAL;
}
}
if (!chips)
return -ENODEV;
/* one allocation holds the driver data plus all per-bank state */
data = kzalloc(sizeof *data + chips * sizeof(struct mcp23s08),
GFP_KERNEL);
if (!data)
return -ENOMEM;
spi_set_drvdata(spi, data);
base = pdata->base;
/* second pass: chips counts down, assigning chip[] slots from the end */
for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
if (!pdata->chip[addr].is_present)
continue;
chips--;
data->mcp[addr] = &data->chip[chips];
/* 0x40 is the device opcode; addr fills the hw-address bits (2:1)
* -- see the MCP23S08 datasheet's addressing description */
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
0x40 | (addr << 1), type, base,
pdata->chip[addr].pullups);
if (status < 0)
goto fail;
base += (type == MCP_TYPE_S17) ? 16 : 8;
}
data->ngpio = base - pdata->base;
/* NOTE: these chips have a relatively sane IRQ framework, with
* per-signal masking and level/edge triggering. It's not yet
* handled here...
*/
return 0;
fail:
/*
* Unwind every bank set up so far.
* NOTE(review): if mcp23s08_probe_one() failed inside gpiochip_add(),
* data->mcp[addr] is already populated and gpiochip_remove() gets
* called on a chip that was never added -- confirm gpiochip_remove()
* tolerates that, or clear the slot on failure.
*/
for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
int tmp;
if (!data->mcp[addr])
continue;
tmp = gpiochip_remove(&data->mcp[addr]->chip);
if (tmp < 0)
dev_err(&spi->dev, "%s --> %d\n", "remove", tmp);
}
kfree(data);
return status;
}
/*
 * SPI remove: unregister every bank that was set up.  The shared
 * allocation is freed only when all gpiochips were removed cleanly;
 * otherwise the last error is returned and the state is kept.
 */
static int mcp23s08_remove(struct spi_device *spi)
{
	struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
	unsigned slot;
	int status = 0;

	for (slot = 0; slot < ARRAY_SIZE(data->mcp); slot++) {
		int err;

		if (!data->mcp[slot])
			continue;

		err = gpiochip_remove(&data->mcp[slot]->chip);
		if (err < 0) {
			dev_err(&spi->dev, "%s --> %d\n", "remove", err);
			status = err;
		}
	}

	if (status == 0)
		kfree(data);
	return status;
}
/* SPI devices this driver binds; driver_data is the MCP_TYPE_* code */
static const struct spi_device_id mcp23s08_ids[] = {
{ "mcp23s08", MCP_TYPE_S08 },
{ "mcp23s17", MCP_TYPE_S17 },
{ },
};
MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
/* SPI driver glue for the mcp23s08/mcp23s17 variants */
static struct spi_driver mcp23s08_driver = {
.probe = mcp23s08_probe,
.remove = mcp23s08_remove,
.id_table = mcp23s08_ids,
.driver = {
.name = "mcp23s08",
.owner = THIS_MODULE,
},
};
/* register the SPI half of the driver (stubbed out without CONFIG_SPI_MASTER) */
static int __init mcp23s08_spi_init(void)
{
return spi_register_driver(&mcp23s08_driver);
}
/* unregister the SPI half of the driver */
static void mcp23s08_spi_exit(void)
{
spi_unregister_driver(&mcp23s08_driver);
}
#else
/* CONFIG_SPI_MASTER disabled: no-op stubs keep init/exit call sites clean */
static int __init mcp23s08_spi_init(void) { return 0; }
static void mcp23s08_spi_exit(void) { }
#endif /* CONFIG_SPI_MASTER */
/*----------------------------------------------------------------------*/
/*
 * Module init: register both bus bindings.  If the I2C registration
 * fails, the already-registered SPI driver is rolled back so the load
 * either fully succeeds or leaves nothing registered.
 */
static int __init mcp23s08_init(void)
{
	int err;

	err = mcp23s08_spi_init();
	if (err)
		return err;

	err = mcp23s08_i2c_init();
	if (err) {
		mcp23s08_spi_exit();
		return err;
	}

	return 0;
}
/* register after spi/i2c postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(mcp23s08_init);
/* module unload: drop both bus bindings (each is a no-op if configured out) */
static void __exit mcp23s08_exit(void)
{
mcp23s08_spi_exit();
mcp23s08_i2c_exit();
}
module_exit(mcp23s08_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.