repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
uzairabdulmajeed/uam-kernel | arch/arm/mach-pxa/mxm8x10.c | 4231 | 10774 | /*
* linux/arch/arm/mach-pxa/mxm8x10.c
*
* Support for the Embedian MXM-8x10 Computer on Module
*
* Copyright (C) 2006 Marvell International Ltd.
* Copyright (C) 2009 Embedian Inc.
* Copyright (C) 2009 TMT Services & Supplies (Pty) Ltd.
*
* 2007-09-04: eric miao <eric.y.miao@gmail.com>
* rewrite to align with latest kernel
*
* 2010-01-09: Edwin Peer <epeer@tmtservices.co.za>
* Hennie van der Merwe <hvdmerwe@tmtservices.co.za>
* rework for upstream merge
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/serial_8250.h>
#include <linux/dm9000.h>
#include <linux/gpio.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/mmc-pxamci.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <mach/pxa320.h>
#include <mach/mxm8x10.h>
#include "devices.h"
#include "generic.h"
/* GPIO pin definition
External device stuff - Leave unconfigured for now...
---------------------
GPIO0 - DREQ (External DMA Request)
GPIO3 - nGCS2 (External Chip Select) Where is nGCS0; nGCS1; nGCS4; nGCS5 ?
GPIO4 - nGCS3
GPIO15 - EXT_GPIO1
GPIO16 - EXT_GPIO2
GPIO17 - EXT_GPIO3
GPIO24 - EXT_GPIO4
GPIO25 - EXT_GPIO5
GPIO26 - EXT_GPIO6
GPIO27 - EXT_GPIO7
GPIO28 - EXT_GPIO8
GPIO29 - EXT_GPIO9
GPIO30 - EXT_GPIO10
GPIO31 - EXT_GPIO11
GPIO57 - EXT_GPIO12
GPIO74 - EXT_IRQ1
GPIO75 - EXT_IRQ2
GPIO76 - EXT_IRQ3
GPIO77 - EXT_IRQ4
GPIO78 - EXT_IRQ5
GPIO79 - EXT_IRQ6
GPIO80 - EXT_IRQ7
GPIO81 - EXT_IRQ8
GPIO87 - VCCIO_PWREN (External Device PWREN)
Dallas 1-Wire - Leave unconfigured for now...
-------------
GPIO0_2 - DS - 1Wire
Ethernet
--------
GPIO1 - DM9000 PWR
GPIO9 - DM9K_nIRQ
GPIO36 - DM9K_RESET
Keypad - Leave unconfigured for now...
------
GPIO1_2 - KP_DKIN0
GPIO5_2 - KP_MKOUT7
GPIO82 - KP_DKIN1
GPIO85 - KP_DKIN2
GPIO86 - KP_DKIN3
GPIO113 - KP_MKIN0
GPIO114 - KP_MKIN1
GPIO115 - KP_MKIN2
GPIO116 - KP_MKIN3
GPIO117 - KP_MKIN4
GPIO118 - KP_MKIN5
GPIO119 - KP_MKIN6
GPIO120 - KP_MKIN7
GPIO121 - KP_MKOUT0
GPIO122 - KP_MKOUT1
GPIO123 - KP_MKOUT2
GPIO124 - KP_MKOUT3
GPIO125 - KP_MKOUT4
GPIO126 - KP_MKOUT5
GPIO127 - KP_MKOUT6
Data Bus - Leave unconfigured for now...
--------
GPIO2 - nWait (Data Bus)
USB Device
----------
GPIO4_2 - USBD_PULLUP
GPIO10 - UTM_CLK (USB Device UTM Clk)
GPIO49 - USB 2.0 Device UTM_DATA0
GPIO50 - USB 2.0 Device UTM_DATA1
GPIO51 - USB 2.0 Device UTM_DATA2
GPIO52 - USB 2.0 Device UTM_DATA3
GPIO53 - USB 2.0 Device UTM_DATA4
GPIO54 - USB 2.0 Device UTM_DATA5
GPIO55 - USB 2.0 Device UTM_DATA6
GPIO56 - USB 2.0 Device UTM_DATA7
GPIO58 - UTM_RXVALID (USB 2.0 Device)
GPIO59 - UTM_RXACTIVE (USB 2.0 Device)
GPIO60 - UTM_RXERROR
GPIO61 - UTM_OPMODE0
GPIO62 - UTM_OPMODE1
GPIO71 - USBD_INT (USB Device?)
GPIO73 - UTM_TXREADY (USB 2.0 Device)
GPIO83 - UTM_TXVALID (USB 2.0 Device)
GPIO98 - UTM_RESET (USB 2.0 device)
GPIO99 - UTM_XCVR_SELECT
GPIO100 - UTM_TERM_SELECT
GPIO101 - UTM_SUSPENDM_X
GPIO102 - UTM_LINESTATE0
GPIO103 - UTM_LINESTATE1
Card-Bus Interface - Leave unconfigured for now...
------------------
GPIO5 - nPIOR (I/O space output enable)
GPIO6 - nPIOW (I/O space write enable)
GPIO7 - nIOS16 (Input from I/O space telling size of data bus)
GPIO8 - nPWAIT (Input for inserting wait states)
LCD
---
GPIO6_2 - LDD0
GPIO7_2 - LDD1
GPIO8_2 - LDD2
GPIO9_2 - LDD3
GPIO11_2 - LDD5
GPIO12_2 - LDD6
GPIO13_2 - LDD7
GPIO14_2 - VSYNC
GPIO15_2 - HSYNC
GPIO16_2 - VCLK
GPIO17_2 - HCLK
GPIO18_2 - VDEN
GPIO63 - LDD8 (CPU LCD)
GPIO64 - LDD9 (CPU LCD)
GPIO65 - LDD10 (CPU LCD)
GPIO66 - LDD11 (CPU LCD)
GPIO67 - LDD12 (CPU LCD)
GPIO68 - LDD13 (CPU LCD)
GPIO69 - LDD14 (CPU LCD)
GPIO70 - LDD15 (CPU LCD)
GPIO88 - VCCLCD_PWREN (LCD Panel PWREN)
GPIO97 - BACKLIGHT_EN
GPIO104 - LCD_PWREN
PWM - Leave unconfigured for now...
---
GPIO11 - PWM0
GPIO12 - PWM1
GPIO13 - PWM2
GPIO14 - PWM3
SD-CARD
-------
GPIO18 - SDDATA0
GPIO19 - SDDATA1
GPIO20 - SDDATA2
GPIO21 - SDDATA3
GPIO22 - SDCLK
GPIO23 - SDCMD
GPIO72 - SD_WP
GPIO84 - SD_nIRQ_CD (SD-Card)
I2C
---
GPIO32 - I2CSCL
GPIO33 - I2CSDA
AC97
----
GPIO35 - AC97_SDATA_IN
GPIO37 - AC97_SDATA_OUT
GPIO38 - AC97_SYNC
GPIO39 - AC97_BITCLK
GPIO40 - AC97_nRESET
UART1
-----
GPIO41 - UART_RXD1
GPIO42 - UART_TXD1
GPIO43 - UART_CTS1
GPIO44 - UART_DCD1
GPIO45 - UART_DSR1
GPIO46 - UART_nRI1
GPIO47 - UART_DTR1
GPIO48 - UART_RTS1
UART2
-----
GPIO109 - RTS2
GPIO110 - RXD2
GPIO111 - TXD2
GPIO112 - nCTS2
UART3
-----
GPIO105 - nCTS3
GPIO106 - nRTS3
GPIO107 - TXD3
GPIO108 - RXD3
SSP3 - Leave unconfigured for now...
----
GPIO89 - SSP3_CLK
GPIO90 - SSP3_SFRM
GPIO91 - SSP3_TXD
GPIO92 - SSP3_RXD
SSP4
GPIO93 - SSP4_CLK
GPIO94 - SSP4_SFRM
GPIO95 - SSP4_TXD
GPIO96 - SSP4_RXD
*/
static mfp_cfg_t mfp_cfg[] __initdata = {
/* USB */
GPIO10_UTM_CLK,
GPIO49_U2D_PHYDATA_0,
GPIO50_U2D_PHYDATA_1,
GPIO51_U2D_PHYDATA_2,
GPIO52_U2D_PHYDATA_3,
GPIO53_U2D_PHYDATA_4,
GPIO54_U2D_PHYDATA_5,
GPIO55_U2D_PHYDATA_6,
GPIO56_U2D_PHYDATA_7,
GPIO58_UTM_RXVALID,
GPIO59_UTM_RXACTIVE,
GPIO60_U2D_RXERROR,
GPIO61_U2D_OPMODE0,
GPIO62_U2D_OPMODE1,
GPIO71_GPIO, /* USBD_INT */
GPIO73_UTM_TXREADY,
GPIO83_U2D_TXVALID,
GPIO98_U2D_RESET,
GPIO99_U2D_XCVR_SEL,
GPIO100_U2D_TERM_SEL,
GPIO101_U2D_SUSPEND,
GPIO102_UTM_LINESTATE_0,
GPIO103_UTM_LINESTATE_1,
GPIO4_2_GPIO | MFP_PULL_HIGH, /* UTM_PULLUP */
/* DM9000 */
GPIO1_GPIO,
GPIO9_GPIO,
GPIO36_GPIO,
/* AC97 */
GPIO35_AC97_SDATA_IN_0,
GPIO37_AC97_SDATA_OUT,
GPIO38_AC97_SYNC,
GPIO39_AC97_BITCLK,
GPIO40_AC97_nACRESET,
/* UARTS */
GPIO41_UART1_RXD,
GPIO42_UART1_TXD,
GPIO43_UART1_CTS,
GPIO44_UART1_DCD,
GPIO45_UART1_DSR,
GPIO46_UART1_RI,
GPIO47_UART1_DTR,
GPIO48_UART1_RTS,
GPIO109_UART2_RTS,
GPIO110_UART2_RXD,
GPIO111_UART2_TXD,
GPIO112_UART2_CTS,
GPIO105_UART3_CTS,
GPIO106_UART3_RTS,
GPIO107_UART3_TXD,
GPIO108_UART3_RXD,
GPIO78_GPIO,
GPIO79_GPIO,
GPIO80_GPIO,
GPIO81_GPIO,
/* I2C */
GPIO32_I2C_SCL,
GPIO33_I2C_SDA,
/* MMC */
GPIO18_MMC1_DAT0,
GPIO19_MMC1_DAT1,
GPIO20_MMC1_DAT2,
GPIO21_MMC1_DAT3,
GPIO22_MMC1_CLK,
GPIO23_MMC1_CMD,
GPIO72_GPIO | MFP_PULL_HIGH, /* Card Detect */
GPIO84_GPIO | MFP_PULL_LOW, /* Write Protect */
/* IRQ */
GPIO74_GPIO | MFP_LPM_EDGE_RISE, /* EXT_IRQ1 */
GPIO75_GPIO | MFP_LPM_EDGE_RISE, /* EXT_IRQ2 */
GPIO76_GPIO | MFP_LPM_EDGE_RISE, /* EXT_IRQ3 */
GPIO77_GPIO | MFP_LPM_EDGE_RISE, /* EXT_IRQ4 */
GPIO78_GPIO | MFP_LPM_EDGE_RISE, /* EXT_IRQ5 */
GPIO79_GPIO | MFP_LPM_EDGE_RISE, /* EXT_IRQ6 */
GPIO80_GPIO | MFP_LPM_EDGE_RISE, /* EXT_IRQ7 */
GPIO81_GPIO | MFP_LPM_EDGE_RISE /* EXT_IRQ8 */
};
/* MMC/MCI Support */
#if defined(CONFIG_MMC)
static struct pxamci_platform_data mxm_8x10_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.detect_delay_ms = 10,
.gpio_card_detect = MXM_8X10_SD_nCD,
.gpio_card_ro = MXM_8X10_SD_WP,
.gpio_power = -1
};
void __init mxm_8x10_mmc_init(void)
{
pxa_set_mci_info(&mxm_8x10_mci_platform_data);
}
#endif
/* USB Open Host Controller Interface */
static struct pxaohci_platform_data mxm_8x10_ohci_platform_data = {
.port_mode = PMM_NPS_MODE,
.flags = ENABLE_PORT_ALL
};
void __init mxm_8x10_usb_host_init(void)
{
pxa_set_ohci_info(&mxm_8x10_ohci_platform_data);
}
/* AC97 Sound Support */
static struct platform_device mxm_8x10_ac97_device = {
.name = "pxa2xx-ac97"
};
void __init mxm_8x10_ac97_init(void)
{
platform_device_register(&mxm_8x10_ac97_device);
}
/* NAND flash Support */
#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
#define NAND_BLOCK_SIZE SZ_128K
#define NB(x) (NAND_BLOCK_SIZE * (x))
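/*
 * Editorial note (not in the original source): NB(x) counts in 128 KiB
 * erase blocks, so in the partition table below NB(0x002) = 256 KiB for
 * "boot" and NB(0x010) = 2 MiB for "kernel"; the offsets are likewise
 * block counts from the start of the NAND device.
 */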
static struct mtd_partition mxm_8x10_nand_partitions[] = {
[0] = {
.name = "boot",
.size = NB(0x002),
.offset = NB(0x000),
.mask_flags = MTD_WRITEABLE
},
[1] = {
.name = "kernel",
.size = NB(0x010),
.offset = NB(0x002),
.mask_flags = MTD_WRITEABLE
},
[2] = {
.name = "root",
.size = NB(0x36c),
.offset = NB(0x012)
},
[3] = {
.name = "bbt",
.size = NB(0x082),
.offset = NB(0x37e),
.mask_flags = MTD_WRITEABLE
}
};
static struct pxa3xx_nand_platform_data mxm_8x10_nand_info = {
.enable_arbiter = 1,
.keep_config = 1,
.num_cs = 1,
.parts[0] = mxm_8x10_nand_partitions,
.nr_parts[0] = ARRAY_SIZE(mxm_8x10_nand_partitions)
};
static void __init mxm_8x10_nand_init(void)
{
pxa3xx_set_nand_info(&mxm_8x10_nand_info);
}
#else
static inline void mxm_8x10_nand_init(void) {}
#endif /* CONFIG_MTD_NAND_PXA3xx || CONFIG_MTD_NAND_PXA3xx_MODULE */
/* Ethernet support: Davicom DM9000 */
static struct resource dm9k_resources[] = {
[0] = {
.start = MXM_8X10_ETH_PHYS + 0x300,
.end = MXM_8X10_ETH_PHYS + 0x300,
.flags = IORESOURCE_MEM
},
[1] = {
.start = MXM_8X10_ETH_PHYS + 0x308,
.end = MXM_8X10_ETH_PHYS + 0x308,
.flags = IORESOURCE_MEM
},
[2] = {
.start = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO9)),
.end = PXA_GPIO_TO_IRQ(mfp_to_gpio(MFP_PIN_GPIO9)),
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE
}
};
static struct dm9000_plat_data dm9k_plat_data = {
.flags = DM9000_PLATF_16BITONLY
};
static struct platform_device dm9k_device = {
.name = "dm9000",
.id = 0,
.num_resources = ARRAY_SIZE(dm9k_resources),
.resource = dm9k_resources,
.dev = {
.platform_data = &dm9k_plat_data
}
};
static void __init mxm_8x10_ethernet_init(void)
{
platform_device_register(&dm9k_device);
}
/* PXA UARTs */
static void __init mxm_8x10_uarts_init(void)
{
pxa_set_ffuart_info(NULL);
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
}
/* I2C and Real Time Clock */
static struct i2c_board_info __initdata mxm_8x10_i2c_devices[] = {
{
I2C_BOARD_INFO("ds1337", 0x68)
}
};
static void __init mxm_8x10_i2c_init(void)
{
i2c_register_board_info(0, mxm_8x10_i2c_devices,
ARRAY_SIZE(mxm_8x10_i2c_devices));
pxa_set_i2c_info(NULL);
}
void __init mxm_8x10_barebones_init(void)
{
pxa3xx_mfp_config(ARRAY_AND_SIZE(mfp_cfg));
mxm_8x10_uarts_init();
mxm_8x10_nand_init();
mxm_8x10_i2c_init();
mxm_8x10_ethernet_init();
}
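/*
 * Editorial usage sketch (hypothetical carrier-board code, not part of
 * this file): a board built around the module would typically call the
 * barebones init plus whichever optional helpers its hardware needs:
 *
 *	static void __init my_board_init(void)	// assumed board hook
 *	{
 *		mxm_8x10_barebones_init();
 *		mxm_8x10_mmc_init();
 *		mxm_8x10_usb_host_init();
 *		mxm_8x10_ac97_init();
 *	}
 */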
| gpl-2.0 |
XileForce/Vindicator-S6-Test | drivers/media/pci/ivtv/ivtv-vbi.c | 4743 | 14876 | /*
Vertical Blank Interval support functions
Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "ivtv-driver.h"
#include "ivtv-i2c.h"
#include "ivtv-ioctl.h"
#include "ivtv-queue.h"
#include "ivtv-cards.h"
#include "ivtv-vbi.h"
static void ivtv_set_vps(struct ivtv *itv, int enabled)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_VPS;
data.field = 0;
data.line = enabled ? 16 : 0;
data.data[2] = itv->vbi.vps_payload.data[0];
data.data[8] = itv->vbi.vps_payload.data[1];
data.data[9] = itv->vbi.vps_payload.data[2];
data.data[10] = itv->vbi.vps_payload.data[3];
data.data[11] = itv->vbi.vps_payload.data[4];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_cc(struct ivtv *itv, int mode, const struct vbi_cc *cc)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
data.line = (mode & 1) ? 21 : 0;
data.data[0] = cc->odd[0];
data.data[1] = cc->odd[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
data.field = 1;
data.line = (mode & 2) ? 21 : 0;
data.data[0] = cc->even[0];
data.data[1] = cc->even[1];
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
{
struct v4l2_sliced_vbi_data data;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return;
/* When using a 50 Hz system, always turn on the
wide screen signal with 4x3 ratio as the default.
Turning this signal on and off can confuse certain
TVs. As far as I can tell there is no reason not to
transmit this signal. */
if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
enabled = 1;
mode = 0x08; /* 4x3 full format */
}
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
data.line = enabled ? 23 : 0;
data.data[0] = mode & 0xff;
data.data[1] = (mode >> 8) & 0xff;
ivtv_call_hw(itv, IVTV_HW_SAA7127, vbi, s_vbi_data, &data);
}
static int odd_parity(u8 c)
{
c ^= (c >> 4);
c ^= (c >> 2);
c ^= (c >> 1);
return c & 1;
}
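/*
 * Editorial note (not in the original source): odd_parity() folds the
 * byte onto itself so that bit 0 ends up as the XOR of all eight bits,
 * i.e. it returns 1 when the byte already has an odd number of set bits.
 * ivtv_convert_ivtv_vbi() below uses it to validate closed-caption
 * bytes, e.g.:
 *
 *	err = !odd_parity(p[1]) || !odd_parity(p[2]);
 */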
static void ivtv_write_vbi_line(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *d,
struct vbi_cc *cc, int *found_cc)
{
struct vbi_info *vi = &itv->vbi;
if (d->id == V4L2_SLICED_CAPTION_525 && d->line == 21) {
if (d->field) {
cc->even[0] = d->data[0];
cc->even[1] = d->data[1];
} else {
cc->odd[0] = d->data[0];
cc->odd[1] = d->data[1];
}
*found_cc = 1;
} else if (d->id == V4L2_SLICED_VPS && d->line == 16 && d->field == 0) {
struct vbi_vps vps;
vps.data[0] = d->data[2];
vps.data[1] = d->data[8];
vps.data[2] = d->data[9];
vps.data[3] = d->data[10];
vps.data[4] = d->data[11];
if (memcmp(&vps, &vi->vps_payload, sizeof(vps))) {
vi->vps_payload = vps;
set_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags);
}
} else if (d->id == V4L2_SLICED_WSS_625 &&
d->line == 23 && d->field == 0) {
int wss = d->data[0] | d->data[1] << 8;
if (vi->wss_payload != wss) {
vi->wss_payload = wss;
set_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags);
}
}
}
static void ivtv_write_vbi_cc_lines(struct ivtv *itv, const struct vbi_cc *cc)
{
struct vbi_info *vi = &itv->vbi;
if (vi->cc_payload_idx < ARRAY_SIZE(vi->cc_payload)) {
memcpy(&vi->cc_payload[vi->cc_payload_idx], cc,
sizeof(struct vbi_cc));
vi->cc_payload_idx++;
set_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
}
}
static void ivtv_write_vbi(struct ivtv *itv,
const struct v4l2_sliced_vbi_data *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
for (i = 0; i < cnt; i++)
ivtv_write_vbi_line(itv, sliced + i, &cc, &found_cc);
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
}
ssize_t
ivtv_write_vbi_from_user(struct ivtv *itv,
const struct v4l2_sliced_vbi_data __user *sliced,
size_t cnt)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
int found_cc = 0;
size_t i;
struct v4l2_sliced_vbi_data d;
ssize_t ret = cnt * sizeof(struct v4l2_sliced_vbi_data);
for (i = 0; i < cnt; i++) {
if (copy_from_user(&d, sliced + i,
sizeof(struct v4l2_sliced_vbi_data))) {
ret = -EFAULT;
break;
}
ivtv_write_vbi_line(itv, &d, &cc, &found_cc);
}
if (found_cc)
ivtv_write_vbi_cc_lines(itv, &cc);
return ret;
}
static void copy_vbi_data(struct ivtv *itv, int lines, u32 pts_stamp)
{
int line = 0;
int i;
u32 linemask[2] = { 0, 0 };
unsigned short size;
static const u8 mpeg_hdr_data[] = {
0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66,
0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff,
0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80,
0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff
};
const int sd = sizeof(mpeg_hdr_data); /* start of vbi data */
int idx = itv->vbi.frame % IVTV_VBI_FRAMES;
u8 *dst = &itv->vbi.sliced_mpeg_data[idx][0];
for (i = 0; i < lines; i++) {
int f, l;
if (itv->vbi.sliced_data[i].id == 0)
continue;
l = itv->vbi.sliced_data[i].line - 6;
f = itv->vbi.sliced_data[i].field;
if (f)
l += 18;
if (l < 32)
linemask[0] |= (1 << l);
else
linemask[1] |= (1 << (l - 32));
dst[sd + 12 + line * 43] =
ivtv_service2vbi(itv->vbi.sliced_data[i].id);
memcpy(dst + sd + 12 + line * 43 + 1, itv->vbi.sliced_data[i].data, 42);
line++;
}
memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
if (line == 36) {
/* All lines are used, so there is no space for the linemask
(the max size of the VBI data is 36 * 43 + 4 bytes).
So in this case we use the magic number 'ITV0'. */
memcpy(dst + sd, "ITV0", 4);
memmove(dst + sd + 4, dst + sd + 12, line * 43);
size = 4 + ((43 * line + 3) & ~3);
} else {
memcpy(dst + sd, "itv0", 4);
cpu_to_le32s(&linemask[0]);
cpu_to_le32s(&linemask[1]);
memcpy(dst + sd + 4, &linemask[0], 8);
size = 12 + ((43 * line + 3) & ~3);
}
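/*
 * Editorial note (not in the original source): the stores below first
 * patch the 16-bit PES packet length at offset 20, then encode the
 * 33-bit PTS in the standard MPEG-2 PES layout — a '0010' prefix and
 * the PTS split 3/15/15, each group followed by a marker bit of 1.
 */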
dst[4+16] = (size + 10) >> 8;
dst[5+16] = (size + 10) & 0xff;
dst[9+16] = 0x21 | ((pts_stamp >> 29) & 0x6);
dst[10+16] = (pts_stamp >> 22) & 0xff;
dst[11+16] = 1 | ((pts_stamp >> 14) & 0xff);
dst[12+16] = (pts_stamp >> 7) & 0xff;
dst[13+16] = 1 | ((pts_stamp & 0x7f) << 1);
itv->vbi.sliced_mpeg_size[idx] = sd + size;
}
static int ivtv_convert_ivtv_vbi(struct ivtv *itv, u8 *p)
{
u32 linemask[2];
int i, l, id2;
int line = 0;
if (!memcmp(p, "itv0", 4)) {
memcpy(linemask, p + 4, 8);
p += 12;
} else if (!memcmp(p, "ITV0", 4)) {
linemask[0] = 0xffffffff;
linemask[1] = 0xf;
p += 4;
} else {
/* unknown VBI data, convert to empty VBI frame */
linemask[0] = linemask[1] = 0;
}
for (i = 0; i < 36; i++) {
int err = 0;
if (i < 32 && !(linemask[0] & (1 << i)))
continue;
if (i >= 32 && !(linemask[1] & (1 << (i - 32))))
continue;
id2 = *p & 0xf;
switch (id2) {
case IVTV_SLICED_TYPE_TELETEXT_B:
id2 = V4L2_SLICED_TELETEXT_B;
break;
case IVTV_SLICED_TYPE_CAPTION_525:
id2 = V4L2_SLICED_CAPTION_525;
err = !odd_parity(p[1]) || !odd_parity(p[2]);
break;
case IVTV_SLICED_TYPE_VPS:
id2 = V4L2_SLICED_VPS;
break;
case IVTV_SLICED_TYPE_WSS_625:
id2 = V4L2_SLICED_WSS_625;
break;
default:
id2 = 0;
break;
}
if (err == 0) {
l = (i < 18) ? i + 6 : i - 18 + 6;
itv->vbi.sliced_dec_data[line].line = l;
itv->vbi.sliced_dec_data[line].field = i >= 18;
itv->vbi.sliced_dec_data[line].id = id2;
memcpy(itv->vbi.sliced_dec_data[line].data, p + 1, 42);
line++;
}
p += 43;
}
while (line < 36) {
itv->vbi.sliced_dec_data[line].id = 0;
itv->vbi.sliced_dec_data[line].line = 0;
itv->vbi.sliced_dec_data[line].field = 0;
line++;
}
return line * sizeof(itv->vbi.sliced_dec_data[0]);
}
/* Compress the raw VBI format: removes the leading SAV codes and surplus
space after the field.
Returns the new compressed size. */
static u32 compress_raw_buf(struct ivtv *itv, u8 *buf, u32 size)
{
u32 line_size = itv->vbi.raw_decoder_line_size;
u32 lines = itv->vbi.count;
u8 sav1 = itv->vbi.raw_decoder_sav_odd_field;
u8 sav2 = itv->vbi.raw_decoder_sav_even_field;
u8 *q = buf;
u8 *p;
int i;
for (i = 0; i < lines; i++) {
p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || (p[3] != sav1 && p[3] != sav2)) {
break;
}
memcpy(q, p + 4, line_size - 4);
q += line_size - 4;
}
return lines * (line_size - 4);
}
/* Compress the sliced VBI format: all found sliced blocks are put next to
one another.
Returns the new compressed size. */
static u32 compress_sliced_buf(struct ivtv *itv, u32 line, u8 *buf, u32 size, u8 sav)
{
u32 line_size = itv->vbi.sliced_decoder_line_size;
struct v4l2_decode_vbi_line vbi;
int i;
unsigned lines = 0;
/* find the first valid line */
for (i = 0; i < size; i++, buf++) {
if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav)
break;
}
size -= i;
if (size < line_size) {
return line;
}
for (i = 0; i < size / line_size; i++) {
u8 *p = buf + i * line_size;
/* Look for SAV code */
if (p[0] != 0xff || p[1] || p[2] || p[3] != sav) {
continue;
}
vbi.p = p + 4;
v4l2_subdev_call(itv->sd_video, vbi, decode_vbi_line, &vbi);
if (vbi.type && !(lines & (1 << vbi.line))) {
lines |= 1 << vbi.line;
itv->vbi.sliced_data[line].id = vbi.type;
itv->vbi.sliced_data[line].field = vbi.is_second_field;
itv->vbi.sliced_data[line].line = vbi.line;
memcpy(itv->vbi.sliced_data[line].data, vbi.p, 42);
line++;
}
}
return line;
}
void ivtv_process_vbi_data(struct ivtv *itv, struct ivtv_buffer *buf,
u64 pts_stamp, int streamtype)
{
u8 *p = (u8 *) buf->buf;
u32 size = buf->bytesused;
int y;
/* Raw VBI data */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI && ivtv_raw_vbi(itv)) {
u8 type;
ivtv_buf_swap(buf);
type = p[3];
size = buf->bytesused = compress_raw_buf(itv, p, size);
/* second field of the frame? */
if (type == itv->vbi.raw_decoder_sav_even_field) {
/* Dirty hack needed for backwards
compatibility of old VBI software. */
p += size - 4;
memcpy(p, &itv->vbi.frame, 4);
itv->vbi.frame++;
}
return;
}
/* Sliced VBI data with data insertion */
if (streamtype == IVTV_ENC_STREAM_TYPE_VBI) {
int lines;
ivtv_buf_swap(buf);
/* first field */
lines = compress_sliced_buf(itv, 0, p, size / 2,
itv->vbi.sliced_decoder_sav_odd_field);
/* second field */
/* experimentation shows that the second half does not always begin
at the exact address. So start a bit earlier (hence 32). */
lines = compress_sliced_buf(itv, lines, p + size / 2 - 32, size / 2 + 32,
itv->vbi.sliced_decoder_sav_even_field);
/* always return at least one empty line */
if (lines == 0) {
itv->vbi.sliced_data[0].id = 0;
itv->vbi.sliced_data[0].line = 0;
itv->vbi.sliced_data[0].field = 0;
lines = 1;
}
buf->bytesused = size = lines * sizeof(itv->vbi.sliced_data[0]);
memcpy(p, &itv->vbi.sliced_data[0], size);
if (itv->vbi.insert_mpeg) {
copy_vbi_data(itv, lines, pts_stamp);
}
itv->vbi.frame++;
return;
}
/* Sliced VBI re-inserted from an MPEG stream */
if (streamtype == IVTV_DEC_STREAM_TYPE_VBI) {
/* If the size is not 4-byte aligned, then the starting address
for the swapping is also shifted. After swapping the data the
real start address of the VBI data is exactly 4 bytes after the
original start. It's a bit fiddly but it works like a charm.
Non-4-byte alignment happens when an lseek is done on the input
mpeg file to a non-4-byte aligned position. So on arrival here
the VBI data is also non-4-byte aligned. */
int offset = size & 3;
int cnt;
if (offset) {
p += 4 - offset;
}
/* Swap Buffer */
for (y = 0; y < size; y += 4) {
swab32s((u32 *)(p + y));
}
cnt = ivtv_convert_ivtv_vbi(itv, p + offset);
memcpy(buf->buf, itv->vbi.sliced_dec_data, cnt);
buf->bytesused = cnt;
ivtv_write_vbi(itv, itv->vbi.sliced_dec_data,
cnt / sizeof(itv->vbi.sliced_dec_data[0]));
return;
}
}
void ivtv_disable_cc(struct ivtv *itv)
{
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 0, &cc);
itv->vbi.cc_payload_idx = 0;
}
void ivtv_vbi_work_handler(struct ivtv *itv)
{
struct vbi_info *vi = &itv->vbi;
struct v4l2_sliced_vbi_data data;
struct vbi_cc cc = { .odd = { 0x80, 0x80 }, .even = { 0x80, 0x80 } };
/* Lock */
if (itv->output_mode == OUT_PASSTHROUGH) {
if (itv->is_50hz) {
data.id = V4L2_SLICED_WSS_625;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
ivtv_set_wss(itv, 1, data.data[0] & 0xf);
vi->wss_missing_cnt = 0;
} else if (vi->wss_missing_cnt == 4) {
ivtv_set_wss(itv, 1, 0x8); /* 4x3 full format */
} else {
vi->wss_missing_cnt++;
}
}
else {
int mode = 0;
data.id = V4L2_SLICED_CAPTION_525;
data.field = 0;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 1;
cc.odd[0] = data.data[0];
cc.odd[1] = data.data[1];
}
data.field = 1;
if (v4l2_subdev_call(itv->sd_video, vbi, g_vbi_data, &data) == 0) {
mode |= 2;
cc.even[0] = data.data[0];
cc.even[1] = data.data[1];
}
if (mode) {
vi->cc_missing_cnt = 0;
ivtv_set_cc(itv, mode, &cc);
} else if (vi->cc_missing_cnt == 4) {
ivtv_set_cc(itv, 0, &cc);
} else {
vi->cc_missing_cnt++;
}
}
return;
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags)) {
ivtv_set_wss(itv, 1, vi->wss_payload & 0xf);
}
if (test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags)) {
if (vi->cc_payload_idx == 0) {
clear_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags);
ivtv_set_cc(itv, 3, &cc);
}
while (vi->cc_payload_idx) {
cc = vi->cc_payload[0];
memmove(vi->cc_payload, vi->cc_payload + 1,
sizeof(vi->cc_payload) - sizeof(vi->cc_payload[0]));
vi->cc_payload_idx--;
if (vi->cc_payload_idx && cc.odd[0] == 0x80 && cc.odd[1] == 0x80)
continue;
ivtv_set_cc(itv, 3, &cc);
break;
}
}
if (test_and_clear_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags)) {
ivtv_set_vps(itv, 1);
}
}
| gpl-2.0 |
g7755725/Fitsugly | drivers/regulator/tps6586x-regulator.c | 4743 | 12257 | /*
* Regulator driver for TI TPS6586x
*
* Copyright (C) 2010 Compulab Ltd.
* Author: Mike Rapoport <mike@compulab.co.il>
*
* Based on da903x
* Copyright (C) 2006-2008 Marvell International Ltd.
* Copyright (C) 2008 Compulab Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/tps6586x.h>
/* supply control and voltage setting */
#define TPS6586X_SUPPLYENA 0x10
#define TPS6586X_SUPPLYENB 0x11
#define TPS6586X_SUPPLYENC 0x12
#define TPS6586X_SUPPLYEND 0x13
#define TPS6586X_SUPPLYENE 0x14
#define TPS6586X_VCC1 0x20
#define TPS6586X_VCC2 0x21
#define TPS6586X_SM1V1 0x23
#define TPS6586X_SM1V2 0x24
#define TPS6586X_SM1SL 0x25
#define TPS6586X_SM0V1 0x26
#define TPS6586X_SM0V2 0x27
#define TPS6586X_SM0SL 0x28
#define TPS6586X_LDO2AV1 0x29
#define TPS6586X_LDO2AV2 0x2A
#define TPS6586X_LDO2BV1 0x2F
#define TPS6586X_LDO2BV2 0x30
#define TPS6586X_LDO4V1 0x32
#define TPS6586X_LDO4V2 0x33
/* converter settings */
#define TPS6586X_SUPPLYV1 0x41
#define TPS6586X_SUPPLYV2 0x42
#define TPS6586X_SUPPLYV3 0x43
#define TPS6586X_SUPPLYV4 0x44
#define TPS6586X_SUPPLYV5 0x45
#define TPS6586X_SUPPLYV6 0x46
#define TPS6586X_SMODE1 0x47
#define TPS6586X_SMODE2 0x48
struct tps6586x_regulator {
struct regulator_desc desc;
int volt_reg;
int volt_shift;
int volt_nbits;
int enable_bit[2];
int enable_reg[2];
int *voltages;
/* for DVM regulators */
int go_reg;
int go_bit;
};
static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
{
return rdev_get_dev(rdev)->parent->parent;
}
static int tps6586x_ldo_list_voltage(struct regulator_dev *rdev,
unsigned selector)
{
struct tps6586x_regulator *info = rdev_get_drvdata(rdev);
int rid = rdev_get_id(rdev);
/* LDO0 has minimal voltage 1.2V rather than 1.25V */
if ((rid == TPS6586X_ID_LDO_0) && (selector == 0))
return (info->voltages[0] - 50) * 1000;
return info->voltages[selector] * 1000;
}
static int __tps6586x_ldo_set_voltage(struct device *parent,
struct tps6586x_regulator *ri,
int min_uV, int max_uV,
unsigned *selector)
{
int val, uV;
uint8_t mask;
for (val = 0; val < ri->desc.n_voltages; val++) {
uV = ri->voltages[val] * 1000;
/* LDO0 has minimal voltage 1.2 rather than 1.25 */
if (ri->desc.id == TPS6586X_ID_LDO_0 && val == 0)
uV -= 50 * 1000;
/* use the first in-range value */
if (min_uV <= uV && uV <= max_uV) {
*selector = val;
val <<= ri->volt_shift;
mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
return tps6586x_update(parent, ri->volt_reg, val, mask);
}
}
return -EINVAL;
}
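/*
 * Editorial worked example (not in the original driver): for LDO0 the
 * table below uses volt_shift = 5 and volt_nbits = 3, so selecting
 * index 2 (1.8 V) computes val = 2 << 5 = 0x40 and
 * mask = ((1 << 3) - 1) << 5 = 0xe0, updating only bits 7:5 of
 * TPS6586X_SUPPLYV1.
 */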
static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
selector);
}
static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
uint8_t val, mask;
int ret;
ret = tps6586x_read(parent, ri->volt_reg, &val);
if (ret)
return ret;
mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
val = (val & mask) >> ri->volt_shift;
if (val >= ri->desc.n_voltages)
BUG();
return ri->voltages[val] * 1000;
}
static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
int ret;
ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
selector);
if (ret)
return ret;
return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
}
static int tps6586x_regulator_enable(struct regulator_dev *rdev)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
return tps6586x_set_bits(parent, ri->enable_reg[0],
1 << ri->enable_bit[0]);
}
static int tps6586x_regulator_disable(struct regulator_dev *rdev)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
return tps6586x_clr_bits(parent, ri->enable_reg[0],
1 << ri->enable_bit[0]);
}
static int tps6586x_regulator_is_enabled(struct regulator_dev *rdev)
{
struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
struct device *parent = to_tps6586x_dev(rdev);
uint8_t reg_val;
int ret;
ret = tps6586x_read(parent, ri->enable_reg[0], &reg_val);
if (ret)
return ret;
return !!(reg_val & (1 << ri->enable_bit[0]));
}
static struct regulator_ops tps6586x_regulator_ldo_ops = {
.list_voltage = tps6586x_ldo_list_voltage,
.get_voltage = tps6586x_ldo_get_voltage,
.set_voltage = tps6586x_ldo_set_voltage,
.is_enabled = tps6586x_regulator_is_enabled,
.enable = tps6586x_regulator_enable,
.disable = tps6586x_regulator_disable,
};
static struct regulator_ops tps6586x_regulator_dvm_ops = {
.list_voltage = tps6586x_ldo_list_voltage,
.get_voltage = tps6586x_ldo_get_voltage,
.set_voltage = tps6586x_dvm_set_voltage,
.is_enabled = tps6586x_regulator_is_enabled,
.enable = tps6586x_regulator_enable,
.disable = tps6586x_regulator_disable,
};
static int tps6586x_ldo_voltages[] = {
1250, 1500, 1800, 2500, 2700, 2850, 3100, 3300,
};
static int tps6586x_ldo4_voltages[] = {
1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
1900, 1925, 1950, 1975, 2000, 2025, 2050, 2075,
2100, 2125, 2150, 2175, 2200, 2225, 2250, 2275,
2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
};
static int tps6586x_sm2_voltages[] = {
3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350,
3400, 3450, 3500, 3550, 3600, 3650, 3700, 3750,
3800, 3850, 3900, 3950, 4000, 4050, 4100, 4150,
4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550,
};
static int tps6586x_dvm_voltages[] = {
725, 750, 775, 800, 825, 850, 875, 900,
925, 950, 975, 1000, 1025, 1050, 1075, 1100,
1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
};
#define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
.desc = { \
.name = "REG-" #_id, \
.ops = &tps6586x_regulator_##_ops, \
.type = REGULATOR_VOLTAGE, \
.id = TPS6586X_ID_##_id, \
.n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \
.owner = THIS_MODULE, \
}, \
.volt_reg = TPS6586X_##vreg, \
.volt_shift = (shift), \
.volt_nbits = (nbits), \
.enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
.enable_bit[1] = (ebit1), \
.voltages = tps6586x_##vdata##_voltages,
#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
.go_reg = TPS6586X_##goreg, \
.go_bit = (gobit),
#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
{ \
TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
}
#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
{ \
TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
}
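/*
 * Editorial note (not in the original driver): the macros above flatten
 * one table entry into a struct initializer. For instance,
 * TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0) expands to a
 * regulator named "REG-LDO_0" using tps6586x_regulator_ldo_ops, with its
 * 3-bit voltage field at bits 7:5 of TPS6586X_SUPPLYV1 and enable bit 0
 * in TPS6586X_SUPPLYENC / TPS6586X_SUPPLYEND.
 */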
static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0),
TPS6586X_LDO(LDO_3, ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6),
TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6),
TPS6586X_DVM(SM_0, dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
TPS6586X_DVM(SM_1, dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
};
/*
* TPS6586X has 2 enable bits that are OR'ed to determine the actual
* regulator state. Clearing one of these bits allows switching the
* regulator on and off with a single register write.
*/
static inline int tps6586x_regulator_preinit(struct device *parent,
struct tps6586x_regulator *ri)
{
uint8_t val1, val2;
int ret;
if (ri->enable_reg[0] == ri->enable_reg[1] &&
ri->enable_bit[0] == ri->enable_bit[1])
return 0;
ret = tps6586x_read(parent, ri->enable_reg[0], &val1);
if (ret)
return ret;
ret = tps6586x_read(parent, ri->enable_reg[1], &val2);
if (ret)
return ret;
if (!(val2 & (1 << ri->enable_bit[1])))
return 0;
/*
* The regulator is on, but it's enabled with the bit we don't
* want to use, so we switch the enable bits
*/
if (!(val1 & (1 << ri->enable_bit[0]))) {
ret = tps6586x_set_bits(parent, ri->enable_reg[0],
1 << ri->enable_bit[0]);
if (ret)
return ret;
}
return tps6586x_clr_bits(parent, ri->enable_reg[1],
1 << ri->enable_bit[1]);
}
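/*
 * Editorial summary (not in the original driver) of the preinit cases,
 * writing the state as (bit0, bit1):
 *	(x,0) -> left alone, bit1 is already clear
 *	(0,1) -> bit0 set first, then bit1 cleared (regulator stays on)
 *	(1,1) -> bit1 cleared
 * After preinit only enable_bit[0] reflects the regulator state.
 */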
static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev)
{
struct device *parent = pdev->dev.parent;
struct regulator_init_data *p = pdev->dev.platform_data;
struct tps6586x_settings *setting = p->driver_data;
uint8_t reg;
if (setting == NULL)
return 0;
if (!(setting->slew_rate & TPS6586X_SLEW_RATE_SET))
return 0;
/* only SM0 and SM1 can have the slew rate settings */
switch (pdev->id) {
case TPS6586X_ID_SM_0:
reg = TPS6586X_SM0SL;
break;
case TPS6586X_ID_SM_1:
reg = TPS6586X_SM1SL;
break;
default:
dev_warn(&pdev->dev, "Only SM0/SM1 can set slew rate\n");
return -EINVAL;
}
return tps6586x_write(parent, reg,
setting->slew_rate & TPS6586X_SLEW_RATE_MASK);
}
static inline struct tps6586x_regulator *find_regulator_info(int id)
{
struct tps6586x_regulator *ri;
int i;
for (i = 0; i < ARRAY_SIZE(tps6586x_regulator); i++) {
ri = &tps6586x_regulator[i];
if (ri->desc.id == id)
return ri;
}
return NULL;
}
static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
{
struct tps6586x_regulator *ri = NULL;
struct regulator_dev *rdev;
int id = pdev->id;
int err;
dev_dbg(&pdev->dev, "Probing regulator %d\n", id);
ri = find_regulator_info(id);
if (ri == NULL) {
dev_err(&pdev->dev, "invalid regulator ID specified\n");
return -EINVAL;
}
err = tps6586x_regulator_preinit(pdev->dev.parent, ri);
if (err)
return err;
rdev = regulator_register(&ri->desc, &pdev->dev,
pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
return PTR_ERR(rdev);
}
platform_set_drvdata(pdev, rdev);
return tps6586x_regulator_set_slew_rate(pdev);
}
static int __devexit tps6586x_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
regulator_unregister(rdev);
return 0;
}
static struct platform_driver tps6586x_regulator_driver = {
.driver = {
.name = "tps6586x-regulator",
.owner = THIS_MODULE,
},
.probe = tps6586x_regulator_probe,
.remove = __devexit_p(tps6586x_regulator_remove),
};
static int __init tps6586x_regulator_init(void)
{
return platform_driver_register(&tps6586x_regulator_driver);
}
subsys_initcall(tps6586x_regulator_init);
static void __exit tps6586x_regulator_exit(void)
{
platform_driver_unregister(&tps6586x_regulator_driver);
}
module_exit(tps6586x_regulator_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_DESCRIPTION("Regulator Driver for TI TPS6586X PMIC");
MODULE_ALIAS("platform:tps6586x-regulator");
| gpl-2.0 |
Frontier314/frontkernel35 | fs/reiserfs/prints.c | 7303 | 21190 | /*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
#include <linux/time.h>
#include <linux/fs.h>
#include "reiserfs.h"
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <stdarg.h>
static char error_buf[1024];
static char fmt_buf[1024];
static char off_buf[80];
static char *reiserfs_cpu_offset(struct cpu_key *key)
{
if (cpu_key_k_type(key) == TYPE_DIRENTRY)
sprintf(off_buf, "%Lu(%Lu)",
(unsigned long long)
GET_HASH_VALUE(cpu_key_k_offset(key)),
(unsigned long long)
GET_GENERATION_NUMBER(cpu_key_k_offset(key)));
else
sprintf(off_buf, "0x%Lx",
(unsigned long long)cpu_key_k_offset(key));
return off_buf;
}
static char *le_offset(struct reiserfs_key *key)
{
int version;
version = le_key_version(key);
if (le_key_k_type(version, key) == TYPE_DIRENTRY)
sprintf(off_buf, "%Lu(%Lu)",
(unsigned long long)
GET_HASH_VALUE(le_key_k_offset(version, key)),
(unsigned long long)
GET_GENERATION_NUMBER(le_key_k_offset(version, key)));
else
sprintf(off_buf, "0x%Lx",
(unsigned long long)le_key_k_offset(version, key));
return off_buf;
}
static char *cpu_type(struct cpu_key *key)
{
if (cpu_key_k_type(key) == TYPE_STAT_DATA)
return "SD";
if (cpu_key_k_type(key) == TYPE_DIRENTRY)
return "DIR";
if (cpu_key_k_type(key) == TYPE_DIRECT)
return "DIRECT";
if (cpu_key_k_type(key) == TYPE_INDIRECT)
return "IND";
return "UNKNOWN";
}
static char *le_type(struct reiserfs_key *key)
{
int version;
version = le_key_version(key);
if (le_key_k_type(version, key) == TYPE_STAT_DATA)
return "SD";
if (le_key_k_type(version, key) == TYPE_DIRENTRY)
return "DIR";
if (le_key_k_type(version, key) == TYPE_DIRECT)
return "DIRECT";
if (le_key_k_type(version, key) == TYPE_INDIRECT)
return "IND";
return "UNKNOWN";
}
/* %k */
static void sprintf_le_key(char *buf, struct reiserfs_key *key)
{
if (key)
sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
le32_to_cpu(key->k_objectid), le_offset(key),
le_type(key));
else
sprintf(buf, "[NULL]");
}
/* %K */
static void sprintf_cpu_key(char *buf, struct cpu_key *key)
{
if (key)
sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
cpu_type(key));
else
sprintf(buf, "[NULL]");
}
static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
{
if (deh)
sprintf(buf,
"[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
deh_location(deh), deh_state(deh));
else
sprintf(buf, "[NULL]");
}
static void sprintf_item_head(char *buf, struct item_head *ih)
{
if (ih) {
strcpy(buf,
(ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
"free_space(entry_count) %d",
ih_item_len(ih), ih_location(ih), ih_free_space(ih));
} else
sprintf(buf, "[NULL]");
}
static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
{
char name[20];
memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
}
static void sprintf_block_head(char *buf, struct buffer_head *bh)
{
sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
}
static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
{
char b[BDEVNAME_SIZE];
sprintf(buf,
"dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
bdevname(bh->b_bdev, b), bh->b_size,
(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
bh->b_state, bh->b_page,
buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
buffer_dirty(bh) ? "DIRTY" : "CLEAN",
buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
}
static void sprintf_disk_child(char *buf, struct disk_child *dc)
{
sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
dc_size(dc));
}
static char *is_there_reiserfs_struct(char *fmt, int *what)
{
char *k = fmt;
while ((k = strchr(k, '%')) != NULL) {
if (k[1] == 'k' || k[1] == 'K' || k[1] == 'h' || k[1] == 't' ||
k[1] == 'z' || k[1] == 'b' || k[1] == 'y' || k[1] == 'a') {
*what = k[1];
break;
}
k++;
}
return k;
}
/* When debugging reiserfs we used to print out a lot of different
variables, like keys, item headers, buffer heads etc. Values of
most fields matter. So it took a long time just to write an
appropriate printk. With this reiserfs_warning you can use format
specification for complex structures like you used to do with
printfs for integers, doubles and pointers. For instance, to print
out key structure you have to write just:
reiserfs_warning ("bad key %k", key);
instead of
printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
key->k_offset, key->k_uniqueness);
*/
static DEFINE_SPINLOCK(error_lock);
static void prepare_error_buf(const char *fmt, va_list args)
{
char *fmt1 = fmt_buf;
char *k;
char *p = error_buf;
int what;
spin_lock(&error_lock);
strcpy(fmt1, fmt);
while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
*k = 0;
p += vsprintf(p, fmt1, args);
switch (what) {
case 'k':
sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
break;
case 'K':
sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
break;
case 'h':
sprintf_item_head(p, va_arg(args, struct item_head *));
break;
case 't':
sprintf_direntry(p,
va_arg(args,
struct reiserfs_dir_entry *));
break;
case 'y':
sprintf_disk_child(p,
va_arg(args, struct disk_child *));
break;
case 'z':
sprintf_block_head(p,
va_arg(args, struct buffer_head *));
break;
case 'b':
sprintf_buffer_head(p,
va_arg(args, struct buffer_head *));
break;
case 'a':
sprintf_de_head(p,
va_arg(args,
struct reiserfs_de_head *));
break;
}
p += strlen(p);
fmt1 = k + 2;
}
vsprintf(p, fmt1, args);
spin_unlock(&error_lock);
}
/* in addition to usual conversion specifiers this accepts reiserfs
specific conversion specifiers:
%k to print little endian key,
%K to print cpu key,
%h to print item_head,
%t to print directory entry
%z to print block head (arg must be struct buffer_head *
%b to print buffer_head
*/
#define do_reiserfs_warning(fmt)\
{\
va_list args;\
va_start( args, fmt );\
prepare_error_buf( fmt, args );\
va_end( args );\
}
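/*
 * Editorial note (not in the original source): this has to be a macro
 * rather than a helper function because va_start() must name the last
 * fixed parameter of the *enclosing* variadic function; each entry
 * point below expands it to capture its own argument list into
 * error_buf via prepare_error_buf().
 */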
void __reiserfs_warning(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
{
do_reiserfs_warning(fmt);
if (sb)
printk(KERN_WARNING "REISERFS warning (device %s): %s%s%s: "
"%s\n", sb->s_id, id ? id : "", id ? " " : "",
function, error_buf);
else
printk(KERN_WARNING "REISERFS warning: %s%s%s: %s\n",
id ? id : "", id ? " " : "", function, error_buf);
}
/* No newline.. reiserfs_info calls can be followed by printk's */
void reiserfs_info(struct super_block *sb, const char *fmt, ...)
{
do_reiserfs_warning(fmt);
if (sb)
printk(KERN_NOTICE "REISERFS (device %s): %s",
sb->s_id, error_buf);
else
printk(KERN_NOTICE "REISERFS %s:", error_buf);
}
/* No newline.. reiserfs_printk calls can be followed by printk's */
static void reiserfs_printk(const char *fmt, ...)
{
do_reiserfs_warning(fmt);
printk(error_buf);
}
void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...)
{
#ifdef CONFIG_REISERFS_CHECK
do_reiserfs_warning(fmt);
if (s)
printk(KERN_DEBUG "REISERFS debug (device %s): %s\n",
s->s_id, error_buf);
else
printk(KERN_DEBUG "REISERFS debug: %s\n", error_buf);
#endif
}
/* The format:
maintainer-errorid: [function-name:] message
where errorid is unique to the maintainer and function-name is
optional, is recommended, so that anyone can easily find the bug
with a simple grep for the short to type string
maintainer-errorid. Don't bother with reusing errorids, there are
lots of numbers out there.
Example:
reiserfs_panic(
p_sb, "reiser-29: reiserfs_new_blocknrs: "
"one of search_start or rn(%d) is equal to MAX_B_NUM,"
"which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
rn, bh
);
Regular panic()s sometimes clear the screen before the message can
be read, thus the need for the while loop.
Numbering scheme for panics used by Vladimir and Anatoly (Hans completely ignores this scheme, and considers it
pointless complexity):
panics in reiserfs.h have numbers from 1000 to 1999
super.c 2000 to 2999
preserve.c (unused) 3000 to 3999
bitmap.c 4000 to 4999
stree.c 5000 to 5999
prints.c 6000 to 6999
namei.c 7000 to 7999
fix_nodes.c 8000 to 8999
dir.c 9000 to 9999
lbalance.c 10000 to 10999
ibalance.c 11000 to 11999 not ready
do_balan.c 12000 to 12999
inode.c 13000 to 13999
file.c 14000 to 14999
objectid.c 15000 - 15999
buffer.c 16000 - 16999
symlink.c 17000 - 17999
. */
void __reiserfs_panic(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
{
do_reiserfs_warning(fmt);
#ifdef CONFIG_REISERFS_CHECK
dump_stack();
#endif
if (sb)
panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
sb->s_id, id ? id : "", id ? " " : "",
function, error_buf);
else
panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
id ? id : "", id ? " " : "", function, error_buf);
}
void __reiserfs_error(struct super_block *sb, const char *id,
const char *function, const char *fmt, ...)
{
do_reiserfs_warning(fmt);
BUG_ON(sb == NULL);
if (reiserfs_error_panic(sb))
__reiserfs_panic(sb, id, function, error_buf);
if (id && id[0])
printk(KERN_CRIT "REISERFS error (device %s): %s %s: %s\n",
sb->s_id, id, function, error_buf);
else
printk(KERN_CRIT "REISERFS error (device %s): %s: %s\n",
sb->s_id, function, error_buf);
if (sb->s_flags & MS_RDONLY)
return;
reiserfs_info(sb, "Remounting filesystem read-only\n");
sb->s_flags |= MS_RDONLY;
reiserfs_abort_journal(sb, -EIO);
}
void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
{
do_reiserfs_warning(fmt);
if (reiserfs_error_panic(sb)) {
panic(KERN_CRIT "REISERFS panic (device %s): %s\n", sb->s_id,
error_buf);
}
if (reiserfs_is_journal_aborted(SB_JOURNAL(sb)))
return;
printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
error_buf);
sb->s_flags |= MS_RDONLY;
reiserfs_abort_journal(sb, errno);
}
/* this prints internal nodes (4 keys/items in line) (dc_number,
dc_size)[k_dirid, k_objectid, k_offset, k_uniqueness](dc_number,
dc_size)...*/
static int print_internal(struct buffer_head *bh, int first, int last)
{
struct reiserfs_key *key;
struct disk_child *dc;
int i;
int from, to;
if (!B_IS_KEYS_LEVEL(bh))
return 1;
check_internal(bh);
if (first == -1) {
from = 0;
to = B_NR_ITEMS(bh);
} else {
from = first;
to = last < B_NR_ITEMS(bh) ? last : B_NR_ITEMS(bh);
}
reiserfs_printk("INTERNAL NODE (%ld) contains %z\n", bh->b_blocknr, bh);
dc = B_N_CHILD(bh, from);
reiserfs_printk("PTR %d: %y ", from, dc);
for (i = from, key = B_N_PDELIM_KEY(bh, from), dc++; i < to;
i++, key++, dc++) {
reiserfs_printk("KEY %d: %k PTR %d: %y ", i, key, i + 1, dc);
if (i && i % 4 == 0)
printk("\n");
}
printk("\n");
return 0;
}
static int print_leaf(struct buffer_head *bh, int print_mode, int first,
int last)
{
struct block_head *blkh;
struct item_head *ih;
int i, nr;
int from, to;
if (!B_IS_ITEMS_LEVEL(bh))
return 1;
check_leaf(bh);
blkh = B_BLK_HEAD(bh);
ih = B_N_PITEM_HEAD(bh, 0);
nr = blkh_nr_item(blkh);
printk
("\n===================================================================\n");
reiserfs_printk("LEAF NODE (%ld) contains %z\n", bh->b_blocknr, bh);
if (!(print_mode & PRINT_LEAF_ITEMS)) {
reiserfs_printk("FIRST ITEM_KEY: %k, LAST ITEM KEY: %k\n",
&(ih->ih_key), &((ih + nr - 1)->ih_key));
return 0;
}
if (first < 0 || first > nr - 1)
from = 0;
else
from = first;
if (last < 0 || last > nr)
to = nr;
else
to = last;
ih += from;
printk
("-------------------------------------------------------------------------------\n");
printk
("|##| type | key | ilen | free_space | version | loc |\n");
for (i = from; i < to; i++, ih++) {
printk
("-------------------------------------------------------------------------------\n");
reiserfs_printk("|%2d| %h |\n", i, ih);
if (print_mode & PRINT_LEAF_ITEMS)
op_print_item(ih, B_I_PITEM(bh, ih));
}
printk
("===================================================================\n");
return 0;
}
char *reiserfs_hashname(int code)
{
if (code == YURA_HASH)
return "rupasov";
if (code == TEA_HASH)
return "tea";
if (code == R5_HASH)
return "r5";
return "unknown";
}
/* return 1 if this is not super block */
static int print_super_block(struct buffer_head *bh)
{
struct reiserfs_super_block *rs =
(struct reiserfs_super_block *)(bh->b_data);
int skipped, data_blocks;
char *version;
char b[BDEVNAME_SIZE];
if (is_reiserfs_3_5(rs)) {
version = "3.5";
} else if (is_reiserfs_3_6(rs)) {
version = "3.6";
} else if (is_reiserfs_jr(rs)) {
version = ((sb_version(rs) == REISERFS_VERSION_2) ?
"3.6" : "3.5");
} else {
return 1;
}
printk("%s\'s super block is in block %llu\n", bdevname(bh->b_bdev, b),
(unsigned long long)bh->b_blocknr);
printk("Reiserfs version %s\n", version);
printk("Block count %u\n", sb_block_count(rs));
printk("Blocksize %d\n", sb_blocksize(rs));
printk("Free blocks %u\n", sb_free_blocks(rs));
// FIXME: this would be confusing if
// someone stores reiserfs super block in some data block ;)
// skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
skipped = bh->b_blocknr;
data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
(!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) +
1 : sb_reserved_for_journal(rs)) - sb_free_blocks(rs);
printk
("Busy blocks (skipped %d, bitmaps - %d, journal (or reserved) blocks - %d\n"
"1 super block, %d data blocks\n", skipped, sb_bmap_nr(rs),
(!is_reiserfs_jr(rs) ? (sb_jp_journal_size(rs) + 1) :
sb_reserved_for_journal(rs)), data_blocks);
printk("Root block %u\n", sb_root_block(rs));
printk("Journal block (first) %d\n", sb_jp_journal_1st_block(rs));
printk("Journal dev %d\n", sb_jp_journal_dev(rs));
printk("Journal orig size %d\n", sb_jp_journal_size(rs));
printk("FS state %d\n", sb_fs_state(rs));
printk("Hash function \"%s\"\n",
reiserfs_hashname(sb_hash_function_code(rs)));
printk("Tree height %d\n", sb_tree_height(rs));
return 0;
}
static int print_desc_block(struct buffer_head *bh)
{
struct reiserfs_journal_desc *desc;
if (memcmp(get_journal_desc_magic(bh), JOURNAL_DESC_MAGIC, 8))
return 1;
desc = (struct reiserfs_journal_desc *)(bh->b_data);
printk("Desc block %llu (j_trans_id %d, j_mount_id %d, j_len %d)",
(unsigned long long)bh->b_blocknr, get_desc_trans_id(desc),
get_desc_mount_id(desc), get_desc_trans_len(desc));
return 0;
}
void print_block(struct buffer_head *bh, ...) //int print_mode, int first, int last)
{
va_list args;
int mode, first, last;
if (!bh) {
printk("print_block: buffer is NULL\n");
return;
}
va_start(args, bh);
mode = va_arg(args, int);
first = va_arg(args, int);
last = va_arg(args, int);
if (print_leaf(bh, mode, first, last))
if (print_internal(bh, first, last))
if (print_super_block(bh))
if (print_desc_block(bh))
printk
("Block %llu contains unformatted data\n",
(unsigned long long)bh->b_blocknr);
va_end(args);
}
static char print_tb_buf[2048];
/* this stores initial state of tree balance in the print_tb_buf */
void store_print_tb(struct tree_balance *tb)
{
int h = 0;
int i;
struct buffer_head *tbSh, *tbFh;
if (!tb)
return;
sprintf(print_tb_buf, "\n"
"BALANCING %d\n"
"MODE=%c, ITEM_POS=%d POS_IN_ITEM=%d\n"
"=====================================================================\n"
"* h * S * L * R * F * FL * FR * CFL * CFR *\n",
REISERFS_SB(tb->tb_sb)->s_do_balance,
tb->tb_mode, PATH_LAST_POSITION(tb->tb_path),
tb->tb_path->pos_in_item);
for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {
if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=
tb->tb_path->path_length
&& PATH_H_PATH_OFFSET(tb->tb_path,
h) > ILLEGAL_PATH_ELEMENT_OFFSET) {
tbSh = PATH_H_PBUFFER(tb->tb_path, h);
tbFh = PATH_H_PPARENT(tb->tb_path, h);
} else {
tbSh = NULL;
tbFh = NULL;
}
sprintf(print_tb_buf + strlen(print_tb_buf),
"* %d * %3lld(%2d) * %3lld(%2d) * %3lld(%2d) * %5lld * %5lld * %5lld * %5lld * %5lld *\n",
h,
(tbSh) ? (long long)(tbSh->b_blocknr) : (-1LL),
(tbSh) ? atomic_read(&(tbSh->b_count)) : -1,
(tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL),
(tb->L[h]) ? atomic_read(&(tb->L[h]->b_count)) : -1,
(tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL),
(tb->R[h]) ? atomic_read(&(tb->R[h]->b_count)) : -1,
(tbFh) ? (long long)(tbFh->b_blocknr) : (-1LL),
(tb->FL[h]) ? (long long)(tb->FL[h]->
b_blocknr) : (-1LL),
(tb->FR[h]) ? (long long)(tb->FR[h]->
b_blocknr) : (-1LL),
(tb->CFL[h]) ? (long long)(tb->CFL[h]->
b_blocknr) : (-1LL),
(tb->CFR[h]) ? (long long)(tb->CFR[h]->
b_blocknr) : (-1LL));
}
sprintf(print_tb_buf + strlen(print_tb_buf),
"=====================================================================\n"
"* h * size * ln * lb * rn * rb * blkn * s0 * s1 * s1b * s2 * s2b * curb * lk * rk *\n"
"* 0 * %4d * %2d * %2d * %2d * %2d * %4d * %2d * %2d * %3d * %2d * %3d * %4d * %2d * %2d *\n",
tb->insert_size[0], tb->lnum[0], tb->lbytes, tb->rnum[0],
tb->rbytes, tb->blknum[0], tb->s0num, tb->s1num, tb->s1bytes,
tb->s2num, tb->s2bytes, tb->cur_blknum, tb->lkey[0],
tb->rkey[0]);
/* this prints balance parameters for non-leaf levels */
h = 0;
do {
h++;
sprintf(print_tb_buf + strlen(print_tb_buf),
"* %d * %4d * %2d * * %2d * * %2d *\n",
h, tb->insert_size[h], tb->lnum[h], tb->rnum[h],
tb->blknum[h]);
} while (tb->insert_size[h]);
sprintf(print_tb_buf + strlen(print_tb_buf),
"=====================================================================\n"
"FEB list: ");
/* print the FEB list (buffers in the form (bh (b_blocknr, b_count)) that will be used for new nodes) */
h = 0;
for (i = 0; i < ARRAY_SIZE(tb->FEB); i++)
sprintf(print_tb_buf + strlen(print_tb_buf),
"%p (%llu %d)%s", tb->FEB[i],
tb->FEB[i] ? (unsigned long long)tb->FEB[i]->
b_blocknr : 0ULL,
tb->FEB[i] ? atomic_read(&(tb->FEB[i]->b_count)) : 0,
(i == ARRAY_SIZE(tb->FEB) - 1) ? "\n" : ", ");
sprintf(print_tb_buf + strlen(print_tb_buf),
"======================== the end ====================================\n");
}
void print_cur_tb(char *mes)
{
printk("%s\n%s", mes, print_tb_buf);
}
static void check_leaf_block_head(struct buffer_head *bh)
{
struct block_head *blkh;
int nr;
blkh = B_BLK_HEAD(bh);
nr = blkh_nr_item(blkh);
if (nr > (bh->b_size - BLKH_SIZE) / IH_SIZE)
reiserfs_panic(NULL, "vs-6010", "invalid item number %z",
bh);
if (blkh_free_space(blkh) > bh->b_size - BLKH_SIZE - IH_SIZE * nr)
reiserfs_panic(NULL, "vs-6020", "invalid free space %z",
bh);
}
static void check_internal_block_head(struct buffer_head *bh)
{
struct block_head *blkh;
blkh = B_BLK_HEAD(bh);
if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);
if (B_NR_ITEMS(bh) > (bh->b_size - BLKH_SIZE) / IH_SIZE)
reiserfs_panic(NULL, "vs-6030", "invalid item number %z", bh);
if (B_FREE_SPACE(bh) !=
bh->b_size - BLKH_SIZE - KEY_SIZE * B_NR_ITEMS(bh) -
DC_SIZE * (B_NR_ITEMS(bh) + 1))
reiserfs_panic(NULL, "vs-6040", "invalid free space %z", bh);
}
void check_leaf(struct buffer_head *bh)
{
int i;
struct item_head *ih;
if (!bh)
return;
check_leaf_block_head(bh);
for (i = 0, ih = B_N_PITEM_HEAD(bh, 0); i < B_NR_ITEMS(bh); i++, ih++)
op_check_item(ih, B_I_PITEM(bh, ih));
}
void check_internal(struct buffer_head *bh)
{
if (!bh)
return;
check_internal_block_head(bh);
}
void print_statistics(struct super_block *s)
{
/*
printk ("reiserfs_put_super: session statistics: balances %d, fix_nodes %d, \
bmap with search %d, without %d, dir2ind %d, ind2dir %d\n",
REISERFS_SB(s)->s_do_balance, REISERFS_SB(s)->s_fix_nodes,
REISERFS_SB(s)->s_bmaps, REISERFS_SB(s)->s_bmaps_without_search,
REISERFS_SB(s)->s_direct2indirect, REISERFS_SB(s)->s_indirect2direct);
*/
}
| gpl-2.0 |
GeorgeIoak/AM1802-Kernel | arch/arm/plat-versatile/fpga-irq.c | 7815 | 1601 | /*
* Support for Versatile FPGA-based IRQ controllers
*/
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/mach/irq.h>
#include <plat/fpga-irq.h>
#define IRQ_STATUS 0x00
#define IRQ_RAW_STATUS 0x04
#define IRQ_ENABLE_SET 0x08
#define IRQ_ENABLE_CLEAR 0x0c
static void fpga_irq_mask(struct irq_data *d)
{
struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - f->irq_start);
writel(mask, f->base + IRQ_ENABLE_CLEAR);
}
static void fpga_irq_unmask(struct irq_data *d)
{
struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - f->irq_start);
writel(mask, f->base + IRQ_ENABLE_SET);
}
static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
{
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
u32 status = readl(f->base + IRQ_STATUS);
if (status == 0) {
do_bad_IRQ(irq, desc);
return;
}
do {
irq = ffs(status) - 1;
status &= ~(1 << irq);
generic_handle_irq(irq + f->irq_start);
} while (status);
}
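/*
 * Editorial note (not in the original source): the loop above dispatches
 * pending sources lowest-bit-first. E.g. status == 0x12 (bits 1 and 4)
 * calls generic_handle_irq() for f->irq_start + 1, then f->irq_start + 4.
 */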
void __init fpga_irq_init(int parent_irq, u32 valid, struct fpga_irq_data *f)
{
unsigned int i;
f->chip.irq_ack = fpga_irq_mask;
f->chip.irq_mask = fpga_irq_mask;
f->chip.irq_unmask = fpga_irq_unmask;
if (parent_irq != -1) {
irq_set_handler_data(parent_irq, f);
irq_set_chained_handler(parent_irq, fpga_irq_handle);
}
for (i = 0; i < 32; i++) {
if (valid & (1 << i)) {
unsigned int irq = f->irq_start + i;
irq_set_chip_data(irq, f);
irq_set_chip_and_handler(irq, &f->chip,
handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
}
| gpl-2.0 |
MoKee/android_kernel_lge_star | drivers/gpu/drm/ttm/ttm_bo_manager.c | 8327 | 4368 | /**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
/**
* Currently we use a spinlock for the lock, but a mutex *may* be
* more appropriate to reduce scheduling latency if the range manager
* ends up with very fragmented allocation patterns.
*/
struct ttm_range_manager {
struct drm_mm mm;
spinlock_t lock;
};
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
unsigned long lpfn;
int ret;
lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
do {
ret = drm_mm_pre_get(mm);
if (unlikely(ret))
return ret;
spin_lock(&rman->lock);
node = drm_mm_search_free_in_range(mm,
mem->num_pages, mem->page_alignment,
placement->fpfn, lpfn, 1);
if (unlikely(node == NULL)) {
spin_unlock(&rman->lock);
return 0;
}
node = drm_mm_get_block_atomic_range(node, mem->num_pages,
mem->page_alignment,
placement->fpfn,
lpfn);
spin_unlock(&rman->lock);
} while (node == NULL);
mem->mm_node = node;
mem->start = node->start;
return 0;
}
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
if (mem->mm_node) {
spin_lock(&rman->lock);
drm_mm_put_block(mem->mm_node);
spin_unlock(&rman->lock);
mem->mm_node = NULL;
}
}
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
struct ttm_range_manager *rman;
int ret;
rman = kzalloc(sizeof(*rman), GFP_KERNEL);
if (!rman)
return -ENOMEM;
ret = drm_mm_init(&rman->mm, 0, p_size);
if (ret) {
kfree(rman);
return ret;
}
spin_lock_init(&rman->lock);
man->priv = rman;
return 0;
}
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
spin_lock(&rman->lock);
if (drm_mm_clean(mm)) {
drm_mm_takedown(mm);
spin_unlock(&rman->lock);
kfree(rman);
man->priv = NULL;
return 0;
}
spin_unlock(&rman->lock);
return -EBUSY;
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
spin_lock(&rman->lock);
drm_mm_debug_table(&rman->mm, prefix);
spin_unlock(&rman->lock);
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
ttm_bo_man_init,
ttm_bo_man_takedown,
ttm_bo_man_get_node,
ttm_bo_man_put_node,
ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
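/*
 * Usage sketch (illustrative): a TTM driver selects this range manager
 * for a memory type from its init_mem_type() hook, e.g.
 *
 *	man->func = &ttm_bo_manager_func;
 *
 * after which the TTM core calls ->init() with the managed size in pages
 * and routes all node allocation and freeing through the get_node and
 * put_node callbacks above.
 */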
| gpl-2.0 |
xenon92/android_kernel_nebula | drivers/staging/rtl8187se/ieee80211/dot11d.c | 9607 | 4965 | //-----------------------------------------------------------------------------
// File:
// Dot11d.c
//
// Description:
// Implement 802.11d.
//
//-----------------------------------------------------------------------------
#include "dot11d.h"
void
Dot11d_Init(struct ieee80211_device *ieee)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
pDot11dInfo->bEnabled = 0;
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
RESET_CIE_WATCHDOG(ieee);
printk("Dot11d_Init()\n");
}
//
// Description:
// Reset to the initial state, as if we have just entered a regulatory domain.
//
void
Dot11d_Reset(struct ieee80211_device *ieee)
{
u32 i;
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
// Clear old channel map
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
// Set new channel map
for (i=1; i<=11; i++) {
(pDot11dInfo->channel_map)[i] = 1;
}
for (i=12; i<=14; i++) {
(pDot11dInfo->channel_map)[i] = 2;
}
pDot11dInfo->State = DOT11D_STATE_NONE;
pDot11dInfo->CountryIeLen = 0;
RESET_CIE_WATCHDOG(ieee);
//printk("Dot11d_Reset()\n");
}
//
// Description:
// Update country IE from Beacon or Probe Response
// and configure PHY for operation in the regulatory domain.
//
// TODO:
// Configure Tx power.
//
// Assumption:
// 1. IS_DOT11D_ENABLE() is TRUE.
// 2. Input IE is a valid one.
//
void
Dot11d_UpdateCountryIe(
struct ieee80211_device *dev,
u8 * pTaddr,
u16 CountryIeLen,
u8 * pCountryIe
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 i, j, NumTriples, MaxChnlNum;
PCHNL_TXPOWER_TRIPLE pTriple;
if((CountryIeLen - 3)%3 != 0)
{
printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
}
memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
MaxChnlNum = 0;
NumTriples = (CountryIeLen - 3) / 3; // skip 3-byte country string.
pTriple = (PCHNL_TXPOWER_TRIPLE)(pCountryIe + 3);
for(i = 0; i < NumTriples; i++)
{
if(MaxChnlNum >= pTriple->FirstChnl)
{ // It is not in a monotonically increasing order, so stop processing.
printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
Dot11d_Reset(dev);
return;
}
if(MAX_CHANNEL_NUMBER < (pTriple->FirstChnl + pTriple->NumChnls))
{ // It is not a valid set of channel IDs, so stop processing.
printk("Dot11d_UpdateCountryIe(): Invalid country IE, skip it........3\n");
Dot11d_Reset(dev);
return;
}
for(j = 0 ; j < pTriple->NumChnls; j++)
{
pDot11dInfo->channel_map[pTriple->FirstChnl + j] = 1;
pDot11dInfo->MaxTxPwrDbmList[pTriple->FirstChnl + j] = pTriple->MaxTxPowerInDbm;
MaxChnlNum = pTriple->FirstChnl + j;
}
pTriple = (PCHNL_TXPOWER_TRIPLE)((u8*)pTriple + 3);
}
#if 1
//printk("Dot11d_UpdateCountryIe(): Channel List:\n");
printk("Channel List:");
for(i=1; i<= MAX_CHANNEL_NUMBER; i++)
if(pDot11dInfo->channel_map[i] > 0)
printk(" %d", i);
printk("\n");
#endif
UPDATE_CIE_SRC(dev, pTaddr);
pDot11dInfo->CountryIeLen = CoutryIeLen;
memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe,CoutryIeLen);
pDot11dInfo->State = DOT11D_STATE_LEARNED;
}
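/*
 * Worked example (illustrative): a country IE of "US " followed by one
 * triple {FirstChnl = 1, NumChnls = 11, MaxTxPowerInDbm = 20} gives
 * CountryIeLen == 6 and NumTriples == 1; the loop above then sets
 * channel_map[1..11] = 1 and MaxTxPwrDbmList[1..11] = 20 before the
 * state advances to DOT11D_STATE_LEARNED.
 */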
u8
DOT11D_GetMaxTxPwrInDbm(
struct ieee80211_device *dev,
u8 Channel
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
if(MAX_CHANNEL_NUMBER < Channel)
{
printk("DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
if(pDot11dInfo->channel_map[Channel])
{
MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
}
return MaxTxPwrInDbm;
}
void
DOT11D_ScanComplete(
struct ieee80211_device * dev
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
switch(pDot11dInfo->State)
{
case DOT11D_STATE_LEARNED:
pDot11dInfo->State = DOT11D_STATE_DONE;
break;
case DOT11D_STATE_DONE:
if( GET_CIE_WATCHDOG(dev) == 0 )
{ // Reset country IE if previous one is gone.
Dot11d_Reset(dev);
}
break;
case DOT11D_STATE_NONE:
break;
}
}
int IsLegalChannel(
struct ieee80211_device * dev,
u8 channel
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
if(MAX_CHANNEL_NUMBER < channel)
{
printk("IsLegalChannel(): Invalid Channel\n");
return 0;
}
if(pDot11dInfo->channel_map[channel] > 0)
return 1;
return 0;
}
int ToLegalChannel(
struct ieee80211_device * dev,
u8 channel
)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 default_chn = 0;
u32 i = 0;
for (i=1; i<= MAX_CHANNEL_NUMBER; i++)
{
if(pDot11dInfo->channel_map[i] > 0)
{
default_chn = i;
break;
}
}
if(MAX_CHANNEL_NUMBER < channel)
{
printk("IsLegalChannel(): Invalid Channel\n");
return default_chn;
}
if(pDot11dInfo->channel_map[channel] > 0)
return channel;
return default_chn;
}
| gpl-2.0 |
pierdebeer/AudaxPlus_Kernel | net/sched/em_text.c | 11143 | 3805 | /*
* net/sched/em_text.c Textsearch ematch
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/textsearch.h>
#include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h>
struct text_match {
u16 from_offset;
u16 to_offset;
u8 from_layer;
u8 to_layer;
struct ts_config *config;
};
#define EM_TEXT_PRIV(m) ((struct text_match *) (m)->data)
static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
struct tcf_pkt_info *info)
{
struct text_match *tm = EM_TEXT_PRIV(m);
int from, to;
struct ts_state state;
from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data;
from += tm->from_offset;
to = tcf_get_base_ptr(skb, tm->to_layer) - skb->data;
to += tm->to_offset;
return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX;
}
static int em_text_change(struct tcf_proto *tp, void *data, int len,
struct tcf_ematch *m)
{
struct text_match *tm;
struct tcf_em_text *conf = data;
struct ts_config *ts_conf;
int flags = 0;
if (len < sizeof(*conf) || len < (sizeof(*conf) + conf->pattern_len))
return -EINVAL;
if (conf->from_layer > conf->to_layer)
return -EINVAL;
if (conf->from_layer == conf->to_layer &&
conf->from_offset > conf->to_offset)
return -EINVAL;
retry:
ts_conf = textsearch_prepare(conf->algo, (u8 *) conf + sizeof(*conf),
conf->pattern_len, GFP_KERNEL, flags);
if (flags & TS_AUTOLOAD)
rtnl_lock();
if (IS_ERR(ts_conf)) {
if (PTR_ERR(ts_conf) == -ENOENT && !(flags & TS_AUTOLOAD)) {
rtnl_unlock();
flags |= TS_AUTOLOAD;
goto retry;
} else
return PTR_ERR(ts_conf);
} else if (flags & TS_AUTOLOAD) {
textsearch_destroy(ts_conf);
return -EAGAIN;
}
tm = kmalloc(sizeof(*tm), GFP_KERNEL);
if (tm == NULL) {
textsearch_destroy(ts_conf);
return -ENOBUFS;
}
tm->from_offset = conf->from_offset;
tm->to_offset = conf->to_offset;
tm->from_layer = conf->from_layer;
tm->to_layer = conf->to_layer;
tm->config = ts_conf;
m->datalen = sizeof(*tm);
m->data = (unsigned long) tm;
return 0;
}
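/*
 * Layout sketch (illustrative): the configuration blob parsed above is a
 * struct tcf_em_text header followed immediately by the raw pattern, so
 * for algo "kmp" and the 3-byte pattern "GET" the caller passes
 * sizeof(*conf) + 3 bytes and textsearch_prepare() receives the pattern
 * at (u8 *) conf + sizeof(*conf).
 */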
static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
{
if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
textsearch_destroy(EM_TEXT_PRIV(m)->config);
}
static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
{
struct text_match *tm = EM_TEXT_PRIV(m);
struct tcf_em_text conf;
strncpy(conf.algo, tm->config->ops->name, sizeof(conf.algo) - 1);
conf.from_offset = tm->from_offset;
conf.to_offset = tm->to_offset;
conf.from_layer = tm->from_layer;
conf.to_layer = tm->to_layer;
conf.pattern_len = textsearch_get_pattern_len(tm->config);
conf.pad = 0;
if (nla_put_nohdr(skb, sizeof(conf), &conf) < 0)
goto nla_put_failure;
if (nla_append(skb, conf.pattern_len,
textsearch_get_pattern(tm->config)) < 0)
goto nla_put_failure;
return 0;
nla_put_failure:
return -1;
}
static struct tcf_ematch_ops em_text_ops = {
.kind = TCF_EM_TEXT,
.change = em_text_change,
.match = em_text_match,
.destroy = em_text_destroy,
.dump = em_text_dump,
.owner = THIS_MODULE,
.link = LIST_HEAD_INIT(em_text_ops.link)
};
static int __init init_em_text(void)
{
return tcf_em_register(&em_text_ops);
}
static void __exit exit_em_text(void)
{
tcf_em_unregister(&em_text_ops);
}
MODULE_LICENSE("GPL");
module_init(init_em_text);
module_exit(exit_em_text);
MODULE_ALIAS_TCF_EMATCH(TCF_EM_TEXT);
| gpl-2.0 |
mythos234/OnePlus2testing | drivers/video/sysimgblt.c | 12167 | 6937 | /*
* Generic 1-bit or 8-bit source to 1-32 bit destination expansion
* for frame buffer located in system RAM with packed pixels of any depth.
*
* Based almost entirely on cfbimgblt.c
*
* Copyright (C) April 2007 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
#define DEBUG
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
#else
#define DPRINTK(fmt, args...)
#endif
static const u32 cfb_tab8_be[] = {
0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
0xffff0000,0xffff00ff,0xffffff00,0xffffffff
};
static const u32 cfb_tab8_le[] = {
0x00000000,0xff000000,0x00ff0000,0xffff0000,
0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
};
static const u32 cfb_tab16_be[] = {
0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
};
static const u32 cfb_tab16_le[] = {
0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
};
static const u32 cfb_tab32[] = {
0x00000000, 0xffffffff
};
static void color_imageblit(const struct fb_image *image, struct fb_info *p,
void *dst1, u32 start_index, u32 pitch_index)
{
/* Draw the penguin */
u32 *dst, *dst2;
u32 color = 0, val, shift;
int i, n, bpp = p->var.bits_per_pixel;
u32 null_bits = 32 - bpp;
u32 *palette = (u32 *) p->pseudo_palette;
const u8 *src = image->data;
dst2 = dst1;
for (i = image->height; i--; ) {
n = image->width;
dst = dst1;
shift = 0;
val = 0;
if (start_index) {
u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
start_index));
val = *dst & start_mask;
shift = start_index;
}
while (n--) {
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR )
color = palette[*src];
else
color = *src;
color <<= FB_LEFT_POS(p, bpp);
val |= FB_SHIFT_HIGH(p, color, shift);
if (shift >= null_bits) {
*dst++ = val;
val = (shift == null_bits) ? 0 :
FB_SHIFT_LOW(p, color, 32 - shift);
}
shift += bpp;
shift &= (32 - 1);
src++;
}
if (shift) {
u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
*dst &= end_mask;
*dst |= val;
}
dst1 += p->fix.line_length;
if (pitch_index) {
dst2 += p->fix.line_length;
dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1));
start_index += pitch_index;
start_index &= 32 - 1;
}
}
}
static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
void *dst1, u32 fgcolor, u32 bgcolor,
u32 start_index, u32 pitch_index)
{
u32 shift, color = 0, bpp = p->var.bits_per_pixel;
u32 *dst, *dst2;
u32 val, pitch = p->fix.line_length;
u32 null_bits = 32 - bpp;
u32 spitch = (image->width+7)/8;
const u8 *src = image->data, *s;
u32 i, j, l;
dst2 = dst1;
fgcolor <<= FB_LEFT_POS(p, bpp);
bgcolor <<= FB_LEFT_POS(p, bpp);
for (i = image->height; i--; ) {
shift = val = 0;
l = 8;
j = image->width;
dst = dst1;
s = src;
/* write leading bits */
if (start_index) {
u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
start_index));
val = *dst & start_mask;
shift = start_index;
}
while (j--) {
l--;
color = (*s & (1 << l)) ? fgcolor : bgcolor;
val |= FB_SHIFT_HIGH(p, color, shift);
/* Did the bitshift spill bits to the next long? */
if (shift >= null_bits) {
*dst++ = val;
val = (shift == null_bits) ? 0 :
FB_SHIFT_LOW(p, color, 32 - shift);
}
shift += bpp;
shift &= (32 - 1);
if (!l) { l = 8; s++; }
}
/* write trailing bits */
if (shift) {
u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
*dst &= end_mask;
*dst |= val;
}
dst1 += pitch;
src += spitch;
if (pitch_index) {
dst2 += pitch;
dst1 = (u8 *)((long)dst2 & ~(sizeof(u32) - 1));
start_index += pitch_index;
start_index &= 32 - 1;
}
}
}
/*
* fast_imageblit - optimized monochrome color expansion
*
* Only if: bits_per_pixel == 8, 16, or 32
* image->width is divisible by pixel/dword (ppw);
* fix->line_legth is divisible by 4;
* beginning and end of a scanline is dword aligned
*/
static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
void *dst1, u32 fgcolor, u32 bgcolor)
{
u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel;
u32 ppw = 32/bpp, spitch = (image->width + 7)/8;
u32 bit_mask, end_mask, eorx, shift;
const char *s = image->data, *src;
u32 *dst;
const u32 *tab = NULL;
int i, j, k;
switch (bpp) {
case 8:
tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
break;
case 16:
tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
break;
case 32:
default:
tab = cfb_tab32;
break;
}
for (i = ppw-1; i--; ) {
fgx <<= bpp;
bgx <<= bpp;
fgx |= fgcolor;
bgx |= bgcolor;
}
bit_mask = (1 << ppw) - 1;
eorx = fgx ^ bgx;
k = image->width/ppw;
for (i = image->height; i--; ) {
dst = dst1;
shift = 8;
src = s;
for (j = k; j--; ) {
shift -= ppw;
end_mask = tab[(*src >> shift) & bit_mask];
*dst++ = (end_mask & eorx) ^ bgx;
if (!shift) {
shift = 8;
src++;
}
}
dst1 += p->fix.line_length;
s += spitch;
}
}
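/*
 * Worked example (illustrative): with bpp == 8 on a little-endian
 * framebuffer, ppw == 4, bit_mask == 0x0f and tab == cfb_tab8_le. A
 * source nibble of 0x5 (pixel pattern 0101, MSB leftmost) selects
 * tab[0x5] == 0xff00ff00, whose in-memory bytes 00,ff,00,ff make
 * (end_mask & eorx) ^ bgx expand to bg,fg,bg,fg in a single 32-bit
 * store.
 */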
void sys_imageblit(struct fb_info *p, const struct fb_image *image)
{
u32 fgcolor, bgcolor, start_index, bitstart, pitch_index = 0;
u32 bpl = sizeof(u32), bpp = p->var.bits_per_pixel;
u32 width = image->width;
u32 dx = image->dx, dy = image->dy;
void *dst1;
if (p->state != FBINFO_STATE_RUNNING)
return;
bitstart = (dy * p->fix.line_length * 8) + (dx * bpp);
start_index = bitstart & (32 - 1);
pitch_index = (p->fix.line_length & (bpl - 1)) * 8;
bitstart /= 8;
bitstart &= ~(bpl - 1);
dst1 = (void __force *)p->screen_base + bitstart;
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
if (image->depth == 1) {
if (p->fix.visual == FB_VISUAL_TRUECOLOR ||
p->fix.visual == FB_VISUAL_DIRECTCOLOR) {
fgcolor = ((u32*)(p->pseudo_palette))[image->fg_color];
bgcolor = ((u32*)(p->pseudo_palette))[image->bg_color];
} else {
fgcolor = image->fg_color;
bgcolor = image->bg_color;
}
if (32 % bpp == 0 && !start_index && !pitch_index &&
((width & (32/bpp-1)) == 0) &&
bpp >= 8 && bpp <= 32)
fast_imageblit(image, p, dst1, fgcolor, bgcolor);
else
slow_imageblit(image, p, dst1, fgcolor, bgcolor,
start_index, pitch_index);
} else
color_imageblit(image, p, dst1, start_index, pitch_index);
}
EXPORT_SYMBOL(sys_imageblit);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("1-bit/8-bit to 1-32 bit color expansion (sys-to-sys)");
MODULE_LICENSE("GPL");
| gpl-2.0 |
charles1018/kernel_blu_spark | block/blk-flush.c | 136 | 13404 | /*
* Functions to sequence FLUSH and FUA writes.
*
* Copyright (C) 2011 Max Planck Institute for Gravitational Physics
* Copyright (C) 2011 Tejun Heo <tj@kernel.org>
*
* This file is released under the GPLv2.
*
* REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three
* optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
* properties and hardware capability.
*
* If a request doesn't have data, only REQ_FLUSH makes sense, which
* indicates a simple flush request. If there is data, REQ_FLUSH indicates
* that the device cache should be flushed before the data is executed, and
* REQ_FUA means that the data must be on non-volatile media on request
* completion.
*
* If the device doesn't have writeback cache, FLUSH and FUA don't make any
* difference. The requests are either completed immediately if there's no
* data or executed as normal requests otherwise.
*
* If the device has writeback cache and supports FUA, REQ_FLUSH is
* translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
*
* If the device has writeback cache and doesn't support FUA, REQ_FLUSH is
* translated to PREFLUSH and REQ_FUA to POSTFLUSH.
*
* The actual execution of flush is double buffered. Whenever a request
* needs to execute PRE or POSTFLUSH, it queues at
* q->flush_queue[q->flush_pending_idx]. Once certain criteria are met, a
* flush is issued and the pending_idx is toggled. When the flush
* completes, all the requests which were pending are proceeded to the next
* step. This allows arbitrary merging of different types of FLUSH/FUA
* requests.
*
* Currently, the following conditions are used to determine when to issue
* flush.
*
* C1. At any given time, only one flush shall be in progress. This makes
* double buffering sufficient.
*
* C2. Flush is deferred if any request is executing DATA of its sequence.
* This avoids issuing separate POSTFLUSHes for requests which shared
* PREFLUSH.
*
* C3. The second condition is ignored if there is a request which has
* waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
* starvation in the unlikely case where there are continuous stream of
* FUA (without FLUSH) requests.
*
* For devices which support FUA, it isn't clear whether C2 (and thus C3)
* is beneficial.
*
* Note that a sequenced FLUSH/FUA request with DATA is completed twice.
* Once while executing DATA and again after the whole sequence is
* complete. The first completion updates the contained bio but doesn't
* finish it so that the bio submitter is notified only after the whole
* sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
* req_bio_endio().
*
* The above peculiarity requires that each FLUSH/FUA request has only one
* bio attached to it, which is guaranteed as they aren't allowed to be
* merged in the usual way.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include "blk.h"
/* FLUSH/FUA sequences */
enum {
REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
REQ_FSEQ_DONE = (1 << 3),
REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
REQ_FSEQ_POSTFLUSH,
/*
* If flush has been pending longer than the following timeout,
* it's issued even if flush_data requests are still in flight.
*/
FLUSH_PENDING_TIMEOUT = 5 * HZ,
};
static bool blk_kick_flush(struct request_queue *q);
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
unsigned int policy = 0;
if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;
if (fflags & REQ_FLUSH) {
if (rq->cmd_flags & REQ_FLUSH)
policy |= REQ_FSEQ_PREFLUSH;
if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
policy |= REQ_FSEQ_POSTFLUSH;
}
return policy;
}
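/*
 * Worked example (illustrative): a WRITE with data carrying both
 * REQ_FLUSH and REQ_FUA on a queue that advertises REQ_FLUSH but not
 * REQ_FUA yields REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH;
 * on an FUA-capable queue the POSTFLUSH bit is dropped and REQ_FUA is
 * passed through to the device along with the data.
 */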
static unsigned int blk_flush_cur_seq(struct request *rq)
{
return 1 << ffz(rq->flush.seq);
}
static void blk_flush_restore_request(struct request *rq)
{
/*
* After flush data completion, @rq->bio is %NULL but we need to
* complete the bio again. @rq->biotail is guaranteed to equal the
* original @rq->bio. Restore it.
*/
rq->bio = rq->biotail;
/* make @rq a normal request */
rq->cmd_flags &= ~REQ_FLUSH_SEQ;
rq->end_io = rq->flush.saved_end_io;
}
/**
* blk_flush_complete_seq - complete flush sequence
* @rq: FLUSH/FUA request being sequenced
* @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
* @error: whether an error occurred
*
* @rq just completed @seq part of its flush sequence, record the
* completion and trigger the next step.
*
* CONTEXT:
* spin_lock_irq(q->queue_lock)
*
* RETURNS:
* %true if requests were added to the dispatch queue, %false otherwise.
*/
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
int error)
{
struct request_queue *q = rq->q;
struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
bool queued = false;
BUG_ON(rq->flush.seq & seq);
rq->flush.seq |= seq;
if (likely(!error))
seq = blk_flush_cur_seq(rq);
else
seq = REQ_FSEQ_DONE;
switch (seq) {
case REQ_FSEQ_PREFLUSH:
case REQ_FSEQ_POSTFLUSH:
/* queue for flush */
if (list_empty(pending))
q->flush_pending_since = jiffies;
list_move_tail(&rq->flush.list, pending);
break;
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
list_add(&rq->queuelist, &q->queue_head);
queued = true;
break;
case REQ_FSEQ_DONE:
/*
* @rq was previously adjusted by blk_flush_issue() for
* flush sequencing and may already have gone through the
* flush data request completion path. Restore @rq for
* normal completion and end it.
*/
BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
__blk_end_request_all(rq, error);
break;
default:
BUG();
}
return blk_kick_flush(q) | queued;
}
static void flush_end_io(struct request *flush_rq, int error)
{
struct request_queue *q = flush_rq->q;
struct list_head *running = &q->flush_queue[q->flush_running_idx];
bool queued = false;
struct request *rq, *n;
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
/* account completion of the flush request */
q->flush_running_idx ^= 1;
elv_completed_request(q, flush_rq);
/* and push the waiting requests to the next stage */
list_for_each_entry_safe(rq, n, running, flush.list) {
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
queued |= blk_flush_complete_seq(rq, seq, error);
}
/*
* Kick the queue to avoid stall for two cases:
* 1. Moving a request silently to empty queue_head may stall the
* queue.
* 2. When a flush request is running in a non-queueable queue, the
* queue is held. Restart the queue after the flush request is finished
* to avoid stall.
* This function is called from request completion path and calling
* directly into request_fn may confuse the driver. Always use
* kblockd.
*/
if (queued || q->flush_queue_delayed)
blk_run_queue_async(q);
q->flush_queue_delayed = 0;
}
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
*
* Flush related states of @q have changed, consider issuing flush request.
* Please read the comment at the top of this file for more info.
*
* CONTEXT:
* spin_lock_irq(q->queue_lock)
*
* RETURNS:
* %true if flush was issued, %false otherwise.
*/
static bool blk_kick_flush(struct request_queue *q)
{
struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
struct request *first_rq =
list_first_entry(pending, struct request, flush.list);
/* C1 described at the top of this file */
if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
return false;
/* C2 and C3 */
if (!list_empty(&q->flush_data_in_flight) &&
time_before(jiffies,
q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
return false;
/*
* Issue flush and toggle pending_idx. This makes pending_idx
* different from running_idx, which means flush is in flight.
*/
blk_rq_init(q, &q->flush_rq);
q->flush_rq.cmd_type = REQ_TYPE_FS;
q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
q->flush_rq.rq_disk = first_rq->rq_disk;
q->flush_rq.end_io = flush_end_io;
q->flush_pending_idx ^= 1;
list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
return true;
}
static void flush_data_end_io(struct request *rq, int error)
{
struct request_queue *q = rq->q;
/*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
blk_run_queue_async(q);
}
/**
* blk_insert_flush - insert a new FLUSH/FUA request
* @rq: request to insert
*
* To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
* @rq is being submitted. Analyze what needs to be done and put it on the
* right queue.
*
* CONTEXT:
* spin_lock_irq(q->queue_lock)
*/
void blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
unsigned int fflags = q->flush_flags; /* may change, cache */
unsigned int policy = blk_flush_policy(fflags, rq);
/*
* @policy now records what operations need to be done. Adjust
* REQ_FLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_FLUSH;
if (!(fflags & REQ_FUA))
rq->cmd_flags &= ~REQ_FUA;
/*
* An empty flush handed down from a stacking driver may
* translate into nothing if the underlying device does not
* advertise a write-back cache. In this case, simply
* complete the request.
*/
if (!policy) {
__blk_end_bidi_request(rq, 0, 0, 0);
return;
}
BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
/*
* If there's data but flush is not necessary, the request can be
* processed directly without going through flush machinery. Queue
* for normal execution.
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
list_add_tail(&rq->queuelist, &q->queue_head);
return;
}
/*
* @rq should go through flush machinery. Mark it part of flush
* sequence and submit for further processing.
*/
memset(&rq->flush, 0, sizeof(rq->flush));
INIT_LIST_HEAD(&rq->flush.list);
rq->cmd_flags |= REQ_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
rq->end_io = flush_data_end_io;
blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}
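/*
 * Sequencing sketch (illustrative): continuing the example after
 * blk_flush_policy(), on a non-FUA queue the request is first queued on
 * flush_queue[pending] for PREFLUSH; flush_end_io() then advances it to
 * DATA and it is dispatched normally; flush_data_end_io() queues it once
 * more for POSTFLUSH, and only the final REQ_FSEQ_DONE step completes
 * the original bio.
 */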
/**
* blk_abort_flushes - @q is being aborted, abort flush requests
* @q: request_queue being aborted
*
* To be called from elv_abort_queue(). @q is being aborted. Prepare all
* FLUSH/FUA requests for abortion.
*
* CONTEXT:
* spin_lock_irq(q->queue_lock)
*/
void blk_abort_flushes(struct request_queue *q)
{
struct request *rq, *n;
int i;
/*
* Requests in flight for data are already owned by the dispatch
* queue or the device driver. Just restore for normal completion.
*/
list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
}
/*
* We need to give away requests on flush queues. Restore for
* normal completion and put them on the dispatch queue.
*/
for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
list_for_each_entry_safe(rq, n, &q->flush_queue[i],
flush.list) {
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
list_add_tail(&rq->queuelist, &q->queue_head);
}
}
}
static void bio_end_flush(struct bio *bio, int err)
{
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (bio->bi_private)
complete(bio->bi_private);
bio_put(bio);
}
/**
* blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for
* @gfp_mask: memory allocation flags (for bio_alloc)
* @error_sector: error sector
*
* Description:
* Issue a flush for the block device in question. Caller can supply
* room for storing the error offset in case of a flush error, if they
* wish to. The flush is issued synchronously; the call does not return
* until the flush has completed.
*/
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
sector_t *error_sector)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
struct bio *bio;
int ret = 0;
if (bdev->bd_disk == NULL)
return -ENXIO;
q = bdev_get_queue(bdev);
if (!q)
return -ENXIO;
/*
* some block devices may not have their queue correctly set up here
* (e.g. loop device without a backing file) and so issuing a flush
* here will panic. Ensure there is a request function before issuing
* the flush.
*/
if (!q->make_request_fn)
return -ENXIO;
bio = bio_alloc(gfp_mask, 0);
bio->bi_end_io = bio_end_flush;
bio->bi_bdev = bdev;
bio->bi_private = &wait;
bio_get(bio);
submit_bio(WRITE_FLUSH, bio);
wait_for_completion_io(&wait);
/*
* The driver must store the error location in ->bi_sector, if
* it supports it. For non-stacked drivers, this should be
* copied from blk_rq_pos(rq).
*/
if (error_sector)
*error_sector = bio->bi_sector;
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
| gpl-2.0 |
rickyzhang82/linux-allwinner | modules/wifi/nano-c047.12/WiFiEngine/ewpa/src/bignum.c | 136 | 5706 | /*
* Big number math
* Copyright (c) 2006, Jouni Malinen <j@w1.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Alternatively, this software may be distributed under the terms of BSD
* license.
*
* See README and COPYING for more details.
*/
#include "includes.h"
#include "common.h"
#include "bignum.h"
#ifdef CONFIG_INTERNAL_LIBTOMMATH
#include "libtommath.c"
#else /* CONFIG_INTERNAL_LIBTOMMATH */
#include <tommath.h>
#endif /* CONFIG_INTERNAL_LIBTOMMATH */
/*
* The current version is just a wrapper for LibTomMath library, so
* struct bignum is just typecast to mp_int.
*/
/**
* bignum_init - Allocate memory for bignum
* Returns: Pointer to allocated bignum or %NULL on failure
*/
struct bignum * bignum_init(void)
{
struct bignum *n = os_zalloc(sizeof(mp_int));
if (n == NULL)
return NULL;
if (mp_init((mp_int *) n) != MP_OKAY) {
os_free(n);
n = NULL;
}
return n;
}
/**
* bignum_deinit - Free bignum
* @n: Bignum from bignum_init()
*/
void bignum_deinit(struct bignum *n)
{
if (n) {
mp_clear((mp_int *) n);
os_free(n);
}
}
/**
* bignum_get_unsigned_bin_len - Get length of bignum as an unsigned binary buffer
* @n: Bignum from bignum_init()
* Returns: Length of n if written to a binary buffer
*/
size_t bignum_get_unsigned_bin_len(struct bignum *n)
{
return mp_unsigned_bin_size((mp_int *) n);
}
/**
* bignum_get_unsigned_bin - Write a bignum into a buffer as unsigned binary data
* @n: Bignum from bignum_init()
* @buf: Buffer for the binary number
* @len: Length of the buffer, can be %NULL if buffer is known to be long
* enough. Set to used buffer length on success if not %NULL.
* Returns: 0 on success, -1 on failure
*/
int bignum_get_unsigned_bin(const struct bignum *n, u8 *buf, size_t *len)
{
size_t need = mp_unsigned_bin_size((mp_int *) n);
if (len && need > *len) {
*len = need;
return -1;
}
if (mp_to_unsigned_bin((mp_int *) n, buf) != MP_OKAY) {
wpa_printf(MSG_DEBUG, "BIGNUM: %s failed", __func__);
return -1;
}
if (len)
*len = need;
return 0;
}
/**
* bignum_set_unsigned_bin - Set bignum based on unsigned binary buffer
* @n: Bignum from bignum_init(); to be set to the given value
* @buf: Buffer with unsigned binary value
* @len: Length of buf in octets
* Returns: 0 on success, -1 on failure
*/
int bignum_set_unsigned_bin(struct bignum *n, const u8 *buf, size_t len)
{
if (mp_read_unsigned_bin((mp_int *) n, (u8 *) buf, len) != MP_OKAY) {
wpa_printf(MSG_DEBUG, "BIGNUM: %s failed", __func__);
return -1;
}
return 0;
}
/**
* bignum_cmp - Signed comparison
* @a: Bignum from bignum_init()
* @b: Bignum from bignum_init()
* Returns: -1 if a < b, 0 if a == b, 1 if a > b
*/
int bignum_cmp(const struct bignum *a, const struct bignum *b)
{
return mp_cmp((mp_int *) a, (mp_int *) b);
}
/**
* bignum_cmp_d - Compare bignum to a small unsigned integer
* @a: Bignum from bignum_init()
* @b: Small integer
* Returns: -1 if a < b, 0 if a == b, 1 if a > b
*/
int bignum_cmp_d(const struct bignum *a, unsigned long b)
{
return mp_cmp_d((mp_int *) a, b);
}
/**
* bignum_add - c = a + b
* @a: Bignum from bignum_init()
* @b: Bignum from bignum_init()
* @c: Bignum from bignum_init(); used to store the result of a + b
* Returns: 0 on success, -1 on failure
*/
int bignum_add(const struct bignum *a, const struct bignum *b,
struct bignum *c)
{
if (mp_add((mp_int *) a, (mp_int *) b, (mp_int *) c) != MP_OKAY) {
wpa_printf(MSG_DEBUG, "BIGNUM: %s failed", __func__);
return -1;
}
return 0;
}
/**
* bignum_sub - c = a - b
* @a: Bignum from bignum_init()
* @b: Bignum from bignum_init()
* @c: Bignum from bignum_init(); used to store the result of a - b
* Returns: 0 on success, -1 on failure
*/
int bignum_sub(const struct bignum *a, const struct bignum *b,
struct bignum *c)
{
if (mp_sub((mp_int *) a, (mp_int *) b, (mp_int *) c) != MP_OKAY) {
wpa_printf(MSG_DEBUG, "BIGNUM: %s failed", __func__);
return -1;
}
return 0;
}
/**
* bignum_mul - c = a * b
* @a: Bignum from bignum_init()
* @b: Bignum from bignum_init()
* @c: Bignum from bignum_init(); used to store the result of a * b
* Returns: 0 on success, -1 on failure
*/
int bignum_mul(const struct bignum *a, const struct bignum *b,
struct bignum *c)
{
if (mp_mul((mp_int *) a, (mp_int *) b, (mp_int *) c) != MP_OKAY) {
wpa_printf(MSG_DEBUG, "BIGNUM: %s failed", __func__);
return -1;
}
return 0;
}
/**
* bignum_mulmod - d = a * b (mod c)
* @a: Bignum from bignum_init()
* @b: Bignum from bignum_init()
* @c: Bignum from bignum_init(); modulus
* @d: Bignum from bignum_init(); used to store the result of a * b (mod c)
* Returns: 0 on success, -1 on failure
*/
int bignum_mulmod(const struct bignum *a, const struct bignum *b,
const struct bignum *c, struct bignum *d)
{
if (mp_mulmod((mp_int *) a, (mp_int *) b, (mp_int *) c, (mp_int *) d)
!= MP_OKAY) {
wpa_printf(MSG_DEBUG, "BIGNUM: %s failed", __func__);
return -1;
}
return 0;
}
/**
* bignum_exptmod - Modular exponentiation: d = a^b (mod c)
* @a: Bignum from bignum_init(); base
* @b: Bignum from bignum_init(); exponent
* @c: Bignum from bignum_init(); modulus
* @d: Bignum from bignum_init(); used to store the result of a^b (mod c)
* Returns: 0 on success, -1 on failure
*/
int bignum_exptmod(const struct bignum *a, const struct bignum *b,
const struct bignum *c, struct bignum *d)
{
if (mp_exptmod((mp_int *) a, (mp_int *) b, (mp_int *) c, (mp_int *) d)
!= MP_OKAY) {
wpa_printf(MSG_DEBUG, "BIGNUM: %s failed", __func__);
return -1;
}
return 0;
}
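/*
 * Usage sketch (illustrative, error handling elided; g_buf, x_buf, p_buf
 * and rbuf are hypothetical big-endian buffers): computing r = g^x mod p
 * with this wrapper API.
 */
#if 0
	struct bignum *g = bignum_init(), *x = bignum_init();
	struct bignum *p = bignum_init(), *r = bignum_init();
	size_t rlen = sizeof(rbuf);

	bignum_set_unsigned_bin(g, g_buf, g_len);
	bignum_set_unsigned_bin(x, x_buf, x_len);
	bignum_set_unsigned_bin(p, p_buf, p_len);
	bignum_exptmod(g, x, p, r);
	bignum_get_unsigned_bin(r, rbuf, &rlen);

	bignum_deinit(g); bignum_deinit(x);
	bignum_deinit(p); bignum_deinit(r);
#endif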
| gpl-2.0 |
giorgio130/linux-2.6.35-kobo-multitouch | drivers/usb/misc/usbled.c | 648 | 4516 | /*
* USB LED driver - 1.1
*
* Copyright (C) 2004 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
#define DRIVER_AUTHOR "Greg Kroah-Hartman, greg@kroah.com"
#define DRIVER_DESC "USB LED Driver"
#define VENDOR_ID 0x0fc5
#define PRODUCT_ID 0x1223
/* table of devices that work with this driver */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE (usb, id_table);
struct usb_led {
struct usb_device * udev;
unsigned char blue;
unsigned char red;
unsigned char green;
};
#define BLUE 0x04
#define RED 0x02
#define GREEN 0x01
static void change_color(struct usb_led *led)
{
int retval;
unsigned char color = 0x07;
unsigned char *buffer;
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer) {
dev_err(&led->udev->dev, "out of memory\n");
return;
}
if (led->blue)
color &= ~(BLUE);
if (led->red)
color &= ~(RED);
if (led->green)
color &= ~(GREEN);
dev_dbg(&led->udev->dev,
"blue = %d, red = %d, green = %d, color = %.2x\n",
led->blue, led->red, led->green, color);
retval = usb_control_msg(led->udev,
usb_sndctrlpipe(led->udev, 0),
0x12,
0xc8,
(0x02 * 0x100) + 0x0a,
(0x00 * 0x100) + color,
buffer,
8,
2000);
if (retval)
dev_dbg(&led->udev->dev, "retval = %d\n", retval);
kfree(buffer);
}
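/*
 * Illustrative note: the device drives its LEDs active-low, so color
 * starts at 0x07 (all off) and bits are cleared to light an LED. With
 * led->red == 1 and the others 0, color ends up 0x07 & ~RED == 0x05,
 * which becomes the low byte of wIndex in the control message above.
 */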
#define show_set(value) \
static ssize_t show_##value(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_led *led = usb_get_intfdata(intf); \
\
return sprintf(buf, "%d\n", led->value); \
} \
static ssize_t set_##value(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_led *led = usb_get_intfdata(intf); \
int temp = simple_strtoul(buf, NULL, 10); \
\
led->value = temp; \
change_color(led); \
return count; \
} \
static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
show_set(blue);
show_set(red);
show_set(green);
static int led_probe(struct usb_interface *interface, const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_led *dev = NULL;
int retval = -ENOMEM;
dev = kzalloc(sizeof(struct usb_led), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
goto error_mem;
}
dev->udev = usb_get_dev(udev);
usb_set_intfdata (interface, dev);
retval = device_create_file(&interface->dev, &dev_attr_blue);
if (retval)
goto error;
retval = device_create_file(&interface->dev, &dev_attr_red);
if (retval)
goto error;
retval = device_create_file(&interface->dev, &dev_attr_green);
if (retval)
goto error;
dev_info(&interface->dev, "USB LED device now attached\n");
return 0;
error:
device_remove_file(&interface->dev, &dev_attr_blue);
device_remove_file(&interface->dev, &dev_attr_red);
device_remove_file(&interface->dev, &dev_attr_green);
usb_set_intfdata (interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
error_mem:
return retval;
}
static void led_disconnect(struct usb_interface *interface)
{
struct usb_led *dev;
dev = usb_get_intfdata (interface);
device_remove_file(&interface->dev, &dev_attr_blue);
device_remove_file(&interface->dev, &dev_attr_red);
device_remove_file(&interface->dev, &dev_attr_green);
/* first remove the files, then set the pointer to NULL */
usb_set_intfdata (interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
dev_info(&interface->dev, "USB LED now disconnected\n");
}
static struct usb_driver led_driver = {
.name = "usbled",
.probe = led_probe,
.disconnect = led_disconnect,
.id_table = id_table,
};
static int __init usb_led_init(void)
{
int retval = 0;
retval = usb_register(&led_driver);
if (retval)
err("usb_register failed. Error number %d", retval);
return retval;
}
static void __exit usb_led_exit(void)
{
usb_deregister(&led_driver);
}
module_init (usb_led_init);
module_exit (usb_led_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
yseung123/android_kernel_oneplus_msm8994 | kernel/irq/proc.c | 904 | 12642 | /*
* linux/kernel/irq/proc.c
*
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
*
* This file contains the /proc/irq/ handling code.
*/
#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include "internals.h"
static struct proc_dir_entry *root_irq_dir;
#ifdef CONFIG_SMP
static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
const struct cpumask *mask = desc->irq_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (irqd_is_setaffinity_pending(&desc->irq_data))
mask = desc->pending_mask;
#endif
if (type)
seq_cpumask_list(m, mask);
else
seq_cpumask(m, mask);
seq_putc(m, '\n');
return 0;
}
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long)m->private);
unsigned long flags;
cpumask_var_t mask;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->affinity_hint)
cpumask_copy(mask, desc->affinity_hint);
raw_spin_unlock_irqrestore(&desc->lock, flags);
seq_cpumask(m, mask);
seq_putc(m, '\n');
free_cpumask_var(mask);
return 0;
}
#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif
int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
return show_irq_affinity(0, m, v);
}
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
return show_irq_affinity(1, m, v);
}
static ssize_t write_irq_affinity(int type, struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
cpumask_var_t new_value;
int err;
if (!irq_can_set_affinity(irq) || no_irq_affinity)
return -EIO;
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
if (type)
err = cpumask_parselist_user(buffer, count, new_value);
else
err = cpumask_parse_user(buffer, count, new_value);
if (err)
goto free_cpumask;
if (!is_affinity_mask_valid(new_value)) {
err = -EINVAL;
goto free_cpumask;
}
/*
* Do not allow disabling IRQs completely - it's too easy a
* way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
if (!cpumask_intersects(new_value, cpu_online_mask)) {
/* Special case for empty set - allow the architecture
code to set default SMP affinity. */
err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
} else {
irq_set_affinity(irq, new_value);
err = count;
}
free_cpumask:
free_cpumask_var(new_value);
return err;
}
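/*
 * Usage sketch (illustrative; IRQ 24 is an arbitrary example): both proc
 * files funnel into this helper, e.g.
 *
 *	echo 3 > /proc/irq/24/smp_affinity		# bitmask: CPUs 0-1
 *	echo 0-1 > /proc/irq/24/smp_affinity_list	# same set as a list
 *
 * A mask that does not intersect the online CPUs is treated as a request
 * for the default affinity via irq_select_affinity_usr().
 */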
static ssize_t irq_affinity_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
return write_irq_affinity(0, file, buffer, count, pos);
}
static ssize_t irq_affinity_list_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
return write_irq_affinity(1, file, buffer, count, pos);
}
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}
static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}
static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}
static const struct file_operations irq_affinity_proc_fops = {
.open = irq_affinity_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = irq_affinity_proc_write,
};
static const struct file_operations irq_affinity_hint_proc_fops = {
.open = irq_affinity_hint_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations irq_affinity_list_proc_fops = {
.open = irq_affinity_list_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = irq_affinity_list_proc_write,
};
static int default_affinity_show(struct seq_file *m, void *v)
{
seq_cpumask(m, irq_default_affinity);
seq_putc(m, '\n');
return 0;
}
static ssize_t default_affinity_write(struct file *file,
const char __user *buffer, size_t count, loff_t *ppos)
{
cpumask_var_t new_value;
int err;
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(buffer, count, new_value);
if (err)
goto out;
if (!is_affinity_mask_valid(new_value)) {
err = -EINVAL;
goto out;
}
/*
* Do not allow disabling IRQs completely - it's too easy a
* way to make the system unusable accidentally :-) At least
* one online CPU still has to be targeted.
*/
if (!cpumask_intersects(new_value, cpu_online_mask)) {
err = -EINVAL;
goto out;
}
cpumask_copy(irq_default_affinity, new_value);
err = count;
out:
free_cpumask_var(new_value);
return err;
}
static int default_affinity_open(struct inode *inode, struct file *file)
{
return single_open(file, default_affinity_show, PDE_DATA(inode));
}
static const struct file_operations default_affinity_proc_fops = {
.open = default_affinity_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = default_affinity_write,
};
static int irq_node_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long) m->private);
seq_printf(m, "%d\n", desc->irq_data.node);
return 0;
}
static int irq_node_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}
static const struct file_operations irq_node_proc_fops = {
.open = irq_node_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long) m->private);
seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
desc->irq_count, desc->irqs_unhandled,
jiffies_to_msecs(desc->last_unhandled));
return 0;
}
static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}
static const struct file_operations irq_spurious_proc_fops = {
.open = irq_spurious_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int irq_wake_depth_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long) m->private);
seq_printf(m, "wake_depth %u\n", desc->wake_depth);
return 0;
}
static int irq_wake_depth_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_wake_depth_proc_show, PDE_DATA(inode));
}
static const struct file_operations irq_wake_depth_proc_fops = {
.open = irq_wake_depth_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int irq_disable_depth_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long) m->private);
seq_printf(m, "disable_depth %u\n", desc->depth);
return 0;
}
static int irq_disable_depth_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_disable_depth_proc_show, PDE_DATA(inode));
}
static const struct file_operations irq_disable_depth_proc_fops = {
.open = irq_disable_depth_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#define MAX_NAMELEN 128
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
unsigned long flags;
int ret = 1;
raw_spin_lock_irqsave(&desc->lock, flags);
for (action = desc->action ; action; action = action->next) {
if ((action != new_action) && action->name &&
!strcmp(new_action->name, action->name)) {
ret = 0;
break;
}
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
char name [MAX_NAMELEN];
struct irq_desc *desc = irq_to_desc(irq);
if (!desc->dir || action->dir || !action->name ||
!name_unique(irq, action))
return;
memset(name, 0, MAX_NAMELEN);
snprintf(name, MAX_NAMELEN, "%s", action->name);
/* create /proc/irq/1234/handler/ */
action->dir = proc_mkdir(name, desc->dir);
}
#undef MAX_NAMELEN
#define MAX_NAMELEN 10
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
char name [MAX_NAMELEN];
if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
return;
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%d", irq);
/* create /proc/irq/1234 */
desc->dir = proc_mkdir(name, root_irq_dir);
if (!desc->dir)
return;
#ifdef CONFIG_SMP
/* create /proc/irq/<irq>/smp_affinity */
proc_create_data("smp_affinity", 0600, desc->dir,
&irq_affinity_proc_fops, (void *)(long)irq);
/* create /proc/irq/<irq>/affinity_hint */
proc_create_data("affinity_hint", 0400, desc->dir,
&irq_affinity_hint_proc_fops, (void *)(long)irq);
/* create /proc/irq/<irq>/smp_affinity_list */
proc_create_data("smp_affinity_list", 0600, desc->dir,
&irq_affinity_list_proc_fops, (void *)(long)irq);
proc_create_data("node", 0444, desc->dir,
&irq_node_proc_fops, (void *)(long)irq);
#endif
proc_create_data("spurious", 0444, desc->dir,
&irq_spurious_proc_fops, (void *)(long)irq);
proc_create_data("disable_depth", 0444, desc->dir,
&irq_disable_depth_proc_fops, (void *)(long)irq);
proc_create_data("wake_depth", 0444, desc->dir,
&irq_wake_depth_proc_fops, (void *)(long)irq);
}
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
char name [MAX_NAMELEN];
if (!root_irq_dir || !desc->dir)
return;
#ifdef CONFIG_SMP
remove_proc_entry("smp_affinity", desc->dir);
remove_proc_entry("affinity_hint", desc->dir);
remove_proc_entry("smp_affinity_list", desc->dir);
remove_proc_entry("node", desc->dir);
#endif
remove_proc_entry("spurious", desc->dir);
memset(name, 0, MAX_NAMELEN);
sprintf(name, "%u", irq);
remove_proc_entry(name, root_irq_dir);
}
#undef MAX_NAMELEN
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
proc_remove(action->dir);
}
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
proc_create("irq/default_smp_affinity", 0600, NULL,
&default_affinity_proc_fops);
#endif
}
void init_irq_proc(void)
{
unsigned int irq;
struct irq_desc *desc;
/* create /proc/irq */
root_irq_dir = proc_mkdir("irq", NULL);
if (!root_irq_dir)
return;
register_default_affinity_proc();
/*
* Create entries for all existing IRQs.
*/
for_each_irq_desc(irq, desc) {
if (!desc)
continue;
register_irq_proc(irq, desc);
}
}
#ifdef CONFIG_GENERIC_IRQ_SHOW
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
return 0;
}
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
int show_interrupts(struct seq_file *p, void *v)
{
static int prec;
unsigned long flags, any_count = 0;
int i = *(loff_t *) v, j;
struct irqaction *action;
struct irq_desc *desc;
if (i > ACTUAL_NR_IRQS)
return 0;
if (i == ACTUAL_NR_IRQS)
return arch_show_interrupts(p, prec);
/* print header and calculate the width of the first column */
if (i == 0) {
for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
j *= 10;
seq_printf(p, "%*s", prec + 8, "");
for_each_online_cpu(j)
seq_printf(p, "CPU%-8d", j);
seq_putc(p, '\n');
}
desc = irq_to_desc(i);
if (!desc)
return 0;
raw_spin_lock_irqsave(&desc->lock, flags);
for_each_online_cpu(j)
any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
if (!action && !any_count)
goto out;
seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
if (desc->irq_data.chip) {
if (desc->irq_data.chip->irq_print_chip)
desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
else if (desc->irq_data.chip->name)
seq_printf(p, " %8s", desc->irq_data.chip->name);
else
seq_printf(p, " %8s", "-");
} else {
seq_printf(p, " %8s", "None");
}
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
if (desc->name)
seq_printf(p, "-%-8s", desc->name);
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(p, ", %s", action->name);
}
seq_putc(p, '\n');
out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
#endif
| gpl-2.0 |
XePeleato/ALE-L21_ESAL | drivers/dma/ioat/dma_v2.c | 1672 | 25026 | /*
* Intel I/OAT DMA Linux driver
* Copyright(c) 2004 - 2009 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
*/
/*
* This driver supports an Intel I/OAT DMA engine (versions >= 2), which
* does asynchronous data movement and checksumming operations.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"
#include "../dmaengine.h"
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
"ioat2+: allocate 2^n descriptors per channel"
" (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
"ioat2+: upper limit for ring size (default: 16)");
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
ioat->dmacount += ioat2_ring_pending(ioat);
ioat->issued = ioat->head;
writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
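/*
 * Bookkeeping sketch (illustrative): with head == 5 and issued == 2,
 * ioat2_ring_pending() reports three unsubmitted descriptors; the write
 * to IOAT_CHAN_DMACOUNT_OFFSET hands all of them to hardware at once,
 * dmacount grows by three and issued catches up to head.
 */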
void ioat2_issue_pending(struct dma_chan *c)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
if (ioat2_ring_pending(ioat)) {
spin_lock_bh(&ioat->prep_lock);
__ioat2_issue_pending(ioat);
spin_unlock_bh(&ioat->prep_lock);
}
}
/**
* ioat2_update_pending - log pending descriptors
* @ioat: ioat2+ channel
*
* Check if the number of unsubmitted descriptors has exceeded the
* watermark. Called with prep_lock held
*/
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
if (ioat2_ring_pending(ioat) > ioat_pending_level)
__ioat2_issue_pending(ioat);
}
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
struct ioat_ring_ent *desc;
struct ioat_dma_descriptor *hw;
if (ioat2_ring_space(ioat) < 1) {
dev_err(to_dev(&ioat->base),
"Unable to start null desc - ring full\n");
return;
}
dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
desc = ioat2_get_ring_ent(ioat, ioat->head);
hw = desc->hw;
hw->ctl = 0;
hw->ctl_f.null = 1;
hw->ctl_f.int_en = 1;
hw->ctl_f.compl_write = 1;
/* set size to non-zero value (channel returns error when size is 0) */
hw->size = NULL_DESC_BUFFER_SIZE;
hw->src_addr = 0;
hw->dst_addr = 0;
async_tx_ack(&desc->txd);
ioat2_set_chainaddr(ioat, desc->txd.phys);
dump_desc_dbg(ioat, desc);
wmb();
ioat->head += 1;
__ioat2_issue_pending(ioat);
}
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
spin_lock_bh(&ioat->prep_lock);
__ioat2_start_null_desc(ioat);
spin_unlock_bh(&ioat->prep_lock);
}
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct dma_async_tx_descriptor *tx;
struct ioat_ring_ent *desc;
bool seen_current = false;
u16 active;
int idx = ioat->tail, i;
dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued);
active = ioat2_ring_active(ioat);
for (i = 0; i < active && !seen_current; i++) {
smp_read_barrier_depends();
prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
desc = ioat2_get_ring_ent(ioat, idx + i);
tx = &desc->txd;
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
dma_cookie_complete(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
}
}
if (tx->phys == phys_complete)
seen_current = true;
}
smp_mb(); /* finish all descriptor reads before incrementing tail */
ioat->tail = idx + i;
BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
if (active - i == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
}
/**
* ioat2_cleanup - clean finished descriptors (advance tail pointer)
* @ioat: ioat channel to be cleaned up
*/
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
dma_addr_t phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
spin_unlock_bh(&chan->cleanup_lock);
}
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
ioat2_cleanup(ioat);
if (!test_bit(IOAT_RUN, &chan->state))
return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
/* set the tail to be re-issued */
ioat->issued = ioat->tail;
ioat->dmacount = 0;
set_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
if (ioat2_ring_pending(ioat)) {
struct ioat_ring_ent *desc;
desc = ioat2_get_ring_ent(ioat, ioat->tail);
ioat2_set_chainaddr(ioat, desc->txd.phys);
__ioat2_issue_pending(ioat);
} else
__ioat2_start_null_desc(ioat);
}
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
unsigned long end = jiffies + tmo;
int err = 0;
u32 status;
status = ioat_chansts(chan);
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
if (tmo && time_after(jiffies, end)) {
err = -ETIMEDOUT;
break;
}
status = ioat_chansts(chan);
cpu_relax();
}
return err;
}
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
unsigned long end = jiffies + tmo;
int err = 0;
ioat_reset(chan);
while (ioat_reset_pending(chan)) {
if (end && time_after(jiffies, end)) {
err = -ETIMEDOUT;
break;
}
cpu_relax();
}
return err;
}
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
dma_addr_t phys_complete;
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
__ioat2_restart_chan(ioat);
}
static void check_active(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
if (ioat2_ring_active(ioat)) {
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
return;
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step
* down the size
*/
reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum
* default size
*/
if (ioat->alloc_order > ioat_get_alloc_order())
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
}
void ioat2_timer_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
/* when halted due to errors check for channel
* programming errors before advancing the completion state
*/
if (is_ioat_halted(status)) {
u32 chanerr;
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
__func__, chanerr);
if (test_bit(IOAT_RUN, &chan->state))
BUG_ON(is_ioat_bug(chanerr));
else /* we never got off the ground */
return;
}
/* if we haven't made progress and we have already
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
spin_lock_bh(&ioat->prep_lock);
ioat2_restart_channel(ioat);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
return;
} else {
set_bit(IOAT_COMPLETION_ACK, &chan->state);
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
}
if (ioat2_ring_active(ioat))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
else {
spin_lock_bh(&ioat->prep_lock);
check_active(ioat);
spin_unlock_bh(&ioat->prep_lock);
}
spin_unlock_bh(&chan->cleanup_lock);
}
static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
/* throw away whatever the channel was doing and get it initialized */
u32 chanerr;
ioat2_quiesce(chan, msecs_to_jiffies(100));
chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}
/**
* ioat2_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
*/
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
struct ioat2_dma_chan *ioat;
struct device *dev = &device->pdev->dev;
struct dma_device *dma = &device->common;
u8 xfercap_log;
int i;
INIT_LIST_HEAD(&dma->channels);
dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
dma->chancnt &= 0x1f; /* bits [4:0] valid */
if (dma->chancnt > ARRAY_SIZE(device->idx)) {
dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
dma->chancnt, ARRAY_SIZE(device->idx));
dma->chancnt = ARRAY_SIZE(device->idx);
}
xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0)
return 0;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
dma->chancnt--;
#endif
for (i = 0; i < dma->chancnt; i++) {
ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
if (!ioat)
break;
ioat_init_channel(device, &ioat->base, i);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->prep_lock);
if (device->reset_hw(&ioat->base)) {
i = 0;
break;
}
}
dma->chancnt = i;
return i;
}
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
struct dma_chan *c = tx->chan;
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
dma_cookie_t cookie;
cookie = dma_cookie_assign(tx);
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
/* make descriptor updates visible before advancing ioat->head,
* this is purposefully not smp_wmb() since we are also
* publishing the descriptor updates to a dma device
*/
wmb();
ioat->head += ioat->produce;
ioat2_update_pending(ioat);
spin_unlock_bh(&ioat->prep_lock);
return cookie;
}
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
struct ioatdma_device *dma;
dma_addr_t phys;
dma = to_ioatdma_device(chan->device);
hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
if (!hw)
return NULL;
memset(hw, 0, sizeof(*hw));
desc = kmem_cache_zalloc(ioat2_cache, flags);
if (!desc) {
pci_pool_free(dma->dma_pool, hw, phys);
return NULL;
}
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = ioat2_tx_submit_unlock;
desc->hw = hw;
desc->txd.phys = phys;
return desc;
}
static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
struct ioatdma_device *dma;
dma = to_ioatdma_device(chan->device);
pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
kmem_cache_free(ioat2_cache, desc);
}
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
struct ioat_ring_ent **ring;
int descs = 1 << order;
int i;
if (order > ioat_get_max_alloc_order())
return NULL;
/* allocate the array to hold the software ring */
ring = kcalloc(descs, sizeof(*ring), flags);
if (!ring)
return NULL;
for (i = 0; i < descs; i++) {
ring[i] = ioat2_alloc_ring_ent(c, flags);
if (!ring[i]) {
while (i--)
ioat2_free_ring_ent(ring[i], c);
kfree(ring);
return NULL;
}
set_desc_id(ring[i], i);
}
/* link descs */
for (i = 0; i < descs-1; i++) {
struct ioat_ring_ent *next = ring[i+1];
struct ioat_dma_descriptor *hw = ring[i]->hw;
hw->next = next->txd.phys;
}
ring[i]->hw->next = ring[0]->txd.phys;
return ring;
}
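/* Editor's sketch (not part of the original driver): the ring is kept at a
* power-of-two size so that monotonically growing positions such as
* ioat->head map to an array slot with a cheap mask instead of a modulo. A
* minimal illustration of that wrap, assuming a u16 position counter as
* used above, is kept under #if 0 so it is never compiled.
*/
#if 0
static u16 example_ring_slot(u16 pos, int order)
{
return pos & ((1 << order) - 1); /* e.g. order 8 -> mask 0xff */
}
#endif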
void ioat2_free_chan_resources(struct dma_chan *c);
/**
* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
* @c: channel to be initialized
*/
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent **ring;
u64 status;
int order;
int i = 0;
/* have we already been set up? */
if (ioat->ring)
return 1 << ioat->alloc_order;
/* Setup register to interrupt and write completion status on error */
writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
chan->completion = pci_pool_alloc(chan->device->completion_pool,
GFP_KERNEL, &chan->completion_dma);
if (!chan->completion)
return -ENOMEM;
memset(chan->completion, 0, sizeof(*chan->completion));
writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
order = ioat_get_alloc_order();
ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
if (!ring)
return -ENOMEM;
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
ioat->ring = ring;
ioat->head = 0;
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
do {
udelay(1);
status = ioat_chansts(chan);
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
if (is_ioat_active(status) || is_ioat_idle(status)) {
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
dev_WARN(to_dev(chan),
"failed to start channel chanerr: %#x\n", chanerr);
ioat2_free_chan_resources(c);
return -EFAULT;
}
}
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
/* reshape differs from normal ring allocation in that we want
* to allocate a new software ring while only
* extending/truncating the hardware ring
*/
struct ioat_chan_common *chan = &ioat->base;
struct dma_chan *c = &chan->common;
const u32 curr_size = ioat2_ring_size(ioat);
const u16 active = ioat2_ring_active(ioat);
const u32 new_size = 1 << order;
struct ioat_ring_ent **ring;
u16 i;
if (order > ioat_get_max_alloc_order())
return false;
/* double check that we have at least 1 free descriptor */
if (active == curr_size)
return false;
/* when shrinking, verify that we can hold the current active
* set in the new ring
*/
if (active >= new_size)
return false;
/* allocate the array to hold the software ring */
ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
if (!ring)
return false;
/* allocate/trim descriptors as needed */
if (new_size > curr_size) {
/* copy current descriptors to the new ring */
for (i = 0; i < curr_size; i++) {
u16 curr_idx = (ioat->tail+i) & (curr_size-1);
u16 new_idx = (ioat->tail+i) & (new_size-1);
ring[new_idx] = ioat->ring[curr_idx];
set_desc_id(ring[new_idx], new_idx);
}
/* add new descriptors to the ring */
for (i = curr_size; i < new_size; i++) {
u16 new_idx = (ioat->tail+i) & (new_size-1);
ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
if (!ring[new_idx]) {
while (i--) {
u16 new_idx = (ioat->tail+i) & (new_size-1);
ioat2_free_ring_ent(ring[new_idx], c);
}
kfree(ring);
return false;
}
set_desc_id(ring[new_idx], new_idx);
}
/* hw link new descriptors */
for (i = curr_size-1; i < new_size; i++) {
u16 new_idx = (ioat->tail+i) & (new_size-1);
struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
hw->next = next->txd.phys;
}
} else {
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *next;
/* copy current descriptors to the new ring, dropping the
* removed descriptors
*/
for (i = 0; i < new_size; i++) {
u16 curr_idx = (ioat->tail+i) & (curr_size-1);
u16 new_idx = (ioat->tail+i) & (new_size-1);
ring[new_idx] = ioat->ring[curr_idx];
set_desc_id(ring[new_idx], new_idx);
}
/* free deleted descriptors */
for (i = new_size; i < curr_size; i++) {
struct ioat_ring_ent *ent;
ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
ioat2_free_ring_ent(ent, c);
}
/* fix up hardware ring */
hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
next = ring[(ioat->tail+new_size) & (new_size-1)];
hw->next = next->txd.phys;
}
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, new_size);
kfree(ioat->ring);
ioat->ring = ring;
ioat->alloc_order = order;
return true;
}
/**
* ioat2_check_space_lock - verify space and grab ring producer lock
* @ioat: ioat2,3 channel (ring) to operate on
* @num_descs: allocation length
*/
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
struct ioat_chan_common *chan = &ioat->base;
bool retry;
retry:
spin_lock_bh(&ioat->prep_lock);
/* never allow the last descriptor to be consumed, we need at
* least one free at all times to allow for on-the-fly ring
* resizing.
*/
if (likely(ioat2_ring_space(ioat) > num_descs)) {
dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
ioat->produce = num_descs;
return 0; /* with ioat->prep_lock held */
}
retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
/* is another cpu already trying to expand the ring? */
if (retry)
goto retry;
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
retry = reshape_ring(ioat, ioat->alloc_order + 1);
clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
/* if we were able to expand the ring retry the allocation */
if (retry)
goto retry;
if (printk_ratelimit())
dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
/* progress reclaim in the allocation failure case: we may be
* called with bottom halves disabled, so we need to trigger the
* timer event directly
*/
if (time_after(jiffies, chan->timer.expires) && timer_pending(&chan->timer)) {
struct ioatdma_device *device = chan->device;
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
device->timer_fn((unsigned long) &chan->common);
}
return -ENOMEM;
}
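/* Editor's note (illustrative): on success, ioat2_check_space_lock()
* returns with ioat->prep_lock still held; the matching unlock happens in
* ioat2_tx_submit_unlock() once the caller submits the descriptors it
* reserved, which is what keeps submission in ring order.
*/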
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
dma_addr_t dma_src, size_t len, unsigned long flags)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
dma_addr_t dst = dma_dest;
dma_addr_t src = dma_src;
size_t total_len = len;
int num_descs, idx, i;
num_descs = ioat2_xferlen_to_descs(ioat, len);
if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
idx = ioat->head;
else
return NULL;
i = 0;
do {
size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);
desc = ioat2_get_ring_ent(ioat, idx + i);
hw = desc->hw;
hw->size = copy;
hw->ctl = 0;
hw->src_addr = src;
hw->dst_addr = dst;
len -= copy;
dst += copy;
src += copy;
dump_desc_dbg(ioat, desc);
} while (++i < num_descs);
desc->txd.flags = flags;
desc->len = total_len;
hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
hw->ctl_f.compl_write = 1;
dump_desc_dbg(ioat, desc);
/* we leave the channel locked to ensure in order submission */
return &desc->txd;
}
/**
* ioat2_free_chan_resources - release all the descriptors
* @c: the channel to be cleaned
*/
void ioat2_free_chan_resources(struct dma_chan *c)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base;
struct ioatdma_device *device = chan->device;
struct ioat_ring_ent *desc;
const u16 total_descs = 1 << ioat->alloc_order;
int descs;
int i;
/* Before freeing channel resources first check
* if they have been previously allocated for this channel.
*/
if (!ioat->ring)
return;
ioat_stop(chan);
device->reset_hw(chan);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
descs = ioat2_ring_space(ioat);
dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
for (i = 0; i < descs; i++) {
desc = ioat2_get_ring_ent(ioat, ioat->head + i);
ioat2_free_ring_ent(desc, c);
}
if (descs < total_descs)
dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
total_descs - descs);
for (i = 0; i < total_descs - descs; i++) {
desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
dump_desc_dbg(ioat, desc);
ioat2_free_ring_ent(desc, c);
}
kfree(ioat->ring);
ioat->ring = NULL;
ioat->alloc_order = 0;
pci_pool_free(device->completion_pool, chan->completion,
chan->completion_dma);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
chan->last_completion = 0;
chan->completion_dma = 0;
ioat->dmacount = 0;
}
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
/* ...taken outside the lock, no need to be precise */
return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
static struct attribute *ioat2_attrs[] = {
&ring_size_attr.attr,
&ring_active_attr.attr,
&ioat_cap_attr.attr,
&ioat_version_attr.attr,
NULL,
};
struct kobj_type ioat2_ktype = {
.sysfs_ops = &ioat_sysfs_ops,
.default_attrs = ioat2_attrs,
};
int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
struct pci_dev *pdev = device->pdev;
struct dma_device *dma;
struct dma_chan *c;
struct ioat_chan_common *chan;
int err;
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat2_reset_hw;
device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
dma->device_tx_status = ioat_dma_tx_status;
err = ioat_probe(device);
if (err)
return err;
ioat_set_tcp_copy_break(2048);
list_for_each_entry(c, &dma->channels, device_node) {
chan = to_chan_common(c);
writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
chan->reg_base + IOAT_DCACTRL_OFFSET);
}
err = ioat_register(device);
if (err)
return err;
ioat_kobject_add(device, &ioat2_ktype);
if (dca)
device->dca = ioat2_dca_init(pdev, device->reg_base);
return err;
}
| gpl-2.0 |
TeamWin/android_kernel_htc_a32e | arch/arm/mach-tegra/platsmp.c | 1928 | 4799 | /*
* linux/arch/arm/mach-tegra/platsmp.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* Copyright (C) 2009 Palm
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/clk/tegra.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <asm/smp_scu.h>
#include <asm/smp_plat.h>
#include "fuse.h"
#include "flowctrl.h"
#include "reset.h"
#include "pmc.h"
#include "common.h"
#include "iomap.h"
static cpumask_t tegra_cpu_init_mask;
static void __cpuinit tegra_secondary_init(unsigned int cpu)
{
cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
}
static int tegra20_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
cpu = cpu_logical_map(cpu);
/*
* Force the CPU into reset. The CPU must remain in reset when
* the flow controller state is cleared (which will cause the
* flow controller to stop driving reset if the CPU has been
* power-gated via the flow controller). This will have no
* effect on first boot of the CPU since it should already be
* in reset.
*/
tegra_put_cpu_in_reset(cpu);
/*
* Unhalt the CPU. If the flow controller was used to
* power-gate the CPU this will cause the flow controller to
* stop driving reset. The CPU will remain in reset because the
* clock and reset block is now driving reset.
*/
flowctrl_write_cpu_halt(cpu, 0);
tegra_enable_cpu_clock(cpu);
flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
tegra_cpu_out_of_reset(cpu);
return 0;
}
static int tegra30_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int ret;
unsigned long timeout;
cpu = cpu_logical_map(cpu);
tegra_put_cpu_in_reset(cpu);
flowctrl_write_cpu_halt(cpu, 0);
/*
* The power-up sequences for a cold boot CPU and a warm boot CPU
* are different.
*
* For a warm boot CPU that is resumed from CPU hotplug, power
* will be restored automatically after un-halting the flow
* controller of the warm boot CPU. We need to wait for
* confirmation that the CPU is powered before removing the
* I/O clamps.
* For a cold boot CPU, do not wait. After the cold boot CPU is
* booted, it will run to tegra_secondary_init() and set
* tegra_cpu_init_mask, which influences what tegra30_boot_secondary()
* does the next time around.
*/
if (cpumask_test_cpu(cpu, &tegra_cpu_init_mask)) {
timeout = jiffies + msecs_to_jiffies(50);
do {
if (tegra_pmc_cpu_is_powered(cpu))
goto remove_clamps;
udelay(10);
} while (time_before(jiffies, timeout));
}
/*
* The power status of the cold boot CPU is power gated as
* default. To power up the cold boot CPU, the power should
* be un-gated by un-toggling the power gate register
* manually.
*/
if (!tegra_pmc_cpu_is_powered(cpu)) {
ret = tegra_pmc_cpu_power_on(cpu);
if (ret)
return ret;
/* Wait for the power to come up. */
timeout = jiffies + msecs_to_jiffies(100);
while (tegra_pmc_cpu_is_powered(cpu)) {
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
udelay(10);
}
}
remove_clamps:
/* CPU partition is powered. Enable the CPU clock. */
tegra_enable_cpu_clock(cpu);
udelay(10);
/* Remove I/O clamps. */
ret = tegra_pmc_cpu_remove_clamping(cpu);
if (ret)
return ret;
udelay(10);
flowctrl_write_cpu_csr(cpu, 0); /* Clear flow controller CSR. */
tegra_cpu_out_of_reset(cpu);
return 0;
}
static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
cpu = cpu_logical_map(cpu);
return tegra_pmc_cpu_power_on(cpu);
}
static int __cpuinit tegra_boot_secondary(unsigned int cpu,
struct task_struct *idle)
{
if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20)
return tegra20_boot_secondary(cpu, idle);
if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) && tegra_chip_id == TEGRA30)
return tegra30_boot_secondary(cpu, idle);
if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) && tegra_chip_id == TEGRA114)
return tegra114_boot_secondary(cpu, idle);
return -EINVAL;
}
static void __init tegra_smp_prepare_cpus(unsigned int max_cpus)
{
/* Always mark the boot CPU (CPU0) as initialized. */
cpumask_set_cpu(0, &tegra_cpu_init_mask);
if (scu_a9_has_base())
scu_enable(IO_ADDRESS(scu_a9_get_base()));
}
struct smp_operations tegra_smp_ops __initdata = {
.smp_prepare_cpus = tegra_smp_prepare_cpus,
.smp_secondary_init = tegra_secondary_init,
.smp_boot_secondary = tegra_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = tegra_cpu_kill,
.cpu_die = tegra_cpu_die,
#endif
};
| gpl-2.0 |
sudosurootdev/kernel_samsung_u8500 | drivers/media/video/v4l2-mem2mem.c | 2440 | 17714 | /*
* Memory-to-memory device framework for Video for Linux 2 and videobuf.
*
* Helper functions for devices that use videobuf buffers for both their
* source and destination.
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <pawel@osciak.com>
* Marek Szyprowski, <m.szyprowski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
static bool debug;
module_param(debug, bool, 0644);
#define dprintk(fmt, arg...) \
do { \
if (debug) \
printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
} while (0)
/* Instance is already queued on the job_queue */
#define TRANS_QUEUED (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING (1 << 1)
/* Offset base for buffers on the destination queue - used to distinguish
* between source and destination buffers when mmapping - they receive the same
* offsets but for different queues */
#define DST_QUEUE_OFF_BASE (1 << 30)
/**
* struct v4l2_m2m_dev - per-device context
* @curr_ctx: currently running instance
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
* @m2m_ops: driver callbacks
*/
struct v4l2_m2m_dev {
struct v4l2_m2m_ctx *curr_ctx;
struct list_head job_queue;
spinlock_t job_spinlock;
struct v4l2_m2m_ops *m2m_ops;
};
static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
return &m2m_ctx->out_q_ctx;
else
return &m2m_ctx->cap_q_ctx;
}
/**
* v4l2_m2m_get_vq() - return vb2_queue for the given type
*/
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
struct v4l2_m2m_queue_ctx *q_ctx;
q_ctx = get_queue_ctx(m2m_ctx, type);
if (!q_ctx)
return NULL;
return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);
/**
* v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
*/
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b = NULL;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
if (list_empty(&q_ctx->rdy_queue))
goto end;
b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer, list);
end:
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
/**
* v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
* return it
*/
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
struct v4l2_m2m_buffer *b = NULL;
unsigned long flags;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
if (!list_empty(&q_ctx->rdy_queue)) {
b = list_entry(q_ctx->rdy_queue.next, struct v4l2_m2m_buffer,
list);
list_del(&b->list);
q_ctx->num_rdy--;
}
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
/*
* Scheduling handlers
*/
/**
* v4l2_m2m_get_curr_priv() - return driver private data for the currently
* running instance or NULL if no instance is running
*/
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
void *ret = NULL;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
if (m2m_dev->curr_ctx)
ret = m2m_dev->curr_ctx->priv;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
/**
* v4l2_m2m_try_run() - select next job to perform and run it if possible
*
* Get next transaction (if present) from the waiting jobs list and run it.
*/
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
unsigned long flags;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
if (NULL != m2m_dev->curr_ctx) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Another instance is running, won't run now\n");
return;
}
if (list_empty(&m2m_dev->job_queue)) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("No job pending\n");
return;
}
m2m_dev->curr_ctx = list_entry(m2m_dev->job_queue.next,
struct v4l2_m2m_ctx, queue);
m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}
/**
* v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
* the pending job queue and add it if so.
* @m2m_ctx: m2m context assigned to the instance to be checked
*
* There are three basic requirements an instance has to meet to be able to run:
* 1) at least one source buffer has to be queued,
* 2) at least one destination buffer has to be queued,
* 3) streaming has to be on.
*
* There may also be additional, custom requirements. In such case the driver
* should supply a custom callback (job_ready in v4l2_m2m_ops) that should
* return 1 if the instance is ready.
* An example of the above could be an instance that requires more than one
* src/dst buffer per transaction.
*/
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev;
unsigned long flags_job, flags;
m2m_dev = m2m_ctx->m2m_dev;
dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
if (!m2m_ctx->out_q_ctx.q.streaming
|| !m2m_ctx->cap_q_ctx.q.streaming) {
dprintk("Streaming needs to be on for both queues\n");
return;
}
spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
if (m2m_ctx->job_flags & TRANS_QUEUED) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("On job queue already\n");
return;
}
spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No input buffers available\n");
return;
}
if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No output buffers available\n");
return;
}
spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("Driver not ready\n");
return;
}
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
v4l2_m2m_try_run(m2m_dev);
}
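/* Editor's sketch (not part of the original file): a driver with the
* custom requirement mentioned above - e.g. needing two source buffers per
* transaction - could supply a job_ready callback along these lines. The
* example_ctx type and its m2m_ctx field are assumptions for illustration;
* the code is kept under #if 0 so it is never compiled.
*/
#if 0
static int example_job_ready(void *priv)
{
struct example_ctx *ctx = priv; /* hypothetical driver context */

/* ready only once at least two source buffers are queued */
return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
}
#endif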
/**
* v4l2_m2m_job_finish() - inform the framework that a job has been finished
* and have it clean up
*
* Called by a driver to yield back the device after it has finished with it.
* Should be called as soon as possible after reaching a state which allows
* other instances to take control of the device.
*
* This function has to be called only after the device_run() callback has
* been called on the driver. To prevent recursion, it should not be called
* directly from the device_run() callback though.
*/
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx)
{
unsigned long flags;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("Called by an instance not currently running\n");
return;
}
list_del(&m2m_dev->curr_ctx->queue);
m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
wake_up(&m2m_dev->curr_ctx->finished);
m2m_dev->curr_ctx = NULL;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
/* This instance might have more buffers ready, but since we do not
* allow more than one job on the job_queue per instance, each has
* to be scheduled separately after the previous one finishes. */
v4l2_m2m_try_schedule(m2m_ctx);
v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
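/* Editor's sketch (not part of the original file): the usual caller of
* v4l2_m2m_job_finish() is the driver's interrupt handler, once the
* hardware signals that a transaction is done. The device and context
* types are assumptions for illustration; kept under #if 0.
*/
#if 0
static irqreturn_t example_device_irq(int irq, void *priv)
{
struct example_dev *dev = priv; /* hypothetical device struct */
struct example_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);

/* ... dequeue the finished src/dst buffers with vb2_buffer_done() ... */

v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
return IRQ_HANDLED;
}
#endif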
/**
* v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
*/
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_requestbuffers *reqbufs)
{
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
/**
* v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
*
* See v4l2_m2m_mmap() documentation for details.
*/
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
int ret = 0;
unsigned int i;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_querybuf(vq, buf);
/* Adjust MMAP memory offsets for the CAPTURE queue */
if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
for (i = 0; i < buf->length; ++i)
buf->m.planes[i].m.mem_offset
+= DST_QUEUE_OFF_BASE;
} else {
buf->m.offset += DST_QUEUE_OFF_BASE;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
/**
* v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
* the type
*/
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
ret = vb2_qbuf(vq, buf);
if (!ret)
v4l2_m2m_try_schedule(m2m_ctx);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
/**
* v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
* the type
*/
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
/**
* v4l2_m2m_streamon() - turn on streaming for a video queue
*/
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, type);
ret = vb2_streamon(vq, type);
if (!ret)
v4l2_m2m_try_schedule(m2m_ctx);
return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
/**
* v4l2_m2m_streamoff() - turn off streaming for a video queue
*/
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type)
{
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(m2m_ctx, type);
return vb2_streamoff(vq, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
/**
* v4l2_m2m_poll() - poll replacement, for destination buffers only
*
* Call from the driver's poll() function. Will poll both queues. If a buffer
* is available to dequeue (with dqbuf) from the source queue, this will
* indicate that a non-blocking write can be performed; similarly, a buffer
* available to dequeue from the destination queue indicates that a
* non-blocking read can be performed.
*/
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
unsigned int rc = 0;
unsigned long flags;
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
/*
* There has to be at least one buffer queued on each queued_list,
* which means the buffer is either in the driver already or waiting
* for the driver to claim it and start processing.
*/
if ((!src_q->streaming || list_empty(&src_q->queued_list))
&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
rc = POLLERR;
goto end;
}
if (m2m_ctx->m2m_dev->m2m_ops->unlock)
m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
poll_wait(file, &src_q->done_wq, wait);
poll_wait(file, &dst_q->done_wq, wait);
if (m2m_ctx->m2m_dev->m2m_ops->lock)
m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
spin_lock_irqsave(&src_q->done_lock, flags);
if (!list_empty(&src_q->done_list))
src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
done_entry);
if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
|| src_vb->state == VB2_BUF_STATE_ERROR))
rc |= POLLOUT | POLLWRNORM;
spin_unlock_irqrestore(&src_q->done_lock, flags);
spin_lock_irqsave(&dst_q->done_lock, flags);
if (!list_empty(&dst_q->done_list))
dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
done_entry);
if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
|| dst_vb->state == VB2_BUF_STATE_ERROR))
rc |= POLLIN | POLLRDNORM;
spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
/**
* v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
*
* Call from driver's mmap() function. Will handle mmap() for both queues
* seamlessly for the videobuf layer, which will receive normal per-queue
* offsets and proper videobuf queue pointers. The differentiation is made
* outside videobuf
* by adding a predefined offset to buffers from one of the queues and
* subtracting it before passing it back to videobuf. Only drivers (and
* thus applications) receive modified offsets.
*/
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct vm_area_struct *vma)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
struct vb2_queue *vq;
if (offset < DST_QUEUE_OFF_BASE) {
vq = v4l2_m2m_get_src_vq(m2m_ctx);
} else {
vq = v4l2_m2m_get_dst_vq(m2m_ctx);
vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
}
return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
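/* Editor's note (illustrative): with DST_QUEUE_OFF_BASE = 1 << 30, a
* capture buffer that videobuf places at per-queue offset 0x1000 is
* reported to userspace as 0x40001000, and the code above subtracts the
* base again before handing the request to vb2_mmap(). The 0x1000 figure
* is an arbitrary example.
*/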
/**
* v4l2_m2m_init() - initialize per-driver m2m data
*
* Usually called from driver's probe() function.
*/
struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
{
struct v4l2_m2m_dev *m2m_dev;
if (!m2m_ops)
return ERR_PTR(-EINVAL);
BUG_ON(!m2m_ops->device_run);
BUG_ON(!m2m_ops->job_abort);
m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
if (!m2m_dev)
return ERR_PTR(-ENOMEM);
m2m_dev->curr_ctx = NULL;
m2m_dev->m2m_ops = m2m_ops;
INIT_LIST_HEAD(&m2m_dev->job_queue);
spin_lock_init(&m2m_dev->job_spinlock);
return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
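/* Editor's sketch (not part of the original file): a driver typically
* calls v4l2_m2m_init() from its probe() function, passing its
* v4l2_m2m_ops. All example_* names are assumptions for illustration;
* kept under #if 0 so it is never compiled.
*/
#if 0
static struct v4l2_m2m_ops example_m2m_ops = {
.device_run = example_device_run, /* hypothetical callbacks */
.job_ready = example_job_ready,
.job_abort = example_job_abort,
};

static int example_probe(struct platform_device *pdev)
{
struct example_dev *dev;

dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;

dev->m2m_dev = v4l2_m2m_init(&example_m2m_ops);
if (IS_ERR(dev->m2m_dev))
return PTR_ERR(dev->m2m_dev);
return 0;
}
#endif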
/**
* v4l2_m2m_release() - cleans up and frees a m2m_dev structure
*
* Usually called from driver's remove() function.
*/
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);
/**
* v4l2_m2m_ctx_init() - allocate and initialize a m2m context
* @m2m_dev: a previously initialized m2m_dev struct
* @drv_priv: driver's instance private data
* @queue_init: a callback for queue type-specific initialization function
* to be used for initializing videobuf_queues
*
* Usually called from driver's open() function.
*/
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
void *drv_priv,
int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
int ret;
m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
if (!m2m_ctx)
return ERR_PTR(-ENOMEM);
m2m_ctx->priv = drv_priv;
m2m_ctx->m2m_dev = m2m_dev;
init_waitqueue_head(&m2m_ctx->finished);
out_q_ctx = &m2m_ctx->out_q_ctx;
cap_q_ctx = &m2m_ctx->cap_q_ctx;
INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
spin_lock_init(&out_q_ctx->rdy_spinlock);
spin_lock_init(&cap_q_ctx->rdy_spinlock);
INIT_LIST_HEAD(&m2m_ctx->queue);
ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
if (ret)
goto err;
return m2m_ctx;
err:
kfree(m2m_ctx);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
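/* Editor's sketch (not part of the original file): drivers usually call
* v4l2_m2m_ctx_init() from open(), passing a queue_init callback that
* configures both vb2 queues. The example_* types and helpers are
* assumptions for illustration; kept under #if 0.
*/
#if 0
static int example_open(struct file *file)
{
struct example_dev *dev = video_drvdata(file);
struct example_ctx *ctx;

ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;

ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, example_queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
int ret = PTR_ERR(ctx->m2m_ctx);

kfree(ctx);
return ret;
}
file->private_data = ctx;
return 0;
}
#endif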
/**
* v4l2_m2m_ctx_release() - release m2m context
*
* Usually called from driver's release() function.
*/
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
struct v4l2_m2m_dev *m2m_dev;
unsigned long flags;
m2m_dev = m2m_ctx->m2m_dev;
spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
if (m2m_ctx->job_flags & TRANS_RUNNING) {
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
list_del(&m2m_ctx->queue);
m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
dprintk("m2m_ctx: %p had been on queue and was removed\n",
m2m_ctx);
} else {
/* Do nothing, was not on queue/running */
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
}
vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
vb2_queue_release(&m2m_ctx->out_q_ctx.q);
kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
/**
* v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
*
* Call from buf_queue(), videobuf_queue_ops callback.
*/
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
struct v4l2_m2m_queue_ctx *q_ctx;
unsigned long flags;
q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
if (!q_ctx)
return;
spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
list_add_tail(&b->list, &q_ctx->rdy_queue);
q_ctx->num_rdy++;
spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
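/* Editor's sketch (not part of the original file): a vb2 buf_queue
* callback normally just forwards the buffer to the helper above. The
* example_ctx type is an assumption for illustration; kept under #if 0.
*/
#if 0
static void example_buf_queue(struct vb2_buffer *vb)
{
struct example_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
#endif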
| gpl-2.0 |
dh-harald/android_kernel_samsung_codina | arch/blackfin/mach-bf527/boards/cm_bf527.c | 2440 | 24348 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Bluetechnix
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/etherdevice.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/musb.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/nand.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <linux/spi/ad7877.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "Bluetechnix CM-BF527";
/*
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
#include <linux/usb/isp1760.h>
static struct resource bfin_isp1760_resources[] = {
[0] = {
.start = 0x203C0000,
.end = 0x203C0000 + 0x000fffff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ,
},
};
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
.dack_polarity_high = 0,
.dreq_polarity_high = 0,
};
static struct platform_device bfin_isp1760_device = {
.name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
},
.num_resources = ARRAY_SIZE(bfin_isp1760_resources),
.resource = bfin_isp1760_resources,
};
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
static struct resource musb_resources[] = {
[0] = {
.start = 0xffc03800,
.end = 0xffc03cff,
.flags = IORESOURCE_MEM,
},
[1] = { /* general IRQ */
.start = IRQ_USB_INT0,
.end = IRQ_USB_INT0,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "mc"
},
[2] = { /* DMA IRQ */
.start = IRQ_USB_DMA,
.end = IRQ_USB_DMA,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
.name = "dma"
},
};
static struct musb_hdrc_config musb_config = {
.multipoint = 0,
.dyn_fifo = 0,
.soft_con = 1,
.dma = 1,
.num_eps = 8,
.dma_channels = 8,
.gpio_vrsel = GPIO_PF11,
/* Some custom boards need this to be active low; just set it to "0"
* if that is the case.
*/
.gpio_vrsel_active = 1,
.clkin = 24, /* musb CLKIN in MHZ */
};
static struct musb_hdrc_platform_data musb_plat = {
#if defined(CONFIG_USB_MUSB_OTG)
.mode = MUSB_OTG,
#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
.mode = MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_PERIPHERAL,
#endif
.config = &musb_config,
};
static u64 musb_dmamask = ~(u32)0;
static struct platform_device musb_device = {
.name = "musb-blackfin",
.id = 0,
.dev = {
.dma_mask = &musb_dmamask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &musb_plat,
},
.num_resources = ARRAY_SIZE(musb_resources),
.resource = musb_resources,
};
#endif
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
static struct mtd_partition partition_info[] = {
{
.name = "linux kernel(nand)",
.offset = 0,
.size = 4 * 1024 * 1024,
},
{
.name = "file system(nand)",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct bf5xx_nand_platform bf5xx_nand_platform = {
.data_width = NFC_NWIDTH_8,
.partitions = partition_info,
.nr_partitions = ARRAY_SIZE(partition_info),
.rd_dly = 3,
.wr_dly = 3,
};
static struct resource bf5xx_nand_resources[] = {
{
.start = NFC_CTL,
.end = NFC_DATA_RD + 2,
.flags = IORESOURCE_MEM,
},
{
.start = CH_NFC,
.end = CH_NFC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bf5xx_nand_device = {
.name = "bf5xx-nand",
.id = 0,
.num_resources = ARRAY_SIZE(bf5xx_nand_resources),
.resource = bf5xx_nand_resources,
.dev = {
.platform_data = &bf5xx_nand_platform,
},
};
#endif
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
static struct resource bfin_pcmcia_cf_resources[] = {
{
.start = 0x20310000, /* IO PORT */
.end = 0x20312000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20311000, /* Attribute Memory */
.end = 0x20311FFF,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF4,
.end = IRQ_PF4,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
}, {
.start = 6, /* Card Detect PF6 */
.end = 6,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pcmcia_cf_device = {
.name = "bfin_cf_pcmcia",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
.resource = bfin_pcmcia_cf_resources,
};
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.name = "smc91x-regs",
.start = 0x20300300,
.end = 0x20300300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
static struct resource dm9000_resources[] = {
[0] = {
.start = 0x203FB800,
.end = 0x203FB800 + 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 0x203FB804,
.end = 0x203FB804 + 1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = IRQ_PF9,
.end = IRQ_PF9,
.flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE),
},
};
static struct platform_device dm9000_device = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(dm9000_resources),
.resource = dm9000_resources,
};
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_RMII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = IRQ_MAC_PHYINT,
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_RMII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p16",
};
/* SPI flash chip (m25p16) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* do not use dma transfer with this chip */
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_BFIN_SPI_ADC) \
|| defined(CONFIG_BFIN_SPI_ADC_MODULE)
/* SPI ADC chip */
static struct bfin5xx_spi_chip spi_adc_chip_info = {
.enable_dma = 1, /* use dma transfer with this chip */
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
.x_plate_ohms = 419,
.y_plate_ohms = 486,
.pressure_max = 1000,
.pressure_min = 0,
.stopacq_polarity = 1,
.first_conversion_delay = 3,
.acquisition_time = 1,
.averaging = 1,
.pen_down_acc_interval = 1,
};
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
&& defined(CONFIG_SND_SOC_WM8731_SPI)
static struct bfin5xx_spi_chip spi_wm8731_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
static struct bfin5xx_spi_chip spidev_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_BFIN_SPI_ADC) \
|| defined(CONFIG_BFIN_SPI_ADC_MODULE)
{
.modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
.max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. */
.platform_data = NULL, /* No spi_driver specific config */
.controller_data = &spi_adc_chip_info,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
.controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
{
.modalias = "ad7877",
.platform_data = &bfin_ad7877_ts_info,
.irq = IRQ_PF8,
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
.controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
&& defined(CONFIG_SND_SOC_WM8731_SPI)
{
.modalias = "wm8731",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.controller_data = &spi_wm8731_chip_info,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &spidev_chip_info,
},
#endif
};
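/* Editor's note (illustrative): board tables like the one above are handed
* to the SPI core during machine init, typically via
* spi_register_board_info(bfin_spi_board_info,
* ARRAY_SIZE(bfin_spi_board_info)); the init function itself lies outside
* this excerpt.
*/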
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
static struct mtd_partition cm_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x100000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct physmap_flash_data cm_flash_data = {
.width = 2,
.parts = cm_partitions,
.nr_parts = ARRAY_SIZE(cm_partitions),
};
static unsigned cm_flash_gpios[] = { GPIO_PH9, GPIO_PG11 };
static struct resource cm_flash_resource[] = {
{
.name = "cfi_probe",
.start = 0x20000000,
.end = 0x201fffff,
.flags = IORESOURCE_MEM,
}, {
.start = (unsigned long)cm_flash_gpios,
.end = ARRAY_SIZE(cm_flash_gpios),
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device cm_flash_device = {
.name = "gpio-addr-flash",
.id = 0,
.dev = {
.platform_data = &cm_flash_data,
},
.num_resources = ARRAY_SIZE(cm_flash_resource),
.resource = cm_flash_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART1_CTSRTS
{ /* CTS pin */
.start = GPIO_PF9,
.end = GPIO_PF9,
.flags = IORESOURCE_IO,
},
{ /* RTS pin */
.start = GPIO_PF10,
.end = GPIO_PF10,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
{
I2C_BOARD_INFO("pcf8574_lcd", 0x22),
},
#endif
#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
{
I2C_BOARD_INFO("pcf8574_keypad", 0x27),
.irq = IRQ_PF8,
},
#endif
#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
{
I2C_BOARD_INFO("bfin-adv7393", 0x2B),
},
#endif
};
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>
static struct gpio_keys_button bfin_gpio_keys_table[] = {
{BTN_0, GPIO_PF14, 1, "gpio-keys: BTN0"},
};
static struct gpio_keys_platform_data bfin_gpio_keys_data = {
.buttons = bfin_gpio_keys_table,
.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};
static struct platform_device bfin_device_gpiokeys = {
.name = "gpio-keys",
.dev = {
.platform_data = &bfin_gpio_keys_data,
},
};
#endif
static const unsigned int cclk_vlev_datasheet[] =
{
VRPAIR(VLEV_100, 400000000),
VRPAIR(VLEV_105, 426000000),
VRPAIR(VLEV_110, 500000000),
VRPAIR(VLEV_115, 533000000),
VRPAIR(VLEV_120, 600000000),
};
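/* Each VRPAIR() entry pairs an internal voltage level with the highest
 * core clock (in Hz) the datasheet allows at that voltage; the "bfin
 * dpmc" device below hands this table to the DPMC driver via tuple_tab. */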
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
static struct platform_device *cmbf527_devices[] __initdata = {
&bfin_dpmc,
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
&bf5xx_nand_device,
#endif
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
&bfin_pcmcia_cf_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
&bfin_isp1760_device,
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
&musb_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
&dm9000_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
&cm_flash_device,
#endif
};
static int __init cm_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
i2c_register_board_info(0, bfin_i2c_board_info,
ARRAY_SIZE(bfin_i2c_board_info));
platform_add_devices(cmbf527_devices, ARRAY_SIZE(cmbf527_devices));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
return 0;
}
arch_initcall(cm_init);
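/* arch_initcall() runs at boot before the device_initcall() level where
 * most drivers register, so the board resources above are in place by
 * the time the drivers probe. */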
static struct platform_device *cmbf527_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(cmbf527_early_devices,
ARRAY_SIZE(cmbf527_early_devices));
}
void native_machine_restart(char *cmd)
{
/* workaround reboot hang when booting from SPI */
if ((bfin_read_SYSCR() & 0x7) == 0x3)
bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}
void bfin_get_ether_addr(char *addr)
{
random_ether_addr(addr);
printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__);
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
geneyeung/linux-3.10.17 | arch/arm/mach-omap2/clkt_clksel.c | 2440 | 13729 | /*
* clkt_clksel.c - OMAP2/3/4 clksel clock functions
*
* Copyright (C) 2005-2008 Texas Instruments, Inc.
* Copyright (C) 2004-2010 Nokia Corporation
*
* Contacts:
* Richard Woodruff <r-woodruff2@ti.com>
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* clksel clocks are clocks that do not have a fixed parent, or that
* can divide their parent's rate, or possibly both at the same time, based
* on the contents of a hardware register bitfield.
*
* All of the various mux and divider settings can be encoded into
* struct clksel* data structures, and then these can be autogenerated
* from some hardware database for each new chip generation. This
* should avoid the need to write, review, and validate a lot of new
* clock code for each new chip, since it can be exported from the SoC
* design flow. This is now done on OMAP4.
*
* The fusion of mux and divider clocks is a software creation. In
* hardware reality, the multiplexer (parent selection) and the
* divider exist separately. XXX At some point these clksel clocks
* should be split into "divider" clocks and "mux" clocks to better
* match the hardware.
*
* (The name "clksel" comes from the name of the corresponding
* register field in the OMAP2/3 family of SoCs.)
*
* XXX Currently these clocks are only used in the OMAP2/3/4 code, but
* many of the OMAP1 clocks should be convertible to use this
* mechanism.
*/
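/*
 * Illustrative sketch (not from the original clock data): one way the
 * mux and divider settings described above can be encoded as struct
 * clksel tables. The parent clock and the rate flag named here are
 * hypothetical placeholders, so the block is fenced off from the build.
 */
#if 0
static const struct clksel_rate example_div_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_EXAMPLE_SOC },
{ .div = 2, .val = 2, .flags = RATE_IN_EXAMPLE_SOC },
{ .div = 0 }, /* .div == 0 terminates the rate walk */
};
static const struct clksel example_mux[] = {
{ .parent = &example_parent_ck, .rates = example_div_rates },
{ .parent = NULL }, /* .parent == NULL terminates the parent walk */
};
#endif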
#undef DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/bug.h>
#include "clock.h"
/* Private functions */
/**
* _get_clksel_by_parent() - return clksel struct for a given clk & parent
* @clk: OMAP struct clk ptr to inspect
* @src_clk: OMAP struct clk ptr of the parent clk to search for
*
* Scan the struct clksel array associated with the clock to find
* the element associated with the supplied parent clock address.
* Returns a pointer to the struct clksel on success or NULL on error.
*/
static const struct clksel *_get_clksel_by_parent(struct clk_hw_omap *clk,
struct clk *src_clk)
{
const struct clksel *clks;
if (!src_clk)
return NULL;
for (clks = clk->clksel; clks->parent; clks++)
if (clks->parent == src_clk)
break; /* Found the requested parent */
if (!clks->parent) {
/* This indicates a data problem */
WARN(1, "clock: %s: could not find parent clock %s in clksel array\n",
__clk_get_name(clk->hw.clk), __clk_get_name(src_clk));
return NULL;
}
return clks;
}
/**
* _write_clksel_reg() - program a clock's clksel register in hardware
* @clk: struct clk * to program
* @v: clksel bitfield value to program (with LSB at bit 0)
*
* Shift the clksel register bitfield value @v to its appropriate
* location in the clksel register and write it in. This function
* will ensure that the write to the clksel_reg reaches its
* destination before returning -- important since PRM and CM register
* accesses can be quite slow compared to ARM cycles -- but does not
* take into account any time the hardware might take to switch the
* clock source.
*/
static void _write_clksel_reg(struct clk_hw_omap *clk, u32 field_val)
{
u32 v;
v = __raw_readl(clk->clksel_reg);
v &= ~clk->clksel_mask;
v |= field_val << __ffs(clk->clksel_mask);
__raw_writel(v, clk->clksel_reg);
v = __raw_readl(clk->clksel_reg); /* OCP barrier */
}
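/*
 * Worked example (illustrative): with clksel_mask = 0x00000300,
 * __ffs(clksel_mask) = 8, so programming field_val = 2 clears bits
 * 9:8 of the register and then ORs in (2 << 8).
 */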
/**
* _clksel_to_divisor() - turn clksel field value into integer divider
* @clk: OMAP struct clk to use
* @field_val: register field value to find
*
* Given a struct clk of a rate-selectable clksel clock, and a register field
* value to search for, find the corresponding clock divisor. The register
* field value should be pre-masked and shifted down so the LSB is at bit 0
* before calling. Returns 0 on error or returns the actual integer divisor
* upon success.
*/
static u32 _clksel_to_divisor(struct clk_hw_omap *clk, u32 field_val)
{
const struct clksel *clks;
const struct clksel_rate *clkr;
struct clk *parent;
parent = __clk_get_parent(clk->hw.clk);
clks = _get_clksel_by_parent(clk, parent);
if (!clks)
return 0;
for (clkr = clks->rates; clkr->div; clkr++) {
if (!(clkr->flags & cpu_mask))
continue;
if (clkr->val == field_val)
break;
}
if (!clkr->div) {
/* This indicates a data error */
WARN(1, "clock: %s: could not find fieldval %d for parent %s\n",
__clk_get_name(clk->hw.clk), field_val,
__clk_get_name(parent));
return 0;
}
return clkr->div;
}
/**
* _divisor_to_clksel() - turn clksel integer divisor into a field value
* @clk: OMAP struct clk to use
* @div: integer divisor to search for
*
* Given a struct clk of a rate-selectable clksel clock, and a clock
* divisor, find the corresponding register field value. Returns the
* register field value _before_ left-shifting (i.e., LSB is at bit
* 0); or returns 0xFFFFFFFF (~0) upon error.
*/
static u32 _divisor_to_clksel(struct clk_hw_omap *clk, u32 div)
{
const struct clksel *clks;
const struct clksel_rate *clkr;
struct clk *parent;
/* should never happen */
WARN_ON(div == 0);
parent = __clk_get_parent(clk->hw.clk);
clks = _get_clksel_by_parent(clk, parent);
if (!clks)
return ~0;
for (clkr = clks->rates; clkr->div; clkr++) {
if (!(clkr->flags & cpu_mask))
continue;
if (clkr->div == div)
break;
}
if (!clkr->div) {
pr_err("clock: %s: could not find divisor %d for parent %s\n",
__clk_get_name(clk->hw.clk), div,
__clk_get_name(parent));
return ~0;
}
return clkr->val;
}
/**
* _read_divisor() - get current divisor applied to parent clock (from hdwr)
* @clk: OMAP struct clk to use.
*
* Read the current divisor register value for @clk that is programmed
* into the hardware, convert it into the actual divisor value, and
* return it; or return 0 on error.
*/
static u32 _read_divisor(struct clk_hw_omap *clk)
{
u32 v;
if (!clk->clksel || !clk->clksel_mask)
return 0;
v = __raw_readl(clk->clksel_reg);
v &= clk->clksel_mask;
v >>= __ffs(clk->clksel_mask);
return _clksel_to_divisor(clk, v);
}
/* Public functions */
/**
* omap2_clksel_round_rate_div() - find divisor for the given clock and rate
* @clk: OMAP struct clk to use
* @target_rate: desired clock rate
* @new_div: ptr to where we should store the divisor
*
* Finds 'best' divider value in an array based on the source and target
* rates. The divider array must be sorted with smallest divider first.
* This function is also used by the DPLL3 M2 divider code.
*
* Returns the rounded clock rate or returns 0xffffffff on error.
*/
u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk,
unsigned long target_rate,
u32 *new_div)
{
unsigned long test_rate;
const struct clksel *clks;
const struct clksel_rate *clkr;
u32 last_div = 0;
struct clk *parent;
unsigned long parent_rate;
const char *clk_name;
parent = __clk_get_parent(clk->hw.clk);
clk_name = __clk_get_name(clk->hw.clk);
parent_rate = __clk_get_rate(parent);
if (!clk->clksel || !clk->clksel_mask)
return ~0;
pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n",
clk_name, target_rate);
*new_div = 1;
clks = _get_clksel_by_parent(clk, parent);
if (!clks)
return ~0;
for (clkr = clks->rates; clkr->div; clkr++) {
if (!(clkr->flags & cpu_mask))
continue;
/* Sanity check */
if (clkr->div <= last_div)
pr_err("clock: %s: clksel_rate table not sorted\n",
clk_name);
last_div = clkr->div;
test_rate = parent_rate / clkr->div;
if (test_rate <= target_rate)
break; /* found it */
}
if (!clkr->div) {
pr_err("clock: %s: could not find divisor for target rate %ld for parent %s\n",
clk_name, target_rate, __clk_get_name(parent));
return ~0;
}
*new_div = clkr->div;
pr_debug("clock: new_div = %d, new_rate = %ld\n", *new_div,
(parent_rate / clkr->div));
return parent_rate / clkr->div;
}
/*
* Clocktype interface functions to the OMAP clock code
* (i.e., those used in struct clk field function pointers, etc.)
*/
/**
* omap2_clksel_find_parent_index() - return the array index of the current
* hardware parent of @hw
* @hw: struct clk_hw * to find the current hardware parent of
*
* Given a struct clk_hw pointer @hw to the 'hw' member of a struct
* clk_hw_omap record representing a source-selectable hardware clock,
* read the hardware register and determine what its parent is
* currently set to. Intended to be called only by the common clock
* framework struct clk_hw_ops.get_parent function pointer. Return
* the array index of this parent clock upon success -- there is no
* way to return an error, so if we encounter an error, just WARN()
* and pretend that we know what we're doing.
*/
u8 omap2_clksel_find_parent_index(struct clk_hw *hw)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
const struct clksel *clks;
const struct clksel_rate *clkr;
u32 r, found = 0;
struct clk *parent;
const char *clk_name;
int ret = 0, f = 0;
parent = __clk_get_parent(hw->clk);
clk_name = __clk_get_name(hw->clk);
/* XXX should be able to return an error */
WARN((!clk->clksel || !clk->clksel_mask),
"clock: %s: attempt to call on a non-clksel clock", clk_name);
r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
r >>= __ffs(clk->clksel_mask);
for (clks = clk->clksel; clks->parent && !found; clks++) {
for (clkr = clks->rates; clkr->div && !found; clkr++) {
if (!(clkr->flags & cpu_mask))
continue;
if (clkr->val == r) {
found = 1;
ret = f;
}
}
f++;
}
/* This indicates a data error */
WARN(!found, "clock: %s: init parent: could not find regval %0x\n",
clk_name, r);
return ret;
}
/**
* omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field
* @clk: struct clk *
*
* This function is intended to be called only by the clock framework.
* Each clksel clock should have its struct clk .recalc field set to this
* function. Returns the clock's current rate, based on its parent's rate
* and its current divisor setting in the hardware.
*/
unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
unsigned long rate;
u32 div = 0;
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
if (!parent_rate)
return 0;
div = _read_divisor(clk);
if (!div)
rate = parent_rate;
else
rate = parent_rate / div;
pr_debug("%s: recalc'd %s's rate to %lu (div %d)\n", __func__,
__clk_get_name(hw->clk), rate, div);
return rate;
}
/**
* omap2_clksel_round_rate() - find rounded rate for the given clock and rate
* @clk: OMAP struct clk to use
* @target_rate: desired clock rate
*
* This function is intended to be called only by the clock framework.
* Finds the best target rate based on the source clock and the possible
* divider rates. The divider array must be sorted with smallest divider first.
*
* Returns the rounded clock rate or returns 0xffffffff on error.
*/
long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate,
unsigned long *parent_rate)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 new_div;
return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
}
/**
* omap2_clksel_set_rate() - program clock rate in hardware
* @clk: struct clk * to program rate
* @rate: target rate to program
*
* This function is intended to be called only by the clock framework.
* Program @clk's rate to @rate in the hardware. The clock can be
* either enabled or disabled when this happens, although if the clock
* is enabled, some downstream devices may glitch or behave
* unpredictably when the clock rate is changed - this depends on the
* hardware. This function does not currently check the usecount of
* the clock, so if multiple drivers are using the clock, and the rate
* is changed, they will all be affected without any notification.
* Returns -EINVAL upon error, or 0 upon success.
*/
int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
u32 field_val, validrate, new_div = 0;
if (!clk->clksel || !clk->clksel_mask)
return -EINVAL;
validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
if (validrate != rate)
return -EINVAL;
field_val = _divisor_to_clksel(clk, new_div);
if (field_val == ~0)
return -EINVAL;
_write_clksel_reg(clk, field_val);
pr_debug("clock: %s: set rate to %ld\n", __clk_get_name(hw->clk),
__clk_get_rate(hw->clk));
return 0;
}
/*
* Clksel parent setting function - not passed in struct clk function
* pointer - instead, the OMAP clock code currently assumes that any
* parent-setting clock is a clksel clock, and calls
* omap2_clksel_set_parent() by default
*/
/**
* omap2_clksel_set_parent() - change a clock's parent clock
* @clk: struct clk * of the child clock
* @new_parent: struct clk * of the new parent clock
*
* This function is intended to be called only by the clock framework.
* Change the parent clock of clock @clk to @new_parent. This is
* intended to be used while @clk is disabled. This function does not
* currently check the usecount of the clock, so if multiple drivers
* are using the clock, and the parent is changed, they will all be
* affected without any notification. Returns -EINVAL upon error, or
* 0 upon success.
*/
int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
if (!clk->clksel || !clk->clksel_mask)
return -EINVAL;
_write_clksel_reg(clk, field_val);
return 0;
}
| gpl-2.0 |
garrikus/o3_linux | drivers/net/declance.c | 2696 | 35809 | /*
* Lance ethernet driver for the MIPS processor based
* DECstation family
*
*
* adopted from sunlance.c by Richard van den Berg
*
* Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
*
* additional sources:
* - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
* Revision 1.2
*
* History:
*
* v0.001: The kernel accepts the code and it shows the hardware address.
*
* v0.002: Removed most sparc stuff, left only some module and dma stuff.
*
* v0.003: Enhanced base address calculation from proposals by
* Harald Koerfgen and Thomas Riemer.
*
* v0.004: lance-regs is pointing at the right addresses, added prom
* check. First start of address mapping and DMA.
*
* v0.005: started to play around with LANCE-DMA. This driver will not
* work for non-IOASIC lances. HK
*
* v0.006: added pointer arrays to lance_private and setup routine for
* them in dec_lance_init. HK
*
* v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
* access the init block. This looks like one (short) word at a
* time, but the smallest amount the IOASIC can transfer is a
* (long) word. So we have a 2-2 padding here. Changed
* lance_init_block accordingly. The 16-16 padding for the buffers
* seems to be correct. HK
*
* v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
*
* v0.009: Module support fixes, multiple interfaces support, various
* bits. macro
*
* v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
* PMAX requirement to only use halfword accesses to the
* buffer. macro
*
* v0.011: Converted the PMAD to the driver model. macro
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/tc.h>
#include <linux/types.h>
#include <asm/addrspace.h>
#include <asm/system.h>
#include <asm/dec/interrupts.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/kn01.h>
#include <asm/dec/machtype.h>
#include <asm/dec/system.h>
static char version[] __devinitdata =
"declance.c: v0.011 by Linux MIPS DECstation task force\n";
MODULE_AUTHOR("Linux MIPS DECstation task force");
MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
MODULE_LICENSE("GPL");
#define __unused __attribute__ ((unused))
/*
* card types
*/
#define ASIC_LANCE 1
#define PMAD_LANCE 2
#define PMAX_LANCE 3
#define LE_CSR0 0
#define LE_CSR1 1
#define LE_CSR2 2
#define LE_CSR3 3
#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
#define LE_C0_MERR 0x0800 /* ME: Memory error */
#define LE_C0_RINT 0x0400 /* Received interrupt */
#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
#define LE_C0_INTR 0x0080 /* Interrupt or error */
#define LE_C0_INEA 0x0040 /* Interrupt enable */
#define LE_C0_RXON 0x0020 /* Receiver on */
#define LE_C0_TXON 0x0010 /* Transmitter on */
#define LE_C0_TDMD 0x0008 /* Transmitter demand */
#define LE_C0_STOP 0x0004 /* Stop the card */
#define LE_C0_STRT 0x0002 /* Start the card */
#define LE_C0_INIT 0x0001 /* Init the card */
#define LE_C3_BSWP 0x4 /* SWAP */
#define LE_C3_ACON 0x2 /* ALE Control */
#define LE_C3_BCON 0x1 /* Byte control */
/* Receive message descriptor 1 */
#define LE_R1_OWN 0x8000 /* Who owns the entry */
#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */
#define LE_R1_FRA 0x2000 /* FRA: Frame error */
#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */
#define LE_R1_CRC 0x0800 /* CRC error */
#define LE_R1_BUF 0x0400 /* BUF: Buffer error */
#define LE_R1_SOP 0x0200 /* Start of packet */
#define LE_R1_EOP 0x0100 /* End of packet */
#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */
/* Transmit message descriptor 1 */
#define LE_T1_OWN 0x8000 /* Lance owns the packet */
#define LE_T1_ERR 0x4000 /* Error summary */
#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */
#define LE_T1_EONE 0x0800 /* Error: one retry needed */
#define LE_T1_EDEF 0x0400 /* Error: deferred */
#define LE_T1_SOP 0x0200 /* Start of packet */
#define LE_T1_EOP 0x0100 /* End of packet */
#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */
#define LE_T3_BUF 0x8000 /* Buffer error */
#define LE_T3_UFL 0x4000 /* Error underflow */
#define LE_T3_LCOL 0x1000 /* Error late collision */
#define LE_T3_CLOS 0x0800 /* Error carrier loss */
#define LE_T3_RTY 0x0400 /* Error retry */
#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif
#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
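/* With the default LANCE_LOG_TX_BUFFERS of 4 each ring has 16 entries
 * and the mod mask is 0xf, so ring indices wrap with a cheap bitwise
 * AND instead of a modulo operation. */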
#define PKT_BUF_SZ 1536
#define RX_BUFF_SIZE PKT_BUF_SZ
#define TX_BUFF_SIZE PKT_BUF_SZ
#undef TEST_HITS
#define ZERO 0
/*
* The DS2100/3100 have a linear 64 kB buffer which supports halfword
* accesses only. Each halfword of the buffer is word-aligned in the
* CPU address space.
*
* The PMAD-AA has a 128 kB buffer on-board.
*
* The IOASIC LANCE devices use a shared memory region. This region
* as seen from the CPU is (max) 128 kB long and has to be on a 128 kB
* boundary. The LANCE sees this as a 64 kB long continuous memory
* region.
*
* The LANCE's DMA address is used as an index in this buffer and DMA
* takes place in bursts of eight 16-bit words which are packed into
* four 32-bit words by the IOASIC. This leads to a strange padding:
* 16 bytes of valid data followed by a 16 byte gap :-(.
*/
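/*
 * Illustrative mapping for the IOASIC padding described above: LANCE
 * offsets 0x00-0x0f land at CPU offsets 0x00-0x0f, LANCE offsets
 * 0x10-0x1f at CPU offsets 0x20-0x2f, and so on -- which is why the
 * CPU-side buffer offsets in dec_lance_probe() below are doubled.
 */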
struct lance_rx_desc {
unsigned short rmd0; /* low address of packet */
unsigned short rmd1; /* high address of packet
and descriptor bits */
short length; /* 2s complement (negative!)
of buffer length */
unsigned short mblength; /* actual number of bytes received */
};
struct lance_tx_desc {
unsigned short tmd0; /* low address of packet */
unsigned short tmd1; /* high address of packet
and descriptor bits */
short length; /* 2s complement (negative!)
of buffer length */
unsigned short misc;
};
/* First part of the LANCE initialization block, described in databook. */
struct lance_init_block {
unsigned short mode; /* pre-set mode (reg. 15) */
unsigned short phys_addr[3]; /* physical ethernet address */
unsigned short filter[4]; /* multicast filter */
/* Receive and transmit ring base, along with extra bits. */
unsigned short rx_ptr; /* receive descriptor addr */
unsigned short rx_len; /* receive len and high addr */
unsigned short tx_ptr; /* transmit descriptor addr */
unsigned short tx_len; /* transmit len and high addr */
short gap[4];
/* The buffer descriptors */
struct lance_rx_desc brx_ring[RX_RING_SIZE];
struct lance_tx_desc btx_ring[TX_RING_SIZE];
};
#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
#define shift_off(off, type) \
(type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
#define lib_off(rt, type) \
shift_off(offsetof(struct lance_init_block, rt), type)
#define lib_ptr(ib, rt, type) \
((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
#define rds_off(rt, type) \
shift_off(offsetof(struct lance_rx_desc, rt), type)
#define rds_ptr(rd, rt, type) \
((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
#define tds_off(rt, type) \
shift_off(offsetof(struct lance_tx_desc, rt), type)
#define tds_ptr(td, rt, type) \
((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
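/*
 * Worked example (illustrative): on PMAX_LANCE every 16-bit LANCE word
 * occupies a 32-bit CPU slot, so a field at offset 6 within struct
 * lance_init_block (phys_addr[2]) is accessed at CPU byte offset
 * shift_off(6, PMAX_LANCE) == 12; on PMAD_LANCE the offset is unchanged.
 */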
struct lance_private {
struct net_device *next;
int type;
int dma_irq;
volatile struct lance_regs *ll;
spinlock_t lock;
int rx_new, tx_new;
int rx_old, tx_old;
unsigned short busmaster_regval;
struct timer_list multicast_timer;
/* Pointers to the ring buffers as seen from the CPU */
char *rx_buf_ptr_cpu[RX_RING_SIZE];
char *tx_buf_ptr_cpu[TX_RING_SIZE];
/* Pointers to the ring buffers as seen from the LANCE */
uint rx_buf_ptr_lnc[RX_RING_SIZE];
uint tx_buf_ptr_lnc[TX_RING_SIZE];
};
#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
lp->tx_old - lp->tx_new-1)
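/*
 * Worked example (illustrative): with TX_RING_SIZE = 16, tx_old = 3 and
 * tx_new = 7, TX_BUFFS_AVAIL = 3 + 15 - 7 = 11 free slots; one slot is
 * always left unused so that tx_old == tx_new unambiguously means the
 * ring is empty.
 */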
/* The lance control ports are at an absolute address, machine and tc-slot
* dependent.
* DECstations do only 32-bit access and the LANCE uses 16 bit addresses,
* so we have to give the structure an extra member making rap pointing
* at the right address
*/
struct lance_regs {
volatile unsigned short rdp; /* register data port */
unsigned short pad;
volatile unsigned short rap; /* register address port */
};
int dec_lance_debug = 2;
static struct tc_driver dec_lance_tc_driver;
static struct net_device *root_lance_dev;
static inline void writereg(volatile unsigned short *regptr, short value)
{
*regptr = value;
iob();
}
/* Load the CSR registers */
static void load_csrs(struct lance_private *lp)
{
volatile struct lance_regs *ll = lp->ll;
uint leptr;
/* The address space as seen from the LANCE
* begins at address 0. HK
*/
leptr = 0;
writereg(&ll->rap, LE_CSR1);
writereg(&ll->rdp, (leptr & 0xFFFF));
writereg(&ll->rap, LE_CSR2);
writereg(&ll->rdp, leptr >> 16);
writereg(&ll->rap, LE_CSR3);
writereg(&ll->rdp, lp->busmaster_regval);
/* Point back to csr0 */
writereg(&ll->rap, LE_CSR0);
}
/*
 * Our specialized copy routines
 */
static void cp_to_buf(const int type, void *to, const void *from, int len)
{
unsigned short *tp, *fp, clen;
unsigned char *rtp, *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
tp = (unsigned short *) to;
fp = (unsigned short *) from;
while (clen--) {
*tp++ = *fp++;
tp++;
}
clen = len & 1;
rtp = (unsigned char *) tp;
rfp = (unsigned char *) fp;
while (clen--) {
*rtp++ = *rfp++;
}
} else {
/*
* copy 16 Byte chunks
*/
clen = len >> 4;
tp = (unsigned short *) to;
fp = (unsigned short *) from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
tp += 8;
}
/*
* do the rest, if any.
*/
clen = len & 15;
rtp = (unsigned char *) tp;
rfp = (unsigned char *) fp;
while (clen--) {
*rtp++ = *rfp++;
}
}
iob();
}
static void cp_from_buf(const int type, void *to, const void *from, int len)
{
unsigned short *tp, *fp, clen;
unsigned char *rtp, *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
tp = (unsigned short *) to;
fp = (unsigned short *) from;
while (clen--) {
*tp++ = *fp++;
fp++;
}
clen = len & 1;
rtp = (unsigned char *) tp;
rfp = (unsigned char *) fp;
while (clen--) {
*rtp++ = *rfp++;
}
} else {
/*
* copy 16 Byte chunks
*/
clen = len >> 4;
tp = (unsigned short *) to;
fp = (unsigned short *) from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
*tp++ = *fp++;
fp += 8;
}
/*
* do the rest, if any.
*/
clen = len & 15;
rtp = (unsigned char *) tp;
rfp = (unsigned char *) fp;
while (clen--) {
*rtp++ = *rfp++;
}
}
}
/* Setup the Lance Rx and Tx rings */
static void lance_init_ring(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
uint leptr;
int i;
/* Lock out other processes while setting up hardware */
netif_stop_queue(dev);
lp->rx_new = lp->tx_new = 0;
lp->rx_old = lp->tx_old = 0;
/* Copy the ethernet address to the lance init block.
* XXX bit 0 of the physical address registers has to be zero
*/
*lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
dev->dev_addr[0];
*lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
dev->dev_addr[2];
*lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
dev->dev_addr[4];
/* Setup the initialization block */
/* Setup rx descriptor pointer */
leptr = offsetof(struct lance_init_block, brx_ring);
*lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
(leptr >> 16);
*lib_ptr(ib, rx_ptr, lp->type) = leptr;
if (ZERO)
printk("RX ptr: %8.8x(%8.8x)\n",
leptr, lib_off(brx_ring, lp->type));
/* Setup tx descriptor pointer */
leptr = offsetof(struct lance_init_block, btx_ring);
*lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
(leptr >> 16);
*lib_ptr(ib, tx_ptr, lp->type) = leptr;
if (ZERO)
printk("TX ptr: %8.8x(%8.8x)\n",
leptr, lib_off(btx_ring, lp->type));
if (ZERO)
printk("TX rings:\n");
/* Setup the Tx ring entries */
for (i = 0; i < TX_RING_SIZE; i++) {
leptr = lp->tx_buf_ptr_lnc[i];
*lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
*lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
0xff;
*lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
/* The ones required by tmd2 */
*lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
if (i < 3 && ZERO)
printk("%d: 0x%8.8x(0x%8.8x)\n",
i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
}
/* Setup the Rx ring entries */
if (ZERO)
printk("RX rings:\n");
for (i = 0; i < RX_RING_SIZE; i++) {
leptr = lp->rx_buf_ptr_lnc[i];
*lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
*lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
0xff) |
LE_R1_OWN;
*lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
0xf000;
*lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
if (i < 3 && ZERO)
printk("%d: 0x%8.8x(0x%8.8x)\n",
i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
}
iob();
}
static int init_restart_lance(struct lance_private *lp)
{
volatile struct lance_regs *ll = lp->ll;
int i;
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_INIT);
/* Wait for the lance to complete initialization */
for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
udelay(10);
}
if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
i, ll->rdp);
return -1;
}
writereg(&ll->rdp, LE_C0_IDON);
writereg(&ll->rdp, LE_C0_STRT);
writereg(&ll->rdp, LE_C0_INEA);
return 0;
}
static int lance_rx(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
volatile u16 *rd;
unsigned short bits;
int entry, len;
struct sk_buff *skb;
#ifdef TEST_HITS
{
int i;
printk("[");
for (i = 0; i < RX_RING_SIZE; i++) {
if (i == lp->rx_new)
printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
lp->type) &
LE_R1_OWN ? "_" : "X");
else
printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
lp->type) &
LE_R1_OWN ? "." : "1");
}
printk("]");
}
#endif
for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
!((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
entry = lp->rx_new;
/* We got an incomplete frame? */
if ((bits & LE_R1_POK) != LE_R1_POK) {
dev->stats.rx_over_errors++;
dev->stats.rx_errors++;
} else if (bits & LE_R1_ERR) {
/* Count only the end frame as a rx error,
* not the beginning
*/
if (bits & LE_R1_BUF)
dev->stats.rx_fifo_errors++;
if (bits & LE_R1_CRC)
dev->stats.rx_crc_errors++;
if (bits & LE_R1_OFL)
dev->stats.rx_over_errors++;
if (bits & LE_R1_FRA)
dev->stats.rx_frame_errors++;
if (bits & LE_R1_EOP)
dev->stats.rx_errors++;
} else {
len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
skb = dev_alloc_skb(len + 2);
if (!skb) {
printk("%s: Memory squeeze, deferring packet.\n",
dev->name);
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
((lp->rx_buf_ptr_lnc[entry] >> 16) &
0xff) | LE_R1_OWN;
lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
return 0;
}
dev->stats.rx_bytes += len;
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
cp_from_buf(lp->type, skb->data,
(char *)lp->rx_buf_ptr_cpu[entry], len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
}
/* Return the packet to the pool */
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
*rds_ptr(rd, rmd1, lp->type) =
((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
}
return 0;
}
static void lance_tx(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
volatile struct lance_regs *ll = lp->ll;
volatile u16 *td;
int i, j;
int status;
j = lp->tx_old;
spin_lock(&lp->lock);
for (i = j; i != lp->tx_new; i = j) {
td = lib_ptr(ib, btx_ring[i], lp->type);
/* If we hit a packet not owned by us, stop */
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
break;
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
status = *tds_ptr(td, misc, lp->type);
dev->stats.tx_errors++;
if (status & LE_T3_RTY)
dev->stats.tx_aborted_errors++;
if (status & LE_T3_LCOL)
dev->stats.tx_window_errors++;
if (status & LE_T3_CLOS) {
dev->stats.tx_carrier_errors++;
printk("%s: Carrier Lost\n", dev->name);
/* Stop the lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
init_restart_lance(lp);
goto out;
}
/* Buffer errors and underflows turn off the
* transmitter, restart the adapter.
*/
if (status & (LE_T3_BUF | LE_T3_UFL)) {
dev->stats.tx_fifo_errors++;
printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
dev->name);
/* Stop the lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
init_restart_lance(lp);
goto out;
}
} else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
LE_T1_POK) {
/*
* So we don't count the packet more than once.
*/
*tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
/* One collision before packet was sent. */
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
dev->stats.collisions++;
/* More than one collision, be optimistic. */
if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
dev->stats.collisions += 2;
dev->stats.tx_packets++;
}
j = (j + 1) & TX_RING_MOD_MASK;
}
lp->tx_old = j;
out:
if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL > 0)
netif_wake_queue(dev);
spin_unlock(&lp->lock);
}
static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
printk(KERN_ERR "%s: DMA error\n", dev->name);
return IRQ_HANDLED;
}
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
int csr0;
writereg(&ll->rap, LE_CSR0);
csr0 = ll->rdp;
/* Acknowledge all the interrupt sources ASAP */
writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
if ((csr0 & LE_C0_ERR)) {
/* Clear the error condition */
writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
LE_C0_CERR | LE_C0_MERR);
}
if (csr0 & LE_C0_RINT)
lance_rx(dev);
if (csr0 & LE_C0_TINT)
lance_tx(dev);
if (csr0 & LE_C0_BABL)
dev->stats.tx_errors++;
if (csr0 & LE_C0_MISS)
dev->stats.rx_errors++;
if (csr0 & LE_C0_MERR) {
printk("%s: Memory error, status %04x\n", dev->name, csr0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
init_restart_lance(lp);
netif_wake_queue(dev);
}
writereg(&ll->rdp, LE_C0_INEA);
writereg(&ll->rdp, LE_C0_INEA);
return IRQ_HANDLED;
}
static int lance_open(struct net_device *dev)
{
volatile u16 *ib = (volatile u16 *)dev->mem_start;
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
int status = 0;
/* Stop the Lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
/* Set mode and clear multicast filter only at device open,
* so that lance_init_ring() called at any error will not
* forget multicast filters.
*
* BTW it is common bug in all lance drivers! --ANK
*/
*lib_ptr(ib, mode, lp->type) = 0;
*lib_ptr(ib, filter[0], lp->type) = 0;
*lib_ptr(ib, filter[1], lp->type) = 0;
*lib_ptr(ib, filter[2], lp->type) = 0;
*lib_ptr(ib, filter[3], lp->type) = 0;
lance_init_ring(dev);
load_csrs(lp);
netif_start_queue(dev);
/* Associate IRQ with lance_interrupt */
if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
}
if (lp->dma_irq >= 0) {
unsigned long flags;
if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
"lance error", dev)) {
free_irq(dev->irq, dev);
printk("%s: Can't get DMA IRQ %d\n", dev->name,
lp->dma_irq);
return -EAGAIN;
}
spin_lock_irqsave(&ioasic_ssr_lock, flags);
fast_mb();
/* Enable I/O ASIC LANCE DMA. */
ioasic_write(IO_REG_SSR,
ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
fast_mb();
spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
}
status = init_restart_lance(lp);
return status;
}
static int lance_close(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
netif_stop_queue(dev);
del_timer_sync(&lp->multicast_timer);
/* Stop the card */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
if (lp->dma_irq >= 0) {
unsigned long flags;
spin_lock_irqsave(&ioasic_ssr_lock, flags);
fast_mb();
/* Disable I/O ASIC LANCE DMA. */
ioasic_write(IO_REG_SSR,
ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
fast_iob();
spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
free_irq(lp->dma_irq, dev);
}
free_irq(dev->irq, dev);
return 0;
}
static inline int lance_reset(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
int status;
/* Stop the lance */
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
load_csrs(lp);
dev->trans_start = jiffies; /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
static void lance_tx_timeout(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
dev->name, ll->rdp);
lance_reset(dev);
netif_wake_queue(dev);
}
static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
volatile u16 *ib = (volatile u16 *)dev->mem_start;
unsigned long flags;
int entry, len;
len = skb->len;
if (len < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
len = ETH_ZLEN;
}
dev->stats.tx_bytes += len;
spin_lock_irqsave(&lp->lock, flags);
entry = lp->tx_new;
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
/* Now, give the packet to the lance */
*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
(LE_T1_POK | LE_T1_OWN);
lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
if (TX_BUFFS_AVAIL <= 0)
netif_stop_queue(dev);
/* Kick the lance: transmit now */
writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
struct netdev_hw_addr *ha;
char *addrs;
u32 crc;
/* set all multicast bits */
if (dev->flags & IFF_ALLMULTI) {
*lib_ptr(ib, filter[0], lp->type) = 0xffff;
*lib_ptr(ib, filter[1], lp->type) = 0xffff;
*lib_ptr(ib, filter[2], lp->type) = 0xffff;
*lib_ptr(ib, filter[3], lp->type) = 0xffff;
return;
}
/* clear the multicast filter */
*lib_ptr(ib, filter[0], lp->type) = 0;
*lib_ptr(ib, filter[1], lp->type) = 0;
*lib_ptr(ib, filter[2], lp->type) = 0;
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
addrs = ha->addr;
/* multicast address? */
if (!(*addrs & 1))
continue;
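/* Hash into the 64-bit logical address filter: the top six bits of
 * the little-endian CRC pick one of 64 filter bits -- bits 5:4 select
 * one of the four 16-bit filter words, bits 3:0 the bit within it. */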
crc = ether_crc_le(ETH_ALEN, addrs);
crc = crc >> 26;
*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
}
}
static void lance_set_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
volatile struct lance_regs *ll = lp->ll;
if (!netif_running(dev))
return;
if (lp->tx_old != lp->tx_new) {
mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
netif_wake_queue(dev);
return;
}
netif_stop_queue(dev);
writereg(&ll->rap, LE_CSR0);
writereg(&ll->rdp, LE_C0_STOP);
lance_init_ring(dev);
if (dev->flags & IFF_PROMISC) {
*lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
} else {
*lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
lance_load_multicast(dev);
}
load_csrs(lp);
init_restart_lance(lp);
netif_wake_queue(dev);
}
static void lance_set_multicast_retry(unsigned long _opaque)
{
struct net_device *dev = (struct net_device *) _opaque;
lance_set_multicast(dev);
}
static const struct net_device_ops lance_netdev_ops = {
.ndo_open = lance_open,
.ndo_stop = lance_close,
.ndo_start_xmit = lance_start_xmit,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_set_multicast_list = lance_set_multicast,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
static int __devinit dec_lance_probe(struct device *bdev, const int type)
{
static unsigned version_printed;
static const char fmt[] = "declance%d";
char name[10];
struct net_device *dev;
struct lance_private *lp;
volatile struct lance_regs *ll;
resource_size_t start = 0, len = 0;
int i, ret;
unsigned long esar_base;
unsigned char *esar;
if (dec_lance_debug && version_printed++ == 0)
printk(version);
if (bdev)
snprintf(name, sizeof(name), "%s", dev_name(bdev));
else {
i = 0;
dev = root_lance_dev;
while (dev) {
i++;
lp = netdev_priv(dev);
dev = lp->next;
}
snprintf(name, sizeof(name), fmt, i);
}
dev = alloc_etherdev(sizeof(struct lance_private));
if (!dev) {
printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n",
name);
ret = -ENOMEM;
goto err_out;
}
/*
* alloc_etherdev ensures the data structures used by the LANCE
* are aligned.
*/
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
lp->type = type;
switch (type) {
case ASIC_LANCE:
dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
/* buffer space for the on-board LANCE shared memory */
/*
* FIXME: ugly hack!
*/
dev->mem_start = CKSEG1ADDR(0x00020000);
dev->mem_end = dev->mem_start + 0x00020000;
dev->irq = dec_interrupt[DEC_IRQ_LANCE];
esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
/* Workaround crash with booting KN04 2.1k from Disk */
memset((void *)dev->mem_start, 0,
dev->mem_end - dev->mem_start);
/*
* setup the pointer arrays, this sucks [tm] :-(
*/
for (i = 0; i < RX_RING_SIZE; i++) {
lp->rx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * i * RX_BUFF_SIZE);
lp->rx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
}
for (i = 0; i < TX_RING_SIZE; i++) {
lp->tx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * RX_RING_SIZE * RX_BUFF_SIZE +
2 * i * TX_BUFF_SIZE);
lp->tx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
}
/* Setup I/O ASIC LANCE DMA. */
lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
ioasic_write(IO_REG_LANCE_DMA_P,
CPHYSADDR(dev->mem_start) << 3);
break;
#ifdef CONFIG_TC
case PMAD_LANCE:
dev_set_drvdata(bdev, dev);
start = to_tc_dev(bdev)->resource.start;
len = to_tc_dev(bdev)->resource.end - start + 1;
if (!request_mem_region(start, len, dev_name(bdev))) {
printk(KERN_ERR
"%s: Unable to reserve MMIO resource\n",
dev_name(bdev));
ret = -EBUSY;
goto err_out_dev;
}
dev->mem_start = CKSEG1ADDR(start);
dev->mem_end = dev->mem_start + 0x100000;
dev->base_addr = dev->mem_start + 0x100000;
dev->irq = to_tc_dev(bdev)->interrupt;
esar_base = dev->mem_start + 0x1c0002;
lp->dma_irq = -1;
for (i = 0; i < RX_RING_SIZE; i++) {
lp->rx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + BUF_OFFSET_CPU +
i * RX_BUFF_SIZE);
lp->rx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
}
for (i = 0; i < TX_RING_SIZE; i++) {
lp->tx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + BUF_OFFSET_CPU +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
lp->tx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
}
break;
#endif
case PMAX_LANCE:
dev->irq = dec_interrupt[DEC_IRQ_LANCE];
dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
lp->dma_irq = -1;
/*
* setup the pointer arrays, this sucks [tm] :-(
*/
for (i = 0; i < RX_RING_SIZE; i++) {
lp->rx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * i * RX_BUFF_SIZE);
lp->rx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
}
for (i = 0; i < TX_RING_SIZE; i++) {
lp->tx_buf_ptr_cpu[i] =
(char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
2 * RX_RING_SIZE * RX_BUFF_SIZE +
2 * i * TX_BUFF_SIZE);
lp->tx_buf_ptr_lnc[i] =
(BUF_OFFSET_LNC +
RX_RING_SIZE * RX_BUFF_SIZE +
i * TX_BUFF_SIZE);
}
break;
default:
printk(KERN_ERR "%s: declance_init called with unknown type\n",
name);
ret = -ENODEV;
goto err_out_dev;
}
ll = (struct lance_regs *) dev->base_addr;
esar = (unsigned char *) esar_base;
/* prom checks */
/* First, check for test pattern */
if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
printk(KERN_ERR
"%s: Ethernet station address prom not found!\n",
name);
ret = -ENODEV;
goto err_out_resource;
}
/* Check the prom contents */
for (i = 0; i < 8; i++) {
if (esar[i * 4] != esar[0x3c - i * 4] &&
esar[i * 4] != esar[0x40 + i * 4] &&
esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
printk(KERN_ERR "%s: Something is wrong with the "
"ethernet station address prom!\n", name);
ret = -ENODEV;
goto err_out_resource;
}
}
/* Copy the ethernet address to the device structure, later to the
* lance initialization block so the lance gets it every time it's
* (re)initialized.
*/
switch (type) {
case ASIC_LANCE:
printk("%s: IOASIC onboard LANCE", name);
break;
case PMAD_LANCE:
printk("%s: PMAD-AA", name);
break;
case PMAX_LANCE:
printk("%s: PMAX onboard LANCE", name);
break;
}
for (i = 0; i < 6; i++)
dev->dev_addr[i] = esar[i * 4];
printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = 5*HZ;
/* lp->ll is the location of the registers for lance card */
lp->ll = ll;
/* busmaster_regval (CSR3) should be zero according to the PMAD-AA
* specification.
*/
lp->busmaster_regval = 0;
dev->dma = 0;
/* We cannot sleep if the chip is busy during a
* multicast list update event, because such events
* can occur from interrupts (ex. IPv6). So we
* use a timer to try again later when necessary. -DaveM
*/
init_timer(&lp->multicast_timer);
lp->multicast_timer.data = (unsigned long) dev;
lp->multicast_timer.function = lance_set_multicast_retry;
ret = register_netdev(dev);
if (ret) {
printk(KERN_ERR
"%s: Unable to register netdev, aborting.\n", name);
goto err_out_resource;
}
if (!bdev) {
lp->next = root_lance_dev;
root_lance_dev = dev;
}
printk("%s: registered as %s.\n", name, dev->name);
return 0;
err_out_resource:
if (bdev)
release_mem_region(start, len);
err_out_dev:
free_netdev(dev);
err_out:
return ret;
}
static void __exit dec_lance_remove(struct device *bdev)
{
struct net_device *dev = dev_get_drvdata(bdev);
resource_size_t start, len;
unregister_netdev(dev);
start = to_tc_dev(bdev)->resource.start;
len = to_tc_dev(bdev)->resource.end - start + 1;
release_mem_region(start, len);
free_netdev(dev);
}
/* Find all the lance cards on the system and initialize them */
static int __init dec_lance_platform_probe(void)
{
int count = 0;
if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
count++;
} else if (!TURBOCHANNEL) {
if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
count++;
}
}
return (count > 0) ? 0 : -ENODEV;
}
static void __exit dec_lance_platform_remove(void)
{
while (root_lance_dev) {
struct net_device *dev = root_lance_dev;
struct lance_private *lp = netdev_priv(dev);
unregister_netdev(dev);
root_lance_dev = lp->next;
free_netdev(dev);
}
}
#ifdef CONFIG_TC
static int __devinit dec_lance_tc_probe(struct device *dev);
static int __exit dec_lance_tc_remove(struct device *dev);
static const struct tc_device_id dec_lance_tc_table[] = {
{ "DEC ", "PMAD-AA " },
{ }
};
MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
static struct tc_driver dec_lance_tc_driver = {
.id_table = dec_lance_tc_table,
.driver = {
.name = "declance",
.bus = &tc_bus_type,
.probe = dec_lance_tc_probe,
.remove = __exit_p(dec_lance_tc_remove),
},
};
static int __devinit dec_lance_tc_probe(struct device *dev)
{
int status = dec_lance_probe(dev, PMAD_LANCE);
if (!status)
get_device(dev);
return status;
}
static int __exit dec_lance_tc_remove(struct device *dev)
{
put_device(dev);
dec_lance_remove(dev);
return 0;
}
#endif
static int __init dec_lance_init(void)
{
int status;
status = tc_register_driver(&dec_lance_tc_driver);
if (!status)
dec_lance_platform_probe();
return status;
}
static void __exit dec_lance_exit(void)
{
dec_lance_platform_remove();
tc_unregister_driver(&dec_lance_tc_driver);
}
module_init(dec_lance_init);
module_exit(dec_lance_exit);
| gpl-2.0 |
andip71/boeffla-kernel-samsung-s3 | net/netfilter/nf_conntrack_standalone.c | 2952 | 13748 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <linux/rculist_nulls.h>
MODULE_LICENSE("GPL");
#ifdef CONFIG_PROC_FS
int
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto)
{
return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple);
}
EXPORT_SYMBOL_GPL(print_tuple);
struct ct_iter_state {
struct seq_net_private p;
unsigned int bucket;
u_int64_t time_now;
};
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
struct hlist_nulls_node *n;
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
return NULL;
}
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct hlist_nulls_node *head)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
head = rcu_dereference(
hlist_nulls_first_rcu(
&net->ct.hash[st->bucket]));
}
return head;
}
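/* Note on the nulls-list walk above: every RCU hash chain ends in a
 * "nulls" marker that encodes its bucket index.  If the marker value
 * matches st->bucket we really finished that bucket and may advance;
 * otherwise the entry was moved to another chain while we walked it,
 * and we re-read the head of the current bucket.  See the
 * hlist_nulls documentation in the kernel tree for the full scheme.
 */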
static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
struct hlist_nulls_node *head = ct_get_first(seq);
if (head)
while (pos && (head = ct_get_next(seq, head)))
pos--;
return pos ? NULL : head;
}
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct ct_iter_state *st = seq->private;
st->time_now = ktime_to_ns(ktime_get_real());
rcu_read_lock();
return ct_get_idx(seq, *pos);
}
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
return ct_get_next(s, v);
}
static void ct_seq_stop(struct seq_file *s, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
int ret;
u32 len;
char *secctx;
ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
if (ret)
return 0;
ret = seq_printf(s, "secctx=%s ", secctx);
security_release_secctx(secctx, len);
return ret;
}
#else
static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
{
return 0;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
struct ct_iter_state *st = s->private;
struct nf_conn_tstamp *tstamp;
s64 delta_time;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
delta_time = st->time_now - tstamp->start;
if (delta_time > 0)
delta_time = div_s64(delta_time, NSEC_PER_SEC);
else
delta_time = 0;
return seq_printf(s, "delta-time=%llu ",
(unsigned long long)delta_time);
}
return 0;
}
#else
static inline int
ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
{
return 0;
}
#endif
/* return 0 on success, -ENOSPC when the seq_file buffer is full */
static int ct_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_tuple_hash *hash = v;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
int ret = 0;
NF_CT_ASSERT(ct);
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
return 0;
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
goto release;
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
NF_CT_ASSERT(l3proto);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
NF_CT_ASSERT(l4proto);
ret = -ENOSPC;
if (seq_printf(s, "%-8s %u %-8s %u %ld ",
l3proto->name, nf_ct_l3num(ct),
l4proto->name, nf_ct_protonum(ct),
timer_pending(&ct->timeout)
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
goto release;
if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l3proto, l4proto))
goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
goto release;
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
if (seq_printf(s, "[UNREPLIED] "))
goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
l3proto, l4proto))
goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
goto release;
if (test_bit(IPS_ASSURED_BIT, &ct->status))
if (seq_printf(s, "[ASSURED] "))
goto release;
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (seq_printf(s, "mark=%u ", ct->mark))
goto release;
#endif
if (ct_show_secctx(s, ct))
goto release;
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
goto release;
#endif
if (ct_show_delta_time(s, ct))
goto release;
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
ret = 0;
release:
nf_ct_put(ct);
return ret;
}
static const struct seq_operations ct_seq_ops = {
.start = ct_seq_start,
.next = ct_seq_next,
.stop = ct_seq_stop,
.show = ct_seq_show
};
static int ct_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ct_seq_ops,
sizeof(struct ct_iter_state));
}
static const struct file_operations ct_file_ops = {
.owner = THIS_MODULE,
.open = ct_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
return NULL;
}
static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
return NULL;
}
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_net(seq);
unsigned int nr_conntracks = atomic_read(&net->ct.count);
const struct ip_conntrack_stat *st = v;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
"%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
st->searched,
st->found,
st->new,
st->invalid,
st->ignore,
st->delete,
st->delete_list,
st->insert,
st->insert_failed,
st->drop,
st->early_drop,
st->error,
st->expect_new,
st->expect_create,
st->expect_delete,
st->search_restart
);
return 0;
}
static const struct seq_operations ct_cpu_seq_ops = {
.start = ct_cpu_seq_start,
.next = ct_cpu_seq_next,
.stop = ct_cpu_seq_stop,
.show = ct_cpu_seq_show,
};
static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ct_cpu_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations ct_cpu_seq_fops = {
.owner = THIS_MODULE,
.open = ct_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int nf_conntrack_standalone_init_proc(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
if (!pde)
goto out_nf_conntrack;
pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat,
&ct_cpu_seq_fops);
if (!pde)
goto out_stat_nf_conntrack;
return 0;
out_stat_nf_conntrack:
proc_net_remove(net, "nf_conntrack");
out_nf_conntrack:
return -ENOMEM;
}
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
remove_proc_entry("nf_conntrack", net->proc_net_stat);
proc_net_remove(net, "nf_conntrack");
}
#else
static int nf_conntrack_standalone_init_proc(struct net *net)
{
return 0;
}
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
#endif /* CONFIG_PROC_FS */
/* Sysctl support */
#ifdef CONFIG_SYSCTL
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;
static struct ctl_table_header *nf_ct_netfilter_header;
static ctl_table nf_ct_sysctl_table[] = {
{
.procname = "nf_conntrack_max",
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_count",
.data = &init_net.ct.count,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_buckets",
.data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_checksum",
.data = &init_net.ct.sysctl_checksum,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_log_invalid",
.data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &log_invalid_proto_min,
.extra2 = &log_invalid_proto_max,
},
{
.procname = "nf_conntrack_expect_max",
.data = &nf_ct_expect_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
#define NET_NF_CONNTRACK_MAX 2089
static ctl_table nf_ct_netfilter_table[] = {
{
.procname = "nf_conntrack_max",
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_path nf_ct_path[] = {
{ .procname = "net", },
{ }
};
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
struct ctl_table *table;
if (net_eq(net, &init_net)) {
nf_ct_netfilter_header =
register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
if (!nf_ct_netfilter_header)
goto out;
}
table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
GFP_KERNEL);
if (!table)
goto out_kmemdup;
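/* Patch the per-netns copy of the table; the indices below must match
 * the entry order in nf_ct_sysctl_table above ([1] = count,
 * [2] = buckets, [3] = checksum, [4] = log_invalid).
 */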
table[1].data = &net->ct.count;
table[2].data = &net->ct.htable_size;
table[3].data = &net->ct.sysctl_checksum;
table[4].data = &net->ct.sysctl_log_invalid;
net->ct.sysctl_header = register_net_sysctl_table(net,
nf_net_netfilter_sysctl_path, table);
if (!net->ct.sysctl_header)
goto out_unregister_netfilter;
return 0;
out_unregister_netfilter:
kfree(table);
out_kmemdup:
if (net_eq(net, &init_net))
unregister_sysctl_table(nf_ct_netfilter_header);
out:
printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
return -ENOMEM;
}
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
struct ctl_table *table;
if (net_eq(net, &init_net))
unregister_sysctl_table(nf_ct_netfilter_header);
table = net->ct.sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(net->ct.sysctl_header);
kfree(table);
}
#else
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
return 0;
}
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
static int nf_conntrack_net_init(struct net *net)
{
int ret;
ret = nf_conntrack_init(net);
if (ret < 0)
goto out_init;
ret = nf_conntrack_standalone_init_proc(net);
if (ret < 0)
goto out_proc;
net->ct.sysctl_checksum = 1;
net->ct.sysctl_log_invalid = 0;
ret = nf_conntrack_standalone_init_sysctl(net);
if (ret < 0)
goto out_sysctl;
return 0;
out_sysctl:
nf_conntrack_standalone_fini_proc(net);
out_proc:
nf_conntrack_cleanup(net);
out_init:
return ret;
}
static void nf_conntrack_net_exit(struct net *net)
{
nf_conntrack_standalone_fini_sysctl(net);
nf_conntrack_standalone_fini_proc(net);
nf_conntrack_cleanup(net);
}
static struct pernet_operations nf_conntrack_net_ops = {
.init = nf_conntrack_net_init,
.exit = nf_conntrack_net_exit,
};
static int __init nf_conntrack_standalone_init(void)
{
return register_pernet_subsys(&nf_conntrack_net_ops);
}
static void __exit nf_conntrack_standalone_fini(void)
{
unregister_pernet_subsys(&nf_conntrack_net_ops);
}
module_init(nf_conntrack_standalone_init);
module_exit(nf_conntrack_standalone_fini);
/* Some modules need us, but don't depend directly on any symbol.
They should call this. */
void need_conntrack(void)
{
}
EXPORT_SYMBOL_GPL(need_conntrack);
| gpl-2.0 |
TeamWin/kernel_samsung_lt02ltetmo | net/netfilter/ipvs/ip_vs_conn.c | 5000 | 33748 | /*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the Netfilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Peter Kese <peter.kese@ijs.si>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* The IPVS code for kernel 2.2 was done by Wensong Zhang and Peter Kese,
* with changes/fixes from Julian Anastasov, Lars Marowsky-Bree, Horms
* and others. Much of the code here is taken from the IP MASQ code of
* kernel 2.2.
*
* Changes:
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h> /* for proc_net_* */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <net/net_namespace.h>
#include <net/ip_vs.h>
#ifndef CONFIG_IP_VS_TAB_BITS
#define CONFIG_IP_VS_TAB_BITS 12
#endif
/*
* Connection hash size. Default is what was selected at compile time.
*/
static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
/* size and mask values */
int ip_vs_conn_tab_size __read_mostly;
static int ip_vs_conn_tab_mask __read_mostly;
/*
* Connection hash table: for input and output packets lookups of IPVS
*/
static struct hlist_head *ip_vs_conn_tab __read_mostly;
/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
/* counter for no client port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
/* random value for IPVS connection hash */
static unsigned int ip_vs_conn_rnd __read_mostly;
/*
* Fine locking granularity for big connection hash table
*/
#define CT_LOCKARRAY_BITS 5
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
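/* A single global lock would serialize all connection-table accesses,
 * so the table is guarded by an array of 2^CT_LOCKARRAY_BITS = 32
 * rwlocks instead: bucket h uses lock (h & CT_LOCKARRAY_MASK), so
 * buckets h, h + 32, h + 64, ... share one lock.  This trades a small,
 * fixed amount of memory for much lower lock contention.
 */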
struct ip_vs_aligned_lock
{
rwlock_t l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
/* lock array for conn table */
static struct ip_vs_aligned_lock
__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
static inline void ct_read_lock(unsigned key)
{
read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_read_unlock(unsigned key)
{
read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_lock(unsigned key)
{
write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_unlock(unsigned key)
{
write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_read_lock_bh(unsigned key)
{
read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_read_unlock_bh(unsigned key)
{
read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_lock_bh(unsigned key)
{
write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
static inline void ct_write_unlock_bh(unsigned key)
{
write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
}
/*
* Returns hash value for IPVS connection entry
*/
static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
const union nf_inet_addr *addr,
__be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
(__force u32)port, proto, ip_vs_conn_rnd) ^
((size_t)net>>8)) & ip_vs_conn_tab_mask;
#endif
return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
ip_vs_conn_rnd) ^
((size_t)net>>8)) & ip_vs_conn_tab_mask;
}
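/* The bucket index above mixes protocol, client address and port
 * through jhash with a boot-time random seed (ip_vs_conn_rnd) so the
 * layout is not predictable by remote hosts, then XORs in the netns
 * pointer (shifted right to discard the low, alignment-equal bits)
 * so that different namespaces hash differently.
 */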
static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
bool inverse)
{
const union nf_inet_addr *addr;
__be16 port;
if (p->pe_data && p->pe->hashkey_raw)
return p->pe->hashkey_raw(p, ip_vs_conn_rnd, inverse) &
ip_vs_conn_tab_mask;
if (likely(!inverse)) {
addr = p->caddr;
port = p->cport;
} else {
addr = p->vaddr;
port = p->vport;
}
return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
}
static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
struct ip_vs_conn_param p;
ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
&cp->caddr, cp->cport, NULL, 0, &p);
if (cp->pe) {
p.pe = cp->pe;
p.pe_data = cp->pe_data;
p.pe_data_len = cp->pe_data_len;
}
return ip_vs_conn_hashkey_param(&p, false);
}
/*
* Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
* returns bool success.
*/
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
{
unsigned hash;
int ret;
if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
return 0;
/* Hash by protocol, client address and port */
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock(hash);
spin_lock(&cp->lock);
if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
cp->flags |= IP_VS_CONN_F_HASHED;
atomic_inc(&cp->refcnt);
ret = 1;
} else {
pr_err("%s(): request for already hashed, called from %pF\n",
__func__, __builtin_return_address(0));
ret = 0;
}
spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
}
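/* Note: connections flagged IP_VS_CONN_F_ONE_PACKET (used by
 * one-packet scheduling, typically for UDP) are deliberately never
 * hashed above: they live only for a single packet and must not be
 * found by later lookups.
 */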
/*
* UNhashes ip_vs_conn from ip_vs_conn_tab.
* returns bool success.
*/
static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
{
unsigned hash;
int ret;
/* unhash it and decrease its reference counter */
hash = ip_vs_conn_hashkey_conn(cp);
ct_write_lock(hash);
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_HASHED) {
hlist_del(&cp->c_list);
cp->flags &= ~IP_VS_CONN_F_HASHED;
atomic_dec(&cp->refcnt);
ret = 1;
} else
ret = 0;
spin_unlock(&cp->lock);
ct_write_unlock(hash);
return ret;
}
/*
* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
* Called for pkts coming from OUTside-to-INside.
* p->caddr, p->cport: pkt source address (foreign host)
* p->vaddr, p->vport: pkt dest address (load balancer)
*/
static inline struct ip_vs_conn *
__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp;
struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
p->cport == cp->cport && p->vport == cp->vport &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
p->protocol == cp->protocol &&
ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ct_read_unlock(hash);
return cp;
}
}
ct_read_unlock(hash);
return NULL;
}
struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
{
struct ip_vs_conn *cp;
cp = __ip_vs_conn_in_get(p);
if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) {
struct ip_vs_conn_param cport_zero_p = *p;
cport_zero_p.cport = 0;
cp = __ip_vs_conn_in_get(&cport_zero_p);
}
IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
static int
ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse,
struct ip_vs_conn_param *p)
{
__be16 _ports[2], *pptr;
struct net *net = skb_net(skb);
pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
if (pptr == NULL)
return 1;
if (likely(!inverse))
ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
pptr[0], &iph->daddr, pptr[1], p);
else
ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
pptr[1], &iph->saddr, pptr[0], p);
return 0;
}
struct ip_vs_conn *
ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p))
return NULL;
return ip_vs_conn_in_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp;
struct hlist_node *n;
hash = ip_vs_conn_hashkey_param(p, false);
ct_read_lock(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (!ip_vs_conn_net_eq(cp, p->net))
continue;
if (p->pe_data && p->pe->ct_match) {
if (p->pe == cp->pe && p->pe->ct_match(p, cp))
goto out;
continue;
}
if (cp->af == p->af &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
/* protocol should only be IPPROTO_IP if
* p->vaddr is a fwmark */
ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
p->af, p->vaddr, &cp->vaddr) &&
p->cport == cp->cport && p->vport == cp->vport &&
cp->flags & IP_VS_CONN_F_TEMPLATE &&
p->protocol == cp->protocol)
goto out;
}
cp = NULL;
out:
if (cp)
atomic_inc(&cp->refcnt);
ct_read_unlock(hash);
IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
cp ? "hit" : "not hit");
return cp;
}
/* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
* Called for pkts coming from inside-to-OUTside.
* p->caddr, p->cport: pkt source address (inside host)
* p->vaddr, p->vport: pkt dest address (foreign host) */
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
unsigned hash;
struct ip_vs_conn *cp, *ret=NULL;
struct hlist_node *n;
/*
* Check for "full" addressed entries
*/
hash = ip_vs_conn_hashkey_param(p, true);
ct_read_lock(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
p->vport == cp->cport && p->cport == cp->dport &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
p->protocol == cp->protocol &&
ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ret = cp;
break;
}
}
ct_read_unlock(hash);
IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
ip_vs_proto_name(p->protocol),
IP_VS_DBG_ADDR(p->af, p->caddr), ntohs(p->cport),
IP_VS_DBG_ADDR(p->af, p->vaddr), ntohs(p->vport),
ret ? "hit" : "not hit");
return ret;
}
struct ip_vs_conn *
ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
struct ip_vs_conn_param p;
if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p))
return NULL;
return ip_vs_conn_out_get(&p);
}
EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
/*
* Put back the conn and restart its timer with its timeout
*/
void ip_vs_conn_put(struct ip_vs_conn *cp)
{
unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
0 : cp->timeout;
mod_timer(&cp->timer, jiffies+t);
__ip_vs_conn_put(cp);
}
/*
* Fill a no_client_port connection with a client port number
*/
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
{
if (ip_vs_conn_unhash(cp)) {
spin_lock(&cp->lock);
if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
atomic_dec(&ip_vs_conn_no_cport_cnt);
cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
cp->cport = cport;
}
spin_unlock(&cp->lock);
/* hash on new dport */
ip_vs_conn_hash(cp);
}
}
/*
* Bind a connection entry with the corresponding packet_xmit.
* Called by ip_vs_conn_new.
*/
static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit;
break;
case IP_VS_CONN_F_TUNNEL:
cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit;
break;
}
}
#ifdef CONFIG_IP_VS_IPV6
static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
{
switch (IP_VS_FWD_METHOD(cp)) {
case IP_VS_CONN_F_MASQ:
cp->packet_xmit = ip_vs_nat_xmit_v6;
break;
case IP_VS_CONN_F_TUNNEL:
cp->packet_xmit = ip_vs_tunnel_xmit_v6;
break;
case IP_VS_CONN_F_DROUTE:
cp->packet_xmit = ip_vs_dr_xmit_v6;
break;
case IP_VS_CONN_F_LOCALNODE:
cp->packet_xmit = ip_vs_null_xmit;
break;
case IP_VS_CONN_F_BYPASS:
cp->packet_xmit = ip_vs_bypass_xmit_v6;
break;
}
}
#endif
static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
{
return atomic_read(&dest->activeconns)
+ atomic_read(&dest->inactconns);
}
/*
* Bind a connection entry with a virtual service destination
* Called just after a new connection entry is created.
*/
static inline void
ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
{
unsigned int conn_flags;
/* if dest is NULL, then return directly */
if (!dest)
return;
/* Increase the refcnt counter of the dest */
atomic_inc(&dest->refcnt);
conn_flags = atomic_read(&dest->conn_flags);
if (cp->protocol != IPPROTO_UDP)
conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
/* Bind with the destination and its corresponding transmitter */
if (cp->flags & IP_VS_CONN_F_SYNC) {
/* if the connection is not a template and is created
* by sync, preserve the activity flag.
*/
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE))
conn_flags &= ~IP_VS_CONN_F_INACTIVE;
/* connections inherit forwarding method from dest */
cp->flags &= ~IP_VS_CONN_F_FWD_MASK;
}
cp->flags |= conn_flags;
cp->dest = dest;
IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
/* Update the connection counters */
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so increase the inactive
connection counter because it is in TCP SYNRECV
state (inactive) or another protocol's inactive state */
if ((cp->flags & IP_VS_CONN_F_SYNC) &&
(!(cp->flags & IP_VS_CONN_F_INACTIVE)))
atomic_inc(&dest->activeconns);
else
atomic_inc(&dest->inactconns);
} else {
/* It is a persistent connection/template, so increase
the persistent connection counter */
atomic_inc(&dest->persistconns);
}
if (dest->u_threshold != 0 &&
ip_vs_dest_totalconns(dest) >= dest->u_threshold)
dest->flags |= IP_VS_DEST_F_OVERLOAD;
}
/*
* Check if there is a destination for the connection, if so
* bind the connection to the destination.
*/
struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest;
if ((cp) && (!cp->dest)) {
dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
cp->dport, &cp->vaddr, cp->vport,
cp->protocol, cp->fwmark, cp->flags);
ip_vs_bind_dest(cp, dest);
return dest;
} else
return NULL;
}
/*
* Unbind a connection entry with its VS destination
* Called by the ip_vs_conn_expire function.
*/
static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
{
struct ip_vs_dest *dest = cp->dest;
if (!dest)
return;
IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
"d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
"dest->refcnt:%d\n",
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
/* Update the connection counters */
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so decrease the inactconns
or activeconns counter */
if (cp->flags & IP_VS_CONN_F_INACTIVE) {
atomic_dec(&dest->inactconns);
} else {
atomic_dec(&dest->activeconns);
}
} else {
/* It is a persistent connection/template, so decrease
the persistent connection counter */
atomic_dec(&dest->persistconns);
}
if (dest->l_threshold != 0) {
if (ip_vs_dest_totalconns(dest) < dest->l_threshold)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else if (dest->u_threshold != 0) {
if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
} else {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
}
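/* The 3/4 factor above provides hysteresis when only the upper
 * threshold is configured: IP_VS_DEST_F_OVERLOAD, set once the load
 * reaches u_threshold, is cleared only after the connection total
 * drops below 75% of it, which avoids the flag flapping right at the
 * limit.
 */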
/*
* Simply decrease the refcnt of the dest, because the
* dest will be either in service's destination list
* or in the trash.
*/
atomic_dec(&dest->refcnt);
}
static int expire_quiescent_template(struct netns_ipvs *ipvs,
struct ip_vs_dest *dest)
{
#ifdef CONFIG_SYSCTL
return ipvs->sysctl_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0);
#else
return 0;
#endif
}
/*
* Checking if the destination of a connection template is available.
* If available, return 1, otherwise invalidate this connection
* template and return 0.
*/
int ip_vs_check_template(struct ip_vs_conn *ct)
{
struct ip_vs_dest *dest = ct->dest;
struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
/*
* Checking the dest server status.
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
expire_quiescent_template(ipvs, dest)) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",
ip_vs_proto_name(ct->protocol),
IP_VS_DBG_ADDR(ct->af, &ct->caddr),
ntohs(ct->cport),
IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
ntohs(ct->vport),
IP_VS_DBG_ADDR(ct->af, &ct->daddr),
ntohs(ct->dport));
/*
* Invalidate the connection template
*/
if (ct->vport != htons(0xffff)) {
if (ip_vs_conn_unhash(ct)) {
ct->dport = htons(0xffff);
ct->vport = htons(0xffff);
ct->cport = 0;
ip_vs_conn_hash(ct);
}
}
/*
* Simply decrease the refcnt of the template,
* don't restart its timer.
*/
atomic_dec(&ct->refcnt);
return 0;
}
return 1;
}
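/* The invalidation above works by rehashing the template under an
 * effectively unmatchable key (ports 0xffff), so subsequent lookups
 * miss it and create a fresh template, while connections it still
 * controls keep their reference until they expire.
 */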
static void ip_vs_conn_expire(unsigned long data)
{
struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
cp->timeout = 60*HZ;
/*
* hey, I'm using it
*/
atomic_inc(&cp->refcnt);
/*
* do I control anybody?
*/
if (atomic_read(&cp->n_control))
goto expire_later;
/*
* unhash it if it is hashed in the conn table
*/
if (!ip_vs_conn_unhash(cp) && !(cp->flags & IP_VS_CONN_F_ONE_PACKET))
goto expire_later;
/*
* refcnt==1 implies I'm the only one referrer
*/
if (likely(atomic_read(&cp->refcnt) == 1)) {
/* delete the timer if it is activated by other users */
if (timer_pending(&cp->timer))
del_timer(&cp->timer);
/* does anybody control me? */
if (cp->control)
ip_vs_control_del(cp);
if (cp->flags & IP_VS_CONN_F_NFCT) {
/* Do not access conntracks during subsys cleanup
* because nf_conntrack_find_get can not be used after
* conntrack cleanup for the net.
*/
smp_rmb();
if (ipvs->enable)
ip_vs_conn_drop_conntrack(cp);
}
ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
if (cp->flags & IP_VS_CONN_F_NO_CPORT)
atomic_dec(&ip_vs_conn_no_cport_cnt);
atomic_dec(&ipvs->conn_count);
kmem_cache_free(ip_vs_conn_cachep, cp);
return;
}
/* hash it back to the table */
ip_vs_conn_hash(cp);
expire_later:
IP_VS_DBG(7, "delayed: conn->refcnt-1=%d conn->n_control=%d\n",
atomic_read(&cp->refcnt)-1,
atomic_read(&cp->n_control));
ip_vs_conn_put(cp);
}
void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
{
if (del_timer(&cp->timer))
mod_timer(&cp->timer, jiffies);
}
/*
* Create a new connection entry and hash it into the ip_vs_conn_tab
*/
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
struct netns_ipvs *ipvs = net_ipvs(p->net);
struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
p->protocol);
cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NULL;
}
INIT_HLIST_NODE(&cp->c_list);
setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
ip_vs_conn_net_set(cp, p->net);
cp->af = p->af;
cp->protocol = p->protocol;
ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
ip_vs_addr_copy(p->af, &cp->vaddr, p->vaddr);
cp->vport = p->vport;
/* proto should only be IPPROTO_IP if d_addr is a fwmark */
ip_vs_addr_copy(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
&cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
ip_vs_pe_get(p->pe);
cp->pe = p->pe;
cp->pe_data = p->pe_data;
cp->pe_data_len = p->pe_data_len;
}
spin_lock_init(&cp->lock);
/*
* Mark the entry as referenced by the current thread before hashing
* it into the table, so that another thread running
* ip_vs_random_dropentry cannot drop this entry.
*/
atomic_set(&cp->refcnt, 1);
atomic_set(&cp->n_control, 0);
atomic_set(&cp->in_pkts, 0);
atomic_inc(&ipvs->conn_count);
if (flags & IP_VS_CONN_F_NO_CPORT)
atomic_inc(&ip_vs_conn_no_cport_cnt);
/* Bind the connection with a destination server */
ip_vs_bind_dest(cp, dest);
/* Set its state and timeout */
cp->state = 0;
cp->timeout = 3*HZ;
/* Bind its packet transmitter */
#ifdef CONFIG_IP_VS_IPV6
if (p->af == AF_INET6)
ip_vs_bind_xmit_v6(cp);
else
#endif
ip_vs_bind_xmit(cp);
if (unlikely(pd && atomic_read(&pd->appcnt)))
ip_vs_bind_app(cp, pd->pp);
/*
* Allow conntrack to be preserved. By default, conntrack
* is created and destroyed for every packet.
* Sometimes keeping conntrack can be useful for
* IP_VS_CONN_F_ONE_PACKET too.
*/
if (ip_vs_conntrack_enabled(ipvs))
cp->flags |= IP_VS_CONN_F_NFCT;
/* Hash it in the ip_vs_conn_tab finally */
ip_vs_conn_hash(cp);
return cp;
}
/*
* /proc/net/ip_vs_conn entries
*/
#ifdef CONFIG_PROC_FS
struct ip_vs_iter_state {
struct seq_net_private p;
struct hlist_head *l;
};
static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
int idx;
struct ip_vs_conn *cp;
struct ip_vs_iter_state *iter = seq->private;
struct hlist_node *n;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
}
ct_read_unlock_bh(idx);
}
return NULL;
}
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ip_vs_iter_state *iter = seq->private;
iter->l = NULL;
return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
struct ip_vs_iter_state *iter = seq->private;
struct hlist_node *e;
struct hlist_head *l = iter->l;
int idx;
++*pos;
if (v == SEQ_START_TOKEN)
return ip_vs_conn_array(seq, 0);
/* more on same hash chain? */
if ((e = cp->c_list.next))
return hlist_entry(e, struct ip_vs_conn, c_list);
idx = l - ip_vs_conn_tab;
ct_read_unlock_bh(idx);
while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
iter->l = &ip_vs_conn_tab[idx];
return cp;
}
ct_read_unlock_bh(idx);
}
iter->l = NULL;
return NULL;
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
{
struct ip_vs_iter_state *iter = seq->private;
struct hlist_head *l = iter->l;
if (l)
ct_read_unlock_bh(l - ip_vs_conn_tab);
}
static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
if (!ip_vs_conn_net_eq(cp, net))
return 0;
if (cp->pe_data) {
pe_data[0] = ' ';
len = strlen(cp->pe->name);
memcpy(pe_data + 1, cp->pe->name, len);
pe_data[len + 1] = ' ';
len += 2;
len += cp->pe->show_pe_data(cp, pe_data + len);
}
pe_data[len] = '\0';
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
"%pI6 %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
&cp->daddr.in6, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X"
" %08X %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
ntohl(cp->daddr.ip), ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
}
return 0;
}
static const struct seq_operations ip_vs_conn_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_seq_show,
};
static int ip_vs_conn_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_fops = {
.owner = THIS_MODULE,
.open = ip_vs_conn_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static const char *ip_vs_origin_name(unsigned flags)
{
if (flags & IP_VS_CONN_F_SYNC)
return "SYNC";
else
return "LOCAL";
}
static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
else {
const struct ip_vs_conn *cp = v;
struct net *net = seq_file_net(seq);
if (!ip_vs_conn_net_eq(cp, net))
return 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
&cp->daddr.in6, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X "
"%08X %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
ntohl(cp->daddr.ip), ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
}
return 0;
}
static const struct seq_operations ip_vs_conn_sync_seq_ops = {
.start = ip_vs_conn_seq_start,
.next = ip_vs_conn_seq_next,
.stop = ip_vs_conn_seq_stop,
.show = ip_vs_conn_sync_seq_show,
};
static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_sync_fops = {
.owner = THIS_MODULE,
.open = ip_vs_conn_sync_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
#endif
/*
* Randomly drop connection entries before running out of memory
*/
static inline int todrop_entry(struct ip_vs_conn *cp)
{
/*
* The drop rate array needs tuning for real environments.
* Called from timer bh only => no locking
*/
static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
static char todrop_counter[9] = {0};
int i;
/* if the conn entry hasn't lasted for 60 seconds, don't drop it.
This will leave enough time for normal connections to get
through. */
if (time_before(cp->timeout + jiffies, cp->timer.expires + 60*HZ))
return 0;
/* Don't drop the entry if its number of incoming packets is
outside the range [0, 8] */
i = atomic_read(&cp->in_pkts);
if (i > 8 || i < 0) return 0;
if (!todrop_rate[i]) return 0;
if (--todrop_counter[i] > 0) return 0;
todrop_counter[i] = todrop_rate[i];
return 1;
}
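/* Illustrative reading of the rate table above: an entry whose
 * in_pkts count is i is dropped once per todrop_rate[i] candidate
 * scans, so i == 0 is never dropped here, i == 1 is dropped on every
 * scan, i == 8 once in eight, and any count outside [0, 8] is never
 * dropped.
 */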
/* Called from keventd and must protect itself from softirqs */
void ip_vs_random_dropentry(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
/*
* Randomly scan 1/32 of the whole table every second
*/
for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
unsigned hash = net_random() & ip_vs_conn_tab_mask;
struct hlist_node *n;
/*
* Lock is actually needed in this loop.
*/
ct_write_lock_bh(hash);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;
if (!ip_vs_conn_net_eq(cp, net))
continue;
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
case IP_VS_TCP_S_SYN_RECV:
case IP_VS_TCP_S_SYNACK:
break;
case IP_VS_TCP_S_ESTABLISHED:
if (todrop_entry(cp))
break;
continue;
default:
continue;
}
} else {
if (!todrop_entry(cp))
continue;
}
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
if (cp->control) {
IP_VS_DBG(4, "del conn template\n");
ip_vs_conn_expire_now(cp->control);
}
}
ct_write_unlock_bh(hash);
}
}
/*
* Flush all the connection entries in the ip_vs_conn_tab
*/
static void ip_vs_conn_flush(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
struct netns_ipvs *ipvs = net_ipvs(net);
flush_again:
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
struct hlist_node *n;
/*
* Lock is actually needed in this loop.
*/
ct_write_lock_bh(idx);
hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
if (!ip_vs_conn_net_eq(cp, net))
continue;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
if (cp->control) {
IP_VS_DBG(4, "del conn template\n");
ip_vs_conn_expire_now(cp->control);
}
}
ct_write_unlock_bh(idx);
}
/* the counter may be non-zero, because some conn entries may still
be run by a slow timer handler or be unhashed but still referenced */
if (atomic_read(&ipvs->conn_count) != 0) {
schedule();
goto flush_again;
}
}
/*
* per netns init and exit
*/
int __net_init ip_vs_conn_net_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
atomic_set(&ipvs->conn_count, 0);
proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
return 0;
}
void __net_exit ip_vs_conn_net_cleanup(struct net *net)
{
/* flush all the connection entries first */
ip_vs_conn_flush(net);
proc_net_remove(net, "ip_vs_conn");
proc_net_remove(net, "ip_vs_conn_sync");
}
int __init ip_vs_conn_init(void)
{
int idx;
/* Compute size and mask */
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
/*
* Allocate the connection hash table and initialize its list heads
*/
ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
if (!ip_vs_conn_tab)
return -ENOMEM;
/* Allocate ip_vs_conn slab cache */
ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn",
sizeof(struct ip_vs_conn), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ip_vs_conn_cachep) {
vfree(ip_vs_conn_tab);
return -ENOMEM;
}
pr_info("Connection hash table configured "
"(size=%d, memory=%ldKbytes)\n",
ip_vs_conn_tab_size,
(long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
sizeof(struct ip_vs_conn));
for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
}
/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
return 0;
}
void ip_vs_conn_cleanup(void)
{
/* Release the empty cache */
kmem_cache_destroy(ip_vs_conn_cachep);
vfree(ip_vs_conn_tab);
}
| gpl-2.0 |
djvoleur/V_920P_BOF7 | drivers/usb/misc/cypress_cy7c63.c | 7560 | 7282 | /*
* cypress_cy7c63.c
*
* Copyright (c) 2006-2007 Oliver Bock (bock@tfh-berlin.de)
*
* This driver is based on the Cypress USB Driver by Marcus Maul
* (cyport) and the 2.0 version of Greg Kroah-Hartman's
* USB Skeleton driver.
*
* This is a generic driver for the Cypress CY7C63xxx family.
* For the time being it enables you to read from and write to
* the single I/O ports of the device.
*
* Supported vendors: AK Modul-Bus Computer GmbH
* (Firmware "Port-Chip")
*
* Supported devices: CY7C63001A-PC
* CY7C63001C-PXC
* CY7C63001C-SXC
*
* Supported functions: Read/Write Ports
*
*
* For up-to-date information please visit:
* http://www.obock.de/kernel/cypress
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#define DRIVER_AUTHOR "Oliver Bock (bock@tfh-berlin.de)"
#define DRIVER_DESC "Cypress CY7C63xxx USB driver"
#define CYPRESS_VENDOR_ID 0xa2c
#define CYPRESS_PRODUCT_ID 0x8
#define CYPRESS_READ_PORT 0x4
#define CYPRESS_WRITE_PORT 0x5
#define CYPRESS_READ_RAM 0x2
#define CYPRESS_WRITE_RAM 0x3
#define CYPRESS_READ_ROM 0x1
#define CYPRESS_READ_PORT_ID0 0
#define CYPRESS_WRITE_PORT_ID0 0
#define CYPRESS_READ_PORT_ID1 0x2
#define CYPRESS_WRITE_PORT_ID1 1
#define CYPRESS_MAX_REQSIZE 8
/* table of devices that work with this driver */
static const struct usb_device_id cypress_table[] = {
{ USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, cypress_table);
/* structure to hold all of our device specific stuff */
struct cypress {
struct usb_device * udev;
unsigned char port[2];
};
/* used to send usb control messages to device */
static int vendor_command(struct cypress *dev, unsigned char request,
unsigned char address, unsigned char data)
{
int retval = 0;
unsigned int pipe;
unsigned char *iobuf;
/* allocate some memory for the i/o buffer */
iobuf = kzalloc(CYPRESS_MAX_REQSIZE, GFP_KERNEL);
if (!iobuf) {
dev_err(&dev->udev->dev, "Out of memory!\n");
retval = -ENOMEM;
goto error;
}
dev_dbg(&dev->udev->dev, "Sending usb_control_msg (data: %d)\n", data);
/* prepare usb control message and send it upstream */
pipe = usb_rcvctrlpipe(dev->udev, 0);
retval = usb_control_msg(dev->udev, pipe, request,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER,
address, data, iobuf, CYPRESS_MAX_REQSIZE,
USB_CTRL_GET_TIMEOUT);
/* store returned data (more READs to be added) */
switch (request) {
case CYPRESS_READ_PORT:
if (address == CYPRESS_READ_PORT_ID0) {
dev->port[0] = iobuf[1];
dev_dbg(&dev->udev->dev,
"READ_PORT0 returned: %d\n",
dev->port[0]);
}
else if (address == CYPRESS_READ_PORT_ID1) {
dev->port[1] = iobuf[1];
dev_dbg(&dev->udev->dev,
"READ_PORT1 returned: %d\n",
dev->port[1]);
}
break;
}
kfree(iobuf);
error:
return retval;
}
/* write port value */
static ssize_t write_port(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count,
int port_num, int write_id)
{
int value = -1;
int result = 0;
struct usb_interface *intf = to_usb_interface(dev);
struct cypress *cyp = usb_get_intfdata(intf);
dev_dbg(&cyp->udev->dev, "WRITE_PORT%d called\n", port_num);
/* validate input data */
if (sscanf(buf, "%d", &value) < 1) {
result = -EINVAL;
goto error;
}
if (value < 0 || value > 255) {
result = -EINVAL;
goto error;
}
result = vendor_command(cyp, CYPRESS_WRITE_PORT, write_id,
(unsigned char)value);
dev_dbg(&cyp->udev->dev, "Result of vendor_command: %d\n\n", result);
error:
return result < 0 ? result : count;
}
/* attribute callback handler (write) */
static ssize_t set_port0_handler(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return write_port(dev, attr, buf, count, 0, CYPRESS_WRITE_PORT_ID0);
}
/* attribute callback handler (write) */
static ssize_t set_port1_handler(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return write_port(dev, attr, buf, count, 1, CYPRESS_WRITE_PORT_ID1);
}
/* read port value */
static ssize_t read_port(struct device *dev, struct device_attribute *attr,
char *buf, int port_num, int read_id)
{
int result = 0;
struct usb_interface *intf = to_usb_interface(dev);
struct cypress *cyp = usb_get_intfdata(intf);
dev_dbg(&cyp->udev->dev, "READ_PORT%d called\n", port_num);
result = vendor_command(cyp, CYPRESS_READ_PORT, read_id, 0);
dev_dbg(&cyp->udev->dev, "Result of vendor_command: %d\n\n", result);
return sprintf(buf, "%d", cyp->port[port_num]);
}
/* attribute callback handler (read) */
static ssize_t get_port0_handler(struct device *dev,
struct device_attribute *attr, char *buf)
{
return read_port(dev, attr, buf, 0, CYPRESS_READ_PORT_ID0);
}
/* attribute callback handler (read) */
static ssize_t get_port1_handler(struct device *dev,
struct device_attribute *attr, char *buf)
{
return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
}
static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler);
static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler);
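/* Illustrative usage from user space (the exact sysfs path depends
 * on where the interface is bound):
 *
 *	echo 255 > /sys/bus/usb/devices/1-1:1.0/port0	# drive port 0 pins high
 *	cat /sys/bus/usb/devices/1-1:1.0/port0		# read port 0 back
 */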
static int cypress_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct cypress *dev = NULL;
int retval = -ENOMEM;
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory!\n");
goto error_mem;
}
dev->udev = usb_get_dev(interface_to_usbdev(interface));
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
/* create device attribute files */
retval = device_create_file(&interface->dev, &dev_attr_port0);
if (retval)
goto error;
retval = device_create_file(&interface->dev, &dev_attr_port1);
if (retval)
goto error;
/* let the user know that the device is now attached */
dev_info(&interface->dev,
"Cypress CY7C63xxx device now attached\n");
return 0;
error:
device_remove_file(&interface->dev, &dev_attr_port0);
device_remove_file(&interface->dev, &dev_attr_port1);
usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
error_mem:
return retval;
}
static void cypress_disconnect(struct usb_interface *interface)
{
struct cypress *dev;
dev = usb_get_intfdata(interface);
/* remove device attribute files */
device_remove_file(&interface->dev, &dev_attr_port0);
device_remove_file(&interface->dev, &dev_attr_port1);
/* the intfdata can be set to NULL only after the
* device files have been removed */
usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
dev_info(&interface->dev,
"Cypress CY7C63xxx device now disconnected\n");
kfree(dev);
}
static struct usb_driver cypress_driver = {
.name = "cypress_cy7c63",
.probe = cypress_probe,
.disconnect = cypress_disconnect,
.id_table = cypress_table,
};
module_usb_driver(cypress_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_espresso10 | drivers/staging/comedi/drivers/addi-data/hwdrv_apci2016.c | 8328 | 19542 | /**
@verbatim
Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
ADDI-DATA GmbH
Dieselstrasse 3
D-77833 Ottersweier
Tel: +49(0)7223/9493-0
Fax: +49(0)7223/9493-92
http://www.addi-data.com
info@addi-data.com
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should also find the complete GPL in the COPYING file accompanying this source code.
@endverbatim
*/
/*
+-----------------------------------------------------------------------+
| (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
+-----------------------------------------------------------------------+
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-------------------------------+---------------------------------------+
| Project : APCI-2016 | Compiler : GCC |
| Module name : hwdrv_apci2016.c| Version : 2.96 |
+-------------------------------+---------------------------------------+
| Project manager: Eric Stolz | Date : 02/12/2002 |
+-------------------------------+---------------------------------------+
| Description : Hardware Layer Access For APCI-2016 |
+-----------------------------------------------------------------------+
| UPDATES |
+----------+-----------+------------------------------------------------+
| Date | Author | Description of updates |
+----------+-----------+------------------------------------------------+
| | | |
| | | |
| | | |
+----------+-----------+------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Included files |
+----------------------------------------------------------------------------+
*/
#include "hwdrv_apci2016.h"
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2016_ConfigDigitalOutput |
| (struct comedi_device *dev,struct comedi_subdevice *s, |
| struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Configures The Digital Output Subdevice. |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev : Driver handle |
| unsigned int *data : Data Pointer contains |
| configuration parameters as below |
| |
| data[0] : 1 Digital Memory On |
| 0 Digital Memory Off |
+----------------------------------------------------------------------------+
| Output Parameters : -- |
+----------------------------------------------------------------------------+
| Return Value : TRUE : No error occur |
| : FALSE : Error occur. Return the error |
| |
+----------------------------------------------------------------------------+
*/
int i_APCI2016_ConfigDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
if ((data[0] != 0) && (data[0] != 1)) {
comedi_error(dev,
"Not a valid Data !!! ,Data should be 1 or 0\n");
return -EINVAL;
} /* if ((data[0]!=0) && (data[0]!=1)) */
if (data[0]) {
devpriv->b_OutputMemoryStatus = ADDIDATA_ENABLE;
} /* if (data[0] */
else {
devpriv->b_OutputMemoryStatus = ADDIDATA_DISABLE;
} /* else if (data[0] */
return insn->n;
}
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2016_WriteDigitalOutput |
| (struct comedi_device *dev,struct comedi_subdevice *s, |
| struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Writes port value To the selected port |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev : Driver handle |
| unsigned int ui_NoOfChannels : No Of Channels To Write |
| unsigned int *data : Data Pointer to read status |
+----------------------------------------------------------------------------+
| Output Parameters : -- |
+----------------------------------------------------------------------------+
| Return Value : TRUE : No error occur |
| : FALSE : Error occur. Return the error |
| |
+----------------------------------------------------------------------------+
*/
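/*
 * Worked example (derived from the code below, assuming output memory
 * is enabled so ui_Temp holds the current port state): with
 * data[3] == 0 and data[1] == 0, writing data[0] == 1 to channel 3
 * computes (1 << 3) | ui_Temp, i.e. this mode can only OR bits into
 * the port.  The data[3] == 1 variant writes the value through
 * instead: a 1 sets the channel bit, a 0 clears it, and the remaining
 * bits keep their current state.
 */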
int i_APCI2016_WriteDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
unsigned int ui_NoOfChannel;
unsigned int ui_Temp, ui_Temp1;
ui_NoOfChannel = CR_CHAN(insn->chanspec);
if (ui_NoOfChannel > 15) {
comedi_error(dev,
"Invalid Channel Numbers !!!, Channel Numbers must be between 0 and 15\n");
return -EINVAL;
} /* if (ui_NoOfChannel > 15) */
if (devpriv->b_OutputMemoryStatus) {
ui_Temp = inw(devpriv->iobase + APCI2016_DIGITAL_OP);
} /* if (devpriv->b_OutputMemoryStatus ) */
else {
ui_Temp = 0;
} /* else if (devpriv->b_OutputMemoryStatus ) */
if ((data[1] != 0) && (data[1] != 1)) {
comedi_error(dev,
"Invalid Data[1] value !!!, Data[1] should be 0 or 1\n");
return -EINVAL;
} /* if ((data[1]!=0) && (data[1]!=1)) */
	if (data[3] == 0) {
		if (data[1] == 0) {
			/* single channel: OR the bit into the output state */
			data[0] = (data[0] << ui_NoOfChannel) | ui_Temp;
		} else {
			/* whole port: shift the value to the port position */
			switch (ui_NoOfChannel) {
			case 2:
				data[0] = (data[0] << (2 * data[2])) | ui_Temp;
				break;
			case 4:
				data[0] = (data[0] << (4 * data[2])) | ui_Temp;
				break;
			case 8:
				data[0] = (data[0] << (8 * data[2])) | ui_Temp;
				break;
			case 15:
				data[0] |= ui_Temp;
				break;
			default:
				comedi_error(dev, "chan spec wrong");
				return -EINVAL;
			}
		}
		outw(data[0], devpriv->iobase + APCI2016_DIGITAL_OP);
	} else if (data[3] == 1) {
		if (data[1] == 0) {
			/* single channel: invert the bit and mask it in */
			data[0] = ~data[0] & 0x1;
			ui_Temp1 = 1 << ui_NoOfChannel;
			ui_Temp |= ui_Temp1;
			data[0] = ((data[0] << ui_NoOfChannel) ^ 0xffff) &
				ui_Temp;
		} else {
			/* whole port: invert the value and mask it in */
			switch (ui_NoOfChannel) {
			case 2:
				data[0] = ~data[0] & 0x3;
				ui_Temp1 = 3 << (2 * data[2]);
				ui_Temp |= ui_Temp1;
				data[0] = ((data[0] << (2 * data[2])) ^
					0xffff) & ui_Temp;
				break;
			case 4:
				data[0] = ~data[0] & 0xf;
				ui_Temp1 = 15 << (4 * data[2]);
				ui_Temp |= ui_Temp1;
				data[0] = ((data[0] << (4 * data[2])) ^
					0xffff) & ui_Temp;
				break;
			case 8:
				data[0] = ~data[0] & 0xff;
				ui_Temp1 = 255 << (8 * data[2]);
				ui_Temp |= ui_Temp1;
				data[0] = ((data[0] << (8 * data[2])) ^
					0xffff) & ui_Temp;
				break;
			case 15:
				break;
			default:
				comedi_error(dev, "chan spec wrong");
				return -EINVAL;
			}
		}
		outw(data[0], devpriv->iobase + APCI2016_DIGITAL_OP);
	} else {
		printk("\nSpecified functionality does not exist\n");
		return -EINVAL;
	}
return insn->n;
}
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2016_BitsDigitalOutput |
| (struct comedi_device *dev,struct comedi_subdevice *s, |
| struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Reads the value of the selected channel or port |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev : Driver handle |
| unsigned int ui_NoOfChannels : No Of Channels To read |
| unsigned int *data : Data Pointer to read status |
+----------------------------------------------------------------------------+
| Output Parameters : -- |
+----------------------------------------------------------------------------+
| Return Value : TRUE : No error occurs |
| : FALSE : An error occurred; the error code is returned |
| |
+----------------------------------------------------------------------------+
*/
int i_APCI2016_BitsDigitalOutput(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
	unsigned int ui_Temp;
	unsigned int ui_NoOfChannel;

	ui_NoOfChannel = CR_CHAN(insn->chanspec);
	if (ui_NoOfChannel > 15) {
		comedi_error(dev,
			"Invalid channel: the channel must be between 0 and 15\n");
		return -EINVAL;
	}
	if (data[0] != 0 && data[0] != 1) {
		comedi_error(dev, "Invalid data[0]: data[0] must be 0 or 1\n");
		return -EINVAL;
	}
	ui_Temp = data[0];
	*data = inw(devpriv->iobase + APCI2016_DIGITAL_OP_RW);
	if (ui_Temp == 0) {
		/* read back a single channel */
		*data = (*data >> ui_NoOfChannel) & 0x1;
	} else {
		/* read back a whole port of 2, 4, 8 or 16 channels */
		switch (ui_NoOfChannel) {
		case 2:
			*data = (*data >> (2 * data[1])) & 3;
			break;
		case 4:
			*data = (*data >> (4 * data[1])) & 15;
			break;
		case 8:
			*data = (*data >> (8 * data[1])) & 255;
			break;
		case 15:
			break;
		default:
			comedi_error(dev, "chan spec wrong");
			return -EINVAL;
		}
	}
return insn->n;
}
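/*
 * Minimal sketch (added; the helper name is hypothetical) of the port
 * extraction pattern used above: for a port 'width' bits wide at port
 * index 'idx', shift the raw register value down and mask it.
 */
static inline unsigned int apci2016_extract_port(unsigned int raw,
						 unsigned int width,
						 unsigned int idx)
{
	/* (1u << width) - 1 yields the masks 3, 15 and 255 used above */
	return (raw >> (width * idx)) & ((1u << width) - 1);
}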
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2016_ConfigWatchdog |
| (struct comedi_device *dev,struct comedi_subdevice *s, |
| struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Configures The Watchdog |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev : Driver handle |
| struct comedi_subdevice *s, :pointer to subdevice structure |
| struct comedi_insn *insn :pointer to insn structure |
| unsigned int *data : Data Pointer to read status |
+----------------------------------------------------------------------------+
| Output Parameters : -- |
+----------------------------------------------------------------------------+
| Return Value : TRUE : No error occurs |
| : FALSE : An error occurred; the error code is returned |
| |
+----------------------------------------------------------------------------+
*/
int i_APCI2016_ConfigWatchdog(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
	if (data[0] == 0) {
		/* Disable the watchdog */
		outw(0x0,
			devpriv->i_IobaseAddon +
			APCI2016_WATCHDOG_ENABLEDISABLE);
		/* Load the 32-bit reload value, low word first */
		outw(data[1] & 0xffff,
			devpriv->i_IobaseAddon +
			APCI2016_WATCHDOG_RELOAD_VALUE);
		outw(data[1] >> 16,
			devpriv->i_IobaseAddon +
			APCI2016_WATCHDOG_RELOAD_VALUE + 2);
	} else {
		printk("\nThe input parameters are wrong\n");
		return -EINVAL;
	}
return insn->n;
}
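/*
 * Minimal sketch (added; the helper name is hypothetical) making the
 * 32-bit reload split above explicit: the low word goes to the reload
 * register, the high word to the register two bytes above it.
 */
static inline void apci2016_write_wdog_reload(unsigned long iobase_addon,
					      unsigned int reload)
{
	outw(reload & 0xffff, iobase_addon + APCI2016_WATCHDOG_RELOAD_VALUE);
	outw(reload >> 16, iobase_addon + APCI2016_WATCHDOG_RELOAD_VALUE + 2);
}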
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2016_StartStopWriteWatchdog |
| (struct comedi_device *dev,struct comedi_subdevice *s, |
| struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Start / Stop The Watchdog |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev : Driver handle |
| struct comedi_subdevice *s, :pointer to subdevice structure |
| struct comedi_insn *insn :pointer to insn structure |
| unsigned int *data : Data Pointer to read status |
+----------------------------------------------------------------------------+
| Output Parameters : -- |
+----------------------------------------------------------------------------+
| Return Value : TRUE : No error occurs |
| : FALSE : An error occurred; the error code is returned |
| |
+----------------------------------------------------------------------------+
*/
int i_APCI2016_StartStopWriteWatchdog(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
	switch (data[0]) {
	case 0:	/* stop the watchdog */
		outw(0x0,
			devpriv->i_IobaseAddon +
			APCI2016_WATCHDOG_ENABLEDISABLE);
		break;
	case 1:	/* start the watchdog */
		outw(0x0001,
			devpriv->i_IobaseAddon +
			APCI2016_WATCHDOG_ENABLEDISABLE);
		break;
	case 2:	/* software trigger */
		outw(0x0201,
			devpriv->i_IobaseAddon +
			APCI2016_WATCHDOG_ENABLEDISABLE);
		break;
	default:
		printk("\nSpecified functionality does not exist\n");
		return -EINVAL;
	}
return insn->n;
}
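/*
 * The control words used above, gathered in one place (enum added; the
 * names are illustrative assumptions, only the values come from the code):
 */
enum apci2016_wdog_cmd {
	APCI2016_WDOG_STOP    = 0x0000,
	APCI2016_WDOG_START   = 0x0001,
	APCI2016_WDOG_TRIGGER = 0x0201,	/* software trigger */
};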
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2016_ReadWatchdog |
| (struct comedi_device *dev,struct comedi_subdevice *s, |
| struct comedi_insn *insn,unsigned int *data) |
+----------------------------------------------------------------------------+
| Task : Read The Watchdog |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev : Driver handle |
| struct comedi_subdevice *s, :pointer to subdevice structure |
| struct comedi_insn *insn :pointer to insn structure |
| unsigned int *data : Data Pointer to read status |
+----------------------------------------------------------------------------+
| Output Parameters : -- |
+----------------------------------------------------------------------------+
| Return Value : TRUE : No error occurs |
| : FALSE : An error occurred; the error code is returned |
| |
+----------------------------------------------------------------------------+
*/
int i_APCI2016_ReadWatchdog(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
udelay(5);
data[0] = inw(devpriv->i_IobaseAddon + APCI2016_WATCHDOG_STATUS) & 0x1;
return insn->n;
}
/*
+----------------------------------------------------------------------------+
| Function Name : int i_APCI2016_Reset(struct comedi_device *dev) |
+----------------------------------------------------------------------------+
| Task : Resets all the registers |
+----------------------------------------------------------------------------+
| Input Parameters : struct comedi_device *dev : Driver handle |
+----------------------------------------------------------------------------+
| Output Parameters : -- |
+----------------------------------------------------------------------------+
| Return Value : 0 |
+----------------------------------------------------------------------------+
*/
int i_APCI2016_Reset(struct comedi_device *dev)
{
outw(0x0, devpriv->iobase + APCI2016_DIGITAL_OP); /* Resets the digital output channels */
outw(0x0, devpriv->i_IobaseAddon + APCI2016_WATCHDOG_ENABLEDISABLE);
outw(0x0, devpriv->i_IobaseAddon + APCI2016_WATCHDOG_RELOAD_VALUE);
outw(0x0, devpriv->i_IobaseAddon + APCI2016_WATCHDOG_RELOAD_VALUE + 2);
return 0;
}
| gpl-2.0 |
somcom3x/android_kernel_motorola_msm8226 | crypto/aes_generic.c | 8840 | 63405 | /*
* Cryptographic API.
*
* AES Cipher Algorithm.
*
* Based on Brian Gladman's code.
*
* Linux developers:
* Alexander Kjeldaas <astor@fast.no>
* Herbert Valerio Riedel <hvr@hvrlab.org>
* Kyle McMartin <kyle@debian.org>
* Adam J. Richter <adam@yggdrasil.com> (conversion to 2.5 API).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* ---------------------------------------------------------------------------
* Copyright (c) 2002, Dr Brian Gladman <brg@gladman.me.uk>, Worcester, UK.
* All rights reserved.
*
* LICENSE TERMS
*
* The free distribution and use of this software in both source and binary
* form is allowed (with or without changes) provided that:
*
* 1. distributions of this source code include the above copyright
* notice, this list of conditions and the following disclaimer;
*
* 2. distributions in binary form include the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other associated materials;
*
* 3. the copyright holder's name is not used to endorse products
* built using this software without specific written permission.
*
* ALTERNATIVELY, provided that this notice is retained in full, this product
* may be distributed under the terms of the GNU General Public License (GPL),
* in which case the provisions of the GPL apply INSTEAD OF those given above.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
* ---------------------------------------------------------------------------
*/
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <asm/byteorder.h>
static inline u8 byte(const u32 x, const unsigned n)
{
return x >> (n << 3);
}
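/*
 * Worked example (comment added): n << 3 turns a byte index into a bit
 * shift of 8 * n, so with x = 0x11223344, byte(x, 0) == 0x44 and
 * byte(x, 3) == 0x11, i.e. byte 0 is the least significant byte.
 */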
static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
const u32 crypto_ft_tab[4][256] = {
{
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56,
0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec,
0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa,
0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb,
0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45,
0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b,
0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c,
0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83,
0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9,
0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a,
0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d,
0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f,
0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df,
0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea,
0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34,
0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b,
0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d,
0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413,
0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1,
0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6,
0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972,
0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85,
0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed,
0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511,
0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe,
0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b,
0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05,
0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1,
0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142,
0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf,
0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3,
0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e,
0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a,
0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6,
0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3,
0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b,
0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428,
0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad,
0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14,
0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8,
0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4,
0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2,
0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda,
0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949,
0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf,
0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810,
0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c,
0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697,
0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e,
0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f,
0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc,
0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c,
0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969,
0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27,
0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122,
0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433,
0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9,
0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5,
0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a,
0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0,
0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e,
0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c,
}, {
0x6363c6a5, 0x7c7cf884, 0x7777ee99, 0x7b7bf68d,
0xf2f2ff0d, 0x6b6bd6bd, 0x6f6fdeb1, 0xc5c59154,
0x30306050, 0x01010203, 0x6767cea9, 0x2b2b567d,
0xfefee719, 0xd7d7b562, 0xabab4de6, 0x7676ec9a,
0xcaca8f45, 0x82821f9d, 0xc9c98940, 0x7d7dfa87,
0xfafaef15, 0x5959b2eb, 0x47478ec9, 0xf0f0fb0b,
0xadad41ec, 0xd4d4b367, 0xa2a25ffd, 0xafaf45ea,
0x9c9c23bf, 0xa4a453f7, 0x7272e496, 0xc0c09b5b,
0xb7b775c2, 0xfdfde11c, 0x93933dae, 0x26264c6a,
0x36366c5a, 0x3f3f7e41, 0xf7f7f502, 0xcccc834f,
0x3434685c, 0xa5a551f4, 0xe5e5d134, 0xf1f1f908,
0x7171e293, 0xd8d8ab73, 0x31316253, 0x15152a3f,
0x0404080c, 0xc7c79552, 0x23234665, 0xc3c39d5e,
0x18183028, 0x969637a1, 0x05050a0f, 0x9a9a2fb5,
0x07070e09, 0x12122436, 0x80801b9b, 0xe2e2df3d,
0xebebcd26, 0x27274e69, 0xb2b27fcd, 0x7575ea9f,
0x0909121b, 0x83831d9e, 0x2c2c5874, 0x1a1a342e,
0x1b1b362d, 0x6e6edcb2, 0x5a5ab4ee, 0xa0a05bfb,
0x5252a4f6, 0x3b3b764d, 0xd6d6b761, 0xb3b37dce,
0x2929527b, 0xe3e3dd3e, 0x2f2f5e71, 0x84841397,
0x5353a6f5, 0xd1d1b968, 0x00000000, 0xededc12c,
0x20204060, 0xfcfce31f, 0xb1b179c8, 0x5b5bb6ed,
0x6a6ad4be, 0xcbcb8d46, 0xbebe67d9, 0x3939724b,
0x4a4a94de, 0x4c4c98d4, 0x5858b0e8, 0xcfcf854a,
0xd0d0bb6b, 0xefefc52a, 0xaaaa4fe5, 0xfbfbed16,
0x434386c5, 0x4d4d9ad7, 0x33336655, 0x85851194,
0x45458acf, 0xf9f9e910, 0x02020406, 0x7f7ffe81,
0x5050a0f0, 0x3c3c7844, 0x9f9f25ba, 0xa8a84be3,
0x5151a2f3, 0xa3a35dfe, 0x404080c0, 0x8f8f058a,
0x92923fad, 0x9d9d21bc, 0x38387048, 0xf5f5f104,
0xbcbc63df, 0xb6b677c1, 0xdadaaf75, 0x21214263,
0x10102030, 0xffffe51a, 0xf3f3fd0e, 0xd2d2bf6d,
0xcdcd814c, 0x0c0c1814, 0x13132635, 0xececc32f,
0x5f5fbee1, 0x979735a2, 0x444488cc, 0x17172e39,
0xc4c49357, 0xa7a755f2, 0x7e7efc82, 0x3d3d7a47,
0x6464c8ac, 0x5d5dbae7, 0x1919322b, 0x7373e695,
0x6060c0a0, 0x81811998, 0x4f4f9ed1, 0xdcdca37f,
0x22224466, 0x2a2a547e, 0x90903bab, 0x88880b83,
0x46468cca, 0xeeeec729, 0xb8b86bd3, 0x1414283c,
0xdedea779, 0x5e5ebce2, 0x0b0b161d, 0xdbdbad76,
0xe0e0db3b, 0x32326456, 0x3a3a744e, 0x0a0a141e,
0x494992db, 0x06060c0a, 0x2424486c, 0x5c5cb8e4,
0xc2c29f5d, 0xd3d3bd6e, 0xacac43ef, 0x6262c4a6,
0x919139a8, 0x959531a4, 0xe4e4d337, 0x7979f28b,
0xe7e7d532, 0xc8c88b43, 0x37376e59, 0x6d6ddab7,
0x8d8d018c, 0xd5d5b164, 0x4e4e9cd2, 0xa9a949e0,
0x6c6cd8b4, 0x5656acfa, 0xf4f4f307, 0xeaeacf25,
0x6565caaf, 0x7a7af48e, 0xaeae47e9, 0x08081018,
0xbaba6fd5, 0x7878f088, 0x25254a6f, 0x2e2e5c72,
0x1c1c3824, 0xa6a657f1, 0xb4b473c7, 0xc6c69751,
0xe8e8cb23, 0xdddda17c, 0x7474e89c, 0x1f1f3e21,
0x4b4b96dd, 0xbdbd61dc, 0x8b8b0d86, 0x8a8a0f85,
0x7070e090, 0x3e3e7c42, 0xb5b571c4, 0x6666ccaa,
0x484890d8, 0x03030605, 0xf6f6f701, 0x0e0e1c12,
0x6161c2a3, 0x35356a5f, 0x5757aef9, 0xb9b969d0,
0x86861791, 0xc1c19958, 0x1d1d3a27, 0x9e9e27b9,
0xe1e1d938, 0xf8f8eb13, 0x98982bb3, 0x11112233,
0x6969d2bb, 0xd9d9a970, 0x8e8e0789, 0x949433a7,
0x9b9b2db6, 0x1e1e3c22, 0x87871592, 0xe9e9c920,
0xcece8749, 0x5555aaff, 0x28285078, 0xdfdfa57a,
0x8c8c038f, 0xa1a159f8, 0x89890980, 0x0d0d1a17,
0xbfbf65da, 0xe6e6d731, 0x424284c6, 0x6868d0b8,
0x414182c3, 0x999929b0, 0x2d2d5a77, 0x0f0f1e11,
0xb0b07bcb, 0x5454a8fc, 0xbbbb6dd6, 0x16162c3a,
}, {
0x63c6a563, 0x7cf8847c, 0x77ee9977, 0x7bf68d7b,
0xf2ff0df2, 0x6bd6bd6b, 0x6fdeb16f, 0xc59154c5,
0x30605030, 0x01020301, 0x67cea967, 0x2b567d2b,
0xfee719fe, 0xd7b562d7, 0xab4de6ab, 0x76ec9a76,
0xca8f45ca, 0x821f9d82, 0xc98940c9, 0x7dfa877d,
0xfaef15fa, 0x59b2eb59, 0x478ec947, 0xf0fb0bf0,
0xad41ecad, 0xd4b367d4, 0xa25ffda2, 0xaf45eaaf,
0x9c23bf9c, 0xa453f7a4, 0x72e49672, 0xc09b5bc0,
0xb775c2b7, 0xfde11cfd, 0x933dae93, 0x264c6a26,
0x366c5a36, 0x3f7e413f, 0xf7f502f7, 0xcc834fcc,
0x34685c34, 0xa551f4a5, 0xe5d134e5, 0xf1f908f1,
0x71e29371, 0xd8ab73d8, 0x31625331, 0x152a3f15,
0x04080c04, 0xc79552c7, 0x23466523, 0xc39d5ec3,
0x18302818, 0x9637a196, 0x050a0f05, 0x9a2fb59a,
0x070e0907, 0x12243612, 0x801b9b80, 0xe2df3de2,
0xebcd26eb, 0x274e6927, 0xb27fcdb2, 0x75ea9f75,
0x09121b09, 0x831d9e83, 0x2c58742c, 0x1a342e1a,
0x1b362d1b, 0x6edcb26e, 0x5ab4ee5a, 0xa05bfba0,
0x52a4f652, 0x3b764d3b, 0xd6b761d6, 0xb37dceb3,
0x29527b29, 0xe3dd3ee3, 0x2f5e712f, 0x84139784,
0x53a6f553, 0xd1b968d1, 0x00000000, 0xedc12ced,
0x20406020, 0xfce31ffc, 0xb179c8b1, 0x5bb6ed5b,
0x6ad4be6a, 0xcb8d46cb, 0xbe67d9be, 0x39724b39,
0x4a94de4a, 0x4c98d44c, 0x58b0e858, 0xcf854acf,
0xd0bb6bd0, 0xefc52aef, 0xaa4fe5aa, 0xfbed16fb,
0x4386c543, 0x4d9ad74d, 0x33665533, 0x85119485,
0x458acf45, 0xf9e910f9, 0x02040602, 0x7ffe817f,
0x50a0f050, 0x3c78443c, 0x9f25ba9f, 0xa84be3a8,
0x51a2f351, 0xa35dfea3, 0x4080c040, 0x8f058a8f,
0x923fad92, 0x9d21bc9d, 0x38704838, 0xf5f104f5,
0xbc63dfbc, 0xb677c1b6, 0xdaaf75da, 0x21426321,
0x10203010, 0xffe51aff, 0xf3fd0ef3, 0xd2bf6dd2,
0xcd814ccd, 0x0c18140c, 0x13263513, 0xecc32fec,
0x5fbee15f, 0x9735a297, 0x4488cc44, 0x172e3917,
0xc49357c4, 0xa755f2a7, 0x7efc827e, 0x3d7a473d,
0x64c8ac64, 0x5dbae75d, 0x19322b19, 0x73e69573,
0x60c0a060, 0x81199881, 0x4f9ed14f, 0xdca37fdc,
0x22446622, 0x2a547e2a, 0x903bab90, 0x880b8388,
0x468cca46, 0xeec729ee, 0xb86bd3b8, 0x14283c14,
0xdea779de, 0x5ebce25e, 0x0b161d0b, 0xdbad76db,
0xe0db3be0, 0x32645632, 0x3a744e3a, 0x0a141e0a,
0x4992db49, 0x060c0a06, 0x24486c24, 0x5cb8e45c,
0xc29f5dc2, 0xd3bd6ed3, 0xac43efac, 0x62c4a662,
0x9139a891, 0x9531a495, 0xe4d337e4, 0x79f28b79,
0xe7d532e7, 0xc88b43c8, 0x376e5937, 0x6ddab76d,
0x8d018c8d, 0xd5b164d5, 0x4e9cd24e, 0xa949e0a9,
0x6cd8b46c, 0x56acfa56, 0xf4f307f4, 0xeacf25ea,
0x65caaf65, 0x7af48e7a, 0xae47e9ae, 0x08101808,
0xba6fd5ba, 0x78f08878, 0x254a6f25, 0x2e5c722e,
0x1c38241c, 0xa657f1a6, 0xb473c7b4, 0xc69751c6,
0xe8cb23e8, 0xdda17cdd, 0x74e89c74, 0x1f3e211f,
0x4b96dd4b, 0xbd61dcbd, 0x8b0d868b, 0x8a0f858a,
0x70e09070, 0x3e7c423e, 0xb571c4b5, 0x66ccaa66,
0x4890d848, 0x03060503, 0xf6f701f6, 0x0e1c120e,
0x61c2a361, 0x356a5f35, 0x57aef957, 0xb969d0b9,
0x86179186, 0xc19958c1, 0x1d3a271d, 0x9e27b99e,
0xe1d938e1, 0xf8eb13f8, 0x982bb398, 0x11223311,
0x69d2bb69, 0xd9a970d9, 0x8e07898e, 0x9433a794,
0x9b2db69b, 0x1e3c221e, 0x87159287, 0xe9c920e9,
0xce8749ce, 0x55aaff55, 0x28507828, 0xdfa57adf,
0x8c038f8c, 0xa159f8a1, 0x89098089, 0x0d1a170d,
0xbf65dabf, 0xe6d731e6, 0x4284c642, 0x68d0b868,
0x4182c341, 0x9929b099, 0x2d5a772d, 0x0f1e110f,
0xb07bcbb0, 0x54a8fc54, 0xbb6dd6bb, 0x162c3a16,
}, {
0xc6a56363, 0xf8847c7c, 0xee997777, 0xf68d7b7b,
0xff0df2f2, 0xd6bd6b6b, 0xdeb16f6f, 0x9154c5c5,
0x60503030, 0x02030101, 0xcea96767, 0x567d2b2b,
0xe719fefe, 0xb562d7d7, 0x4de6abab, 0xec9a7676,
0x8f45caca, 0x1f9d8282, 0x8940c9c9, 0xfa877d7d,
0xef15fafa, 0xb2eb5959, 0x8ec94747, 0xfb0bf0f0,
0x41ecadad, 0xb367d4d4, 0x5ffda2a2, 0x45eaafaf,
0x23bf9c9c, 0x53f7a4a4, 0xe4967272, 0x9b5bc0c0,
0x75c2b7b7, 0xe11cfdfd, 0x3dae9393, 0x4c6a2626,
0x6c5a3636, 0x7e413f3f, 0xf502f7f7, 0x834fcccc,
0x685c3434, 0x51f4a5a5, 0xd134e5e5, 0xf908f1f1,
0xe2937171, 0xab73d8d8, 0x62533131, 0x2a3f1515,
0x080c0404, 0x9552c7c7, 0x46652323, 0x9d5ec3c3,
0x30281818, 0x37a19696, 0x0a0f0505, 0x2fb59a9a,
0x0e090707, 0x24361212, 0x1b9b8080, 0xdf3de2e2,
0xcd26ebeb, 0x4e692727, 0x7fcdb2b2, 0xea9f7575,
0x121b0909, 0x1d9e8383, 0x58742c2c, 0x342e1a1a,
0x362d1b1b, 0xdcb26e6e, 0xb4ee5a5a, 0x5bfba0a0,
0xa4f65252, 0x764d3b3b, 0xb761d6d6, 0x7dceb3b3,
0x527b2929, 0xdd3ee3e3, 0x5e712f2f, 0x13978484,
0xa6f55353, 0xb968d1d1, 0x00000000, 0xc12ceded,
0x40602020, 0xe31ffcfc, 0x79c8b1b1, 0xb6ed5b5b,
0xd4be6a6a, 0x8d46cbcb, 0x67d9bebe, 0x724b3939,
0x94de4a4a, 0x98d44c4c, 0xb0e85858, 0x854acfcf,
0xbb6bd0d0, 0xc52aefef, 0x4fe5aaaa, 0xed16fbfb,
0x86c54343, 0x9ad74d4d, 0x66553333, 0x11948585,
0x8acf4545, 0xe910f9f9, 0x04060202, 0xfe817f7f,
0xa0f05050, 0x78443c3c, 0x25ba9f9f, 0x4be3a8a8,
0xa2f35151, 0x5dfea3a3, 0x80c04040, 0x058a8f8f,
0x3fad9292, 0x21bc9d9d, 0x70483838, 0xf104f5f5,
0x63dfbcbc, 0x77c1b6b6, 0xaf75dada, 0x42632121,
0x20301010, 0xe51affff, 0xfd0ef3f3, 0xbf6dd2d2,
0x814ccdcd, 0x18140c0c, 0x26351313, 0xc32fecec,
0xbee15f5f, 0x35a29797, 0x88cc4444, 0x2e391717,
0x9357c4c4, 0x55f2a7a7, 0xfc827e7e, 0x7a473d3d,
0xc8ac6464, 0xbae75d5d, 0x322b1919, 0xe6957373,
0xc0a06060, 0x19988181, 0x9ed14f4f, 0xa37fdcdc,
0x44662222, 0x547e2a2a, 0x3bab9090, 0x0b838888,
0x8cca4646, 0xc729eeee, 0x6bd3b8b8, 0x283c1414,
0xa779dede, 0xbce25e5e, 0x161d0b0b, 0xad76dbdb,
0xdb3be0e0, 0x64563232, 0x744e3a3a, 0x141e0a0a,
0x92db4949, 0x0c0a0606, 0x486c2424, 0xb8e45c5c,
0x9f5dc2c2, 0xbd6ed3d3, 0x43efacac, 0xc4a66262,
0x39a89191, 0x31a49595, 0xd337e4e4, 0xf28b7979,
0xd532e7e7, 0x8b43c8c8, 0x6e593737, 0xdab76d6d,
0x018c8d8d, 0xb164d5d5, 0x9cd24e4e, 0x49e0a9a9,
0xd8b46c6c, 0xacfa5656, 0xf307f4f4, 0xcf25eaea,
0xcaaf6565, 0xf48e7a7a, 0x47e9aeae, 0x10180808,
0x6fd5baba, 0xf0887878, 0x4a6f2525, 0x5c722e2e,
0x38241c1c, 0x57f1a6a6, 0x73c7b4b4, 0x9751c6c6,
0xcb23e8e8, 0xa17cdddd, 0xe89c7474, 0x3e211f1f,
0x96dd4b4b, 0x61dcbdbd, 0x0d868b8b, 0x0f858a8a,
0xe0907070, 0x7c423e3e, 0x71c4b5b5, 0xccaa6666,
0x90d84848, 0x06050303, 0xf701f6f6, 0x1c120e0e,
0xc2a36161, 0x6a5f3535, 0xaef95757, 0x69d0b9b9,
0x17918686, 0x9958c1c1, 0x3a271d1d, 0x27b99e9e,
0xd938e1e1, 0xeb13f8f8, 0x2bb39898, 0x22331111,
0xd2bb6969, 0xa970d9d9, 0x07898e8e, 0x33a79494,
0x2db69b9b, 0x3c221e1e, 0x15928787, 0xc920e9e9,
0x8749cece, 0xaaff5555, 0x50782828, 0xa57adfdf,
0x038f8c8c, 0x59f8a1a1, 0x09808989, 0x1a170d0d,
0x65dabfbf, 0xd731e6e6, 0x84c64242, 0xd0b86868,
0x82c34141, 0x29b09999, 0x5a772d2d, 0x1e110f0f,
0x7bcbb0b0, 0xa8fc5454, 0x6dd6bbbb, 0x2c3a1616,
}
};
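/*
 * Note (comment added): the four forward tables are byte rotations of
 * one another, i.e. for every x,
 *
 *	crypto_ft_tab[i][x] == rol32(crypto_ft_tab[0][x], 8 * i)
 *
 * e.g. crypto_ft_tab[0][0] == 0xa56363c6 and
 *      crypto_ft_tab[1][0] == 0x6363c6a5.
 */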
const u32 crypto_fl_tab[4][256] = {
{
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
0x00000030, 0x00000001, 0x00000067, 0x0000002b,
0x000000fe, 0x000000d7, 0x000000ab, 0x00000076,
0x000000ca, 0x00000082, 0x000000c9, 0x0000007d,
0x000000fa, 0x00000059, 0x00000047, 0x000000f0,
0x000000ad, 0x000000d4, 0x000000a2, 0x000000af,
0x0000009c, 0x000000a4, 0x00000072, 0x000000c0,
0x000000b7, 0x000000fd, 0x00000093, 0x00000026,
0x00000036, 0x0000003f, 0x000000f7, 0x000000cc,
0x00000034, 0x000000a5, 0x000000e5, 0x000000f1,
0x00000071, 0x000000d8, 0x00000031, 0x00000015,
0x00000004, 0x000000c7, 0x00000023, 0x000000c3,
0x00000018, 0x00000096, 0x00000005, 0x0000009a,
0x00000007, 0x00000012, 0x00000080, 0x000000e2,
0x000000eb, 0x00000027, 0x000000b2, 0x00000075,
0x00000009, 0x00000083, 0x0000002c, 0x0000001a,
0x0000001b, 0x0000006e, 0x0000005a, 0x000000a0,
0x00000052, 0x0000003b, 0x000000d6, 0x000000b3,
0x00000029, 0x000000e3, 0x0000002f, 0x00000084,
0x00000053, 0x000000d1, 0x00000000, 0x000000ed,
0x00000020, 0x000000fc, 0x000000b1, 0x0000005b,
0x0000006a, 0x000000cb, 0x000000be, 0x00000039,
0x0000004a, 0x0000004c, 0x00000058, 0x000000cf,
0x000000d0, 0x000000ef, 0x000000aa, 0x000000fb,
0x00000043, 0x0000004d, 0x00000033, 0x00000085,
0x00000045, 0x000000f9, 0x00000002, 0x0000007f,
0x00000050, 0x0000003c, 0x0000009f, 0x000000a8,
0x00000051, 0x000000a3, 0x00000040, 0x0000008f,
0x00000092, 0x0000009d, 0x00000038, 0x000000f5,
0x000000bc, 0x000000b6, 0x000000da, 0x00000021,
0x00000010, 0x000000ff, 0x000000f3, 0x000000d2,
0x000000cd, 0x0000000c, 0x00000013, 0x000000ec,
0x0000005f, 0x00000097, 0x00000044, 0x00000017,
0x000000c4, 0x000000a7, 0x0000007e, 0x0000003d,
0x00000064, 0x0000005d, 0x00000019, 0x00000073,
0x00000060, 0x00000081, 0x0000004f, 0x000000dc,
0x00000022, 0x0000002a, 0x00000090, 0x00000088,
0x00000046, 0x000000ee, 0x000000b8, 0x00000014,
0x000000de, 0x0000005e, 0x0000000b, 0x000000db,
0x000000e0, 0x00000032, 0x0000003a, 0x0000000a,
0x00000049, 0x00000006, 0x00000024, 0x0000005c,
0x000000c2, 0x000000d3, 0x000000ac, 0x00000062,
0x00000091, 0x00000095, 0x000000e4, 0x00000079,
0x000000e7, 0x000000c8, 0x00000037, 0x0000006d,
0x0000008d, 0x000000d5, 0x0000004e, 0x000000a9,
0x0000006c, 0x00000056, 0x000000f4, 0x000000ea,
0x00000065, 0x0000007a, 0x000000ae, 0x00000008,
0x000000ba, 0x00000078, 0x00000025, 0x0000002e,
0x0000001c, 0x000000a6, 0x000000b4, 0x000000c6,
0x000000e8, 0x000000dd, 0x00000074, 0x0000001f,
0x0000004b, 0x000000bd, 0x0000008b, 0x0000008a,
0x00000070, 0x0000003e, 0x000000b5, 0x00000066,
0x00000048, 0x00000003, 0x000000f6, 0x0000000e,
0x00000061, 0x00000035, 0x00000057, 0x000000b9,
0x00000086, 0x000000c1, 0x0000001d, 0x0000009e,
0x000000e1, 0x000000f8, 0x00000098, 0x00000011,
0x00000069, 0x000000d9, 0x0000008e, 0x00000094,
0x0000009b, 0x0000001e, 0x00000087, 0x000000e9,
0x000000ce, 0x00000055, 0x00000028, 0x000000df,
0x0000008c, 0x000000a1, 0x00000089, 0x0000000d,
0x000000bf, 0x000000e6, 0x00000042, 0x00000068,
0x00000041, 0x00000099, 0x0000002d, 0x0000000f,
0x000000b0, 0x00000054, 0x000000bb, 0x00000016,
}, {
0x00006300, 0x00007c00, 0x00007700, 0x00007b00,
0x0000f200, 0x00006b00, 0x00006f00, 0x0000c500,
0x00003000, 0x00000100, 0x00006700, 0x00002b00,
0x0000fe00, 0x0000d700, 0x0000ab00, 0x00007600,
0x0000ca00, 0x00008200, 0x0000c900, 0x00007d00,
0x0000fa00, 0x00005900, 0x00004700, 0x0000f000,
0x0000ad00, 0x0000d400, 0x0000a200, 0x0000af00,
0x00009c00, 0x0000a400, 0x00007200, 0x0000c000,
0x0000b700, 0x0000fd00, 0x00009300, 0x00002600,
0x00003600, 0x00003f00, 0x0000f700, 0x0000cc00,
0x00003400, 0x0000a500, 0x0000e500, 0x0000f100,
0x00007100, 0x0000d800, 0x00003100, 0x00001500,
0x00000400, 0x0000c700, 0x00002300, 0x0000c300,
0x00001800, 0x00009600, 0x00000500, 0x00009a00,
0x00000700, 0x00001200, 0x00008000, 0x0000e200,
0x0000eb00, 0x00002700, 0x0000b200, 0x00007500,
0x00000900, 0x00008300, 0x00002c00, 0x00001a00,
0x00001b00, 0x00006e00, 0x00005a00, 0x0000a000,
0x00005200, 0x00003b00, 0x0000d600, 0x0000b300,
0x00002900, 0x0000e300, 0x00002f00, 0x00008400,
0x00005300, 0x0000d100, 0x00000000, 0x0000ed00,
0x00002000, 0x0000fc00, 0x0000b100, 0x00005b00,
0x00006a00, 0x0000cb00, 0x0000be00, 0x00003900,
0x00004a00, 0x00004c00, 0x00005800, 0x0000cf00,
0x0000d000, 0x0000ef00, 0x0000aa00, 0x0000fb00,
0x00004300, 0x00004d00, 0x00003300, 0x00008500,
0x00004500, 0x0000f900, 0x00000200, 0x00007f00,
0x00005000, 0x00003c00, 0x00009f00, 0x0000a800,
0x00005100, 0x0000a300, 0x00004000, 0x00008f00,
0x00009200, 0x00009d00, 0x00003800, 0x0000f500,
0x0000bc00, 0x0000b600, 0x0000da00, 0x00002100,
0x00001000, 0x0000ff00, 0x0000f300, 0x0000d200,
0x0000cd00, 0x00000c00, 0x00001300, 0x0000ec00,
0x00005f00, 0x00009700, 0x00004400, 0x00001700,
0x0000c400, 0x0000a700, 0x00007e00, 0x00003d00,
0x00006400, 0x00005d00, 0x00001900, 0x00007300,
0x00006000, 0x00008100, 0x00004f00, 0x0000dc00,
0x00002200, 0x00002a00, 0x00009000, 0x00008800,
0x00004600, 0x0000ee00, 0x0000b800, 0x00001400,
0x0000de00, 0x00005e00, 0x00000b00, 0x0000db00,
0x0000e000, 0x00003200, 0x00003a00, 0x00000a00,
0x00004900, 0x00000600, 0x00002400, 0x00005c00,
0x0000c200, 0x0000d300, 0x0000ac00, 0x00006200,
0x00009100, 0x00009500, 0x0000e400, 0x00007900,
0x0000e700, 0x0000c800, 0x00003700, 0x00006d00,
0x00008d00, 0x0000d500, 0x00004e00, 0x0000a900,
0x00006c00, 0x00005600, 0x0000f400, 0x0000ea00,
0x00006500, 0x00007a00, 0x0000ae00, 0x00000800,
0x0000ba00, 0x00007800, 0x00002500, 0x00002e00,
0x00001c00, 0x0000a600, 0x0000b400, 0x0000c600,
0x0000e800, 0x0000dd00, 0x00007400, 0x00001f00,
0x00004b00, 0x0000bd00, 0x00008b00, 0x00008a00,
0x00007000, 0x00003e00, 0x0000b500, 0x00006600,
0x00004800, 0x00000300, 0x0000f600, 0x00000e00,
0x00006100, 0x00003500, 0x00005700, 0x0000b900,
0x00008600, 0x0000c100, 0x00001d00, 0x00009e00,
0x0000e100, 0x0000f800, 0x00009800, 0x00001100,
0x00006900, 0x0000d900, 0x00008e00, 0x00009400,
0x00009b00, 0x00001e00, 0x00008700, 0x0000e900,
0x0000ce00, 0x00005500, 0x00002800, 0x0000df00,
0x00008c00, 0x0000a100, 0x00008900, 0x00000d00,
0x0000bf00, 0x0000e600, 0x00004200, 0x00006800,
0x00004100, 0x00009900, 0x00002d00, 0x00000f00,
0x0000b000, 0x00005400, 0x0000bb00, 0x00001600,
}, {
0x00630000, 0x007c0000, 0x00770000, 0x007b0000,
0x00f20000, 0x006b0000, 0x006f0000, 0x00c50000,
0x00300000, 0x00010000, 0x00670000, 0x002b0000,
0x00fe0000, 0x00d70000, 0x00ab0000, 0x00760000,
0x00ca0000, 0x00820000, 0x00c90000, 0x007d0000,
0x00fa0000, 0x00590000, 0x00470000, 0x00f00000,
0x00ad0000, 0x00d40000, 0x00a20000, 0x00af0000,
0x009c0000, 0x00a40000, 0x00720000, 0x00c00000,
0x00b70000, 0x00fd0000, 0x00930000, 0x00260000,
0x00360000, 0x003f0000, 0x00f70000, 0x00cc0000,
0x00340000, 0x00a50000, 0x00e50000, 0x00f10000,
0x00710000, 0x00d80000, 0x00310000, 0x00150000,
0x00040000, 0x00c70000, 0x00230000, 0x00c30000,
0x00180000, 0x00960000, 0x00050000, 0x009a0000,
0x00070000, 0x00120000, 0x00800000, 0x00e20000,
0x00eb0000, 0x00270000, 0x00b20000, 0x00750000,
0x00090000, 0x00830000, 0x002c0000, 0x001a0000,
0x001b0000, 0x006e0000, 0x005a0000, 0x00a00000,
0x00520000, 0x003b0000, 0x00d60000, 0x00b30000,
0x00290000, 0x00e30000, 0x002f0000, 0x00840000,
0x00530000, 0x00d10000, 0x00000000, 0x00ed0000,
0x00200000, 0x00fc0000, 0x00b10000, 0x005b0000,
0x006a0000, 0x00cb0000, 0x00be0000, 0x00390000,
0x004a0000, 0x004c0000, 0x00580000, 0x00cf0000,
0x00d00000, 0x00ef0000, 0x00aa0000, 0x00fb0000,
0x00430000, 0x004d0000, 0x00330000, 0x00850000,
0x00450000, 0x00f90000, 0x00020000, 0x007f0000,
0x00500000, 0x003c0000, 0x009f0000, 0x00a80000,
0x00510000, 0x00a30000, 0x00400000, 0x008f0000,
0x00920000, 0x009d0000, 0x00380000, 0x00f50000,
0x00bc0000, 0x00b60000, 0x00da0000, 0x00210000,
0x00100000, 0x00ff0000, 0x00f30000, 0x00d20000,
0x00cd0000, 0x000c0000, 0x00130000, 0x00ec0000,
0x005f0000, 0x00970000, 0x00440000, 0x00170000,
0x00c40000, 0x00a70000, 0x007e0000, 0x003d0000,
0x00640000, 0x005d0000, 0x00190000, 0x00730000,
0x00600000, 0x00810000, 0x004f0000, 0x00dc0000,
0x00220000, 0x002a0000, 0x00900000, 0x00880000,
0x00460000, 0x00ee0000, 0x00b80000, 0x00140000,
0x00de0000, 0x005e0000, 0x000b0000, 0x00db0000,
0x00e00000, 0x00320000, 0x003a0000, 0x000a0000,
0x00490000, 0x00060000, 0x00240000, 0x005c0000,
0x00c20000, 0x00d30000, 0x00ac0000, 0x00620000,
0x00910000, 0x00950000, 0x00e40000, 0x00790000,
0x00e70000, 0x00c80000, 0x00370000, 0x006d0000,
0x008d0000, 0x00d50000, 0x004e0000, 0x00a90000,
0x006c0000, 0x00560000, 0x00f40000, 0x00ea0000,
0x00650000, 0x007a0000, 0x00ae0000, 0x00080000,
0x00ba0000, 0x00780000, 0x00250000, 0x002e0000,
0x001c0000, 0x00a60000, 0x00b40000, 0x00c60000,
0x00e80000, 0x00dd0000, 0x00740000, 0x001f0000,
0x004b0000, 0x00bd0000, 0x008b0000, 0x008a0000,
0x00700000, 0x003e0000, 0x00b50000, 0x00660000,
0x00480000, 0x00030000, 0x00f60000, 0x000e0000,
0x00610000, 0x00350000, 0x00570000, 0x00b90000,
0x00860000, 0x00c10000, 0x001d0000, 0x009e0000,
0x00e10000, 0x00f80000, 0x00980000, 0x00110000,
0x00690000, 0x00d90000, 0x008e0000, 0x00940000,
0x009b0000, 0x001e0000, 0x00870000, 0x00e90000,
0x00ce0000, 0x00550000, 0x00280000, 0x00df0000,
0x008c0000, 0x00a10000, 0x00890000, 0x000d0000,
0x00bf0000, 0x00e60000, 0x00420000, 0x00680000,
0x00410000, 0x00990000, 0x002d0000, 0x000f0000,
0x00b00000, 0x00540000, 0x00bb0000, 0x00160000,
}, {
0x63000000, 0x7c000000, 0x77000000, 0x7b000000,
0xf2000000, 0x6b000000, 0x6f000000, 0xc5000000,
0x30000000, 0x01000000, 0x67000000, 0x2b000000,
0xfe000000, 0xd7000000, 0xab000000, 0x76000000,
0xca000000, 0x82000000, 0xc9000000, 0x7d000000,
0xfa000000, 0x59000000, 0x47000000, 0xf0000000,
0xad000000, 0xd4000000, 0xa2000000, 0xaf000000,
0x9c000000, 0xa4000000, 0x72000000, 0xc0000000,
0xb7000000, 0xfd000000, 0x93000000, 0x26000000,
0x36000000, 0x3f000000, 0xf7000000, 0xcc000000,
0x34000000, 0xa5000000, 0xe5000000, 0xf1000000,
0x71000000, 0xd8000000, 0x31000000, 0x15000000,
0x04000000, 0xc7000000, 0x23000000, 0xc3000000,
0x18000000, 0x96000000, 0x05000000, 0x9a000000,
0x07000000, 0x12000000, 0x80000000, 0xe2000000,
0xeb000000, 0x27000000, 0xb2000000, 0x75000000,
0x09000000, 0x83000000, 0x2c000000, 0x1a000000,
0x1b000000, 0x6e000000, 0x5a000000, 0xa0000000,
0x52000000, 0x3b000000, 0xd6000000, 0xb3000000,
0x29000000, 0xe3000000, 0x2f000000, 0x84000000,
0x53000000, 0xd1000000, 0x00000000, 0xed000000,
0x20000000, 0xfc000000, 0xb1000000, 0x5b000000,
0x6a000000, 0xcb000000, 0xbe000000, 0x39000000,
0x4a000000, 0x4c000000, 0x58000000, 0xcf000000,
0xd0000000, 0xef000000, 0xaa000000, 0xfb000000,
0x43000000, 0x4d000000, 0x33000000, 0x85000000,
0x45000000, 0xf9000000, 0x02000000, 0x7f000000,
0x50000000, 0x3c000000, 0x9f000000, 0xa8000000,
0x51000000, 0xa3000000, 0x40000000, 0x8f000000,
0x92000000, 0x9d000000, 0x38000000, 0xf5000000,
0xbc000000, 0xb6000000, 0xda000000, 0x21000000,
0x10000000, 0xff000000, 0xf3000000, 0xd2000000,
0xcd000000, 0x0c000000, 0x13000000, 0xec000000,
0x5f000000, 0x97000000, 0x44000000, 0x17000000,
0xc4000000, 0xa7000000, 0x7e000000, 0x3d000000,
0x64000000, 0x5d000000, 0x19000000, 0x73000000,
0x60000000, 0x81000000, 0x4f000000, 0xdc000000,
0x22000000, 0x2a000000, 0x90000000, 0x88000000,
0x46000000, 0xee000000, 0xb8000000, 0x14000000,
0xde000000, 0x5e000000, 0x0b000000, 0xdb000000,
0xe0000000, 0x32000000, 0x3a000000, 0x0a000000,
0x49000000, 0x06000000, 0x24000000, 0x5c000000,
0xc2000000, 0xd3000000, 0xac000000, 0x62000000,
0x91000000, 0x95000000, 0xe4000000, 0x79000000,
0xe7000000, 0xc8000000, 0x37000000, 0x6d000000,
0x8d000000, 0xd5000000, 0x4e000000, 0xa9000000,
0x6c000000, 0x56000000, 0xf4000000, 0xea000000,
0x65000000, 0x7a000000, 0xae000000, 0x08000000,
0xba000000, 0x78000000, 0x25000000, 0x2e000000,
0x1c000000, 0xa6000000, 0xb4000000, 0xc6000000,
0xe8000000, 0xdd000000, 0x74000000, 0x1f000000,
0x4b000000, 0xbd000000, 0x8b000000, 0x8a000000,
0x70000000, 0x3e000000, 0xb5000000, 0x66000000,
0x48000000, 0x03000000, 0xf6000000, 0x0e000000,
0x61000000, 0x35000000, 0x57000000, 0xb9000000,
0x86000000, 0xc1000000, 0x1d000000, 0x9e000000,
0xe1000000, 0xf8000000, 0x98000000, 0x11000000,
0x69000000, 0xd9000000, 0x8e000000, 0x94000000,
0x9b000000, 0x1e000000, 0x87000000, 0xe9000000,
0xce000000, 0x55000000, 0x28000000, 0xdf000000,
0x8c000000, 0xa1000000, 0x89000000, 0x0d000000,
0xbf000000, 0xe6000000, 0x42000000, 0x68000000,
0x41000000, 0x99000000, 0x2d000000, 0x0f000000,
0xb0000000, 0x54000000, 0xbb000000, 0x16000000,
}
};
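/*
 * Note (comment added): crypto_fl_tab[i][x] is the plain AES S-box
 * value of x placed in byte lane i (sbox[x] << (8 * i)); it is used
 * for the final round, which omits the MixColumns step.
 */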
const u32 crypto_it_tab[4][256] = {
{
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5,
0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5,
0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d,
0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b,
0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295,
0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e,
0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927,
0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d,
0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362,
0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9,
0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52,
0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566,
0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3,
0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed,
0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e,
0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4,
0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4,
0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd,
0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d,
0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060,
0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967,
0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879,
0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000,
0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c,
0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36,
0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624,
0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b,
0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c,
0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12,
0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14,
0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3,
0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b,
0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8,
0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684,
0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7,
0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177,
0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947,
0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322,
0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498,
0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f,
0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54,
0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382,
0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf,
0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb,
0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83,
0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef,
0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029,
0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235,
0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733,
0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117,
0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4,
0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546,
0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb,
0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d,
0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb,
0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a,
0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773,
0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478,
0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2,
0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff,
0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664,
0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0,
}, {
0xa7f45150, 0x65417e53, 0xa4171ac3, 0x5e273a96,
0x6bab3bcb, 0x459d1ff1, 0x58faacab, 0x03e34b93,
0xfa302055, 0x6d76adf6, 0x76cc8891, 0x4c02f525,
0xd7e54ffc, 0xcb2ac5d7, 0x44352680, 0xa362b58f,
0x5ab1de49, 0x1bba2567, 0x0eea4598, 0xc0fe5de1,
0x752fc302, 0xf04c8112, 0x97468da3, 0xf9d36bc6,
0x5f8f03e7, 0x9c921595, 0x7a6dbfeb, 0x595295da,
0x83bed42d, 0x217458d3, 0x69e04929, 0xc8c98e44,
0x89c2756a, 0x798ef478, 0x3e58996b, 0x71b927dd,
0x4fe1beb6, 0xad88f017, 0xac20c966, 0x3ace7db4,
0x4adf6318, 0x311ae582, 0x33519760, 0x7f536245,
0x7764b1e0, 0xae6bbb84, 0xa081fe1c, 0x2b08f994,
0x68487058, 0xfd458f19, 0x6cde9487, 0xf87b52b7,
0xd373ab23, 0x024b72e2, 0x8f1fe357, 0xab55662a,
0x28ebb207, 0xc2b52f03, 0x7bc5869a, 0x0837d3a5,
0x872830f2, 0xa5bf23b2, 0x6a0302ba, 0x8216ed5c,
0x1ccf8a2b, 0xb479a792, 0xf207f3f0, 0xe2694ea1,
0xf4da65cd, 0xbe0506d5, 0x6234d11f, 0xfea6c48a,
0x532e349d, 0x55f3a2a0, 0xe18a0532, 0xebf6a475,
0xec830b39, 0xef6040aa, 0x9f715e06, 0x106ebd51,
0x8a213ef9, 0x06dd963d, 0x053eddae, 0xbde64d46,
0x8d5491b5, 0x5dc47105, 0xd406046f, 0x155060ff,
0xfb981924, 0xe9bdd697, 0x434089cc, 0x9ed96777,
0x42e8b0bd, 0x8b890788, 0x5b19e738, 0xeec879db,
0x0a7ca147, 0x0f427ce9, 0x1e84f8c9, 0x00000000,
0x86800983, 0xed2b3248, 0x70111eac, 0x725a6c4e,
0xff0efdfb, 0x38850f56, 0xd5ae3d1e, 0x392d3627,
0xd90f0a64, 0xa65c6821, 0x545b9bd1, 0x2e36243a,
0x670a0cb1, 0xe757930f, 0x96eeb4d2, 0x919b1b9e,
0xc5c0804f, 0x20dc61a2, 0x4b775a69, 0x1a121c16,
0xba93e20a, 0x2aa0c0e5, 0xe0223c43, 0x171b121d,
0x0d090e0b, 0xc78bf2ad, 0xa8b62db9, 0xa91e14c8,
0x19f15785, 0x0775af4c, 0xdd99eebb, 0x607fa3fd,
0x2601f79f, 0xf5725cbc, 0x3b6644c5, 0x7efb5b34,
0x29438b76, 0xc623cbdc, 0xfcedb668, 0xf1e4b863,
0xdc31d7ca, 0x85634210, 0x22971340, 0x11c68420,
0x244a857d, 0x3dbbd2f8, 0x32f9ae11, 0xa129c76d,
0x2f9e1d4b, 0x30b2dcf3, 0x52860dec, 0xe3c177d0,
0x16b32b6c, 0xb970a999, 0x489411fa, 0x64e94722,
0x8cfca8c4, 0x3ff0a01a, 0x2c7d56d8, 0x903322ef,
0x4e4987c7, 0xd138d9c1, 0xa2ca8cfe, 0x0bd49836,
0x81f5a6cf, 0xde7aa528, 0x8eb7da26, 0xbfad3fa4,
0x9d3a2ce4, 0x9278500d, 0xcc5f6a9b, 0x467e5462,
0x138df6c2, 0xb8d890e8, 0xf7392e5e, 0xafc382f5,
0x805d9fbe, 0x93d0697c, 0x2dd56fa9, 0x1225cfb3,
0x99acc83b, 0x7d1810a7, 0x639ce86e, 0xbb3bdb7b,
0x7826cd09, 0x18596ef4, 0xb79aec01, 0x9a4f83a8,
0x6e95e665, 0xe6ffaa7e, 0xcfbc2108, 0xe815efe6,
0x9be7bad9, 0x366f4ace, 0x099fead4, 0x7cb029d6,
0xb2a431af, 0x233f2a31, 0x94a5c630, 0x66a235c0,
0xbc4e7437, 0xca82fca6, 0xd090e0b0, 0xd8a73315,
0x9804f14a, 0xdaec41f7, 0x50cd7f0e, 0xf691172f,
0xd64d768d, 0xb0ef434d, 0x4daacc54, 0x0496e4df,
0xb5d19ee3, 0x886a4c1b, 0x1f2cc1b8, 0x5165467f,
0xea5e9d04, 0x358c015d, 0x7487fa73, 0x410bfb2e,
0x1d67b35a, 0xd2db9252, 0x5610e933, 0x47d66d13,
0x61d79a8c, 0x0ca1377a, 0x14f8598e, 0x3c13eb89,
0x27a9ceee, 0xc961b735, 0xe51ce1ed, 0xb1477a3c,
0xdfd29c59, 0x73f2553f, 0xce141879, 0x37c773bf,
0xcdf753ea, 0xaafd5f5b, 0x6f3ddf14, 0xdb447886,
0xf3afca81, 0xc468b93e, 0x3424382c, 0x40a3c25f,
0xc31d1672, 0x25e2bc0c, 0x493c288b, 0x950dff41,
0x01a83971, 0xb30c08de, 0xe4b4d89c, 0xc1566490,
0x84cb7b61, 0xb632d570, 0x5c6c4874, 0x57b8d042,
}, {
0xf45150a7, 0x417e5365, 0x171ac3a4, 0x273a965e,
0xab3bcb6b, 0x9d1ff145, 0xfaacab58, 0xe34b9303,
0x302055fa, 0x76adf66d, 0xcc889176, 0x02f5254c,
0xe54ffcd7, 0x2ac5d7cb, 0x35268044, 0x62b58fa3,
0xb1de495a, 0xba25671b, 0xea45980e, 0xfe5de1c0,
0x2fc30275, 0x4c8112f0, 0x468da397, 0xd36bc6f9,
0x8f03e75f, 0x9215959c, 0x6dbfeb7a, 0x5295da59,
0xbed42d83, 0x7458d321, 0xe0492969, 0xc98e44c8,
0xc2756a89, 0x8ef47879, 0x58996b3e, 0xb927dd71,
0xe1beb64f, 0x88f017ad, 0x20c966ac, 0xce7db43a,
0xdf63184a, 0x1ae58231, 0x51976033, 0x5362457f,
0x64b1e077, 0x6bbb84ae, 0x81fe1ca0, 0x08f9942b,
0x48705868, 0x458f19fd, 0xde94876c, 0x7b52b7f8,
0x73ab23d3, 0x4b72e202, 0x1fe3578f, 0x55662aab,
0xebb20728, 0xb52f03c2, 0xc5869a7b, 0x37d3a508,
0x2830f287, 0xbf23b2a5, 0x0302ba6a, 0x16ed5c82,
0xcf8a2b1c, 0x79a792b4, 0x07f3f0f2, 0x694ea1e2,
0xda65cdf4, 0x0506d5be, 0x34d11f62, 0xa6c48afe,
0x2e349d53, 0xf3a2a055, 0x8a0532e1, 0xf6a475eb,
0x830b39ec, 0x6040aaef, 0x715e069f, 0x6ebd5110,
0x213ef98a, 0xdd963d06, 0x3eddae05, 0xe64d46bd,
0x5491b58d, 0xc471055d, 0x06046fd4, 0x5060ff15,
0x981924fb, 0xbdd697e9, 0x4089cc43, 0xd967779e,
0xe8b0bd42, 0x8907888b, 0x19e7385b, 0xc879dbee,
0x7ca1470a, 0x427ce90f, 0x84f8c91e, 0x00000000,
0x80098386, 0x2b3248ed, 0x111eac70, 0x5a6c4e72,
0x0efdfbff, 0x850f5638, 0xae3d1ed5, 0x2d362739,
0x0f0a64d9, 0x5c6821a6, 0x5b9bd154, 0x36243a2e,
0x0a0cb167, 0x57930fe7, 0xeeb4d296, 0x9b1b9e91,
0xc0804fc5, 0xdc61a220, 0x775a694b, 0x121c161a,
0x93e20aba, 0xa0c0e52a, 0x223c43e0, 0x1b121d17,
0x090e0b0d, 0x8bf2adc7, 0xb62db9a8, 0x1e14c8a9,
0xf1578519, 0x75af4c07, 0x99eebbdd, 0x7fa3fd60,
0x01f79f26, 0x725cbcf5, 0x6644c53b, 0xfb5b347e,
0x438b7629, 0x23cbdcc6, 0xedb668fc, 0xe4b863f1,
0x31d7cadc, 0x63421085, 0x97134022, 0xc6842011,
0x4a857d24, 0xbbd2f83d, 0xf9ae1132, 0x29c76da1,
0x9e1d4b2f, 0xb2dcf330, 0x860dec52, 0xc177d0e3,
0xb32b6c16, 0x70a999b9, 0x9411fa48, 0xe9472264,
0xfca8c48c, 0xf0a01a3f, 0x7d56d82c, 0x3322ef90,
0x4987c74e, 0x38d9c1d1, 0xca8cfea2, 0xd498360b,
0xf5a6cf81, 0x7aa528de, 0xb7da268e, 0xad3fa4bf,
0x3a2ce49d, 0x78500d92, 0x5f6a9bcc, 0x7e546246,
0x8df6c213, 0xd890e8b8, 0x392e5ef7, 0xc382f5af,
0x5d9fbe80, 0xd0697c93, 0xd56fa92d, 0x25cfb312,
0xacc83b99, 0x1810a77d, 0x9ce86e63, 0x3bdb7bbb,
0x26cd0978, 0x596ef418, 0x9aec01b7, 0x4f83a89a,
0x95e6656e, 0xffaa7ee6, 0xbc2108cf, 0x15efe6e8,
0xe7bad99b, 0x6f4ace36, 0x9fead409, 0xb029d67c,
0xa431afb2, 0x3f2a3123, 0xa5c63094, 0xa235c066,
0x4e7437bc, 0x82fca6ca, 0x90e0b0d0, 0xa73315d8,
0x04f14a98, 0xec41f7da, 0xcd7f0e50, 0x91172ff6,
0x4d768dd6, 0xef434db0, 0xaacc544d, 0x96e4df04,
0xd19ee3b5, 0x6a4c1b88, 0x2cc1b81f, 0x65467f51,
0x5e9d04ea, 0x8c015d35, 0x87fa7374, 0x0bfb2e41,
0x67b35a1d, 0xdb9252d2, 0x10e93356, 0xd66d1347,
0xd79a8c61, 0xa1377a0c, 0xf8598e14, 0x13eb893c,
0xa9ceee27, 0x61b735c9, 0x1ce1ede5, 0x477a3cb1,
0xd29c59df, 0xf2553f73, 0x141879ce, 0xc773bf37,
0xf753eacd, 0xfd5f5baa, 0x3ddf146f, 0x447886db,
0xafca81f3, 0x68b93ec4, 0x24382c34, 0xa3c25f40,
0x1d1672c3, 0xe2bc0c25, 0x3c288b49, 0x0dff4195,
0xa8397101, 0x0c08deb3, 0xb4d89ce4, 0x566490c1,
0xcb7b6184, 0x32d570b6, 0x6c48745c, 0xb8d04257,
}, {
0x5150a7f4, 0x7e536541, 0x1ac3a417, 0x3a965e27,
0x3bcb6bab, 0x1ff1459d, 0xacab58fa, 0x4b9303e3,
0x2055fa30, 0xadf66d76, 0x889176cc, 0xf5254c02,
0x4ffcd7e5, 0xc5d7cb2a, 0x26804435, 0xb58fa362,
0xde495ab1, 0x25671bba, 0x45980eea, 0x5de1c0fe,
0xc302752f, 0x8112f04c, 0x8da39746, 0x6bc6f9d3,
0x03e75f8f, 0x15959c92, 0xbfeb7a6d, 0x95da5952,
0xd42d83be, 0x58d32174, 0x492969e0, 0x8e44c8c9,
0x756a89c2, 0xf478798e, 0x996b3e58, 0x27dd71b9,
0xbeb64fe1, 0xf017ad88, 0xc966ac20, 0x7db43ace,
0x63184adf, 0xe582311a, 0x97603351, 0x62457f53,
0xb1e07764, 0xbb84ae6b, 0xfe1ca081, 0xf9942b08,
0x70586848, 0x8f19fd45, 0x94876cde, 0x52b7f87b,
0xab23d373, 0x72e2024b, 0xe3578f1f, 0x662aab55,
0xb20728eb, 0x2f03c2b5, 0x869a7bc5, 0xd3a50837,
0x30f28728, 0x23b2a5bf, 0x02ba6a03, 0xed5c8216,
0x8a2b1ccf, 0xa792b479, 0xf3f0f207, 0x4ea1e269,
0x65cdf4da, 0x06d5be05, 0xd11f6234, 0xc48afea6,
0x349d532e, 0xa2a055f3, 0x0532e18a, 0xa475ebf6,
0x0b39ec83, 0x40aaef60, 0x5e069f71, 0xbd51106e,
0x3ef98a21, 0x963d06dd, 0xddae053e, 0x4d46bde6,
0x91b58d54, 0x71055dc4, 0x046fd406, 0x60ff1550,
0x1924fb98, 0xd697e9bd, 0x89cc4340, 0x67779ed9,
0xb0bd42e8, 0x07888b89, 0xe7385b19, 0x79dbeec8,
0xa1470a7c, 0x7ce90f42, 0xf8c91e84, 0x00000000,
0x09838680, 0x3248ed2b, 0x1eac7011, 0x6c4e725a,
0xfdfbff0e, 0x0f563885, 0x3d1ed5ae, 0x3627392d,
0x0a64d90f, 0x6821a65c, 0x9bd1545b, 0x243a2e36,
0x0cb1670a, 0x930fe757, 0xb4d296ee, 0x1b9e919b,
0x804fc5c0, 0x61a220dc, 0x5a694b77, 0x1c161a12,
0xe20aba93, 0xc0e52aa0, 0x3c43e022, 0x121d171b,
0x0e0b0d09, 0xf2adc78b, 0x2db9a8b6, 0x14c8a91e,
0x578519f1, 0xaf4c0775, 0xeebbdd99, 0xa3fd607f,
0xf79f2601, 0x5cbcf572, 0x44c53b66, 0x5b347efb,
0x8b762943, 0xcbdcc623, 0xb668fced, 0xb863f1e4,
0xd7cadc31, 0x42108563, 0x13402297, 0x842011c6,
0x857d244a, 0xd2f83dbb, 0xae1132f9, 0xc76da129,
0x1d4b2f9e, 0xdcf330b2, 0x0dec5286, 0x77d0e3c1,
0x2b6c16b3, 0xa999b970, 0x11fa4894, 0x472264e9,
0xa8c48cfc, 0xa01a3ff0, 0x56d82c7d, 0x22ef9033,
0x87c74e49, 0xd9c1d138, 0x8cfea2ca, 0x98360bd4,
0xa6cf81f5, 0xa528de7a, 0xda268eb7, 0x3fa4bfad,
0x2ce49d3a, 0x500d9278, 0x6a9bcc5f, 0x5462467e,
0xf6c2138d, 0x90e8b8d8, 0x2e5ef739, 0x82f5afc3,
0x9fbe805d, 0x697c93d0, 0x6fa92dd5, 0xcfb31225,
0xc83b99ac, 0x10a77d18, 0xe86e639c, 0xdb7bbb3b,
0xcd097826, 0x6ef41859, 0xec01b79a, 0x83a89a4f,
0xe6656e95, 0xaa7ee6ff, 0x2108cfbc, 0xefe6e815,
0xbad99be7, 0x4ace366f, 0xead4099f, 0x29d67cb0,
0x31afb2a4, 0x2a31233f, 0xc63094a5, 0x35c066a2,
0x7437bc4e, 0xfca6ca82, 0xe0b0d090, 0x3315d8a7,
0xf14a9804, 0x41f7daec, 0x7f0e50cd, 0x172ff691,
0x768dd64d, 0x434db0ef, 0xcc544daa, 0xe4df0496,
0x9ee3b5d1, 0x4c1b886a, 0xc1b81f2c, 0x467f5165,
0x9d04ea5e, 0x015d358c, 0xfa737487, 0xfb2e410b,
0xb35a1d67, 0x9252d2db, 0xe9335610, 0x6d1347d6,
0x9a8c61d7, 0x377a0ca1, 0x598e14f8, 0xeb893c13,
0xceee27a9, 0xb735c961, 0xe1ede51c, 0x7a3cb147,
0x9c59dfd2, 0x553f73f2, 0x1879ce14, 0x73bf37c7,
0x53eacdf7, 0x5f5baafd, 0xdf146f3d, 0x7886db44,
0xca81f3af, 0xb93ec468, 0x382c3424, 0xc25f40a3,
0x1672c31d, 0xbc0c25e2, 0x288b493c, 0xff41950d,
0x397101a8, 0x08deb30c, 0xd89ce4b4, 0x6490c156,
0x7b6184cb, 0xd570b632, 0x48745c6c, 0xd04257b8,
}
};
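/*
 * Note (comment added): as with the forward tables, the inverse tables
 * satisfy crypto_it_tab[i][x] == rol32(crypto_it_tab[0][x], 8 * i).
 */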
const u32 crypto_il_tab[4][256] = {
{
0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
0x00000030, 0x00000036, 0x000000a5, 0x00000038,
0x000000bf, 0x00000040, 0x000000a3, 0x0000009e,
0x00000081, 0x000000f3, 0x000000d7, 0x000000fb,
0x0000007c, 0x000000e3, 0x00000039, 0x00000082,
0x0000009b, 0x0000002f, 0x000000ff, 0x00000087,
0x00000034, 0x0000008e, 0x00000043, 0x00000044,
0x000000c4, 0x000000de, 0x000000e9, 0x000000cb,
0x00000054, 0x0000007b, 0x00000094, 0x00000032,
0x000000a6, 0x000000c2, 0x00000023, 0x0000003d,
0x000000ee, 0x0000004c, 0x00000095, 0x0000000b,
0x00000042, 0x000000fa, 0x000000c3, 0x0000004e,
0x00000008, 0x0000002e, 0x000000a1, 0x00000066,
0x00000028, 0x000000d9, 0x00000024, 0x000000b2,
0x00000076, 0x0000005b, 0x000000a2, 0x00000049,
0x0000006d, 0x0000008b, 0x000000d1, 0x00000025,
0x00000072, 0x000000f8, 0x000000f6, 0x00000064,
0x00000086, 0x00000068, 0x00000098, 0x00000016,
0x000000d4, 0x000000a4, 0x0000005c, 0x000000cc,
0x0000005d, 0x00000065, 0x000000b6, 0x00000092,
0x0000006c, 0x00000070, 0x00000048, 0x00000050,
0x000000fd, 0x000000ed, 0x000000b9, 0x000000da,
0x0000005e, 0x00000015, 0x00000046, 0x00000057,
0x000000a7, 0x0000008d, 0x0000009d, 0x00000084,
0x00000090, 0x000000d8, 0x000000ab, 0x00000000,
0x0000008c, 0x000000bc, 0x000000d3, 0x0000000a,
0x000000f7, 0x000000e4, 0x00000058, 0x00000005,
0x000000b8, 0x000000b3, 0x00000045, 0x00000006,
0x000000d0, 0x0000002c, 0x0000001e, 0x0000008f,
0x000000ca, 0x0000003f, 0x0000000f, 0x00000002,
0x000000c1, 0x000000af, 0x000000bd, 0x00000003,
0x00000001, 0x00000013, 0x0000008a, 0x0000006b,
0x0000003a, 0x00000091, 0x00000011, 0x00000041,
0x0000004f, 0x00000067, 0x000000dc, 0x000000ea,
0x00000097, 0x000000f2, 0x000000cf, 0x000000ce,
0x000000f0, 0x000000b4, 0x000000e6, 0x00000073,
0x00000096, 0x000000ac, 0x00000074, 0x00000022,
0x000000e7, 0x000000ad, 0x00000035, 0x00000085,
0x000000e2, 0x000000f9, 0x00000037, 0x000000e8,
0x0000001c, 0x00000075, 0x000000df, 0x0000006e,
0x00000047, 0x000000f1, 0x0000001a, 0x00000071,
0x0000001d, 0x00000029, 0x000000c5, 0x00000089,
0x0000006f, 0x000000b7, 0x00000062, 0x0000000e,
0x000000aa, 0x00000018, 0x000000be, 0x0000001b,
0x000000fc, 0x00000056, 0x0000003e, 0x0000004b,
0x000000c6, 0x000000d2, 0x00000079, 0x00000020,
0x0000009a, 0x000000db, 0x000000c0, 0x000000fe,
0x00000078, 0x000000cd, 0x0000005a, 0x000000f4,
0x0000001f, 0x000000dd, 0x000000a8, 0x00000033,
0x00000088, 0x00000007, 0x000000c7, 0x00000031,
0x000000b1, 0x00000012, 0x00000010, 0x00000059,
0x00000027, 0x00000080, 0x000000ec, 0x0000005f,
0x00000060, 0x00000051, 0x0000007f, 0x000000a9,
0x00000019, 0x000000b5, 0x0000004a, 0x0000000d,
0x0000002d, 0x000000e5, 0x0000007a, 0x0000009f,
0x00000093, 0x000000c9, 0x0000009c, 0x000000ef,
0x000000a0, 0x000000e0, 0x0000003b, 0x0000004d,
0x000000ae, 0x0000002a, 0x000000f5, 0x000000b0,
0x000000c8, 0x000000eb, 0x000000bb, 0x0000003c,
0x00000083, 0x00000053, 0x00000099, 0x00000061,
0x00000017, 0x0000002b, 0x00000004, 0x0000007e,
0x000000ba, 0x00000077, 0x000000d6, 0x00000026,
0x000000e1, 0x00000069, 0x00000014, 0x00000063,
0x00000055, 0x00000021, 0x0000000c, 0x0000007d,
}, {
0x00005200, 0x00000900, 0x00006a00, 0x0000d500,
0x00003000, 0x00003600, 0x0000a500, 0x00003800,
0x0000bf00, 0x00004000, 0x0000a300, 0x00009e00,
0x00008100, 0x0000f300, 0x0000d700, 0x0000fb00,
0x00007c00, 0x0000e300, 0x00003900, 0x00008200,
0x00009b00, 0x00002f00, 0x0000ff00, 0x00008700,
0x00003400, 0x00008e00, 0x00004300, 0x00004400,
0x0000c400, 0x0000de00, 0x0000e900, 0x0000cb00,
0x00005400, 0x00007b00, 0x00009400, 0x00003200,
0x0000a600, 0x0000c200, 0x00002300, 0x00003d00,
0x0000ee00, 0x00004c00, 0x00009500, 0x00000b00,
0x00004200, 0x0000fa00, 0x0000c300, 0x00004e00,
0x00000800, 0x00002e00, 0x0000a100, 0x00006600,
0x00002800, 0x0000d900, 0x00002400, 0x0000b200,
0x00007600, 0x00005b00, 0x0000a200, 0x00004900,
0x00006d00, 0x00008b00, 0x0000d100, 0x00002500,
0x00007200, 0x0000f800, 0x0000f600, 0x00006400,
0x00008600, 0x00006800, 0x00009800, 0x00001600,
0x0000d400, 0x0000a400, 0x00005c00, 0x0000cc00,
0x00005d00, 0x00006500, 0x0000b600, 0x00009200,
0x00006c00, 0x00007000, 0x00004800, 0x00005000,
0x0000fd00, 0x0000ed00, 0x0000b900, 0x0000da00,
0x00005e00, 0x00001500, 0x00004600, 0x00005700,
0x0000a700, 0x00008d00, 0x00009d00, 0x00008400,
0x00009000, 0x0000d800, 0x0000ab00, 0x00000000,
0x00008c00, 0x0000bc00, 0x0000d300, 0x00000a00,
0x0000f700, 0x0000e400, 0x00005800, 0x00000500,
0x0000b800, 0x0000b300, 0x00004500, 0x00000600,
0x0000d000, 0x00002c00, 0x00001e00, 0x00008f00,
0x0000ca00, 0x00003f00, 0x00000f00, 0x00000200,
0x0000c100, 0x0000af00, 0x0000bd00, 0x00000300,
0x00000100, 0x00001300, 0x00008a00, 0x00006b00,
0x00003a00, 0x00009100, 0x00001100, 0x00004100,
0x00004f00, 0x00006700, 0x0000dc00, 0x0000ea00,
0x00009700, 0x0000f200, 0x0000cf00, 0x0000ce00,
0x0000f000, 0x0000b400, 0x0000e600, 0x00007300,
0x00009600, 0x0000ac00, 0x00007400, 0x00002200,
0x0000e700, 0x0000ad00, 0x00003500, 0x00008500,
0x0000e200, 0x0000f900, 0x00003700, 0x0000e800,
0x00001c00, 0x00007500, 0x0000df00, 0x00006e00,
0x00004700, 0x0000f100, 0x00001a00, 0x00007100,
0x00001d00, 0x00002900, 0x0000c500, 0x00008900,
0x00006f00, 0x0000b700, 0x00006200, 0x00000e00,
0x0000aa00, 0x00001800, 0x0000be00, 0x00001b00,
0x0000fc00, 0x00005600, 0x00003e00, 0x00004b00,
0x0000c600, 0x0000d200, 0x00007900, 0x00002000,
0x00009a00, 0x0000db00, 0x0000c000, 0x0000fe00,
0x00007800, 0x0000cd00, 0x00005a00, 0x0000f400,
0x00001f00, 0x0000dd00, 0x0000a800, 0x00003300,
0x00008800, 0x00000700, 0x0000c700, 0x00003100,
0x0000b100, 0x00001200, 0x00001000, 0x00005900,
0x00002700, 0x00008000, 0x0000ec00, 0x00005f00,
0x00006000, 0x00005100, 0x00007f00, 0x0000a900,
0x00001900, 0x0000b500, 0x00004a00, 0x00000d00,
0x00002d00, 0x0000e500, 0x00007a00, 0x00009f00,
0x00009300, 0x0000c900, 0x00009c00, 0x0000ef00,
0x0000a000, 0x0000e000, 0x00003b00, 0x00004d00,
0x0000ae00, 0x00002a00, 0x0000f500, 0x0000b000,
0x0000c800, 0x0000eb00, 0x0000bb00, 0x00003c00,
0x00008300, 0x00005300, 0x00009900, 0x00006100,
0x00001700, 0x00002b00, 0x00000400, 0x00007e00,
0x0000ba00, 0x00007700, 0x0000d600, 0x00002600,
0x0000e100, 0x00006900, 0x00001400, 0x00006300,
0x00005500, 0x00002100, 0x00000c00, 0x00007d00,
}, {
0x00520000, 0x00090000, 0x006a0000, 0x00d50000,
0x00300000, 0x00360000, 0x00a50000, 0x00380000,
0x00bf0000, 0x00400000, 0x00a30000, 0x009e0000,
0x00810000, 0x00f30000, 0x00d70000, 0x00fb0000,
0x007c0000, 0x00e30000, 0x00390000, 0x00820000,
0x009b0000, 0x002f0000, 0x00ff0000, 0x00870000,
0x00340000, 0x008e0000, 0x00430000, 0x00440000,
0x00c40000, 0x00de0000, 0x00e90000, 0x00cb0000,
0x00540000, 0x007b0000, 0x00940000, 0x00320000,
0x00a60000, 0x00c20000, 0x00230000, 0x003d0000,
0x00ee0000, 0x004c0000, 0x00950000, 0x000b0000,
0x00420000, 0x00fa0000, 0x00c30000, 0x004e0000,
0x00080000, 0x002e0000, 0x00a10000, 0x00660000,
0x00280000, 0x00d90000, 0x00240000, 0x00b20000,
0x00760000, 0x005b0000, 0x00a20000, 0x00490000,
0x006d0000, 0x008b0000, 0x00d10000, 0x00250000,
0x00720000, 0x00f80000, 0x00f60000, 0x00640000,
0x00860000, 0x00680000, 0x00980000, 0x00160000,
0x00d40000, 0x00a40000, 0x005c0000, 0x00cc0000,
0x005d0000, 0x00650000, 0x00b60000, 0x00920000,
0x006c0000, 0x00700000, 0x00480000, 0x00500000,
0x00fd0000, 0x00ed0000, 0x00b90000, 0x00da0000,
0x005e0000, 0x00150000, 0x00460000, 0x00570000,
0x00a70000, 0x008d0000, 0x009d0000, 0x00840000,
0x00900000, 0x00d80000, 0x00ab0000, 0x00000000,
0x008c0000, 0x00bc0000, 0x00d30000, 0x000a0000,
0x00f70000, 0x00e40000, 0x00580000, 0x00050000,
0x00b80000, 0x00b30000, 0x00450000, 0x00060000,
0x00d00000, 0x002c0000, 0x001e0000, 0x008f0000,
0x00ca0000, 0x003f0000, 0x000f0000, 0x00020000,
0x00c10000, 0x00af0000, 0x00bd0000, 0x00030000,
0x00010000, 0x00130000, 0x008a0000, 0x006b0000,
0x003a0000, 0x00910000, 0x00110000, 0x00410000,
0x004f0000, 0x00670000, 0x00dc0000, 0x00ea0000,
0x00970000, 0x00f20000, 0x00cf0000, 0x00ce0000,
0x00f00000, 0x00b40000, 0x00e60000, 0x00730000,
0x00960000, 0x00ac0000, 0x00740000, 0x00220000,
0x00e70000, 0x00ad0000, 0x00350000, 0x00850000,
0x00e20000, 0x00f90000, 0x00370000, 0x00e80000,
0x001c0000, 0x00750000, 0x00df0000, 0x006e0000,
0x00470000, 0x00f10000, 0x001a0000, 0x00710000,
0x001d0000, 0x00290000, 0x00c50000, 0x00890000,
0x006f0000, 0x00b70000, 0x00620000, 0x000e0000,
0x00aa0000, 0x00180000, 0x00be0000, 0x001b0000,
0x00fc0000, 0x00560000, 0x003e0000, 0x004b0000,
0x00c60000, 0x00d20000, 0x00790000, 0x00200000,
0x009a0000, 0x00db0000, 0x00c00000, 0x00fe0000,
0x00780000, 0x00cd0000, 0x005a0000, 0x00f40000,
0x001f0000, 0x00dd0000, 0x00a80000, 0x00330000,
0x00880000, 0x00070000, 0x00c70000, 0x00310000,
0x00b10000, 0x00120000, 0x00100000, 0x00590000,
0x00270000, 0x00800000, 0x00ec0000, 0x005f0000,
0x00600000, 0x00510000, 0x007f0000, 0x00a90000,
0x00190000, 0x00b50000, 0x004a0000, 0x000d0000,
0x002d0000, 0x00e50000, 0x007a0000, 0x009f0000,
0x00930000, 0x00c90000, 0x009c0000, 0x00ef0000,
0x00a00000, 0x00e00000, 0x003b0000, 0x004d0000,
0x00ae0000, 0x002a0000, 0x00f50000, 0x00b00000,
0x00c80000, 0x00eb0000, 0x00bb0000, 0x003c0000,
0x00830000, 0x00530000, 0x00990000, 0x00610000,
0x00170000, 0x002b0000, 0x00040000, 0x007e0000,
0x00ba0000, 0x00770000, 0x00d60000, 0x00260000,
0x00e10000, 0x00690000, 0x00140000, 0x00630000,
0x00550000, 0x00210000, 0x000c0000, 0x007d0000,
}, {
0x52000000, 0x09000000, 0x6a000000, 0xd5000000,
0x30000000, 0x36000000, 0xa5000000, 0x38000000,
0xbf000000, 0x40000000, 0xa3000000, 0x9e000000,
0x81000000, 0xf3000000, 0xd7000000, 0xfb000000,
0x7c000000, 0xe3000000, 0x39000000, 0x82000000,
0x9b000000, 0x2f000000, 0xff000000, 0x87000000,
0x34000000, 0x8e000000, 0x43000000, 0x44000000,
0xc4000000, 0xde000000, 0xe9000000, 0xcb000000,
0x54000000, 0x7b000000, 0x94000000, 0x32000000,
0xa6000000, 0xc2000000, 0x23000000, 0x3d000000,
0xee000000, 0x4c000000, 0x95000000, 0x0b000000,
0x42000000, 0xfa000000, 0xc3000000, 0x4e000000,
0x08000000, 0x2e000000, 0xa1000000, 0x66000000,
0x28000000, 0xd9000000, 0x24000000, 0xb2000000,
0x76000000, 0x5b000000, 0xa2000000, 0x49000000,
0x6d000000, 0x8b000000, 0xd1000000, 0x25000000,
0x72000000, 0xf8000000, 0xf6000000, 0x64000000,
0x86000000, 0x68000000, 0x98000000, 0x16000000,
0xd4000000, 0xa4000000, 0x5c000000, 0xcc000000,
0x5d000000, 0x65000000, 0xb6000000, 0x92000000,
0x6c000000, 0x70000000, 0x48000000, 0x50000000,
0xfd000000, 0xed000000, 0xb9000000, 0xda000000,
0x5e000000, 0x15000000, 0x46000000, 0x57000000,
0xa7000000, 0x8d000000, 0x9d000000, 0x84000000,
0x90000000, 0xd8000000, 0xab000000, 0x00000000,
0x8c000000, 0xbc000000, 0xd3000000, 0x0a000000,
0xf7000000, 0xe4000000, 0x58000000, 0x05000000,
0xb8000000, 0xb3000000, 0x45000000, 0x06000000,
0xd0000000, 0x2c000000, 0x1e000000, 0x8f000000,
0xca000000, 0x3f000000, 0x0f000000, 0x02000000,
0xc1000000, 0xaf000000, 0xbd000000, 0x03000000,
0x01000000, 0x13000000, 0x8a000000, 0x6b000000,
0x3a000000, 0x91000000, 0x11000000, 0x41000000,
0x4f000000, 0x67000000, 0xdc000000, 0xea000000,
0x97000000, 0xf2000000, 0xcf000000, 0xce000000,
0xf0000000, 0xb4000000, 0xe6000000, 0x73000000,
0x96000000, 0xac000000, 0x74000000, 0x22000000,
0xe7000000, 0xad000000, 0x35000000, 0x85000000,
0xe2000000, 0xf9000000, 0x37000000, 0xe8000000,
0x1c000000, 0x75000000, 0xdf000000, 0x6e000000,
0x47000000, 0xf1000000, 0x1a000000, 0x71000000,
0x1d000000, 0x29000000, 0xc5000000, 0x89000000,
0x6f000000, 0xb7000000, 0x62000000, 0x0e000000,
0xaa000000, 0x18000000, 0xbe000000, 0x1b000000,
0xfc000000, 0x56000000, 0x3e000000, 0x4b000000,
0xc6000000, 0xd2000000, 0x79000000, 0x20000000,
0x9a000000, 0xdb000000, 0xc0000000, 0xfe000000,
0x78000000, 0xcd000000, 0x5a000000, 0xf4000000,
0x1f000000, 0xdd000000, 0xa8000000, 0x33000000,
0x88000000, 0x07000000, 0xc7000000, 0x31000000,
0xb1000000, 0x12000000, 0x10000000, 0x59000000,
0x27000000, 0x80000000, 0xec000000, 0x5f000000,
0x60000000, 0x51000000, 0x7f000000, 0xa9000000,
0x19000000, 0xb5000000, 0x4a000000, 0x0d000000,
0x2d000000, 0xe5000000, 0x7a000000, 0x9f000000,
0x93000000, 0xc9000000, 0x9c000000, 0xef000000,
0xa0000000, 0xe0000000, 0x3b000000, 0x4d000000,
0xae000000, 0x2a000000, 0xf5000000, 0xb0000000,
0xc8000000, 0xeb000000, 0xbb000000, 0x3c000000,
0x83000000, 0x53000000, 0x99000000, 0x61000000,
0x17000000, 0x2b000000, 0x04000000, 0x7e000000,
0xba000000, 0x77000000, 0xd6000000, 0x26000000,
0xe1000000, 0x69000000, 0x14000000, 0x63000000,
0x55000000, 0x21000000, 0x0c000000, 0x7d000000,
}
};
EXPORT_SYMBOL_GPL(crypto_ft_tab);
EXPORT_SYMBOL_GPL(crypto_fl_tab);
EXPORT_SYMBOL_GPL(crypto_it_tab);
EXPORT_SYMBOL_GPL(crypto_il_tab);
/* initialise the key schedule from the user supplied key */
#define star_x(x) (((x) & 0x7f7f7f7f) << 1) ^ ((((x) & 0x80808080) >> 7) * 0x1b)
#define imix_col(y, x) do { \
u = star_x(x); \
v = star_x(u); \
w = star_x(v); \
t = w ^ (x); \
(y) = u ^ v ^ w; \
(y) ^= ror32(u ^ t, 8) ^ \
ror32(v ^ t, 16) ^ \
ror32(t, 24); \
} while (0)
#define ls_box(x) \
crypto_fl_tab[0][byte(x, 0)] ^ \
crypto_fl_tab[1][byte(x, 1)] ^ \
crypto_fl_tab[2][byte(x, 2)] ^ \
crypto_fl_tab[3][byte(x, 3)]
#define loop4(i) do { \
t = ror32(t, 8); \
t = ls_box(t) ^ rco_tab[i]; \
t ^= ctx->key_enc[4 * i]; \
ctx->key_enc[4 * i + 4] = t; \
t ^= ctx->key_enc[4 * i + 1]; \
ctx->key_enc[4 * i + 5] = t; \
t ^= ctx->key_enc[4 * i + 2]; \
ctx->key_enc[4 * i + 6] = t; \
t ^= ctx->key_enc[4 * i + 3]; \
ctx->key_enc[4 * i + 7] = t; \
} while (0)
#define loop6(i) do { \
t = ror32(t, 8); \
t = ls_box(t) ^ rco_tab[i]; \
t ^= ctx->key_enc[6 * i]; \
ctx->key_enc[6 * i + 6] = t; \
t ^= ctx->key_enc[6 * i + 1]; \
ctx->key_enc[6 * i + 7] = t; \
t ^= ctx->key_enc[6 * i + 2]; \
ctx->key_enc[6 * i + 8] = t; \
t ^= ctx->key_enc[6 * i + 3]; \
ctx->key_enc[6 * i + 9] = t; \
t ^= ctx->key_enc[6 * i + 4]; \
ctx->key_enc[6 * i + 10] = t; \
t ^= ctx->key_enc[6 * i + 5]; \
ctx->key_enc[6 * i + 11] = t; \
} while (0)
#define loop8tophalf(i) do { \
t = ror32(t, 8); \
t = ls_box(t) ^ rco_tab[i]; \
t ^= ctx->key_enc[8 * i]; \
ctx->key_enc[8 * i + 8] = t; \
t ^= ctx->key_enc[8 * i + 1]; \
ctx->key_enc[8 * i + 9] = t; \
t ^= ctx->key_enc[8 * i + 2]; \
ctx->key_enc[8 * i + 10] = t; \
t ^= ctx->key_enc[8 * i + 3]; \
ctx->key_enc[8 * i + 11] = t; \
} while (0)
#define loop8(i) do { \
loop8tophalf(i); \
t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
ctx->key_enc[8 * i + 12] = t; \
t ^= ctx->key_enc[8 * i + 5]; \
ctx->key_enc[8 * i + 13] = t; \
t ^= ctx->key_enc[8 * i + 6]; \
ctx->key_enc[8 * i + 14] = t; \
t ^= ctx->key_enc[8 * i + 7]; \
ctx->key_enc[8 * i + 15] = t; \
} while (0)
/**
* crypto_aes_expand_key - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
* @in_key: The supplied key.
* @key_len: The length of the supplied key.
*
 * Returns 0 on success; the function fails only if an invalid key size (or
 * pointer) is supplied.
 * The expanded key size is 240 bytes (max of 14 rounds, each with a unique
 * 16-byte round key, plus a 16-byte key used before the first round).
* The decryption key is prepared for the "Equivalent Inverse Cipher" as
* described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
* for the initial combination, the second slot for the first round and so on.
*/
int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
const __le32 *key = (const __le32 *)in_key;
u32 i, t, u, v, w, j;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
key_len != AES_KEYSIZE_256)
return -EINVAL;
ctx->key_length = key_len;
ctx->key_dec[key_len + 24] = ctx->key_enc[0] = le32_to_cpu(key[0]);
ctx->key_dec[key_len + 25] = ctx->key_enc[1] = le32_to_cpu(key[1]);
ctx->key_dec[key_len + 26] = ctx->key_enc[2] = le32_to_cpu(key[2]);
ctx->key_dec[key_len + 27] = ctx->key_enc[3] = le32_to_cpu(key[3]);
switch (key_len) {
case AES_KEYSIZE_128:
t = ctx->key_enc[3];
for (i = 0; i < 10; ++i)
loop4(i);
break;
case AES_KEYSIZE_192:
ctx->key_enc[4] = le32_to_cpu(key[4]);
t = ctx->key_enc[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i)
loop6(i);
break;
case AES_KEYSIZE_256:
ctx->key_enc[4] = le32_to_cpu(key[4]);
ctx->key_enc[5] = le32_to_cpu(key[5]);
ctx->key_enc[6] = le32_to_cpu(key[6]);
t = ctx->key_enc[7] = le32_to_cpu(key[7]);
for (i = 0; i < 6; ++i)
loop8(i);
loop8tophalf(i);
break;
}
ctx->key_dec[0] = ctx->key_enc[key_len + 24];
ctx->key_dec[1] = ctx->key_enc[key_len + 25];
ctx->key_dec[2] = ctx->key_enc[key_len + 26];
ctx->key_dec[3] = ctx->key_enc[key_len + 27];
for (i = 4; i < key_len + 24; ++i) {
j = key_len + 24 - (i & ~3) + (i & 3);
imix_col(ctx->key_dec[j], ctx->key_enc[i]);
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_aes_expand_key);
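/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use crypto_aes_expand_key() directly. The key bytes are made up for
 * the example; any 16-, 24- or 32-byte key works. Wrapped in "#if 0"
 * so it serves as documentation only.
 */
#if 0
static int example_expand_key(void)
{
	struct crypto_aes_ctx ctx;
	static const u8 key[AES_KEYSIZE_128] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	int err;

	err = crypto_aes_expand_key(&ctx, key, sizeof(key));
	if (err)
		return err;	/* -EINVAL on a bad key length */
	/* ctx.key_enc[] now holds the 11 AES-128 round keys (44 words) */
	return 0;
}
#endif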
/**
* crypto_aes_set_key - Set the AES key.
* @tfm: The %crypto_tfm that is used in the context.
* @in_key: The input key.
* @key_len: The size of the key.
*
 * Returns 0 on success; on failure -EINVAL is returned and the
 * %CRYPTO_TFM_RES_BAD_KEY_LEN flag in @tfm is set. The function uses
 * crypto_aes_expand_key() to expand the key.
* &crypto_aes_ctx _must_ be the private data embedded in @tfm which is
* retrieved with crypto_tfm_ctx().
*/
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int ret;
ret = crypto_aes_expand_key(ctx, in_key, key_len);
if (!ret)
return 0;
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_aes_set_key);
/* encrypt a block of text */
#define f_rn(bo, bi, n, k) do { \
bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^ \
crypto_ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \
crypto_ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
} while (0)
#define f_nround(bo, bi, k) do {\
f_rn(bo, bi, 0, k); \
f_rn(bo, bi, 1, k); \
f_rn(bo, bi, 2, k); \
f_rn(bo, bi, 3, k); \
k += 4; \
} while (0)
#define f_rl(bo, bi, n, k) do { \
bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^ \
crypto_fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \
crypto_fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
} while (0)
#define f_lround(bo, bi, k) do {\
f_rl(bo, bi, 0, k); \
f_rl(bo, bi, 1, k); \
f_rl(bo, bi, 2, k); \
f_rl(bo, bi, 3, k); \
} while (0)
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
u32 b0[4], b1[4];
const u32 *kp = ctx->key_enc + 4;
const int key_len = ctx->key_length;
b0[0] = le32_to_cpu(src[0]) ^ ctx->key_enc[0];
b0[1] = le32_to_cpu(src[1]) ^ ctx->key_enc[1];
b0[2] = le32_to_cpu(src[2]) ^ ctx->key_enc[2];
b0[3] = le32_to_cpu(src[3]) ^ ctx->key_enc[3];
if (key_len > 24) {
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
}
if (key_len > 16) {
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
}
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_lround(b0, b1, kp);
dst[0] = cpu_to_le32(b0[0]);
dst[1] = cpu_to_le32(b0[1]);
dst[2] = cpu_to_le32(b0[2]);
dst[3] = cpu_to_le32(b0[3]);
}
/* decrypt a block of text */
#define i_rn(bo, bi, n, k) do { \
bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^ \
crypto_it_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \
crypto_it_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
} while (0)
#define i_nround(bo, bi, k) do {\
i_rn(bo, bi, 0, k); \
i_rn(bo, bi, 1, k); \
i_rn(bo, bi, 2, k); \
i_rn(bo, bi, 3, k); \
k += 4; \
} while (0)
#define i_rl(bo, bi, n, k) do { \
bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^ \
crypto_il_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \
crypto_il_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
} while (0)
#define i_lround(bo, bi, k) do {\
i_rl(bo, bi, 0, k); \
i_rl(bo, bi, 1, k); \
i_rl(bo, bi, 2, k); \
i_rl(bo, bi, 3, k); \
} while (0)
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *src = (const __le32 *)in;
__le32 *dst = (__le32 *)out;
u32 b0[4], b1[4];
const int key_len = ctx->key_length;
const u32 *kp = ctx->key_dec + 4;
b0[0] = le32_to_cpu(src[0]) ^ ctx->key_dec[0];
b0[1] = le32_to_cpu(src[1]) ^ ctx->key_dec[1];
b0[2] = le32_to_cpu(src[2]) ^ ctx->key_dec[2];
b0[3] = le32_to_cpu(src[3]) ^ ctx->key_dec[3];
if (key_len > 24) {
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
}
if (key_len > 16) {
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
}
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_lround(b0, b1, kp);
dst[0] = cpu_to_le32(b0[0]);
dst[1] = cpu_to_le32(b0[1]);
dst[2] = cpu_to_le32(b0[2]);
dst[3] = cpu_to_le32(b0[3]);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = crypto_aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}
}
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("aes");
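/*
 * Illustrative sketch, not part of the original file: a consumer of the
 * "aes" algorithm registered above, via the kernel's single-block
 * cipher API. Error handling is abbreviated and the all-zero key and
 * block are placeholders. Wrapped in "#if 0" so it serves as
 * documentation only.
 */
#if 0
static int example_one_block(void)
{
	struct crypto_cipher *tfm;
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 in[AES_BLOCK_SIZE] = { 0 }, out[AES_BLOCK_SIZE];
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);
	crypto_free_cipher(tfm);
	return err;
}
#endif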
| gpl-2.0 |
haydenbbickerton/zaracl_kernel | drivers/net/wimax/i2400m/tx.c | 9864 | 38202 | /*
* Intel Wireless WiMAX Connection 2400m
* Generic (non-bus specific) TX handling
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
* - Initial implementation
*
* Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Rewritten to use a single FIFO to lower the memory allocation
* pressure and optimize cache hits when copying to the queue, as
* well as splitting out bus-specific code.
*
*
* Implements data transmission to the device; this is done through a
* software FIFO, as data/control frames can be coalesced (while the
* device is reading the previous tx transaction, others accumulate).
*
 * A FIFO is used because in the end it is resource-cheaper than trying
 * to implement scatter/gather over USB. Also, most traffic is going
 * to be download (vs. upload).
*
* The format for sending/receiving data to/from the i2400m is
* described in detail in rx.c:PROTOCOL FORMAT. In here we implement
* the transmission of that. This is split between a bus-independent
* part that just prepares everything and a bus-specific part that
* does the actual transmission over the bus to the device (in the
* bus-specific driver).
*
*
* The general format of a device-host transaction is MSG-HDR, PLD1,
* PLD2...PLDN, PL1, PL2,...PLN, PADDING.
*
 * Because we need to send payload descriptors and then payloads, and
* because it is kind of expensive to do scatterlists in USB (one URB
* per node), it becomes cheaper to append all the data to a FIFO
* (copying to a FIFO potentially in cache is cheaper).
*
* Then the bus-specific code takes the parts of that FIFO that are
* written and passes them to the device.
*
* So the concepts to keep in mind there are:
*
* We use a FIFO to queue the data in a linear buffer. We first append
* a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then
* go appending payloads until we run out of space or of payload
* descriptors. Then we append padding to make the whole transaction a
* multiple of i2400m->bus_tx_block_size (as defined by the bus layer).
*
* - A TX message: a combination of a message header, payload
* descriptors and payloads.
*
* Open: it is marked as active (i2400m->tx_msg is valid) and we
* can keep adding payloads to it.
*
* Closed: we are not appending more payloads to this TX message
 *      (exhausted space in the queue, too many payloads or
* whichever). We have appended padding so the whole message
* length is aligned to i2400m->bus_tx_block_size (as set by the
* bus/transport layer).
*
* - Most of the time we keep a TX message open to which we append
* payloads.
*
* - If we are going to append and there is no more space (we are at
* the end of the FIFO), we close the message, mark the rest of the
* FIFO space unusable (skip_tail), create a new message at the
* beginning of the FIFO (if there is space) and append the message
* there.
*
* This is because we need to give linear TX messages to the bus
 * engine. So we don't let a message spill over the remaining FIFO
 * space at the tail; we skip it and continue at the head.
*
* - We overload one of the fields in the message header to use it as
* 'size' of the TX message, so we can iterate over them. It also
* contains a flag that indicates if we have to skip it or not.
* When we send the buffer, we update that to its real on-the-wire
* value.
*
 * - The MSG-HDR PLD1...PLDN block has to be a multiple of 16 in size.
 *
 *   It follows that if MSG-HDR says we have N payloads, the whole
* header + descriptors is 16 + 4*N; for those to be a multiple of
* 16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80...
* bytes).
*
* So if we have only 1 payload, we have to submit a header that in
* all truth has space for 4.
*
 *   The implication is that we reserve space for 12 (64 bytes); but
 *   if we fill up only (eg) 2, our header becomes 32 bytes only. So
 *   the TX engine has to shift those 32 bytes of msg header up so
 *   that the payloads start right after it, and it has to know
 *   about that shift.
*
* It is cheaper to move the header up than the whole payloads down.
*
* We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'.
*
* - Each payload has to be size-padded to 16 bytes; before appending
* it, we just do it.
*
* - The whole message has to be padded to i2400m->bus_tx_block_size;
* we do this at close time. Thus, when reserving space for the
* payload, we always make sure there is also free space for this
* padding that sooner or later will happen.
*
* When we append a message, we tell the bus specific code to kick in
* TXs. It will TX (in parallel) until the buffer is exhausted--hence
* the lockin we do. The TX code will only send a TX message at the
* time (which remember, might contain more than one payload). Of
* course, when the bus-specific driver attempts to TX a message that
* is still open, it gets closed first.
*
 * Gee, this is messy; well, here's a picture. In the example below we have a
* partially full FIFO, with a closed message ready to be delivered
* (with a moved message header to make sure it is size-aligned to
* 16), TAIL room that was unusable (and thus is marked with a message
* header that says 'skip this') and at the head of the buffer, an
* incomplete message with a couple of payloads.
*
* N ___________________________________________________
* | |
* | TAIL room |
* | |
* | msg_hdr to skip (size |= 0x80000) |
* |---------------------------------------------------|-------
* | | /|\
* | | |
* | TX message padding | |
* | | |
* | | |
* |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
* | | |
* | payload 1 | |
* | | N * tx_block_size
* | | |
* |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
* | | |
 * |                      payload 0                    |       |
* | | |
* | | |
* |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - -
* | padding 3 /|\ | | /|\
* | padding 2 | | | |
* | pld 1 32 bytes (2 * 16) | | |
* | pld 0 | | | |
* | moved msg_hdr \|/ | \|/ |
* |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - - |
* | | _PLD_SIZE
* | unused | |
* | | |
* |- - - - - - - - - - - - - - - - - - - - - - - - - -| |
* | msg_hdr (size X) [this message is closed] | \|/
* |===================================================|========== <=== OUT
* | |
* | |
* | |
 * |                      Free room                    |
* | |
* | |
* | |
* | |
* | |
* | |
* | |
* | |
* | |
* |===================================================|========== <=== IN
* | |
* | |
* | |
* | |
* | payload 1 |
* | |
* | |
* |- - - - - - - - - - - - - - - - - - - - - - - - - -|
* | |
* | payload 0 |
* | |
* | |
* |- - - - - - - - - - - - - - - - - - - - - - - - - -|
* | pld 11 /|\ |
* | ... | |
 * |  pld  1       64 bytes (4 * 16)                   |
* | pld 0 | |
* | msg_hdr (size X) \|/ [message is open] |
* 0 ---------------------------------------------------
*
*
* ROADMAP
*
* i2400m_tx_setup() Called by i2400m_setup
* i2400m_tx_release() Called by i2400m_release()
*
* i2400m_tx() Called to send data or control frames
* i2400m_tx_fifo_push() Allocates append-space in the FIFO
* i2400m_tx_new() Opens a new message in the FIFO
* i2400m_tx_fits() Checks if a new payload fits in the message
* i2400m_tx_close() Closes an open message in the FIFO
* i2400m_tx_skip_tail() Marks unusable FIFO tail space
* i2400m->bus_tx_kick()
*
 * Now i2400m->bus_tx_kick() is the bus-specific driver backend
* implementation; that would do:
*
* i2400m->bus_tx_kick()
* i2400m_tx_msg_get() Gets first message ready to go
* ...sends it...
* i2400m_tx_msg_sent() Ack the message is sent; repeat from
* _tx_msg_get() until it returns NULL
* (FIFO empty).
*/
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "i2400m.h"
#define D_SUBMODULE tx
#include "debug-levels.h"
enum {
/**
* TX Buffer size
*
* Doc says maximum transaction is 16KiB. If we had 16KiB en
* route and 16KiB being queued, it boils down to needing
* 32KiB.
* 32KiB is insufficient for 1400 MTU, hence increasing
* tx buffer size to 64KiB.
*/
I2400M_TX_BUF_SIZE = 65536,
/**
	 * Message header and payload descriptors have to be 16-byte
	 * aligned (16 + 4 * N = 16 * M). Given that average sent
	 * packets are MTU-sized (~1400-1500 bytes), it follows that we
	 * could fit at most 10-11 payloads in one transaction. To meet
	 * the alignment requirement, that means we need to leave space
	 * for 12 (64 bytes). To simplify, we leave space for that. If
	 * at the end there are fewer, we pad up to the nearest
	 * multiple of 16.
*/
/*
* According to Intel Wimax i3200, i5x50 and i6x50 specification
* documents, the maximum number of payloads per message can be
* up to 60. Increasing the number of payloads to 60 per message
* helps to accommodate smaller payloads in a single transaction.
*/
I2400M_TX_PLD_MAX = 60,
I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
+ I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
I2400M_TX_SKIP = 0x80000000,
/*
* According to Intel Wimax i3200, i5x50 and i6x50 specification
* documents, the maximum size of each message can be up to 16KiB.
*/
I2400M_TX_MSG_SIZE = 16384,
};
#define TAIL_FULL ((void *)~(unsigned long)NULL)
/*
* Calculate how much tail room is available
*
 * Note the trick here. This path is ONLY called for Case A (see
* i2400m_tx_fifo_push() below), where we have:
*
* Case A
* N ___________
* | tail room |
* | |
* |<- IN ->|
* | |
* | data |
* | |
* |<- OUT ->|
* | |
* | head room |
* 0 -----------
*
 * When calculating the tail_room, tx_in might get to be zero if
 * i2400m->tx_in is right at the end of the buffer (really full
 * buffer) and there is no head room. In this case, tail_room would be
 * I2400M_TX_BUF_SIZE, although it is actually zero. Hence the final
 * mod (%) operation. However, with that optimization an
 * i2400m->tx_in of zero would also yield zero tail room, so we treat
 * it as a special case.
*/
static inline
size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
{
size_t tail_room;
size_t tx_in;
if (unlikely(i2400m->tx_in == 0))
return I2400M_TX_BUF_SIZE;
tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
tail_room = I2400M_TX_BUF_SIZE - tx_in;
tail_room %= I2400M_TX_BUF_SIZE;
return tail_room;
}
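/*
 * Illustrative sketch, not part of the original file: the same
 * tail-room computation as a standalone helper, showing why the final
 * modulo is needed when the write index sits exactly on a wrap point
 * (in == bufsz gives bufsz - 0 = bufsz before the modulo, 0 after).
 * Wrapped in "#if 0" so it serves as documentation only.
 */
#if 0
static size_t example_tail_room(size_t in, size_t bufsz)
{
	if (in == 0)		/* never written to: all tail room */
		return bufsz;
	return (bufsz - in % bufsz) % bufsz;
}
#endif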
/*
* Allocate @size bytes in the TX fifo, return a pointer to it
*
* @i2400m: device descriptor
* @size: size of the buffer we need to allocate
* @padding: ensure that there is at least this many bytes of free
* contiguous space in the fifo. This is needed because later on
* we might need to add padding.
 * @try_head: specify whether to allocate head room or tail room space
 * in the TX FIFO. This boolean is required to avoid a system hang
 * due to an infinite loop caused by i2400m_tx_fifo_push().
 * The caller must always try to allocate tail room space first by
 * calling this routine with try_head = 0. If there is not enough
 * tail room space but there is enough head room space
 * (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head
 * room space by calling this routine again with try_head = 1.
*
* Returns:
*
* Pointer to the allocated space. NULL if there is no
* space. TAIL_FULL if there is no space at the tail but there is at
* the head (Case B below).
*
* These are the two basic cases we need to keep an eye for -- it is
* much better explained in linux/kernel/kfifo.c, but this code
* basically does the same. No rocket science here.
*
* Case A Case B
* N ___________ ___________
* | tail room | | data |
* | | | |
* |<- IN ->| |<- OUT ->|
* | | | |
* | data | | room |
* | | | |
* |<- OUT ->| |<- IN ->|
* | | | |
* | head room | | data |
* 0 ----------- -----------
*
* We allocate only *contiguous* space.
*
* We can allocate only from 'room'. In Case B, it is simple; in case
* A, we only try from the tail room; if it is not enough, we just
 * fail and return TAIL_FULL and let the caller figure out if it wants to
* skip the tail room and try to allocate from the head.
*
* There is a corner case, wherein i2400m_tx_new() can get into
* an infinite loop calling i2400m_tx_fifo_push().
 * In certain situations, tx_in would have reached the top of the TX FIFO
* and i2400m_tx_tail_room() returns 0, as described below:
*
* N ___________ tail room is zero
* |<- IN ->|
* | |
* | |
* | |
* | data |
* |<- OUT ->|
* | |
* | |
* | head room |
* 0 -----------
* During such a time, where tail room is zero in the TX FIFO and if there
* is a request to add a payload to TX FIFO, which calls:
* i2400m_tx()
* ->calls i2400m_tx_close()
* ->calls i2400m_tx_skip_tail()
* goto try_new;
* ->calls i2400m_tx_new()
* |----> [try_head:]
* infinite loop | ->calls i2400m_tx_fifo_push()
* | if (tail_room < needed)
* | if (head_room => needed)
* | return TAIL_FULL;
* |<---- goto try_head;
*
* i2400m_tx() calls i2400m_tx_close() to close the message, since there
* is no tail room to accommodate the payload and calls
* i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
* i2400m_tx_new() to allocate space for new message header calling
* i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail space
* to accommodate the message header, but there is enough head space.
* The i2400m_tx_new() keeps re-retrying by calling i2400m_tx_fifo_push()
* ending up in a loop causing system freeze.
*
* This corner case is avoided by using a try_head boolean,
* as an argument to i2400m_tx_fifo_push().
*
* Note:
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
*
* The indexes keep increasing and we reset them to zero when we
* pop data off the queue
*/
static
void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
size_t padding, bool try_head)
{
struct device *dev = i2400m_dev(i2400m);
size_t room, tail_room, needed_size;
void *ptr;
needed_size = size + padding;
room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out);
if (room < needed_size) { /* this takes care of Case B */
d_printf(2, dev, "fifo push %zu/%zu: no space\n",
size, padding);
return NULL;
}
/* Is there space at the tail? */
tail_room = __i2400m_tx_tail_room(i2400m);
if (!try_head && tail_room < needed_size) {
/*
* If the tail room space is not enough to push the message
* in the TX FIFO, then there are two possibilities:
* 1. There is enough head room space to accommodate
* this message in the TX FIFO.
* 2. There is not enough space in the head room and
* in tail room of the TX FIFO to accommodate the message.
		 * In case (1), return TAIL_FULL so that the caller
		 * can figure out whether it wants to push the message
		 * into the head room space.
		 * In case (2), return NULL, indicating that the TX FIFO
		 * cannot accommodate the message.
*/
if (room - tail_room >= needed_size) {
d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
size, padding);
return TAIL_FULL; /* There might be head space */
} else {
d_printf(2, dev, "fifo push %zu/%zu: no head space\n",
size, padding);
return NULL; /* There is no space */
}
}
ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE;
d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding,
i2400m->tx_in % I2400M_TX_BUF_SIZE);
i2400m->tx_in += size;
return ptr;
}
/*
* Mark the tail of the FIFO buffer as 'to-skip'
*
* We should never hit the BUG_ON() because all the sizes we push to
* the FIFO are padded to be a multiple of 16 -- the size of *msg
* (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the
* header).
*
* Tail room can get to be zero if a message was opened when there was
* space only for a header. _tx_close() will mark it as to-skip (as it
* will have no payloads) and there will be no more space to flush, so
* nothing has to be done here. This is probably cheaper than ensuring
* in _tx_new() that there is some space for payloads...as we could
* always possibly hit the same problem if the payload wouldn't fit.
*
* Note:
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
*
* This path is only taken for Case A FIFO situations [see
* i2400m_tx_fifo_push()]
*/
static
void i2400m_tx_skip_tail(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
size_t tail_room = __i2400m_tx_tail_room(i2400m);
struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in;
if (unlikely(tail_room == 0))
return;
BUG_ON(tail_room < sizeof(*msg));
msg->size = tail_room | I2400M_TX_SKIP;
d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n",
tail_room, tx_in);
i2400m->tx_in += tail_room;
}
/*
* Check if a skb will fit in the TX queue's current active TX
* message (if there are still descriptors left unused).
*
* Returns:
* 0 if the message won't fit, 1 if it will.
*
* Note:
*
* Assumes a TX message is active (i2400m->tx_msg).
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
*/
static
unsigned i2400m_tx_fits(struct i2400m *i2400m)
{
struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg;
return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX;
}
/*
* Start a new TX message header in the queue.
*
* Reserve memory from the base FIFO engine and then just initialize
* the message header.
*
* We allocate the biggest TX message header we might need (one that'd
* fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be
 * 'ironed out' and the unneeded parts removed.
*
* NOTE:
*
 * Assumes that the previous message is CLOSED (e.g. either
* there was none or 'i2400m_tx_close()' was called on it).
*
* Assumes i2400m->tx_lock is taken, and we use that as a barrier
*/
static
void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
bool try_head = false;
BUG_ON(i2400m->tx_msg != NULL);
/*
* In certain situations, TX queue might have enough space to
* accommodate the new message header I2400M_TX_PLD_SIZE, but
* might not have enough space to accommodate the payloads.
* Adding bus_tx_room_min padding while allocating a new TX message
* increases the possibilities of including at least one payload of the
* size <= bus_tx_room_min.
*/
try_head:
tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
i2400m->bus_tx_room_min, try_head);
if (tx_msg == NULL)
goto out;
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
try_head = true;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
tx_msg->size = I2400M_TX_PLD_SIZE;
out:
i2400m->tx_msg = tx_msg;
d_printf(2, dev, "new TX message: %p @%zu\n",
tx_msg, (void *) tx_msg - i2400m->tx_buf);
}
/*
* Finalize the current TX message header
*
* Sets the message header to be at the proper location depending on
* how many descriptors we have (check documentation at the file's
* header for more info on that).
*
* Appends padding bytes to make sure the whole TX message (counting
* from the 'relocated' message header) is aligned to
* tx_block_size. We assume the _append() code has left enough space
* in the FIFO for that. If there are no payloads, just pass, as it
* won't be transferred.
*
* The amount of padding bytes depends on how many payloads are in the
* TX message, as the "msg header and payload descriptors" will be
* shifted up in the buffer.
*/
static
void i2400m_tx_close(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
struct i2400m_msg_hdr *tx_msg_moved;
size_t aligned_size, padding, hdr_size;
void *pad_buf;
unsigned num_pls;
if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */
goto out;
num_pls = le16_to_cpu(tx_msg->num_pls);
	/* We can get this situation when a new message was started
	 * and there was no space to add payloads before hitting the
	 * tail (and taking padding into consideration). */
if (num_pls == 0) {
tx_msg->size |= I2400M_TX_SKIP;
goto out;
}
/* Relocate the message header
*
* Find the current header size, align it to 16 and if we need
* to move it so the tail is next to the payloads, move it and
* set the offset.
*
* If it moved, this header is good only for transmission; the
* original one (it is kept if we moved) is still used to
* figure out where the next TX message starts (and where the
* offset to the moved header is).
*/
hdr_size = sizeof(*tx_msg)
+ le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
tx_msg_moved = (void *) tx_msg + tx_msg->offset;
memmove(tx_msg_moved, tx_msg, hdr_size);
tx_msg_moved->size -= tx_msg->offset;
/*
* Now figure out how much we have to add to the (moved!)
* message so the size is a multiple of i2400m->bus_tx_block_size.
*/
aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
padding = aligned_size - tx_msg_moved->size;
if (padding > 0) {
pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
if (unlikely(WARN_ON(pad_buf == NULL
|| pad_buf == TAIL_FULL))) {
/* This should not happen -- append should verify
* there is always space left at least to append
* tx_block_size */
dev_err(dev,
"SW BUG! Possible data leakage from memory the "
"device should not read for padding - "
"size %lu aligned_size %zu tx_buf %p in "
"%zu out %zu\n",
(unsigned long) tx_msg_moved->size,
aligned_size, i2400m->tx_buf, i2400m->tx_in,
i2400m->tx_out);
} else
memset(pad_buf, 0xad, padding);
}
tx_msg_moved->padding = cpu_to_le16(padding);
tx_msg_moved->size += padding;
if (tx_msg != tx_msg_moved)
tx_msg->size += padding;
out:
i2400m->tx_msg = NULL;
}
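/*
 * Worked example (added for illustration, assuming the 16-byte message
 * header and 4-byte payload descriptors described in the file header,
 * and I2400M_PL_ALIGN == 16): with 2 payloads, hdr_size = 16 + 2 * 4 =
 * 24, which ALIGN()s to 32; the header is then moved up by offset =
 * I2400M_TX_PLD_SIZE - 32 = 256 - 32 = 224 bytes, so that it ends
 * right where the payloads begin.
 */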
/**
* i2400m_tx - send the data in a buffer to the device
 *
 * @i2400m: device descriptor
 *
* @buf: pointer to the buffer to transmit
*
* @buf_len: buffer size
*
* @pl_type: type of the payload we are sending.
*
* Returns:
* 0 if ok, < 0 errno code on error (-ENOSPC, if there is no more
* room for the message in the queue).
*
* Appends the buffer to the TX FIFO and notifies the bus-specific
* part of the driver that there is new data ready to transmit.
* Once this function returns, the buffer has been copied, so it can
* be reused.
*
* The steps followed to append are explained in detail in the file
* header.
*
* Whenever we write to a message, we increase msg->size, so it
* reflects exactly how big the message is. This is needed so that if
* we concatenate two messages before they can be sent, the code that
* sends the messages can find the boundaries (and it will replace the
* size with the real barker before sending).
*
* Note:
*
* Cold and warm reset payloads need to be sent as a single
* payload, so we handle that.
*/
int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
enum i2400m_pt pl_type)
{
int result = -ENOSPC;
struct device *dev = i2400m_dev(i2400m);
unsigned long flags;
size_t padded_len;
void *ptr;
bool try_head = false;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n",
i2400m, buf, buf_len, pl_type);
padded_len = ALIGN(buf_len, I2400M_PL_ALIGN);
d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len);
/* If there is no current TX message, create one; if the
* current one is out of payload slots or we have a singleton,
* close it and start a new one */
spin_lock_irqsave(&i2400m->tx_lock, flags);
/* If tx_buf is NULL, device is shutdown */
if (i2400m->tx_buf == NULL) {
result = -ESHUTDOWN;
goto error_tx_new;
}
try_new:
if (unlikely(i2400m->tx_msg == NULL))
i2400m_tx_new(i2400m);
else if (unlikely(!i2400m_tx_fits(i2400m)
|| (is_singleton && i2400m->tx_msg->num_pls != 0))) {
d_printf(2, dev, "closing TX message (fits %u singleton "
"%u num_pls %u)\n", i2400m_tx_fits(i2400m),
is_singleton, i2400m->tx_msg->num_pls);
i2400m_tx_close(i2400m);
i2400m_tx_new(i2400m);
}
if (i2400m->tx_msg == NULL)
goto error_tx_new;
/*
* Check if this skb will fit in the TX queue's current active
* TX message. The total message size must not exceed the maximum
* size of each message I2400M_TX_MSG_SIZE. If it exceeds,
* close the current message and push this skb into the new message.
*/
if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
d_printf(2, dev, "TX: message too big, going new\n");
i2400m_tx_close(i2400m);
i2400m_tx_new(i2400m);
}
if (i2400m->tx_msg == NULL)
goto error_tx_new;
/* So we have a current message header; now append space for
* the message -- if there is not enough, try the head */
ptr = i2400m_tx_fifo_push(i2400m, padded_len,
i2400m->bus_tx_block_size, try_head);
if (ptr == TAIL_FULL) { /* Tail is full, try head */
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
try_head = true;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
d_printf(2, dev, "pl append: all full\n");
} else { /* Got space, copy it, set padding */
struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg;
unsigned num_pls = le16_to_cpu(tx_msg->num_pls);
memcpy(ptr, buf, buf_len);
memset(ptr + buf_len, 0xad, padded_len - buf_len);
i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type);
d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx\n",
le32_to_cpu(tx_msg->pld[num_pls].val),
pl_type, buf_len);
tx_msg->num_pls = le16_to_cpu(num_pls+1);
tx_msg->size += padded_len;
d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
padded_len, tx_msg->size, num_pls+1);
d_printf(2, dev,
"TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
(void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size,
num_pls+1, ptr - i2400m->tx_buf, buf_len, padded_len);
result = 0;
if (is_singleton)
i2400m_tx_close(i2400m);
}
error_tx_new:
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
/* kick in most cases, except when the TX subsys is down, as
* it might free space */
if (likely(result != -ESHUTDOWN))
i2400m->bus_tx_kick(i2400m);
d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
i2400m, buf, buf_len, pl_type, result);
return result;
}
EXPORT_SYMBOL_GPL(i2400m_tx);
/**
* i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it
*
 * @i2400m: device descriptor
* @bus_size: where to place the size of the TX message
*
* Called by the bus-specific driver to get the first TX message at
 * the FIFO that is ready for transmission.
*
* It sets the state in @i2400m to indicate the bus-specific driver is
* transferring that message (i2400m->tx_msg_size).
*
* Once the transfer is completed, call i2400m_tx_msg_sent().
*
* Notes:
*
* The size of the TX message to be transmitted might be smaller than
* that of the TX message in the FIFO (in case the header was
* shorter). Hence, we copy it in @bus_size, for the bus layer to
* use. We keep the message's size in i2400m->tx_msg_size so that
* when the bus later is done transferring we know how much to
* advance the fifo.
*
* We collect statistics here as all the data is available and we
* assume it is going to work [see i2400m_tx_msg_sent()].
*/
struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
size_t *bus_size)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg, *tx_msg_moved;
unsigned long flags, pls;
d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
spin_lock_irqsave(&i2400m->tx_lock, flags);
tx_msg_moved = NULL;
if (i2400m->tx_buf == NULL)
goto out_unlock;
skip:
tx_msg_moved = NULL;
if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */
i2400m->tx_in = 0;
i2400m->tx_out = 0;
d_printf(2, dev, "TX: FIFO empty: resetting\n");
goto out_unlock;
}
tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE;
if (tx_msg->size & I2400M_TX_SKIP) { /* skip? */
d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n",
i2400m->tx_out % I2400M_TX_BUF_SIZE,
(size_t) tx_msg->size & ~I2400M_TX_SKIP);
i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
goto skip;
}
if (tx_msg->num_pls == 0) { /* No payloads? */
if (tx_msg == i2400m->tx_msg) { /* open, we are done */
d_printf(2, dev,
"TX: FIFO empty: open msg w/o payloads @%zu\n",
(void *) tx_msg - i2400m->tx_buf);
tx_msg = NULL;
goto out_unlock;
} else { /* closed, skip it */
d_printf(2, dev,
"TX: skip msg w/o payloads @%zu (%zu b)\n",
(void *) tx_msg - i2400m->tx_buf,
(size_t) tx_msg->size);
i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP;
goto skip;
}
}
if (tx_msg == i2400m->tx_msg) /* open msg? */
i2400m_tx_close(i2400m);
/* Now we have a valid TX message (with payloads) to TX */
tx_msg_moved = (void *) tx_msg + tx_msg->offset;
i2400m->tx_msg_size = tx_msg->size;
*bus_size = tx_msg_moved->size;
d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu "
"size %zu bus_size %zu\n",
current->pid, (void *) tx_msg - i2400m->tx_buf,
(size_t) tx_msg->offset, (size_t) tx_msg->size,
(size_t) tx_msg_moved->size);
tx_msg_moved->barker = le32_to_cpu(I2400M_H2D_PREVIEW_BARKER);
tx_msg_moved->sequence = le32_to_cpu(i2400m->tx_sequence++);
pls = le32_to_cpu(tx_msg_moved->num_pls);
i2400m->tx_pl_num += pls; /* Update stats */
if (pls > i2400m->tx_pl_max)
i2400m->tx_pl_max = pls;
if (pls < i2400m->tx_pl_min)
i2400m->tx_pl_min = pls;
i2400m->tx_num++;
i2400m->tx_size_acc += *bus_size;
if (*bus_size < i2400m->tx_size_min)
i2400m->tx_size_min = *bus_size;
if (*bus_size > i2400m->tx_size_max)
i2400m->tx_size_max = *bus_size;
out_unlock:
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	d_fnend(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n",
i2400m, bus_size, *bus_size, tx_msg_moved);
return tx_msg_moved;
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_get);
/**
* i2400m_tx_msg_sent - indicate the transmission of a TX message
*
* @i2400m: device descriptor
*
* Called by the bus-specific driver when a message has been sent;
* this pops it from the FIFO; and as there is space, start the queue
* in case it was stopped.
*
* Should be called even if the message send failed and we are
* dropping this TX message.
*/
void i2400m_tx_msg_sent(struct i2400m *i2400m)
{
unsigned n;
unsigned long flags;
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
spin_lock_irqsave(&i2400m->tx_lock, flags);
if (i2400m->tx_buf == NULL)
goto out_unlock;
i2400m->tx_out += i2400m->tx_msg_size;
d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
i2400m->tx_msg_size = 0;
BUG_ON(i2400m->tx_out > i2400m->tx_in);
	/* level the FIFO markers off */
n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
i2400m->tx_out %= I2400M_TX_BUF_SIZE;
i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
out_unlock:
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
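/*
 * Illustrative sketch, not part of the original file: the shape of a
 * bus-specific i2400m->bus_tx_kick() backend, following the ROADMAP in
 * the file header. example_bus_send() is a hypothetical stand-in for
 * whatever the real transport (USB, SDIO) does to move bus_size bytes
 * to the device. Wrapped in "#if 0" so it serves as documentation only.
 */
#if 0
static void example_bus_tx_kick(struct i2400m *i2400m)
{
	struct i2400m_msg_hdr *tx_msg;
	size_t tx_msg_size;

	while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)) != NULL) {
		example_bus_send(i2400m, tx_msg, tx_msg_size);
		i2400m_tx_msg_sent(i2400m);	/* pop it off the FIFO */
	}
}
#endif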
/**
* i2400m_tx_setup - Initialize the TX queue and infrastructure
 *
 * @i2400m: device descriptor
 *
* Make sure we reset the TX sequence to zero, as when this function
 * is called, the firmware has just been restarted. The same rationale
 * applies to tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
* the memory for TX queue is reallocated.
*/
int i2400m_tx_setup(struct i2400m *i2400m)
{
int result = 0;
void *tx_buf;
unsigned long flags;
/* Do this here only once -- can't do on
* i2400m_hard_start_xmit() as we'll cause race conditions if
* the WS was scheduled on another CPU */
INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
if (tx_buf == NULL) {
result = -ENOMEM;
goto error_kmalloc;
}
/*
* Fail the build if we can't fit at least two maximum size messages
* on the TX FIFO [one being delivered while one is constructed].
*/
BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400m->tx_sequence = 0;
i2400m->tx_in = 0;
i2400m->tx_out = 0;
i2400m->tx_msg_size = 0;
i2400m->tx_msg = NULL;
i2400m->tx_buf = tx_buf;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
/* Huh? the bus layer has to define this... */
BUG_ON(i2400m->bus_tx_block_size == 0);
error_kmalloc:
return result;
}
/**
 * i2400m_tx_release - Tear down the TX queue and infrastructure
 *
 * @i2400m: device descriptor
*/
void i2400m_tx_release(struct i2400m *i2400m)
{
unsigned long flags;
spin_lock_irqsave(&i2400m->tx_lock, flags);
kfree(i2400m->tx_buf);
i2400m->tx_buf = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
}
| gpl-2.0 |
drewx2/android_kernel_htc_dlx | virt/fs/jffs2/compr.c | 10376 | 11923 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
* Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* University of Szeged, Hungary
*
* Created by Arjan van de Ven <arjan@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "compr.h"
static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
/* Available compressors are on this list */
static LIST_HEAD(jffs2_compressor_list);
/* Actual compression mode */
static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
/* Statistics for blocks stored without compression */
static uint32_t none_stat_compr_blocks = 0, none_stat_decompr_blocks = 0,
		none_stat_compr_size = 0;
/*
* Return 1 to use this compression
*/
static int jffs2_is_best_compression(struct jffs2_compressor *this,
struct jffs2_compressor *best, uint32_t size, uint32_t bestsize)
{
switch (jffs2_compression_mode) {
case JFFS2_COMPR_MODE_SIZE:
if (bestsize > size)
return 1;
return 0;
case JFFS2_COMPR_MODE_FAVOURLZO:
if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > size))
return 1;
if ((best->compr != JFFS2_COMPR_LZO) && (bestsize > size))
return 1;
if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > (size * FAVOUR_LZO_PERCENT / 100)))
return 1;
if ((bestsize * FAVOUR_LZO_PERCENT / 100) > size)
return 1;
return 0;
}
/* Shouldn't happen */
return 0;
}
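/*
 * Worked example (added for illustration, assuming FAVOUR_LZO_PERCENT
 * is 80, its usual in-tree value): in FAVOURLZO mode, an LZO result of
 * size = 1000 bytes still beats a non-LZO best of bestsize = 900
 * bytes, because 900 > 1000 * 80 / 100 = 800; LZO is preferred even
 * though it is 100 bytes larger, trading space for cheaper
 * decompression.
 */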
/*
* jffs2_selected_compress:
* @compr: Explicit compression type to use (ie, JFFS2_COMPR_ZLIB).
* If 0, just take the first available compression mode.
* @data_in: Pointer to uncompressed data
* @cpage_out: Pointer to returned pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
* Returns: the compression type used. Zero is used to show that the data
* could not be compressed; probably because we couldn't find the requested
* compression mode.
*/
static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
unsigned char **cpage_out, u32 *datalen, u32 *cdatalen)
{
struct jffs2_compressor *this;
int err, ret = JFFS2_COMPR_NONE;
uint32_t orig_slen, orig_dlen;
char *output_buf;
output_buf = kmalloc(*cdatalen, GFP_KERNEL);
if (!output_buf) {
pr_warn("No memory for compressor allocation. Compression failed.\n");
return ret;
}
orig_slen = *datalen;
orig_dlen = *cdatalen;
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
/* Skip decompress-only and disabled modules */
if (!this->compress || this->disabled)
continue;
/* Skip if not the desired compression type */
if (compr && (compr != this->compr))
continue;
/*
* Either compression type was unspecified, or we found our
* compressor; either way, we're good to go.
*/
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
*datalen = orig_slen;
*cdatalen = orig_dlen;
err = this->compress(data_in, output_buf, datalen, cdatalen);
spin_lock(&jffs2_compressor_list_lock);
this->usecount--;
if (!err) {
/* Success */
ret = this->compr;
this->stat_compr_blocks++;
this->stat_compr_orig_size += *datalen;
this->stat_compr_new_size += *cdatalen;
break;
}
}
spin_unlock(&jffs2_compressor_list_lock);
if (ret == JFFS2_COMPR_NONE)
kfree(output_buf);
else
*cpage_out = output_buf;
return ret;
}
/* jffs2_compress:
* @data_in: Pointer to uncompressed data
* @cpage_out: Pointer to returned pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
* Returns: Lower byte to be stored with data indicating compression type used.
* Zero is used to show that the data could not be compressed - the
* compressed version was actually larger than the original.
* Upper byte will be used later. (soon)
*
* If the cdata buffer isn't large enough to hold all the uncompressed data,
* jffs2_compress should compress as much as will fit, and should set
 * *datalen accordingly to show the amount of data which was compressed.
*/
uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
unsigned char *data_in, unsigned char **cpage_out,
uint32_t *datalen, uint32_t *cdatalen)
{
int ret = JFFS2_COMPR_NONE;
int mode, compr_ret;
struct jffs2_compressor *this, *best=NULL;
unsigned char *output_buf = NULL, *tmp_buf;
uint32_t orig_slen, orig_dlen;
uint32_t best_slen=0, best_dlen=0;
if (c->mount_opts.override_compr)
mode = c->mount_opts.compr;
else
mode = jffs2_compression_mode;
switch (mode) {
case JFFS2_COMPR_MODE_NONE:
break;
case JFFS2_COMPR_MODE_PRIORITY:
ret = jffs2_selected_compress(0, data_in, cpage_out, datalen,
cdatalen);
break;
case JFFS2_COMPR_MODE_SIZE:
case JFFS2_COMPR_MODE_FAVOURLZO:
orig_slen = *datalen;
orig_dlen = *cdatalen;
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
/* Skip decompress-only backwards-compatibility and disabled modules */
if ((!this->compress)||(this->disabled))
continue;
			/* Allocate memory for the output buffer if necessary */
if ((this->compr_buf_size < orig_slen) && (this->compr_buf)) {
spin_unlock(&jffs2_compressor_list_lock);
kfree(this->compr_buf);
spin_lock(&jffs2_compressor_list_lock);
this->compr_buf_size=0;
this->compr_buf=NULL;
}
if (!this->compr_buf) {
spin_unlock(&jffs2_compressor_list_lock);
tmp_buf = kmalloc(orig_slen, GFP_KERNEL);
spin_lock(&jffs2_compressor_list_lock);
if (!tmp_buf) {
pr_warn("No memory for compressor allocation. (%d bytes)\n",
orig_slen);
continue;
}
else {
this->compr_buf = tmp_buf;
this->compr_buf_size = orig_slen;
}
}
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
*datalen = orig_slen;
*cdatalen = orig_dlen;
compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen);
spin_lock(&jffs2_compressor_list_lock);
this->usecount--;
if (!compr_ret) {
if (((!best_dlen) || jffs2_is_best_compression(this, best, *cdatalen, best_dlen))
&& (*cdatalen < *datalen)) {
best_dlen = *cdatalen;
best_slen = *datalen;
best = this;
}
}
}
if (best_dlen) {
*cdatalen = best_dlen;
*datalen = best_slen;
output_buf = best->compr_buf;
best->compr_buf = NULL;
best->compr_buf_size = 0;
best->stat_compr_blocks++;
best->stat_compr_orig_size += best_slen;
best->stat_compr_new_size += best_dlen;
ret = best->compr;
*cpage_out = output_buf;
}
spin_unlock(&jffs2_compressor_list_lock);
break;
case JFFS2_COMPR_MODE_FORCELZO:
ret = jffs2_selected_compress(JFFS2_COMPR_LZO, data_in,
cpage_out, datalen, cdatalen);
break;
case JFFS2_COMPR_MODE_FORCEZLIB:
ret = jffs2_selected_compress(JFFS2_COMPR_ZLIB, data_in,
cpage_out, datalen, cdatalen);
break;
default:
pr_err("unknown compression mode\n");
}
if (ret == JFFS2_COMPR_NONE) {
*cpage_out = data_in;
*datalen = *cdatalen;
none_stat_compr_blocks++;
none_stat_compr_size += *datalen;
}
return ret;
}
int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
uint16_t comprtype, unsigned char *cdata_in,
unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
{
struct jffs2_compressor *this;
int ret;
/* Older code had a bug where it would write non-zero 'usercompr'
fields. Deal with it. */
if ((comprtype & 0xff) <= JFFS2_COMPR_ZLIB)
comprtype &= 0xff;
switch (comprtype & 0xff) {
case JFFS2_COMPR_NONE:
/* This should be special-cased elsewhere, but we might as well deal with it */
memcpy(data_out, cdata_in, datalen);
none_stat_decompr_blocks++;
break;
case JFFS2_COMPR_ZERO:
memset(data_out, 0, datalen);
break;
default:
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
if (comprtype == this->compr) {
this->usecount++;
spin_unlock(&jffs2_compressor_list_lock);
ret = this->decompress(cdata_in, data_out, cdatalen, datalen);
spin_lock(&jffs2_compressor_list_lock);
if (ret) {
pr_warn("Decompressor \"%s\" returned %d\n",
this->name, ret);
}
else {
this->stat_decompr_blocks++;
}
this->usecount--;
spin_unlock(&jffs2_compressor_list_lock);
return ret;
}
}
pr_warn("compression type 0x%02x not available\n", comprtype);
spin_unlock(&jffs2_compressor_list_lock);
return -EIO;
}
return 0;
}
int jffs2_register_compressor(struct jffs2_compressor *comp)
{
struct jffs2_compressor *this;
if (!comp->name) {
pr_warn("NULL compressor name at registering JFFS2 compressor. Failed.\n");
return -1;
}
comp->compr_buf_size=0;
comp->compr_buf=NULL;
comp->usecount=0;
comp->stat_compr_orig_size=0;
comp->stat_compr_new_size=0;
comp->stat_compr_blocks=0;
comp->stat_decompr_blocks=0;
jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
list_for_each_entry(this, &jffs2_compressor_list, list) {
if (this->priority < comp->priority) {
list_add(&comp->list, this->list.prev);
goto out;
}
}
list_add_tail(&comp->list, &jffs2_compressor_list);
out:
D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
})
spin_unlock(&jffs2_compressor_list_lock);
return 0;
}
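/*
 * Illustrative sketch, not part of the original file: the minimal shape
 * of a compressor registration. The name, priority, callbacks and the
 * JFFS2_COMPR_* id are placeholders; a real module supplies working
 * compress()/decompress() implementations (see compr.h for the exact
 * callback signatures). Wrapped in "#if 0" so it serves as
 * documentation only.
 */
#if 0
static struct jffs2_compressor example_compressor = {
	.priority = 100,		/* higher priority is tried first */
	.name = "example",
	.compr = JFFS2_COMPR_DYNRUBIN,	/* placeholder id */
	.compress = example_compress,	/* hypothetical callbacks */
	.decompress = example_decompress,
};

static int __init example_comp_init(void)
{
	return jffs2_register_compressor(&example_compressor);
}
#endif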
int jffs2_unregister_compressor(struct jffs2_compressor *comp)
{
D2(struct jffs2_compressor *this);
jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name);
spin_lock(&jffs2_compressor_list_lock);
if (comp->usecount) {
spin_unlock(&jffs2_compressor_list_lock);
pr_warn("Compressor module is in use. Unregister failed.\n");
return -1;
}
list_del(&comp->list);
D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
})
spin_unlock(&jffs2_compressor_list_lock);
return 0;
}
void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
{
if (orig != comprbuf)
kfree(comprbuf);
}
int __init jffs2_compressors_init(void)
{
/* Registering compressors */
#ifdef CONFIG_JFFS2_ZLIB
jffs2_zlib_init();
#endif
#ifdef CONFIG_JFFS2_RTIME
jffs2_rtime_init();
#endif
#ifdef CONFIG_JFFS2_RUBIN
jffs2_rubinmips_init();
jffs2_dynrubin_init();
#endif
#ifdef CONFIG_JFFS2_LZO
jffs2_lzo_init();
#endif
/* Setting default compression mode */
#ifdef CONFIG_JFFS2_CMODE_NONE
jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
jffs2_dbg(1, "default compression mode: none\n");
#else
#ifdef CONFIG_JFFS2_CMODE_SIZE
jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
jffs2_dbg(1, "default compression mode: size\n");
#else
#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO;
jffs2_dbg(1, "default compression mode: favourlzo\n");
#else
jffs2_dbg(1, "default compression mode: priority\n");
#endif
#endif
#endif
return 0;
}
int jffs2_compressors_exit(void)
{
/* Unregistering compressors */
#ifdef CONFIG_JFFS2_LZO
jffs2_lzo_exit();
#endif
#ifdef CONFIG_JFFS2_RUBIN
jffs2_dynrubin_exit();
jffs2_rubinmips_exit();
#endif
#ifdef CONFIG_JFFS2_RTIME
jffs2_rtime_exit();
#endif
#ifdef CONFIG_JFFS2_ZLIB
jffs2_zlib_exit();
#endif
return 0;
}
| gpl-2.0 |
MoKee/android_kernel_huawei_msm8928 | drivers/net/wireless/ath/ath5k/caps.c | 10632 | 4390 | /*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/**************\
* Capabilities *
\**************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "../regd.h"
/*
* Fill the capabilities struct
* TODO: Merge this with EEPROM code when we are done with it
*/
int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
struct ath5k_capabilities *caps = &ah->ah_capabilities;
u16 ee_header;
/* Capabilities stored in the EEPROM */
ee_header = caps->cap_eeprom.ee_header;
if (ah->ah_version == AR5K_AR5210) {
/*
* Set radio capabilities
* (The AR5110 only supports the middle 5GHz band)
*/
caps->cap_range.range_5ghz_min = 5120;
caps->cap_range.range_5ghz_max = 5430;
caps->cap_range.range_2ghz_min = 0;
caps->cap_range.range_2ghz_max = 0;
/* Set supported modes */
__set_bit(AR5K_MODE_11A, caps->cap_mode);
} else {
/*
* XXX The transceiver supports frequencies from 4920 to 6100MHz
* XXX and from 2312 to 2732MHz. There are problems with the
* XXX current ieee80211 implementation because the IEEE
* XXX channel mapping does not support negative channel
* XXX numbers (2312MHz is channel -19). Of course, this
* XXX doesn't matter because these channels are out of the
* XXX legal range.
*/
/*
* Set radio capabilities
*/
if (AR5K_EEPROM_HDR_11A(ee_header)) {
if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
caps->cap_range.range_5ghz_min = 4920;
else
caps->cap_range.range_5ghz_min = 5005;
caps->cap_range.range_5ghz_max = 6100;
/* Set supported modes */
__set_bit(AR5K_MODE_11A, caps->cap_mode);
}
/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
* connected */
if (AR5K_EEPROM_HDR_11B(ee_header) ||
(AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)) {
/* 2312 */
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
/* Override 2GHz modes on SoCs that need it
* NOTE: cap_needs_2GHz_ovr gets set from
* ath_ahb_probe */
if (!caps->cap_needs_2GHz_ovr) {
if (AR5K_EEPROM_HDR_11B(ee_header))
__set_bit(AR5K_MODE_11B,
caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
__set_bit(AR5K_MODE_11G,
caps->cap_mode);
}
}
}
if ((ah->ah_radio_5ghz_revision & 0xf0) == AR5K_SREV_RAD_2112)
__clear_bit(AR5K_MODE_11A, caps->cap_mode);
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
/* MACs since AR5212 have MRR support */
if (ah->ah_version == AR5K_AR5212)
caps->cap_has_mrr_support = true;
else
caps->cap_has_mrr_support = false;
return 0;
}
/*
* TODO: Following functions should be part of a new function
* set_capability
*/
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
u16 assoc_id)
{
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
return 0;
}
return -EIO;
}
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
return 0;
}
return -EIO;
}
| gpl-2.0 |
weera00/xbmc | xbmc/utils/Stopwatch.cpp | 137 | 1634 | /*
* Copyright (C) 2005-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "Stopwatch.h"
#if defined(TARGET_POSIX)
#include "threads/SystemClock.h"
#if !defined(TARGET_DARWIN) && !defined(TARGET_FREEBSD)
#include <sys/sysinfo.h>
#endif
#endif
#include "utils/TimeUtils.h"
CStopWatch::CStopWatch(bool useFrameTime /*=false*/)
{
m_timerPeriod = 0.0f;
m_startTick = 0;
m_stopTick = 0;
m_isRunning = false;
m_useFrameTime = useFrameTime;
#ifdef TARGET_POSIX
m_timerPeriod = 1.0f / 1000.0f; // we want seconds
#else
if (m_useFrameTime)
m_timerPeriod = 1.0f / 1000.0f; //frametime is in milliseconds
else
m_timerPeriod = 1.0f / (float)CurrentHostFrequency();
#endif
}
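// Editorial note: m_timerPeriod converts raw tick deltas to seconds.
// For example, with a hypothetical 10 MHz host counter, a delta of
// 5,000,000 ticks corresponds to 5,000,000 * (1.0f / 10,000,000) = 0.5s.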
CStopWatch::~CStopWatch()
{
}
int64_t CStopWatch::GetTicks() const
{
if (m_useFrameTime)
return CTimeUtils::GetFrameTime();
#ifndef TARGET_POSIX
return CurrentHostCounter();
#else
return XbmcThreads::SystemClockMillis();
#endif
}
| gpl-2.0 |
CyanogenMod/htc-kernel-liberty | arch/avr32/boards/hammerhead/setup.c | 137 | 6007 | /*
* Board-specific setup code for the Miromico Hammerhead board
*
* Copyright (C) 2008 Miromico AG
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/atmel-mci.h>
#include <linux/clk.h>
#include <linux/fb.h>
#include <linux/etherdevice.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/spi/spi.h>
#include <video/atmel_lcdc.h>
#include <linux/io.h>
#include <asm/setup.h>
#include <mach/at32ap700x.h>
#include <mach/board.h>
#include <mach/init.h>
#include <mach/portmux.h>
#include "../../mach-at32ap/clock.h"
#include "flash.h"
/* Oscillator frequencies. These are board-specific */
unsigned long at32_board_osc_rates[3] = {
[0] = 32768, /* 32.768 kHz on RTC osc */
[1] = 25000000, /* 25MHz on osc0 */
[2] = 12000000, /* 12 MHz on osc1 */
};
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
#ifdef CONFIG_BOARD_HAMMERHEAD_LCD
static struct fb_videomode __initdata hda350tlv_modes[] = {
{
.name = "320x240 @ 75",
.refresh = 75,
.xres = 320,
.yres = 240,
.pixclock = KHZ2PICOS(6891),
.left_margin = 48,
.right_margin = 18,
.upper_margin = 18,
.lower_margin = 4,
.hsync_len = 20,
.vsync_len = 2,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
},
};
static struct fb_monspecs __initdata hammerhead_hda350t_monspecs = {
.manufacturer = "HAN",
.monitor = "HDA350T-LV",
.modedb = hda350tlv_modes,
.modedb_len = ARRAY_SIZE(hda350tlv_modes),
.hfmin = 14900,
.hfmax = 22350,
.vfmin = 60,
.vfmax = 90,
.dclkmax = 10000000,
};
struct atmel_lcdfb_info __initdata hammerhead_lcdc_data = {
.default_bpp = 24,
.default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN,
.default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT
| ATMEL_LCDC_INVCLK
| ATMEL_LCDC_CLKMOD_ALWAYSACTIVE
| ATMEL_LCDC_MEMOR_BIG),
.default_monspecs = &hammerhead_hda350t_monspecs,
.guard_time = 2,
};
#endif
static struct mci_platform_data __initdata mci0_data = {
.slot[0] = {
.bus_width = 4,
.detect_pin = -ENODEV,
.wp_pin = -ENODEV,
},
};
struct eth_addr {
u8 addr[6];
};
static struct eth_addr __initdata hw_addr[1];
static struct eth_platform_data __initdata eth_data[1];
/*
* The next two functions should go away as the boot loader is
* supposed to initialize the macb address registers with a valid
* ethernet address. But we need to keep them around for a while until
* we can be reasonably sure the boot loader does this.
*
* The phy_id is ignored as the driver will probe for it.
*/
static int __init parse_tag_ethernet(struct tag *tag)
{
int i = tag->u.ethernet.mac_index;
if (i < ARRAY_SIZE(hw_addr))
memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
sizeof(hw_addr[i].addr));
return 0;
}
__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
static void __init set_hw_addr(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
const u8 *addr;
void __iomem *regs;
struct clk *pclk;
if (!res)
return;
if (pdev->id >= ARRAY_SIZE(hw_addr))
return;
addr = hw_addr[pdev->id].addr;
if (!is_valid_ether_addr(addr))
return;
/*
* Since this is board-specific code, we'll cheat and use the
* physical address directly as we happen to know that it's
* the same as the virtual address.
*/
regs = (void __iomem __force *)res->start;
pclk = clk_get(&pdev->dev, "pclk");
if (!pclk)
return;
clk_enable(pclk);
__raw_writel((addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) |
addr[0], regs + 0x98);
__raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
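/* Editorial example: for MAC 00:11:22:33:44:55, the two writes above
 * store 0x33221100 at offset 0x98 and 0x00005544 at offset 0x9c
 * (assumed to be the MACB address register pair).
 */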
clk_disable(pclk);
clk_put(pclk);
}
void __init setup_board(void)
{
at32_map_usart(1, 0); /* USART 1: /dev/ttyS0, DB9 */
at32_setup_serial_console(0);
}
static struct i2c_gpio_platform_data i2c_gpio_data = {
.sda_pin = GPIO_PIN_PA(6),
.scl_pin = GPIO_PIN_PA(7),
.sda_is_open_drain = 1,
.scl_is_open_drain = 1,
.udelay = 2, /* close to 100 kHz */
};
static struct platform_device i2c_gpio_device = {
.name = "i2c-gpio",
.id = 0,
.dev = { .platform_data = &i2c_gpio_data, },
};
static struct i2c_board_info __initdata i2c_info[] = {};
#ifdef CONFIG_BOARD_HAMMERHEAD_SND
static struct ac97c_platform_data ac97c_data = {
.reset_pin = GPIO_PIN_PA(16),
};
#endif
static int __init hammerhead_init(void)
{
/*
* Hammerhead uses 32-bit SDRAM interface. Reserve the
* SDRAM-specific pins so that nobody messes with them.
*/
at32_reserve_pin(GPIO_PIOE_BASE, ATMEL_EBI_PE_DATA_ALL);
at32_add_device_usart(0);
/* Reserve PB29 (GCLK3). This pin is used as clock source
* for ETH PHY (25MHz). GCLK3 setup is done by U-Boot.
*/
at32_reserve_pin(GPIO_PIOB_BASE, (1<<29));
/*
* Hammerhead uses only one ethernet port, so we don't set
* address of second port
*/
set_hw_addr(at32_add_device_eth(0, ð_data[0]));
#ifdef CONFIG_BOARD_HAMMERHEAD_FPGA
at32_add_device_hh_fpga();
#endif
at32_add_device_mci(0, &mci0_data);
#ifdef CONFIG_BOARD_HAMMERHEAD_USB
at32_add_device_usba(0, NULL);
#endif
#ifdef CONFIG_BOARD_HAMMERHEAD_LCD
at32_add_device_lcdc(0, &hammerhead_lcdc_data, fbmem_start,
fbmem_size, ATMEL_LCDC_PRI_24BIT);
#endif
at32_select_gpio(i2c_gpio_data.sda_pin,
AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT |
AT32_GPIOF_HIGH);
at32_select_gpio(i2c_gpio_data.scl_pin,
AT32_GPIOF_MULTIDRV | AT32_GPIOF_OUTPUT |
AT32_GPIOF_HIGH);
platform_device_register(&i2c_gpio_device);
i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info));
#ifdef CONFIG_BOARD_HAMMERHEAD_SND
at32_add_device_ac97c(0, &ac97c_data);
#endif
/* Select the Touchscreen interrupt pin mode */
at32_select_periph(GPIO_PIOB_BASE, 0x08000000, GPIO_PERIPH_A, 0);
return 0;
}
postcore_initcall(hammerhead_init);
| gpl-2.0 |
kozmikkick/tripndroid-endeavoru-3.6 | drivers/net/ethernet/3com/typhoon.c | 137 | 72522 | /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
/*
Written 2002-2004 by David Dillow <dave@thedillows.org>
Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice. This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.
This software is available on a public web site. It may enable
cryptographic capabilities of the 3Com hardware, and may be
exported from the United States under License Exception "TSU"
pursuant to 15 C.F.R. Section 740.13(e).
This work was funded by the National Library of Medicine under
the Department of Energy project number 0274DD06D1 and NLM project
number Y1-LM-2015-01.
This driver is designed for the 3Com 3CR990 Family of cards with the
3XP Processor. It has been tested on x86 and sparc64.
KNOWN ISSUES:
*) Cannot DMA Rx packets to a 2 byte aligned address. This is also a
firmware issue. Hopefully 3Com will fix it.
*) Waiting for a command response takes 8ms due to non-preemptable
polling. Only significant for getting stats and creating
SAs, but an ugly wart nevertheless.
TODO:
*) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
*) Add more support for ethtool (especially for NIC stats)
*) Allow disabling of RX checksum offloading
*) Fix MAC changing to work while the interface is up
(Need to put commands on the TX ring, which changes
the locking)
*) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
*/
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
* Setting to > 1518 effectively disables this feature.
*/
static int rx_copybreak = 200;
/* Should we use MMIO or Port IO?
* 0: Port IO
* 1: MMIO
* 2: Try MMIO, fallback to Port IO
*/
static unsigned int use_mmio = 2;
/* end user-configurable values */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
*/
static const int multicast_filter_limit = 32;
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency.
* The compiler will convert <unsigned>'%'<2^N> into a bit mask.
* Making the Tx ring too large decreases the effectiveness of channel
* bonding and packet priority.
* There are no ill effects from too-large receive rings.
*
* We don't currently use the Hi Tx ring so, don't make it very big.
*
* Beware that if we start using the Hi Tx ring, we will need to change
* typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
*/
#define TXHI_ENTRIES 2
#define TXLO_ENTRIES 128
#define RX_ENTRIES 32
#define COMMAND_ENTRIES 16
#define RESPONSE_ENTRIES 32
#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
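/* Editorial illustration: because the entry counts above are powers of
 * two (and assuming sizeof(struct cmd_desc) is as well), an index update
 * such as
 *	index % COMMAND_RING_SIZE
 * compiles down to a single mask, equivalent to
 *	index & (COMMAND_RING_SIZE - 1)
 */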
/* The 3XP will preload and remove 64 entries from the free buffer
* list, and we need one entry to keep the ring from wrapping, so
* to keep this a power of two, we use 128 entries.
*/
#define RXFREE_ENTRIES 128
#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)
#define PKT_BUF_SZ 1536
#define FIRMWARE_NAME "3com/typhoon.bin"
#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
"the buffer given back to the NIC. Default "
"is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
"Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
struct typhoon_card_info {
const char *name;
const int capabilities;
};
#define TYPHOON_CRYPTO_NONE 0x00
#define TYPHOON_CRYPTO_DES 0x01
#define TYPHOON_CRYPTO_3DES 0x02
#define TYPHOON_CRYPTO_VARIABLE 0x04
#define TYPHOON_FIBER 0x08
#define TYPHOON_WAKEUP_NEEDS_RESET 0x10
enum typhoon_cards {
TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
TYPHOON_FXM,
};
/* directly indexed by enum typhoon_cards, above */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
{ "3Com Typhoon (3C990-TX)",
TYPHOON_CRYPTO_NONE},
{ "3Com Typhoon (3CR990-TX-95)",
TYPHOON_CRYPTO_DES},
{ "3Com Typhoon (3CR990-TX-97)",
TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
{ "3Com Typhoon (3C990SVR)",
TYPHOON_CRYPTO_NONE},
{ "3Com Typhoon (3CR990SVR95)",
TYPHOON_CRYPTO_DES},
{ "3Com Typhoon (3CR990SVR97)",
TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
{ "3Com Typhoon2 (3C990B-TX-M)",
TYPHOON_CRYPTO_VARIABLE},
{ "3Com Typhoon2 (3C990BSVR)",
TYPHOON_CRYPTO_VARIABLE},
{ "3Com Typhoon (3CR990-FX-95)",
TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
{ "3Com Typhoon (3CR990-FX-97)",
TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
{ "3Com Typhoon (3CR990-FX-95 Server)",
TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
{ "3Com Typhoon (3CR990-FX-97 Server)",
TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
{ "3Com Typhoon2 (3C990B-FX-97)",
TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
/* Notes on the new subsystem numbering scheme:
* bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
* bit 4 indicates if this card has secured firmware (we don't support it)
* bit 8 indicates if this is a (0) copper or (1) fiber card
* bits 12-16 indicate card type: (0) client and (1) server
*/
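/* Editorial example: subsystem id 0x2102 (the 3CR990-FX-97 Server entry
 * below) has bits 0-1 = 2 (3DES capable) and bit 8 set (fiber), matching
 * its TYPHOON_CRYPTO_3DES | TYPHOON_FIBER capability flags.
 */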
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
/* Define the shared memory area
* Align everything the 3XP will normally be using.
* We'll need to move/align txHi if we start using that ring.
*/
#define __3xp_aligned ____cacheline_aligned
struct typhoon_shared {
struct typhoon_interface iface;
struct typhoon_indexes indexes __3xp_aligned;
struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
u32 zeroWord;
struct tx_desc txHi[TXHI_ENTRIES];
} __packed;
struct rxbuff_ent {
struct sk_buff *skb;
dma_addr_t dma_addr;
};
struct typhoon {
/* Tx cache line section */
struct transmit_ring txLoRing ____cacheline_aligned;
struct pci_dev * tx_pdev;
void __iomem *tx_ioaddr;
u32 txlo_dma_addr;
/* Irq/Rx cache line section */
void __iomem *ioaddr ____cacheline_aligned;
struct typhoon_indexes *indexes;
u8 awaiting_resp;
u8 duplex;
u8 speed;
u8 card_state;
struct basic_ring rxLoRing;
struct pci_dev * pdev;
struct net_device * dev;
struct napi_struct napi;
struct basic_ring rxHiRing;
struct basic_ring rxBuffRing;
struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
/* general section */
spinlock_t command_lock ____cacheline_aligned;
struct basic_ring cmdRing;
struct basic_ring respRing;
struct net_device_stats stats;
struct net_device_stats stats_saved;
struct typhoon_shared * shared;
dma_addr_t shared_dma;
__le16 xcvr_select;
__le16 wol_events;
__le32 offload;
/* unused stuff (future use) */
int capabilities;
struct transmit_ring txHiRing;
};
enum completion_wait_values {
NoWait = 0, WaitNoSleep, WaitSleep,
};
/* These are the values for the typhoon.card_state variable.
* These determine where the statistics will come from in get_stats().
* The sleep image does not support the statistics we need.
*/
enum state_values {
Sleeping = 0, Running,
};
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
* cannot pass a read, so this forces current writes to post.
*/
#define typhoon_post_pci_writes(x) \
do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
/* We'll wait up to six seconds for a reset, and half a second normally.
*/
#define TYPHOON_UDELAY 50
#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
#if defined(NETIF_F_TSO)
#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS 2
#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 0
#define skb_tso_size(x) 0
#define TSO_NUM_DESCRIPTORS 0
#define TSO_OFFLOAD_ON 0
#endif
static inline void
typhoon_inc_index(u32 *index, const int count, const int num_entries)
{
/* Increment a ring index -- we can use this for all rings except
* the Rx rings, as they use different size descriptors;
* otherwise, everything is the same size as a cmd_desc
*/
*index += count * sizeof(struct cmd_desc);
*index %= num_entries * sizeof(struct cmd_desc);
}
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
/* if we start using the Hi Tx ring, this needs updating */
typhoon_inc_index(index, count, TXLO_ENTRIES);
}
static inline void
typhoon_inc_rx_index(u32 *index, const int count)
{
/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
*index += count * sizeof(struct rx_desc);
*index %= RX_ENTRIES * sizeof(struct rx_desc);
}
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
int i, err = 0;
int timeout;
if(wait_type == WaitNoSleep)
timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
else
timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
typhoon_post_pci_writes(ioaddr);
udelay(1);
iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
if(wait_type != NoWait) {
for(i = 0; i < timeout; i++) {
if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
TYPHOON_STATUS_WAITING_FOR_HOST)
goto out;
if(wait_type == WaitSleep)
schedule_timeout_uninterruptible(1);
else
udelay(TYPHOON_UDELAY);
}
err = -ETIMEDOUT;
}
out:
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
/* The 3XP seems to need a little extra time to complete the load
* of the sleep image before we can reliably boot it. Failure to
* do this occasionally results in a hung adapter after boot in
* typhoon_init_one() while trying to read the MAC address or
* putting the card to sleep. 3Com's driver waits 5ms, but
* that seems to be overkill. However, if we can sleep, we might
* as well give it that much time. Otherwise, we'll give it 500us,
* which should be enough (I've seen it work well at 100us, but still
* saw occasional problems.)
*/
if(wait_type == WaitSleep)
msleep(5);
else
udelay(500);
return err;
}
static int
typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
{
int i, err = 0;
for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
goto out;
udelay(TYPHOON_UDELAY);
}
err = -ETIMEDOUT;
out:
return err;
}
static inline void
typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
{
if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
}
static inline void
typhoon_hello(struct typhoon *tp)
{
struct basic_ring *ring = &tp->cmdRing;
struct cmd_desc *cmd;
/* We only get a hello request if we've not sent anything to the
* card in a long while. If the lock is held, then we're in the
* process of issuing a command, so we don't need to respond.
*/
if(spin_trylock(&tp->command_lock)) {
cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
typhoon_inc_cmd_index(&ring->lastWrite, 1);
INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
wmb();
iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
spin_unlock(&tp->command_lock);
}
}
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
struct resp_desc *resp_save)
{
struct typhoon_indexes *indexes = tp->indexes;
struct resp_desc *resp;
u8 *base = tp->respRing.ringBase;
int count, len, wrap_len;
u32 cleared;
u32 ready;
cleared = le32_to_cpu(indexes->respCleared);
ready = le32_to_cpu(indexes->respReady);
while(cleared != ready) {
resp = (struct resp_desc *)(base + cleared);
count = resp->numDesc + 1;
if(resp_save && resp->seqNo) {
if(count > resp_size) {
resp_save->flags = TYPHOON_RESP_ERROR;
goto cleanup;
}
wrap_len = 0;
len = count * sizeof(*resp);
if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
wrap_len = cleared + len - RESPONSE_RING_SIZE;
len = RESPONSE_RING_SIZE - cleared;
}
memcpy(resp_save, resp, len);
if(unlikely(wrap_len)) {
resp_save += len / sizeof(*resp);
memcpy(resp_save, base, wrap_len);
}
resp_save = NULL;
} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
typhoon_media_status(tp->dev, resp);
} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
typhoon_hello(tp);
} else {
netdev_err(tp->dev,
"dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
le16_to_cpu(resp->cmd),
resp->numDesc, resp->flags,
le16_to_cpu(resp->parm1),
le32_to_cpu(resp->parm2),
le32_to_cpu(resp->parm3));
}
cleanup:
typhoon_inc_resp_index(&cleared, count);
}
indexes->respCleared = cpu_to_le32(cleared);
wmb();
return resp_save == NULL;
}
static inline int
typhoon_num_free(int lastWrite, int lastRead, int ringSize)
{
/* this works for all descriptors but rx_desc, as they are a
* different size than the cmd_desc -- everyone else is the same
*/
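/* Worked example (editorial): with ringSize = 16 entries, lastRead at
 * entry 3 and lastWrite at entry 5, (16 + 3 - 5 - 1) % 16 = 13 entries
 * are free -- one slot always stays unused so that a full ring is
 * distinguishable from an empty one.
 */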
lastWrite /= sizeof(struct cmd_desc);
lastRead /= sizeof(struct cmd_desc);
return (ringSize + lastRead - lastWrite - 1) % ringSize;
}
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
int lastWrite = tp->cmdRing.lastWrite;
int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
int respReady = le32_to_cpu(tp->indexes->respReady);
int respCleared = le32_to_cpu(tp->indexes->respCleared);
return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
/* if we start using the Hi Tx ring, this needs updating */
return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
int num_resp, struct resp_desc *resp)
{
struct typhoon_indexes *indexes = tp->indexes;
struct basic_ring *ring = &tp->cmdRing;
struct resp_desc local_resp;
int i, err = 0;
int got_resp;
int freeCmd, freeResp;
int len, wrap_len;
spin_lock(&tp->command_lock);
freeCmd = typhoon_num_free_cmd(tp);
freeResp = typhoon_num_free_resp(tp);
if(freeCmd < num_cmd || freeResp < num_resp) {
netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
freeCmd, num_cmd, freeResp, num_resp);
err = -ENOMEM;
goto out;
}
if(cmd->flags & TYPHOON_CMD_RESPOND) {
/* If we're expecting a response, but the caller hasn't given
* us a place to put it, we'll provide one.
*/
tp->awaiting_resp = 1;
if(resp == NULL) {
resp = &local_resp;
num_resp = 1;
}
}
wrap_len = 0;
len = num_cmd * sizeof(*cmd);
if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
len = COMMAND_RING_SIZE - ring->lastWrite;
}
memcpy(ring->ringBase + ring->lastWrite, cmd, len);
if(unlikely(wrap_len)) {
struct cmd_desc *wrap_ptr = cmd;
wrap_ptr += len / sizeof(*cmd);
memcpy(ring->ringBase, wrap_ptr, wrap_len);
}
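/* Editorial example: if lastWrite sits two descriptors from the end of
 * the ring and num_cmd = 3, the first memcpy() covers the two tail
 * slots (len) and the second copies the remaining descriptor (wrap_len)
 * to the start of the ring.
 */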
typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
/* "I feel a presence... another warrior is on the mesa."
*/
wmb();
iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
typhoon_post_pci_writes(tp->ioaddr);
if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
goto out;
/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
* preempt or do anything other than take interrupts. So, don't
* wait for a response unless you have to.
*
* I've thought about trying to sleep here, but we're called
* from many contexts that don't allow that. Also, given the way
* 3Com has implemented irq coalescing, we would likely timeout --
* this has been observed in real life!
*
* The big killer is we have to wait to get stats from the card,
* though we could go to a periodic refresh of those if we don't
* mind them getting somewhat stale. The rest of the waiting
* commands occur during open/close/suspend/resume, so they aren't
* time critical. Creating SAs in the future will also have to
* wait here.
*/
got_resp = 0;
for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
if(indexes->respCleared != indexes->respReady)
got_resp = typhoon_process_response(tp, num_resp,
resp);
udelay(TYPHOON_UDELAY);
}
if(!got_resp) {
err = -ETIMEDOUT;
goto out;
}
/* Collect the error response even if we don't care about the
* rest of the response
*/
if(resp->flags & TYPHOON_RESP_ERROR)
err = -EIO;
out:
if(tp->awaiting_resp) {
tp->awaiting_resp = 0;
smp_wmb();
/* Ugh. If a response was added to the ring between
* the call to typhoon_process_response() and the clearing
* of tp->awaiting_resp, we could have missed the interrupt
* and it could hang in the ring an indeterminate amount of
* time. So, check for it, and interrupt ourselves if this
* is the case.
*/
if(indexes->respCleared != indexes->respReady)
iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
}
spin_unlock(&tp->command_lock);
return err;
}
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
u32 ring_dma)
{
struct tcpopt_desc *tcpd;
u32 tcpd_offset = ring_dma;
tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
tcpd_offset += txRing->lastWrite;
tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
tcpd->numDesc = 1;
tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
tcpd->bytesTx = cpu_to_le32(skb->len);
tcpd->status = 0;
}
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
struct transmit_ring *txRing;
struct tx_desc *txd, *first_txd;
dma_addr_t skb_dma;
int numDesc;
/* we have two rings to choose from, but we only use txLo for now
* If we start using the Hi ring as well, we'll need to update
* typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
* and TXHI_ENTRIES to match, as well as update the TSO code below
* to get the right DMA address
*/
txRing = &tp->txLoRing;
/* We need one descriptor for each fragment of the sk_buff, plus the
* one for the ->data area of it.
*
* The docs say a maximum of 16 fragment descriptors per TCP option
* descriptor, then make a new packet descriptor and option descriptor
* for the next 16 fragments. The engineers say just an option
* descriptor is needed. I've tested up to 26 fragments with a single
* packet descriptor/option descriptor combo, so I use that for now.
*
* If problems develop with TSO, check this first.
*/
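/* Worked example (editorial): a TSO skb with 3 page fragments needs
 * 3 + 1 = 4 fragment descriptors plus one TSO option descriptor, so
 * numDesc = 5; the initial Tx header descriptor is accounted for
 * separately in the free-space check below.
 */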
numDesc = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb))
numDesc++;
/* When checking for free space in the ring, we need to also
* account for the initial Tx descriptor, and we always must leave
* at least one descriptor unused in the ring so that it doesn't
* wrap and look empty.
*
* The only time we should loop here is when we hit the race
* between marking the queue awake and updating the cleared index.
* Just loop and it will appear. This comes from the acenic driver.
*/
while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
smp_rmb();
first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
first_txd->numDesc = 0;
first_txd->len = 0;
first_txd->tx_addr = (u64)((unsigned long) skb);
first_txd->processFlags = 0;
if(skb->ip_summed == CHECKSUM_PARTIAL) {
/* The 3XP will figure out if this is UDP/TCP */
first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
}
if(vlan_tx_tag_present(skb)) {
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
if (skb_is_gso(skb)) {
first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
first_txd->numDesc++;
typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
}
txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
/* No need to worry about padding packet -- the firmware pads
* it with zeros to ETH_ZLEN for us.
*/
if(skb_shinfo(skb)->nr_frags == 0) {
skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(skb->len);
txd->frag.addr = cpu_to_le32(skb_dma);
txd->frag.addrHi = 0;
first_txd->numDesc++;
} else {
int i, len;
len = skb_headlen(skb);
skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
PCI_DMA_TODEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
txd->frag.addrHi = 0;
first_txd->numDesc++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *frag_addr;
txd = (struct tx_desc *) (txRing->ringBase +
txRing->lastWrite);
typhoon_inc_tx_index(&txRing->lastWrite, 1);
len = skb_frag_size(frag);
frag_addr = skb_frag_address(frag);
skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
PCI_DMA_TODEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
txd->frag.addrHi = 0;
first_txd->numDesc++;
}
}
/* Kick the 3XP
*/
wmb();
iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
/* If we don't have room to put the worst case packet on the
* queue, then we must stop the queue. We need 2 extra
* descriptors -- one to prevent ring wrap, and one for the
* Tx header.
*/
numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
netif_stop_queue(dev);
/* A Tx complete IRQ could have arrived in between, making
* the ring free again. Only need to recheck here, since
* Tx is serialized.
*/
if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
netif_wake_queue(dev);
}
return NETDEV_TX_OK;
}
static void
typhoon_set_rx_mode(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
struct cmd_desc xp_cmd;
u32 mc_filter[2];
__le16 filter;
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
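/* Editorial note: the low 6 CRC bits select one of 64 hash bits;
 * bit >> 5 picks the 32-bit word and bit & 0x1f the position within
 * it, e.g. bit 37 sets bit 5 of mc_filter[1].
 */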
INIT_COMMAND_NO_RESPONSE(&xp_cmd,
TYPHOON_CMD_SET_MULTICAST_HASH);
xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
filter |= TYPHOON_RX_FILTER_MCAST_HASH;
}
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = filter;
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
static int
typhoon_do_get_stats(struct typhoon *tp)
{
struct net_device_stats *stats = &tp->stats;
struct net_device_stats *saved = &tp->stats_saved;
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[7];
struct stats_resp *s = (struct stats_resp *) xp_resp;
int err;
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
if(err < 0)
return err;
/* 3Com's Linux driver uses txMultipleCollisions as its
* collisions value, but there is some other collision info as well...
*
* The extra status reported would be a good candidate for
* ethtool_ops->get_{strings,stats}()
*/
stats->tx_packets = le32_to_cpu(s->txPackets) +
saved->tx_packets;
stats->tx_bytes = le64_to_cpu(s->txBytes) +
saved->tx_bytes;
stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
saved->tx_errors;
stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
saved->tx_carrier_errors;
stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
saved->collisions;
stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
saved->rx_packets;
stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
saved->rx_bytes;
stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
saved->rx_fifo_errors;
stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
saved->rx_errors;
stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
saved->rx_crc_errors;
stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
saved->rx_length_errors;
tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
SPEED_100 : SPEED_10;
tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
return 0;
}
static struct net_device_stats *
typhoon_get_stats(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
struct net_device_stats *stats = &tp->stats;
struct net_device_stats *saved = &tp->stats_saved;
smp_rmb();
if(tp->card_state == Sleeping)
return saved;
if(typhoon_do_get_stats(tp) < 0) {
netdev_err(dev, "error getting stats\n");
return saved;
}
return stats;
}
static void
typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct typhoon *tp = netdev_priv(dev);
struct pci_dev *pci_dev = tp->pdev;
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[3];
smp_rmb();
if(tp->card_state == Sleeping) {
strlcpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
strlcpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
snprintf(info->fw_version, sizeof(info->fw_version),
"%02x.%03x.%03x", sleep_ver >> 24,
(sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
}
}
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct typhoon *tp = netdev_priv(dev);
cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg;
switch (tp->xcvr_select) {
case TYPHOON_XCVR_10HALF:
cmd->advertising = ADVERTISED_10baseT_Half;
break;
case TYPHOON_XCVR_10FULL:
cmd->advertising = ADVERTISED_10baseT_Full;
break;
case TYPHOON_XCVR_100HALF:
cmd->advertising = ADVERTISED_100baseT_Half;
break;
case TYPHOON_XCVR_100FULL:
cmd->advertising = ADVERTISED_100baseT_Full;
break;
case TYPHOON_XCVR_AUTONEG:
cmd->advertising = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_Autoneg;
break;
}
if(tp->capabilities & TYPHOON_FIBER) {
cmd->supported |= SUPPORTED_FIBRE;
cmd->advertising |= ADVERTISED_FIBRE;
cmd->port = PORT_FIBRE;
} else {
cmd->supported |= SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP;
cmd->advertising |= ADVERTISED_TP;
cmd->port = PORT_TP;
}
/* need to get stats to make these link speed/duplex valid */
typhoon_do_get_stats(tp);
ethtool_cmd_speed_set(cmd, tp->speed);
cmd->duplex = tp->duplex;
cmd->phy_address = 0;
cmd->transceiver = XCVR_INTERNAL;
if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
cmd->autoneg = AUTONEG_ENABLE;
else
cmd->autoneg = AUTONEG_DISABLE;
cmd->maxtxpkt = 1;
cmd->maxrxpkt = 1;
return 0;
}
static int
typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct typhoon *tp = netdev_priv(dev);
u32 speed = ethtool_cmd_speed(cmd);
struct cmd_desc xp_cmd;
__le16 xcvr;
int err;
err = -EINVAL;
if (cmd->autoneg == AUTONEG_ENABLE) {
xcvr = TYPHOON_XCVR_AUTONEG;
} else {
if (cmd->duplex == DUPLEX_HALF) {
if (speed == SPEED_10)
xcvr = TYPHOON_XCVR_10HALF;
else if (speed == SPEED_100)
xcvr = TYPHOON_XCVR_100HALF;
else
goto out;
} else if (cmd->duplex == DUPLEX_FULL) {
if (speed == SPEED_10)
xcvr = TYPHOON_XCVR_10FULL;
else if (speed == SPEED_100)
xcvr = TYPHOON_XCVR_100FULL;
else
goto out;
} else
goto out;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
xp_cmd.parm1 = xcvr;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto out;
tp->xcvr_select = xcvr;
if(cmd->autoneg == AUTONEG_ENABLE) {
tp->speed = 0xff; /* invalid */
tp->duplex = 0xff; /* invalid */
} else {
tp->speed = speed;
tp->duplex = cmd->duplex;
}
out:
return err;
}
static void
typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct typhoon *tp = netdev_priv(dev);
wol->supported = WAKE_PHY | WAKE_MAGIC;
wol->wolopts = 0;
if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
wol->wolopts |= WAKE_PHY;
if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
wol->wolopts |= WAKE_MAGIC;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int
typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct typhoon *tp = netdev_priv(dev);
if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
return -EINVAL;
tp->wol_events = 0;
if(wol->wolopts & WAKE_PHY)
tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
if(wol->wolopts & WAKE_MAGIC)
tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
return 0;
}
static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
ering->rx_max_pending = RXENT_ENTRIES;
ering->tx_max_pending = TXLO_ENTRIES - 1;
ering->rx_pending = RXENT_ENTRIES;
ering->tx_pending = TXLO_ENTRIES - 1;
}
static const struct ethtool_ops typhoon_ethtool_ops = {
.get_settings = typhoon_get_settings,
.set_settings = typhoon_set_settings,
.get_drvinfo = typhoon_get_drvinfo,
.get_wol = typhoon_get_wol,
.set_wol = typhoon_set_wol,
.get_link = ethtool_op_get_link,
.get_ringparam = typhoon_get_ringparam,
};
static int
typhoon_wait_interrupt(void __iomem *ioaddr)
{
int i, err = 0;
for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
TYPHOON_INTR_BOOTCMD)
goto out;
udelay(TYPHOON_UDELAY);
}
err = -ETIMEDOUT;
out:
iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
return err;
}
#define shared_offset(x) offsetof(struct typhoon_shared, x)
static void
typhoon_init_interface(struct typhoon *tp)
{
struct typhoon_interface *iface = &tp->shared->iface;
dma_addr_t shared_dma;
memset(tp->shared, 0, sizeof(struct typhoon_shared));
/* The *Hi members of iface are all init'd to zero by the memset().
*/
shared_dma = tp->shared_dma + shared_offset(indexes);
iface->ringIndex = cpu_to_le32(shared_dma);
shared_dma = tp->shared_dma + shared_offset(txLo);
iface->txLoAddr = cpu_to_le32(shared_dma);
iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
shared_dma = tp->shared_dma + shared_offset(txHi);
iface->txHiAddr = cpu_to_le32(shared_dma);
iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
shared_dma = tp->shared_dma + shared_offset(rxBuff);
iface->rxBuffAddr = cpu_to_le32(shared_dma);
iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
sizeof(struct rx_free));
shared_dma = tp->shared_dma + shared_offset(rxLo);
iface->rxLoAddr = cpu_to_le32(shared_dma);
iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
shared_dma = tp->shared_dma + shared_offset(rxHi);
iface->rxHiAddr = cpu_to_le32(shared_dma);
iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
shared_dma = tp->shared_dma + shared_offset(cmd);
iface->cmdAddr = cpu_to_le32(shared_dma);
iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
shared_dma = tp->shared_dma + shared_offset(resp);
iface->respAddr = cpu_to_le32(shared_dma);
iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
shared_dma = tp->shared_dma + shared_offset(zeroWord);
iface->zeroAddr = cpu_to_le32(shared_dma);
tp->indexes = &tp->shared->indexes;
tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
tp->respRing.ringBase = (u8 *) tp->shared->resp;
tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
tp->card_state = Sleeping;
tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
tp->offload |= TYPHOON_OFFLOAD_VLAN;
spin_lock_init(&tp->command_lock);
/* Force the writes to the shared memory area out before continuing. */
wmb();
}
static void
typhoon_init_rings(struct typhoon *tp)
{
memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
tp->txLoRing.lastWrite = 0;
tp->txHiRing.lastWrite = 0;
tp->rxLoRing.lastWrite = 0;
tp->rxHiRing.lastWrite = 0;
tp->rxBuffRing.lastWrite = 0;
tp->cmdRing.lastWrite = 0;
tp->respRing.lastWrite = 0;
tp->txLoRing.lastRead = 0;
tp->txHiRing.lastRead = 0;
}
static const struct firmware *typhoon_fw;
static int
typhoon_request_firmware(struct typhoon *tp)
{
const struct typhoon_file_header *fHdr;
const struct typhoon_section_header *sHdr;
const u8 *image_data;
u32 numSections;
u32 section_len;
u32 remaining;
int err;
if (typhoon_fw)
return 0;
err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
if (err) {
netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
FIRMWARE_NAME);
return err;
}
image_data = (u8 *) typhoon_fw->data;
remaining = typhoon_fw->size;
if (remaining < sizeof(struct typhoon_file_header))
goto invalid_fw;
fHdr = (struct typhoon_file_header *) image_data;
if (memcmp(fHdr->tag, "TYPHOON", 8))
goto invalid_fw;
numSections = le32_to_cpu(fHdr->numSections);
image_data += sizeof(struct typhoon_file_header);
remaining -= sizeof(struct typhoon_file_header);
while (numSections--) {
if (remaining < sizeof(struct typhoon_section_header))
goto invalid_fw;
sHdr = (struct typhoon_section_header *) image_data;
image_data += sizeof(struct typhoon_section_header);
section_len = le32_to_cpu(sHdr->len);
if (remaining < section_len)
goto invalid_fw;
image_data += section_len;
remaining -= section_len;
}
return 0;
invalid_fw:
netdev_err(tp->dev, "Invalid firmware image\n");
release_firmware(typhoon_fw);
typhoon_fw = NULL;
return -EINVAL;
}
static int
typhoon_download_firmware(struct typhoon *tp)
{
void __iomem *ioaddr = tp->ioaddr;
struct pci_dev *pdev = tp->pdev;
const struct typhoon_file_header *fHdr;
const struct typhoon_section_header *sHdr;
const u8 *image_data;
void *dpage;
dma_addr_t dpage_dma;
__sum16 csum;
u32 irqEnabled;
u32 irqMasked;
u32 numSections;
u32 section_len;
u32 len;
u32 load_addr;
u32 hmac;
int i;
int err;
image_data = (u8 *) typhoon_fw->data;
fHdr = (struct typhoon_file_header *) image_data;
/* Cannot just map the firmware image using pci_map_single() as
* the firmware is vmalloc()'d and may not be physically contiguous,
* so we allocate some consistent memory to copy the sections into.
*/
err = -ENOMEM;
dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
if(!dpage) {
netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
}
irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
ioaddr + TYPHOON_REG_INTR_ENABLE);
irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
ioaddr + TYPHOON_REG_INTR_MASK);
err = -ETIMEDOUT;
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(tp->dev, "card ready timeout\n");
goto err_out_irq;
}
numSections = le32_to_cpu(fHdr->numSections);
load_addr = le32_to_cpu(fHdr->startAddr);
iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
hmac = le32_to_cpu(fHdr->hmacDigest[0]);
iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
hmac = le32_to_cpu(fHdr->hmacDigest[1]);
iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
hmac = le32_to_cpu(fHdr->hmacDigest[2]);
iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
hmac = le32_to_cpu(fHdr->hmacDigest[3]);
iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
hmac = le32_to_cpu(fHdr->hmacDigest[4]);
iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
typhoon_post_pci_writes(ioaddr);
iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
image_data += sizeof(struct typhoon_file_header);
/* The ioread32() in typhoon_wait_interrupt() will force the
* last write to the command register to post, so
* we don't need a typhoon_post_pci_writes() after it.
*/
for(i = 0; i < numSections; i++) {
sHdr = (struct typhoon_section_header *) image_data;
image_data += sizeof(struct typhoon_section_header);
load_addr = le32_to_cpu(sHdr->startAddr);
section_len = le32_to_cpu(sHdr->len);
while(section_len) {
len = min_t(u32, section_len, PAGE_SIZE);
if(typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
netdev_err(tp->dev, "segment ready timeout\n");
goto err_out_irq;
}
/* Do a pseudo IPv4 checksum on the data -- first
* need to convert each u16 to cpu order before
* summing. Fortunately, due to the properties of
* the checksum, we can do this once, at the end.
*/
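/* Editorial note: the ones-complement checksum has the property that
 * byte-swapping commutes with addition, so the per-word conversion can
 * be deferred to a single swap of the final sum (the le16_to_cpu()
 * below).
 */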
csum = csum_fold(csum_partial_copy_nocheck(image_data,
dpage, len,
0));
iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
iowrite32(le16_to_cpu((__force __le16)csum),
ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
iowrite32(load_addr,
ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
typhoon_post_pci_writes(ioaddr);
iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
ioaddr + TYPHOON_REG_COMMAND);
image_data += len;
load_addr += len;
section_len -= len;
}
}
if(typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
netdev_err(tp->dev, "final segment ready timeout\n");
goto err_out_irq;
}
iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
ioread32(ioaddr + TYPHOON_REG_STATUS));
goto err_out_irq;
}
err = 0;
err_out_irq:
iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
err_out:
return err;
}
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
void __iomem *ioaddr = tp->ioaddr;
if(typhoon_wait_status(ioaddr, initial_status) < 0) {
netdev_err(tp->dev, "boot ready timeout\n");
goto out_timeout;
}
iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
typhoon_post_pci_writes(ioaddr);
iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
ioaddr + TYPHOON_REG_COMMAND);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
ioread32(ioaddr + TYPHOON_REG_STATUS));
goto out_timeout;
}
/* Clear the Transmit and Command ready registers
*/
iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
typhoon_post_pci_writes(ioaddr);
iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
return 0;
out_timeout:
return -ETIMEDOUT;
}
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
volatile __le32 * index)
{
u32 lastRead = txRing->lastRead;
struct tx_desc *tx;
dma_addr_t skb_dma;
int dma_len;
int type;
while(lastRead != le32_to_cpu(*index)) {
tx = (struct tx_desc *) (txRing->ringBase + lastRead);
type = tx->flags & TYPHOON_TYPE_MASK;
if(type == TYPHOON_TX_DESC) {
/* This tx_desc describes a packet.
*/
unsigned long ptr = tx->tx_addr;
struct sk_buff *skb = (struct sk_buff *) ptr;
dev_kfree_skb_irq(skb);
} else if(type == TYPHOON_FRAG_DESC) {
/* This tx_desc describes a memory mapping. Free it.
*/
skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
dma_len = le16_to_cpu(tx->len);
pci_unmap_single(tp->pdev, skb_dma, dma_len,
PCI_DMA_TODEVICE);
}
tx->flags = 0;
typhoon_inc_tx_index(&lastRead, 1);
}
return lastRead;
}
static void
typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
volatile __le32 * index)
{
u32 lastRead;
int numDesc = MAX_SKB_FRAGS + 1;
/* This will need changing if we start to use the Hi Tx ring. */
lastRead = typhoon_clean_tx(tp, txRing, index);
if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
lastRead, TXLO_ENTRIES) > (numDesc + 2))
netif_wake_queue(tp->dev);
txRing->lastRead = lastRead;
smp_wmb();
}
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
struct typhoon_indexes *indexes = tp->indexes;
struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
struct basic_ring *ring = &tp->rxBuffRing;
struct rx_free *r;
if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
le32_to_cpu(indexes->rxBuffCleared)) {
/* no room in ring, just drop the skb
*/
dev_kfree_skb_any(rxb->skb);
rxb->skb = NULL;
return;
}
r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
typhoon_inc_rxfree_index(&ring->lastWrite, 1);
r->virtAddr = idx;
r->physAddr = cpu_to_le32(rxb->dma_addr);
/* Tell the card about it */
wmb();
indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
struct typhoon_indexes *indexes = tp->indexes;
struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
struct basic_ring *ring = &tp->rxBuffRing;
struct rx_free *r;
struct sk_buff *skb;
dma_addr_t dma_addr;
rxb->skb = NULL;
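	/* The free ring is full when advancing lastWrite by one entry would
	 * land on the slot the NIC has not yet consumed (rxBuffCleared). */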
if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
le32_to_cpu(indexes->rxBuffCleared))
return -ENOMEM;
skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
if(!skb)
return -ENOMEM;
#if 0
/* Please, 3com, fix the firmware to allow DMA to an unaligned
* address! Pretty please?
*/
skb_reserve(skb, 2);
#endif
dma_addr = pci_map_single(tp->pdev, skb->data,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* Since no card does 64 bit DAC, the high bits will never
* change from zero.
*/
r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
typhoon_inc_rxfree_index(&ring->lastWrite, 1);
r->virtAddr = idx;
r->physAddr = cpu_to_le32(dma_addr);
rxb->skb = skb;
rxb->dma_addr = dma_addr;
/* Tell the card about it */
wmb();
indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
return 0;
}
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
volatile __le32 * cleared, int budget)
{
struct rx_desc *rx;
struct sk_buff *skb, *new_skb;
struct rxbuff_ent *rxb;
dma_addr_t dma_addr;
u32 local_ready;
u32 rxaddr;
int pkt_len;
u32 idx;
__le32 csum_bits;
int received;
received = 0;
local_ready = le32_to_cpu(*ready);
rxaddr = le32_to_cpu(*cleared);
while(rxaddr != local_ready && budget > 0) {
rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
idx = rx->addr;
rxb = &tp->rxbuffers[idx];
skb = rxb->skb;
dma_addr = rxb->dma_addr;
typhoon_inc_rx_index(&rxaddr, 1);
if(rx->flags & TYPHOON_RX_ERROR) {
typhoon_recycle_rx_skb(tp, idx);
continue;
}
pkt_len = le16_to_cpu(rx->frameLen);
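		/* Copybreak: copy small frames into a fresh skb so the DMA
		 * buffer can be recycled immediately; hand large frames up
		 * as-is and map a replacement buffer. */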
if(pkt_len < rx_copybreak &&
(new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
skb_reserve(new_skb, 2);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
skb_put(new_skb, pkt_len);
typhoon_recycle_rx_skb(tp, idx);
} else {
new_skb = skb;
skb_put(new_skb, pkt_len);
pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
typhoon_alloc_rx_skb(tp, idx);
}
new_skb->protocol = eth_type_trans(new_skb, tp->dev);
csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
if(csum_bits ==
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
csum_bits ==
(TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
new_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb_checksum_none_assert(new_skb);
if (rx->rxStatus & TYPHOON_RX_VLAN)
__vlan_hwaccel_put_tag(new_skb,
ntohl(rx->vlanTag) & 0xffff);
netif_receive_skb(new_skb);
received++;
budget--;
}
*cleared = cpu_to_le32(rxaddr);
return received;
}
static void
typhoon_fill_free_ring(struct typhoon *tp)
{
u32 i;
for(i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
if(rxb->skb)
continue;
if(typhoon_alloc_rx_skb(tp, i) < 0)
break;
}
}
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
struct typhoon *tp = container_of(napi, struct typhoon, napi);
struct typhoon_indexes *indexes = tp->indexes;
int work_done;
rmb();
if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
typhoon_process_response(tp, 0, NULL);
if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
work_done = 0;
if(indexes->rxHiCleared != indexes->rxHiReady) {
work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
&indexes->rxHiCleared, budget);
}
if(indexes->rxLoCleared != indexes->rxLoReady) {
work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
&indexes->rxLoCleared, budget - work_done);
}
if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
/* rxBuff ring is empty, try to fill it. */
typhoon_fill_free_ring(tp);
}
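	/* NAPI contract: using less than the full budget means the rings
	 * are drained, so complete the poll and unmask the interrupt. */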
if (work_done < budget) {
napi_complete(napi);
iowrite32(TYPHOON_INTR_NONE,
tp->ioaddr + TYPHOON_REG_INTR_MASK);
typhoon_post_pci_writes(tp->ioaddr);
}
return work_done;
}
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct typhoon *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->ioaddr;
u32 intr_status;
intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
if(!(intr_status & TYPHOON_INTR_HOST_INT))
return IRQ_NONE;
iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
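	/* Interrupt is acked; mask all further interrupts and defer the
	 * real work to the NAPI poll loop. */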
if (napi_schedule_prep(&tp->napi)) {
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
typhoon_post_pci_writes(ioaddr);
__napi_schedule(&tp->napi);
} else {
netdev_err(dev, "Error, poll already scheduled\n");
}
return IRQ_HANDLED;
}
static void
typhoon_free_rx_rings(struct typhoon *tp)
{
u32 i;
for(i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
if(rxb->skb) {
pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxb->skb);
rxb->skb = NULL;
}
}
}
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
struct pci_dev *pdev = tp->pdev;
void __iomem *ioaddr = tp->ioaddr;
struct cmd_desc xp_cmd;
int err;
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
xp_cmd.parm1 = events;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0) {
netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
err);
return err;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0) {
netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
return err;
}
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
return -ETIMEDOUT;
/* Since we cannot monitor the status of the link while sleeping,
* tell the world it went away.
*/
netif_carrier_off(tp->dev);
pci_enable_wake(tp->pdev, state, 1);
pci_disable_device(pdev);
return pci_set_power_state(pdev, state);
}
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
struct pci_dev *pdev = tp->pdev;
void __iomem *ioaddr = tp->ioaddr;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/* Post 2.x.x versions of the Sleep Image require a reset before
* we can download the Runtime Image. But let's not make users of
* the old firmware pay for the reset.
*/
iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
return typhoon_reset(ioaddr, wait_type);
return 0;
}
static int
typhoon_start_runtime(struct typhoon *tp)
{
struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->ioaddr;
struct cmd_desc xp_cmd;
int err;
typhoon_init_rings(tp);
typhoon_fill_free_ring(tp);
err = typhoon_download_firmware(tp);
if(err < 0) {
netdev_err(tp->dev, "cannot load runtime on 3XP\n");
goto error_out;
}
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
netdev_err(tp->dev, "cannot boot 3XP\n");
err = -EIO;
goto error_out;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
/* Disable IRQ coalescing -- we can reenable it when 3Com gives
* us some more information on how to control it.
*/
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
xp_cmd.parm1 = 0;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
xp_cmd.parm1 = tp->xcvr_select;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
typhoon_set_rx_mode(dev);
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
goto error_out;
tp->card_state = Running;
smp_wmb();
iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
typhoon_post_pci_writes(ioaddr);
return 0;
error_out:
typhoon_reset(ioaddr, WaitNoSleep);
typhoon_free_rx_rings(tp);
typhoon_init_rings(tp);
return err;
}
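/*
 * Editor's note -- illustrative sketch, not part of the original driver.
 * Every step in typhoon_start_runtime() above uses the same command
 * pattern built from the driver's own helpers. Assuming a booted "tp":
 *
 *	struct cmd_desc my_cmd;
 *	int err;
 *
 *	INIT_COMMAND_NO_RESPONSE(&my_cmd, TYPHOON_CMD_TX_ENABLE);
 *	err = typhoon_issue_command(tp, 1, &my_cmd, 0, NULL);
 *	if (err < 0)
 *		... bail out and reset, as error_out does above ...
 */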
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
struct typhoon_indexes *indexes = tp->indexes;
struct transmit_ring *txLo = &tp->txLoRing;
void __iomem *ioaddr = tp->ioaddr;
struct cmd_desc xp_cmd;
int i;
/* Disable interrupts early, since we can't schedule a poll
* when called with !netif_running(). This will be posted
* when we force the posting of the command.
*/
iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* Wait 1/2 sec for any outstanding transmits to occur
* We'll cleanup after the reset if this times out.
*/
for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
break;
udelay(TYPHOON_UDELAY);
}
if(i == TYPHOON_WAIT_TIMEOUT)
netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* save the statistics so when we bring the interface up again,
* the values reported to userspace are correct.
*/
tp->card_state = Sleeping;
smp_wmb();
typhoon_do_get_stats(tp);
memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
if(typhoon_reset(ioaddr, wait_type) < 0) {
netdev_err(tp->dev, "unable to reset 3XP\n");
return -ETIMEDOUT;
}
/* cleanup any outstanding Tx packets */
if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
}
return 0;
}
static void
typhoon_tx_timeout(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
netdev_warn(dev, "could not reset in tx timeout\n");
goto truly_dead;
}
/* If we ever start using the Hi ring, it will need cleaning too */
typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
typhoon_free_rx_rings(tp);
if(typhoon_start_runtime(tp) < 0) {
netdev_err(dev, "could not start runtime in tx timeout\n");
goto truly_dead;
}
netif_wake_queue(dev);
return;
truly_dead:
/* Reset the hardware, and turn off carrier to avoid more timeouts */
typhoon_reset(tp->ioaddr, NoWait);
netif_carrier_off(dev);
}
static int
typhoon_open(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
int err;
err = typhoon_request_firmware(tp);
if (err)
goto out;
err = typhoon_wakeup(tp, WaitSleep);
if(err < 0) {
netdev_err(dev, "unable to wakeup device\n");
goto out_sleep;
}
err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
dev->name, dev);
if(err < 0)
goto out_sleep;
napi_enable(&tp->napi);
err = typhoon_start_runtime(tp);
if(err < 0) {
napi_disable(&tp->napi);
goto out_irq;
}
netif_start_queue(dev);
return 0;
out_irq:
free_irq(dev->irq, dev);
out_sleep:
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(dev, "unable to reboot into sleep img\n");
typhoon_reset(tp->ioaddr, NoWait);
goto out;
}
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
netdev_err(dev, "unable to go back to sleep\n");
out:
return err;
}
static int
typhoon_close(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
netif_stop_queue(dev);
napi_disable(&tp->napi);
if(typhoon_stop_runtime(tp, WaitSleep) < 0)
netdev_err(dev, "unable to stop runtime\n");
/* Make sure there is no irq handler running on a different CPU. */
free_irq(dev->irq, dev);
typhoon_free_rx_rings(tp);
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
netdev_err(dev, "unable to boot sleep image\n");
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
netdev_err(dev, "unable to put card to sleep\n");
return 0;
}
#ifdef CONFIG_PM
static int
typhoon_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct typhoon *tp = netdev_priv(dev);
/* If the interface is down, resume when it is brought up.
*/
if(!netif_running(dev))
return 0;
if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
netdev_err(dev, "critical: could not wake up in resume\n");
goto reset;
}
if(typhoon_start_runtime(tp) < 0) {
netdev_err(dev, "critical: could not start runtime in resume\n");
goto reset;
}
netif_device_attach(dev);
return 0;
reset:
typhoon_reset(tp->ioaddr, NoWait);
return -EBUSY;
}
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct typhoon *tp = netdev_priv(dev);
struct cmd_desc xp_cmd;
/* If we're down, we're already suspended.
*/
if(!netif_running(dev))
return 0;
/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
netdev_err(dev, "unable to stop runtime\n");
goto need_resume;
}
typhoon_free_rx_rings(tp);
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
netdev_err(dev, "unable to boot sleep image\n");
goto need_resume;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
netdev_err(dev, "unable to set mac address in suspend\n");
goto need_resume;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
netdev_err(dev, "unable to set rx filter in suspend\n");
goto need_resume;
}
if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
netdev_err(dev, "unable to put card to sleep\n");
goto need_resume;
}
return 0;
need_resume:
typhoon_resume(pdev);
return -EBUSY;
}
#endif
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
int mode = 0;
u32 val;
if(!ioaddr)
goto out;
if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_HOST)
goto out_unmap;
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
/* Ok, see if we can change our interrupt status register by
* sending ourselves an interrupt. If so, then MMIO works.
* The 50usec delay is arbitrary -- it could probably be smaller.
*/
val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
if((val & TYPHOON_INTR_SELF) == 0) {
iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
udelay(50);
val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
if(val & TYPHOON_INTR_SELF)
mode = 1;
}
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
out_unmap:
pci_iounmap(pdev, ioaddr);
out:
if(!mode)
pr_info("%s: falling back to port IO\n", pci_name(pdev));
return mode;
}
static const struct net_device_ops typhoon_netdev_ops = {
.ndo_open = typhoon_open,
.ndo_stop = typhoon_close,
.ndo_start_xmit = typhoon_start_tx,
.ndo_set_rx_mode = typhoon_set_rx_mode,
.ndo_tx_timeout = typhoon_tx_timeout,
.ndo_get_stats = typhoon_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
};
static int __devinit
typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[3];
int err = 0;
const char *err_msg;
dev = alloc_etherdev(sizeof(*tp));
if(dev == NULL) {
err_msg = "unable to alloc new net device";
err = -ENOMEM;
goto error_out;
}
SET_NETDEV_DEV(dev, &pdev->dev);
err = pci_enable_device(pdev);
if(err < 0) {
err_msg = "unable to enable device";
goto error_out_dev;
}
err = pci_set_mwi(pdev);
if(err < 0) {
err_msg = "unable to set MWI";
goto error_out_disable;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if(err < 0) {
err_msg = "No usable DMA configuration";
goto error_out_mwi;
}
/* sanity checks on IO and MMIO BARs
*/
if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
err_msg = "region #1 not a PCI IO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(pci_resource_len(pdev, 0) < 128) {
err_msg = "Invalid PCI IO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
err_msg = "region #1 not a PCI MMIO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(pci_resource_len(pdev, 1) < 128) {
err_msg = "Invalid PCI MMIO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
err = pci_request_regions(pdev, KBUILD_MODNAME);
if(err < 0) {
err_msg = "could not request regions";
goto error_out_mwi;
}
/* map our registers
*/
if(use_mmio != 0 && use_mmio != 1)
use_mmio = typhoon_test_mmio(pdev);
ioaddr = pci_iomap(pdev, use_mmio, 128);
if (!ioaddr) {
err_msg = "cannot remap registers, aborting";
err = -EIO;
goto error_out_regions;
}
/* allocate pci dma space for rx and tx descriptor rings
*/
shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
&shared_dma);
if(!shared) {
err_msg = "could not allocate DMA memory";
err = -ENOMEM;
goto error_out_remap;
}
dev->irq = pdev->irq;
tp = netdev_priv(dev);
tp->shared = shared;
tp->shared_dma = shared_dma;
tp->pdev = pdev;
tp->tx_pdev = pdev;
tp->ioaddr = ioaddr;
tp->tx_ioaddr = ioaddr;
tp->dev = dev;
/* Init sequence:
* 1) Reset the adapter to clear any bad juju
* 2) Reload the sleep image
* 3) Boot the sleep image
* 4) Get the hardware address.
* 5) Put the card to sleep.
*/
if (typhoon_reset(ioaddr, WaitSleep) < 0) {
err_msg = "could not reset 3XP";
err = -EIO;
goto error_out_dma;
}
/* Now that we've reset the 3XP and are sure it's not going to
* write all over memory, enable bus mastering, and save our
* state for resuming after a suspend.
*/
pci_set_master(pdev);
pci_save_state(pdev);
typhoon_init_interface(tp);
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
err_msg = "cannot boot 3XP sleep image";
err = -EIO;
goto error_out_reset;
}
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
err_msg = "cannot read MAC address";
err = -EIO;
goto error_out_reset;
}
*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
if(!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
goto error_out_reset;
}
/* Read the Sleep Image version last, so the response is valid
* later when we print out the version reported.
*/
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
err_msg = "Could not get Sleep Image version";
goto error_out_reset;
}
tp->capabilities = typhoon_card_info[card_id].capabilities;
tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
/* Typhoon 1.0 Sleep Images return one response descriptor to the
* READ_VERSIONS command. Those versions are OK after waking up
* from sleep without needing a reset. Typhoon 1.1+ Sleep Images
* seem to need a little extra help to get started. Since we don't
* know how to nudge it along, just kick it.
*/
if(xp_resp[0].numDesc != 0)
tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
err_msg = "cannot put adapter to sleep";
err = -EIO;
goto error_out_reset;
}
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &typhoon_netdev_ops;
netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
dev->watchdog_timeo = TX_TIMEOUT;
SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
/* We can handle scatter gather, up to 16 entries, and
* we can do IP checksumming (only version 4, doh...)
*
* There's no way to turn off the RX VLAN offloading and stripping
* on the current 3XP firmware -- it does not respect the offload
* settings -- so we only allow the user to toggle the TX processing.
*/
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX;
dev->features = dev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
	err = register_netdev(dev);
	if(err < 0) {
		err_msg = "unable to register netdev";
		goto error_out_reset;
	}
pci_set_drvdata(pdev, dev);
netdev_info(dev, "%s at %s 0x%llx, %pM\n",
typhoon_card_info[card_id].name,
use_mmio ? "MMIO" : "IO",
(unsigned long long)pci_resource_start(pdev, use_mmio),
dev->dev_addr);
/* xp_resp still contains the response to the READ_VERSIONS command.
* For debugging, let the user know what version he has.
*/
if(xp_resp[0].numDesc == 0) {
/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
* of version is Month/Day of build.
*/
u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
monthday >> 8, monthday & 0xff);
} else if(xp_resp[0].numDesc == 2) {
/* This is the Typhoon 1.1+ type Sleep Image
*/
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
u8 *ver_string = (u8 *) &xp_resp[1];
ver_string[25] = 0;
netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
sleep_ver & 0xfff, ver_string);
} else {
netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
}
return 0;
error_out_reset:
typhoon_reset(ioaddr, NoWait);
error_out_dma:
pci_free_consistent(pdev, sizeof(struct typhoon_shared),
shared, shared_dma);
error_out_remap:
pci_iounmap(pdev, ioaddr);
error_out_regions:
pci_release_regions(pdev);
error_out_mwi:
pci_clear_mwi(pdev);
error_out_disable:
pci_disable_device(pdev);
error_out_dev:
free_netdev(dev);
error_out:
pr_err("%s: %s\n", pci_name(pdev), err_msg);
return err;
}
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct typhoon *tp = netdev_priv(dev);
unregister_netdev(dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
typhoon_reset(tp->ioaddr, NoWait);
pci_iounmap(pdev, tp->ioaddr);
pci_free_consistent(pdev, sizeof(struct typhoon_shared),
tp->shared, tp->shared_dma);
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
}
static struct pci_driver typhoon_driver = {
.name = KBUILD_MODNAME,
.id_table = typhoon_pci_tbl,
.probe = typhoon_init_one,
.remove = __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
.suspend = typhoon_suspend,
.resume = typhoon_resume,
#endif
};
static int __init
typhoon_init(void)
{
return pci_register_driver(&typhoon_driver);
}
static void __exit
typhoon_cleanup(void)
{
release_firmware(typhoon_fw);
pci_unregister_driver(&typhoon_driver);
}
module_init(typhoon_init);
module_exit(typhoon_cleanup);
| gpl-2.0 |
j-r0dd/motus_kernel | arch/m68k/mac/via.c | 137 | 16788 | /*
* 6522 Versatile Interface Adapter (VIA)
*
* There are two of these on the Mac II. Some IRQs are vectored
* via them as are assorted bits and bobs - eg RTC, ADB.
*
* CSA: Motorola seems to have removed documentation on the 6522 from
* their web site; try
* http://nerini.drf.com/vectrex/other/text/chips/6522/
* http://www.zymurgy.net/classic/vic20/vicdet1.htm
* and
* http://193.23.168.87/mikro_laborversuche/via_iobaustein/via6522_1.html
* for info. A full-text web search on 6522 AND VIA will probably also
* net some usefulness. <cananian@alumni.princeton.edu> 20apr1999
*
* Additional data is here (the SY6522 was used in the Mac II etc):
* http://www.6502.org/documents/datasheets/synertek/synertek_sy6522.pdf
* http://www.6502.org/documents/datasheets/synertek/synertek_sy6522_programming_reference.pdf
*
* PRAM/RTC access algorithms are from the NetBSD RTC toolkit version 1.08b
* by Erik Vogan and adapted to Linux by Joshua M. Thompson (funaho@jurai.org)
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_via.h>
#include <asm/mac_psc.h>
#include <asm/mac_oss.h>
volatile __u8 *via1, *via2;
int rbv_present;
int via_alt_mapping;
EXPORT_SYMBOL(via_alt_mapping);
static __u8 rbv_clear;
/*
* Globals for accessing the VIA chip registers without having to
* check if we're hitting a real VIA or an RBV. Normally you could
* just hit the combined register (ie, vIER|rIER) but that seems to
* break on AV Macs...probably because they actually decode more than
* eight address bits. Why can't Apple engineers at least be
* _consistently_ lazy? - 1999-05-21 (jmt)
*/
static int gIER,gIFR,gBufA,gBufB;
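/*
 * Editor's note -- illustrative, not part of the original file: with these
 * globals, code can hit a VIA2 register without branching on the chip type,
 * e.g. "via2[gIFR] = 0x7F | rbv_clear" clears all pending flags on both a
 * genuine VIA and an RBV (see via_init() below).
 */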
/*
* Timer defs.
*/
#define TICK_SIZE 10000
#define MAC_CLOCK_TICK (783300/HZ) /* ticks per HZ */
#define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF)
#define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8)
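/*
 * Editor's worked example (illustrative): the VIA timers count a 783.3 kHz
 * clock, so with HZ == 100 the timer 1 latch is 783300 / 100 = 7833 ==
 * 0x1e99, giving MAC_CLOCK_LOW == 0x99 and MAC_CLOCK_HIGH == 0x1e.
 */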
/* To disable a NuBus slot on Quadras we make that slot IRQ line an output set
* high. On RBV we just use the slot interrupt enable register. On Macs with
* genuine VIA chips we must use nubus_disabled to keep track of disabled slot
* interrupts. When any slot IRQ is disabled we mask the (edge triggered) CA1
* or "SLOTS" interrupt. When no slot is disabled, we unmask the CA1 interrupt.
* So, on genuine VIAs, having more than one NuBus IRQ can mean trouble,
* because closing one of those drivers can mask all of the NuBus interrupts.
* Also, since we can't mask the unregistered slot IRQs on genuine VIAs, it's
* possible to get interrupts from cards that MacOS or the ROM has configured
* but we have not. FWIW, "Designing Cards and Drivers for Macintosh II and
* Macintosh SE", page 9-8, says, a slot IRQ with no driver would crash MacOS.
*/
static u8 nubus_disabled;
void via_debug_dump(void);
irqreturn_t via1_irq(int, void *);
irqreturn_t via2_irq(int, void *);
irqreturn_t via_nubus_irq(int, void *);
void via_irq_enable(int irq);
void via_irq_disable(int irq);
void via_irq_clear(int irq);
extern irqreturn_t mac_scc_dispatch(int, void *);
/*
* Initialize the VIAs
*
* First we figure out where they actually _are_ as well as what type of
* VIA we have for VIA2 (it could be a real VIA or an RBV or even an OSS.)
* Then we pretty much clear them out and disable all IRQ sources.
*
* Note: the OSS is actually "detected" here and not in oss_init(). It just
* seems more logical to do it here since via_init() needs to know
 * these things anyway.
*/
void __init via_init(void)
{
switch(macintosh_config->via_type) {
/* IIci, IIsi, IIvx, IIvi (P6xx), LC series */
case MAC_VIA_IIci:
via1 = (void *) VIA1_BASE;
if (macintosh_config->ident == MAC_MODEL_IIFX) {
via2 = NULL;
rbv_present = 0;
oss_present = 1;
} else {
via2 = (void *) RBV_BASE;
rbv_present = 1;
oss_present = 0;
}
if (macintosh_config->ident == MAC_MODEL_LCIII) {
rbv_clear = 0x00;
} else {
/* on most RBVs (& unlike the VIAs), you */
/* need to set bit 7 when you write to IFR */
/* in order for your clear to occur. */
rbv_clear = 0x80;
}
gIER = rIER;
gIFR = rIFR;
gBufA = rSIFR;
gBufB = rBufB;
break;
/* Quadra and early MacIIs agree on the VIA locations */
case MAC_VIA_QUADRA:
case MAC_VIA_II:
via1 = (void *) VIA1_BASE;
via2 = (void *) VIA2_BASE;
rbv_present = 0;
oss_present = 0;
rbv_clear = 0x00;
gIER = vIER;
gIFR = vIFR;
gBufA = vBufA;
gBufB = vBufB;
break;
default:
panic("UNKNOWN VIA TYPE");
}
printk(KERN_INFO "VIA1 at %p is a 6522 or clone\n", via1);
printk(KERN_INFO "VIA2 at %p is ", via2);
if (rbv_present) {
printk("an RBV\n");
} else if (oss_present) {
printk("an OSS\n");
} else {
printk("a 6522 or clone\n");
}
#ifdef DEBUG_VIA
via_debug_dump();
#endif
/*
* Shut down all IRQ sources, reset the timers, and
* kill the timer latch on VIA1.
*/
via1[vIER] = 0x7F;
via1[vIFR] = 0x7F;
via1[vT1LL] = 0;
via1[vT1LH] = 0;
via1[vT1CL] = 0;
via1[vT1CH] = 0;
via1[vT2CL] = 0;
via1[vT2CH] = 0;
via1[vACR] &= ~0xC0; /* setup T1 timer with no PB7 output */
via1[vACR] &= ~0x03; /* disable port A & B latches */
/*
* SE/30: disable video IRQ
* XXX: testing for SE/30 VBL
*/
if (macintosh_config->ident == MAC_MODEL_SE30) {
via1[vDirB] |= 0x40;
via1[vBufB] |= 0x40;
}
/*
* Set the RTC bits to a known state: all lines to outputs and
* RTC disabled (yes that's 0 to enable and 1 to disable).
*/
via1[vDirB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData);
via1[vBufB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk);
/* Everything below this point is VIA2/RBV only... */
if (oss_present)
return;
/* Some machines support an alternate IRQ mapping that spreads */
/* Ethernet and Sound out to their own autolevel IRQs and moves */
/* VIA1 to level 6. A/UX uses this mapping and we do too. Note */
/* that the IIfx emulates this alternate mapping using the OSS. */
via_alt_mapping = 0;
if (macintosh_config->via_type == MAC_VIA_QUADRA)
switch (macintosh_config->ident) {
case MAC_MODEL_C660:
case MAC_MODEL_Q840:
/* not applicable */
break;
case MAC_MODEL_P588:
case MAC_MODEL_TV:
case MAC_MODEL_PB140:
case MAC_MODEL_PB145:
case MAC_MODEL_PB160:
case MAC_MODEL_PB165:
case MAC_MODEL_PB165C:
case MAC_MODEL_PB170:
case MAC_MODEL_PB180:
case MAC_MODEL_PB180C:
case MAC_MODEL_PB190:
case MAC_MODEL_PB520:
/* not yet tested */
break;
default:
via_alt_mapping = 1;
via1[vDirB] |= 0x40;
via1[vBufB] &= ~0x40;
break;
}
/*
* Now initialize VIA2. For RBV we just kill all interrupts;
* for a regular VIA we also reset the timers and stuff.
*/
via2[gIER] = 0x7F;
via2[gIFR] = 0x7F | rbv_clear;
if (!rbv_present) {
via2[vT1LL] = 0;
via2[vT1LH] = 0;
via2[vT1CL] = 0;
via2[vT1CH] = 0;
via2[vT2CL] = 0;
via2[vT2CH] = 0;
via2[vACR] &= ~0xC0; /* setup T1 timer with no PB7 output */
via2[vACR] &= ~0x03; /* disable port A & B latches */
}
/*
* Set vPCR for control line interrupts (but not on RBV)
*/
if (!rbv_present) {
/* For all VIA types, CA1 (SLOTS IRQ) and CB1 (ASC IRQ)
* are made negative edge triggered here.
*/
if (macintosh_config->scsi_type == MAC_SCSI_OLD) {
/* CB2 (IRQ) indep. input, positive edge */
/* CA2 (DRQ) indep. input, positive edge */
via2[vPCR] = 0x66;
} else {
/* CB2 (IRQ) indep. input, negative edge */
/* CA2 (DRQ) indep. input, negative edge */
via2[vPCR] = 0x22;
}
}
}
/*
* Start the 100 Hz clock
*/
void __init via_init_clock(irq_handler_t func)
{
via1[vACR] |= 0x40;
via1[vT1LL] = MAC_CLOCK_LOW;
via1[vT1LH] = MAC_CLOCK_HIGH;
via1[vT1CL] = MAC_CLOCK_LOW;
via1[vT1CH] = MAC_CLOCK_HIGH;
if (request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func))
pr_err("Couldn't register %s interrupt\n", "timer");
}
/*
* Register the interrupt dispatchers for VIA or RBV machines only.
*/
void __init via_register_interrupts(void)
{
if (via_alt_mapping) {
if (request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
(void *) via1))
pr_err("Couldn't register %s interrupt\n", "software");
if (request_irq(IRQ_AUTO_6, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
(void *) via1))
pr_err("Couldn't register %s interrupt\n", "via1");
} else {
if (request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
(void *) via1))
pr_err("Couldn't register %s interrupt\n", "via1");
}
if (request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
"via2", (void *) via2))
pr_err("Couldn't register %s interrupt\n", "via2");
if (!psc_present) {
if (request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
"scc", mac_scc_dispatch))
pr_err("Couldn't register %s interrupt\n", "scc");
}
if (request_irq(IRQ_MAC_NUBUS, via_nubus_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "nubus", (void *) via2))
pr_err("Couldn't register %s interrupt\n", "nubus");
}
/*
* Debugging dump, used in various places to see what's going on.
*/
void via_debug_dump(void)
{
printk(KERN_DEBUG "VIA1: DDRA = 0x%02X DDRB = 0x%02X ACR = 0x%02X\n",
(uint) via1[vDirA], (uint) via1[vDirB], (uint) via1[vACR]);
printk(KERN_DEBUG " PCR = 0x%02X IFR = 0x%02X IER = 0x%02X\n",
(uint) via1[vPCR], (uint) via1[vIFR], (uint) via1[vIER]);
if (oss_present) {
printk(KERN_DEBUG "VIA2: <OSS>\n");
} else if (rbv_present) {
printk(KERN_DEBUG "VIA2: IFR = 0x%02X IER = 0x%02X\n",
(uint) via2[rIFR], (uint) via2[rIER]);
printk(KERN_DEBUG " SIFR = 0x%02X SIER = 0x%02X\n",
(uint) via2[rSIFR], (uint) via2[rSIER]);
} else {
printk(KERN_DEBUG "VIA2: DDRA = 0x%02X DDRB = 0x%02X ACR = 0x%02X\n",
(uint) via2[vDirA], (uint) via2[vDirB],
(uint) via2[vACR]);
printk(KERN_DEBUG " PCR = 0x%02X IFR = 0x%02X IER = 0x%02X\n",
(uint) via2[vPCR],
(uint) via2[vIFR], (uint) via2[vIER]);
}
}
/*
* This is always executed with interrupts disabled.
*
* TBI: get time offset between scheduling timer ticks
*/
unsigned long mac_gettimeoffset (void)
{
unsigned long ticks, offset = 0;
	/* read VIA1 timer 1 current value */
ticks = via1[vT1CL] | (via1[vT1CH] << 8);
/* The probability of underflow is less than 2% */
if (ticks > MAC_CLOCK_TICK - MAC_CLOCK_TICK / 50)
/* Check for pending timer interrupt in VIA1 IFR */
if (via1[vIFR] & 0x40) offset = TICK_SIZE;
ticks = MAC_CLOCK_TICK - ticks;
ticks = ticks * 10000L / MAC_CLOCK_TICK;
return ticks + offset;
}
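/*
 * Editor's worked example (illustrative): timer 1 counts down from
 * MAC_CLOCK_TICK, so MAC_CLOCK_TICK - ticks is the elapsed count, scaled
 * by 10000 / MAC_CLOCK_TICK onto the 10 ms tick. With HZ == 100 and a raw
 * count of 3917 (about half way through the tick), the function returns
 * roughly 5000 microseconds.
 */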
/*
* Flush the L2 cache on Macs that have it by flipping
* the system into 24-bit mode for an instant.
*/
void via_flush_cache(void)
{
via2[gBufB] &= ~VIA2B_vMode32;
via2[gBufB] |= VIA2B_vMode32;
}
/*
* Return the status of the L2 cache on a IIci
*/
int via_get_cache_disable(void)
{
/* Safeguard against being called accidentally */
if (!via2) {
printk(KERN_ERR "via_get_cache_disable called on a non-VIA machine!\n");
return 1;
}
return (int) via2[gBufB] & VIA2B_vCDis;
}
/*
* Initialize VIA2 for Nubus access
*/
void __init via_nubus_init(void)
{
/* unlock nubus transactions */
if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
(macintosh_config->adb_type != MAC_ADB_PB2)) {
/* set the line to be an output on non-RBV machines */
if (!rbv_present)
via2[vDirB] |= 0x02;
/* this seems to be an ADB bit on PMU machines */
/* according to MkLinux. -- jmt */
via2[gBufB] |= 0x02;
}
/* Disable all the slot interrupts (where possible). */
switch (macintosh_config->via_type) {
case MAC_VIA_II:
/* Just make the port A lines inputs. */
switch(macintosh_config->ident) {
case MAC_MODEL_II:
case MAC_MODEL_IIX:
case MAC_MODEL_IICX:
case MAC_MODEL_SE30:
/* The top two bits are RAM size outputs. */
via2[vDirA] &= 0xC0;
break;
default:
via2[vDirA] &= 0x80;
}
break;
case MAC_VIA_IIci:
/* RBV. Disable all the slot interrupts. SIER works like IER. */
via2[rSIER] = 0x7F;
break;
case MAC_VIA_QUADRA:
/* Disable the inactive slot interrupts by making those lines outputs. */
if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
(macintosh_config->adb_type != MAC_ADB_PB2)) {
via2[vBufA] |= 0x7F;
via2[vDirA] |= 0x7F;
}
break;
}
}
/*
* The generic VIA interrupt routines (shamelessly stolen from Alan Cox's
* via6522.c :-), disable/pending masks added.
*/
irqreturn_t via1_irq(int irq, void *dev_id)
{
int irq_num;
unsigned char irq_bit, events;
events = via1[vIFR] & via1[vIER] & 0x7F;
if (!events)
return IRQ_NONE;
irq_num = VIA1_SOURCE_BASE;
irq_bit = 1;
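	/* Walk the seven IFR bits from the LSB up, acking and dispatching
	 * each source that is both pending and enabled. */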
do {
if (events & irq_bit) {
via1[vIFR] = irq_bit;
m68k_handle_int(irq_num);
}
++irq_num;
irq_bit <<= 1;
} while (events >= irq_bit);
return IRQ_HANDLED;
}
irqreturn_t via2_irq(int irq, void *dev_id)
{
int irq_num;
unsigned char irq_bit, events;
events = via2[gIFR] & via2[gIER] & 0x7F;
if (!events)
return IRQ_NONE;
irq_num = VIA2_SOURCE_BASE;
irq_bit = 1;
do {
if (events & irq_bit) {
via2[gIFR] = irq_bit | rbv_clear;
m68k_handle_int(irq_num);
}
++irq_num;
irq_bit <<= 1;
} while (events >= irq_bit);
return IRQ_HANDLED;
}
/*
* Dispatch Nubus interrupts. We are called as a secondary dispatch by the
* VIA2 dispatcher as a fast interrupt handler.
*/
irqreturn_t via_nubus_irq(int irq, void *dev_id)
{
int slot_irq;
unsigned char slot_bit, events;
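	/* The slot IRQ lines on port A are active low, so invert them, then
	 * mask with the per-slot enables: SIER on RBV, or the lines still
	 * configured as inputs on a genuine VIA. */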
events = ~via2[gBufA] & 0x7F;
if (rbv_present)
events &= via2[rSIER];
else
events &= ~via2[vDirA];
if (!events)
return IRQ_NONE;
do {
slot_irq = IRQ_NUBUS_F;
slot_bit = 0x40;
do {
if (events & slot_bit) {
events &= ~slot_bit;
m68k_handle_int(slot_irq);
}
--slot_irq;
slot_bit >>= 1;
} while (events);
/* clear the CA1 interrupt and make certain there's no more. */
via2[gIFR] = 0x02 | rbv_clear;
events = ~via2[gBufA] & 0x7F;
if (rbv_present)
events &= via2[rSIER];
else
events &= ~via2[vDirA];
} while (events);
return IRQ_HANDLED;
}
void via_irq_enable(int irq) {
int irq_src = IRQ_SRC(irq);
int irq_idx = IRQ_IDX(irq);
#ifdef DEBUG_IRQUSE
printk(KERN_DEBUG "via_irq_enable(%d)\n", irq);
#endif
if (irq_src == 1) {
via1[vIER] = IER_SET_BIT(irq_idx);
} else if (irq_src == 2) {
if (irq != IRQ_MAC_NUBUS || nubus_disabled == 0)
via2[gIER] = IER_SET_BIT(irq_idx);
} else if (irq_src == 7) {
switch (macintosh_config->via_type) {
case MAC_VIA_II:
nubus_disabled &= ~(1 << irq_idx);
/* Enable the CA1 interrupt when no slot is disabled. */
if (!nubus_disabled)
via2[gIER] = IER_SET_BIT(1);
break;
case MAC_VIA_IIci:
/* On RBV, enable the slot interrupt.
* SIER works like IER.
*/
via2[rSIER] = IER_SET_BIT(irq_idx);
break;
case MAC_VIA_QUADRA:
/* Make the port A line an input to enable the slot irq.
* But not on PowerBooks, that's ADB.
*/
if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
(macintosh_config->adb_type != MAC_ADB_PB2))
via2[vDirA] &= ~(1 << irq_idx);
break;
}
}
}
void via_irq_disable(int irq) {
int irq_src = IRQ_SRC(irq);
int irq_idx = IRQ_IDX(irq);
#ifdef DEBUG_IRQUSE
printk(KERN_DEBUG "via_irq_disable(%d)\n", irq);
#endif
if (irq_src == 1) {
via1[vIER] = IER_CLR_BIT(irq_idx);
} else if (irq_src == 2) {
via2[gIER] = IER_CLR_BIT(irq_idx);
} else if (irq_src == 7) {
switch (macintosh_config->via_type) {
case MAC_VIA_II:
nubus_disabled |= 1 << irq_idx;
if (nubus_disabled)
via2[gIER] = IER_CLR_BIT(1);
break;
case MAC_VIA_IIci:
via2[rSIER] = IER_CLR_BIT(irq_idx);
break;
case MAC_VIA_QUADRA:
if ((macintosh_config->adb_type != MAC_ADB_PB1) &&
(macintosh_config->adb_type != MAC_ADB_PB2))
via2[vDirA] |= 1 << irq_idx;
break;
}
}
}
void via_irq_clear(int irq) {
int irq_src = IRQ_SRC(irq);
int irq_idx = IRQ_IDX(irq);
int irq_bit = 1 << irq_idx;
if (irq_src == 1) {
via1[vIFR] = irq_bit;
} else if (irq_src == 2) {
via2[gIFR] = irq_bit | rbv_clear;
} else if (irq_src == 7) {
/* FIXME: There is no way to clear an individual nubus slot
* IRQ flag, other than getting the device to do it.
*/
}
}
/*
* Returns nonzero if an interrupt is pending on the given
* VIA/IRQ combination.
*/
int via_irq_pending(int irq)
{
int irq_src = IRQ_SRC(irq);
int irq_idx = IRQ_IDX(irq);
int irq_bit = 1 << irq_idx;
if (irq_src == 1) {
return via1[vIFR] & irq_bit;
} else if (irq_src == 2) {
return via2[gIFR] & irq_bit;
} else if (irq_src == 7) {
/* Always 0 for MAC_VIA_QUADRA if the slot irq is disabled. */
return ~via2[gBufA] & irq_bit;
}
return 0;
}
| gpl-2.0 |
yohanes/Acer-BeTouch-E130-Linux-Kernel | fs/ext3/namei.c | 137 | 67538 | /*
* linux/fs/ext3/namei.c
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
* Directory entry file type support and forward compatibility hooks
* for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
* Hash Tree Directory indexing (c)
* Daniel Phillips, 2001
* Hash Tree Directory indexing porting
* Christopher Li, 2002
* Hash Tree Directory indexing cleanup
* Theodore Ts'o, 2002
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/jbd.h>
#include <linux/time.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include "namei.h"
#include "xattr.h"
#include "acl.h"
/*
* define how far ahead to read directories while searching them.
*/
#define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static struct buffer_head *ext3_append(handle_t *handle,
struct inode *inode,
u32 *block, int *err)
{
struct buffer_head *bh;
*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
bh = ext3_bread(handle, inode, *block, 1, err);
if (bh) {
inode->i_size += inode->i_sb->s_blocksize;
EXT3_I(inode)->i_disksize = inode->i_size;
*err = ext3_journal_get_write_access(handle, bh);
if (*err) {
brelse(bh);
bh = NULL;
}
}
return bh;
}
#ifndef assert
#define assert(test) J_ASSERT(test)
#endif
#ifdef DX_DEBUG
#define dxtrace(command) command
#else
#define dxtrace(command)
#endif
struct fake_dirent
{
__le32 inode;
__le16 rec_len;
u8 name_len;
u8 file_type;
};
struct dx_countlimit
{
__le16 limit;
__le16 count;
};
struct dx_entry
{
__le32 hash;
__le32 block;
};
/*
* dx_root_info is laid out so that if it should somehow get overlaid by a
* dirent the two low bits of the hash version will be zero. Therefore, the
* hash version mod 4 should never be 0. Sincerely, the paranoia department.
*/
struct dx_root
{
struct fake_dirent dot;
char dot_name[4];
struct fake_dirent dotdot;
char dotdot_name[4];
struct dx_root_info
{
__le32 reserved_zero;
u8 hash_version;
u8 info_length; /* 8 */
u8 indirect_levels;
u8 unused_flags;
}
info;
struct dx_entry entries[0];
};
struct dx_node
{
struct fake_dirent fake;
struct dx_entry entries[0];
};
struct dx_frame
{
struct buffer_head *bh;
struct dx_entry *entries;
struct dx_entry *at;
};
struct dx_map_entry
{
u32 hash;
u16 offs;
u16 size;
};
static inline unsigned dx_get_block (struct dx_entry *entry);
static void dx_set_block (struct dx_entry *entry, unsigned value);
static inline unsigned dx_get_hash (struct dx_entry *entry);
static void dx_set_hash (struct dx_entry *entry, unsigned value);
static unsigned dx_get_count (struct dx_entry *entries);
static unsigned dx_get_limit (struct dx_entry *entries);
static void dx_set_count (struct dx_entry *entries, unsigned value);
static void dx_set_limit (struct dx_entry *entries, unsigned value);
static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
static unsigned dx_node_limit (struct inode *dir);
static struct dx_frame *dx_probe(struct qstr *entry,
struct inode *dir,
struct dx_hash_info *hinfo,
struct dx_frame *frame,
int *err);
static void dx_release (struct dx_frame *frames);
static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
struct dx_hash_info *hinfo, struct dx_map_entry map[]);
static void dx_sort_map(struct dx_map_entry *map, unsigned count);
static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
struct dx_map_entry *offsets, int count);
static struct ext3_dir_entry_2* dx_pack_dirents (char *base, int size);
static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
static int ext3_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
struct dx_frame *frames,
__u32 *start_hash);
static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
int *err);
static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode);
/*
* p is at least 6 bytes before the end of page
*/
static inline struct ext3_dir_entry_2 *
ext3_next_entry(struct ext3_dir_entry_2 *p)
{
return (struct ext3_dir_entry_2 *)((char *)p +
ext3_rec_len_from_disk(p->rec_len));
}
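/*
 * Editor's illustration -- not part of the original file: the typical
 * linear scan of one directory block with ext3_next_entry(), as done by
 * the search and htree helpers below:
 *
 *	struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) bh->b_data;
 *	char *top = bh->b_data + dir->i_sb->s_blocksize;
 *
 *	while ((char *) de < top) {
 *		... examine de->inode, de->name_len, de->name ...
 *		de = ext3_next_entry(de);
 *	}
 */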
/*
* Future: use high four bits of block for coalesce-on-delete flags
* Mask them off for now.
*/
static inline unsigned dx_get_block (struct dx_entry *entry)
{
return le32_to_cpu(entry->block) & 0x00ffffff;
}
static inline void dx_set_block (struct dx_entry *entry, unsigned value)
{
entry->block = cpu_to_le32(value);
}
static inline unsigned dx_get_hash (struct dx_entry *entry)
{
return le32_to_cpu(entry->hash);
}
static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
{
entry->hash = cpu_to_le32(value);
}
static inline unsigned dx_get_count (struct dx_entry *entries)
{
return le16_to_cpu(((struct dx_countlimit *) entries)->count);
}
static inline unsigned dx_get_limit (struct dx_entry *entries)
{
return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
}
static inline void dx_set_count (struct dx_entry *entries, unsigned value)
{
((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
}
static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
{
((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
}
static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
{
unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
EXT3_DIR_REC_LEN(2) - infosize;
return entry_space / sizeof(struct dx_entry);
}
static inline unsigned dx_node_limit (struct inode *dir)
{
unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
return entry_space / sizeof(struct dx_entry);
}
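/*
 * Editor's worked example (illustrative, assuming the usual
 * EXT3_DIR_REC_LEN() rounding of name_len + 8 up to a 4-byte multiple):
 * with a 4096-byte block and the 8-byte dx_root_info, dx_root_limit()
 * yields (4096 - 12 - 12 - 8) / 8 == 508 index entries per root block,
 * and dx_node_limit() yields (4096 - 8) / 8 == 511 per interior node.
 */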
/*
* Debug
*/
#ifdef DX_DEBUG
static void dx_show_index (char * label, struct dx_entry *entries)
{
int i, n = dx_get_count (entries);
printk("%s index ", label);
for (i = 0; i < n; i++)
{
printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
}
printk("\n");
}
struct stats
{
unsigned names;
unsigned space;
unsigned bcount;
};
static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
int size, int show_names)
{
unsigned names = 0, space = 0;
char *base = (char *) de;
struct dx_hash_info h = *hinfo;
printk("names: ");
while ((char *) de < base + size)
{
if (de->inode)
{
if (show_names)
{
int len = de->name_len;
char *name = de->name;
while (len--) printk("%c", *name++);
ext3fs_dirhash(de->name, de->name_len, &h);
printk(":%x.%u ", h.hash,
((char *) de - base));
}
space += EXT3_DIR_REC_LEN(de->name_len);
names++;
}
de = ext3_next_entry(de);
}
printk("(%i)\n", names);
return (struct stats) { names, space, 1 };
}
struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
struct dx_entry *entries, int levels)
{
unsigned blocksize = dir->i_sb->s_blocksize;
unsigned count = dx_get_count (entries), names = 0, space = 0, i;
unsigned bcount = 0;
struct buffer_head *bh;
int err;
printk("%i indexed blocks...\n", count);
for (i = 0; i < count; i++, entries++)
{
u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
struct stats stats;
printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
stats = levels?
dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
names += stats.names;
space += stats.space;
bcount += stats.bcount;
brelse (bh);
}
if (bcount)
printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ",
names, space/bcount,(space/bcount)*100/blocksize);
return (struct stats) { names, space, bcount};
}
#endif /* DX_DEBUG */
/*
* Probe for a directory leaf block to search.
*
* dx_probe can return ERR_BAD_DX_DIR, which means there was a format
* error in the directory index, and the caller should fall back to
* searching the directory normally. The callers of dx_probe **MUST**
* check for this error code, and make sure it never gets reflected
* back to userspace.
*/
static struct dx_frame *
dx_probe(struct qstr *entry, struct inode *dir,
struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
{
unsigned count, indirect;
struct dx_entry *at, *entries, *p, *q, *m;
struct dx_root *root;
struct buffer_head *bh;
struct dx_frame *frame = frame_in;
u32 hash;
frame->bh = NULL;
if (!(bh = ext3_bread (NULL,dir, 0, 0, err)))
goto fail;
root = (struct dx_root *) bh->b_data;
if (root->info.hash_version != DX_HASH_TEA &&
root->info.hash_version != DX_HASH_HALF_MD4 &&
root->info.hash_version != DX_HASH_LEGACY) {
ext3_warning(dir->i_sb, __func__,
"Unrecognised inode hash code %d",
root->info.hash_version);
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
hinfo->hash_version = root->info.hash_version;
if (hinfo->hash_version <= DX_HASH_TEA)
hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
if (entry)
ext3fs_dirhash(entry->name, entry->len, hinfo);
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
ext3_warning(dir->i_sb, __func__,
"Unimplemented inode hash flags: %#06x",
root->info.unused_flags);
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
if ((indirect = root->info.indirect_levels) > 1) {
ext3_warning(dir->i_sb, __func__,
"Unimplemented inode hash depth: %#06x",
root->info.indirect_levels);
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
entries = (struct dx_entry *) (((char *)&root->info) +
root->info.info_length);
if (dx_get_limit(entries) != dx_root_limit(dir,
root->info.info_length)) {
ext3_warning(dir->i_sb, __func__,
"dx entry: limit != root limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail;
}
dxtrace (printk("Look up %x", hash));
while (1)
{
count = dx_get_count(entries);
if (!count || count > dx_get_limit(entries)) {
ext3_warning(dir->i_sb, __func__,
"dx entry: no count or count > limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail2;
}
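		/* Binary search for the last index entry whose hash is <=
		 * the target. entries[0] has no hash of its own (it overlays
		 * the count/limit header) and covers everything below
		 * entries[1], so the search starts at entries + 1. */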
p = entries + 1;
q = entries + count - 1;
while (p <= q)
{
m = p + (q - p)/2;
dxtrace(printk("."));
if (dx_get_hash(m) > hash)
q = m - 1;
else
p = m + 1;
}
if (0) // linear search cross check
{
unsigned n = count - 1;
at = entries;
while (n--)
{
dxtrace(printk(","));
if (dx_get_hash(++at) > hash)
{
at--;
break;
}
}
assert (at == p - 1);
}
at = p - 1;
dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
frame->bh = bh;
frame->entries = entries;
frame->at = at;
if (!indirect--) return frame;
if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
goto fail2;
at = entries = ((struct dx_node *) bh->b_data)->entries;
if (dx_get_limit(entries) != dx_node_limit (dir)) {
ext3_warning(dir->i_sb, __func__,
"dx entry: limit != node limit");
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto fail2;
}
frame++;
frame->bh = NULL;
}
fail2:
while (frame >= frame_in) {
brelse(frame->bh);
frame--;
}
fail:
if (*err == ERR_BAD_DX_DIR)
ext3_warning(dir->i_sb, __func__,
"Corrupt dir inode %ld, running e2fsck is "
"recommended.", dir->i_ino);
return NULL;
}
static void dx_release (struct dx_frame *frames)
{
if (frames[0].bh == NULL)
return;
if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
brelse(frames[1].bh);
brelse(frames[0].bh);
}
/*
* This function increments the frame pointer to search the next leaf
* block, and reads in the necessary intervening nodes if the search
* should be necessary. Whether or not the search is necessary is
* controlled by the hash parameter. If the hash value is even, then
* the search is only continued if the next block starts with that
* hash value. This is used if we are searching for a specific file.
*
* If the hash value is HASH_NB_ALWAYS, then always go to the next block.
*
* This function returns 1 if the caller should continue to search,
* or 0 if it should not. If there is an error reading one of the
 * index blocks, it will return a negative error code.
*
* If start_hash is non-null, it will be filled in with the starting
* hash of the next page.
*/
static int ext3_htree_next_block(struct inode *dir, __u32 hash,
struct dx_frame *frame,
struct dx_frame *frames,
__u32 *start_hash)
{
struct dx_frame *p;
struct buffer_head *bh;
int err, num_frames = 0;
__u32 bhash;
p = frame;
/*
* Find the next leaf page by incrementing the frame pointer.
* If we run out of entries in the interior node, loop around and
* increment pointer in the parent node. When we break out of
* this loop, num_frames indicates the number of interior
 * nodes that need to be read.
*/
while (1) {
if (++(p->at) < p->entries + dx_get_count(p->entries))
break;
if (p == frames)
return 0;
num_frames++;
p--;
}
/*
* If the hash is 1, then continue only if the next page has a
* continuation hash of any value. This is used for readdir
* handling. Otherwise, check to see if the hash matches the
 * desired continuation hash. If it doesn't, return since
* there's no point to read in the successive index pages.
*/
bhash = dx_get_hash(p->at);
if (start_hash)
*start_hash = bhash;
if ((hash & 1) == 0) {
if ((bhash & ~1) != hash)
return 0;
}
/*
* If the hash is HASH_NB_ALWAYS, we always go to the next
* block so no check is necessary
*/
while (num_frames--) {
if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at),
0, &err)))
return err; /* Failure */
p++;
brelse (p->bh);
p->bh = bh;
p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
}
return 1;
}
/*
* This function fills a red-black tree with information from a
 * directory block. It returns the number of directory entries loaded
* into the tree. If there is an error it is returned in err.
*/
static int htree_dirblock_to_tree(struct file *dir_file,
struct inode *dir, int block,
struct dx_hash_info *hinfo,
__u32 start_hash, __u32 start_minor_hash)
{
struct buffer_head *bh;
struct ext3_dir_entry_2 *de, *top;
int err, count = 0;
dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
if (!(bh = ext3_bread (NULL, dir, block, 0, &err)))
return err;
de = (struct ext3_dir_entry_2 *) bh->b_data;
top = (struct ext3_dir_entry_2 *) ((char *) de +
dir->i_sb->s_blocksize -
EXT3_DIR_REC_LEN(0));
for (; de < top; de = ext3_next_entry(de)) {
if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+((char *)de - bh->b_data))) {
/* On error, skip the f_pos to the next block. */
dir_file->f_pos = (dir_file->f_pos |
(dir->i_sb->s_blocksize - 1)) + 1;
brelse (bh);
return count;
}
ext3fs_dirhash(de->name, de->name_len, hinfo);
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
(hinfo->minor_hash < start_minor_hash)))
continue;
if (de->inode == 0)
continue;
if ((err = ext3_htree_store_dirent(dir_file,
hinfo->hash, hinfo->minor_hash, de)) != 0) {
brelse(bh);
return err;
}
count++;
}
brelse(bh);
return count;
}
/*
* This function fills a red-black tree with information from a
* directory. We start scanning the directory in hash order, starting
* at start_hash and start_minor_hash.
*
* This function returns the number of entries inserted into the tree,
* or a negative error code.
*/
int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
__u32 start_minor_hash, __u32 *next_hash)
{
struct dx_hash_info hinfo;
struct ext3_dir_entry_2 *de;
struct dx_frame frames[2], *frame;
struct inode *dir;
int block, err;
int count = 0;
int ret;
__u32 hashval;
dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
start_minor_hash));
dir = dir_file->f_path.dentry->d_inode;
if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version +=
EXT3_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
start_hash, start_minor_hash);
*next_hash = ~0;
return count;
}
hinfo.hash = start_hash;
hinfo.minor_hash = 0;
frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err);
if (!frame)
return err;
/* Add '.' and '..' from the htree header; they are stored at the
   reserved hash values 0 and 2, respectively */
if (!start_hash && !start_minor_hash) {
de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
goto errout;
count++;
}
if (start_hash < 2 || (start_hash == 2 && start_minor_hash == 0)) {
de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
de = ext3_next_entry(de);
if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0)
goto errout;
count++;
}
while (1) {
block = dx_get_block(frame->at);
ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
start_hash, start_minor_hash);
if (ret < 0) {
err = ret;
goto errout;
}
count += ret;
hashval = ~0;
ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
frame, frames, &hashval);
*next_hash = hashval;
if (ret < 0) {
err = ret;
goto errout;
}
/*
* Stop if: (a) there are no more entries, or
* (b) we have inserted at least one entry and the
* next hash value is not a continuation
*/
if ((ret == 0) ||
(count && ((hashval & 1) == 0)))
break;
}
dx_release(frames);
dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
count, *next_hash));
return count;
errout:
dx_release(frames);
return (err);
}
/*
* Directory block splitting, compacting
*/
/*
* Create a map of hash values, offsets, and sizes, stored at the end of
* the block. The map is filled backwards, from map_tail downwards.
* Returns the number of entries mapped.
*/
static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
{
int count = 0;
char *base = (char *) de;
struct dx_hash_info h = *hinfo;
while ((char *) de < base + size)
{
if (de->name_len && de->inode) {
ext3fs_dirhash(de->name, de->name_len, &h);
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = (u16) ((char *) de - base);
map_tail->size = le16_to_cpu(de->rec_len);
count++;
cond_resched();
}
/* XXX: do we need to check rec_len == 0 case? -Chris */
de = ext3_next_entry(de);
}
return count;
}
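/*
 * The sort below is a comb sort: the gap between compared elements
 * shrinks by a factor of 10/13 (~1.3) each pass, and the poorly
 * performing gaps 9 and 10 are bumped to 11 (the "rule of 11"; note
 * that the `count - 9 < 2' test relies on count being unsigned). Once
 * the gap reaches 2, the nearly sorted map is finished off with a
 * plain bubble sort.
 */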
/* Sort map by hash value */
static void dx_sort_map (struct dx_map_entry *map, unsigned count)
{
struct dx_map_entry *p, *q, *top = map + count - 1;
int more;
/* Combsort until bubble sort doesn't suck */
while (count > 2)
{
count = count*10/13;
if (count - 9 < 2) /* 9, 10 -> 11 */
count = 11;
for (p = top, q = p - count; q >= map; p--, q--)
if (p->hash < q->hash)
swap(*p, *q);
}
/* Garden variety bubble sort */
do {
more = 0;
q = top;
while (q-- > map)
{
if (q[1].hash >= q[0].hash)
continue;
swap(*(q+1), *q);
more = 1;
}
} while(more);
}
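/*
 * Insert a new (hash, block) pair into an index block: open a slot
 * right after frame->at by shifting the tail of the entry array up one
 * position, write the new pair into the gap, and bump the stored entry
 * count.
 */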
static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
{
struct dx_entry *entries = frame->entries;
struct dx_entry *old = frame->at, *new = old + 1;
int count = dx_get_count(entries);
assert(count < dx_get_limit(entries));
assert(old < entries + count);
memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
dx_set_hash(new, hash);
dx_set_block(new, block);
dx_set_count(entries, count + 1);
}
static void ext3_update_dx_flag(struct inode *inode)
{
if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
EXT3_FEATURE_COMPAT_DIR_INDEX))
EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
}
/*
* NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
*
* `len <= EXT3_NAME_LEN' is guaranteed by caller.
* `de != NULL' is guaranteed by caller.
*/
static inline int ext3_match (int len, const char * const name,
struct ext3_dir_entry_2 * de)
{
if (len != de->name_len)
return 0;
if (!de->inode)
return 0;
return !memcmp(name, de->name, len);
}
/*
* Returns 0 if not found, -1 on failure, and 1 on success
*/
static inline int search_dirblock(struct buffer_head * bh,
struct inode *dir,
struct qstr *child,
unsigned long offset,
struct ext3_dir_entry_2 ** res_dir)
{
struct ext3_dir_entry_2 * de;
char * dlimit;
int de_len;
const char *name = child->name;
int namelen = child->len;
de = (struct ext3_dir_entry_2 *) bh->b_data;
dlimit = bh->b_data + dir->i_sb->s_blocksize;
while ((char *) de < dlimit) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
if ((char *) de + namelen <= dlimit &&
ext3_match (namelen, name, de)) {
/* found a match - just to be sure, do a full check */
if (!ext3_check_dir_entry("ext3_find_entry",
dir, de, bh, offset))
return -1;
*res_dir = de;
return 1;
}
/* prevent looping on a bad block */
de_len = ext3_rec_len_from_disk(de->rec_len);
if (de_len <= 0)
return -1;
offset += de_len;
de = (struct ext3_dir_entry_2 *) ((char *) de + de_len);
}
return 0;
}
/*
* ext3_find_entry()
*
* finds an entry in the specified directory with the wanted name. It
* returns the cache buffer in which the entry was found, and the entry
* itself (as a parameter - res_dir). It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to.
*
* The returned buffer_head has ->b_count elevated. The caller is expected
* to brelse() it when appropriate.
*/
static struct buffer_head *ext3_find_entry(struct inode *dir,
struct qstr *entry,
struct ext3_dir_entry_2 **res_dir)
{
struct super_block * sb;
struct buffer_head * bh_use[NAMEI_RA_SIZE];
struct buffer_head * bh, *ret = NULL;
unsigned long start, block, b;
int ra_max = 0; /* Number of bh's in the readahead
buffer, bh_use[] */
int ra_ptr = 0; /* Current index into readahead
buffer */
int num = 0;
int nblocks, i, err;
int namelen;
*res_dir = NULL;
sb = dir->i_sb;
namelen = entry->len;
if (namelen > EXT3_NAME_LEN)
return NULL;
if (is_dx(dir)) {
bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
/*
* On success, or if the error was file not found,
* return. Otherwise, fall back to doing a search the
* old fashioned way.
*/
if (bh || (err != ERR_BAD_DX_DIR))
return bh;
dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
}
nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
start = EXT3_I(dir)->i_dir_start_lookup;
if (start >= nblocks)
start = 0;
block = start;
restart:
do {
/*
* We deal with the read-ahead logic here.
*/
if (ra_ptr >= ra_max) {
/* Refill the readahead buffer */
ra_ptr = 0;
b = block;
for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
/*
* Terminate if we reach the end of the
* directory and must wrap, or if our
* search has finished at this block.
*/
if (b >= nblocks || (num && block == start)) {
bh_use[ra_max] = NULL;
break;
}
num++;
bh = ext3_getblk(NULL, dir, b++, 0, &err);
bh_use[ra_max] = bh;
if (bh)
ll_rw_block(READ_META, 1, &bh);
}
}
if ((bh = bh_use[ra_ptr++]) == NULL)
goto next;
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */
ext3_error(sb, __func__, "reading directory #%lu "
"offset %lu", dir->i_ino, block);
brelse(bh);
goto next;
}
i = search_dirblock(bh, dir, entry,
block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
if (i == 1) {
EXT3_I(dir)->i_dir_start_lookup = block;
ret = bh;
goto cleanup_and_exit;
} else {
brelse(bh);
if (i < 0)
goto cleanup_and_exit;
}
next:
if (++block >= nblocks)
block = 0;
} while (block != start);
/*
* If the directory has grown while we were searching, then
* search the last part of the directory before giving up.
*/
block = nblocks;
nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
if (block < nblocks) {
start = 0;
goto restart;
}
cleanup_and_exit:
/* Clean up the read-ahead blocks */
for (; ra_ptr < ra_max; ra_ptr++)
brelse (bh_use[ra_ptr]);
return ret;
}
static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
int *err)
{
struct super_block * sb;
struct dx_hash_info hinfo;
u32 hash;
struct dx_frame frames[2], *frame;
struct ext3_dir_entry_2 *de, *top;
struct buffer_head *bh;
unsigned long block;
int retval;
int namelen = entry->len;
const u8 *name = entry->name;
sb = dir->i_sb;
/* NFS may look up ".." - look at dx_root directory block */
if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) {
if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
return NULL;
} else {
frame = frames;
frame->bh = NULL; /* for dx_release() */
frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
dx_set_block(frame->at, 0); /* dx_root block is 0 */
}
hash = hinfo.hash;
do {
block = dx_get_block(frame->at);
if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
goto errout;
de = (struct ext3_dir_entry_2 *) bh->b_data;
top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
EXT3_DIR_REC_LEN(0));
for (; de < top; de = ext3_next_entry(de)) {
int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
+ ((char *) de - bh->b_data);
if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto errout;
}
if (ext3_match(namelen, name, de)) {
*res_dir = de;
dx_release(frames);
return bh;
}
}
brelse (bh);
/* Check to see if we should continue to search */
retval = ext3_htree_next_block(dir, hash, frame,
frames, NULL);
if (retval < 0) {
ext3_warning(sb, __func__,
"error reading index page in directory #%lu",
dir->i_ino);
*err = retval;
goto errout;
}
} while (retval == 1);
*err = -ENOENT;
errout:
dxtrace(printk("%s not found\n", name));
dx_release (frames);
return NULL;
}
static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
struct inode * inode;
struct ext3_dir_entry_2 * de;
struct buffer_head * bh;
if (dentry->d_name.len > EXT3_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
bh = ext3_find_entry(dir, &dentry->d_name, &de);
inode = NULL;
if (bh) {
unsigned long ino = le32_to_cpu(de->inode);
brelse (bh);
if (!ext3_valid_inum(dir->i_sb, ino)) {
ext3_error(dir->i_sb, "ext3_lookup",
"bad inode number: %lu", ino);
return ERR_PTR(-EIO);
}
inode = ext3_iget(dir->i_sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
struct dentry *ext3_get_parent(struct dentry *child)
{
unsigned long ino;
struct qstr dotdot = {.name = "..", .len = 2};
struct ext3_dir_entry_2 * de;
struct buffer_head *bh;
bh = ext3_find_entry(child->d_inode, &dotdot, &de);
if (!bh)
return ERR_PTR(-ENOENT);
ino = le32_to_cpu(de->inode);
brelse(bh);
if (!ext3_valid_inum(child->d_inode->i_sb, ino)) {
ext3_error(child->d_inode->i_sb, "ext3_get_parent",
"bad inode number: %lu", ino);
return ERR_PTR(-EIO);
}
return d_obtain_alias(ext3_iget(child->d_inode->i_sb, ino));
}
#define S_SHIFT 12
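/*
 * The S_IFMT file type bits occupy bits 12..15 of i_mode, so shifting
 * right by S_SHIFT turns a mode into a compact index into the table
 * below.
 */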
static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = EXT3_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = EXT3_FT_DIR,
[S_IFCHR >> S_SHIFT] = EXT3_FT_CHRDEV,
[S_IFBLK >> S_SHIFT] = EXT3_FT_BLKDEV,
[S_IFIFO >> S_SHIFT] = EXT3_FT_FIFO,
[S_IFSOCK >> S_SHIFT] = EXT3_FT_SOCK,
[S_IFLNK >> S_SHIFT] = EXT3_FT_SYMLINK,
};
static inline void ext3_set_de_type(struct super_block *sb,
struct ext3_dir_entry_2 *de,
umode_t mode) {
if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE))
de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
/*
* Move the directory entries described by 'count' consecutive map
* entries (the tail of the sorted map) from one block to another.
* Returns a pointer to the last entry moved.
*/
static struct ext3_dir_entry_2 *
dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
{
unsigned rec_len = 0;
while (count--) {
struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
rec_len = EXT3_DIR_REC_LEN(de->name_len);
memcpy (to, de, rec_len);
((struct ext3_dir_entry_2 *) to)->rec_len =
ext3_rec_len_to_disk(rec_len);
de->inode = 0;
map++;
to += rec_len;
}
return (struct ext3_dir_entry_2 *) (to - rec_len);
}
/*
* Compact each dir entry in the range to the minimal rec_len.
* Returns pointer to last entry in range.
*/
static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
{
struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base;
unsigned rec_len = 0;
prev = to = de;
while ((char*)de < base + size) {
next = ext3_next_entry(de);
if (de->inode && de->name_len) {
rec_len = EXT3_DIR_REC_LEN(de->name_len);
if (de > to)
memmove(to, de, rec_len);
to->rec_len = ext3_rec_len_to_disk(rec_len);
prev = to;
to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
}
de = next;
}
return prev;
}
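/*
 * Note that dx_pack_dirents() does not itself account for the space
 * reclaimed at the tail of the block: the caller is expected to stretch
 * the rec_len of the returned (last) entry to the end of the block, as
 * do_split() does below.
 */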
/*
* Split a full leaf block to make room for a new dir entry.
* Allocate a new block, and move entries so that they are approx. equally full.
* Returns pointer to de in block into which the new entry will be inserted.
*/
static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
struct buffer_head **bh,struct dx_frame *frame,
struct dx_hash_info *hinfo, int *error)
{
unsigned blocksize = dir->i_sb->s_blocksize;
unsigned count, continued;
struct buffer_head *bh2;
u32 newblock;
u32 hash2;
struct dx_map_entry *map;
char *data1 = (*bh)->b_data, *data2;
unsigned split, move, size;
struct ext3_dir_entry_2 *de = NULL, *de2;
int err = 0, i;
bh2 = ext3_append (handle, dir, &newblock, &err);
if (!(bh2)) {
brelse(*bh);
*bh = NULL;
goto errout;
}
BUFFER_TRACE(*bh, "get_write_access");
err = ext3_journal_get_write_access(handle, *bh);
if (err)
goto journal_error;
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext3_journal_get_write_access(handle, frame->bh);
if (err)
goto journal_error;
data2 = bh2->b_data;
/* create map in the end of data2 block */
map = (struct dx_map_entry *) (data2 + blocksize);
count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
blocksize, hinfo, map);
map -= count;
dx_sort_map (map, count);
/* Split the existing block in the middle, size-wise */
size = 0;
move = 0;
for (i = count-1; i >= 0; i--) {
/* is more than half of this entry in 2nd half of the block? */
if (size + map[i].size/2 > blocksize/2)
break;
size += map[i].size;
move++;
}
/* map index at which we will split */
split = count - move;
hash2 = map[split].hash;
continued = hash2 == map[split - 1].hash;
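/*
 * If the smallest hash in the new block also appears at the end of the
 * old block, entries with this hash straddle the block boundary;
 * dx_insert_block() below then sets the low-order "continuation" bit
 * by adding 'continued' (i.e. storing hash2 + 1) so lookups know to
 * keep reading.
 */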
dxtrace(printk("Split block %i at %x, %i/%i\n",
dx_get_block(frame->at), hash2, split, count-split));
/* Fancy dance to stay within two buffers */
de2 = dx_move_dirents(data1, data2, map + split, count - split);
de = dx_pack_dirents(data1,blocksize);
de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
de2->rec_len = ext3_rec_len_to_disk(data2 + blocksize - (char *) de2);
dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
/* Which block gets the new entry? */
if (hinfo->hash >= hash2)
{
swap(*bh, bh2);
de = de2;
}
dx_insert_block (frame, hash2 + continued, newblock);
err = ext3_journal_dirty_metadata (handle, bh2);
if (err)
goto journal_error;
err = ext3_journal_dirty_metadata (handle, frame->bh);
if (err)
goto journal_error;
brelse (bh2);
dxtrace(dx_show_index ("frame", frame->entries));
return de;
journal_error:
brelse(*bh);
brelse(bh2);
*bh = NULL;
ext3_std_error(dir->i_sb, err);
errout:
*error = err;
return NULL;
}
/*
* Add a new entry into a directory (leaf) block. If de is non-NULL,
* it points to a directory entry which is guaranteed to be large
* enough for the new directory entry. If de is NULL, then
* add_dirent_to_buf will attempt to search the directory block for
* space. It will return -ENOSPC if no space is available, -EIO if the
* block is corrupted, and -EEXIST if the directory entry already exists.
*
* NOTE! bh is NOT released in the case where ENOSPC is returned. In
* all other cases bh is released.
*/
static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct ext3_dir_entry_2 *de,
struct buffer_head * bh)
{
struct inode *dir = dentry->d_parent->d_inode;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
unsigned long offset = 0;
unsigned short reclen;
int nlen, rlen, err;
char *top;
reclen = EXT3_DIR_REC_LEN(namelen);
if (!de) {
de = (struct ext3_dir_entry_2 *)bh->b_data;
top = bh->b_data + dir->i_sb->s_blocksize - reclen;
while ((char *) de <= top) {
if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
bh, offset)) {
brelse (bh);
return -EIO;
}
if (ext3_match (namelen, name, de)) {
brelse (bh);
return -EEXIST;
}
nlen = EXT3_DIR_REC_LEN(de->name_len);
rlen = ext3_rec_len_from_disk(de->rec_len);
if ((de->inode ? rlen - nlen : rlen) >= reclen)
break;
de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
offset += rlen;
}
if ((char *) de > top)
return -ENOSPC;
}
BUFFER_TRACE(bh, "get_write_access");
err = ext3_journal_get_write_access(handle, bh);
if (err) {
ext3_std_error(dir->i_sb, err);
brelse(bh);
return err;
}
/* By now the buffer is marked for journaling */
nlen = EXT3_DIR_REC_LEN(de->name_len);
rlen = ext3_rec_len_from_disk(de->rec_len);
if (de->inode) {
struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
de1->rec_len = ext3_rec_len_to_disk(rlen - nlen);
de->rec_len = ext3_rec_len_to_disk(nlen);
de = de1;
}
de->file_type = EXT3_FT_UNKNOWN;
if (inode) {
de->inode = cpu_to_le32(inode->i_ino);
ext3_set_de_type(dir->i_sb, de, inode->i_mode);
} else
de->inode = 0;
de->name_len = namelen;
memcpy (de->name, name, namelen);
/*
* XXX shouldn't update any times until successful
* completion of syscall, but too many callers depend
* on this.
*
* XXX similarly, too many callers depend on
* ext3_new_inode() setting the times, but error
* recovery deletes the inode, so the worst that can
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
ext3_update_dx_flag(dir);
dir->i_version++;
ext3_mark_inode_dirty(handle, dir);
BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
err = ext3_journal_dirty_metadata(handle, bh);
if (err)
ext3_std_error(dir->i_sb, err);
brelse(bh);
return 0;
}
/*
* This converts a one block unindexed directory to a 3 block indexed
* directory, and adds the dentry to the indexed directory.
*/
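/*
 * Resulting layout: block 0 keeps the '.' and '..' entries and becomes
 * the dx_root holding the index, the remaining dirents are copied into
 * block 1, and do_split() then spreads them over blocks 1 and 2.
 */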
static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct inode *inode, struct buffer_head *bh)
{
struct inode *dir = dentry->d_parent->d_inode;
const char *name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct buffer_head *bh2;
struct dx_root *root;
struct dx_frame frames[2], *frame;
struct dx_entry *entries;
struct ext3_dir_entry_2 *de, *de2;
char *data1, *top;
unsigned len;
int retval;
unsigned blocksize;
struct dx_hash_info hinfo;
u32 block;
struct fake_dirent *fde;
blocksize = dir->i_sb->s_blocksize;
dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
retval = ext3_journal_get_write_access(handle, bh);
if (retval) {
ext3_std_error(dir->i_sb, retval);
brelse(bh);
return retval;
}
root = (struct dx_root *) bh->b_data;
/* The 0th block becomes the root, move the dirents out */
fde = &root->dotdot;
de = (struct ext3_dir_entry_2 *)((char *)fde +
ext3_rec_len_from_disk(fde->rec_len));
if ((char *) de >= (((char *) root) + blocksize)) {
ext3_error(dir->i_sb, __func__,
"invalid rec_len for '..' in inode %lu",
dir->i_ino);
brelse(bh);
return -EIO;
}
len = ((char *) root) + blocksize - (char *) de;
bh2 = ext3_append (handle, dir, &block, &retval);
if (!(bh2)) {
brelse(bh);
return retval;
}
EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
data1 = bh2->b_data;
memcpy (data1, de, len);
de = (struct ext3_dir_entry_2 *) data1;
top = data1 + len;
while ((char *)(de2 = ext3_next_entry(de)) < top)
de = de2;
de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
/* Initialize the root; the dot dirents already exist */
de = (struct ext3_dir_entry_2 *) (&root->dotdot);
de->rec_len = ext3_rec_len_to_disk(blocksize - EXT3_DIR_REC_LEN(2));
memset (&root->info, 0, sizeof(root->info));
root->info.info_length = sizeof(root->info);
root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
entries = root->entries;
dx_set_block (entries, 1);
dx_set_count (entries, 1);
dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
/* Initialize as for dx_probe */
hinfo.hash_version = root->info.hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
ext3fs_dirhash(name, namelen, &hinfo);
frame = frames;
frame->entries = entries;
frame->at = entries;
frame->bh = bh;
bh = bh2;
de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
dx_release (frames);
if (!(de))
return retval;
return add_dirent_to_buf(handle, dentry, inode, de, bh);
}
/*
* ext3_add_entry()
*
* adds a file entry to the specified directory, using the same
* semantics as ext3_find_entry(). It returns 0 on success, or a
* negative error code on failure.
*
* NOTE!! The inode part of 'de' is left at 0 - which means you
* may not sleep between calling this and putting something into
* the entry, as someone else might have used it while you slept.
*/
static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
unsigned long offset;
struct buffer_head * bh;
struct ext3_dir_entry_2 *de;
struct super_block * sb;
int retval;
int dx_fallback=0;
unsigned blocksize;
u32 block, blocks;
sb = dir->i_sb;
blocksize = sb->s_blocksize;
if (!dentry->d_name.len)
return -EINVAL;
if (is_dx(dir)) {
retval = ext3_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
return retval;
EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
dx_fallback++;
ext3_mark_inode_dirty(handle, dir);
}
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0, offset = 0; block < blocks; block++) {
bh = ext3_bread(handle, dir, block, 0, &retval);
if(!bh)
return retval;
retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
if (retval != -ENOSPC)
return retval;
if (blocks == 1 && !dx_fallback &&
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
return make_indexed_dir(handle, dentry, inode, bh);
brelse(bh);
}
bh = ext3_append(handle, dir, &block, &retval);
if (!bh)
return retval;
de = (struct ext3_dir_entry_2 *) bh->b_data;
de->inode = 0;
de->rec_len = ext3_rec_len_to_disk(blocksize);
return add_dirent_to_buf(handle, dentry, inode, de, bh);
}
/*
* Returns 0 for success, or a negative error value
*/
static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct dx_frame frames[2], *frame;
struct dx_entry *entries, *at;
struct dx_hash_info hinfo;
struct buffer_head * bh;
struct inode *dir = dentry->d_parent->d_inode;
struct super_block * sb = dir->i_sb;
struct ext3_dir_entry_2 *de;
int err;
frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
if (!frame)
return err;
entries = frame->entries;
at = frame->at;
if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
goto cleanup;
BUFFER_TRACE(bh, "get_write_access");
err = ext3_journal_get_write_access(handle, bh);
if (err)
goto journal_error;
err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
if (err != -ENOSPC) {
bh = NULL;
goto cleanup;
}
/* Block full, should compress but for now just split */
dxtrace(printk("using %u of %u node entries\n",
dx_get_count(entries), dx_get_limit(entries)));
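/*
 * The htree is at most two levels deep. If the index block above the
 * full leaf has itself run out of entries, either split that node and
 * add a pointer to the root (when a second level already exists), or
 * promote the root's entries into a fresh node to create the second
 * level (when it does not). If the root is full and two levels already
 * exist, the directory index has reached its maximum size.
 */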
/* Need to split index? */
if (dx_get_count(entries) == dx_get_limit(entries)) {
u32 newblock;
unsigned icount = dx_get_count(entries);
int levels = frame - frames;
struct dx_entry *entries2;
struct dx_node *node2;
struct buffer_head *bh2;
if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) {
ext3_warning(sb, __func__,
"Directory index full!");
err = -ENOSPC;
goto cleanup;
}
bh2 = ext3_append (handle, dir, &newblock, &err);
if (!(bh2))
goto cleanup;
node2 = (struct dx_node *)(bh2->b_data);
entries2 = node2->entries;
node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
node2->fake.inode = 0;
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext3_journal_get_write_access(handle, frame->bh);
if (err)
goto journal_error;
if (levels) {
unsigned icount1 = icount/2, icount2 = icount - icount1;
unsigned hash2 = dx_get_hash(entries + icount1);
dxtrace(printk("Split index %i/%i\n", icount1, icount2));
BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
err = ext3_journal_get_write_access(handle,
frames[0].bh);
if (err)
goto journal_error;
memcpy ((char *) entries2, (char *) (entries + icount1),
icount2 * sizeof(struct dx_entry));
dx_set_count (entries, icount1);
dx_set_count (entries2, icount2);
dx_set_limit (entries2, dx_node_limit(dir));
/* Which index block gets the new entry? */
if (at - entries >= icount1) {
frame->at = at = at - entries - icount1 + entries2;
frame->entries = entries = entries2;
swap(frame->bh, bh2);
}
dx_insert_block (frames + 0, hash2, newblock);
dxtrace(dx_show_index ("node", frames[1].entries));
dxtrace(dx_show_index ("node",
((struct dx_node *) bh2->b_data)->entries));
err = ext3_journal_dirty_metadata(handle, bh2);
if (err)
goto journal_error;
brelse (bh2);
} else {
dxtrace(printk("Creating second level index...\n"));
memcpy((char *) entries2, (char *) entries,
icount * sizeof(struct dx_entry));
dx_set_limit(entries2, dx_node_limit(dir));
/* Set up root */
dx_set_count(entries, 1);
dx_set_block(entries + 0, newblock);
((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
/* Add new access path frame */
frame = frames + 1;
frame->at = at = at - entries + entries2;
frame->entries = entries = entries2;
frame->bh = bh2;
err = ext3_journal_get_write_access(handle,
frame->bh);
if (err)
goto journal_error;
}
ext3_journal_dirty_metadata(handle, frames[0].bh);
}
de = do_split(handle, dir, &bh, frame, &hinfo, &err);
if (!de)
goto cleanup;
err = add_dirent_to_buf(handle, dentry, inode, de, bh);
bh = NULL;
goto cleanup;
journal_error:
ext3_std_error(dir->i_sb, err);
cleanup:
if (bh)
brelse(bh);
dx_release(frames);
return err;
}
/*
* ext3_delete_entry deletes a directory entry by merging it with the
* previous entry
*/
static int ext3_delete_entry (handle_t *handle,
struct inode * dir,
struct ext3_dir_entry_2 * de_del,
struct buffer_head * bh)
{
struct ext3_dir_entry_2 * de, * pde;
int i;
i = 0;
pde = NULL;
de = (struct ext3_dir_entry_2 *) bh->b_data;
while (i < bh->b_size) {
if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
return -EIO;
if (de == de_del) {
BUFFER_TRACE(bh, "get_write_access");
ext3_journal_get_write_access(handle, bh);
if (pde)
pde->rec_len = ext3_rec_len_to_disk(
ext3_rec_len_from_disk(pde->rec_len) +
ext3_rec_len_from_disk(de->rec_len));
else
de->inode = 0;
dir->i_version++;
BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
ext3_journal_dirty_metadata(handle, bh);
return 0;
}
i += ext3_rec_len_from_disk(de->rec_len);
pde = de;
de = ext3_next_entry(de);
}
return -ENOENT;
}
static int ext3_add_nondir(handle_t *handle,
struct dentry *dentry, struct inode *inode)
{
int err = ext3_add_entry(handle, dentry, inode);
if (!err) {
ext3_mark_inode_dirty(handle, inode);
d_instantiate(dentry, inode);
unlock_new_inode(inode);
return 0;
}
drop_nlink(inode);
unlock_new_inode(inode);
iput(inode);
return err;
}
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
* is so far negative - it has no inode.
*
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
struct nameidata *nd)
{
handle_t *handle;
struct inode * inode;
int err, retries = 0;
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
inode = ext3_new_inode (handle, dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
inode->i_op = &ext3_file_inode_operations;
inode->i_fop = &ext3_file_operations;
ext3_set_aops(inode);
err = ext3_add_nondir(handle, dentry, inode);
}
ext3_journal_stop(handle);
if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
static int ext3_mknod (struct inode * dir, struct dentry *dentry,
int mode, dev_t rdev)
{
handle_t *handle;
struct inode *inode;
int err, retries = 0;
if (!new_valid_dev(rdev))
return -EINVAL;
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
inode = ext3_new_inode (handle, dir, mode);
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
#ifdef CONFIG_EXT3_FS_XATTR
inode->i_op = &ext3_special_inode_operations;
#endif
err = ext3_add_nondir(handle, dentry, inode);
}
ext3_journal_stop(handle);
if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
{
handle_t *handle;
struct inode * inode;
struct buffer_head * dir_block;
struct ext3_dir_entry_2 * de;
int err, retries = 0;
if (dir->i_nlink >= EXT3_LINK_MAX)
return -EMLINK;
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
inode = ext3_new_inode (handle, dir, S_IFDIR | mode);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
inode->i_op = &ext3_dir_inode_operations;
inode->i_fop = &ext3_dir_operations;
inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
dir_block = ext3_bread (handle, inode, 0, 1, &err);
if (!dir_block) {
drop_nlink(inode); /* is this nlink == 0? */
unlock_new_inode(inode);
ext3_mark_inode_dirty(handle, inode);
iput (inode);
goto out_stop;
}
BUFFER_TRACE(dir_block, "get_write_access");
ext3_journal_get_write_access(handle, dir_block);
de = (struct ext3_dir_entry_2 *) dir_block->b_data;
de->inode = cpu_to_le32(inode->i_ino);
de->name_len = 1;
de->rec_len = ext3_rec_len_to_disk(EXT3_DIR_REC_LEN(de->name_len));
strcpy (de->name, ".");
ext3_set_de_type(dir->i_sb, de, S_IFDIR);
de = ext3_next_entry(de);
de->inode = cpu_to_le32(dir->i_ino);
de->rec_len = ext3_rec_len_to_disk(inode->i_sb->s_blocksize -
EXT3_DIR_REC_LEN(1));
de->name_len = 2;
strcpy (de->name, "..");
ext3_set_de_type(dir->i_sb, de, S_IFDIR);
inode->i_nlink = 2;
BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
ext3_journal_dirty_metadata(handle, dir_block);
brelse (dir_block);
ext3_mark_inode_dirty(handle, inode);
err = ext3_add_entry (handle, dentry, inode);
if (err) {
inode->i_nlink = 0;
unlock_new_inode(inode);
ext3_mark_inode_dirty(handle, inode);
iput (inode);
goto out_stop;
}
inc_nlink(dir);
ext3_update_dx_flag(dir);
ext3_mark_inode_dirty(handle, dir);
d_instantiate(dentry, inode);
unlock_new_inode(inode);
out_stop:
ext3_journal_stop(handle);
if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
/*
* routine to check that the specified directory is empty (for rmdir)
*/
static int empty_dir (struct inode * inode)
{
unsigned long offset;
struct buffer_head * bh;
struct ext3_dir_entry_2 * de, * de1;
struct super_block * sb;
int err = 0;
sb = inode->i_sb;
if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
!(bh = ext3_bread (NULL, inode, 0, 0, &err))) {
if (err)
ext3_error(inode->i_sb, __func__,
"error %d reading directory #%lu offset 0",
err, inode->i_ino);
else
ext3_warning(inode->i_sb, __func__,
"bad directory (dir #%lu) - no data block",
inode->i_ino);
return 1;
}
de = (struct ext3_dir_entry_2 *) bh->b_data;
de1 = ext3_next_entry(de);
if (le32_to_cpu(de->inode) != inode->i_ino ||
!le32_to_cpu(de1->inode) ||
strcmp (".", de->name) ||
strcmp ("..", de1->name)) {
ext3_warning (inode->i_sb, "empty_dir",
"bad directory (dir #%lu) - no `.' or `..'",
inode->i_ino);
brelse (bh);
return 1;
}
offset = ext3_rec_len_from_disk(de->rec_len) +
ext3_rec_len_from_disk(de1->rec_len);
de = ext3_next_entry(de1);
while (offset < inode->i_size ) {
if (!bh ||
(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
err = 0;
brelse (bh);
bh = ext3_bread (NULL, inode,
offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err);
if (!bh) {
if (err)
ext3_error(sb, __func__,
"error %d reading directory"
" #%lu offset %lu",
err, inode->i_ino, offset);
offset += sb->s_blocksize;
continue;
}
de = (struct ext3_dir_entry_2 *) bh->b_data;
}
if (!ext3_check_dir_entry("empty_dir", inode, de, bh, offset)) {
de = (struct ext3_dir_entry_2 *)(bh->b_data +
sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
continue;
}
if (le32_to_cpu(de->inode)) {
brelse (bh);
return 0;
}
offset += ext3_rec_len_from_disk(de->rec_len);
de = ext3_next_entry(de);
}
brelse (bh);
return 1;
}
/* ext3_orphan_add() links an unlinked or truncated inode into a list of
* such inodes, starting at the superblock, in case we crash before the
* file is closed/deleted, or in case the inode truncate spans multiple
* transactions and the last transaction is not recovered after a crash.
*
* At filesystem recovery time, we walk this list deleting unlinked
* inodes and truncating linked inodes in ext3_orphan_cleanup().
*/
int ext3_orphan_add(handle_t *handle, struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct ext3_iloc iloc;
int err = 0, rc;
lock_super(sb);
if (!list_empty(&EXT3_I(inode)->i_orphan))
goto out_unlock;
/* Orphan handling is only valid for files with data blocks
* being truncated, or files being unlinked. */
/* @@@ FIXME: Observation from aviro:
* I think I can trigger J_ASSERT in ext3_orphan_add(). We block
* here (on lock_super()), so race with ext3_link() which might bump
* ->i_nlink. For, say it, character device. Not a regular file,
* not a directory, not a symlink and ->i_nlink > 0.
*/
J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
if (err)
goto out_unlock;
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_unlock;
/* Insert this inode at the head of the on-disk orphan list... */
NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
err = rc;
/* Only add to the head of the in-memory list if all the
* previous operations succeeded. If the orphan_add is going to
* fail (possibly taking the journal offline), we can't risk
* leaving the inode on the orphan list: stray orphan-list
* entries can cause panics at unmount time.
*
* This is safe: on error we're going to ignore the orphan list
* anyway on the next recovery. */
if (!err)
list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
out_unlock:
unlock_super(sb);
ext3_std_error(inode->i_sb, err);
return err;
}
/*
* ext3_orphan_del() removes an unlinked or truncated inode from the list
* of such inodes stored on disk, because it is finally being cleaned up.
*/
int ext3_orphan_del(handle_t *handle, struct inode *inode)
{
struct list_head *prev;
struct ext3_inode_info *ei = EXT3_I(inode);
struct ext3_sb_info *sbi;
unsigned long ino_next;
struct ext3_iloc iloc;
int err = 0;
lock_super(inode->i_sb);
if (list_empty(&ei->i_orphan)) {
unlock_super(inode->i_sb);
return 0;
}
ino_next = NEXT_ORPHAN(inode);
prev = ei->i_orphan.prev;
sbi = EXT3_SB(inode->i_sb);
jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
list_del_init(&ei->i_orphan);
/* If we're on an error path, we may not have a valid
* transaction handle with which to update the orphan list on
* disk, but we still need to remove the inode from the linked
* list in memory. */
if (!handle)
goto out;
err = ext3_reserve_inode_write(handle, inode, &iloc);
if (err)
goto out_err;
if (prev == &sbi->s_orphan) {
jbd_debug(4, "superblock will point to %lu\n", ino_next);
BUFFER_TRACE(sbi->s_sbh, "get_write_access");
err = ext3_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
} else {
struct ext3_iloc iloc2;
struct inode *i_prev =
&list_entry(prev, struct ext3_inode_info, i_orphan)->vfs_inode;
jbd_debug(4, "orphan inode %lu will point to %lu\n",
i_prev->i_ino, ino_next);
err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
if (err)
goto out_brelse;
NEXT_ORPHAN(i_prev) = ino_next;
err = ext3_mark_iloc_dirty(handle, i_prev, &iloc2);
}
if (err)
goto out_brelse;
NEXT_ORPHAN(inode) = 0;
err = ext3_mark_iloc_dirty(handle, inode, &iloc);
out_err:
ext3_std_error(inode->i_sb, err);
out:
unlock_super(inode->i_sb);
return err;
out_brelse:
brelse(iloc.bh);
goto out_err;
}
static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
{
int retval;
struct inode * inode;
struct buffer_head * bh;
struct ext3_dir_entry_2 * de;
handle_t *handle;
/* Initialize quotas before so that eventual writes go in
* separate transaction */
DQUOT_INIT(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
retval = -ENOENT;
bh = ext3_find_entry(dir, &dentry->d_name, &de);
if (!bh)
goto end_rmdir;
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
inode = dentry->d_inode;
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_rmdir;
retval = -ENOTEMPTY;
if (!empty_dir (inode))
goto end_rmdir;
retval = ext3_delete_entry(handle, dir, de, bh);
if (retval)
goto end_rmdir;
if (inode->i_nlink != 2)
ext3_warning (inode->i_sb, "ext3_rmdir",
"empty directory has nlink!=2 (%d)",
inode->i_nlink);
inode->i_version++;
clear_nlink(inode);
/* There's no need to set i_disksize: the fact that i_nlink is
* zero will ensure that the right thing happens during any
* recovery. */
inode->i_size = 0;
ext3_orphan_add(handle, inode);
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
drop_nlink(dir);
ext3_update_dx_flag(dir);
ext3_mark_inode_dirty(handle, dir);
end_rmdir:
ext3_journal_stop(handle);
brelse (bh);
return retval;
}
static int ext3_unlink(struct inode * dir, struct dentry *dentry)
{
int retval;
struct inode * inode;
struct buffer_head * bh;
struct ext3_dir_entry_2 * de;
handle_t *handle;
/* Initialize quotas before so that eventual writes go
* in separate transaction */
DQUOT_INIT(dentry->d_inode);
handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
retval = -ENOENT;
bh = ext3_find_entry(dir, &dentry->d_name, &de);
if (!bh)
goto end_unlink;
inode = dentry->d_inode;
retval = -EIO;
if (le32_to_cpu(de->inode) != inode->i_ino)
goto end_unlink;
if (!inode->i_nlink) {
ext3_warning (inode->i_sb, "ext3_unlink",
"Deleting nonexistent file (%lu), %d",
inode->i_ino, inode->i_nlink);
inode->i_nlink = 1;
}
retval = ext3_delete_entry(handle, dir, de, bh);
if (retval)
goto end_unlink;
dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
ext3_update_dx_flag(dir);
ext3_mark_inode_dirty(handle, dir);
drop_nlink(inode);
if (!inode->i_nlink)
ext3_orphan_add(handle, inode);
inode->i_ctime = dir->i_ctime;
ext3_mark_inode_dirty(handle, inode);
retval = 0;
end_unlink:
ext3_journal_stop(handle);
brelse (bh);
return retval;
}
static int ext3_symlink (struct inode * dir,
struct dentry *dentry, const char * symname)
{
handle_t *handle;
struct inode * inode;
int l, err, retries = 0;
l = strlen(symname)+1;
if (l > dir->i_sb->s_blocksize)
return -ENAMETOOLONG;
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 +
2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb));
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
inode = ext3_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_stop;
if (l > sizeof (EXT3_I(inode)->i_data)) {
inode->i_op = &ext3_symlink_inode_operations;
ext3_set_aops(inode);
/*
* page_symlink() calls into ext3_prepare/commit_write.
* We have a transaction open. All is sweetness. It also sets
* i_size in generic_commit_write().
*/
err = __page_symlink(inode, symname, l, 1);
if (err) {
drop_nlink(inode);
unlock_new_inode(inode);
ext3_mark_inode_dirty(handle, inode);
iput (inode);
goto out_stop;
}
} else {
inode->i_op = &ext3_fast_symlink_inode_operations;
memcpy((char*)&EXT3_I(inode)->i_data,symname,l);
inode->i_size = l-1;
}
EXT3_I(inode)->i_disksize = inode->i_size;
err = ext3_add_nondir(handle, dentry, inode);
out_stop:
ext3_journal_stop(handle);
if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
static int ext3_link (struct dentry * old_dentry,
struct inode * dir, struct dentry *dentry)
{
handle_t *handle;
struct inode *inode = old_dentry->d_inode;
int err, retries = 0;
if (inode->i_nlink >= EXT3_LINK_MAX)
return -EMLINK;
/*
* Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
* otherwise has the potential to corrupt the orphan inode list.
*/
if (inode->i_nlink == 0)
return -ENOENT;
retry:
handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS);
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(dir))
handle->h_sync = 1;
inode->i_ctime = CURRENT_TIME_SEC;
inc_nlink(inode);
atomic_inc(&inode->i_count);
err = ext3_add_entry(handle, dentry, inode);
if (!err) {
ext3_mark_inode_dirty(handle, inode);
d_instantiate(dentry, inode);
} else {
drop_nlink(inode);
iput(inode);
}
ext3_journal_stop(handle);
if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
goto retry;
return err;
}
#define PARENT_INO(buffer) \
(ext3_next_entry((struct ext3_dir_entry_2 *)(buffer))->inode)
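/*
 * The '..' entry always immediately follows '.' in a directory's first
 * block, so stepping one entry forward from the start of the block
 * yields the parent's inode number.
 */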
/*
* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
*/
static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
struct inode * new_dir,struct dentry *new_dentry)
{
handle_t *handle;
struct inode * old_inode, * new_inode;
struct buffer_head * old_bh, * new_bh, * dir_bh;
struct ext3_dir_entry_2 * old_de, * new_de;
int retval;
old_bh = new_bh = dir_bh = NULL;
/* Initialize quotas before so that eventual writes go
* in separate transaction */
if (new_dentry->d_inode)
DQUOT_INIT(new_dentry->d_inode);
handle = ext3_journal_start(old_dir, 2 *
EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
if (IS_ERR(handle))
return PTR_ERR(handle);
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
handle->h_sync = 1;
old_bh = ext3_find_entry(old_dir, &old_dentry->d_name, &old_de);
/*
* Check for inode number is _not_ due to possible IO errors.
* We might rmdir the source, keep it as pwd of some process
* and merrily kill the link to whatever was created under the
* same name. Goodbye sticky bit ;-<
*/
old_inode = old_dentry->d_inode;
retval = -ENOENT;
if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
goto end_rename;
new_inode = new_dentry->d_inode;
new_bh = ext3_find_entry(new_dir, &new_dentry->d_name, &new_de);
if (new_bh) {
if (!new_inode) {
brelse (new_bh);
new_bh = NULL;
}
}
if (S_ISDIR(old_inode->i_mode)) {
if (new_inode) {
retval = -ENOTEMPTY;
if (!empty_dir (new_inode))
goto end_rename;
}
retval = -EIO;
dir_bh = ext3_bread (handle, old_inode, 0, 0, &retval);
if (!dir_bh)
goto end_rename;
if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
goto end_rename;
retval = -EMLINK;
if (!new_inode && new_dir!=old_dir &&
new_dir->i_nlink >= EXT3_LINK_MAX)
goto end_rename;
}
if (!new_bh) {
retval = ext3_add_entry (handle, new_dentry, old_inode);
if (retval)
goto end_rename;
} else {
BUFFER_TRACE(new_bh, "get write access");
ext3_journal_get_write_access(handle, new_bh);
new_de->inode = cpu_to_le32(old_inode->i_ino);
if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
EXT3_FEATURE_INCOMPAT_FILETYPE))
new_de->file_type = old_de->file_type;
new_dir->i_version++;
new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, new_dir);
BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
ext3_journal_dirty_metadata(handle, new_bh);
brelse(new_bh);
new_bh = NULL;
}
/*
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
old_inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, old_inode);
/*
* ok, that's it
*/
if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
old_de->name_len != old_dentry->d_name.len ||
strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
(retval = ext3_delete_entry(handle, old_dir,
old_de, old_bh)) == -ENOENT) {
/* old_de could have moved from under us during htree split, so
* make sure that we are deleting the right entry. We might
* also be pointing to a stale entry in the unused part of
* old_bh so just checking inum and the name isn't enough. */
struct buffer_head *old_bh2;
struct ext3_dir_entry_2 *old_de2;
old_bh2 = ext3_find_entry(old_dir, &old_dentry->d_name,
&old_de2);
if (old_bh2) {
retval = ext3_delete_entry(handle, old_dir,
old_de2, old_bh2);
brelse(old_bh2);
}
}
if (retval) {
ext3_warning(old_dir->i_sb, "ext3_rename",
"Deleting old file (%lu), %d, error=%d",
old_dir->i_ino, old_dir->i_nlink, retval);
}
if (new_inode) {
drop_nlink(new_inode);
new_inode->i_ctime = CURRENT_TIME_SEC;
}
old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
ext3_update_dx_flag(old_dir);
if (dir_bh) {
BUFFER_TRACE(dir_bh, "get_write_access");
ext3_journal_get_write_access(handle, dir_bh);
PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
ext3_journal_dirty_metadata(handle, dir_bh);
drop_nlink(old_dir);
if (new_inode) {
drop_nlink(new_inode);
} else {
inc_nlink(new_dir);
ext3_update_dx_flag(new_dir);
ext3_mark_inode_dirty(handle, new_dir);
}
}
ext3_mark_inode_dirty(handle, old_dir);
if (new_inode) {
ext3_mark_inode_dirty(handle, new_inode);
if (!new_inode->i_nlink)
ext3_orphan_add(handle, new_inode);
}
retval = 0;
end_rename:
brelse (dir_bh);
brelse (old_bh);
brelse (new_bh);
ext3_journal_stop(handle);
return retval;
}
/*
* directories can handle most operations...
*/
const struct inode_operations ext3_dir_inode_operations = {
.create = ext3_create,
.lookup = ext3_lookup,
.link = ext3_link,
.unlink = ext3_unlink,
.symlink = ext3_symlink,
.mkdir = ext3_mkdir,
.rmdir = ext3_rmdir,
.mknod = ext3_mknod,
.rename = ext3_rename,
.setattr = ext3_setattr,
#ifdef CONFIG_EXT3_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
.permission = ext3_permission,
};
const struct inode_operations ext3_special_inode_operations = {
.setattr = ext3_setattr,
#ifdef CONFIG_EXT3_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
.permission = ext3_permission,
};
| gpl-2.0 |
aloksinha2001/rk3x_kernel_3.0.36 | drivers/net/wireless/wifi_power/vendor/wp_tchip_tr802d.c | 137 | 2147 | /*
* wifi_power.c for MID_AIGO_E700.
*
* Power control for WIFI module.
*
* There are Power supply and Power Up/Down controls for WIFI typically.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include "wifi_power.h"
#if (WIFI_GPIO_POWER_CONTROL == 1)
/*
* GPIO to control LDO/DCDC.
*
* Controls the power supplies of the WIFI module, typically the 3.3V
* and 1.8V rails, and possibly a 1.2V rail as well.
*
* If an external GPIO expander is used instead, see the example below:
* POWER_USE_EXT_GPIO, 0, 0, 0, PCA9554_Pin1, GPIO_HIGH
*/
struct wifi_power power_gpio =
{
POWER_USE_GPIO, POWER_GPIO_IOMUX, GPIOG0_UART0_MMC1DET_NAME,
IOMUXA_GPIO1_C0, GPIOPortG_Pin0, GPIO_HIGH
};
/*
* GPIO to control WIFI PowerDOWN/RESET.
*
* Controls the PowerDown pin of the WIFI module. On some modules the
* PowerDown pin is wired together with the Reset pin.
*/
struct wifi_power power_save_gpio =
{
POWER_USE_GPIO, POWER_GPIO_IOMUX, GPIOF01_UART2_SEL_NAME,
IOMUXB_GPIO0_B01, GPIOPortB_Pin0, GPIO_HIGH
};
/*
* GPIO to reset WIFI. Keep this as NULL normally.
*
* Controls the Reset pin of the WIFI module; most WiFi modules do not
* use this pin.
*/
struct wifi_power power_reset_gpio =
{
0, 0, 0, 0, 0, 0
};
/*
* If an external GPIO chip such as a PCA9554 is being used, please
* implement the following function. Its job is to switch the specified
* GPIO 'id' into the requested 'sens' state.
*
* id: the GPIO identifier, given as an integer, such as GPIOPortF_Pin0
* or an external name defined in struct wifi_power.
* sens: the value the GPIO should be set to, usually GPIO_HIGH or
* GPIO_LOW (i.e. high or low).
*/
void wifi_extgpio_operation(u8 id, u8 sens)
{
//pca955x_gpio_direction_output(id, sens);
}
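/*
 * On a board wired through a PCA9554 expander this would typically just
 * forward to the expander driver, e.g. (assuming such a driver is
 * available, as hinted by the commented-out call above):
 *
 * pca955x_gpio_direction_output(id, sens);
 */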
/*
* To power the WIFI module down through its GPIO controls from
* elsewhere in the system, call the following interface:
* void rockchip_wifi_shutdown();
* Note that it is only available when WIFI_GPIO_POWER_CONTROL is
* enabled.
*/
#endif /* WIFI_GPIO_POWER_CONTROL */
| gpl-2.0 |
Anik1199/kernel_sprout | block/bfq-iosched.c | 137 | 133531 | /*
* Budget Fair Queueing (BFQ) disk scheduler.
*
* Based on ideas and code from CFQ:
* Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
*
* Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
* Paolo Valente <paolo.valente@unimore.it>
*
* Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
*
* Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
* file.
*
* BFQ is a proportional-share storage-I/O scheduling algorithm based on
* the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
* measured in number of sectors, to processes instead of time slices. The
* device is not granted to the in-service process for a given time slice,
* but until it has exhausted its assigned budget. This change from the time
* to the service domain allows BFQ to distribute the device throughput
* among processes as desired, without any distortion due to ZBR, workload
* fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
* called B-WF2Q+, to schedule processes according to their budgets. More
* precisely, BFQ schedules queues associated to processes. Thanks to the
* accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
* I/O-bound processes issuing sequential requests (to boost the
* throughput), and yet guarantee a low latency to interactive and soft
* real-time applications.
*
* BFQ is described in [1], where also a reference to the initial, more
* theoretical paper on BFQ can be found. The interested reader can find
* in the latter paper full details on the main algorithm, as well as
* formulas of the guarantees and formal proofs of all the properties.
* With respect to the version of BFQ presented in these papers, this
* implementation adds a few more heuristics, such as the one that
* guarantees a low latency to soft real-time applications, and a
* hierarchical extension based on H-WF2Q+.
*
* B-WF2Q+ is based on WF2Q+, that is described in [2], together with
* H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
* complexity derives from the one introduced with EEVDF in [3].
*
* [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
* with the BFQ Disk I/O Scheduler'',
* Proceedings of the 5th Annual International Systems and Storage
* Conference (SYSTOR '12), June 2012.
*
* http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
*
* [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
* Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
* Oct 1997.
*
* http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
*
* [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
* First: A Flexible and Accurate Mechanism for Proportional Share
* Resource Allocation,'' technical report.
*
* http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include "bfq.h"
#include "blk.h"
/* Max number of dispatches in one round of service. */
static const int bfq_quantum = 4;
/* Expiration time of sync (0) and async (1) requests, in jiffies. */
static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* Maximum backwards seek, in KiB. */
static const int bfq_back_max = 16 * 1024;
/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;
/* Idling period duration, in jiffies. */
static int bfq_slice_idle = HZ / 125;
/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;
static const int bfq_max_budget_async_rq = 4;
/*
* Async to sync throughput distribution is controlled as follows:
* when an async request is served, the entity is charged the number
* of sectors of the request, multiplied by the factor below
*/
static const int bfq_async_charge_factor = 10;
/* Default timeout values, in jiffies, approximating CFQ defaults. */
static const int bfq_timeout_sync = HZ / 8;
static int bfq_timeout_async = HZ / 25;
struct kmem_cache *bfq_pool;
/* Below this threshold (in ms), we consider thinktime immediate. */
#define BFQ_MIN_TT 2
/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD 4
#define BFQ_HW_QUEUE_SAMPLES 32
#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
/* Min samples used for peak rate estimation (for autotuning). */
#define BFQ_PEAK_RATE_SAMPLES 32
/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT 16
/*
* By default, BFQ computes the duration of the weight raising for
* interactive applications automatically, using the following formula:
* duration = (R / r) * T, where r is the peak rate of the device, and
* R and T are two reference parameters.
* In particular, R is the peak rate of the reference device (see below),
* and T is a reference time: given the systems that are likely to be
* installed on the reference device according to its speed class, T is
* about the maximum time needed, under BFQ and while reading two files in
* parallel, to load typical large applications on these systems.
* In practice, the slower/faster the device at hand is, the more/less it
* takes to load applications with respect to the reference device.
* Accordingly, the longer/shorter BFQ grants weight raising to interactive
* applications.
*
* BFQ uses four different reference pairs (R, T), depending on:
* . whether the device is rotational or non-rotational;
* . whether the device is slow, such as old or portable HDDs, as well as
* SD cards, or fast, such as newer HDDs and SSDs.
*
* The device's speed class is dynamically (re)detected in
* bfq_update_peak_rate() every time the estimated peak rate is updated.
*
* In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
* are the reference values for a slow/fast rotational device, whereas
* R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
* a slow/fast non-rotational device. Finally, device_speed_thresh are the
* thresholds used to switch between speed classes.
* Both the reference peak rates and the thresholds are measured in
* sectors/usec, left-shifted by BFQ_RATE_SHIFT.
*/
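/*
 * Example: a device whose measured peak rate r is half the reference
 * rate R of its class gets duration = (R / r) * T = 2T, i.e.
 * interactive weight raising lasts twice as long as on the reference
 * device.
 */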
static int R_slow[2] = {1536, 10752};
static int R_fast[2] = {17415, 34791};
/*
* To improve readability, a conversion function is used to initialize the
* following arrays, which entails that they can be initialized only in a
* function.
*/
static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];
#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
#include "bfq-ioc.c"
#include "bfq-sched.c"
#include "bfq-cgroup.c"
#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
IOPRIO_CLASS_RT)
#define bfq_sample_valid(samples) ((samples) > 80)
/*
* We regard a request as SYNC, if either it's a read or has the SYNC bit
* set (in which case it could also be a direct WRITE).
*/
static inline int bfq_bio_sync(struct bio *bio)
{
if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
return 1;
return 0;
}
/*
* Scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing.
*/
static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
if (bfqd->queued != 0) {
bfq_log(bfqd, "schedule dispatch");
kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work);
}
}
/*
* Lifted from AS - choose which of rq1 and rq2 is best served now.
* We choose the request that is closest to the head right now. Distance
* behind the head is penalized and only allowed to a certain extent.
*/
static struct request *bfq_choose_req(struct bfq_data *bfqd,
struct request *rq1,
struct request *rq2,
sector_t last)
{
sector_t s1, s2, d1 = 0, d2 = 0;
unsigned long back_max;
#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
unsigned wrap = 0; /* bit mask: requests behind the disk head? */
if (rq1 == NULL || rq1 == rq2)
return rq2;
if (rq2 == NULL)
return rq1;
if (rq_is_sync(rq1) && !rq_is_sync(rq2))
return rq1;
else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
return rq2;
if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
return rq1;
else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
return rq2;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
/*
* By definition, 1KiB is 2 sectors.
*/
back_max = bfqd->bfq_back_max * 2;
/*
* Strict one-way elevator, _except_ that we allow short backward
* seeks, which are penalized as twice the cost of a similar
* forward seek.
*/
if (s1 >= last)
d1 = s1 - last;
else if (s1 + back_max >= last)
d1 = (last - s1) * bfqd->bfq_back_penalty;
else
wrap |= BFQ_RQ1_WRAP;
if (s2 >= last)
d2 = s2 - last;
else if (s2 + back_max >= last)
d2 = (last - s2) * bfqd->bfq_back_penalty;
else
wrap |= BFQ_RQ2_WRAP;
/* Found required data */
/*
* By doing switch() on the bit mask "wrap" we avoid having to
* check two variables for all permutations: --> faster!
*/
switch (wrap) {
case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
if (d1 < d2)
return rq1;
else if (d2 < d1)
return rq2;
else {
if (s1 >= s2)
return rq1;
else
return rq2;
}
case BFQ_RQ2_WRAP:
return rq1;
case BFQ_RQ1_WRAP:
return rq2;
case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
default:
/*
* Since both rqs are wrapped,
* start with the one that's further behind head
* (--> only *one* back seek required),
* since back seek takes more time than forward.
*/
if (s1 <= s2)
return rq1;
else
return rq2;
}
}
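/*
* Worked example (illustrative numbers): with last = 1000, a 16 KiB
* bfq_back_max (back_max = 32 sectors) and bfq_back_penalty = 2, a
* request at sector 1020 gets d = 20; one at sector 990 gets
* d = (1000 - 990) * 2 = 20 (short backward seek, penalized); one at
* sector 900 wraps (900 + 32 < 1000) and loses to any non-wrapped
* request.
*/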
static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
sector_t sector, struct rb_node **ret_parent,
struct rb_node ***rb_link)
{
struct rb_node **p, *parent;
struct bfq_queue *bfqq = NULL;
parent = NULL;
p = &root->rb_node;
while (*p) {
struct rb_node **n;
parent = *p;
bfqq = rb_entry(parent, struct bfq_queue, pos_node);
/*
* Sort strictly based on sector. Smallest to the left,
* largest to the right.
*/
if (sector > blk_rq_pos(bfqq->next_rq))
n = &(*p)->rb_right;
else if (sector < blk_rq_pos(bfqq->next_rq))
n = &(*p)->rb_left;
else
break;
p = n;
bfqq = NULL;
}
*ret_parent = parent;
if (rb_link)
*rb_link = p;
bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
(long long unsigned)sector,
bfqq != NULL ? bfqq->pid : 0);
return bfqq;
}
static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
struct bfq_queue *__bfqq;
if (bfqq->pos_root != NULL) {
rb_erase(&bfqq->pos_node, bfqq->pos_root);
bfqq->pos_root = NULL;
}
if (bfq_class_idle(bfqq))
return;
if (!bfqq->next_rq)
return;
bfqq->pos_root = &bfqd->rq_pos_tree;
__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
blk_rq_pos(bfqq->next_rq), &parent, &p);
if (__bfqq == NULL) {
rb_link_node(&bfqq->pos_node, parent, p);
rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
} else
bfqq->pos_root = NULL;
}
/*
* Tell whether there are active queues or groups with differentiated weights.
*/
static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
BUG_ON(!bfqd->hw_tag);
/*
* For weights to differ, at least one of the trees must contain
* at least two nodes.
*/
return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
(bfqd->queue_weights_tree.rb_node->rb_left ||
bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_CGROUP_BFQIO
) ||
(!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
(bfqd->group_weights_tree.rb_node->rb_left ||
bfqd->group_weights_tree.rb_node->rb_right)
#endif
);
}
/*
* If the weight-counter tree passed as input contains no counter for
* the weight of the input entity, then add that counter; otherwise just
* increment the existing counter.
*
* Note that weight-counter trees contain few nodes in mostly symmetric
* scenarios. For example, if all queues have the same weight, then the
* weight-counter tree for the queues may contain at most one node.
* This holds even if low_latency is on, because weight-raised queues
* are not inserted in the tree.
* In most scenarios, the rate at which nodes are created/destroyed
* should be low too.
*/
static void bfq_weights_tree_add(struct bfq_data *bfqd,
struct bfq_entity *entity,
struct rb_root *root)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
/*
* Do not insert if:
* - the device does not support queueing;
* - the entity is already associated with a counter, which happens if:
* 1) the entity is associated with a queue, 2) a request arrival
* has caused the queue to become both non-weight-raised, and hence
* change its weight, and backlogged; in this respect, each
* of the two events causes an invocation of this function,
* 3) this is the invocation of this function caused by the second
* event. This second invocation is actually useless, and we handle
* this fact by exiting immediately. More efficient or clearer
* solutions might possibly be adopted.
*/
if (!bfqd->hw_tag || entity->weight_counter)
return;
while (*new) {
struct bfq_weight_counter *__counter = container_of(*new,
struct bfq_weight_counter,
weights_node);
parent = *new;
if (entity->weight == __counter->weight) {
entity->weight_counter = __counter;
goto inc_counter;
}
if (entity->weight < __counter->weight)
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
}
entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
GFP_ATOMIC);
/* If the allocation fails, just exit: the weight is simply not tracked. */
if (!entity->weight_counter)
return;
entity->weight_counter->weight = entity->weight;
rb_link_node(&entity->weight_counter->weights_node, parent, new);
rb_insert_color(&entity->weight_counter->weights_node, root);
inc_counter:
entity->weight_counter->num_active++;
}
/*
* Decrement the weight counter associated with the entity, and, if the
* counter reaches 0, remove the counter from the tree.
* See the comments to the function bfq_weights_tree_add() for considerations
* about overhead.
*/
static void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_entity *entity,
struct rb_root *root)
{
/*
* Check whether the entity is actually associated with a counter.
* In fact, the device may not be considered NCQ-capable for a while,
* which implies that no insertion in the weight trees is performed,
* after which the device may start to be deemed NCQ-capable, and hence
* this function may start to be invoked. This may cause the function
* to be invoked for entities that are not associated with any counter.
*/
if (!entity->weight_counter)
return;
BUG_ON(RB_EMPTY_ROOT(root));
BUG_ON(entity->weight_counter->weight != entity->weight);
BUG_ON(!entity->weight_counter->num_active);
entity->weight_counter->num_active--;
if (entity->weight_counter->num_active > 0)
goto reset_entity_pointer;
rb_erase(&entity->weight_counter->weights_node, root);
kfree(entity->weight_counter);
reset_entity_pointer:
entity->weight_counter = NULL;
}
static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct request *last)
{
struct rb_node *rbnext = rb_next(&last->rb_node);
struct rb_node *rbprev = rb_prev(&last->rb_node);
struct request *next = NULL, *prev = NULL;
BUG_ON(RB_EMPTY_NODE(&last->rb_node));
if (rbprev != NULL)
prev = rb_entry_rq(rbprev);
if (rbnext != NULL)
next = rb_entry_rq(rbnext);
else {
rbnext = rb_first(&bfqq->sort_list);
if (rbnext && rbnext != &last->rb_node)
next = rb_entry_rq(rbnext);
}
return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}
/* see the definition of bfq_async_charge_factor for details */
static inline unsigned long bfq_serv_to_charge(struct request *rq,
struct bfq_queue *bfqq)
{
return blk_rq_sectors(rq) *
(1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
bfq_async_charge_factor));
}
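/*
* Illustrative arithmetic: a sync queue, or any weight-raised queue
* (wr_coeff > 1), is charged exactly blk_rq_sectors(rq); a
* non-weight-raised async queue is instead charged
* blk_rq_sectors(rq) * (1 + bfq_async_charge_factor), e.g., a
* 100-sector request costs 100 * (1 + factor) sectors of budget.
*/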
/**
* bfq_updated_next_req - update the queue after a new next_rq selection.
* @bfqd: the device data the queue belongs to.
* @bfqq: the queue to update.
*
* If the first request of a queue changes we make sure that the queue
* has enough budget to serve at least its first request (if the
* request has grown). We do this because if the queue has not enough
* budget for its first request, it has to go through two dispatch
* rounds to actually get it dispatched.
*/
static void bfq_updated_next_req(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
struct bfq_service_tree *st = bfq_entity_service_tree(entity);
struct request *next_rq = bfqq->next_rq;
unsigned long new_budget;
if (next_rq == NULL)
return;
if (bfqq == bfqd->in_service_queue)
/*
* In order not to break guarantees, budgets cannot be
* changed after an entity has been selected.
*/
return;
BUG_ON(entity->tree != &st->active);
BUG_ON(entity == entity->sched_data->in_service_entity);
new_budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq));
if (entity->budget != new_budget) {
entity->budget = new_budget;
bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
new_budget);
bfq_activate_bfqq(bfqd, bfqq);
}
}
static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
u64 dur;
if (bfqd->bfq_wr_max_time > 0)
return bfqd->bfq_wr_max_time;
dur = bfqd->RT_prod;
do_div(dur, bfqd->peak_rate);
return dur;
}
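/*
* Worked example (illustrative): since RT_prod = R * T, a device whose
* measured peak_rate equals the reference rate R gets a duration of
* exactly T jiffies, while a device twice as fast (peak_rate = 2R)
* gets T / 2, consistently with the comments on R and T above.
*/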
static inline unsigned
bfq_bfqq_cooperations(struct bfq_queue *bfqq)
{
return bfqq->bic ? bfqq->bic->cooperations : 0;
}
static inline void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
{
if (bic->saved_idle_window)
bfq_mark_bfqq_idle_window(bfqq);
else
bfq_clear_bfqq_idle_window(bfqq);
if (bic->saved_IO_bound)
bfq_mark_bfqq_IO_bound(bfqq);
else
bfq_clear_bfqq_IO_bound(bfqq);
/* Assuming that the flag in_large_burst is already correctly set */
if (bic->wr_time_left && bfqq->bfqd->low_latency &&
!bfq_bfqq_in_large_burst(bfqq) &&
bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
/*
* Start a weight raising period with the duration given by
* the raising_time_left snapshot.
*/
if (bfq_bfqq_busy(bfqq))
bfqq->bfqd->wr_busy_queues++;
bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
bfqq->wr_cur_max_time = bic->wr_time_left;
bfqq->last_wr_start_finish = jiffies;
bfqq->entity.ioprio_changed = 1;
}
/*
* Clear wr_time_left to prevent bfq_bfqq_save_state() from
* getting confused about the queue's need of a weight-raising
* period.
*/
bic->wr_time_left = 0;
}
/* Must be called with the queue_lock held. */
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
int process_refs, io_refs;
io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
BUG_ON(process_refs < 0);
return process_refs;
}
/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_queue *item;
struct hlist_node *n;
hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
hlist_del_init(&item->burst_list_node);
hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
bfqd->burst_size = 1;
}
/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
/* Increment burst size to take into account also bfqq */
bfqd->burst_size++;
if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
struct bfq_queue *pos, *bfqq_item;
struct hlist_node *n;
/*
* Enough queues have been activated shortly after each
* other to consider this burst as large.
*/
bfqd->large_burst = true;
/*
* We can now mark all queues in the burst list as
* belonging to a large burst.
*/
hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
burst_list_node)
bfq_mark_bfqq_in_large_burst(bfqq_item);
bfq_mark_bfqq_in_large_burst(bfqq);
/*
* From now on, and until the current burst finishes, any
* new queue being activated shortly after the last queue
* was inserted in the burst can be immediately marked as
* belonging to a large burst. So the burst list is not
* needed any more. Remove it.
*/
hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
burst_list_node)
hlist_del_init(&pos->burst_list_node);
} else /* burst not yet large: add bfqq to the burst list */
hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}
/*
* If many queues happen to become active shortly after each other, then,
* to help the processes associated to these queues get their job done as
* soon as possible, it is usually better to not grant either weight-raising
* or device idling to these queues. In this comment we describe, firstly,
* the reasons why this fact holds, and, secondly, the next function, which
* implements the main steps needed to properly mark these queues so that
* they can then be treated in a different way.
*
* As for the terminology, we say that a queue becomes active, i.e.,
* switches from idle to backlogged, either when it is created (as a
* consequence of the arrival of an I/O request), or, if already existing,
* when a new request for the queue arrives while the queue is idle.
* Bursts of activations, i.e., activations of different queues occurring
* shortly after each other, are typically caused by services or applications
* that spawn or reactivate many parallel threads/processes. Examples are
* systemd during boot or git grep.
*
* These services or applications benefit mostly from a high throughput:
* the quicker the requests of the activated queues are cumulatively served,
* the sooner the target job of these queues gets completed. As a consequence,
* weight-raising any of these queues, which also implies idling the device
* for it, is almost always counterproductive: in most cases it just lowers
* throughput.
*
* On the other hand, a burst of activations may be also caused by the start
* of an application that does not consist in a lot of parallel I/O-bound
* threads. In fact, with a complex application, the burst may be just a
* consequence of the fact that several processes need to be executed to
* start-up the application. To start an application as quickly as possible,
* the best thing to do is to privilege the I/O related to the application
* with respect to all other I/O. Therefore, the best strategy to start as
* quickly as possible an application that causes a burst of activations is
* to weight-raise all the queues activated during the burst. This is the
* exact opposite of the best strategy for the other type of bursts.
*
* In the end, to take the best action for each of the two cases, the two
* types of bursts need to be distinguished. Fortunately, this seems
* relatively easy to do, by looking at the sizes of the bursts. In
* particular, we found a threshold such that bursts with a larger size
* than that threshold are apparently caused only by services or commands
* such as systemd or git grep. For brevity, hereafter we call just 'large'
* these bursts. BFQ *does not* weight-raise queues whose activations occur
* in a large burst. In addition, for each of these queues BFQ performs or
* does not perform idling depending on which choice boosts the throughput
* most. The exact choice depends on the device and request pattern at
* hand.
*
* Turning back to the next function, it implements all the steps needed
* to detect the occurrence of a large burst and to properly mark all the
* queues belonging to it (so that they can then be treated in a different
* way). This goal is achieved by maintaining a special "burst list" that
* holds, temporarily, the queues that belong to the burst in progress. The
* list is then used to mark these queues as belonging to a large burst if
* the burst does become large. The main steps are the following.
*
* . when the very first queue is activated, the queue is inserted into the
* list (as it could be the first queue in a possible burst)
*
* . if the current burst has not yet become large, and a queue Q that does
* not yet belong to the burst is activated shortly after the last time
* at which a new queue entered the burst list, then the function appends
* Q to the burst list
*
* . if, as a consequence of the previous step, the burst size reaches
* the large-burst threshold, then
*
* . all the queues in the burst list are marked as belonging to a
* large burst
*
* . the burst list is deleted; in fact, the burst list already served
* its purpose (keeping temporarily track of the queues in a burst,
* so as to be able to mark them as belonging to a large burst in the
* previous sub-step), and now is not needed any more
*
* . the device enters a large-burst mode
*
* . if a queue Q that does not belong to the burst is activated while
* the device is in large-burst mode and shortly after the last time
* at which a queue either entered the burst list or was marked as
* belonging to the current large burst, then Q is immediately marked
* as belonging to a large burst.
*
* . if a queue Q that does not belong to the burst is activated a while
* later, i.e., not shortly after the last time at which a queue
* either entered the burst list or was marked as belonging to the
* current large burst, then the current burst is deemed finished and:
*
* . the large-burst mode is reset if set
*
* . the burst list is emptied
*
* . Q is inserted in the burst list, as Q may be the first queue
* in a possible new burst (then the burst list contains just Q
* after this step).
*/
static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool idle_for_long_time)
{
/*
* If bfqq happened to be activated in a burst, but has been idle
* for at least as long as an interactive queue, then we assume
* that, in the overall I/O initiated in the burst, the I/O
* associated to bfqq is finished. So bfqq does not need to be
* treated as a queue belonging to a burst anymore. Accordingly,
* we reset bfqq's in_large_burst flag if set, and remove bfqq
* from the burst list if it's there. We do not, however, decrement
* burst_size, because the fact that bfqq no longer needs to belong
* to the burst list does not invalidate the fact that bfqq may
* have been activated during the current burst.
*/
if (idle_for_long_time) {
hlist_del_init(&bfqq->burst_list_node);
bfq_clear_bfqq_in_large_burst(bfqq);
}
/*
* If bfqq is already in the burst list or is part of a large
* burst, then there is nothing else to do.
*/
if (!hlist_unhashed(&bfqq->burst_list_node) ||
bfq_bfqq_in_large_burst(bfqq))
return;
/*
* If bfqq's activation happens late enough, then the current
* burst is finished, and related data structures must be reset.
*
* In this respect, consider the special case where bfqq is the very
* first queue being activated. In this case, last_ins_in_burst is
* not yet significant when we get here. But it is easy to verify
* that, whether or not the following condition is true, bfqq will
* end up being inserted into the burst list. In particular the
* list will happen to contain only bfqq. And this is exactly what
* has to happen, as bfqq may be the first queue in a possible
* burst.
*/
if (time_is_before_jiffies(bfqd->last_ins_in_burst +
bfqd->bfq_burst_interval)) {
bfqd->large_burst = false;
bfq_reset_burst_list(bfqd, bfqq);
return;
}
/*
* If we get here, then bfqq is being activated shortly after the
* last queue. So, if the current burst is also large, we can mark
* bfqq as belonging to this large burst immediately.
*/
if (bfqd->large_burst) {
bfq_mark_bfqq_in_large_burst(bfqq);
return;
}
/*
* If we get here, then a large-burst state has not yet been
* reached, but bfqq is being activated shortly after the last
* queue. Then we add bfqq to the burst.
*/
bfq_add_to_burst(bfqd, bfqq);
}
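/*
* Illustrative timeline (hypothetical large-burst threshold of 3):
* queues A and B are activated shortly after each other and fill the
* burst list; when C is activated shortly after B, burst_size reaches
* the threshold, A, B and C are all marked in_large_burst, and the
* list is emptied. A queue D activated much later instead resets
* large_burst and becomes the only element of a new burst list.
*/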
static void bfq_add_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_entity *entity = &bfqq->entity;
struct bfq_data *bfqd = bfqq->bfqd;
struct request *next_rq, *prev;
unsigned long old_wr_coeff = bfqq->wr_coeff;
bool interactive = false;
bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
elv_rb_add(&bfqq->sort_list, rq);
/*
* Check if this request is a better next-serve candidate.
*/
prev = bfqq->next_rq;
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
BUG_ON(next_rq == NULL);
bfqq->next_rq = next_rq;
/*
* Adjust priority tree position, if next_rq changes.
*/
if (prev != bfqq->next_rq)
bfq_rq_pos_tree_add(bfqd, bfqq);
if (!bfq_bfqq_busy(bfqq)) {
bool soft_rt, coop_or_in_burst,
idle_for_long_time = time_is_before_jiffies(
bfqq->budget_timeout +
bfqd->bfq_wr_min_idle_time);
if (bfq_bfqq_sync(bfqq)) {
bool already_in_burst =
!hlist_unhashed(&bfqq->burst_list_node) ||
bfq_bfqq_in_large_burst(bfqq);
bfq_handle_burst(bfqd, bfqq, idle_for_long_time);
/*
* If bfqq was not already in the current burst,
* then, at this point, bfqq either has been
* added to the current burst or has caused the
* current burst to terminate. In particular, in
* the second case, bfqq has become the first
* queue in a possible new burst.
* In both cases last_ins_in_burst needs to be
* moved forward.
*/
if (!already_in_burst)
bfqd->last_ins_in_burst = jiffies;
}
coop_or_in_burst = bfq_bfqq_in_large_burst(bfqq) ||
bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh;
soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
!coop_or_in_burst &&
time_is_before_jiffies(bfqq->soft_rt_next_start);
interactive = !coop_or_in_burst && idle_for_long_time;
entity->budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq));
if (!bfq_bfqq_IO_bound(bfqq)) {
if (time_before(jiffies,
RQ_BIC(rq)->ttime.last_end_request +
bfqd->bfq_slice_idle)) {
bfqq->requests_within_timer++;
if (bfqq->requests_within_timer >=
bfqd->bfq_requests_within_timer)
bfq_mark_bfqq_IO_bound(bfqq);
} else
bfqq->requests_within_timer = 0;
}
if (!bfqd->low_latency)
goto add_bfqq_busy;
if (bfq_bfqq_just_split(bfqq))
goto set_ioprio_changed;
/*
* If the queue:
* - is not being boosted,
* - has been idle for enough time,
* - is not a sync queue or is linked to a bfq_io_cq (it is
* shared "by nature" or it is not shared and its
* requests have not been redirected to a shared queue)
* start a weight-raising period.
*/
if (old_wr_coeff == 1 && (interactive || soft_rt) &&
(!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
bfqq->wr_coeff = bfqd->bfq_wr_coeff;
if (interactive)
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
else
bfqq->wr_cur_max_time =
bfqd->bfq_wr_rt_max_time;
bfq_log_bfqq(bfqd, bfqq,
"wrais starting at %lu, rais_max_time %u",
jiffies,
jiffies_to_msecs(bfqq->wr_cur_max_time));
} else if (old_wr_coeff > 1) {
if (interactive)
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
else if (coop_or_in_burst ||
(bfqq->wr_cur_max_time ==
bfqd->bfq_wr_rt_max_time &&
!soft_rt)) {
bfqq->wr_coeff = 1;
bfq_log_bfqq(bfqd, bfqq,
"wrais ending at %lu, rais_max_time %u",
jiffies,
jiffies_to_msecs(bfqq->
wr_cur_max_time));
} else if (time_before(
bfqq->last_wr_start_finish +
bfqq->wr_cur_max_time,
jiffies +
bfqd->bfq_wr_rt_max_time) &&
soft_rt) {
/*
* The remaining weight-raising time is lower
* than bfqd->bfq_wr_rt_max_time, which means
* that the application is enjoying weight
* raising either because it was deemed soft-rt
* in the near past, or because it was deemed
* interactive long ago.
* In both cases, resetting now the current
* remaining weight-raising time for the
* application to the weight-raising duration
* for soft rt applications would not cause any
* latency increase for the application (as the
* new duration would be higher than the
* remaining time).
*
* In addition, the application is now meeting
* the requirements for being deemed soft rt.
* In the end we can correctly and safely
* (re)charge the weight-raising duration for
* the application with the weight-raising
* duration for soft rt applications.
*
* In particular, doing this recharge now, i.e.,
* before the weight-raising period for the
* application finishes, reduces the probability
* of the following negative scenario:
* 1) the weight of a soft rt application is
* raised at startup (as for any newly
* created application),
* 2) since the application is not interactive,
* at a certain time weight-raising is
* stopped for the application,
* 3) at that time the application happens to
* still have pending requests, and hence
* is destined to not have a chance to be
* deemed soft rt before these requests are
* completed (see the comments to the
* function bfq_bfqq_softrt_next_start()
* for details on soft rt detection),
* 4) these pending requests experience a high
* latency because the application is not
* weight-raised while they are pending.
*/
bfqq->last_wr_start_finish = jiffies;
bfqq->wr_cur_max_time =
bfqd->bfq_wr_rt_max_time;
}
}
set_ioprio_changed:
if (old_wr_coeff != bfqq->wr_coeff)
entity->ioprio_changed = 1;
add_bfqq_busy:
bfqq->last_idle_bklogged = jiffies;
bfqq->service_from_backlogged = 0;
bfq_clear_bfqq_softrt_update(bfqq);
bfq_add_bfqq_busy(bfqd, bfqq);
} else {
if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
time_is_before_jiffies(
bfqq->last_wr_start_finish +
bfqd->bfq_wr_min_inter_arr_async)) {
bfqq->wr_coeff = bfqd->bfq_wr_coeff;
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
bfqd->wr_busy_queues++;
entity->ioprio_changed = 1;
bfq_log_bfqq(bfqd, bfqq,
"non-idle wrais starting at %lu, rais_max_time %u",
jiffies,
jiffies_to_msecs(bfqq->wr_cur_max_time));
}
if (prev != bfqq->next_rq)
bfq_updated_next_req(bfqd, bfqq);
}
if (bfqd->low_latency &&
(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
bfqq->last_wr_start_finish = jiffies;
}
static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
struct bio *bio)
{
struct task_struct *tsk = current;
struct bfq_io_cq *bic;
struct bfq_queue *bfqq;
bic = bfq_bic_lookup(bfqd, tsk->io_context);
if (bic == NULL)
return NULL;
bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
if (bfqq != NULL)
return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
return NULL;
}
static void bfq_activate_request(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
bfqd->rq_in_driver++;
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
(long long unsigned)bfqd->last_position);
}
static inline void bfq_deactivate_request(struct request_queue *q,
struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
BUG_ON(bfqd->rq_in_driver == 0);
bfqd->rq_in_driver--;
}
static void bfq_remove_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
const int sync = rq_is_sync(rq);
if (bfqq->next_rq == rq) {
bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
bfq_updated_next_req(bfqd, bfqq);
}
list_del_init(&rq->queuelist);
BUG_ON(bfqq->queued[sync] == 0);
bfqq->queued[sync]--;
bfqd->queued--;
elv_rb_del(&bfqq->sort_list, rq);
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
bfq_del_bfqq_busy(bfqd, bfqq, 1);
/*
* Remove queue from request-position tree as it is empty.
*/
if (bfqq->pos_root != NULL) {
rb_erase(&bfqq->pos_node, bfqq->pos_root);
bfqq->pos_root = NULL;
}
}
if (rq->cmd_flags & REQ_META) {
BUG_ON(bfqq->meta_pending == 0);
bfqq->meta_pending--;
}
}
static int bfq_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct request *__rq;
__rq = bfq_find_rq_fmerge(bfqd, bio);
if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
*req = __rq;
return ELEVATOR_FRONT_MERGE;
}
return ELEVATOR_NO_MERGE;
}
static void bfq_merged_request(struct request_queue *q, struct request *req,
int type)
{
if (type == ELEVATOR_FRONT_MERGE &&
rb_prev(&req->rb_node) &&
blk_rq_pos(req) <
blk_rq_pos(container_of(rb_prev(&req->rb_node),
struct request, rb_node))) {
struct bfq_queue *bfqq = RQ_BFQQ(req);
struct bfq_data *bfqd = bfqq->bfqd;
struct request *prev, *next_rq;
/* Reposition request in its sort_list */
elv_rb_del(&bfqq->sort_list, req);
elv_rb_add(&bfqq->sort_list, req);
/* Choose next request to be served for bfqq */
prev = bfqq->next_rq;
next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
bfqd->last_position);
BUG_ON(next_rq == NULL);
bfqq->next_rq = next_rq;
/*
* If next_rq changes, update both the queue's budget to
* fit the new request and the queue's position in its
* rq_pos_tree.
*/
if (prev != bfqq->next_rq) {
bfq_updated_next_req(bfqd, bfqq);
bfq_rq_pos_tree_add(bfqd, bfqq);
}
}
}
static void bfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
/*
* Reposition in fifo if next is older than rq.
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
list_move(&rq->queuelist, &next->queuelist);
rq_set_fifo_time(rq, rq_fifo_time(next));
}
if (bfqq->next_rq == next)
bfqq->next_rq = rq;
bfq_remove_request(next);
}
/* Must be called with bfqq != NULL */
static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
{
BUG_ON(bfqq == NULL);
if (bfq_bfqq_busy(bfqq))
bfqq->bfqd->wr_busy_queues--;
bfqq->wr_coeff = 1;
bfqq->wr_cur_max_time = 0;
/* Trigger a weight change on the next activation of the queue */
bfqq->entity.ioprio_changed = 1;
}
static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
struct bfq_group *bfqg)
{
int i, j;
for (i = 0; i < 2; i++)
for (j = 0; j < IOPRIO_BE_NR; j++)
if (bfqg->async_bfqq[i][j] != NULL)
bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
if (bfqg->async_idle_bfqq != NULL)
bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
}
static void bfq_end_wr(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq;
spin_lock_irq(bfqd->queue->queue_lock);
list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
bfq_bfqq_end_wr(bfqq);
list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
bfq_bfqq_end_wr(bfqq);
bfq_end_wr_async(bfqd);
spin_unlock_irq(bfqd->queue->queue_lock);
}
static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
{
if (request)
return blk_rq_pos(io_struct);
else
return ((struct bio *)io_struct)->bi_sector;
}
static inline sector_t bfq_dist_from(sector_t pos1,
sector_t pos2)
{
if (pos1 >= pos2)
return pos1 - pos2;
else
return pos2 - pos1;
}
static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
sector_t sector)
{
return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
BFQQ_SEEK_THR;
}
static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
{
struct rb_root *root = &bfqd->rq_pos_tree;
struct rb_node *parent, *node;
struct bfq_queue *__bfqq;
if (RB_EMPTY_ROOT(root))
return NULL;
/*
* First, if we find a request starting at the end of the last
* request, choose it.
*/
__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
if (__bfqq != NULL)
return __bfqq;
/*
* If the exact sector wasn't found, the parent of the NULL leaf
* will contain the closest sector (rq_pos_tree sorted by
* next_request position).
*/
__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
return __bfqq;
if (blk_rq_pos(__bfqq->next_rq) < sector)
node = rb_next(&__bfqq->pos_node);
else
node = rb_prev(&__bfqq->pos_node);
if (node == NULL)
return NULL;
__bfqq = rb_entry(node, struct bfq_queue, pos_node);
if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
return __bfqq;
return NULL;
}
/*
* bfqd - obvious
* cur_bfqq - passed in so that we don't decide that the current queue
* is closely cooperating with itself
* sector - used as a reference point to search for a close queue
*/
static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
struct bfq_queue *cur_bfqq,
sector_t sector)
{
struct bfq_queue *bfqq;
if (bfq_class_idle(cur_bfqq))
return NULL;
if (!bfq_bfqq_sync(cur_bfqq))
return NULL;
if (BFQQ_SEEKY(cur_bfqq))
return NULL;
/* If device has only one backlogged bfq_queue, don't search. */
if (bfqd->busy_queues == 1)
return NULL;
/*
* We should notice if some of the queues are cooperating, e.g.
* working closely on the same area of the disk. In that case,
* we can group them together and don't waste time idling.
*/
bfqq = bfqq_close(bfqd, sector);
if (bfqq == NULL || bfqq == cur_bfqq)
return NULL;
/*
* Do not merge queues from different bfq_groups.
*/
if (bfqq->entity.parent != cur_bfqq->entity.parent)
return NULL;
/*
* It only makes sense to merge sync queues.
*/
if (!bfq_bfqq_sync(bfqq))
return NULL;
if (BFQQ_SEEKY(bfqq))
return NULL;
/*
* Do not merge queues of different priority classes.
*/
if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
return NULL;
return bfqq;
}
static struct bfq_queue *
bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
{
int process_refs, new_process_refs;
struct bfq_queue *__bfqq;
/*
* If there are no process references on the new_bfqq, then it is
* unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
* may have dropped their last reference (not just their last process
* reference).
*/
if (!bfqq_process_refs(new_bfqq))
return NULL;
/* Avoid a circular list and skip interim queue merges. */
while ((__bfqq = new_bfqq->new_bfqq)) {
if (__bfqq == bfqq)
return NULL;
new_bfqq = __bfqq;
}
process_refs = bfqq_process_refs(bfqq);
new_process_refs = bfqq_process_refs(new_bfqq);
/*
* If the process for the bfqq has gone away, there is no
* sense in merging the queues.
*/
if (process_refs == 0 || new_process_refs == 0)
return NULL;
bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
new_bfqq->pid);
/*
* Merging is just a redirection: the requests of the process
* owning one of the two queues are redirected to the other queue.
* The latter queue, in its turn, is set as shared if this is the
* first time that the requests of some process are redirected to
* it.
*
* We redirect bfqq to new_bfqq and not the opposite, because we
* are in the context of the process owning bfqq, hence we have
* the io_cq of this process. So we can immediately configure this
* io_cq to redirect the requests of the process to new_bfqq.
*
* NOTE: even if new_bfqq coincides with the in-service queue, the
* io_cq of new_bfqq is not available, because, if the in-service
* queue is shared, bfqd->in_service_bic may not point to the
* io_cq of the in-service queue.
* Redirecting the requests of the process owning bfqq to the
* currently in-service queue is in any case the best option, as
* we feed the in-service queue with new requests close to the
* last request served and, by doing so, hopefully increase the
* throughput.
*/
bfqq->new_bfqq = new_bfqq;
atomic_add(process_refs, &new_bfqq->ref);
return new_bfqq;
}
/*
* Attempt to schedule a merge of bfqq with the currently in-service queue
* or with a close queue among the scheduled queues.
* Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
* structure otherwise.
*/
static struct bfq_queue *
bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
void *io_struct, bool request)
{
struct bfq_queue *in_service_bfqq, *new_bfqq;
if (bfqq->new_bfqq)
return bfqq->new_bfqq;
if (!io_struct)
return NULL;
in_service_bfqq = bfqd->in_service_queue;
if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
!bfqd->in_service_bic)
goto check_scheduled;
if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
goto check_scheduled;
if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
goto check_scheduled;
if (in_service_bfqq->entity.parent != bfqq->entity.parent)
goto check_scheduled;
if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
if (new_bfqq != NULL)
return new_bfqq; /* Merge with in-service queue */
}
/*
* Check whether there is a cooperator among currently scheduled
* queues. The only thing we need is that the bio/request is not
* NULL, as we need it to establish whether a cooperator exists.
*/
check_scheduled:
new_bfqq = bfq_close_cooperator(bfqd, bfqq,
bfq_io_struct_pos(io_struct, request));
if (new_bfqq)
return bfq_setup_merge(bfqq, new_bfqq);
return NULL;
}
static inline void
bfq_bfqq_save_state(struct bfq_queue *bfqq)
{
/*
* If bfqq->bic == NULL, the queue is already shared or its requests
* have already been redirected to a shared queue; both idle window
* and weight raising state have already been saved. Do nothing.
*/
if (bfqq->bic == NULL)
return;
if (bfqq->bic->wr_time_left)
/*
* This is the queue of a just-started process, and would
* deserve weight raising: we set wr_time_left to the full
* weight-raising duration to trigger weight-raising when
* and if the queue is split and the first request of the
* queue is enqueued.
*/
bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
else if (bfqq->wr_coeff > 1) {
unsigned long wr_duration =
jiffies - bfqq->last_wr_start_finish;
/*
* It may happen that a queue's weight raising period lasts
* longer than its wr_cur_max_time, as weight raising is
* handled only when a request is enqueued or dispatched (it
* does not use any timer). If the weight raising period is
* about to end, don't save it.
*/
if (bfqq->wr_cur_max_time <= wr_duration)
bfqq->bic->wr_time_left = 0;
else
bfqq->bic->wr_time_left =
bfqq->wr_cur_max_time - wr_duration;
/*
* The bfq_queue is becoming shared or the requests of the
* process owning the queue are being redirected to a shared
* queue. Stop the weight raising period of the queue, as in
* both cases it should not be owned by an interactive or
* soft real-time application.
*/
bfq_bfqq_end_wr(bfqq);
} else
bfqq->bic->wr_time_left = 0;
bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
bfqq->bic->cooperations++;
bfqq->bic->failed_cooperations = 0;
}
static inline void
bfq_get_bic_reference(struct bfq_queue *bfqq)
{
/*
* If bfqq->bic has a non-NULL value, the bic to which it belongs
* is about to begin using a shared bfq_queue.
*/
if (bfqq->bic)
atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
}
static void
bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
{
bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
(long unsigned)new_bfqq->pid);
/* Save weight raising and idle window of the merged queues */
bfq_bfqq_save_state(bfqq);
bfq_bfqq_save_state(new_bfqq);
if (bfq_bfqq_IO_bound(bfqq))
bfq_mark_bfqq_IO_bound(new_bfqq);
bfq_clear_bfqq_IO_bound(bfqq);
/*
* Grab a reference to the bic, to prevent it from being destroyed
* before being possibly touched by a bfq_split_bfqq().
*/
bfq_get_bic_reference(bfqq);
bfq_get_bic_reference(new_bfqq);
/*
* Merge queues (that is, let bic redirect its requests to new_bfqq)
*/
bic_set_bfqq(bic, new_bfqq, 1);
bfq_mark_bfqq_coop(new_bfqq);
/*
* new_bfqq now belongs to at least two bics (it is a shared queue):
* set new_bfqq->bic to NULL. bfqq either:
* - does not belong to any bic any more, and hence bfqq->bic must
* be set to NULL, or
* - is a queue whose owning bics have already been redirected to a
* different queue, hence the queue is destined to not belong to
* any bic soon and bfqq->bic is already NULL (therefore the next
* assignment causes no harm).
*/
new_bfqq->bic = NULL;
bfqq->bic = NULL;
bfq_put_queue(bfqq);
}
static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
{
struct bfq_io_cq *bic = bfqq->bic;
struct bfq_data *bfqd = bfqq->bfqd;
if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
bic->failed_cooperations++;
if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
bic->cooperations = 0;
}
}
static int bfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_io_cq *bic;
struct bfq_queue *bfqq, *new_bfqq;
/*
* Disallow merge of a sync bio into an async request.
*/
if (bfq_bio_sync(bio) && !rq_is_sync(rq))
return 0;
/*
* Lookup the bfqq that this bio will be queued with. Allow
* merge only if rq is queued there.
* Queue lock is held here.
*/
bic = bfq_bic_lookup(bfqd, current->io_context);
if (bic == NULL)
return 0;
bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
/*
* We take advantage of this function to perform an early merge
* of the queues of possible cooperating processes.
*/
if (bfqq != NULL) {
new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
if (new_bfqq != NULL) {
bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
/*
* If we get here, the bio will be queued in the
* shared queue, i.e., new_bfqq, so use new_bfqq
* to decide whether bio and rq can be merged.
*/
bfqq = new_bfqq;
} else
bfq_bfqq_increase_failed_cooperations(bfqq);
}
return bfqq == RQ_BFQQ(rq);
}
static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
if (bfqq != NULL) {
bfq_mark_bfqq_must_alloc(bfqq);
bfq_mark_bfqq_budget_new(bfqq);
bfq_clear_bfqq_fifo_expire(bfqq);
bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
bfq_log_bfqq(bfqd, bfqq,
"set_in_service_queue, cur-budget = %lu",
bfqq->entity.budget);
}
bfqd->in_service_queue = bfqq;
}
/*
* Get and set a new queue for service.
*/
static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
__bfq_set_in_service_queue(bfqd, bfqq);
return bfqq;
}
/*
* If enough samples have been computed, return the current max budget
* stored in bfqd, which is dynamically updated according to the
* estimated disk peak rate; otherwise return the default max budget
*/
static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
{
if (bfqd->budgets_assigned < 194)
return bfq_default_max_budget;
else
return bfqd->bfq_max_budget;
}
/*
* Return min budget, which is a fraction of the current or default
* max budget (trying with 1/32)
*/
static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
{
if (bfqd->budgets_assigned < 194)
return bfq_default_max_budget / 32;
else
return bfqd->bfq_max_budget / 32;
}
static void bfq_arm_slice_timer(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfqd->in_service_queue;
struct bfq_io_cq *bic;
unsigned long sl;
BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
/* Processes have exited, don't wait. */
bic = bfqd->in_service_bic;
if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
return;
bfq_mark_bfqq_wait_request(bfqq);
/*
* We don't want to idle for seeks, but we do want to allow
* fair distribution of slice time for a process doing back-to-back
* seeks. So allow a little bit of time for it to submit a new rq.
*
* To prevent processes with (partly) seeky workloads from
* being too ill-treated, grant them a small fraction of the
* assigned budget before reducing the waiting time to
* BFQ_MIN_TT. This happened to help reduce latency.
*/
sl = bfqd->bfq_slice_idle;
/*
* Unless the queue is being weight-raised, grant only minimum idle
* time if the queue either has been seeky for long enough or has
* already proved to be constantly seeky.
*/
if (bfq_sample_valid(bfqq->seek_samples) &&
((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
bfq_max_budget(bfqq->bfqd) / 8) ||
bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
else if (bfqq->wr_coeff > 1)
sl = sl * 3;
bfqd->last_idling_start = ktime_get();
mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
bfq_log(bfqd, "arm idle: %u/%u ms",
jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
}
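/*
* Illustrative arithmetic: with, e.g., bfq_slice_idle = 8 ms, a
* constantly seeky, non-weight-raised queue idles for
* min(8 ms, BFQ_MIN_TT), whereas a weight-raised queue idles for
* 8 * 3 = 24 ms.
*/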
/*
* Set the maximum time for the in-service queue to consume its
* budget. This prevents seeky processes from lowering the disk
* throughput (always guaranteed with a time slice scheme as in CFQ).
*/
static void bfq_set_budget_timeout(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq = bfqd->in_service_queue;
unsigned int timeout_coeff;
if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
timeout_coeff = 1;
else
timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
bfqd->last_budget_start = ktime_get();
bfq_clear_bfqq_budget_new(bfqq);
bfqq->budget_timeout = jiffies +
bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
timeout_coeff));
}
/*
* Move request from internal lists to the request queue dispatch list.
*/
static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_queue *bfqq = RQ_BFQQ(rq);
/*
* For consistency, the next instruction should have been executed
* after removing the request from the queue and dispatching it.
* We instead execute this instruction before bfq_remove_request()
* (and hence introduce a temporary inconsistency), for efficiency.
* In fact, during a forced_dispatch, this prevents two counters
* related to bfqq->dispatched from being uselessly decremented if
* bfqq is not in service, and then incremented again after
* bfqq->dispatched itself is incremented.
*/
bfqq->dispatched++;
bfq_remove_request(rq);
elv_dispatch_sort(q, rq);
if (bfq_bfqq_sync(bfqq))
bfqd->sync_flight++;
}
/*
* Return expired entry, or NULL to just start from scratch in rbtree.
*/
static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
{
struct request *rq = NULL;
if (bfq_bfqq_fifo_expire(bfqq))
return NULL;
bfq_mark_bfqq_fifo_expire(bfqq);
if (list_empty(&bfqq->fifo))
return NULL;
rq = rq_entry_fifo(bfqq->fifo.next);
if (time_before(jiffies, rq_fifo_time(rq)))
return NULL;
return rq;
}
static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
return entity->budget - entity->service;
}
static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
BUG_ON(bfqq != bfqd->in_service_queue);
__bfq_bfqd_reset_in_service(bfqd);
/*
* If this bfqq is shared between multiple processes, check
* to make sure that those processes are still issuing I/Os
* within the mean seek distance. If not, it may be time to
* break the queues apart again.
*/
if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
bfq_mark_bfqq_split_coop(bfqq);
if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
/*
* Overloading budget_timeout field to store the time
* at which the queue remains with no backlog; used by
* the weight-raising mechanism.
*/
bfqq->budget_timeout = jiffies;
bfq_del_bfqq_busy(bfqd, bfqq, 1);
} else {
bfq_activate_bfqq(bfqd, bfqq);
/*
* Resort priority tree of potential close cooperators.
*/
bfq_rq_pos_tree_add(bfqd, bfqq);
}
}
/**
* __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
* @bfqd: device data.
* @bfqq: queue to update.
* @reason: reason for expiration.
*
* Handle the feedback on @bfqq budget. See the body for detailed
* comments.
*/
static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
enum bfqq_expiration reason)
{
struct request *next_rq;
unsigned long budget, min_budget;
budget = bfqq->max_budget;
min_budget = bfq_min_budget(bfqd);
BUG_ON(bfqq != bfqd->in_service_queue);
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
budget, bfq_min_budget(bfqd));
bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
if (bfq_bfqq_sync(bfqq)) {
switch (reason) {
/*
* Caveat: in all the following cases we trade latency
* for throughput.
*/
case BFQ_BFQQ_TOO_IDLE:
/*
* This is the only case where we may reduce
* the budget: if there is no request of the
* process still waiting for completion, then
* we assume (tentatively) that the timer has
* expired because the batch of requests of
* the process could have been served with a
* smaller budget. Hence, betting that
* process will behave in the same way when it
* becomes backlogged again, we reduce its
* next budget. As long as we guess right,
* this budget cut reduces the latency
* experienced by the process.
*
* However, if there are still outstanding
* requests, then the process may have not yet
* issued its next request just because it is
* still waiting for the completion of some of
* the still outstanding ones. So in this
* subcase we do not reduce its budget, on the
* contrary we increase it to possibly boost
* the throughput, as discussed in the
* comments to the BUDGET_TIMEOUT case.
*/
if (bfqq->dispatched > 0) /* still outstanding reqs */
budget = min(budget * 2, bfqd->bfq_max_budget);
else {
if (budget > 5 * min_budget)
budget -= 4 * min_budget;
else
budget = min_budget;
}
break;
case BFQ_BFQQ_BUDGET_TIMEOUT:
/*
* We double the budget here because: 1) it
* gives the chance to boost the throughput if
* this is not a seeky process (which may have
* bumped into this timeout because of, e.g.,
* ZBR), 2) together with charge_full_budget
* it helps give seeky processes higher
* timestamps, and hence be served less
* frequently.
*/
budget = min(budget * 2, bfqd->bfq_max_budget);
break;
case BFQ_BFQQ_BUDGET_EXHAUSTED:
/*
* The process still has backlog, and did not
* let either the budget timeout or the disk
* idling timeout expire. Hence it is not
* seeky, has a short thinktime and may be
* happy with a higher budget too. So
* definitely increase the budget of this good
* candidate to boost the disk throughput.
*/
budget = min(budget * 4, bfqd->bfq_max_budget);
break;
case BFQ_BFQQ_NO_MORE_REQUESTS:
/*
* Leave the budget unchanged.
*/
default:
return;
}
} else /* async queue */
/* async queues always get the maximum possible budget
* (their ability to dispatch is limited by
* @bfqd->bfq_max_budget_async_rq).
*/
budget = bfqd->bfq_max_budget;
bfqq->max_budget = budget;
if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
bfqq->max_budget > bfqd->bfq_max_budget)
bfqq->max_budget = bfqd->bfq_max_budget;
/*
* Make sure that we have enough budget for the next request.
* Since the finish time of the bfqq must be kept in sync with
* the budget, be sure to call __bfq_bfqq_expire() after the
* update.
*/
next_rq = bfqq->next_rq;
if (next_rq != NULL)
bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq));
else
bfqq->entity.budget = bfqq->max_budget;
bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
bfqq->entity.budget);
}
static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
{
unsigned long max_budget;
/*
* The max_budget calculated when autotuning is equal to the
* amount of sectors transfered in timeout_sync at the
* estimated peak rate.
*/
max_budget = (unsigned long)(peak_rate * 1000 *
timeout >> BFQ_RATE_SHIFT);
return max_budget;
}
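/*
* Worked example (illustrative): peak_rate is in sectors/usec shifted
* left by BFQ_RATE_SHIFT, and timeout is in ms. With a true rate of
* 0.2 sectors/usec (~100 MB/s) and timeout = 125 ms, the result is
* 0.2 * 1000 * 125 = 25000 sectors, i.e., ~12 MB.
*/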
/*
* In addition to updating the peak rate, checks whether the process
* is "slow", and returns 1 if so. This slow flag is used, in addition
* to the budget timeout, to reduce the amount of service provided to
* seeky processes, and hence reduce their chances to lower the
* throughput. See the code for more details.
*/
static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
int compensate, enum bfqq_expiration reason)
{
u64 bw, usecs, expected, timeout;
ktime_t delta;
int update = 0;
if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
return 0;
if (compensate)
delta = bfqd->last_idling_start;
else
delta = ktime_get();
delta = ktime_sub(delta, bfqd->last_budget_start);
usecs = ktime_to_us(delta);
/* Don't trust short/unrealistic values. */
if (usecs < 100 || usecs >= LONG_MAX)
return 0;
/*
* Calculate the bandwidth for the last slice. We use a 64 bit
* value to store the peak rate, in sectors per usec in fixed
* point math. We do so to have enough precision in the estimate
* and to avoid overflows.
*/
bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
do_div(bw, (unsigned long)usecs);
timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
/*
* Use only long (> 20ms) intervals to filter out spikes for
* the peak rate estimation.
*/
if (usecs > 20000) {
if (bw > bfqd->peak_rate ||
(!BFQQ_SEEKY(bfqq) &&
reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
bfq_log(bfqd, "measured bw = %llu", bw);
/*
* To smooth oscillations use a low-pass filter with
* alpha=7/8, i.e.,
* new_rate = (7/8) * old_rate + (1/8) * bw
*/
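/*
* Illustrative step of the filter: with old peak_rate = 800 and
* bw = 1600 (same fixed-point units), the new estimate is
* 800 * 7/8 + 1600 / 8 = 700 + 200 = 900.
*/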
do_div(bw, 8);
if (bw == 0)
return 0;
bfqd->peak_rate *= 7;
do_div(bfqd->peak_rate, 8);
bfqd->peak_rate += bw;
update = 1;
bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
}
update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
bfqd->peak_rate_samples++;
if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
update) {
int dev_type = blk_queue_nonrot(bfqd->queue);
if (bfqd->bfq_user_max_budget == 0) {
bfqd->bfq_max_budget =
bfq_calc_max_budget(bfqd->peak_rate,
timeout);
bfq_log(bfqd, "new max_budget=%lu",
bfqd->bfq_max_budget);
}
if (bfqd->device_speed == BFQ_BFQD_FAST &&
bfqd->peak_rate < device_speed_thresh[dev_type]) {
bfqd->device_speed = BFQ_BFQD_SLOW;
bfqd->RT_prod = R_slow[dev_type] *
T_slow[dev_type];
} else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
bfqd->peak_rate > device_speed_thresh[dev_type]) {
bfqd->device_speed = BFQ_BFQD_FAST;
bfqd->RT_prod = R_fast[dev_type] *
T_fast[dev_type];
}
}
}
/*
* If the process has been served for a too short time
* interval to let its possible sequential accesses prevail on
* the initial seek time needed to move the disk head on the
* first sector it requested, then give the process a chance
* and for the moment return false.
*/
if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
return 0;
/*
* A process is considered ``slow'' (i.e., seeky, so that we
* cannot treat it fairly in the service domain, as it would
* slow down too much the other processes) if, when a slice
* ends for whatever reason, it has received service at a
* rate that would not be high enough to complete the budget
* before the budget timeout expiration.
*/
expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
/*
* Caveat: processes doing IO in the slower disk zones will
* tend to be slow(er) even if not seeky. And the estimated
* peak rate will actually be an average over the disk
* surface. Hence, to not be too harsh with unlucky processes,
* we keep a budget/3 margin of safety before declaring a
* process slow.
*/
return expected > (4 * bfqq->entity.budget) / 3;
}
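/*
* Illustrative check of the rule above: if expected = 1200 sectors
* and entity.budget = 1000 sectors, then 1200 > 4 * 1000 / 3 = 1333
* is false, so the process stays within the budget/3 safety margin
* and is not declared slow.
*/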
/*
* To be deemed as soft real-time, an application must meet two
* requirements. First, the application must not require an average
* bandwidth higher than the approximate bandwidth required to playback or
* record a compressed high-definition video.
* The next function is invoked on the completion of the last request of a
* batch, to compute the next-start time instant, soft_rt_next_start, such
* that, if the next request of the application does not arrive before
* soft_rt_next_start, then the above requirement on the bandwidth is met.
*
* The second requirement is that the request pattern of the application is
* isochronous, i.e., that, after issuing a request or a batch of requests,
* the application stops issuing new requests until all its pending requests
* have been completed. After that, the application may issue a new batch,
* and so on.
* For this reason the next function is invoked to compute
* soft_rt_next_start only for applications that meet this requirement,
* whereas soft_rt_next_start is set to infinity for applications that do
* not.
*
* Unfortunately, even a greedy application may happen to behave in an
* isochronous way if the CPU load is high. In fact, the application may
* stop issuing requests while the CPUs are busy serving other processes,
* then restart, then stop again for a while, and so on. In addition, if
* the disk achieves a low enough throughput with the request pattern
* issued by the application (e.g., because the request pattern is random
* and/or the device is slow), then the application may meet the above
* bandwidth requirement too. To prevent such a greedy application from
* being deemed soft real-time, a further rule is used in the computation
* of soft_rt_next_start: soft_rt_next_start must be higher than the
* current time plus the maximum time for which the arrival of a request
* is waited for when a sync queue becomes idle, namely
* bfqd->bfq_slice_idle.
* This filters out greedy applications, as the latter issue instead their
* next request as soon as possible after the last one has been completed
* (in contrast, when a batch of requests is completed, a soft real-time
* application spends some time processing data).
*
* Unfortunately, the last filter may easily generate false positives if
* only bfqd->bfq_slice_idle is used as a reference time interval and one
* or both the following cases occur:
* 1) HZ is so low that the duration of a jiffy is comparable to or higher
* than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
* HZ=100.
* 2) jiffies, instead of increasing at a constant rate, may stop increasing
* for a while, then suddenly 'jump' by several units to recover the lost
* increments. This seems to happen, e.g., inside virtual machines.
* To address this issue, we do not use as a reference time interval just
* bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
* particular we add the minimum number of jiffies for which the filter
* seems to be quite precise also in embedded systems and KVM/QEMU virtual
* machines.
*/
static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
return max(bfqq->last_idle_bklogged +
HZ * bfqq->service_from_backlogged /
bfqd->bfq_wr_max_softrt_rate,
jiffies + bfqq->bfqd->bfq_slice_idle + 4);
}
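/*
 * Illustrative example of the formula above (assumed numbers, not taken
 * from any real device): with HZ=250, bfq_wr_max_softrt_rate=7000
 * sectors/sec and service_from_backlogged=1400 sectors, the first
 * argument of max() evaluates to last_idle_bklogged + 250*1400/7000 =
 * last_idle_bklogged + 50 jiffies (200 ms): a new request arriving after
 * that instant keeps the average rate within the soft real-time
 * threshold, while the second argument enforces the minimum idle-based
 * distance from the current time.
 */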
/*
* Return the largest-possible time instant such that, for as long as possible,
* the current time will be lower than this time instant according to the macro
* time_is_before_jiffies().
*/
static inline unsigned long bfq_infinity_from_now(unsigned long now)
{
return now + ULONG_MAX / 2;
}
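/*
 * Note: jiffies comparisons such as time_is_before_jiffies() rely on
 * signed wraparound arithmetic, so two instants can be ordered reliably
 * only if they are less than ULONG_MAX/2 ticks apart. Hence
 * now + ULONG_MAX/2 is the farthest future instant that is still
 * guaranteed to compare as 'after' the current time.
 */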
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
* @bfqq: the queue to expire.
* @compensate: if true, compensate for the time spent idling.
* @reason: the reason causing the expiration.
*
* If the process associated with the queue is slow (i.e., seeky), or in
* case of budget timeout, or, finally, if it is async, we
* artificially charge it an entire budget (independently of the
* actual service it received). As a consequence, the queue will get
* higher timestamps than the correct ones upon reactivation, and
* hence it will be rescheduled as if it had received more service
* than what it actually received. In the end, this class of processes
* will receive less service in proportion to how slowly they consume
* their budgets (and hence how seriously they tend to lower the
* throughput).
*
* In contrast, when a queue expires because it has been idling for
* too long or because it exhausted its budget, we do not touch the
* amount of service it has received. Hence when the queue will be
* reactivated and its timestamps updated, the latter will be in sync
* with the actual service received by the queue until expiration.
*
* Charging a full budget to the first type of queues and the exact
* service to the others has the effect of using the WF2Q+ policy to
* schedule the former on a timeslice basis, without violating the
* service domain guarantees of the latter.
*/
static void bfq_bfqq_expire(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
int compensate,
enum bfqq_expiration reason)
{
int slow;
BUG_ON(bfqq != bfqd->in_service_queue);
/* Update disk peak rate for autotuning and check whether the
* process is slow (see bfq_update_peak_rate).
*/
slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
/*
* As explained above, 'punish' slow (i.e., seeky), timed-out
* and async queues, to favor sequential sync workloads.
*
* Processes doing I/O in the slower disk zones will tend to be
* slow(er) even if not seeky. Hence, since the estimated peak
* rate is actually an average over the disk surface, these
* processes may time out just for bad luck. To avoid punishing
* them we do not charge a full budget to a process that
* succeeded in consuming at least 2/3 of its budget.
*/
if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
bfq_bfqq_charge_full_budget(bfqq);
bfqq->service_from_backlogged += bfqq->entity.service;
if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
!bfq_bfqq_constantly_seeky(bfqq)) {
bfq_mark_bfqq_constantly_seeky(bfqq);
if (!blk_queue_nonrot(bfqd->queue))
bfqd->const_seeky_busy_in_flight_queues++;
}
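/*
 * A queue that expired because it was idling too long, while
 * consuming at most 20% of its budget, is not doing enough I/O
 * to keep its I/O-bound status.
 */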
if (reason == BFQ_BFQQ_TOO_IDLE &&
bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
bfq_clear_bfqq_IO_bound(bfqq);
if (bfqd->low_latency && bfqq->wr_coeff == 1)
bfqq->last_wr_start_finish = jiffies;
if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
RB_EMPTY_ROOT(&bfqq->sort_list)) {
/*
* If we get here, and there are no outstanding requests,
* then the request pattern is isochronous (see the comments
* to the function bfq_bfqq_softrt_next_start()). Hence we
* can compute soft_rt_next_start. If, instead, the queue
* still has outstanding requests, then we have to wait
* for the completion of all the outstanding requests to
* discover whether the request pattern is actually
* isochronous.
*/
if (bfqq->dispatched == 0)
bfqq->soft_rt_next_start =
bfq_bfqq_softrt_next_start(bfqd, bfqq);
else {
/*
* The application is still waiting for the
* completion of one or more requests:
* prevent it from possibly being incorrectly
* deemed as soft real-time by setting its
* soft_rt_next_start to infinity. In fact,
* without this assignment, the application
* would be incorrectly deemed as soft
* real-time if:
* 1) it issued a new request before the
* completion of all its in-flight
* requests, and
* 2) at that time, its soft_rt_next_start
* happened to be in the past.
*/
bfqq->soft_rt_next_start =
bfq_infinity_from_now(jiffies);
/*
* Schedule an update of soft_rt_next_start to when
* the task may be discovered to be isochronous.
*/
bfq_mark_bfqq_softrt_update(bfqq);
}
}
bfq_log_bfqq(bfqd, bfqq,
"expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
/*
* Increase, decrease or leave budget unchanged according to
* reason.
*/
__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
__bfq_bfqq_expire(bfqd, bfqq);
}
/*
* Budget timeout is not implemented through a dedicated timer, but
* just checked on request arrivals and completions, as well as on
* idle timer expirations.
*/
static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
{
if (bfq_bfqq_budget_new(bfqq) ||
time_before(jiffies, bfqq->budget_timeout))
return 0;
return 1;
}
/*
* If we expire a queue that is waiting for the arrival of a new
* request, we may prevent the fictitious timestamp back-shifting that
* allows the guarantees of the queue to be preserved (see [1] for
* this tricky aspect). Hence we return true only if this condition
* does not hold, or if the queue is slow enough to deserve only to be
* kicked off for preserving a high throughput.
*/
static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq,
"may_budget_timeout: wait_request %d left %d timeout %d",
bfq_bfqq_wait_request(bfqq),
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
bfq_bfqq_budget_timeout(bfqq));
return (!bfq_bfqq_wait_request(bfqq) ||
bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
&&
bfq_bfqq_budget_timeout(bfqq);
}
/*
* Device idling is allowed only for the queues for which this function
* returns true. For this reason, the return value of this function plays a
* critical role for both throughput boosting and service guarantees. The
* return value is computed through a logical expression. In this rather
* long comment, we try to briefly describe all the details and motivations
* behind the components of this logical expression.
*
* First, the expression is false if bfqq is not sync, or if: bfqq happened
* to become active during a large burst of queue activations, and the
* pattern of requests bfqq contains boosts the throughput if bfqq is
* expired. In fact, queues that became active during a large burst benefit
* only from throughput, as discussed in the comments to bfq_handle_burst.
* In this respect, expiring bfqq certainly boosts the throughput on NCQ-
* capable flash-based devices, whereas, on rotational devices, it boosts
* the throughput only if bfqq contains random requests.
*
* On the opposite end, if (a) bfqq is sync, (b) the above burst-related
* condition does not hold, and (c) bfqq is being weight-raised, then the
* expression always evaluates to true, as device idling is instrumental
* in preserving low-latency guarantees (see [1]). If, instead, conditions
* (a) and (b) do hold, but (c) does not, then the expression evaluates to
* true only if: (1) bfqq is I/O-bound and has a non-null idle window, and
* (2) at least one of the following two conditions holds.
* The first condition is that the device is not performing NCQ, because
* idling the device most certainly boosts the throughput if this condition
* holds and bfqq is I/O-bound and has been granted a non-null idle window.
* The second compound condition is made of the logical AND of two components.
*
* The first component is true only if there is no weight-raised busy
* queue. This guarantees that the device is not idled for a sync non-
* weight-raised queue when there are busy weight-raised queues. The former
* is then expired immediately if empty. Combined with the timestamping
* rules of BFQ (see [1] for details), this causes sync non-weight-raised
* queues to get a lower number of requests served, and hence to ask for a
* lower number of requests from the request pool, before the busy weight-
* raised queues get served again.
*
* This is beneficial for the processes associated with weight-raised
* queues, when the request pool is saturated (e.g., in the presence of
* write hogs). In fact, if the processes associated with the other queues
* ask for requests at a lower rate, then weight-raised processes have a
* higher probability to get a request from the pool immediately (or at
* least soon) when they need one. Hence they have a higher probability to
* actually get a fraction of the disk throughput proportional to their
* high weight. This is especially true with NCQ-capable drives, which
* enqueue several requests in advance and further reorder internally-
* queued requests.
*
* In the end, mistreating non-weight-raised queues when there are busy
* weight-raised queues seems to mitigate starvation problems in the
* presence of heavy write workloads and NCQ, and hence to guarantee a
* higher application and system responsiveness in these hostile scenarios.
*
* If the first component of the compound condition is instead true, i.e.,
* there is no weight-raised busy queue, then the second component of the
* compound condition takes into account service-guarantee and throughput
* issues related to NCQ (recall that the compound condition is evaluated
* only if the device is detected as supporting NCQ).
*
* As for service guarantees, allowing the drive to enqueue more than one
* request at a time, and hence delegating de facto final scheduling
* decisions to the drive's internal scheduler, causes loss of control on
* the actual request service order. In this respect, when the drive is
* allowed to enqueue more than one request at a time, the service
* distribution enforced by the drive's internal scheduler is likely to
* coincide with the desired device-throughput distribution only in the
* following, perfectly symmetric, scenario:
* 1) all active queues have the same weight,
* 2) all active groups at the same level in the groups tree have the same
* weight,
* 3) all active groups at the same level in the groups tree have the same
* number of children.
*
* Even in such a scenario, sequential I/O may still receive a preferential
* treatment, but this is not likely to be a big issue with flash-based
* devices, because of their non-dramatic loss of throughput with random
* I/O. Things do differ with HDDs, for which additional care is taken, as
* explained after completing the discussion for flash-based devices.
*
* Unfortunately, keeping the necessary state for evaluating exactly the
* above symmetry conditions would be quite complex and time-consuming.
* Therefore BFQ evaluates instead the following stronger sub-conditions,
* for which it is much easier to maintain the needed state:
* 1) all active queues have the same weight,
* 2) all active groups have the same weight,
* 3) all active groups have at most one active child each.
* In particular, the last two conditions are always true if hierarchical
* support and the cgroups interface are not enabled, hence no state needs
* to be maintained in this case.
*
* According to the above considerations, the second component of the
* compound condition evaluates to true if any of the above symmetry
* sub-conditions does not hold, or the device is not flash-based. Therefore,
* if also the first component is true, then idling is allowed for a sync
* queue. These are the only sub-conditions considered if the device is
* flash-based, as, for such a device, it is sensible to force idling only
* for service-guarantee issues. In fact, as for throughput, idling
* NCQ-capable flash-based devices would not boost the throughput even
* with sequential I/O; rather it would lower the throughput in proportion
* to how fast the device is. In the end, (only) if all the three
* sub-conditions hold and the device is flash-based, the compound
* condition evaluates to false and therefore no idling is performed.
*
* As already said, things change with a rotational device, where idling
* boosts the throughput with sequential I/O (even with NCQ). Hence, for
* such a device the second component of the compound condition evaluates
* to true also if the following additional sub-condition does not hold:
* the queue is constantly seeky. Unfortunately, this different behavior
* with respect to flash-based devices causes an additional asymmetry: if
* some sync queues enjoy idling and some other sync queues do not, then
* the latter get a low share of the device throughput, simply because the
* former get many requests served after being set as in service, whereas
* the latter do not. As a consequence, to guarantee the desired throughput
* distribution, on HDDs the compound expression evaluates to true (and
* hence device idling is performed) also if the following last symmetry
* condition does not hold: no other queue is benefiting from idling. Also
* this last condition is actually replaced with a simpler-to-maintain and
* stronger condition: there is no busy queue which is not constantly seeky
* (and hence may also benefit from idling).
*
* To sum up, when all the required symmetry and throughput-boosting
* sub-conditions hold, the second component of the compound condition
* evaluates to false, and hence no idling is performed. This helps to
* keep the drives' internal queues full on NCQ-capable devices, and hence
* to boost the throughput, without causing 'almost' any loss of service
* guarantees. The 'almost' follows from the fact that, if the internal
* queue of one such device is filled while all the sub-conditions hold,
* but at some point in time some sub-condition ceases to hold, then it may
* become impossible to let requests be served in the new desired order
* until all the requests already queued in the device have been served.
*/
static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
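/*
 * The macros below encode the symmetry sub-conditions listed in the
 * comments above: symmetric_scenario is true when all weights are
 * uniform (no differentiated weights) and, if cgroups support is
 * compiled in, when no group contains more than one active entity.
 */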
#ifdef CONFIG_CGROUP_BFQIO
#define symmetric_scenario (!bfqd->active_numerous_groups && \
!bfq_differentiated_weights(bfqd))
#else
#define symmetric_scenario (!bfq_differentiated_weights(bfqd))
#endif
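/*
 * True when bfqq is constantly seeky and all busy queues with
 * requests in flight are constantly seeky too: the stronger,
 * simpler-to-maintain replacement for 'no other queue is
 * benefiting from idling' described in the comments above.
 */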
#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
bfqd->busy_in_flight_queues == \
bfqd->const_seeky_busy_in_flight_queues)
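/*
 * True when bfqq was activated in a large burst and expiring it
 * boosts the throughput: the device supports queueing (hw_tag)
 * and either is non-rotational or bfqq is constantly seeky (see
 * the comments to bfq_handle_burst).
 */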
#define cond_for_expiring_in_burst (bfq_bfqq_in_large_burst(bfqq) && \
bfqd->hw_tag && \
(blk_queue_nonrot(bfqd->queue) || \
bfq_bfqq_constantly_seeky(bfqq)))
/*
* Condition for expiring a non-weight-raised queue (and hence not idling
* the device).
*/
#define cond_for_expiring_non_wr (bfqd->hw_tag && \
(bfqd->wr_busy_queues > 0 || \
(symmetric_scenario && \
(blk_queue_nonrot(bfqd->queue) || \
cond_for_seeky_on_ncq_hdd))))
return bfq_bfqq_sync(bfqq) &&
!cond_for_expiring_in_burst &&
(bfqq->wr_coeff > 1 ||
(bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) &&
!cond_for_expiring_non_wr)
);
}
/*
* If the in-service queue is empty but sync, and the function
* bfq_bfqq_must_not_expire returns true, then:
* 1) the queue must remain in service and cannot be expired, and
* 2) the disk must be idled to wait for the possible arrival of a new
* request for the queue.
* See the comments to the function bfq_bfqq_must_not_expire for the reasons
* why performing device idling is the best choice to boost the throughput
* and preserve service guarantees when bfq_bfqq_must_not_expire itself
* returns true.
*/
static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
bfq_bfqq_must_not_expire(bfqq);
}
/*
* Select a queue for service. If we have a current queue in service,
* check whether to continue servicing it, or retrieve and set a new one.
*/
static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq;
struct request *next_rq;
enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
bfqq = bfqd->in_service_queue;
if (bfqq == NULL)
goto new_queue;
bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
if (bfq_may_expire_for_budg_timeout(bfqq) &&
!timer_pending(&bfqd->idle_slice_timer) &&
!bfq_bfqq_must_idle(bfqq))
goto expire;
next_rq = bfqq->next_rq;
/*
* If bfqq has requests queued and it has enough budget left to
* serve them, keep the queue, otherwise expire it.
*/
if (next_rq != NULL) {
if (bfq_serv_to_charge(next_rq, bfqq) >
bfq_bfqq_budget_left(bfqq)) {
reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
goto expire;
} else {
/*
* The idle timer may be pending because we may
* not disable disk idling even when a new request
* arrives.
*/
if (timer_pending(&bfqd->idle_slice_timer)) {
/*
* If we get here: 1) at least a new request
* has arrived but we have not disabled the
* timer because the request was too small,
* 2) then the block layer has unplugged
* the device, causing the dispatch to be
* invoked.
*
* Since the device is unplugged, now the
* requests are probably large enough to
* provide a reasonable throughput.
* So we disable idling.
*/
bfq_clear_bfqq_wait_request(bfqq);
del_timer(&bfqd->idle_slice_timer);
}
goto keep_queue;
}
}
/*
* No requests pending. If the in-service queue still has requests
* in flight (possibly waiting for a completion) or is idling for a
* new request, then keep it.
*/
if (timer_pending(&bfqd->idle_slice_timer) ||
(bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
bfqq = NULL;
goto keep_queue;
}
reason = BFQ_BFQQ_NO_MORE_REQUESTS;
expire:
bfq_bfqq_expire(bfqd, bfqq, 0, reason);
new_queue:
bfqq = bfq_set_in_service_queue(bfqd);
bfq_log(bfqd, "select_queue: new queue %d returned",
bfqq != NULL ? bfqq->pid : 0);
keep_queue:
return bfqq;
}
static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct bfq_entity *entity = &bfqq->entity;
if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
bfq_log_bfqq(bfqd, bfqq,
"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
jiffies_to_msecs(bfqq->wr_cur_max_time),
bfqq->wr_coeff,
bfqq->entity.weight, bfqq->entity.orig_weight);
BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
entity->orig_weight * bfqq->wr_coeff);
if (entity->ioprio_changed)
bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
/*
* If the queue was activated in a burst, or
* too much time has elapsed from the beginning
* of this weight-raising period, or the queue has
* exceeded the acceptable number of cooperations,
* then end weight raising.
*/
if (bfq_bfqq_in_large_burst(bfqq) ||
bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
time_is_before_jiffies(bfqq->last_wr_start_finish +
bfqq->wr_cur_max_time)) {
bfqq->last_wr_start_finish = jiffies;
bfq_log_bfqq(bfqd, bfqq,
"wrais ending at %lu, rais_max_time %u",
bfqq->last_wr_start_finish,
jiffies_to_msecs(bfqq->wr_cur_max_time));
bfq_bfqq_end_wr(bfqq);
}
}
/* Update weight both if it must be raised and if it must be lowered */
if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
__bfq_entity_update_weight_prio(
bfq_entity_service_tree(entity),
entity);
}
/*
* Dispatch one request from bfqq, moving it to the request queue
* dispatch list.
*/
static int bfq_dispatch_request(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
int dispatched = 0;
struct request *rq;
unsigned long service_to_charge;
BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
/* Follow expired path, else get first next available. */
rq = bfq_check_fifo(bfqq);
if (rq == NULL)
rq = bfqq->next_rq;
service_to_charge = bfq_serv_to_charge(rq, bfqq);
if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
/*
* This may happen if the next rq is chosen in fifo order
* instead of sector order. The budget is properly
* dimensioned to be always sufficient to serve the next
* request only if it is chosen in sector order. The reason
* is that it would be quite inefficient and of little use
* to always make sure that the budget is large enough to
* serve even the possible next rq in fifo order.
* In fact, requests are seldom served in fifo order.
*
* Expire the queue for budget exhaustion, and make sure
* that the next act_budget is enough to serve the next
* request, even if it comes from the fifo expired path.
*/
bfqq->next_rq = rq;
/*
* Since this dispatch has failed, make sure that
* a new one will be performed.
*/
if (!bfqd->rq_in_driver)
bfq_schedule_dispatch(bfqd);
goto expire;
}
/* Finally, insert request into driver dispatch list. */
bfq_bfqq_served(bfqq, service_to_charge);
bfq_dispatch_insert(bfqd->queue, rq);
bfq_update_wr_data(bfqd, bfqq);
bfq_log_bfqq(bfqd, bfqq,
"dispatched %u sec req (%llu), budg left %lu",
blk_rq_sectors(rq),
(long long unsigned)blk_rq_pos(rq),
bfq_bfqq_budget_left(bfqq));
dispatched++;
if (bfqd->in_service_bic == NULL) {
atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
bfqd->in_service_bic = RQ_BIC(rq);
}
if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
dispatched >= bfqd->bfq_max_budget_async_rq) ||
bfq_class_idle(bfqq)))
goto expire;
return dispatched;
expire:
bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
return dispatched;
}
static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
{
int dispatched = 0;
while (bfqq->next_rq != NULL) {
bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
dispatched++;
}
BUG_ON(!list_empty(&bfqq->fifo));
return dispatched;
}
/*
* Drain our current requests.
* Used for barriers and when switching I/O schedulers on the fly.
*/
static int bfq_forced_dispatch(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq, *n;
struct bfq_service_tree *st;
int dispatched = 0;
bfqq = bfqd->in_service_queue;
if (bfqq != NULL)
__bfq_bfqq_expire(bfqd, bfqq);
/*
* Loop through classes, and be careful to leave the scheduler
* in a consistent state, as feedback mechanisms and vtime
* updates cannot be disabled during the process.
*/
list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
st = bfq_entity_service_tree(&bfqq->entity);
dispatched += __bfq_forced_dispatch_bfqq(bfqq);
bfqq->max_budget = bfq_max_budget(bfqd);
bfq_forget_idle(st);
}
BUG_ON(bfqd->busy_queues != 0);
return dispatched;
}
static int bfq_dispatch_requests(struct request_queue *q, int force)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_queue *bfqq;
int max_dispatch;
bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
if (bfqd->busy_queues == 0)
return 0;
if (unlikely(force))
return bfq_forced_dispatch(bfqd);
bfqq = bfq_select_queue(bfqd);
if (bfqq == NULL)
return 0;
max_dispatch = bfqd->bfq_quantum;
if (bfq_class_idle(bfqq))
max_dispatch = 1;
if (!bfq_bfqq_sync(bfqq))
max_dispatch = bfqd->bfq_max_budget_async_rq;
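/*
 * If bfqq has already dispatched at least max_dispatch requests,
 * allow it to keep dispatching only when it is the only busy
 * queue, and in any case never beyond 4 * max_dispatch.
 */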
if (bfqq->dispatched >= max_dispatch) {
if (bfqd->busy_queues > 1)
return 0;
if (bfqq->dispatched >= 4 * max_dispatch)
return 0;
}
if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
return 0;
bfq_clear_bfqq_wait_request(bfqq);
BUG_ON(timer_pending(&bfqd->idle_slice_timer));
if (!bfq_dispatch_request(bfqd, bfqq))
return 0;
bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
bfqq->pid, max_dispatch);
return 1;
}
/*
* Task holds one reference to the queue, dropped when task exits. Each rq
* in-flight on this queue also holds a reference, dropped when rq is freed.
*
* Queue lock must be held here.
*/
static void bfq_put_queue(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
BUG_ON(atomic_read(&bfqq->ref) <= 0);
bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
atomic_read(&bfqq->ref));
if (!atomic_dec_and_test(&bfqq->ref))
return;
BUG_ON(rb_first(&bfqq->sort_list) != NULL);
BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
BUG_ON(bfqq->entity.tree != NULL);
BUG_ON(bfq_bfqq_busy(bfqq));
BUG_ON(bfqd->in_service_queue == bfqq);
if (bfq_bfqq_sync(bfqq))
/*
* The fact that this queue is being destroyed does not
* invalidate the fact that this queue may have been
* activated during the current burst. As a consequence,
* although the queue does not exist anymore, and hence
* needs to be removed from the burst list if it is there,
* the burst size must not be decremented.
*/
hlist_del_init(&bfqq->burst_list_node);
bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
kmem_cache_free(bfq_pool, bfqq);
}
static void bfq_put_cooperator(struct bfq_queue *bfqq)
{
struct bfq_queue *__bfqq, *next;
/*
* If this queue was scheduled to merge with another queue, be
* sure to drop the reference taken on that queue (and others in
* the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
*/
__bfqq = bfqq->new_bfqq;
while (__bfqq) {
if (__bfqq == bfqq)
break;
next = __bfqq->new_bfqq;
bfq_put_queue(__bfqq);
__bfqq = next;
}
}
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
if (bfqq == bfqd->in_service_queue) {
__bfq_bfqq_expire(bfqd, bfqq);
bfq_schedule_dispatch(bfqd);
}
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
atomic_read(&bfqq->ref));
bfq_put_cooperator(bfqq);
bfq_put_queue(bfqq);
}
static inline void bfq_init_icq(struct io_cq *icq)
{
struct bfq_io_cq *bic = icq_to_bic(icq);
bic->ttime.last_end_request = jiffies;
/*
* A newly created bic indicates that the process has just
* started doing I/O, and is probably mapping into memory its
* executable and libraries: it definitely needs weight raising.
* There is however the possibility that the process performs,
* for a while, I/O close to some other process. EQM intercepts
* this behavior and may merge the queue corresponding to the
* process with some other queue, BEFORE the weight of the queue
* is raised. Merged queues are not weight-raised (they are assumed
* to belong to processes that benefit only from high throughput).
* If the merge is basically the consequence of an accident, then
* the queue will be split soon and will get back its old weight.
* It is then important to record somewhere that this queue
* does need weight raising, even if it did not manage to get its
* weight raised before being merged. To this end, we overload
* the field wr_time_left and assign 1 to it, to mark the queue
* as needing weight raising.
*/
bic->wr_time_left = 1;
}
static void bfq_exit_icq(struct io_cq *icq)
{
struct bfq_io_cq *bic = icq_to_bic(icq);
struct bfq_data *bfqd = bic_to_bfqd(bic);
if (bic->bfqq[BLK_RW_ASYNC]) {
bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
bic->bfqq[BLK_RW_ASYNC] = NULL;
}
if (bic->bfqq[BLK_RW_SYNC]) {
/*
* If the bic is using a shared queue, put the reference
* taken on the io_context when the bic started using a
* shared bfq_queue.
*/
if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
put_io_context(icq->ioc);
bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
bic->bfqq[BLK_RW_SYNC] = NULL;
}
}
/*
* Update the entity prio values; note that the new values will not
* be used until the next (re)activation.
*/
static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
{
struct task_struct *tsk = current;
int ioprio_class;
if (!bfq_bfqq_prio_changed(bfqq))
return;
ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
switch (ioprio_class) {
default:
dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
"bfq: bad prio %x\n", ioprio_class);
case IOPRIO_CLASS_NONE:
/*
* No prio set, inherit CPU scheduling settings.
*/
bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
break;
case IOPRIO_CLASS_RT:
bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
break;
case IOPRIO_CLASS_BE:
bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
break;
case IOPRIO_CLASS_IDLE:
bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
bfqq->entity.new_ioprio = 7;
bfq_clear_bfqq_idle_window(bfqq);
break;
}
bfqq->entity.ioprio_changed = 1;
bfq_clear_bfqq_prio_changed(bfqq);
}
static void bfq_changed_ioprio(struct bfq_io_cq *bic)
{
struct bfq_data *bfqd;
struct bfq_queue *bfqq, *new_bfqq;
struct bfq_group *bfqg;
unsigned long uninitialized_var(flags);
int ioprio = bic->icq.ioc->ioprio;
bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
&flags);
/*
* This condition may trigger on a newly created bic, be sure to
* drop the lock before returning.
*/
if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
goto out;
bfqq = bic->bfqq[BLK_RW_ASYNC];
if (bfqq != NULL) {
bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
sched_data);
new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
GFP_ATOMIC);
if (new_bfqq != NULL) {
bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
bfq_log_bfqq(bfqd, bfqq,
"changed_ioprio: bfqq %p %d",
bfqq, atomic_read(&bfqq->ref));
bfq_put_queue(bfqq);
}
}
bfqq = bic->bfqq[BLK_RW_SYNC];
if (bfqq != NULL)
bfq_mark_bfqq_prio_changed(bfqq);
bic->ioprio = ioprio;
out:
bfq_put_bfqd_unlock(bfqd, &flags);
}
static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
pid_t pid, int is_sync)
{
RB_CLEAR_NODE(&bfqq->entity.rb_node);
INIT_LIST_HEAD(&bfqq->fifo);
INIT_HLIST_NODE(&bfqq->burst_list_node);
atomic_set(&bfqq->ref, 0);
bfqq->bfqd = bfqd;
bfq_mark_bfqq_prio_changed(bfqq);
if (is_sync) {
if (!bfq_class_idle(bfqq))
bfq_mark_bfqq_idle_window(bfqq);
bfq_mark_bfqq_sync(bfqq);
}
bfq_mark_bfqq_IO_bound(bfqq);
/* Tentative initial value to trade off between throughput and latency */
bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
bfqq->pid = pid;
bfqq->wr_coeff = 1;
bfqq->last_wr_start_finish = 0;
/*
* Set to the value for which bfqq will not be deemed as
* soft rt when it becomes backlogged.
*/
bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
}
static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
struct bfq_group *bfqg,
int is_sync,
struct bfq_io_cq *bic,
gfp_t gfp_mask)
{
struct bfq_queue *bfqq, *new_bfqq = NULL;
retry:
/* bic always exists here */
bfqq = bic_to_bfqq(bic, is_sync);
/*
* Always try a new alloc if we fell back to the OOM bfqq
* originally, since it should just be a temporary situation.
*/
if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
bfqq = NULL;
if (new_bfqq != NULL) {
bfqq = new_bfqq;
new_bfqq = NULL;
} else if (gfp_mask & __GFP_WAIT) {
spin_unlock_irq(bfqd->queue->queue_lock);
new_bfqq = kmem_cache_alloc_node(bfq_pool,
gfp_mask | __GFP_ZERO,
bfqd->queue->node);
spin_lock_irq(bfqd->queue->queue_lock);
if (new_bfqq != NULL)
goto retry;
} else {
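/*
 * Atomic allocation: we cannot drop the queue lock to sleep,
 * so try once here and fall back to oom_bfqq on failure.
 */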
bfqq = kmem_cache_alloc_node(bfq_pool,
gfp_mask | __GFP_ZERO,
bfqd->queue->node);
}
if (bfqq != NULL) {
bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
bfq_log_bfqq(bfqd, bfqq, "allocated");
} else {
bfqq = &bfqd->oom_bfqq;
bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
}
bfq_init_prio_data(bfqq, bic);
bfq_init_entity(&bfqq->entity, bfqg);
}
if (new_bfqq != NULL)
kmem_cache_free(bfq_pool, new_bfqq);
return bfqq;
}
static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
struct bfq_group *bfqg,
int ioprio_class, int ioprio)
{
switch (ioprio_class) {
case IOPRIO_CLASS_RT:
return &bfqg->async_bfqq[0][ioprio];
case IOPRIO_CLASS_NONE:
ioprio = IOPRIO_NORM;
/* fall through */
case IOPRIO_CLASS_BE:
return &bfqg->async_bfqq[1][ioprio];
case IOPRIO_CLASS_IDLE:
return &bfqg->async_idle_bfqq;
default:
BUG();
}
}
static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
struct bfq_group *bfqg, int is_sync,
struct bfq_io_cq *bic, gfp_t gfp_mask)
{
const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
struct bfq_queue **async_bfqq = NULL;
struct bfq_queue *bfqq = NULL;
if (!is_sync) {
async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
ioprio);
bfqq = *async_bfqq;
}
if (bfqq == NULL)
bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
/*
* Pin the queue now that it's allocated, scheduler exit will
* prune it.
*/
if (!is_sync && *async_bfqq == NULL) {
atomic_inc(&bfqq->ref);
bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
bfqq, atomic_read(&bfqq->ref));
*async_bfqq = bfqq;
}
atomic_inc(&bfqq->ref);
bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
atomic_read(&bfqq->ref));
return bfqq;
}
static void bfq_update_io_thinktime(struct bfq_data *bfqd,
struct bfq_io_cq *bic)
{
unsigned long elapsed = jiffies - bic->ttime.last_end_request;
unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
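/*
 * Exponential moving average in fixed point: each update gives the
 * new sample weight 1/8 and the history weight 7/8, with values
 * scaled by 256 so integer division keeps enough precision;
 * ttime_mean is the rounded average think time.
 */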
bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
bic->ttime.ttime_samples;
}
static void bfq_update_io_seektime(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct request *rq)
{
sector_t sdist;
u64 total;
if (bfqq->last_request_pos < blk_rq_pos(rq))
sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
else
sdist = bfqq->last_request_pos - blk_rq_pos(rq);
/*
* Don't allow the seek distance to get too large from the
* odd fragment, pagein, etc.
*/
if (bfqq->seek_samples == 0) /* first request, not really a seek */
sdist = 0;
else if (bfqq->seek_samples <= 60) /* second & third seek */
sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
else
sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
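/*
 * Fold the capped distance into the decaying seek average, using
 * the same 1/8 vs 7/8 fixed-point scheme as the think-time
 * statistics, then recompute the rounded mean seek distance.
 */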
bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
total = bfqq->seek_total + (bfqq->seek_samples/2);
do_div(total, bfqq->seek_samples);
bfqq->seek_mean = (sector_t)total;
bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
(u64)bfqq->seek_mean);
}
/*
* Disable idle window if the process thinks too long or seeks so much that
* it doesn't matter.
*/
static void bfq_update_idle_window(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct bfq_io_cq *bic)
{
int enable_idle;
/* Don't idle for async or idle io prio class. */
if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
return;
/* Idle window just restored, statistics are meaningless. */
if (bfq_bfqq_just_split(bfqq))
return;
enable_idle = bfq_bfqq_idle_window(bfqq);
if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
bfqd->bfq_slice_idle == 0 ||
(bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
bfqq->wr_coeff == 1))
enable_idle = 0;
else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
bfqq->wr_coeff == 1)
enable_idle = 0;
else
enable_idle = 1;
}
bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
enable_idle);
if (enable_idle)
bfq_mark_bfqq_idle_window(bfqq);
else
bfq_clear_bfqq_idle_window(bfqq);
}
/*
* Called when a new fs request (rq) is added to bfqq. Check if there's
* something we should do about it.
*/
static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct request *rq)
{
struct bfq_io_cq *bic = RQ_BIC(rq);
if (rq->cmd_flags & REQ_META)
bfqq->meta_pending++;
bfq_update_io_thinktime(bfqd, bic);
bfq_update_io_seektime(bfqd, bfqq, rq);
if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
bfq_clear_bfqq_constantly_seeky(bfqq);
if (!blk_queue_nonrot(bfqd->queue)) {
BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
bfqd->const_seeky_busy_in_flight_queues--;
}
}
if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
!BFQQ_SEEKY(bfqq))
bfq_update_idle_window(bfqd, bfqq, bic);
bfq_clear_bfqq_just_split(bfqq);
bfq_log_bfqq(bfqd, bfqq,
"rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
(long long unsigned)bfqq->seek_mean);
bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
blk_rq_sectors(rq) < 32;
int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
/*
* There is just this request queued: if the request
* is small and the queue is not to be expired, then
* just exit.
*
* In this way, if the disk is being idled to wait for
* a new request from the in-service queue, we avoid
* unplugging the device and committing the disk to serve
* just a small request. On the contrary, we wait for
* the block layer to decide when to unplug the device:
* hopefully, new requests will be merged to this one
* quickly, then the device will be unplugged and
* larger requests will be dispatched.
*/
if (small_req && !budget_timeout)
return;
/*
* A large enough request arrived, or the queue is to
* be expired: in both cases disk idling is to be
* stopped, so clear wait_request flag and reset
* timer.
*/
bfq_clear_bfqq_wait_request(bfqq);
del_timer(&bfqd->idle_slice_timer);
/*
* The queue is not empty, because a new request just
* arrived. Hence we can safely expire the queue, in
* case of budget timeout, without risking that the
* timestamps of the queue are not updated correctly.
* See [1] for more details.
*/
if (budget_timeout)
bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
/*
* Let the request rip immediately, or let a new queue be
* selected if bfqq has just been expired.
*/
__blk_run_queue(bfqd->queue);
}
}
static void bfq_insert_request(struct request_queue *q, struct request *rq)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
assert_spin_locked(bfqd->queue->queue_lock);
/*
* An unplug may trigger a requeue of a request from the device
* driver: make sure we are in process context while trying to
* merge two bfq_queues.
*/
if (!in_interrupt()) {
new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
if (new_bfqq != NULL) {
if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
/*
* Release the request's reference to the old bfqq
* and make sure one is taken on the shared queue.
*/
new_bfqq->allocated[rq_data_dir(rq)]++;
bfqq->allocated[rq_data_dir(rq)]--;
atomic_inc(&new_bfqq->ref);
bfq_put_queue(bfqq);
if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
bfqq, new_bfqq);
rq->elv.priv[1] = new_bfqq;
bfqq = new_bfqq;
} else
bfq_bfqq_increase_failed_cooperations(bfqq);
}
bfq_init_prio_data(bfqq, RQ_BIC(rq));
bfq_add_request(rq);
/*
* Here a newly-created bfq_queue has already started a weight-raising
* period: clear wr_time_left to prevent bfq_bfqq_save_state()
* from assigning it a full weight-raising period. See the detailed
* comments about this field in bfq_init_icq().
*/
if (bfqq->bic != NULL)
bfqq->bic->wr_time_left = 0;
rq_set_fifo_time(rq, jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &bfqq->fifo);
bfq_rq_enqueued(bfqd, bfqq, rq);
}
static void bfq_update_hw_tag(struct bfq_data *bfqd)
{
bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
bfqd->rq_in_driver);
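/* hw_tag == 1: queueing on the drive has already been detected. */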
if (bfqd->hw_tag == 1)
return;
/*
* This sample is valid if the number of outstanding requests
* is large enough to allow a queueing behavior. Note that the
* sum is not exact, as it's not taking into account deactivated
* requests.
*/
if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
return;
if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
return;
bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
bfqd->max_rq_in_driver = 0;
bfqd->hw_tag_samples = 0;
}
static void bfq_completed_request(struct request_queue *q, struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
struct bfq_data *bfqd = bfqq->bfqd;
bool sync = bfq_bfqq_sync(bfqq);
bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
blk_rq_sectors(rq), sync);
bfq_update_hw_tag(bfqd);
BUG_ON(!bfqd->rq_in_driver);
BUG_ON(!bfqq->dispatched);
bfqd->rq_in_driver--;
bfqq->dispatched--;
if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
bfq_weights_tree_remove(bfqd, &bfqq->entity,
&bfqd->queue_weights_tree);
if (!blk_queue_nonrot(bfqd->queue)) {
BUG_ON(!bfqd->busy_in_flight_queues);
bfqd->busy_in_flight_queues--;
if (bfq_bfqq_constantly_seeky(bfqq)) {
BUG_ON(!bfqd->
const_seeky_busy_in_flight_queues);
bfqd->const_seeky_busy_in_flight_queues--;
}
}
}
if (sync) {
bfqd->sync_flight--;
RQ_BIC(rq)->ttime.last_end_request = jiffies;
}
/*
* If we are waiting to discover whether the request pattern of the
* task associated with the queue is actually isochronous, and
* both requisites for this condition to hold are satisfied, then
* compute soft_rt_next_start (see the comments to the function
* bfq_bfqq_softrt_next_start()).
*/
if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
RB_EMPTY_ROOT(&bfqq->sort_list))
bfqq->soft_rt_next_start =
bfq_bfqq_softrt_next_start(bfqd, bfqq);
/*
* If this is the in-service queue, check if it needs to be expired,
* or if we want to idle in case it has no pending requests.
*/
if (bfqd->in_service_queue == bfqq) {
if (bfq_bfqq_budget_new(bfqq))
bfq_set_budget_timeout(bfqd);
if (bfq_bfqq_must_idle(bfqq)) {
bfq_arm_slice_timer(bfqd);
goto out;
} else if (bfq_may_expire_for_budg_timeout(bfqq))
bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
(bfqq->dispatched == 0 ||
!bfq_bfqq_must_not_expire(bfqq)))
bfq_bfqq_expire(bfqd, bfqq, 0,
BFQ_BFQQ_NO_MORE_REQUESTS);
}
if (!bfqd->rq_in_driver)
bfq_schedule_dispatch(bfqd);
out:
return;
}
static inline int __bfq_may_queue(struct bfq_queue *bfqq)
{
if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
bfq_clear_bfqq_must_alloc(bfqq);
return ELV_MQUEUE_MUST;
}
return ELV_MQUEUE_MAY;
}
static int bfq_may_queue(struct request_queue *q, int rw)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
struct bfq_io_cq *bic;
struct bfq_queue *bfqq;
/*
* Don't force setup of a queue from here, as a call to may_queue
* does not necessarily imply that a request actually will be
* queued. So just lookup a possibly existing queue, or return
* 'may queue' if that fails.
*/
bic = bfq_bic_lookup(bfqd, tsk->io_context);
if (bic == NULL)
return ELV_MQUEUE_MAY;
bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
if (bfqq != NULL) {
bfq_init_prio_data(bfqq, bic);
return __bfq_may_queue(bfqq);
}
return ELV_MQUEUE_MAY;
}
/*
* Queue lock held here.
*/
static void bfq_put_request(struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq);
if (bfqq != NULL) {
const int rw = rq_data_dir(rq);
BUG_ON(!bfqq->allocated[rw]);
bfqq->allocated[rw]--;
rq->elv.priv[0] = NULL;
rq->elv.priv[1] = NULL;
bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
bfqq, atomic_read(&bfqq->ref));
bfq_put_queue(bfqq);
}
}
/*
* Returns NULL if a new bfqq should be allocated, or the old bfqq if this
* was the last process referring to said bfqq.
*/
static struct bfq_queue *
bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
{
bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
put_io_context(bic->icq.ioc);
if (bfqq_process_refs(bfqq) == 1) {
bfqq->pid = current->pid;
bfq_clear_bfqq_coop(bfqq);
bfq_clear_bfqq_split_coop(bfqq);
return bfqq;
}
bic_set_bfqq(bic, NULL, 1);
bfq_put_cooperator(bfqq);
bfq_put_queue(bfqq);
return NULL;
}
/*
* Allocate bfq data structures associated with this request.
*/
static int bfq_set_request(struct request_queue *q, struct request *rq,
struct bio *bio, gfp_t gfp_mask)
{
struct bfq_data *bfqd = q->elevator->elevator_data;
struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
const int rw = rq_data_dir(rq);
const int is_sync = rq_is_sync(rq);
struct bfq_queue *bfqq;
struct bfq_group *bfqg;
unsigned long flags;
bool split = false;
might_sleep_if(gfp_mask & __GFP_WAIT);
/* Check bic before bfq_changed_ioprio() dereferences it. */
if (bic == NULL) {
spin_lock_irqsave(q->queue_lock, flags); /* queue_fail unlocks */
goto queue_fail;
}
bfq_changed_ioprio(bic);
spin_lock_irqsave(q->queue_lock, flags);
bfqg = bfq_bic_update_cgroup(bic);
new_queue:
bfqq = bic_to_bfqq(bic, is_sync);
if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
bic_set_bfqq(bic, bfqq, is_sync);
if (split && is_sync) {
if ((bic->was_in_burst_list && bfqd->large_burst) ||
bic->saved_in_large_burst)
bfq_mark_bfqq_in_large_burst(bfqq);
else {
bfq_clear_bfqq_in_large_burst(bfqq);
if (bic->was_in_burst_list)
hlist_add_head(&bfqq->burst_list_node,
&bfqd->burst_list);
}
}
} else {
/* If the queue was seeky for too long, break it apart. */
if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
bfqq = bfq_split_bfqq(bic, bfqq);
split = true;
if (!bfqq)
goto new_queue;
}
}
bfqq->allocated[rw]++;
atomic_inc(&bfqq->ref);
bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
atomic_read(&bfqq->ref));
rq->elv.priv[0] = bic;
rq->elv.priv[1] = bfqq;
/*
* If a bfq_queue has only one process reference, it is owned
* by only one bfq_io_cq: we can set the bic field of the
* bfq_queue to the address of that structure. Also, if the
* queue has just been split, mark a flag so that the
* information is available to the other scheduler hooks.
*/
if (bfqq_process_refs(bfqq) == 1) {
bfqq->bic = bic;
if (split) {
bfq_mark_bfqq_just_split(bfqq);
/*
* If the queue has just been split from a shared
* queue, restore the idle window and the possible
* weight raising period.
*/
bfq_bfqq_resume_state(bfqq, bic);
}
}
spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
queue_fail:
bfq_schedule_dispatch(bfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
return 1;
}
static void bfq_kick_queue(struct work_struct *work)
{
struct bfq_data *bfqd =
container_of(work, struct bfq_data, unplug_work);
struct request_queue *q = bfqd->queue;
spin_lock_irq(q->queue_lock);
__blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
}
/*
* Handler of the expiration of the timer running if the in-service queue
* is idling inside its time slice.
*/
static void bfq_idle_slice_timer(unsigned long data)
{
struct bfq_data *bfqd = (struct bfq_data *)data;
struct bfq_queue *bfqq;
unsigned long flags;
enum bfqq_expiration reason;
spin_lock_irqsave(bfqd->queue->queue_lock, flags);
bfqq = bfqd->in_service_queue;
/*
* Theoretical race here: the in-service queue can be NULL or
* different from the queue that was idling if the timer handler
* spins on the queue_lock and a new request arrives for the
* current queue and there is a full dispatch cycle that changes
* the in-service queue. This can hardly happen, but in the worst
* case we just expire a queue too early.
*/
if (bfqq != NULL) {
bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
if (bfq_bfqq_budget_timeout(bfqq))
/*
* Also here the queue can be safely expired
* for budget timeout without wasting
* guarantees
*/
reason = BFQ_BFQQ_BUDGET_TIMEOUT;
else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
/*
* The queue may not be empty upon timer expiration,
* because we may not disable the timer when the
* first request of the in-service queue arrives
* during disk idling.
*/
reason = BFQ_BFQQ_TOO_IDLE;
else
goto schedule_dispatch;
bfq_bfqq_expire(bfqd, bfqq, 1, reason);
}
schedule_dispatch:
bfq_schedule_dispatch(bfqd);
spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
}
static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
{
del_timer_sync(&bfqd->idle_slice_timer);
cancel_work_sync(&bfqd->unplug_work);
}
static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
struct bfq_queue **bfqq_ptr)
{
struct bfq_group *root_group = bfqd->root_group;
struct bfq_queue *bfqq = *bfqq_ptr;
bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
if (bfqq != NULL) {
bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
bfqq, atomic_read(&bfqq->ref));
bfq_put_queue(bfqq);
*bfqq_ptr = NULL;
}
}
/*
* Release all the bfqg references to its async queues. If we are
* deallocating the group these queues may still contain requests, so
* we reparent them to the root cgroup (i.e., the only one that will
* exist for sure until all the requests on a device are gone).
*/
static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
int i, j;
for (i = 0; i < 2; i++)
for (j = 0; j < IOPRIO_BE_NR; j++)
__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
}
static void bfq_exit_queue(struct elevator_queue *e)
{
struct bfq_data *bfqd = e->elevator_data;
struct request_queue *q = bfqd->queue;
struct bfq_queue *bfqq, *n;
bfq_shutdown_timer_wq(bfqd);
spin_lock_irq(q->queue_lock);
BUG_ON(bfqd->in_service_queue != NULL);
list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
bfq_deactivate_bfqq(bfqd, bfqq, 0);
bfq_disconnect_groups(bfqd);
spin_unlock_irq(q->queue_lock);
bfq_shutdown_timer_wq(bfqd);
synchronize_rcu();
BUG_ON(timer_pending(&bfqd->idle_slice_timer));
bfq_free_root_group(bfqd);
kfree(bfqd);
}
static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct bfq_group *bfqg;
struct bfq_data *bfqd;
struct elevator_queue *eq;
eq = elevator_alloc(q, e);
if (eq == NULL)
return -ENOMEM;
bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
if (bfqd == NULL) {
kobject_put(&eq->kobj);
return -ENOMEM;
}
eq->elevator_data = bfqd;
/*
* Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
* Grab a permanent reference to it, so that the normal code flow
* will not attempt to free it.
*/
bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
atomic_inc(&bfqd->oom_bfqq.ref);
bfqd->queue = q;
spin_lock_irq(q->queue_lock);
q->elevator = eq;
spin_unlock_irq(q->queue_lock);
bfqg = bfq_alloc_root_group(bfqd, q->node);
if (bfqg == NULL) {
kfree(bfqd);
kobject_put(&eq->kobj);
return -ENOMEM;
}
bfqd->root_group = bfqg;
#ifdef CONFIG_CGROUP_BFQIO
bfqd->active_numerous_groups = 0;
#endif
init_timer(&bfqd->idle_slice_timer);
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd->idle_slice_timer.data = (unsigned long)bfqd;
bfqd->rq_pos_tree = RB_ROOT;
bfqd->queue_weights_tree = RB_ROOT;
bfqd->group_weights_tree = RB_ROOT;
INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
INIT_HLIST_HEAD(&bfqd->burst_list);
bfqd->hw_tag = -1;
bfqd->bfq_max_budget = bfq_default_max_budget;
bfqd->bfq_quantum = bfq_quantum;
bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
bfqd->bfq_back_max = bfq_back_max;
bfqd->bfq_back_penalty = bfq_back_penalty;
bfqd->bfq_slice_idle = bfq_slice_idle;
bfqd->bfq_class_idle_last_service = 0;
bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
bfqd->bfq_coop_thresh = 2;
bfqd->bfq_failed_cooperations = 7000;
bfqd->bfq_requests_within_timer = 120;
bfqd->bfq_large_burst_thresh = 11;
bfqd->bfq_burst_interval = msecs_to_jiffies(500);
bfqd->low_latency = true;
bfqd->bfq_wr_coeff = 20;
bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
bfqd->bfq_wr_max_time = 0;
bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
bfqd->bfq_wr_max_softrt_rate = 7000; /*
* Approximate rate required
* to playback or record a
* high-definition compressed
* video.
*/
bfqd->wr_busy_queues = 0;
bfqd->busy_in_flight_queues = 0;
bfqd->const_seeky_busy_in_flight_queues = 0;
/*
* Begin by assuming, optimistically, that the device peak rate is
* equal to the highest reference rate.
*/
bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
T_fast[blk_queue_nonrot(bfqd->queue)];
bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
bfqd->device_speed = BFQ_BFQD_FAST;
return 0;
}
static void bfq_slab_kill(void)
{
if (bfq_pool != NULL)
kmem_cache_destroy(bfq_pool);
}
static int __init bfq_slab_setup(void)
{
bfq_pool = KMEM_CACHE(bfq_queue, 0);
if (bfq_pool == NULL)
return -ENOMEM;
return 0;
}
static ssize_t bfq_var_show(unsigned int var, char *page)
{
return sprintf(page, "%d\n", var);
}
static ssize_t bfq_var_store(unsigned long *var, const char *page,
size_t count)
{
unsigned long new_val;
int ret = kstrtoul(page, 10, &new_val);
if (ret == 0)
*var = new_val;
return count;
}
static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
{
struct bfq_data *bfqd = e->elevator_data;
return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
jiffies_to_msecs(bfqd->bfq_wr_max_time) :
jiffies_to_msecs(bfq_wr_duration(bfqd)));
}
static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
{
struct bfq_queue *bfqq;
struct bfq_data *bfqd = e->elevator_data;
ssize_t num_char = 0;
num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
bfqd->queued);
spin_lock_irq(bfqd->queue->queue_lock);
num_char += sprintf(page + num_char, "Active:\n");
list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
num_char += sprintf(page + num_char,
"pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
bfqq->pid,
bfqq->entity.weight,
bfqq->queued[0],
bfqq->queued[1],
jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
jiffies_to_msecs(bfqq->wr_cur_max_time));
}
num_char += sprintf(page + num_char, "Idle:\n");
list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
num_char += sprintf(page + num_char,
"pid%d: weight %hu, dur %d/%u\n",
bfqq->pid,
bfqq->entity.weight,
jiffies_to_msecs(jiffies -
bfqq->last_wr_start_finish),
jiffies_to_msecs(bfqq->wr_cur_max_time));
}
spin_unlock_irq(bfqd->queue->queue_lock);
return num_char;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
struct bfq_data *bfqd = e->elevator_data; \
unsigned int __data = __VAR; \
if (__CONV) \
__data = jiffies_to_msecs(__data); \
return bfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
SHOW_FUNCTION(bfq_max_budget_async_rq_show,
bfqd->bfq_max_budget_async_rq, 0);
SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
1);
SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t \
__FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct bfq_data *bfqd = e->elevator_data; \
unsigned long uninitialized_var(__data); \
int ret = bfq_var_store(&__data, (page), count); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
if (__CONV) \
*(__PTR) = msecs_to_jiffies(__data); \
else \
*(__PTR) = __data; \
return ret; \
}
STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
INT_MAX, 1);
STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
INT_MAX, 1);
STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
INT_MAX, 0);
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
1, INT_MAX, 0);
STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
INT_MAX, 1);
STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
1);
STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
INT_MAX, 1);
STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
&bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
INT_MAX, 0);
#undef STORE_FUNCTION
/* do nothing for the moment */
static ssize_t bfq_weights_store(struct elevator_queue *e,
const char *page, size_t count)
{
return count;
}
static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
{
u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
return bfq_calc_max_budget(bfqd->peak_rate, timeout);
else
return bfq_default_max_budget;
}
static ssize_t bfq_max_budget_store(struct elevator_queue *e,
const char *page, size_t count)
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
int ret = bfq_var_store(&__data, (page), count);
if (__data == 0)
bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
else {
if (__data > INT_MAX)
__data = INT_MAX;
bfqd->bfq_max_budget = __data;
}
bfqd->bfq_user_max_budget = __data;
return ret;
}
static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
const char *page, size_t count)
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
int ret = bfq_var_store(&__data, (page), count);
if (__data < 1)
__data = 1;
else if (__data > INT_MAX)
__data = INT_MAX;
bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
if (bfqd->bfq_user_max_budget == 0)
bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
return ret;
}
static ssize_t bfq_low_latency_store(struct elevator_queue *e,
const char *page, size_t count)
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
int ret = bfq_var_store(&__data, (page), count);
if (__data > 1)
__data = 1;
if (__data == 0 && bfqd->low_latency != 0)
bfq_end_wr(bfqd);
bfqd->low_latency = __data;
return ret;
}
#define BFQ_ATTR(name) \
__ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
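/*
 * Illustrative expansion (not part of the original source):
 * BFQ_ATTR(quantum) expands to
 *
 *	__ATTR(quantum, S_IRUGO|S_IWUSR,
 *	       bfq_quantum_show, bfq_quantum_store)
 *
 * pairing each sysfs attribute with the show/store handlers generated
 * by the SHOW_FUNCTION/STORE_FUNCTION macros above.
 */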
static struct elv_fs_entry bfq_attrs[] = {
BFQ_ATTR(quantum),
BFQ_ATTR(fifo_expire_sync),
BFQ_ATTR(fifo_expire_async),
BFQ_ATTR(back_seek_max),
BFQ_ATTR(back_seek_penalty),
BFQ_ATTR(slice_idle),
BFQ_ATTR(max_budget),
BFQ_ATTR(max_budget_async_rq),
BFQ_ATTR(timeout_sync),
BFQ_ATTR(timeout_async),
BFQ_ATTR(low_latency),
BFQ_ATTR(wr_coeff),
BFQ_ATTR(wr_max_time),
BFQ_ATTR(wr_rt_max_time),
BFQ_ATTR(wr_min_idle_time),
BFQ_ATTR(wr_min_inter_arr_async),
BFQ_ATTR(wr_max_softrt_rate),
BFQ_ATTR(weights),
__ATTR_NULL
};
static struct elevator_type iosched_bfq = {
.ops = {
.elevator_merge_fn = bfq_merge,
.elevator_merged_fn = bfq_merged_request,
.elevator_merge_req_fn = bfq_merged_requests,
.elevator_allow_merge_fn = bfq_allow_merge,
.elevator_dispatch_fn = bfq_dispatch_requests,
.elevator_add_req_fn = bfq_insert_request,
.elevator_activate_req_fn = bfq_activate_request,
.elevator_deactivate_req_fn = bfq_deactivate_request,
.elevator_completed_req_fn = bfq_completed_request,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
.elevator_init_icq_fn = bfq_init_icq,
.elevator_exit_icq_fn = bfq_exit_icq,
.elevator_set_req_fn = bfq_set_request,
.elevator_put_req_fn = bfq_put_request,
.elevator_may_queue_fn = bfq_may_queue,
.elevator_init_fn = bfq_init_queue,
.elevator_exit_fn = bfq_exit_queue,
},
.icq_size = sizeof(struct bfq_io_cq),
.icq_align = __alignof__(struct bfq_io_cq),
.elevator_attrs = bfq_attrs,
.elevator_name = "bfq",
.elevator_owner = THIS_MODULE,
};
static int __init bfq_init(void)
{
/*
* Can be 0 on HZ < 1000 setups.
*/
if (bfq_slice_idle == 0)
bfq_slice_idle = 1;
if (bfq_timeout_async == 0)
bfq_timeout_async = 1;
if (bfq_slab_setup())
return -ENOMEM;
/*
* Times to load large popular applications for the typical systems
* installed on the reference devices (see the comments before the
* definitions of the two arrays).
*/
T_slow[0] = msecs_to_jiffies(2600);
T_slow[1] = msecs_to_jiffies(1000);
T_fast[0] = msecs_to_jiffies(5500);
T_fast[1] = msecs_to_jiffies(2000);
/*
* Thresholds that determine the switch between speed classes (see
* the comments before the definition of the array).
*/
device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
elv_register(&iosched_bfq);
pr_info("BFQ I/O-scheduler version: v7r6");
return 0;
}
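/*
 * Usage sketch (illustrative, not part of the original source; the
 * device name "sda" is just an example): once this module is loaded,
 * the elevator and the bfq_attrs tunables above are reachable from
 * userspace via sysfs:
 *
 *	# echo bfq > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	# echo 0 > /sys/block/sda/queue/iosched/low_latency
 *
 * Writes are routed through the STORE_FUNCTION-generated handlers,
 * which clamp the value and convert ms to jiffies where flagged.
 */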
static void __exit bfq_exit(void)
{
elv_unregister(&iosched_bfq);
bfq_slab_kill();
}
module_init(bfq_init);
module_exit(bfq_exit);
MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
MODULE_LICENSE("GPL");
| gpl-2.0 |
GHackAnonymous/linux | net/xfrm/xfrm_input.c | 393 | 9084 | /*
* xfrm_input.c
*
* Changes:
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
static struct kmem_cache *secpath_cachep __read_mostly;
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO];
int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
{
int err = 0;
if (unlikely(afinfo == NULL))
return -EINVAL;
if (unlikely(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_input_afinfo_lock);
if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
err = -EEXIST;
else
rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
spin_unlock_bh(&xfrm_input_afinfo_lock);
return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);
int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo)
{
int err = 0;
if (unlikely(afinfo == NULL))
return -EINVAL;
if (unlikely(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
spin_lock_bh(&xfrm_input_afinfo_lock);
if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
err = -EINVAL;
else
RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
}
spin_unlock_bh(&xfrm_input_afinfo_lock);
synchronize_rcu();
return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
{
struct xfrm_input_afinfo *afinfo;
if (unlikely(family >= NPROTO))
return NULL;
rcu_read_lock();
afinfo = rcu_dereference(xfrm_input_afinfo[family]);
if (unlikely(!afinfo))
rcu_read_unlock();
return afinfo;
}
static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo)
{
rcu_read_unlock();
}
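/*
 * Note (illustrative, not part of the original source): the get/put
 * pair above is deliberately asymmetric. xfrm_input_get_afinfo()
 * returns with rcu_read_lock() held whenever it returns non-NULL, and
 * has already dropped the lock when it returns NULL; every successful
 * get must therefore be paired with xfrm_input_put_afinfo(), exactly
 * as xfrm_rcv_cb() below does.
 */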
static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
int err)
{
int ret;
struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);
if (!afinfo)
return -EAFNOSUPPORT;
ret = afinfo->callback(skb, protocol, err);
xfrm_input_put_afinfo(afinfo);
return ret;
}
void __secpath_destroy(struct sec_path *sp)
{
int i;
for (i = 0; i < sp->len; i++)
xfrm_state_put(sp->xvec[i]);
kmem_cache_free(secpath_cachep, sp);
}
EXPORT_SYMBOL(__secpath_destroy);
struct sec_path *secpath_dup(struct sec_path *src)
{
struct sec_path *sp;
sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
if (!sp)
return NULL;
sp->len = 0;
if (src) {
int i;
memcpy(sp, src, sizeof(*sp));
for (i = 0; i < sp->len; i++)
xfrm_state_hold(sp->xvec[i]);
}
atomic_set(&sp->refcnt, 1);
return sp;
}
EXPORT_SYMBOL(secpath_dup);
/* Fetch spi and seq from ipsec header */
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
int offset, offset_seq;
int hlen;
switch (nexthdr) {
case IPPROTO_AH:
hlen = sizeof(struct ip_auth_hdr);
offset = offsetof(struct ip_auth_hdr, spi);
offset_seq = offsetof(struct ip_auth_hdr, seq_no);
break;
case IPPROTO_ESP:
hlen = sizeof(struct ip_esp_hdr);
offset = offsetof(struct ip_esp_hdr, spi);
offset_seq = offsetof(struct ip_esp_hdr, seq_no);
break;
case IPPROTO_COMP:
if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
return -EINVAL;
*spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
*seq = 0;
return 0;
default:
return 1;
}
if (!pskb_may_pull(skb, hlen))
return -EINVAL;
*spi = *(__be32 *)(skb_transport_header(skb) + offset);
*seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
return 0;
}
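/*
 * Worked example (illustrative, not part of the original source): for
 * IPPROTO_COMP the SPI is synthesized from the 16-bit CPI found two
 * bytes into the IPComp header. With a CPI of 0x1234 on the wire:
 *
 *	ntohs(cpi) == 0x1234		(16-bit value, host order)
 *	htonl(0x1234) == SPI 0x00001234	(network order, zero-extended)
 *
 * so IPComp states are looked up with the CPI in the low 16 bits of
 * the SPI, which is why *seq is simply set to 0 for that case above.
 */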
int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
struct xfrm_mode *inner_mode = x->inner_mode;
int err;
err = x->outer_mode->afinfo->extract_input(x, skb);
if (err)
return err;
if (x->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
if (inner_mode == NULL)
return -EAFNOSUPPORT;
}
skb->protocol = inner_mode->afinfo->eth_proto;
return inner_mode->input2(x, skb);
}
EXPORT_SYMBOL(xfrm_prepare_input);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
struct net *net = dev_net(skb->dev);
int err;
__be32 seq;
__be32 seq_hi;
struct xfrm_state *x = NULL;
xfrm_address_t *daddr;
struct xfrm_mode *inner_mode;
u32 mark = skb->mark;
unsigned int family;
int decaps = 0;
int async = 0;
/* A negative encap_type indicates async resumption. */
if (encap_type < 0) {
async = 1;
x = xfrm_input_state(skb);
seq = XFRM_SKB_CB(skb)->seq.input.low;
family = x->outer_mode->afinfo->family;
goto resume;
}
daddr = (xfrm_address_t *)(skb_network_header(skb) +
XFRM_SPI_SKB_CB(skb)->daddroff);
family = XFRM_SPI_SKB_CB(skb)->family;
/* if tunnel is present override skb->mark value with tunnel i_key */
if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
switch (family) {
case AF_INET:
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
break;
case AF_INET6:
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
break;
}
}
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
struct sec_path *sp;
sp = secpath_dup(skb->sp);
if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
goto drop;
}
if (skb->sp)
secpath_put(skb->sp);
skb->sp = sp;
}
seq = 0;
if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
do {
if (skb->sp->len == XFRM_MAX_DEPTH) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
}
x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
if (x == NULL) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
xfrm_audit_state_notfound(skb, family, spi, seq);
goto drop;
}
skb->sp->xvec[skb->sp->len++] = x;
spin_lock(&x->lock);
if (unlikely(x->km.state != XFRM_STATE_VALID)) {
if (x->km.state == XFRM_STATE_ACQ)
XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
else
XFRM_INC_STATS(net,
LINUX_MIB_XFRMINSTATEINVALID);
goto drop_unlock;
}
if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
goto drop_unlock;
}
if (x->repl->check(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
if (xfrm_state_check_expire(x)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
goto drop_unlock;
}
spin_unlock(&x->lock);
if (xfrm_tunnel_check(skb, x, family)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
goto drop;
}
seq_hi = htonl(xfrm_replay_seqhi(x, seq));
XFRM_SKB_CB(skb)->seq.input.low = seq;
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
skb_dst_force(skb);
nexthdr = x->type->input(x, skb);
if (nexthdr == -EINPROGRESS)
return 0;
resume:
spin_lock(&x->lock);
if (nexthdr <= 0) {
if (nexthdr == -EBADMSG) {
xfrm_audit_state_icvfail(x, skb,
x->type->proto);
x->stats.integrity_failed++;
}
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
goto drop_unlock;
}
/* only the first xfrm gets the encap type */
encap_type = 0;
if (async && x->repl->recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
x->repl->advance(x, seq);
x->curlft.bytes += skb->len;
x->curlft.packets++;
spin_unlock(&x->lock);
XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;
inner_mode = x->inner_mode;
if (x->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
if (inner_mode == NULL)
goto drop;
}
if (inner_mode->input(x, skb)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
goto drop;
}
if (x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) {
decaps = 1;
break;
}
/*
* We need the inner address. However, we only get here for
* transport mode so the outer address is identical.
*/
daddr = &x->id.daddr;
family = x->outer_mode->afinfo->family;
err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
if (err < 0) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
} while (!err);
err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
if (err)
goto drop;
nf_reset(skb);
if (decaps) {
skb_dst_drop(skb);
netif_rx(skb);
return 0;
} else {
return x->inner_mode->afinfo->transport_finish(skb, async);
}
drop_unlock:
spin_unlock(&x->lock);
drop:
xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL(xfrm_input);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
void __init xfrm_input_init(void)
{
secpath_cachep = kmem_cache_create("secpath_cache",
sizeof(struct sec_path),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
}
| gpl-2.0 |
MarginC/linux | drivers/usb/storage/isd200.c | 393 | 46380 | /* Transport & Protocol Driver for In-System Design, Inc. ISD200 ASIC
*
* Current development and maintenance:
* (C) 2001-2002 Björn Stenberg (bjorn@haxx.se)
*
* Developed with the assistance of:
* (C) 2002 Alan Stern <stern@rowland.org>
*
* Initial work:
* (C) 2000 In-System Design, Inc. (support@in-system.com)
*
* The ISD200 ASIC does not natively support ATA devices. The chip
* does implement an interface, the ATA Command Block (ATACB), which
* provides a means of passing ATA commands and ATA register accesses
* to a device.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* History:
*
* 2002-10-19: Removed the specialized transfer routines.
* (Alan Stern <stern@rowland.harvard.edu>)
* 2001-02-24: Removed lots of duplicate code and simplified the structure.
* (bjorn@haxx.se)
* 2002-01-16: Fixed endianness bug so it works on the ppc arch.
* (Luc Saillard <luc@saillard.org>)
* 2002-01-17: All bitfields removed.
* (bjorn@haxx.se)
*/
/* Include files */
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
#include "scsiglue.h"
#define DRV_NAME "ums-isd200"
MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC");
MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>");
MODULE_LICENSE("GPL");
static int isd200_Initialization(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags) }
static struct usb_device_id isd200_usb_ids[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, isd200_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev isd200_unusual_dev_list[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/* Timeout defines (in Seconds) */
#define ISD200_ENUM_BSY_TIMEOUT 35
#define ISD200_ENUM_DETECT_TIMEOUT 30
#define ISD200_DEFAULT_TIMEOUT 30
/* device flags */
#define DF_ATA_DEVICE 0x0001
#define DF_MEDIA_STATUS_ENABLED 0x0002
#define DF_REMOVABLE_MEDIA 0x0004
/* capability bit definitions */
#define CAPABILITY_DMA 0x01
#define CAPABILITY_LBA 0x02
/* command_setX bit definitions */
#define COMMANDSET_REMOVABLE 0x02
#define COMMANDSET_MEDIA_STATUS 0x10
/* ATA Vendor Specific defines */
#define ATA_ADDRESS_DEVHEAD_STD 0xa0
#define ATA_ADDRESS_DEVHEAD_LBA_MODE 0x40
#define ATA_ADDRESS_DEVHEAD_SLAVE 0x10
/* Action Select bits */
#define ACTION_SELECT_0 0x01
#define ACTION_SELECT_1 0x02
#define ACTION_SELECT_2 0x04
#define ACTION_SELECT_3 0x08
#define ACTION_SELECT_4 0x10
#define ACTION_SELECT_5 0x20
#define ACTION_SELECT_6 0x40
#define ACTION_SELECT_7 0x80
/* Register Select bits */
#define REG_ALTERNATE_STATUS 0x01
#define REG_DEVICE_CONTROL 0x01
#define REG_ERROR 0x02
#define REG_FEATURES 0x02
#define REG_SECTOR_COUNT 0x04
#define REG_SECTOR_NUMBER 0x08
#define REG_CYLINDER_LOW 0x10
#define REG_CYLINDER_HIGH 0x20
#define REG_DEVICE_HEAD 0x40
#define REG_STATUS 0x80
#define REG_COMMAND 0x80
/* ATA registers offset definitions */
#define ATA_REG_ERROR_OFFSET 1
#define ATA_REG_LCYL_OFFSET 4
#define ATA_REG_HCYL_OFFSET 5
#define ATA_REG_STATUS_OFFSET 7
/* ATA error definitions not in <linux/hdreg.h> */
#define ATA_ERROR_MEDIA_CHANGE 0x20
/* ATA command definitions not in <linux/hdreg.h> */
#define ATA_COMMAND_GET_MEDIA_STATUS 0xDA
#define ATA_COMMAND_MEDIA_EJECT 0xED
/* ATA drive control definitions */
#define ATA_DC_DISABLE_INTERRUPTS 0x02
#define ATA_DC_RESET_CONTROLLER 0x04
#define ATA_DC_REENABLE_CONTROLLER 0x00
/*
* General purpose return codes
*/
#define ISD200_ERROR -1
#define ISD200_GOOD 0
/*
* Transport return codes
*/
#define ISD200_TRANSPORT_GOOD 0 /* Transport good, command good */
#define ISD200_TRANSPORT_FAILED 1 /* Transport good, command failed */
#define ISD200_TRANSPORT_ERROR 2 /* Transport bad (i.e. device dead) */
/* driver action codes */
#define ACTION_READ_STATUS 0
#define ACTION_RESET 1
#define ACTION_REENABLE 2
#define ACTION_SOFT_RESET 3
#define ACTION_ENUM 4
#define ACTION_IDENTIFY 5
/*
* ata_cdb struct
*/
union ata_cdb {
struct {
unsigned char SignatureByte0;
unsigned char SignatureByte1;
unsigned char ActionSelect;
unsigned char RegisterSelect;
unsigned char TransferBlockSize;
unsigned char WriteData3F6;
unsigned char WriteData1F1;
unsigned char WriteData1F2;
unsigned char WriteData1F3;
unsigned char WriteData1F4;
unsigned char WriteData1F5;
unsigned char WriteData1F6;
unsigned char WriteData1F7;
unsigned char Reserved[3];
} generic;
struct {
unsigned char SignatureByte0;
unsigned char SignatureByte1;
unsigned char ActionSelect;
unsigned char RegisterSelect;
unsigned char TransferBlockSize;
unsigned char AlternateStatusByte;
unsigned char ErrorByte;
unsigned char SectorCountByte;
unsigned char SectorNumberByte;
unsigned char CylinderLowByte;
unsigned char CylinderHighByte;
unsigned char DeviceHeadByte;
unsigned char StatusByte;
unsigned char Reserved[3];
} read;
struct {
unsigned char SignatureByte0;
unsigned char SignatureByte1;
unsigned char ActionSelect;
unsigned char RegisterSelect;
unsigned char TransferBlockSize;
unsigned char DeviceControlByte;
unsigned char FeaturesByte;
unsigned char SectorCountByte;
unsigned char SectorNumberByte;
unsigned char CylinderLowByte;
unsigned char CylinderHighByte;
unsigned char DeviceHeadByte;
unsigned char CommandByte;
unsigned char Reserved[3];
} write;
};
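/*
 * Sketch (illustrative, not part of the original source; "info"
 * stands for a struct isd200_info pointer as used throughout this
 * driver): callers zero the union, fill the generic header, then use
 * the write view for the registers named in RegisterSelect -- e.g. a
 * controller reset, mirroring ACTION_RESET in isd200_action() below:
 */
#if 0	/* example only -- never compiled */
union ata_cdb ata;

memset(&ata, 0, sizeof(ata));
ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ata.generic.TransferBlockSize = 1;
ata.generic.RegisterSelect = REG_DEVICE_CONTROL;
ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER;
#endif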
/*
* Inquiry data structure. This is the data returned from the target
* after it receives an inquiry.
*
* This structure may be extended by the number of bytes specified
* in the field AdditionalLength. The defined size constant only
* includes fields through ProductRevisionLevel.
*/
/*
* DeviceType field
*/
#define DIRECT_ACCESS_DEVICE 0x00 /* disks */
#define DEVICE_REMOVABLE 0x80
struct inquiry_data {
unsigned char DeviceType;
unsigned char DeviceTypeModifier;
unsigned char Versions;
unsigned char Format;
unsigned char AdditionalLength;
unsigned char Reserved[2];
unsigned char Capability;
unsigned char VendorId[8];
unsigned char ProductId[16];
unsigned char ProductRevisionLevel[4];
unsigned char VendorSpecific[20];
unsigned char Reserved3[40];
} __attribute__ ((packed));
/*
* INQUIRY data buffer size
*/
#define INQUIRYDATABUFFERSIZE 36
/*
* ISD200 CONFIG data struct
*/
#define ATACFG_TIMING 0x0f
#define ATACFG_ATAPI_RESET 0x10
#define ATACFG_MASTER 0x20
#define ATACFG_BLOCKSIZE 0xa0
#define ATACFGE_LAST_LUN 0x07
#define ATACFGE_DESC_OVERRIDE 0x08
#define ATACFGE_STATE_SUSPEND 0x10
#define ATACFGE_SKIP_BOOT 0x20
#define ATACFGE_CONF_DESC2 0x40
#define ATACFGE_INIT_STATUS 0x80
#define CFG_CAPABILITY_SRST 0x01
struct isd200_config {
unsigned char EventNotification;
unsigned char ExternalClock;
unsigned char ATAInitTimeout;
unsigned char ATAConfig;
unsigned char ATAMajorCommand;
unsigned char ATAMinorCommand;
unsigned char ATAExtraConfig;
unsigned char Capability;
} __attribute__ ((packed));
/*
* ISD200 driver information struct
*/
struct isd200_info {
struct inquiry_data InquiryData;
u16 *id;
struct isd200_config ConfigData;
unsigned char *RegsBuf;
unsigned char ATARegs[8];
unsigned char DeviceHead;
unsigned char DeviceFlags;
/* maximum number of LUNs supported */
unsigned char MaxLUNs;
unsigned char cmnd[BLK_MAX_CDB];
struct scsi_cmnd srb;
struct scatterlist sg;
};
/*
* Read Capacity Data - returned in Big Endian format
*/
struct read_capacity_data {
__be32 LogicalBlockAddress;
__be32 BytesPerBlock;
};
/*
* Read Block Limits Data - returned in Big Endian format
* This structure returns the maximum and minimum block
* size for a TAPE device.
*/
struct read_block_limits {
unsigned char Reserved;
unsigned char BlockMaximumSize[3];
unsigned char BlockMinimumSize[2];
};
/*
* Sense Data Format
*/
#define SENSE_ERRCODE 0x7f
#define SENSE_ERRCODE_VALID 0x80
#define SENSE_FLAG_SENSE_KEY 0x0f
#define SENSE_FLAG_BAD_LENGTH 0x20
#define SENSE_FLAG_END_OF_MEDIA 0x40
#define SENSE_FLAG_FILE_MARK 0x80
struct sense_data {
unsigned char ErrorCode;
unsigned char SegmentNumber;
unsigned char Flags;
unsigned char Information[4];
unsigned char AdditionalSenseLength;
unsigned char CommandSpecificInformation[4];
unsigned char AdditionalSenseCode;
unsigned char AdditionalSenseCodeQualifier;
unsigned char FieldReplaceableUnitCode;
unsigned char SenseKeySpecific[3];
} __attribute__ ((packed));
/*
* Default request sense buffer size
*/
#define SENSE_BUFFER_SIZE 18
/***********************************************************************
* Helper routines
***********************************************************************/
/**************************************************************************
* isd200_build_sense
*
* Builds an artificial sense buffer to report the results of a
* failed command.
*
* RETURNS:
* void
*/
static void isd200_build_sense(struct us_data *us, struct scsi_cmnd *srb)
{
struct isd200_info *info = (struct isd200_info *)us->extra;
struct sense_data *buf = (struct sense_data *) &srb->sense_buffer[0];
unsigned char error = info->ATARegs[ATA_REG_ERROR_OFFSET];
if(error & ATA_ERROR_MEDIA_CHANGE) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = UNIT_ATTENTION;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else if (error & ATA_MCR) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = UNIT_ATTENTION;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else if (error & ATA_TRK0NF) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = NOT_READY;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else if (error & ATA_UNC) {
buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID;
buf->AdditionalSenseLength = 0xb;
buf->Flags = DATA_PROTECT;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
} else {
buf->ErrorCode = 0;
buf->AdditionalSenseLength = 0;
buf->Flags = 0;
buf->AdditionalSenseCode = 0;
buf->AdditionalSenseCodeQualifier = 0;
}
}
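/*
 * Note (illustrative, not part of the original source): the sense key
 * lands in the low nibble of the Flags byte, which sits at offset 2
 * of the fixed-format sense data built above. That is the byte
 * isd200_invoke_transport() later tests with
 * (srb->sense_buffer[2] & 0xf) to decide whether the reported
 * "error" was actually benign.
 */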
/***********************************************************************
* Transport routines
***********************************************************************/
/**************************************************************************
* isd200_set_srb(), isd200_srb_set_bufflen()
*
* Two helpers to facilitate initialization of the scsi_cmnd structure.
* These will need to change when struct scsi_cmnd changes.
*/
static void isd200_set_srb(struct isd200_info *info,
enum dma_data_direction dir, void* buff, unsigned bufflen)
{
struct scsi_cmnd *srb = &info->srb;
if (buff)
sg_init_one(&info->sg, buff, bufflen);
srb->sc_data_direction = dir;
srb->sdb.table.sgl = buff ? &info->sg : NULL;
srb->sdb.length = bufflen;
srb->sdb.table.nents = buff ? 1 : 0;
}
static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen)
{
srb->sdb.length = bufflen;
}
/**************************************************************************
* isd200_action
*
* Routine for sending commands to the isd200
*
* RETURNS:
* ISD status code
*/
static int isd200_action( struct us_data *us, int action,
void* pointer, int value )
{
union ata_cdb ata;
/* static to prevent this large struct being placed on the valuable stack */
static struct scsi_device srb_dev;
struct isd200_info *info = (struct isd200_info *)us->extra;
struct scsi_cmnd *srb = &info->srb;
int status;
memset(&ata, 0, sizeof(ata));
srb->cmnd = info->cmnd;
srb->device = &srb_dev;
ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ata.generic.TransferBlockSize = 1;
switch ( action ) {
case ACTION_READ_STATUS:
usb_stor_dbg(us, " isd200_action(READ_STATUS)\n");
ata.generic.ActionSelect = ACTION_SELECT_0|ACTION_SELECT_2;
ata.generic.RegisterSelect =
REG_CYLINDER_LOW | REG_CYLINDER_HIGH |
REG_STATUS | REG_ERROR;
isd200_set_srb(info, DMA_FROM_DEVICE, pointer, value);
break;
case ACTION_ENUM:
usb_stor_dbg(us, " isd200_action(ENUM,0x%02x)\n", value);
ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2|
ACTION_SELECT_3|ACTION_SELECT_4|
ACTION_SELECT_5;
ata.generic.RegisterSelect = REG_DEVICE_HEAD;
ata.write.DeviceHeadByte = value;
isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_RESET:
usb_stor_dbg(us, " isd200_action(RESET)\n");
ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2|
ACTION_SELECT_3|ACTION_SELECT_4;
ata.generic.RegisterSelect = REG_DEVICE_CONTROL;
ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER;
isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_REENABLE:
usb_stor_dbg(us, " isd200_action(REENABLE)\n");
ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2|
ACTION_SELECT_3|ACTION_SELECT_4;
ata.generic.RegisterSelect = REG_DEVICE_CONTROL;
ata.write.DeviceControlByte = ATA_DC_REENABLE_CONTROLLER;
isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_SOFT_RESET:
usb_stor_dbg(us, " isd200_action(SOFT_RESET)\n");
ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_5;
ata.generic.RegisterSelect = REG_DEVICE_HEAD | REG_COMMAND;
ata.write.DeviceHeadByte = info->DeviceHead;
ata.write.CommandByte = ATA_CMD_DEV_RESET;
isd200_set_srb(info, DMA_NONE, NULL, 0);
break;
case ACTION_IDENTIFY:
usb_stor_dbg(us, " isd200_action(IDENTIFY)\n");
ata.generic.RegisterSelect = REG_COMMAND;
ata.write.CommandByte = ATA_CMD_ID_ATA;
isd200_set_srb(info, DMA_FROM_DEVICE, info->id,
ATA_ID_WORDS * 2);
break;
default:
usb_stor_dbg(us, "Error: Undefined action %d\n", action);
return ISD200_ERROR;
}
memcpy(srb->cmnd, &ata, sizeof(ata.generic));
srb->cmd_len = sizeof(ata.generic);
status = usb_stor_Bulk_transport(srb, us);
if (status == USB_STOR_TRANSPORT_GOOD)
status = ISD200_GOOD;
else {
usb_stor_dbg(us, " isd200_action(0x%02x) error: %d\n",
action, status);
status = ISD200_ERROR;
/* need to reset device here */
}
return status;
}
/**************************************************************************
* isd200_read_regs
*
* Read ATA Registers
*
* RETURNS:
* ISD status code
*/
static int isd200_read_regs( struct us_data *us )
{
struct isd200_info *info = (struct isd200_info *)us->extra;
int retStatus = ISD200_GOOD;
int transferStatus;
usb_stor_dbg(us, "Entering isd200_IssueATAReadRegs\n");
transferStatus = isd200_action( us, ACTION_READ_STATUS,
info->RegsBuf, sizeof(info->ATARegs) );
if (transferStatus != ISD200_TRANSPORT_GOOD) {
usb_stor_dbg(us, " Error reading ATA registers\n");
retStatus = ISD200_ERROR;
} else {
memcpy(info->ATARegs, info->RegsBuf, sizeof(info->ATARegs));
usb_stor_dbg(us, " Got ATA Register[ATA_REG_ERROR_OFFSET] = 0x%x\n",
info->ATARegs[ATA_REG_ERROR_OFFSET]);
}
return retStatus;
}
/**************************************************************************
* Invoke the transport and basic error-handling/recovery methods
*
* This is used by the protocol layers to actually send the message to
* the device and receive the response.
*/
static void isd200_invoke_transport( struct us_data *us,
struct scsi_cmnd *srb,
union ata_cdb *ataCdb )
{
int need_auto_sense = 0;
int transferStatus;
int result;
/* send the command to the transport layer */
memcpy(srb->cmnd, ataCdb, sizeof(ataCdb->generic));
srb->cmd_len = sizeof(ataCdb->generic);
transferStatus = usb_stor_Bulk_transport(srb, us);
/* if the command gets aborted by the higher layers, we need to
* short-circuit all other processing
*/
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
usb_stor_dbg(us, "-- command was aborted\n");
goto Handle_Abort;
}
switch (transferStatus) {
case USB_STOR_TRANSPORT_GOOD:
/* Indicate a good result */
srb->result = SAM_STAT_GOOD;
break;
case USB_STOR_TRANSPORT_NO_SENSE:
usb_stor_dbg(us, "-- transport indicates protocol failure\n");
srb->result = SAM_STAT_CHECK_CONDITION;
return;
case USB_STOR_TRANSPORT_FAILED:
usb_stor_dbg(us, "-- transport indicates command failure\n");
need_auto_sense = 1;
break;
case USB_STOR_TRANSPORT_ERROR:
usb_stor_dbg(us, "-- transport indicates transport error\n");
srb->result = DID_ERROR << 16;
/* Need reset here */
return;
default:
usb_stor_dbg(us, "-- transport indicates unknown error\n");
srb->result = DID_ERROR << 16;
/* Need reset here */
return;
}
if ((scsi_get_resid(srb) > 0) &&
!((srb->cmnd[0] == REQUEST_SENSE) ||
(srb->cmnd[0] == INQUIRY) ||
(srb->cmnd[0] == MODE_SENSE) ||
(srb->cmnd[0] == LOG_SENSE) ||
(srb->cmnd[0] == MODE_SENSE_10))) {
usb_stor_dbg(us, "-- unexpectedly short transfer\n");
need_auto_sense = 1;
}
if (need_auto_sense) {
result = isd200_read_regs(us);
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
usb_stor_dbg(us, "-- auto-sense aborted\n");
goto Handle_Abort;
}
if (result == ISD200_GOOD) {
isd200_build_sense(us, srb);
srb->result = SAM_STAT_CHECK_CONDITION;
/* If things are really okay, then let's show that */
if ((srb->sense_buffer[2] & 0xf) == 0x0)
srb->result = SAM_STAT_GOOD;
} else {
srb->result = DID_ERROR << 16;
/* Need reset here */
}
}
/* Regardless of auto-sense, if we _know_ we have an error
* condition, show that in the result code
*/
if (transferStatus == USB_STOR_TRANSPORT_FAILED)
srb->result = SAM_STAT_CHECK_CONDITION;
return;
/* abort processing: the bulk-only transport requires a reset
* following an abort */
Handle_Abort:
srb->result = DID_ABORT << 16;
/* permit the reset transfer to take place */
clear_bit(US_FLIDX_ABORTING, &us->dflags);
/* Need reset here */
}
#ifdef CONFIG_USB_STORAGE_DEBUG
static void isd200_log_config(struct us_data *us, struct isd200_info *info)
{
usb_stor_dbg(us, " Event Notification: 0x%x\n",
info->ConfigData.EventNotification);
usb_stor_dbg(us, " External Clock: 0x%x\n",
info->ConfigData.ExternalClock);
usb_stor_dbg(us, " ATA Init Timeout: 0x%x\n",
info->ConfigData.ATAInitTimeout);
usb_stor_dbg(us, " ATAPI Command Block Size: 0x%x\n",
(info->ConfigData.ATAConfig & ATACFG_BLOCKSIZE) >> 6);
usb_stor_dbg(us, " Master/Slave Selection: 0x%x\n",
info->ConfigData.ATAConfig & ATACFG_MASTER);
usb_stor_dbg(us, " ATAPI Reset: 0x%x\n",
info->ConfigData.ATAConfig & ATACFG_ATAPI_RESET);
usb_stor_dbg(us, " ATA Timing: 0x%x\n",
info->ConfigData.ATAConfig & ATACFG_TIMING);
usb_stor_dbg(us, " ATA Major Command: 0x%x\n",
info->ConfigData.ATAMajorCommand);
usb_stor_dbg(us, " ATA Minor Command: 0x%x\n",
info->ConfigData.ATAMinorCommand);
usb_stor_dbg(us, " Init Status: 0x%x\n",
info->ConfigData.ATAExtraConfig & ATACFGE_INIT_STATUS);
usb_stor_dbg(us, " Config Descriptor 2: 0x%x\n",
info->ConfigData.ATAExtraConfig & ATACFGE_CONF_DESC2);
usb_stor_dbg(us, " Skip Device Boot: 0x%x\n",
info->ConfigData.ATAExtraConfig & ATACFGE_SKIP_BOOT);
usb_stor_dbg(us, " ATA 3 State Suspend: 0x%x\n",
info->ConfigData.ATAExtraConfig & ATACFGE_STATE_SUSPEND);
usb_stor_dbg(us, " Descriptor Override: 0x%x\n",
info->ConfigData.ATAExtraConfig & ATACFGE_DESC_OVERRIDE);
usb_stor_dbg(us, " Last LUN Identifier: 0x%x\n",
info->ConfigData.ATAExtraConfig & ATACFGE_LAST_LUN);
usb_stor_dbg(us, " SRST Enable: 0x%x\n",
info->ConfigData.ATAExtraConfig & CFG_CAPABILITY_SRST);
}
#endif
/**************************************************************************
* isd200_write_config
*
* Write the ISD200 Configuration data
*
* RETURNS:
* ISD status code
*/
static int isd200_write_config( struct us_data *us )
{
struct isd200_info *info = (struct isd200_info *)us->extra;
int retStatus = ISD200_GOOD;
int result;
#ifdef CONFIG_USB_STORAGE_DEBUG
usb_stor_dbg(us, "Entering isd200_write_config\n");
usb_stor_dbg(us, " Writing the following ISD200 Config Data:\n");
isd200_log_config(us, info);
#endif
/* let's send the command via the control pipe */
result = usb_stor_ctrl_transfer(
us,
us->send_ctrl_pipe,
0x01,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0000,
0x0002,
(void *) &info->ConfigData,
sizeof(info->ConfigData));
if (result >= 0) {
usb_stor_dbg(us, " ISD200 Config Data was written successfully\n");
} else {
usb_stor_dbg(us, " Request to write ISD200 Config Data failed!\n");
retStatus = ISD200_ERROR;
}
usb_stor_dbg(us, "Leaving isd200_write_config %08X\n", retStatus);
return retStatus;
}
/**************************************************************************
* isd200_read_config
*
* Reads the ISD200 Configuration data
*
* RETURNS:
* ISD status code
*/
static int isd200_read_config( struct us_data *us )
{
struct isd200_info *info = (struct isd200_info *)us->extra;
int retStatus = ISD200_GOOD;
int result;
usb_stor_dbg(us, "Entering isd200_read_config\n");
/* read the configuration information from ISD200. Use this to */
/* determine what the special ATA CDB bytes are. */
result = usb_stor_ctrl_transfer(
us,
us->recv_ctrl_pipe,
0x02,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x0000,
0x0002,
(void *) &info->ConfigData,
sizeof(info->ConfigData));
if (result >= 0) {
usb_stor_dbg(us, " Retrieved the following ISD200 Config Data:\n");
#ifdef CONFIG_USB_STORAGE_DEBUG
isd200_log_config(us, info);
#endif
} else {
usb_stor_dbg(us, " Request to get ISD200 Config Data failed!\n");
retStatus = ISD200_ERROR;
}
usb_stor_dbg(us, "Leaving isd200_read_config %08X\n", retStatus);
return retStatus;
}
/**************************************************************************
* isd200_atapi_soft_reset
*
* Perform an Atapi Soft Reset on the device
*
* RETURNS:
* NT status code
*/
static int isd200_atapi_soft_reset( struct us_data *us )
{
int retStatus = ISD200_GOOD;
int transferStatus;
usb_stor_dbg(us, "Entering isd200_atapi_soft_reset\n");
transferStatus = isd200_action( us, ACTION_SOFT_RESET, NULL, 0 );
if (transferStatus != ISD200_TRANSPORT_GOOD) {
usb_stor_dbg(us, " Error issuing Atapi Soft Reset\n");
retStatus = ISD200_ERROR;
}
usb_stor_dbg(us, "Leaving isd200_atapi_soft_reset %08X\n", retStatus);
return retStatus;
}
/**************************************************************************
* isd200_srst
*
* Perform an SRST on the device
*
* RETURNS:
* ISD status code
*/
static int isd200_srst( struct us_data *us )
{
int retStatus = ISD200_GOOD;
int transferStatus;
usb_stor_dbg(us, "Entering isd200_SRST\n");
transferStatus = isd200_action( us, ACTION_RESET, NULL, 0 );
/* check to see if this request failed */
if (transferStatus != ISD200_TRANSPORT_GOOD) {
usb_stor_dbg(us, " Error issuing SRST\n");
retStatus = ISD200_ERROR;
} else {
/* delay 10ms to give the drive a chance to see it */
msleep(10);
transferStatus = isd200_action( us, ACTION_REENABLE, NULL, 0 );
if (transferStatus != ISD200_TRANSPORT_GOOD) {
usb_stor_dbg(us, " Error taking drive out of reset\n");
retStatus = ISD200_ERROR;
} else {
/* delay 50ms to give the drive a chance to recover after SRST */
msleep(50);
}
}
usb_stor_dbg(us, "Leaving isd200_srst %08X\n", retStatus);
return retStatus;
}
/**************************************************************************
* isd200_try_enum
*
* Helper function for isd200_manual_enum(). Does ENUM and READ_STATUS
* and tries to analyze the status registers
*
* RETURNS:
* ISD status code
*/
static int isd200_try_enum(struct us_data *us, unsigned char master_slave,
int detect )
{
int status = ISD200_GOOD;
unsigned long endTime;
struct isd200_info *info = (struct isd200_info *)us->extra;
unsigned char *regs = info->RegsBuf;
int recheckAsMaster = 0;
if ( detect )
endTime = jiffies + ISD200_ENUM_DETECT_TIMEOUT * HZ;
else
endTime = jiffies + ISD200_ENUM_BSY_TIMEOUT * HZ;
/* loop until we detect !BSY or timeout */
while(1) {
status = isd200_action( us, ACTION_ENUM, NULL, master_slave );
if ( status != ISD200_GOOD )
break;
status = isd200_action( us, ACTION_READ_STATUS,
regs, 8 );
if ( status != ISD200_GOOD )
break;
if (!detect) {
if (regs[ATA_REG_STATUS_OFFSET] & ATA_BUSY) {
usb_stor_dbg(us, " %s status is still BSY, try again...\n",
master_slave == ATA_ADDRESS_DEVHEAD_STD ?
"Master" : "Slave");
} else {
usb_stor_dbg(us, " %s status !BSY, continue with next operation\n",
master_slave == ATA_ADDRESS_DEVHEAD_STD ?
"Master" : "Slave");
break;
}
}
/* check for ATA_BUSY and */
/* ATA_DF (workaround for the ATA Zip drive) and */
/* ATA_ERR (workaround for the Archos CD-ROM) */
else if (regs[ATA_REG_STATUS_OFFSET] &
(ATA_BUSY | ATA_DF | ATA_ERR)) {
usb_stor_dbg(us, " Status indicates it is not ready, try again...\n");
}
/* check for DRDY, ATA devices set DRDY after SRST */
else if (regs[ATA_REG_STATUS_OFFSET] & ATA_DRDY) {
usb_stor_dbg(us, " Identified ATA device\n");
info->DeviceFlags |= DF_ATA_DEVICE;
info->DeviceHead = master_slave;
break;
}
/* check Cylinder High/Low to
* determine if it is an ATAPI device
*/
else if (regs[ATA_REG_HCYL_OFFSET] == 0xEB &&
regs[ATA_REG_LCYL_OFFSET] == 0x14) {
/* It seems that the RICOH MP6200A CD/RW drive will report
* itself okay as a slave when it is really a master. So check
* again as a master device, just to make sure it doesn't also
* report itself okay as a master.
*/
if ((master_slave & ATA_ADDRESS_DEVHEAD_SLAVE) &&
!recheckAsMaster) {
usb_stor_dbg(us, " Identified ATAPI device as slave. Rechecking again as master\n");
recheckAsMaster = 1;
master_slave = ATA_ADDRESS_DEVHEAD_STD;
} else {
usb_stor_dbg(us, " Identified ATAPI device\n");
info->DeviceHead = master_slave;
status = isd200_atapi_soft_reset(us);
break;
}
} else {
usb_stor_dbg(us, " Not ATA, not ATAPI - Weird\n");
break;
}
/* check for timeout on this request */
if (time_after_eq(jiffies, endTime)) {
if (!detect)
usb_stor_dbg(us, " BSY check timeout, just continue with next operation...\n");
else
usb_stor_dbg(us, " Device detect timeout!\n");
break;
}
}
return status;
}
/**************************************************************************
* isd200_manual_enum
*
* Determines if the drive attached is an ATA or ATAPI and if it is a
* master or slave.
*
* RETURNS:
* ISD status code
*/
static int isd200_manual_enum(struct us_data *us)
{
struct isd200_info *info = (struct isd200_info *)us->extra;
int retStatus = ISD200_GOOD;
usb_stor_dbg(us, "Entering isd200_manual_enum\n");
retStatus = isd200_read_config(us);
if (retStatus == ISD200_GOOD) {
int isslave;
/* master or slave? */
retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 0);
if (retStatus == ISD200_GOOD)
retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_SLAVE, 0);
if (retStatus == ISD200_GOOD) {
retStatus = isd200_srst(us);
if (retStatus == ISD200_GOOD)
/* ata or atapi? */
retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 1);
}
isslave = (info->DeviceHead & ATA_ADDRESS_DEVHEAD_SLAVE) ? 1 : 0;
if (!(info->ConfigData.ATAConfig & ATACFG_MASTER)) {
usb_stor_dbg(us, " Setting Master/Slave selection to %d\n",
isslave);
info->ConfigData.ATAConfig &= 0x3f;
info->ConfigData.ATAConfig |= (isslave<<6);
retStatus = isd200_write_config(us);
}
}
usb_stor_dbg(us, "Leaving isd200_manual_enum %08X\n", retStatus);
return retStatus;
}
static void isd200_fix_driveid(u16 *id)
{
#ifndef __LITTLE_ENDIAN
# ifdef __BIG_ENDIAN
int i;
for (i = 0; i < ATA_ID_WORDS; i++)
id[i] = __le16_to_cpu(id[i]);
# else
# error "Please fix <asm/byteorder.h>"
# endif
#endif
}
static void isd200_dump_driveid(struct us_data *us, u16 *id)
{
usb_stor_dbg(us, " Identify Data Structure:\n");
usb_stor_dbg(us, " config = 0x%x\n", id[ATA_ID_CONFIG]);
usb_stor_dbg(us, " cyls = 0x%x\n", id[ATA_ID_CYLS]);
usb_stor_dbg(us, " heads = 0x%x\n", id[ATA_ID_HEADS]);
usb_stor_dbg(us, " track_bytes = 0x%x\n", id[4]);
usb_stor_dbg(us, " sector_bytes = 0x%x\n", id[5]);
usb_stor_dbg(us, " sectors = 0x%x\n", id[ATA_ID_SECTORS]);
usb_stor_dbg(us, " serial_no[0] = 0x%x\n", *(char *)&id[ATA_ID_SERNO]);
usb_stor_dbg(us, " buf_type = 0x%x\n", id[20]);
usb_stor_dbg(us, " buf_size = 0x%x\n", id[ATA_ID_BUF_SIZE]);
usb_stor_dbg(us, " ecc_bytes = 0x%x\n", id[22]);
usb_stor_dbg(us, " fw_rev[0] = 0x%x\n", *(char *)&id[ATA_ID_FW_REV]);
usb_stor_dbg(us, " model[0] = 0x%x\n", *(char *)&id[ATA_ID_PROD]);
usb_stor_dbg(us, " max_multsect = 0x%x\n", id[ATA_ID_MAX_MULTSECT] & 0xff);
usb_stor_dbg(us, " dword_io = 0x%x\n", id[ATA_ID_DWORD_IO]);
usb_stor_dbg(us, " capability = 0x%x\n", id[ATA_ID_CAPABILITY] >> 8);
usb_stor_dbg(us, " tPIO = 0x%x\n", id[ATA_ID_OLD_PIO_MODES] >> 8);
usb_stor_dbg(us, " tDMA = 0x%x\n", id[ATA_ID_OLD_DMA_MODES] >> 8);
usb_stor_dbg(us, " field_valid = 0x%x\n", id[ATA_ID_FIELD_VALID]);
usb_stor_dbg(us, " cur_cyls = 0x%x\n", id[ATA_ID_CUR_CYLS]);
usb_stor_dbg(us, " cur_heads = 0x%x\n", id[ATA_ID_CUR_HEADS]);
usb_stor_dbg(us, " cur_sectors = 0x%x\n", id[ATA_ID_CUR_SECTORS]);
usb_stor_dbg(us, " cur_capacity = 0x%x\n", ata_id_u32(id, 57));
usb_stor_dbg(us, " multsect = 0x%x\n", id[ATA_ID_MULTSECT] & 0xff);
usb_stor_dbg(us, " lba_capacity = 0x%x\n", ata_id_u32(id, ATA_ID_LBA_CAPACITY));
usb_stor_dbg(us, " command_set_1 = 0x%x\n", id[ATA_ID_COMMAND_SET_1]);
usb_stor_dbg(us, " command_set_2 = 0x%x\n", id[ATA_ID_COMMAND_SET_2]);
}
/**************************************************************************
* isd200_get_inquiry_data
*
* Get inquiry data
*
* RETURNS:
* ISD status code
*/
static int isd200_get_inquiry_data( struct us_data *us )
{
struct isd200_info *info = (struct isd200_info *)us->extra;
int retStatus = ISD200_GOOD;
u16 *id = info->id;
usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");
/* set default to Master */
info->DeviceHead = ATA_ADDRESS_DEVHEAD_STD;
/* attempt to manually enumerate this device */
retStatus = isd200_manual_enum(us);
if (retStatus == ISD200_GOOD) {
int transferStatus;
/* check for an ATA device */
if (info->DeviceFlags & DF_ATA_DEVICE) {
/* this must be an ATA device */
/* perform an ATA Command Identify */
transferStatus = isd200_action( us, ACTION_IDENTIFY,
id, ATA_ID_WORDS * 2);
if (transferStatus != ISD200_TRANSPORT_GOOD) {
/* Error issuing ATA Command Identify */
usb_stor_dbg(us, " Error issuing ATA Command Identify\n");
retStatus = ISD200_ERROR;
} else {
/* ATA Command Identify successful */
int i;
__be16 *src;
__u16 *dest;
isd200_fix_driveid(id);
isd200_dump_driveid(us, id);
memset(&info->InquiryData, 0, sizeof(info->InquiryData));
/* Standard IDE interface only supports disks */
info->InquiryData.DeviceType = DIRECT_ACCESS_DEVICE;
/* The length must be at least 36 (5 + 31) */
info->InquiryData.AdditionalLength = 0x1F;
if (id[ATA_ID_COMMAND_SET_1] & COMMANDSET_MEDIA_STATUS) {
/* set the removable bit */
info->InquiryData.DeviceTypeModifier = DEVICE_REMOVABLE;
info->DeviceFlags |= DF_REMOVABLE_MEDIA;
}
/* Fill in vendor identification fields */
src = (__be16 *)&id[ATA_ID_PROD];
dest = (__u16*)info->InquiryData.VendorId;
for (i=0;i<4;i++)
dest[i] = be16_to_cpu(src[i]);
src = (__be16 *)&id[ATA_ID_PROD + 8/2];
dest = (__u16*)info->InquiryData.ProductId;
for (i=0;i<8;i++)
dest[i] = be16_to_cpu(src[i]);
src = (__be16 *)&id[ATA_ID_FW_REV];
dest = (__u16*)info->InquiryData.ProductRevisionLevel;
for (i=0;i<2;i++)
dest[i] = be16_to_cpu(src[i]);
/* determine if it supports Media Status Notification */
if (id[ATA_ID_COMMAND_SET_2] & COMMANDSET_MEDIA_STATUS) {
usb_stor_dbg(us, " Device supports Media Status Notification\n");
/* Indicate that it is enabled, even though it is not.
* This allows the lock/unlock of the media to work
* correctly.
*/
info->DeviceFlags |= DF_MEDIA_STATUS_ENABLED;
}
else
info->DeviceFlags &= ~DF_MEDIA_STATUS_ENABLED;
}
} else {
/*
* this must be an ATAPI device
* use an ATAPI protocol (Transparent SCSI)
*/
us->protocol_name = "Transparent SCSI";
us->proto_handler = usb_stor_transparent_scsi_command;
usb_stor_dbg(us, "Protocol changed to: %s\n",
us->protocol_name);
/* Free driver structure */
us->extra_destructor(info);
kfree(info);
us->extra = NULL;
us->extra_destructor = NULL;
}
}
usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);
return retStatus;
}
/**************************************************************************
* isd200_scsi_to_ata
*
* Translate SCSI commands to ATA commands.
*
* RETURNS:
* 1 if the command needs to be sent to the transport layer
* 0 otherwise
*/
static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us,
union ata_cdb * ataCdb)
{
struct isd200_info *info = (struct isd200_info *)us->extra;
u16 *id = info->id;
int sendToTransport = 1;
unsigned char sectnum, head;
unsigned short cylinder;
unsigned long lba;
unsigned long blockCount;
unsigned char senseData[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
memset(ataCdb, 0, sizeof(union ata_cdb));
/* SCSI Command */
switch (srb->cmnd[0]) {
case INQUIRY:
usb_stor_dbg(us, " ATA OUT - INQUIRY\n");
/* copy InquiryData */
usb_stor_set_xfer_buf((unsigned char *) &info->InquiryData,
sizeof(info->InquiryData), srb);
srb->result = SAM_STAT_GOOD;
sendToTransport = 0;
break;
case MODE_SENSE:
usb_stor_dbg(us, " ATA OUT - SCSIOP_MODE_SENSE\n");
/* Initialize the return buffer */
usb_stor_set_xfer_buf(senseData, sizeof(senseData), srb);
if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED)
{
ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS;
isd200_srb_set_bufflen(srb, 0);
} else {
usb_stor_dbg(us, " Media Status not supported, just report okay\n");
srb->result = SAM_STAT_GOOD;
sendToTransport = 0;
}
break;
case TEST_UNIT_READY:
usb_stor_dbg(us, " ATA OUT - SCSIOP_TEST_UNIT_READY\n");
if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED)
{
ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS;
isd200_srb_set_bufflen(srb, 0);
} else {
usb_stor_dbg(us, " Media Status not supported, just report okay\n");
srb->result = SAM_STAT_GOOD;
sendToTransport = 0;
}
break;
case READ_CAPACITY:
{
unsigned long capacity;
struct read_capacity_data readCapacityData;
usb_stor_dbg(us, " ATA OUT - SCSIOP_READ_CAPACITY\n");
if (ata_id_has_lba(id))
capacity = ata_id_u32(id, ATA_ID_LBA_CAPACITY) - 1;
else
capacity = (id[ATA_ID_HEADS] * id[ATA_ID_CYLS] *
id[ATA_ID_SECTORS]) - 1;
readCapacityData.LogicalBlockAddress = cpu_to_be32(capacity);
readCapacityData.BytesPerBlock = cpu_to_be32(0x200);
usb_stor_set_xfer_buf((unsigned char *) &readCapacityData,
sizeof(readCapacityData), srb);
srb->result = SAM_STAT_GOOD;
sendToTransport = 0;
}
break;
case READ_10:
usb_stor_dbg(us, " ATA OUT - SCSIOP_READ\n");
lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]);
blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8];
if (ata_id_has_lba(id)) {
sectnum = (unsigned char)(lba);
cylinder = (unsigned short)(lba>>8);
head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F);
} else {
sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1);
cylinder = (u16)(lba / (id[ATA_ID_SECTORS] *
id[ATA_ID_HEADS]));
head = (u8)((lba / id[ATA_ID_SECTORS]) %
id[ATA_ID_HEADS]);
}
ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect =
REG_SECTOR_COUNT | REG_SECTOR_NUMBER |
REG_CYLINDER_LOW | REG_CYLINDER_HIGH |
REG_DEVICE_HEAD | REG_COMMAND;
ataCdb->write.SectorCountByte = (unsigned char)blockCount;
ataCdb->write.SectorNumberByte = sectnum;
ataCdb->write.CylinderHighByte = (unsigned char)(cylinder>>8);
ataCdb->write.CylinderLowByte = (unsigned char)cylinder;
ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD);
ataCdb->write.CommandByte = ATA_CMD_PIO_READ;
break;
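/*
 * Worked example (illustrative, not part of the original source):
 * for a non-LBA drive reporting 16 heads and 63 sectors per track,
 * LBA 4000 translates as
 *
 *	sectnum  = (4000 % 63) + 1   = 32
 *	cylinder =  4000 / (63 * 16) = 3
 *	head     = (4000 / 63) % 16  = 15
 *
 * The WRITE_10 case below performs the same conversion.
 */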
case WRITE_10:
usb_stor_dbg(us, " ATA OUT - SCSIOP_WRITE\n");
lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]);
blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8];
if (ata_id_has_lba(id)) {
sectnum = (unsigned char)(lba);
cylinder = (unsigned short)(lba>>8);
head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F);
} else {
sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1);
cylinder = (u16)(lba / (id[ATA_ID_SECTORS] *
id[ATA_ID_HEADS]));
head = (u8)((lba / id[ATA_ID_SECTORS]) %
id[ATA_ID_HEADS]);
}
ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect =
REG_SECTOR_COUNT | REG_SECTOR_NUMBER |
REG_CYLINDER_LOW | REG_CYLINDER_HIGH |
REG_DEVICE_HEAD | REG_COMMAND;
ataCdb->write.SectorCountByte = (unsigned char)blockCount;
ataCdb->write.SectorNumberByte = sectnum;
ataCdb->write.CylinderHighByte = (unsigned char)(cylinder>>8);
ataCdb->write.CylinderLowByte = (unsigned char)cylinder;
ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD);
ataCdb->write.CommandByte = ATA_CMD_PIO_WRITE;
break;
case ALLOW_MEDIUM_REMOVAL:
usb_stor_dbg(us, " ATA OUT - SCSIOP_MEDIUM_REMOVAL\n");
if (info->DeviceFlags & DF_REMOVABLE_MEDIA) {
usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n",
srb->cmnd[4]);
ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = (srb->cmnd[4] & 0x1) ?
ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
isd200_srb_set_bufflen(srb, 0);
} else {
usb_stor_dbg(us, " Not removeable media, just report okay\n");
srb->result = SAM_STAT_GOOD;
sendToTransport = 0;
}
break;
case START_STOP:
usb_stor_dbg(us, " ATA OUT - SCSIOP_START_STOP_UNIT\n");
usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]);
if ((srb->cmnd[4] & 0x3) == 0x2) {
usb_stor_dbg(us, " Media Eject\n");
ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ataCdb->generic.TransferBlockSize = 0;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = ATA_COMMAND_MEDIA_EJECT;
} else if ((srb->cmnd[4] & 0x3) == 0x1) {
usb_stor_dbg(us, " Get Media Status\n");
ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand;
ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand;
ataCdb->generic.TransferBlockSize = 1;
ataCdb->generic.RegisterSelect = REG_COMMAND;
ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS;
isd200_srb_set_bufflen(srb, 0);
} else {
usb_stor_dbg(us, " Nothing to do, just report okay\n");
srb->result = SAM_STAT_GOOD;
sendToTransport = 0;
}
break;
default:
usb_stor_dbg(us, "Unsupported SCSI command - 0x%X\n",
srb->cmnd[0]);
srb->result = DID_ERROR << 16;
sendToTransport = 0;
break;
}
return sendToTransport;
}
/**************************************************************************
* isd200_free_info
*
* Frees the driver structure.
*/
static void isd200_free_info_ptrs(void *info_)
{
struct isd200_info *info = (struct isd200_info *) info_;
if (info) {
kfree(info->id);
kfree(info->RegsBuf);
kfree(info->srb.sense_buffer);
}
}
/**************************************************************************
* isd200_init_info
*
* Allocates (if necessary) and initializes the driver structure.
*
* RETURNS:
* ISD status code
*/
static int isd200_init_info(struct us_data *us)
{
int retStatus = ISD200_GOOD;
struct isd200_info *info;
info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL);
if (!info)
retStatus = ISD200_ERROR;
else {
info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL);
info->srb.sense_buffer =
kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) {
isd200_free_info_ptrs(info);
kfree(info);
retStatus = ISD200_ERROR;
}
}
if (retStatus == ISD200_GOOD) {
us->extra = info;
us->extra_destructor = isd200_free_info_ptrs;
}
return retStatus;
}
/**************************************************************************
* Initialization for the ISD200
*/
static int isd200_Initialization(struct us_data *us)
{
usb_stor_dbg(us, "ISD200 Initialization...\n");
/* Initialize ISD200 info struct */
if (isd200_init_info(us) == ISD200_ERROR) {
usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
} else {
/* Get device specific data */
if (isd200_get_inquiry_data(us) != ISD200_GOOD)
usb_stor_dbg(us, "ISD200 Initialization Failure\n");
else
usb_stor_dbg(us, "ISD200 Initialization complete\n");
}
return 0;
}
/**************************************************************************
* Protocol and Transport for the ISD200 ASIC
*
* This protocol and transport are for ATA devices connected to an ISD200
* ASIC. An ATAPI device that is connected as a slave device will be
* detected in the driver initialization function and the protocol will
* be changed to an ATAPI protocol (Transparent SCSI).
*
*/
static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
{
int sendToTransport = 1, orig_bufflen;
union ata_cdb ataCdb;
/* Make sure driver was initialized */
if (us->extra == NULL)
usb_stor_dbg(us, "ERROR Driver not initialized\n");
scsi_set_resid(srb, 0);
/* scsi_bufflen might change in protocol translation to ata */
orig_bufflen = scsi_bufflen(srb);
sendToTransport = isd200_scsi_to_ata(srb, us, &ataCdb);
/* send the command to the transport layer */
if (sendToTransport)
isd200_invoke_transport(us, srb, &ataCdb);
isd200_srb_set_bufflen(srb, orig_bufflen);
}
static struct scsi_host_template isd200_host_template;
static int isd200_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct us_data *us;
int result;
result = usb_stor_probe1(&us, intf, id,
(id - isd200_usb_ids) + isd200_unusual_dev_list,
&isd200_host_template);
if (result)
return result;
us->protocol_name = "ISD200 ATA/ATAPI";
us->proto_handler = isd200_ata_command;
result = usb_stor_probe2(us);
return result;
}
static struct usb_driver isd200_driver = {
.name = DRV_NAME,
.probe = isd200_probe,
.disconnect = usb_stor_disconnect,
.suspend = usb_stor_suspend,
.resume = usb_stor_resume,
.reset_resume = usb_stor_reset_resume,
.pre_reset = usb_stor_pre_reset,
.post_reset = usb_stor_post_reset,
.id_table = isd200_usb_ids,
.soft_unbind = 1,
.no_dynamic_id = 1,
};
module_usb_stor_driver(isd200_driver, isd200_host_template, DRV_NAME);
| gpl-2.0 |
JoinTheRealms/TF700-dualboot-stockbased | arch/arm/mach-omap2/pm24xx.c | 393 | 12859 | /*
* OMAP2 Power Management Routines
*
* Copyright (C) 2005 Texas Instruments, Inc.
* Copyright (C) 2006-2008 Nokia Corporation
*
* Written by:
* Richard Woodruff <r-woodruff2@ti.com>
* Tony Lindgren
* Juha Yrjola
* Amit Kucheria <amit.kucheria@nokia.com>
* Igor Stoppa <igor.stoppa@nokia.com>
*
* Based on pm.c for omap1
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/time.h>
#include <linux/gpio.h>
#include <linux/console.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>
#include <mach/irqs.h>
#include <plat/clock.h>
#include <plat/sram.h>
#include <plat/dma.h>
#include <plat/board.h>
#include "prm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "pm.h"
#include "control.h"
#include "powerdomain.h"
#include "clockdomain.h"
static int omap2_pm_debug;
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state = PM_SUSPEND_ON;
static inline bool is_suspending(void)
{
return (suspend_state != PM_SUSPEND_ON);
}
#else
static inline bool is_suspending(void)
{
return false;
}
#endif
static void (*omap2_sram_idle)(void);
static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl,
void __iomem *sdrc_power);
static struct powerdomain *mpu_pwrdm, *core_pwrdm;
static struct clockdomain *dsp_clkdm, *mpu_clkdm, *wkup_clkdm, *gfx_clkdm;
static struct clk *osc_ck, *emul_ck;
static int omap2_fclks_active(void)
{
u32 f1, f2;
f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
/* Ignore UART clocks. These are handled by UART core (serial.c) */
f1 &= ~(OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_UART2_MASK);
f2 &= ~OMAP24XX_EN_UART3_MASK;
if (f1 | f2)
return 1;
return 0;
}
static void omap2_enter_full_retention(void)
{
u32 l;
struct timespec ts_preidle, ts_postidle, ts_idle;
/* There is one reference held for all children of the oscillator
* clock; the following will remove it. If nothing else uses the
* oscillator itself, it will be disabled if/when we enter retention
* mode.
*/
clk_disable(osc_ck);
/* Clear old wake-up events */
/* REVISIT: These write to reserved bits? */
omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
/*
* Set MPU powerdomain's next power state to RETENTION;
* preserve logic state during retention
*/
pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
/* Workaround to kill USB */
l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
omap2_gpio_prepare_for_idle(0);
if (omap2_pm_debug) {
getnstimeofday(&ts_preidle);
}
/* One last check for pending IRQs to avoid extra latency due
* to sleeping unnecessarily. */
if (omap_irq_pending())
goto no_sleep;
/* Block console output in case it is on one of the OMAP UARTs */
if (!is_suspending())
if (!console_trylock())
goto no_sleep;
omap_uart_prepare_idle(0);
omap_uart_prepare_idle(1);
omap_uart_prepare_idle(2);
/* Jump to SRAM suspend code */
omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
OMAP_SDRC_REGADDR(SDRC_POWER));
omap_uart_resume_idle(2);
omap_uart_resume_idle(1);
omap_uart_resume_idle(0);
if (!is_suspending())
console_unlock();
no_sleep:
if (omap2_pm_debug) {
unsigned long long tmp;
getnstimeofday(&ts_postidle);
ts_idle = timespec_sub(ts_postidle, ts_preidle);
tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
}
omap2_gpio_resume_after_idle();
clk_enable(osc_ck);
/* clear CORE wake-up events */
omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
/* wakeup domain events - bit 0: GPT1, bit 2: GPIOs */
omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);
/* MPU domain wake events */
l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
if (l & 0x01)
omap2_prm_write_mod_reg(0x01, OCP_MOD,
OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
if (l & 0x20)
omap2_prm_write_mod_reg(0x20, OCP_MOD,
OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
/* Mask future PRCM-to-MPU interrupts */
omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
}
static int omap2_i2c_active(void)
{
u32 l;
l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
return l & (OMAP2420_EN_I2C2_MASK | OMAP2420_EN_I2C1_MASK);
}
static int sti_console_enabled;
static int omap2_allow_mpu_retention(void)
{
u32 l;
/* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
return 0;
/* Check for UART3. */
l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
if (l & OMAP24XX_EN_UART3_MASK)
return 0;
if (sti_console_enabled)
return 0;
return 1;
}
static void omap2_enter_mpu_retention(void)
{
int only_idle = 0;
struct timespec ts_preidle, ts_postidle, ts_idle;
/* Putting MPU into the WFI state while a transfer is active
* seems to cause the I2C block to timeout. Why? Good question. */
if (omap2_i2c_active())
return;
/* The peripherals seem not to be able to wake up the MPU when
* it is in retention mode. */
if (omap2_allow_mpu_retention()) {
/* REVISIT: These write to reserved bits? */
omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
/* Try to enter MPU retention */
omap2_prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
OMAP_LOGICRETSTATE_MASK,
MPU_MOD, OMAP2_PM_PWSTCTRL);
} else {
/* Block MPU retention */
omap2_prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
OMAP2_PM_PWSTCTRL);
only_idle = 1;
}
if (omap2_pm_debug) {
getnstimeofday(&ts_preidle);
}
omap2_sram_idle();
if (omap2_pm_debug) {
unsigned long long tmp;
getnstimeofday(&ts_postidle);
ts_idle = timespec_sub(ts_postidle, ts_preidle);
tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
}
}
static int omap2_can_sleep(void)
{
if (omap2_fclks_active())
return 0;
if (!omap_uart_can_sleep())
return 0;
if (osc_ck->usecount > 1)
return 0;
if (omap_dma_running())
return 0;
return 1;
}
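/*
* Summary of the idle entry decision below (editorial sketch, derived
* from the code):
* - if the whole chip can sleep (no active fclks, UARTs quiet, no
*   extra osc_ck users, no DMA running), enter full chip retention;
* - otherwise fall back to MPU-only retention (or plain WFI);
* - in both cases re-check omap_irq_pending() first so we never start
*   a sleep sequence on top of a pending interrupt.
*/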
static void omap2_pm_idle(void)
{
local_irq_disable();
local_fiq_disable();
if (!omap2_can_sleep()) {
if (omap_irq_pending())
goto out;
omap2_enter_mpu_retention();
goto out;
}
if (omap_irq_pending())
goto out;
omap2_enter_full_retention();
out:
local_fiq_enable();
local_irq_enable();
}
#ifdef CONFIG_SUSPEND
static int omap2_pm_begin(suspend_state_t state)
{
disable_hlt();
suspend_state = state;
return 0;
}
static int omap2_pm_suspend(void)
{
u32 wken_wkup, mir1;
wken_wkup = omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
wken_wkup &= ~OMAP24XX_EN_GPT1_MASK;
omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
/* Mask GPT1 interrupt (bit 5 of INTC_MIR1 at 0x480fe0a4) */
mir1 = omap_readl(0x480fe0a4);
omap_writel(1 << 5, 0x480fe0ac);
omap_uart_prepare_suspend();
omap2_enter_full_retention();
omap_writel(mir1, 0x480fe0a4);
omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
return 0;
}
static int omap2_pm_enter(suspend_state_t state)
{
int ret = 0;
switch (state) {
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
ret = omap2_pm_suspend();
break;
default:
ret = -EINVAL;
}
return ret;
}
static void omap2_pm_end(void)
{
suspend_state = PM_SUSPEND_ON;
enable_hlt();
}
static const struct platform_suspend_ops omap_pm_ops = {
.begin = omap2_pm_begin,
.enter = omap2_pm_enter,
.end = omap2_pm_end,
.valid = suspend_valid_only_mem,
};
#else
static const struct platform_suspend_ops omap_pm_ops __initconst;
#endif /* CONFIG_SUSPEND */
/* XXX This function should be shareable between OMAP2xxx and OMAP3 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
clkdm_allow_idle(clkdm);
else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
atomic_read(&clkdm->usecount) == 0)
clkdm_sleep(clkdm);
return 0;
}
static void __init prcm_setup_regs(void)
{
int i, num_mem_banks;
struct powerdomain *pwrdm;
/*
* Enable autoidle
* XXX This should be handled by hwmod code or PRCM init code
*/
omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
OMAP2_PRCM_SYSCONFIG_OFFSET);
/*
* Set CORE powerdomain memory banks to retain their contents
* during RETENTION
*/
num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
for (i = 0; i < num_mem_banks; i++)
pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);
/* Set CORE powerdomain's next power state to RETENTION */
pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);
/*
* Set MPU powerdomain's next power state to RETENTION;
* preserve logic state during retention
*/
pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);
/* Force-power down DSP, GFX powerdomains */
pwrdm = clkdm_get_pwrdm(dsp_clkdm);
pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
clkdm_sleep(dsp_clkdm);
pwrdm = clkdm_get_pwrdm(gfx_clkdm);
pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
clkdm_sleep(gfx_clkdm);
/* Enable hardware-supervised idle for all clkdms */
clkdm_for_each(clkdms_setup, NULL);
clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);
/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
* stabilisation */
omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
OMAP2_PRCM_CLKSSETUP_OFFSET);
/* Configure automatic voltage transition */
omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
OMAP2_PRCM_VOLTSETUP_OFFSET);
omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
(0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
OMAP24XX_MEMRETCTRL_MASK |
(0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
(0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
/* Enable wake-up events */
omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
WKUP_MOD, PM_WKEN);
}
static int __init omap2_pm_init(void)
{
u32 l;
if (!cpu_is_omap24xx())
return -ENODEV;
printk(KERN_INFO "Power Management for OMAP2 initializing\n");
l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
/* Look up important powerdomains */
mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
if (!mpu_pwrdm)
pr_err("PM: mpu_pwrdm not found\n");
core_pwrdm = pwrdm_lookup("core_pwrdm");
if (!core_pwrdm)
pr_err("PM: core_pwrdm not found\n");
/* Look up important clockdomains */
mpu_clkdm = clkdm_lookup("mpu_clkdm");
if (!mpu_clkdm)
pr_err("PM: mpu_clkdm not found\n");
wkup_clkdm = clkdm_lookup("wkup_clkdm");
if (!wkup_clkdm)
pr_err("PM: wkup_clkdm not found\n");
dsp_clkdm = clkdm_lookup("dsp_clkdm");
if (!dsp_clkdm)
pr_err("PM: dsp_clkdm not found\n");
gfx_clkdm = clkdm_lookup("gfx_clkdm");
if (!gfx_clkdm)
pr_err("PM: gfx_clkdm not found\n");
osc_ck = clk_get(NULL, "osc_ck");
if (IS_ERR(osc_ck)) {
printk(KERN_ERR "could not get osc_ck\n");
return -ENODEV;
}
if (cpu_is_omap242x()) {
emul_ck = clk_get(NULL, "emul_ck");
if (IS_ERR(emul_ck)) {
printk(KERN_ERR "could not get emul_ck\n");
clk_put(osc_ck);
return -ENODEV;
}
}
prcm_setup_regs();
/* Hack to prevent MPU retention when STI console is enabled. */
{
const struct omap_sti_console_config *sti;
sti = omap_get_config(OMAP_TAG_STI_CONSOLE,
struct omap_sti_console_config);
if (sti != NULL && sti->enable)
sti_console_enabled = 1;
}
/*
* We copy the assembler sleep/wakeup routines to SRAM.
* These routines need to be in SRAM as that's the only
* memory the MPU can see when it wakes up.
*/
if (cpu_is_omap24xx()) {
omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
omap24xx_idle_loop_suspend_sz);
omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
omap24xx_cpu_suspend_sz);
}
suspend_set_ops(&omap_pm_ops);
pm_idle = omap2_pm_idle;
return 0;
}
late_initcall(omap2_pm_init);
| gpl-2.0 |
TripNRaVeR/tripndroid-endeavoru-3.1.10 | arch/arm/mach-omap1/gpio15xx.c | 649 | 3188 | /*
* OMAP15xx specific gpio init
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
*
* Author:
* Charulatha V <charu@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/gpio.h>
#define OMAP1_MPUIO_VBASE OMAP1_MPUIO_BASE
#define OMAP1510_GPIO_BASE 0xFFFCE000
/* gpio1 */
/* Not __initdata: the registered platform_device keeps pointing here */
static struct resource omap15xx_mpu_gpio_resources[] = {
{
.start = OMAP1_MPUIO_VBASE,
.end = OMAP1_MPUIO_VBASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_MPUIO,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
.revision = USHRT_MAX,
.direction = OMAP_MPUIO_IO_CNTL,
.datain = OMAP_MPUIO_INPUT_LATCH,
.dataout = OMAP_MPUIO_OUTPUT,
.irqstatus = OMAP_MPUIO_GPIO_INT,
.irqenable = OMAP_MPUIO_GPIO_MASKIT,
.irqenable_inv = true,
};
static struct omap_gpio_platform_data omap15xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
.bank_type = METHOD_MPUIO,
.bank_width = 16,
.bank_stride = 1,
.regs = &omap15xx_mpuio_regs,
};
static struct platform_device omap15xx_mpu_gpio = {
.name = "omap_gpio",
.id = 0,
.dev = {
.platform_data = &omap15xx_mpu_gpio_config,
},
.num_resources = ARRAY_SIZE(omap15xx_mpu_gpio_resources),
.resource = omap15xx_mpu_gpio_resources,
};
/* gpio2 */
static struct resource omap15xx_gpio_resources[] = {
{
.start = OMAP1510_GPIO_BASE,
.end = OMAP1510_GPIO_BASE + SZ_2K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_GPIO_BANK1,
.flags = IORESOURCE_IRQ,
},
};
static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
.revision = USHRT_MAX,
.direction = OMAP1510_GPIO_DIR_CONTROL,
.datain = OMAP1510_GPIO_DATA_INPUT,
.dataout = OMAP1510_GPIO_DATA_OUTPUT,
.irqstatus = OMAP1510_GPIO_INT_STATUS,
.irqenable = OMAP1510_GPIO_INT_MASK,
.irqenable_inv = true,
};
static struct omap_gpio_platform_data omap15xx_gpio_config = {
.virtual_irq_start = IH_GPIO_BASE,
.bank_type = METHOD_GPIO_1510,
.bank_width = 16,
.regs = &omap15xx_gpio_regs,
};
static struct platform_device omap15xx_gpio = {
.name = "omap_gpio",
.id = 1,
.dev = {
.platform_data = &omap15xx_gpio_config,
},
.num_resources = ARRAY_SIZE(omap15xx_gpio_resources),
.resource = omap15xx_gpio_resources,
};
/*
* omap15xx_gpio_init needs to be done before
* machine_init functions access gpio APIs.
* Hence omap15xx_gpio_init is a postcore_initcall.
*/
static int __init omap15xx_gpio_init(void)
{
if (!cpu_is_omap15xx())
return -EINVAL;
platform_device_register(&omap15xx_mpu_gpio);
platform_device_register(&omap15xx_gpio);
gpio_bank_count = 2;
return 0;
}
postcore_initcall(omap15xx_gpio_init);
| gpl-2.0 |
kensoc/kendroid_kernel | drivers/acpi/acpica/hwacpi.c | 905 | 5654 | /******************************************************************************
*
* Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwacpi")
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/******************************************************************************
*
* FUNCTION: acpi_hw_set_mode
*
* PARAMETERS: mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
*
* RETURN: Status
*
* DESCRIPTION: Transitions the system into the requested mode.
*
******************************************************************************/
acpi_status acpi_hw_set_mode(u32 mode)
{
acpi_status status;
ACPI_FUNCTION_TRACE(hw_set_mode);
/* If the Hardware Reduced flag is set, machine is always in acpi mode */
if (acpi_gbl_reduced_hardware) {
return_ACPI_STATUS(AE_OK);
}
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
* system does not support mode transition.
*/
if (!acpi_gbl_FADT.smi_command) {
ACPI_ERROR((AE_INFO,
"No SMI_CMD in FADT, mode transition failed"));
return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
}
/*
* ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE
* in FADT: If it is zero, enabling or disabling is not supported.
* As old systems may have used zero for mode transition,
* we make sure both values are zero before concluding that
* mode transitions are not supported.
*/
if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
ACPI_ERROR((AE_INFO,
"No ACPI mode transition supported in this system "
"(enable/disable both zero)"));
return_ACPI_STATUS(AE_OK);
}
switch (mode) {
case ACPI_SYS_MODE_ACPI:
/* BIOS should have disabled ALL fixed and GP events */
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
(u32) acpi_gbl_FADT.acpi_enable, 8);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Attempting to enable ACPI mode\n"));
break;
case ACPI_SYS_MODE_LEGACY:
/*
* BIOS should clear all fixed status bits and restore fixed event
* enable bits to default
*/
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
(u32)acpi_gbl_FADT.acpi_disable, 8);
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Attempting to enable Legacy (non-ACPI) mode\n"));
break;
default:
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not write ACPI mode change"));
return_ACPI_STATUS(status);
}
return_ACPI_STATUS(AE_OK);
}
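/*
* Illustrative caller sketch (not ACPICA code): higher layers normally
* check the current mode before requesting a transition, e.g.
*
*   if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI)
*           status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
*
* using acpi_hw_get_mode() defined below.
*/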
/*******************************************************************************
*
* FUNCTION: acpi_hw_get_mode
*
* PARAMETERS: none
*
* RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY
*
* DESCRIPTION: Return current operating state of system. Determined by
* querying the SCI_EN bit.
*
******************************************************************************/
u32 acpi_hw_get_mode(void)
{
acpi_status status;
u32 value;
ACPI_FUNCTION_TRACE(hw_get_mode);
/* If the Hardware Reduced flag is set, machine is always in acpi mode */
if (acpi_gbl_reduced_hardware) {
return_UINT32(ACPI_SYS_MODE_ACPI);
}
/*
* ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
* system does not support mode transition.
*/
if (!acpi_gbl_FADT.smi_command) {
return_UINT32(ACPI_SYS_MODE_ACPI);
}
status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
if (ACPI_FAILURE(status)) {
return_UINT32(ACPI_SYS_MODE_LEGACY);
}
if (value) {
return_UINT32(ACPI_SYS_MODE_ACPI);
} else {
return_UINT32(ACPI_SYS_MODE_LEGACY);
}
}
#endif /* !ACPI_REDUCED_HARDWARE */
| gpl-2.0 |
sembre/kernel_totoro_update3 | common/arch/arm/mach-pxa/capc7117.c | 905 | 4045 | /*
* linux/arch/arm/mach-pxa/capc7117.c
*
* Support for the Embedian CAPC-7117 Evaluation Kit
* based on the Embedian MXM-8x10 Computer on Module
*
* Copyright (C) 2009 Embedian Inc.
* Copyright (C) 2009 TMT Services & Supplies (Pty) Ltd.
*
* 2007-09-04: eric miao <eric.y.miao@gmail.com>
* rewrite to align with latest kernel
*
* 2010-01-09: Edwin Peer <epeer@tmtservices.co.za>
* Hennie van der Merwe <hvdmerwe@tmtservices.co.za>
* rework for upstream merge
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/serial_8250.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/pxa320.h>
#include <mach/mxm8x10.h>
#include "generic.h"
/* IDE (PATA) Support */
static struct pata_platform_info pata_platform_data = {
.ioport_shift = 1
};
static struct resource capc7117_ide_resources[] = {
[0] = {
.start = 0x11000020,
.end = 0x1100003f,
.flags = IORESOURCE_MEM
},
[1] = {
.start = 0x1100001c,
.end = 0x1100001c,
.flags = IORESOURCE_MEM
},
[2] = {
.start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO76)),
.end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO76)),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING
}
};
static struct platform_device capc7117_ide_device = {
.name = "pata_platform",
.num_resources = ARRAY_SIZE(capc7117_ide_resources),
.resource = capc7117_ide_resources,
.dev = {
.platform_data = &pata_platform_data,
.coherent_dma_mask = ~0 /* grumble */
}
};
static void __init capc7117_ide_init(void)
{
platform_device_register(&capc7117_ide_device);
}
/* TI16C752 UART support */
#define TI16C752_FLAGS (UPF_BOOT_AUTOCONF | \
UPF_IOREMAP | \
UPF_BUGGY_UART | \
UPF_SKIP_TEST)
#define TI16C752_UARTCLK (22118400)
static struct plat_serial8250_port ti16c752_platform_data[] = {
[0] = {
.mapbase = 0x14000000,
.irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO78)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = TI16C752_UARTCLK
},
[1] = {
.mapbase = 0x14000040,
.irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO79)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = TI16C752_UARTCLK
},
[2] = {
.mapbase = 0x14000080,
.irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO80)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = TI16C752_UARTCLK
},
[3] = {
.mapbase = 0x140000c0,
.irq = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO81)),
.irqflags = IRQF_TRIGGER_RISING,
.flags = TI16C752_FLAGS,
.iotype = UPIO_MEM,
.regshift = 1,
.uartclk = TI16C752_UARTCLK
},
[4] = {
/* end of array */
}
};
static struct platform_device ti16c752_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = ti16c752_platform_data
}
};
static void __init capc7117_uarts_init(void)
{
platform_device_register(&ti16c752_device);
}
static void __init capc7117_init(void)
{
/* Init CoM */
mxm_8x10_barebones_init();
/* Init evaluation board peripherals */
mxm_8x10_ac97_init();
mxm_8x10_usb_host_init();
mxm_8x10_mmc_init();
capc7117_uarts_init();
capc7117_ide_init();
}
MACHINE_START(CAPC7117,
"Embedian CAPC-7117 evaluation kit based on the MXM-8x10 CoM")
.phys_io = 0x40000000,
.boot_params = 0xa0000100,
.io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
.map_io = pxa_map_io,
.init_irq = pxa3xx_init_irq,
.timer = &pxa_timer,
.init_machine = capc7117_init
MACHINE_END
| gpl-2.0 |
Hundsbuah/SGP771_SGP712 | drivers/mfd/db8500-prcmu.c | 1161 | 84247 | /*
* Copyright (C) STMicroelectronics 2009
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License v2
* Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
* Author: Sundar Iyer <sundar.iyer@stericsson.com>
* Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
*
* U8500 PRCM Unit interface driver
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <linux/cpufreq.h>
#include <linux/platform_data/ux500_wdt.h>
#include <linux/platform_data/db8500_thermal.h>
#include "dbx500-prcmu-regs.h"
/* Index of different voltages to be used when accessing AVSData */
#define PRCM_AVS_BASE 0x2FC
#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
#define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1)
#define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2)
#define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3)
#define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4)
#define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5)
#define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6)
#define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7)
#define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8)
#define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9)
#define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA)
#define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB)
#define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC)
#define PRCM_AVS_VOLTAGE 0
#define PRCM_AVS_VOLTAGE_MASK 0x3f
#define PRCM_AVS_ISSLOWSTARTUP 6
#define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP)
#define PRCM_AVS_ISMODEENABLE 7
#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)
#define PRCM_BOOT_STATUS 0xFFF
#define PRCM_ROMCODE_A2P 0xFFE
#define PRCM_ROMCODE_P2A 0xFFD
#define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */
#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */
#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0)
#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1)
#define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2)
#define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3)
#define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4)
#define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5)
#define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8)
/* Req Mailboxes */
#define PRCM_REQ_MB0 0xFDC /* 12 bytes */
#define PRCM_REQ_MB1 0xFD0 /* 12 bytes */
#define PRCM_REQ_MB2 0xFC0 /* 16 bytes */
#define PRCM_REQ_MB3 0xE4C /* 372 bytes */
#define PRCM_REQ_MB4 0xE48 /* 4 bytes */
#define PRCM_REQ_MB5 0xE44 /* 4 bytes */
/* Ack Mailboxes */
#define PRCM_ACK_MB0 0xE08 /* 52 bytes */
#define PRCM_ACK_MB1 0xE04 /* 4 bytes */
#define PRCM_ACK_MB2 0xE00 /* 4 bytes */
#define PRCM_ACK_MB3 0xDFC /* 4 bytes */
#define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
#define PRCM_ACK_MB5 0xDF4 /* 4 bytes */
/* Mailbox 0 headers */
#define MB0H_POWER_STATE_TRANS 0
#define MB0H_CONFIG_WAKEUPS_EXE 1
#define MB0H_READ_WAKEUP_ACK 3
#define MB0H_CONFIG_WAKEUPS_SLEEP 4
#define MB0H_WAKEUP_EXE 2
#define MB0H_WAKEUP_SLEEP 5
/* Mailbox 0 REQs */
#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1)
#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2)
#define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3)
#define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4)
#define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8)
/* Mailbox 0 ACKs */
#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
#define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4)
#define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8)
#define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C)
#define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20)
#define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20
/* Mailbox 1 headers */
#define MB1H_ARM_APE_OPP 0x0
#define MB1H_RESET_MODEM 0x2
#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
#define MB1H_RELEASE_USB_WAKEUP 0x5
#define MB1H_PLL_ON_OFF 0x6
/* Mailbox 1 Requests */
#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
#define PRCM_REQ_MB1_PLL_ON_OFF (PRCM_REQ_MB1 + 0x4)
#define PLL_SOC0_OFF 0x1
#define PLL_SOC0_ON 0x2
#define PLL_SOC1_OFF 0x4
#define PLL_SOC1_ON 0x8
/* Mailbox 1 ACKs */
#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
#define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2)
#define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3)
/* Mailbox 2 headers */
#define MB2H_DPS 0x0
#define MB2H_AUTO_PWR 0x1
/* Mailbox 2 REQs */
#define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0)
#define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1)
#define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2)
#define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3)
#define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4)
#define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5)
#define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6)
#define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7)
#define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8)
#define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC)
/* Mailbox 2 ACKs */
#define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
#define HWACC_PWR_ST_OK 0xFE
/* Mailbox 3 headers */
#define MB3H_ANC 0x0
#define MB3H_SIDETONE 0x1
#define MB3H_SYSCLK 0xE
/* Mailbox 3 Requests */
#define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0)
#define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20)
#define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60)
#define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64)
#define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68)
#define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C)
#define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C)
/* Mailbox 4 headers */
#define MB4H_DDR_INIT 0x0
#define MB4H_MEM_ST 0x1
#define MB4H_HOTDOG 0x12
#define MB4H_HOTMON 0x13
#define MB4H_HOT_PERIOD 0x14
#define MB4H_A9WDOG_CONF 0x16
#define MB4H_A9WDOG_EN 0x17
#define MB4H_A9WDOG_DIS 0x18
#define MB4H_A9WDOG_LOAD 0x19
#define MB4H_A9WDOG_KICK 0x20
/* Mailbox 4 Requests */
#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3)
#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2)
#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0)
#define HOTMON_CONFIG_LOW BIT(0)
#define HOTMON_CONFIG_HIGH BIT(1)
#define PRCM_REQ_MB4_A9WDOG_0 (PRCM_REQ_MB4 + 0x0)
#define PRCM_REQ_MB4_A9WDOG_1 (PRCM_REQ_MB4 + 0x1)
#define PRCM_REQ_MB4_A9WDOG_2 (PRCM_REQ_MB4 + 0x2)
#define PRCM_REQ_MB4_A9WDOG_3 (PRCM_REQ_MB4 + 0x3)
#define A9WDOG_AUTO_OFF_EN BIT(7)
#define A9WDOG_AUTO_OFF_DIS 0
#define A9WDOG_ID_MASK 0xf
/* Mailbox 5 Requests */
#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
#define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6))
#define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6))
#define PRCMU_I2C_STOP_EN BIT(3)
/* Mailbox 5 ACKs */
#define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
#define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3)
#define I2C_WR_OK 0x1
#define I2C_RD_OK 0x2
#define NUM_MB 8
#define MBOX_BIT BIT
#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
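/*
* Editorial sketch (not driver code): every request mailbox below uses
* the same TCDM handshake. For mailbox N the sequence is:
*
*   while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(N))
*           cpu_relax();                    (wait until mbox N is free)
*   ... write the request bytes and header into tcdm_base ...
*   writel(MBOX_BIT(N), PRCM_MBOX_CPU_SET); (kick the XP70 firmware)
*
* and, for most mailboxes, the caller then waits on the matching
* mbN_transfer completion, which the IRQ code signals once the ACK
* mailbox has been filled in.
*/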
/*
* Wakeups/IRQs
*/
#define WAKEUP_BIT_RTC BIT(0)
#define WAKEUP_BIT_RTT0 BIT(1)
#define WAKEUP_BIT_RTT1 BIT(2)
#define WAKEUP_BIT_HSI0 BIT(3)
#define WAKEUP_BIT_HSI1 BIT(4)
#define WAKEUP_BIT_CA_WAKE BIT(5)
#define WAKEUP_BIT_USB BIT(6)
#define WAKEUP_BIT_ABB BIT(7)
#define WAKEUP_BIT_ABB_FIFO BIT(8)
#define WAKEUP_BIT_SYSCLK_OK BIT(9)
#define WAKEUP_BIT_CA_SLEEP BIT(10)
#define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
#define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
#define WAKEUP_BIT_ANC_OK BIT(13)
#define WAKEUP_BIT_SW_ERROR BIT(14)
#define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
#define WAKEUP_BIT_ARM BIT(17)
#define WAKEUP_BIT_HOTMON_LOW BIT(18)
#define WAKEUP_BIT_HOTMON_HIGH BIT(19)
#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
#define WAKEUP_BIT_GPIO0 BIT(23)
#define WAKEUP_BIT_GPIO1 BIT(24)
#define WAKEUP_BIT_GPIO2 BIT(25)
#define WAKEUP_BIT_GPIO3 BIT(26)
#define WAKEUP_BIT_GPIO4 BIT(27)
#define WAKEUP_BIT_GPIO5 BIT(28)
#define WAKEUP_BIT_GPIO6 BIT(29)
#define WAKEUP_BIT_GPIO7 BIT(30)
#define WAKEUP_BIT_GPIO8 BIT(31)
static struct {
bool valid;
struct prcmu_fw_version version;
} fw_info;
static struct irq_domain *db8500_irq_domain;
/*
* This vector maps irq numbers to the bits in the bit field used in
* communication with the PRCMU firmware.
*
* The reason for having this is to keep the irq numbers contiguous even though
* the bits in the bit field are not. (The bits also have a tendency to move
* around, to further complicate matters.)
*/
#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name))
#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
#define IRQ_PRCMU_RTC 0
#define IRQ_PRCMU_RTT0 1
#define IRQ_PRCMU_RTT1 2
#define IRQ_PRCMU_HSI0 3
#define IRQ_PRCMU_HSI1 4
#define IRQ_PRCMU_CA_WAKE 5
#define IRQ_PRCMU_USB 6
#define IRQ_PRCMU_ABB 7
#define IRQ_PRCMU_ABB_FIFO 8
#define IRQ_PRCMU_ARM 9
#define IRQ_PRCMU_MODEM_SW_RESET_REQ 10
#define IRQ_PRCMU_GPIO0 11
#define IRQ_PRCMU_GPIO1 12
#define IRQ_PRCMU_GPIO2 13
#define IRQ_PRCMU_GPIO3 14
#define IRQ_PRCMU_GPIO4 15
#define IRQ_PRCMU_GPIO5 16
#define IRQ_PRCMU_GPIO6 17
#define IRQ_PRCMU_GPIO7 18
#define IRQ_PRCMU_GPIO8 19
#define IRQ_PRCMU_CA_SLEEP 20
#define IRQ_PRCMU_HOTMON_LOW 21
#define IRQ_PRCMU_HOTMON_HIGH 22
#define NUM_PRCMU_WAKEUPS 23
static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
IRQ_ENTRY(RTC),
IRQ_ENTRY(RTT0),
IRQ_ENTRY(RTT1),
IRQ_ENTRY(HSI0),
IRQ_ENTRY(HSI1),
IRQ_ENTRY(CA_WAKE),
IRQ_ENTRY(USB),
IRQ_ENTRY(ABB),
IRQ_ENTRY(ABB_FIFO),
IRQ_ENTRY(CA_SLEEP),
IRQ_ENTRY(ARM),
IRQ_ENTRY(HOTMON_LOW),
IRQ_ENTRY(HOTMON_HIGH),
IRQ_ENTRY(MODEM_SW_RESET_REQ),
IRQ_ENTRY(GPIO0),
IRQ_ENTRY(GPIO1),
IRQ_ENTRY(GPIO2),
IRQ_ENTRY(GPIO3),
IRQ_ENTRY(GPIO4),
IRQ_ENTRY(GPIO5),
IRQ_ENTRY(GPIO6),
IRQ_ENTRY(GPIO7),
IRQ_ENTRY(GPIO8)
};
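/*
* Example of the mapping above (editorial note):
* prcmu_irq_bit[IRQ_PRCMU_GPIO0] expands to WAKEUP_BIT_GPIO0, i.e.
* BIT(23), so the virtual irq numbers 0..22 stay contiguous even
* though the firmware bit positions are not.
*/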
#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
WAKEUP_ENTRY(RTC),
WAKEUP_ENTRY(RTT0),
WAKEUP_ENTRY(RTT1),
WAKEUP_ENTRY(HSI0),
WAKEUP_ENTRY(HSI1),
WAKEUP_ENTRY(USB),
WAKEUP_ENTRY(ABB),
WAKEUP_ENTRY(ABB_FIFO),
WAKEUP_ENTRY(ARM)
};
/*
* mb0_transfer - state needed for mailbox 0 communication.
* @lock: The transaction lock.
* @dbb_irqs_lock: A lock used to handle concurrent access to (parts of)
* the request data.
* @mask_work: Work structure used for (un)masking wakeup interrupts.
* @ac_wake_lock: Mutex serializing modem AC wake/sleep requests.
* @ac_wake_work: Completion used when waiting for the AC wake ACK.
* @req: Request data that needs to persist between requests.
*/
static struct {
spinlock_t lock;
spinlock_t dbb_irqs_lock;
struct work_struct mask_work;
struct mutex ac_wake_lock;
struct completion ac_wake_work;
struct {
u32 dbb_irqs;
u32 dbb_wakeups;
u32 abb_events;
} req;
} mb0_transfer;
/*
* mb1_transfer - state needed for mailbox 1 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
* @ape_opp: The current APE OPP.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
u8 ape_opp;
struct {
u8 header;
u8 arm_opp;
u8 ape_opp;
u8 ape_voltage_status;
} ack;
} mb1_transfer;
/*
* mb2_transfer - state needed for mailbox 2 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
* @auto_pm_lock: The autonomous power management configuration lock.
* @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
* @req: Request data that need to persist between requests.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
spinlock_t auto_pm_lock;
bool auto_pm_enabled;
struct {
u8 status;
} ack;
} mb2_transfer;
/*
* mb3_transfer - state needed for mailbox 3 communication.
* @lock: The request lock.
* @sysclk_lock: A lock used to handle concurrent sysclk requests.
* @sysclk_work: Work structure used for sysclk requests.
*/
static struct {
spinlock_t lock;
struct mutex sysclk_lock;
struct completion sysclk_work;
} mb3_transfer;
/*
* mb4_transfer - state needed for mailbox 4 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
*/
static struct {
struct mutex lock;
struct completion work;
} mb4_transfer;
/*
* mb5_transfer - state needed for mailbox 5 communication.
* @lock: The transaction lock.
* @work: The transaction completion structure.
* @ack: Reply ("acknowledge") data.
*/
static struct {
struct mutex lock;
struct completion work;
struct {
u8 status;
u8 value;
} ack;
} mb5_transfer;
static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
/* Spinlocks */
static DEFINE_SPINLOCK(prcmu_lock);
static DEFINE_SPINLOCK(clkout_lock);
/* Global variable used to determine the TCDM base at runtime (v1 vs v2) */
static __iomem void *tcdm_base;
static __iomem void *prcmu_base;
struct clk_mgt {
u32 offset;
u32 pllsw;
int branch;
bool clk38div;
};
enum {
PLL_RAW,
PLL_FIX,
PLL_DIV
};
static DEFINE_SPINLOCK(clk_mgt_lock);
#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
{ (PRCM_##_name##_MGT), 0, _branch, _clk38div }
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
CLK_MGT_ENTRY(MSP1CLK, PLL_FIX, true),
CLK_MGT_ENTRY(I2CCLK, PLL_FIX, true),
CLK_MGT_ENTRY(SDMMCCLK, PLL_DIV, true),
CLK_MGT_ENTRY(SLIMCLK, PLL_FIX, true),
CLK_MGT_ENTRY(PER1CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER2CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER3CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER5CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
CLK_MGT_ENTRY(HDMICLK, PLL_FIX, false),
CLK_MGT_ENTRY(APEATCLK, PLL_DIV, true),
CLK_MGT_ENTRY(APETRACECLK, PLL_DIV, true),
CLK_MGT_ENTRY(MCDECLK, PLL_DIV, true),
CLK_MGT_ENTRY(IPI2CCLK, PLL_FIX, true),
CLK_MGT_ENTRY(DSIALTCLK, PLL_FIX, false),
CLK_MGT_ENTRY(DMACLK, PLL_DIV, true),
CLK_MGT_ENTRY(B2R2CLK, PLL_DIV, true),
CLK_MGT_ENTRY(TVCLK, PLL_FIX, true),
CLK_MGT_ENTRY(SSPCLK, PLL_FIX, true),
CLK_MGT_ENTRY(RNGCLK, PLL_FIX, true),
CLK_MGT_ENTRY(UICCCLK, PLL_FIX, false),
};
struct dsiclk {
u32 divsel_mask;
u32 divsel_shift;
u32 divsel;
};
static struct dsiclk dsiclk[2] = {
{
.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK,
.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT,
.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
},
{
.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK,
.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT,
.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
}
};
struct dsiescclk {
u32 en;
u32 div_mask;
u32 div_shift;
};
static struct dsiescclk dsiescclk[3] = {
{
.en = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN,
.div_mask = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK,
.div_shift = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT,
},
{
.en = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN,
.div_mask = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK,
.div_shift = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT,
},
{
.en = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN,
.div_mask = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK,
.div_shift = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT,
}
};
/*
* Used by MCDE to setup all necessary PRCMU registers
*/
#define PRCMU_RESET_DSIPLL 0x00004000
#define PRCMU_UNCLAMP_DSIPLL 0x00400800
#define PRCMU_CLK_PLL_DIV_SHIFT 0
#define PRCMU_CLK_PLL_SW_SHIFT 5
#define PRCMU_CLK_38 (1 << 9)
#define PRCMU_CLK_38_SRC (1 << 10)
#define PRCMU_CLK_38_DIV (1 << 11)
/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING 0x0000008C
/* DPI 50000000 Hz */
#define PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
(16 << PRCMU_CLK_PLL_DIV_SHIFT))
#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000E00
/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165
#define PRCMU_ENABLE_PLLDSI 0x00000001
#define PRCMU_DISABLE_PLLDSI 0x00000000
#define PRCMU_RELEASE_RESET_DSS 0x0000400C
#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000202
/* ESC clk, div0=1, div1=1, div2=3 */
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x07030101
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00030101
#define PRCMU_DSI_RESET_SW 0x00000007
#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
int db8500_prcmu_enable_dsipll(void)
{
int i;
/* Clear DSIPLL_RESETN */
writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
/* Unclamp DSIPLL in/out */
writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
/* Set DSI PLL FREQ */
writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
/* Enable Escape clocks */
writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
/* Start DSI PLL */
writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
/* Reset DSI PLL */
writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
for (i = 0; i < 10; i++) {
if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
== PRCMU_PLLDSI_LOCKP_LOCKED)
break;
udelay(100);
}
/* Set DSIPLL_RESETN */
writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
return 0;
}
int db8500_prcmu_disable_dsipll(void)
{
/* Disable dsi pll */
writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
/* Disable escapeclock */
writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
return 0;
}
int db8500_prcmu_set_display_clocks(void)
{
unsigned long flags;
spin_lock_irqsave(&clk_mgt_lock, flags);
/* Grab the HW semaphore. */
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
writel(PRCMU_DSI_CLOCK_SETTING, prcmu_base + PRCM_HDMICLK_MGT);
writel(PRCMU_DSI_LP_CLOCK_SETTING, prcmu_base + PRCM_TVCLK_MGT);
writel(PRCMU_DPI_CLOCK_SETTING, prcmu_base + PRCM_LCDCLK_MGT);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
spin_unlock_irqrestore(&clk_mgt_lock, flags);
return 0;
}
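/*
* Editorial note on the pattern above (also used by request_clock()
* and request_even_slower_clocks() further down): PRCM_SEM appears to
* be a hardware semaphore shared with the XP70 firmware, so the clock
* management registers are only written while it is held, with
* clk_mgt_lock serializing the Linux side.
*/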
u32 db8500_prcmu_read(unsigned int reg)
{
return readl(prcmu_base + reg);
}
void db8500_prcmu_write(unsigned int reg, u32 value)
{
unsigned long flags;
spin_lock_irqsave(&prcmu_lock, flags);
writel(value, (prcmu_base + reg));
spin_unlock_irqrestore(&prcmu_lock, flags);
}
void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&prcmu_lock, flags);
val = readl(prcmu_base + reg);
val = ((val & ~mask) | (value & mask));
writel(val, (prcmu_base + reg));
spin_unlock_irqrestore(&prcmu_lock, flags);
}
struct prcmu_fw_version *prcmu_get_fw_version(void)
{
return fw_info.valid ? &fw_info.version : NULL;
}
bool prcmu_has_arm_maxopp(void)
{
return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}
/**
* prcmu_get_boot_status - PRCMU boot status checking
* Returns: the current PRCMU boot status
*/
int prcmu_get_boot_status(void)
{
return readb(tcdm_base + PRCM_BOOT_STATUS);
}
/**
* prcmu_set_rc_a2p - This function is used to run a few power state sequences
* @val: Value to be set, i.e. transition requested
* Returns: 0 on success, -EINVAL on invalid argument
*
* This function is used to run the following power state sequences -
* any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
*/
int prcmu_set_rc_a2p(enum romcode_write val)
{
if (val < RDY_2_DS || val > RDY_2_XP70_RST)
return -EINVAL;
writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
return 0;
}
/**
* prcmu_get_rc_p2a - This function is used to get power state sequences
* Returns: the power transition that has last happened
*
* This function can return the following transitions:
* any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
*/
enum romcode_read prcmu_get_rc_p2a(void)
{
return readb(tcdm_base + PRCM_ROMCODE_P2A);
}
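/*
* Illustrative handshake (editorial sketch, not driver code): a caller
* preparing a transition writes the request and later checks what the
* ROM code reported back, e.g.
*
*   prcmu_set_rc_a2p(RDY_2_DS);     (announce "ready for deep sleep")
*   ... power state transition ...
*   switch (prcmu_get_rc_p2a()) { ... }
*
* RDY_2_DS and RDY_2_XP70_RST come from enum romcode_write (see the
* range check above); the romcode_read values are defined in
* <linux/mfd/dbx500-prcmu.h>.
*/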
/**
* prcmu_get_xp70_current_state - Return the current XP70 power mode
* Returns: the current AP(ARM) power mode: init,
* apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
*/
enum ap_pwrst prcmu_get_xp70_current_state(void)
{
return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
}
/**
* prcmu_config_clkout - Configure one of the programmable clock outputs.
* @clkout: The CLKOUT number (0 or 1).
* @source: The clock to be used (one of the PRCMU_CLKSRC_*).
* @div: The divider to be applied.
*
* Configures one of the programmable clock outputs (CLKOUTs).
* @div should be in the range [1,63] to request a configuration, or 0 to
* inform that the configuration is no longer requested.
*/
int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
{
static int requests[2];
int r = 0;
unsigned long flags;
u32 val;
u32 bits;
u32 mask;
u32 div_mask;
BUG_ON(clkout > 1);
BUG_ON(div > 63);
BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));
if (!div && !requests[clkout])
return -EINVAL;
switch (clkout) {
case 0:
div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
break;
case 1:
div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
PRCM_CLKOCR_CLK1TYPE);
bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
break;
}
bits &= mask;
spin_lock_irqsave(&clkout_lock, flags);
val = readl(PRCM_CLKOCR);
if (val & div_mask) {
if (div) {
if ((val & mask) != bits) {
r = -EBUSY;
goto unlock_and_return;
}
} else {
if ((val & mask & ~div_mask) != bits) {
r = -EINVAL;
goto unlock_and_return;
}
}
}
writel((bits | (val & ~mask)), PRCM_CLKOCR);
requests[clkout] += (div ? 1 : -1);
unlock_and_return:
spin_unlock_irqrestore(&clkout_lock, flags);
return r;
}
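/*
* Usage sketch (editorial, assuming PRCMU_CLKSRC_SYSCLK is a valid
* source for CLKOUT0 on this platform):
*
*   prcmu_config_clkout(0, PRCMU_CLKSRC_SYSCLK, 4);  (request, div 4)
*   ...
*   prcmu_config_clkout(0, PRCMU_CLKSRC_SYSCLK, 0);  (drop the request)
*
* The static requests[] counter above lets concurrent users share a
* CLKOUT only when they ask for exactly the same source and divider.
*/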
int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
{
unsigned long flags;
BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));
spin_lock_irqsave(&mb0_transfer.lock, flags);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
writeb((keep_ulp_clk ? 1 : 0),
(tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
return 0;
}
u8 db8500_prcmu_get_power_state_result(void)
{
return readb(tcdm_base + PRCM_ACK_MB0_AP_PWRSTTR_STATUS);
}
/* This function should only be called while mb0_transfer.lock is held. */
static void config_wakeups(void)
{
const u8 header[2] = {
MB0H_CONFIG_WAKEUPS_EXE,
MB0H_CONFIG_WAKEUPS_SLEEP
};
static u32 last_dbb_events;
static u32 last_abb_events;
u32 dbb_events;
u32 abb_events;
unsigned int i;
dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);
abb_events = mb0_transfer.req.abb_events;
if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
return;
for (i = 0; i < 2; i++) {
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
}
last_dbb_events = dbb_events;
last_abb_events = abb_events;
}
void db8500_prcmu_enable_wakeups(u32 wakeups)
{
unsigned long flags;
u32 bits;
int i;
BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
if (wakeups & BIT(i))
bits |= prcmu_wakeup_bit[i];
}
spin_lock_irqsave(&mb0_transfer.lock, flags);
mb0_transfer.req.dbb_wakeups = bits;
config_wakeups();
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
void db8500_prcmu_config_abb_event_readout(u32 abb_events)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.lock, flags);
mb0_transfer.req.abb_events = abb_events;
config_wakeups();
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
{
if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
else
*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
}
/**
* db8500_prcmu_set_arm_opp - set the appropriate ARM OPP
* @opp: The new ARM operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the ARM.
*/
int db8500_prcmu_set_arm_opp(u8 opp)
{
int r;
if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
return -EINVAL;
r = 0;
mutex_lock(&mb1_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
(mb1_transfer.ack.arm_opp != opp))
r = -EIO;
mutex_unlock(&mb1_transfer.lock);
return r;
}
/**
* db8500_prcmu_get_arm_opp - get the current ARM OPP
*
* Returns: the current ARM OPP
*/
int db8500_prcmu_get_arm_opp(void)
{
return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
}
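/*
* Illustrative set-then-verify sequence (editorial; ARM_50_OPP is
* assumed to be one of the values accepted by the range check in
* db8500_prcmu_set_arm_opp()):
*
*   if (!db8500_prcmu_set_arm_opp(ARM_50_OPP))
*           WARN_ON(db8500_prcmu_get_arm_opp() != ARM_50_OPP);
*
* The set path already compares the firmware ACK against the requested
* OPP, so the re-read is purely belt and braces.
*/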
/**
* db8500_prcmu_get_ddr_opp - get the current DDR OPP
*
* Returns: the current DDR OPP
*/
int db8500_prcmu_get_ddr_opp(void)
{
return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}
static bool enable_set_ddr_opp;
/**
* db8500_prcmu_set_ddr_opp - set the appropriate DDR OPP
* @opp: The new DDR operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the DDR.
*/
int db8500_prcmu_set_ddr_opp(u8 opp)
{
if (opp < DDR_100_OPP || opp > DDR_25_OPP)
return -EINVAL;
/* Changing the DDR OPP can hang the hardware pre-v21 */
if (enable_set_ddr_opp)
writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
return 0;
}
/* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */
static void request_even_slower_clocks(bool enable)
{
u32 clock_reg[] = {
PRCM_ACLK_MGT,
PRCM_DMACLK_MGT
};
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&clk_mgt_lock, flags);
/* Grab the HW semaphore. */
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
for (i = 0; i < ARRAY_SIZE(clock_reg); i++) {
u32 val;
u32 div;
val = readl(prcmu_base + clock_reg[i]);
div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK);
if (enable) {
if ((div <= 1) || (div > 15)) {
pr_err("prcmu: Bad clock divider %d in %s\n",
div, __func__);
goto unlock_and_return;
}
div <<= 1;
} else {
if (div <= 2)
goto unlock_and_return;
div >>= 1;
}
val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) |
(div & PRCM_CLK_MGT_CLKPLLDIV_MASK));
writel(val, prcmu_base + clock_reg[i]);
}
unlock_and_return:
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
spin_unlock_irqrestore(&clk_mgt_lock, flags);
}
/**
* db8500_prcmu_set_ape_opp - set the appropriate APE OPP
* @opp: The new APE operating point to which transition is to be made
* Returns: 0 on success, non-zero on failure
*
* This function sets the operating point of the APE.
*/
int db8500_prcmu_set_ape_opp(u8 opp)
{
int r = 0;
if (opp == mb1_transfer.ape_opp)
return 0;
mutex_lock(&mb1_transfer.lock);
if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
request_even_slower_clocks(false);
if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP))
goto skip_message;
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
(tcdm_base + PRCM_REQ_MB1_APE_OPP));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
(mb1_transfer.ack.ape_opp != opp))
r = -EIO;
skip_message:
if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
(r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
request_even_slower_clocks(true);
if (!r)
mb1_transfer.ape_opp = opp;
mutex_unlock(&mb1_transfer.lock);
return r;
}
/**
* db8500_prcmu_get_ape_opp - get the current APE OPP
*
* Returns: the current APE OPP
*/
int db8500_prcmu_get_ape_opp(void)
{
return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}
/**
* db8500_prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
* @enable: true to request the higher voltage, false to drop a request.
*
* Calls to this function to enable and disable requests must be balanced.
*/
int db8500_prcmu_request_ape_opp_100_voltage(bool enable)
{
int r = 0;
u8 header;
static unsigned int requests;
mutex_lock(&mb1_transfer.lock);
if (enable) {
if (0 != requests++)
goto unlock_and_return;
header = MB1H_REQUEST_APE_OPP_100_VOLT;
} else {
if (requests == 0) {
r = -EIO;
goto unlock_and_return;
} else if (1 != requests--) {
goto unlock_and_return;
}
header = MB1H_RELEASE_APE_OPP_100_VOLT;
}
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != header) ||
((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
r = -EIO;
unlock_and_return:
mutex_unlock(&mb1_transfer.lock);
return r;
}
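/*
* Because the static requests counter above is reference counted, the
* enable/disable calls must pair up (editorial example):
*
*   db8500_prcmu_request_ape_opp_100_voltage(true);   (0 -> 1, message sent)
*   db8500_prcmu_request_ape_opp_100_voltage(true);   (1 -> 2, no message)
*   db8500_prcmu_request_ape_opp_100_voltage(false);  (2 -> 1, no message)
*   db8500_prcmu_request_ape_opp_100_voltage(false);  (1 -> 0, release sent)
*/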
/**
* prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
*
* This function releases the power state requirements of a USB wakeup.
*/
int prcmu_release_usb_wakeup_state(void)
{
int r = 0;
mutex_lock(&mb1_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_RELEASE_USB_WAKEUP,
(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
r = -EIO;
mutex_unlock(&mb1_transfer.lock);
return r;
}
static int request_pll(u8 clock, bool enable)
{
int r = 0;
if (clock == PRCMU_PLLSOC0)
clock = (enable ? PLL_SOC0_ON : PLL_SOC0_OFF);
else if (clock == PRCMU_PLLSOC1)
clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
else
return -EINVAL;
mutex_lock(&mb1_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_PLL_ON_OFF, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writeb(clock, (tcdm_base + PRCM_REQ_MB1_PLL_ON_OFF));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
if (mb1_transfer.ack.header != MB1H_PLL_ON_OFF)
r = -EIO;
mutex_unlock(&mb1_transfer.lock);
return r;
}
/**
* db8500_prcmu_set_epod - set the state of an EPOD (power domain)
* @epod_id: The EPOD to set
* @epod_state: The new EPOD state
*
* This function sets the state of an EPOD (power domain). It may not be called
* from interrupt context.
*/
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
{
int r = 0;
bool ram_retention = false;
int i;
/* check argument */
BUG_ON(epod_id >= NUM_EPOD_ID);
/* set flag if retention is possible */
switch (epod_id) {
case EPOD_ID_SVAMMDSP:
case EPOD_ID_SIAMMDSP:
case EPOD_ID_ESRAM12:
case EPOD_ID_ESRAM34:
ram_retention = true;
break;
}
/* check argument */
BUG_ON(epod_state > EPOD_STATE_ON);
BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
/* get lock */
mutex_lock(&mb2_transfer.lock);
/* wait for mailbox */
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
cpu_relax();
/* fill in mailbox */
for (i = 0; i < NUM_EPOD_ID; i++)
writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));
writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));
writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);
/*
* The current firmware version does not handle errors correctly,
* and we cannot recover if there is an error.
* This is expected to change when the firmware is updated.
*/
if (!wait_for_completion_timeout(&mb2_transfer.work,
msecs_to_jiffies(20000))) {
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
goto unlock_and_return;
}
if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
r = -EIO;
unlock_and_return:
mutex_unlock(&mb2_transfer.lock);
return r;
}
/**
* prcmu_configure_auto_pm - Configure autonomous power management.
* @sleep: Configuration for ApSleep.
* @idle: Configuration for ApIdle.
*/
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
struct prcmu_auto_pm_config *idle)
{
u32 sleep_cfg;
u32 idle_cfg;
unsigned long flags;
BUG_ON((sleep == NULL) || (idle == NULL));
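/*
* Both configuration words are packed the same way (layout derived
* from the shifts below):
* bits [31:28] sva_auto_pm_enable, [27:24] sia_auto_pm_enable,
* [23:16] sva_power_on, [15:8] sia_power_on,
* [7:4] sva_policy, [3:0] sia_policy.
*/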
sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));
idle_cfg = (idle->sva_auto_pm_enable & 0xF);
idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));
spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);
/*
* The autonomous power management configuration is done through
* fields in mailbox 2, but these fields are only used as shared
* variables - i.e. there is no need to send a message.
*/
writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));
mb2_transfer.auto_pm_enabled =
((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
(sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
(idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
(idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));
spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
}
EXPORT_SYMBOL(prcmu_configure_auto_pm);
bool prcmu_is_auto_pm_enabled(void)
{
return mb2_transfer.auto_pm_enabled;
}
static int request_sysclk(bool enable)
{
int r;
unsigned long flags;
r = 0;
mutex_lock(&mb3_transfer.sysclk_lock);
spin_lock_irqsave(&mb3_transfer.lock, flags);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
cpu_relax();
writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));
writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb3_transfer.lock, flags);
/*
* The firmware only sends an ACK if we want to enable the
* SysClk, and it succeeds.
*/
if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
msecs_to_jiffies(20000))) {
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
}
mutex_unlock(&mb3_transfer.sysclk_lock);
return r;
}
static int request_timclk(bool enable)
{
u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
if (!enable)
val |= PRCM_TCR_STOP_TIMERS;
writel(val, PRCM_TCR);
return 0;
}
static int request_clock(u8 clock, bool enable)
{
u32 val;
unsigned long flags;
spin_lock_irqsave(&clk_mgt_lock, flags);
/* Grab the HW semaphore. */
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
val = readl(prcmu_base + clk_mgt[clock].offset);
if (enable) {
val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
} else {
clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
}
writel(val, prcmu_base + clk_mgt[clock].offset);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
spin_unlock_irqrestore(&clk_mgt_lock, flags);
return 0;
}
static int request_sga_clock(u8 clock, bool enable)
{
u32 val;
int ret;
if (enable) {
val = readl(PRCM_CGATING_BYPASS);
writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
}
ret = request_clock(clock, enable);
if (!ret && !enable) {
val = readl(PRCM_CGATING_BYPASS);
writel(val & ~PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
}
return ret;
}
static inline bool plldsi_locked(void)
{
return (readl(PRCM_PLLDSI_LOCKP) &
(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3)) ==
(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3);
}
static int request_plldsi(bool enable)
{
int r = 0;
u32 val;
writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI), (enable ?
PRCM_MMIP_LS_CLAMP_CLR : PRCM_MMIP_LS_CLAMP_SET));
val = readl(PRCM_PLLDSI_ENABLE);
if (enable)
val |= PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
else
val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
writel(val, PRCM_PLLDSI_ENABLE);
if (enable) {
unsigned int i;
bool locked = plldsi_locked();
for (i = 10; !locked && (i > 0); --i) {
udelay(100);
locked = plldsi_locked();
}
if (locked) {
writel(PRCM_APE_RESETN_DSIPLL_RESETN,
PRCM_APE_RESETN_SET);
} else {
writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI),
PRCM_MMIP_LS_CLAMP_SET);
val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
writel(val, PRCM_PLLDSI_ENABLE);
r = -EAGAIN;
}
} else {
writel(PRCM_APE_RESETN_DSIPLL_RESETN, PRCM_APE_RESETN_CLR);
}
return r;
}
static int request_dsiclk(u8 n, bool enable)
{
u32 val;
val = readl(PRCM_DSI_PLLOUT_SEL);
val &= ~dsiclk[n].divsel_mask;
val |= ((enable ? dsiclk[n].divsel : PRCM_DSI_PLLOUT_SEL_OFF) <<
dsiclk[n].divsel_shift);
writel(val, PRCM_DSI_PLLOUT_SEL);
return 0;
}
static int request_dsiescclk(u8 n, bool enable)
{
u32 val;
val = readl(PRCM_DSITVCLK_DIV);
if (enable)
val |= dsiescclk[n].en;
else
val &= ~dsiescclk[n].en;
writel(val, PRCM_DSITVCLK_DIV);
return 0;
}
/**
* db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
* @clock: The clock for which the request is made.
* @enable: Whether the clock should be enabled (true) or disabled (false).
*
* This function should only be used by the clock implementation.
* Do not use it from any other place!
*/
int db8500_prcmu_request_clock(u8 clock, bool enable)
{
if (clock == PRCMU_SGACLK)
return request_sga_clock(clock, enable);
else if (clock < PRCMU_NUM_REG_CLOCKS)
return request_clock(clock, enable);
else if (clock == PRCMU_TIMCLK)
return request_timclk(enable);
else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
return request_dsiclk((clock - PRCMU_DSI0CLK), enable);
else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
return request_dsiescclk((clock - PRCMU_DSI0ESCCLK), enable);
else if (clock == PRCMU_PLLDSI)
return request_plldsi(enable);
else if (clock == PRCMU_SYSCLK)
return request_sysclk(enable);
else if ((clock == PRCMU_PLLSOC0) || (clock == PRCMU_PLLSOC1))
return request_pll(clock, enable);
else
return -EINVAL;
}
static unsigned long pll_rate(void __iomem *reg, unsigned long src_rate,
int branch)
{
u64 rate;
u32 val;
u32 d;
u32 div = 1;
val = readl(reg);
rate = src_rate;
rate *= ((val & PRCM_PLL_FREQ_D_MASK) >> PRCM_PLL_FREQ_D_SHIFT);
d = ((val & PRCM_PLL_FREQ_N_MASK) >> PRCM_PLL_FREQ_N_SHIFT);
if (d > 1)
div *= d;
d = ((val & PRCM_PLL_FREQ_R_MASK) >> PRCM_PLL_FREQ_R_SHIFT);
if (d > 1)
div *= d;
if (val & PRCM_PLL_FREQ_SELDIV2)
div *= 2;
if ((branch == PLL_FIX) || ((branch == PLL_DIV) &&
(val & PRCM_PLL_FREQ_DIV2EN) &&
((reg == PRCM_PLLSOC0_FREQ) ||
(reg == PRCM_PLLARM_FREQ) ||
(reg == PRCM_PLLDDR_FREQ))))
div *= 2;
(void)do_div(rate, div);
return (unsigned long)rate;
}
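/*
 * Worked example for pll_rate() (illustrative register field values, not
 * taken from any particular datasheet): with a 38.4 MHz source and D = 50,
 * N = 1, R = 2, SELDIV2 clear and branch PLL_RAW, the rate becomes
 * 38400000 * 50 / 2 = 960 MHz. A PLL_FIX branch would double the divider
 * again, giving 480 MHz.
 */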
#define ROOT_CLOCK_RATE 38400000
static unsigned long clock_rate(u8 clock)
{
u32 val;
u32 pllsw;
unsigned long rate = ROOT_CLOCK_RATE;
val = readl(prcmu_base + clk_mgt[clock].offset);
if (val & PRCM_CLK_MGT_CLK38) {
if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
rate /= 2;
return rate;
}
val |= clk_mgt[clock].pllsw;
pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC0)
rate = pll_rate(PRCM_PLLSOC0_FREQ, rate, clk_mgt[clock].branch);
else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC1)
rate = pll_rate(PRCM_PLLSOC1_FREQ, rate, clk_mgt[clock].branch);
else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_DDR)
rate = pll_rate(PRCM_PLLDDR_FREQ, rate, clk_mgt[clock].branch);
else
return 0;
if ((clock == PRCMU_SGACLK) &&
(val & PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN)) {
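/* Divide by 2.5 using integer math: multiply by 10, then divide by 25. */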
u64 r = (rate * 10);
(void)do_div(r, 25);
return (unsigned long)r;
}
val &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
if (val)
return rate / val;
else
return 0;
}
static unsigned long armss_rate(void)
{
u32 r;
unsigned long rate;
r = readl(PRCM_ARM_CHGCLKREQ);
if (r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ) {
/* External ARMCLKFIX clock */
rate = pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_FIX);
/* Check PRCM_ARM_CHGCLKREQ divider */
if (!(r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_DIVSEL))
rate /= 2;
/* Check PRCM_ARMCLKFIX_MGT divider */
r = readl(PRCM_ARMCLKFIX_MGT);
r &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
rate /= r;
} else {/* ARM PLL */
rate = pll_rate(PRCM_PLLARM_FREQ, ROOT_CLOCK_RATE, PLL_DIV);
}
return rate;
}
static unsigned long dsiclk_rate(u8 n)
{
u32 divsel;
u32 div = 1;
divsel = readl(PRCM_DSI_PLLOUT_SEL);
divsel = ((divsel & dsiclk[n].divsel_mask) >> dsiclk[n].divsel_shift);
if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
divsel = dsiclk[n].divsel;
else
dsiclk[n].divsel = divsel;
switch (divsel) {
case PRCM_DSI_PLLOUT_SEL_PHI_4:
div *= 2;
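/* fall through */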
case PRCM_DSI_PLLOUT_SEL_PHI_2:
div *= 2;
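/* fall through */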
case PRCM_DSI_PLLOUT_SEL_PHI:
return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
PLL_RAW) / div;
default:
return 0;
}
}
static unsigned long dsiescclk_rate(u8 n)
{
u32 div;
div = readl(PRCM_DSITVCLK_DIV);
div = ((div & dsiescclk[n].div_mask) >> (dsiescclk[n].div_shift));
return clock_rate(PRCMU_TVCLK) / max((u32)1, div);
}
unsigned long prcmu_clock_rate(u8 clock)
{
if (clock < PRCMU_NUM_REG_CLOCKS)
return clock_rate(clock);
else if (clock == PRCMU_TIMCLK)
return ROOT_CLOCK_RATE / 16;
else if (clock == PRCMU_SYSCLK)
return ROOT_CLOCK_RATE;
else if (clock == PRCMU_PLLSOC0)
return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
else if (clock == PRCMU_PLLSOC1)
return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
else if (clock == PRCMU_ARMSS)
return armss_rate();
else if (clock == PRCMU_PLLDDR)
return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
else if (clock == PRCMU_PLLDSI)
return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
PLL_RAW);
else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
return dsiclk_rate(clock - PRCMU_DSI0CLK);
else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
return dsiescclk_rate(clock - PRCMU_DSI0ESCCLK);
else
return 0;
}
static unsigned long clock_source_rate(u32 clk_mgt_val, int branch)
{
if (clk_mgt_val & PRCM_CLK_MGT_CLK38)
return ROOT_CLOCK_RATE;
clk_mgt_val &= PRCM_CLK_MGT_CLKPLLSW_MASK;
if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC0)
return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, branch);
else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC1)
return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, branch);
else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_DDR)
return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, branch);
else
return 0;
}
static u32 clock_divider(unsigned long src_rate, unsigned long rate)
{
u32 div;
div = (src_rate / rate);
if (div == 0)
return 1;
if (rate < (src_rate / div))
div++;
return div;
}
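/*
 * clock_divider() rounds the divider up, so the resulting rate never
 * exceeds the requested one. Example: src_rate = 100 MHz and
 * rate = 30 MHz give div = 3 at first, but 100 MHz / 3 > 30 MHz, so
 * div is bumped to 4 (25 MHz).
 */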
static long round_clock_rate(u8 clock, unsigned long rate)
{
u32 val;
u32 div;
unsigned long src_rate;
long rounded_rate;
val = readl(prcmu_base + clk_mgt[clock].offset);
src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
clk_mgt[clock].branch);
div = clock_divider(src_rate, rate);
if (val & PRCM_CLK_MGT_CLK38) {
if (clk_mgt[clock].clk38div) {
if (div > 2)
div = 2;
} else {
div = 1;
}
} else if ((clock == PRCMU_SGACLK) && (div == 3)) {
u64 r = (src_rate * 10);
(void)do_div(r, 25);
if (r <= rate)
return (unsigned long)r;
}
rounded_rate = (src_rate / min(div, (u32)31));
return rounded_rate;
}
/* CPU FREQ table, may be changed at runtime if MAX_OPP is supported. */
static struct cpufreq_frequency_table db8500_cpufreq_table[] = {
{ .frequency = 200000, .driver_data = ARM_EXTCLK,},
{ .frequency = 400000, .driver_data = ARM_50_OPP,},
{ .frequency = 800000, .driver_data = ARM_100_OPP,},
{ .frequency = CPUFREQ_TABLE_END,}, /* To be used for MAX_OPP. */
{ .frequency = CPUFREQ_TABLE_END,},
};
static long round_armss_rate(unsigned long rate)
{
long freq = 0;
int i = 0;
/* The cpufreq table frequencies are in kHz. */
rate = rate / 1000;
/* Find the corresponding ARM OPP in the cpufreq table. */
while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
freq = db8500_cpufreq_table[i].frequency;
if (freq == rate)
break;
i++;
}
/* Return the last valid value, even if a match was not found. */
return freq * 1000;
}
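/*
 * Example: a request for 400 MHz matches the ARM_50_OPP table entry and
 * returns 400000000. A rate with no exact match (e.g. 500 MHz) walks off
 * the end of the table and the last valid entry is returned instead.
 */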
#define MIN_PLL_VCO_RATE 600000000ULL
#define MAX_PLL_VCO_RATE 1680640000ULL
static long round_plldsi_rate(unsigned long rate)
{
long rounded_rate = 0;
unsigned long src_rate;
unsigned long rem;
u32 r;
src_rate = clock_rate(PRCMU_HDMICLK);
rem = rate;
for (r = 7; (rem > 0) && (r > 0); r--) {
u64 d;
d = (r * rate);
(void)do_div(d, src_rate);
if (d < 6)
d = 6;
else if (d > 255)
d = 255;
d *= src_rate;
if (((2 * d) < (r * MIN_PLL_VCO_RATE)) ||
((r * MAX_PLL_VCO_RATE) < (2 * d)))
continue;
(void)do_div(d, r);
if (rate < d) {
if (rounded_rate == 0)
rounded_rate = (long)d;
break;
}
if ((rate - d) < rem) {
rem = (rate - d);
rounded_rate = (long)d;
}
}
return rounded_rate;
}
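/*
 * round_plldsi_rate() scans the post dividers r = 7..1 and, for each, the
 * feedback multiplier d = 6..255 that best approximates the requested
 * rate, discarding combinations whose VCO frequency (2 * d * src_rate / r,
 * per the check above) falls outside the allowed window. The rate closest
 * to, and preferably not above, the request wins.
 */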
static long round_dsiclk_rate(unsigned long rate)
{
u32 div;
unsigned long src_rate;
long rounded_rate;
src_rate = pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
PLL_RAW);
div = clock_divider(src_rate, rate);
rounded_rate = (src_rate / ((div > 2) ? 4 : div));
return rounded_rate;
}
static long round_dsiescclk_rate(unsigned long rate)
{
u32 div;
unsigned long src_rate;
long rounded_rate;
src_rate = clock_rate(PRCMU_TVCLK);
div = clock_divider(src_rate, rate);
rounded_rate = (src_rate / min(div, (u32)255));
return rounded_rate;
}
long prcmu_round_clock_rate(u8 clock, unsigned long rate)
{
if (clock < PRCMU_NUM_REG_CLOCKS)
return round_clock_rate(clock, rate);
else if (clock == PRCMU_ARMSS)
return round_armss_rate(rate);
else if (clock == PRCMU_PLLDSI)
return round_plldsi_rate(rate);
else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
return round_dsiclk_rate(rate);
else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
return round_dsiescclk_rate(rate);
else
return (long)prcmu_clock_rate(clock);
}
static void set_clock_rate(u8 clock, unsigned long rate)
{
u32 val;
u32 div;
unsigned long src_rate;
unsigned long flags;
spin_lock_irqsave(&clk_mgt_lock, flags);
/* Grab the HW semaphore. */
while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
cpu_relax();
val = readl(prcmu_base + clk_mgt[clock].offset);
src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
clk_mgt[clock].branch);
div = clock_divider(src_rate, rate);
if (val & PRCM_CLK_MGT_CLK38) {
if (clk_mgt[clock].clk38div) {
if (div > 1)
val |= PRCM_CLK_MGT_CLK38DIV;
else
val &= ~PRCM_CLK_MGT_CLK38DIV;
}
} else if (clock == PRCMU_SGACLK) {
val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK |
PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN);
if (div == 3) {
u64 r = (src_rate * 10);
(void)do_div(r, 25);
if (r <= rate) {
val |= PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN;
div = 0;
}
}
val |= min(div, (u32)31);
} else {
val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
val |= min(div, (u32)31);
}
writel(val, prcmu_base + clk_mgt[clock].offset);
/* Release the HW semaphore. */
writel(0, PRCM_SEM);
spin_unlock_irqrestore(&clk_mgt_lock, flags);
}
static int set_armss_rate(unsigned long rate)
{
int i = 0;
/* The cpufreq table frequencies are in kHz. */
rate = rate / 1000;
/* Find the corresponding ARM OPP in the cpufreq table. */
while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
if (db8500_cpufreq_table[i].frequency == rate)
break;
i++;
}
if (db8500_cpufreq_table[i].frequency != rate)
return -EINVAL;
/* Set the new ARM OPP. */
return db8500_prcmu_set_arm_opp(db8500_cpufreq_table[i].driver_data);
}
static int set_plldsi_rate(unsigned long rate)
{
unsigned long src_rate;
unsigned long rem;
u32 pll_freq = 0;
u32 r;
src_rate = clock_rate(PRCMU_HDMICLK);
rem = rate;
for (r = 7; (rem > 0) && (r > 0); r--) {
u64 d;
u64 hwrate;
d = (r * rate);
(void)do_div(d, src_rate);
if (d < 6)
d = 6;
else if (d > 255)
d = 255;
hwrate = (d * src_rate);
if (((2 * hwrate) < (r * MIN_PLL_VCO_RATE)) ||
((r * MAX_PLL_VCO_RATE) < (2 * hwrate)))
continue;
(void)do_div(hwrate, r);
if (rate < hwrate) {
if (pll_freq == 0)
pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
(r << PRCM_PLL_FREQ_R_SHIFT));
break;
}
if ((rate - hwrate) < rem) {
rem = (rate - hwrate);
pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
(r << PRCM_PLL_FREQ_R_SHIFT));
}
}
if (pll_freq == 0)
return -EINVAL;
pll_freq |= (1 << PRCM_PLL_FREQ_N_SHIFT);
writel(pll_freq, PRCM_PLLDSI_FREQ);
return 0;
}
static void set_dsiclk_rate(u8 n, unsigned long rate)
{
u32 val;
u32 div;
div = clock_divider(pll_rate(PRCM_PLLDSI_FREQ,
clock_rate(PRCMU_HDMICLK), PLL_RAW), rate);
dsiclk[n].divsel = (div == 1) ? PRCM_DSI_PLLOUT_SEL_PHI :
(div == 2) ? PRCM_DSI_PLLOUT_SEL_PHI_2 :
/* else */ PRCM_DSI_PLLOUT_SEL_PHI_4;
val = readl(PRCM_DSI_PLLOUT_SEL);
val &= ~dsiclk[n].divsel_mask;
val |= (dsiclk[n].divsel << dsiclk[n].divsel_shift);
writel(val, PRCM_DSI_PLLOUT_SEL);
}
static void set_dsiescclk_rate(u8 n, unsigned long rate)
{
u32 val;
u32 div;
div = clock_divider(clock_rate(PRCMU_TVCLK), rate);
val = readl(PRCM_DSITVCLK_DIV);
val &= ~dsiescclk[n].div_mask;
val |= (min(div, (u32)255) << dsiescclk[n].div_shift);
writel(val, PRCM_DSITVCLK_DIV);
}
int prcmu_set_clock_rate(u8 clock, unsigned long rate)
{
if (clock < PRCMU_NUM_REG_CLOCKS)
set_clock_rate(clock, rate);
else if (clock == PRCMU_ARMSS)
return set_armss_rate(rate);
else if (clock == PRCMU_PLLDSI)
return set_plldsi_rate(rate);
else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
set_dsiclk_rate((clock - PRCMU_DSI0CLK), rate);
else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
set_dsiescclk_rate((clock - PRCMU_DSI0ESCCLK), rate);
return 0;
}
int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
(state < ESRAM0_DEEP_SLEEP_STATE_OFF))
return -EINVAL;
mutex_lock(&mb4_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
(tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
writeb(DDR_PWR_STATE_ON,
(tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));
writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
return 0;
}
int db8500_prcmu_config_hotdog(u8 threshold)
{
mutex_lock(&mb4_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
return 0;
}
int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
mutex_lock(&mb4_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
return 0;
}
static int config_hot_period(u16 val)
{
mutex_lock(&mb4_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
return 0;
}
int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
if (cycles32k == 0xFFFF)
return -EINVAL;
return config_hot_period(cycles32k);
}
int db8500_prcmu_stop_temp_sense(void)
{
return config_hot_period(0xFFFF);
}
static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
{
mutex_lock(&mb4_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
cpu_relax();
writeb(d0, (tcdm_base + PRCM_REQ_MB4_A9WDOG_0));
writeb(d1, (tcdm_base + PRCM_REQ_MB4_A9WDOG_1));
writeb(d2, (tcdm_base + PRCM_REQ_MB4_A9WDOG_2));
writeb(d3, (tcdm_base + PRCM_REQ_MB4_A9WDOG_3));
writeb(cmd, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb4_transfer.work);
mutex_unlock(&mb4_transfer.lock);
return 0;
}
int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
{
BUG_ON(num == 0 || num > 0xf);
return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0,
sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
A9WDOG_AUTO_OFF_DIS);
}
EXPORT_SYMBOL(db8500_prcmu_config_a9wdog);
int db8500_prcmu_enable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_enable_a9wdog);
int db8500_prcmu_disable_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_disable_a9wdog);
int db8500_prcmu_kick_a9wdog(u8 id)
{
return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_kick_a9wdog);
/*
* The timeout is 28 bits wide, in ms.
*/
int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
{
return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
(id & A9WDOG_ID_MASK) |
/*
* Put the lowest 28 bits of the timeout at
* offset 4; the first four bits carry the id.
*/
(u8)((timeout << 4) & 0xf0),
(u8)((timeout >> 4) & 0xff),
(u8)((timeout >> 12) & 0xff),
(u8)((timeout >> 20) & 0xff));
}
EXPORT_SYMBOL(db8500_prcmu_load_a9wdog);
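/*
 * Packing example for db8500_prcmu_load_a9wdog() (illustrative values):
 * a timeout of 60000 ms (0xEA60) with id 1 is sent as the bytes
 * 0x01, 0xA6, 0x0E, 0x00. The low nibble of the first byte carries the
 * id, and the 28-bit timeout fills the remaining nibbles, least
 * significant first.
 */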
/**
* prcmu_abb_read() - Read register value(s) from the ABB.
* @slave: The I2C slave address.
* @reg: The (start) register address.
* @value: The read out value(s).
* @size: The number of registers to read.
*
* Reads register value(s) from the ABB.
* @size has to be 1 for the current firmware version.
*/
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
{
int r;
if (size != 1)
return -EINVAL;
mutex_lock(&mb5_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(0, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
if (!wait_for_completion_timeout(&mb5_transfer.work,
msecs_to_jiffies(20000))) {
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
} else {
r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
}
if (!r)
*value = mb5_transfer.ack.value;
mutex_unlock(&mb5_transfer.lock);
return r;
}
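/*
 * Minimal usage sketch for prcmu_abb_read() (the slave and register
 * addresses below are placeholders, not real AB8500 assignments):
 *
 *	u8 val;
 *	int err = prcmu_abb_read(0x0f, 0x00, &val, 1);
 *	if (!err)
 *		pr_info("ABB register: 0x%02x\n", val);
 */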
/**
* prcmu_abb_write_masked() - Write masked register value(s) to the ABB.
* @slave: The I2C slave address.
* @reg: The (start) register address.
* @value: The value(s) to write.
* @mask: The mask(s) to use.
* @size: The number of registers to write.
*
* Writes masked register value(s) to the ABB.
* For each @value, only the bits set to 1 in the corresponding @mask
* will be written. The other bits are not changed.
* @size has to be 1 for the current firmware version.
*/
int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size)
{
int r;
if (size != 1)
return -EINVAL;
mutex_lock(&mb5_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
cpu_relax();
writeb(~*mask, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
if (!wait_for_completion_timeout(&mb5_transfer.work,
msecs_to_jiffies(20000))) {
pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
__func__);
r = -EIO;
} else {
r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
}
mutex_unlock(&mb5_transfer.lock);
return r;
}
/**
* prcmu_abb_write() - Write register value(s) to the ABB.
* @slave: The I2C slave address.
* @reg: The (start) register address.
* @value: The value(s) to write.
* @size: The number of registers to write.
*
* Writes register value(s) to the ABB.
* @size has to be 1 for the current firmware version.
*/
int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
{
u8 mask = ~0;
return prcmu_abb_write_masked(slave, reg, value, &mask, size);
}
/**
* prcmu_ac_wake_req - should be called whenever the ARM wants to wake up the modem
*/
int prcmu_ac_wake_req(void)
{
u32 val;
int ret = 0;
mutex_lock(&mb0_transfer.ac_wake_lock);
val = readl(PRCM_HOSTACCESS_REQ);
if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
goto unlock_and_return;
atomic_set(&ac_wake_req_state, 1);
/*
* Force modem wake-up before the hostaccess_req ping-pong.
* It prevents the modem from entering sleep while acking the hostaccess
* request. The 31 us delay has been calculated by HWI.
*/
val |= PRCM_HOSTACCESS_REQ_WAKE_REQ;
writel(val, PRCM_HOSTACCESS_REQ);
udelay(31);
val |= PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ;
writel(val, PRCM_HOSTACCESS_REQ);
if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
msecs_to_jiffies(5000))) {
#if defined(CONFIG_DBX500_PRCMU_DEBUG)
db8500_prcmu_debug_dump(__func__, true, true);
#endif
pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
__func__);
ret = -EFAULT;
}
unlock_and_return:
mutex_unlock(&mb0_transfer.ac_wake_lock);
return ret;
}
/**
* prcmu_ac_sleep_req - called when the ARM no longer needs to talk to the modem
*/
void prcmu_ac_sleep_req(void)
{
u32 val;
mutex_lock(&mb0_transfer.ac_wake_lock);
val = readl(PRCM_HOSTACCESS_REQ);
if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
goto unlock_and_return;
writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
PRCM_HOSTACCESS_REQ);
if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
msecs_to_jiffies(5000))) {
pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
__func__);
}
atomic_set(&ac_wake_req_state, 0);
unlock_and_return:
mutex_unlock(&mb0_transfer.ac_wake_lock);
}
bool db8500_prcmu_is_ac_wake_requested(void)
{
return (atomic_read(&ac_wake_req_state) != 0);
}
/**
* db8500_prcmu_system_reset - System reset
* @reset_code: The reset reason code to save before restarting.
*
* Saves the reset reason code and then sets the APE_SOFTRST register, which
* fires an interrupt to the firmware.
*/
void db8500_prcmu_system_reset(u16 reset_code)
{
writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
writel(1, PRCM_APE_SOFTRST);
}
/**
* db8500_prcmu_get_reset_code - Retrieve SW reset reason code
*
* Retrieves the reset reason code stored by db8500_prcmu_system_reset()
* before the last restart.
*/
u16 db8500_prcmu_get_reset_code(void)
{
return readw(tcdm_base + PRCM_SW_RST_REASON);
}
/**
* db8500_prcmu_modem_reset - ask the PRCMU to reset the modem
*/
void db8500_prcmu_modem_reset(void)
{
mutex_lock(&mb1_transfer.lock);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
cpu_relax();
writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
wait_for_completion(&mb1_transfer.work);
/*
* No need to check the return value from the PRCMU, as the modem should
* go into the reset state; that state is already managed by the upper layer.
*/
mutex_unlock(&mb1_transfer.lock);
}
static void ack_dbb_wakeup(void)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.lock, flags);
while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
cpu_relax();
writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
static inline void print_unknown_header_warning(u8 n, u8 header)
{
pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
header, n);
}
static bool read_mailbox_0(void)
{
bool r;
u32 ev;
unsigned int n;
u8 header;
header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
switch (header) {
case MB0H_WAKEUP_EXE:
case MB0H_WAKEUP_SLEEP:
if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
else
ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);
if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
complete(&mb0_transfer.ac_wake_work);
if (ev & WAKEUP_BIT_SYSCLK_OK)
complete(&mb3_transfer.sysclk_work);
ev &= mb0_transfer.req.dbb_irqs;
for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
if (ev & prcmu_irq_bit[n])
generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
}
r = true;
break;
default:
print_unknown_header_warning(0, header);
r = false;
break;
}
writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
return r;
}
static bool read_mailbox_1(void)
{
mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
mb1_transfer.ack.arm_opp = readb(tcdm_base +
PRCM_ACK_MB1_CURRENT_ARM_OPP);
mb1_transfer.ack.ape_opp = readb(tcdm_base +
PRCM_ACK_MB1_CURRENT_APE_OPP);
mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
complete(&mb1_transfer.work);
return false;
}
static bool read_mailbox_2(void)
{
mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
complete(&mb2_transfer.work);
return false;
}
static bool read_mailbox_3(void)
{
writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_4(void)
{
u8 header;
bool do_complete = true;
header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
switch (header) {
case MB4H_MEM_ST:
case MB4H_HOTDOG:
case MB4H_HOTMON:
case MB4H_HOT_PERIOD:
case MB4H_A9WDOG_CONF:
case MB4H_A9WDOG_EN:
case MB4H_A9WDOG_DIS:
case MB4H_A9WDOG_LOAD:
case MB4H_A9WDOG_KICK:
break;
default:
print_unknown_header_warning(4, header);
do_complete = false;
break;
}
writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
if (do_complete)
complete(&mb4_transfer.work);
return false;
}
static bool read_mailbox_5(void)
{
mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
complete(&mb5_transfer.work);
return false;
}
static bool read_mailbox_6(void)
{
writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
return false;
}
static bool read_mailbox_7(void)
{
writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
return false;
}
static bool (* const read_mailbox[NUM_MB])(void) = {
read_mailbox_0,
read_mailbox_1,
read_mailbox_2,
read_mailbox_3,
read_mailbox_4,
read_mailbox_5,
read_mailbox_6,
read_mailbox_7
};
static irqreturn_t prcmu_irq_handler(int irq, void *data)
{
u32 bits;
u8 n;
irqreturn_t r;
bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
if (unlikely(!bits))
return IRQ_NONE;
r = IRQ_HANDLED;
for (n = 0; bits; n++) {
if (bits & MBOX_BIT(n)) {
bits -= MBOX_BIT(n);
if (read_mailbox[n]())
r = IRQ_WAKE_THREAD;
}
}
return r;
}
static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
{
ack_dbb_wakeup();
return IRQ_HANDLED;
}
static void prcmu_mask_work(struct work_struct *work)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.lock, flags);
config_wakeups();
spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}
static void prcmu_irq_mask(struct irq_data *d)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->hwirq];
spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
if (d->irq != IRQ_PRCMU_CA_SLEEP)
schedule_work(&mb0_transfer.mask_work);
}
static void prcmu_irq_unmask(struct irq_data *d)
{
unsigned long flags;
spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->hwirq];
spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
if (d->irq != IRQ_PRCMU_CA_SLEEP)
schedule_work(&mb0_transfer.mask_work);
}
static void noop(struct irq_data *d)
{
}
static struct irq_chip prcmu_irq_chip = {
.name = "prcmu",
.irq_disable = prcmu_irq_mask,
.irq_ack = noop,
.irq_mask = prcmu_irq_mask,
.irq_unmask = prcmu_irq_unmask,
};
static __init char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
return "U8500";
case PRCMU_FW_PROJECT_U8400:
return "U8400";
case PRCMU_FW_PROJECT_U9500:
return "U9500";
case PRCMU_FW_PROJECT_U8500_MBB:
return "U8500 MBB";
case PRCMU_FW_PROJECT_U8500_C1:
return "U8500 C1";
case PRCMU_FW_PROJECT_U8500_C2:
return "U8500 C2";
case PRCMU_FW_PROJECT_U8500_C3:
return "U8500 C3";
case PRCMU_FW_PROJECT_U8500_C4:
return "U8500 C4";
case PRCMU_FW_PROJECT_U9500_MBL:
return "U9500 MBL";
case PRCMU_FW_PROJECT_U8500_MBL:
return "U8500 MBL";
case PRCMU_FW_PROJECT_U8500_MBL2:
return "U8500 MBL2";
case PRCMU_FW_PROJECT_U8520:
return "U8520 MBL";
case PRCMU_FW_PROJECT_U8420:
return "U8420";
case PRCMU_FW_PROJECT_U9540:
return "U9540";
case PRCMU_FW_PROJECT_A9420:
return "A9420";
case PRCMU_FW_PROJECT_L8540:
return "L8540";
case PRCMU_FW_PROJECT_L8580:
return "L8580";
default:
return "Unknown";
}
}
static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(virq, &prcmu_irq_chip,
handle_simple_irq);
set_irq_flags(virq, IRQF_VALID);
return 0;
}
static struct irq_domain_ops db8500_irq_ops = {
.map = db8500_irq_map,
.xlate = irq_domain_xlate_twocell,
};
static int db8500_irq_init(struct device_node *np, int irq_base)
{
int i;
/* In the device tree case, just take some IRQs */
if (np)
irq_base = 0;
db8500_irq_domain = irq_domain_add_simple(
np, NUM_PRCMU_WAKEUPS, irq_base,
&db8500_irq_ops, NULL);
if (!db8500_irq_domain) {
pr_err("Failed to create irqdomain\n");
return -ENOSYS;
}
/* All wakeups will be used, so create mappings for all */
for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
irq_create_mapping(db8500_irq_domain, i);
return 0;
}
static void dbx500_fw_version_init(struct platform_device *pdev,
u32 version_offset)
{
struct resource *res;
void __iomem *tcpm_base;
u32 version;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"prcmu-tcpm");
if (!res) {
dev_err(&pdev->dev,
"Error: no prcmu tcpm memory region provided\n");
return;
}
tcpm_base = ioremap(res->start, resource_size(res));
if (!tcpm_base) {
dev_err(&pdev->dev, "no prcmu tcpm mem region provided\n");
return;
}
version = readl(tcpm_base + version_offset);
fw_info.version.project = (version & 0xFF);
fw_info.version.api_version = (version >> 8) & 0xFF;
fw_info.version.func_version = (version >> 16) & 0xFF;
fw_info.version.errata = (version >> 24) & 0xFF;
strncpy(fw_info.version.project_name,
fw_project_name(fw_info.version.project),
PRCMU_FW_PROJECT_NAME_LEN);
fw_info.valid = true;
pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n",
fw_info.version.project_name,
fw_info.version.project,
fw_info.version.api_version,
fw_info.version.func_version,
fw_info.version.errata);
iounmap(tcpm_base);
}
void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
{
/*
* This is a temporary remap to bring up the clocks. It is
* subsequently replaced with a real remap. After the merge of
* the mailbox subsystem all of this early code goes away, and the
* clock driver can probe independently. An early initcall will
* still be needed, but it can be diverted into drivers/clk/ux500.
*/
prcmu_base = ioremap(phy_base, size);
if (!prcmu_base)
pr_err("%s: ioremap() of prcmu registers failed!\n", __func__);
spin_lock_init(&mb0_transfer.lock);
spin_lock_init(&mb0_transfer.dbb_irqs_lock);
mutex_init(&mb0_transfer.ac_wake_lock);
init_completion(&mb0_transfer.ac_wake_work);
mutex_init(&mb1_transfer.lock);
init_completion(&mb1_transfer.work);
mb1_transfer.ape_opp = APE_NO_CHANGE;
mutex_init(&mb2_transfer.lock);
init_completion(&mb2_transfer.work);
spin_lock_init(&mb2_transfer.auto_pm_lock);
spin_lock_init(&mb3_transfer.lock);
mutex_init(&mb3_transfer.sysclk_lock);
init_completion(&mb3_transfer.sysclk_work);
mutex_init(&mb4_transfer.lock);
init_completion(&mb4_transfer.work);
mutex_init(&mb5_transfer.lock);
init_completion(&mb5_transfer.work);
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
static void __init init_prcm_registers(void)
{
u32 val;
val = readl(PRCM_A9PL_FORCE_CLKEN);
val &= ~(PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN |
PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN);
writel(val, (PRCM_A9PL_FORCE_CLKEN));
}
/*
* Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
*/
static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("v-ape", NULL),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
/* "v-mmc" changed to "vcore" in the mainline kernel */
REGULATOR_SUPPLY("vcore", "sdi0"),
REGULATOR_SUPPLY("vcore", "sdi1"),
REGULATOR_SUPPLY("vcore", "sdi2"),
REGULATOR_SUPPLY("vcore", "sdi3"),
REGULATOR_SUPPLY("vcore", "sdi4"),
REGULATOR_SUPPLY("v-dma", "dma40.0"),
REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
/* "v-uart" changed to "vcore" in the mainline kernel */
REGULATOR_SUPPLY("vcore", "uart0"),
REGULATOR_SUPPLY("vcore", "uart1"),
REGULATOR_SUPPLY("vcore", "uart2"),
REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
/* AV8100 regulator */
REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};
static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
REGULATOR_SUPPLY("vsupply", "mcde"),
};
/* SVA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_svammdsp_consumers[] = {
REGULATOR_SUPPLY("sva-mmdsp", "cm_control"),
};
/* SVA pipe regulator switch */
static struct regulator_consumer_supply db8500_svapipe_consumers[] = {
REGULATOR_SUPPLY("sva-pipe", "cm_control"),
};
/* SIA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_siammdsp_consumers[] = {
REGULATOR_SUPPLY("sia-mmdsp", "cm_control"),
};
/* SIA pipe regulator switch */
static struct regulator_consumer_supply db8500_siapipe_consumers[] = {
REGULATOR_SUPPLY("sia-pipe", "cm_control"),
};
static struct regulator_consumer_supply db8500_sga_consumers[] = {
REGULATOR_SUPPLY("v-mali", NULL),
};
/* ESRAM1 and 2 regulator switch */
static struct regulator_consumer_supply db8500_esram12_consumers[] = {
REGULATOR_SUPPLY("esram12", "cm_control"),
};
/* ESRAM3 and 4 regulator switch */
static struct regulator_consumer_supply db8500_esram34_consumers[] = {
REGULATOR_SUPPLY("v-esram34", "mcde"),
REGULATOR_SUPPLY("esram34", "cm_control"),
REGULATOR_SUPPLY("lcla_esram", "dma40.0"),
};
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.constraints = {
.name = "db8500-vape",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.always_on = true,
},
.consumer_supplies = db8500_vape_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
},
[DB8500_REGULATOR_VARM] = {
.constraints = {
.name = "db8500-varm",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_VMODEM] = {
.constraints = {
.name = "db8500-vmodem",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_VPLL] = {
.constraints = {
.name = "db8500-vpll",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_VSMPS1] = {
.constraints = {
.name = "db8500-vsmps1",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_VSMPS2] = {
.constraints = {
.name = "db8500-vsmps2",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_vsmps2_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
},
[DB8500_REGULATOR_VSMPS3] = {
.constraints = {
.name = "db8500-vsmps3",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_VRF1] = {
.constraints = {
.name = "db8500-vrf1",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
/* the dependency on u8500-vape is handled outside the regulator framework */
.constraints = {
.name = "db8500-sva-mmdsp",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_svammdsp_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_svammdsp_consumers),
},
[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
.constraints = {
/* "ret" means "retention" */
.name = "db8500-sva-mmdsp-ret",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
/* the dependency on u8500-vape is handled outside the regulator framework */
.constraints = {
.name = "db8500-sva-pipe",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_svapipe_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers),
},
[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
/* the dependency on u8500-vape is handled outside the regulator framework */
.constraints = {
.name = "db8500-sia-mmdsp",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_siammdsp_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_siammdsp_consumers),
},
[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
.constraints = {
.name = "db8500-sia-mmdsp-ret",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
/* the dependency on u8500-vape is handled outside the regulator framework */
.constraints = {
.name = "db8500-sia-pipe",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_siapipe_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_siapipe_consumers),
},
[DB8500_REGULATOR_SWITCH_SGA] = {
.supply_regulator = "db8500-vape",
.constraints = {
.name = "db8500-sga",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_sga_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_sga_consumers),
},
[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
.supply_regulator = "db8500-vape",
.constraints = {
.name = "db8500-b2r2-mcde",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_b2r2_mcde_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
},
[DB8500_REGULATOR_SWITCH_ESRAM12] = {
/*
* esram12 is put in retention and supplied by Vsafe when Vape is off,
* so there is no need to hold Vape.
*/
.constraints = {
.name = "db8500-esram12",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_esram12_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_esram12_consumers),
},
[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
.constraints = {
.name = "db8500-esram12-ret",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
[DB8500_REGULATOR_SWITCH_ESRAM34] = {
/*
* esram34 is put in retention and supplied by Vsafe when Vape is off,
* so there is no need to hold Vape.
*/
.constraints = {
.name = "db8500-esram34",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.consumer_supplies = db8500_esram34_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_esram34_consumers),
},
[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
.constraints = {
.name = "db8500-esram34-ret",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
},
};
static struct ux500_wdt_data db8500_wdt_pdata = {
.timeout = 600, /* 10 minutes */
.has_28_bits_resolution = true,
};
/*
* Thermal Sensor
*/
static struct resource db8500_thsens_resources[] = {
{
.name = "IRQ_HOTMON_LOW",
.start = IRQ_PRCMU_HOTMON_LOW,
.end = IRQ_PRCMU_HOTMON_LOW,
.flags = IORESOURCE_IRQ,
},
{
.name = "IRQ_HOTMON_HIGH",
.start = IRQ_PRCMU_HOTMON_HIGH,
.end = IRQ_PRCMU_HOTMON_HIGH,
.flags = IORESOURCE_IRQ,
},
};
static struct db8500_thsens_platform_data db8500_thsens_data = {
.trip_points[0] = {
.temp = 70000,
.type = THERMAL_TRIP_ACTIVE,
.cdev_name = {
[0] = "thermal-cpufreq-0",
},
},
.trip_points[1] = {
.temp = 75000,
.type = THERMAL_TRIP_ACTIVE,
.cdev_name = {
[0] = "thermal-cpufreq-0",
},
},
.trip_points[2] = {
.temp = 80000,
.type = THERMAL_TRIP_ACTIVE,
.cdev_name = {
[0] = "thermal-cpufreq-0",
},
},
.trip_points[3] = {
.temp = 85000,
.type = THERMAL_TRIP_CRITICAL,
},
.num_trips = 4,
};
static struct mfd_cell common_prcmu_devs[] = {
{
.name = "ux500_wdt",
.platform_data = &db8500_wdt_pdata,
.pdata_size = sizeof(db8500_wdt_pdata),
.id = -1,
},
};
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
.of_compatible = "stericsson,db8500-prcmu-regulator",
.platform_data = &db8500_regulators,
.pdata_size = sizeof(db8500_regulators),
},
{
.name = "cpufreq-ux500",
.of_compatible = "stericsson,cpufreq-ux500",
.platform_data = &db8500_cpufreq_table,
.pdata_size = sizeof(db8500_cpufreq_table),
},
{
.name = "db8500-thermal",
.num_resources = ARRAY_SIZE(db8500_thsens_resources),
.resources = db8500_thsens_resources,
.platform_data = &db8500_thsens_data,
.pdata_size = sizeof(db8500_thsens_data),
},
};
static void db8500_prcmu_update_cpufreq(void)
{
if (prcmu_has_arm_maxopp()) {
db8500_cpufreq_table[3].frequency = 1000000;
db8500_cpufreq_table[3].driver_data = ARM_MAX_OPP;
}
}
static int db8500_prcmu_register_ab8500(struct device *parent,
struct ab8500_platform_data *pdata,
int irq)
{
struct resource ab8500_resource = DEFINE_RES_IRQ(irq);
struct mfd_cell ab8500_cell = {
.name = "ab8500-core",
.of_compatible = "stericsson,ab8500",
.id = AB8500_VERSION_AB8500,
.platform_data = pdata,
.pdata_size = sizeof(struct ab8500_platform_data),
.resources = &ab8500_resource,
.num_resources = 1,
};
return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL);
}
/**
* db8500_prcmu_probe - probe function for the DB8500 PRCMU driver
*/
static int db8500_prcmu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq = 0, err = 0;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu");
if (!res) {
dev_err(&pdev->dev, "no prcmu memory region provided\n");
return -ENOENT;
}
prcmu_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!prcmu_base) {
dev_err(&pdev->dev,
"failed to ioremap prcmu register memory\n");
return -ENOENT;
}
init_prcm_registers();
dbx500_fw_version_init(pdev, pdata->version_offset);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
if (!res) {
dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
return -ENOENT;
}
tcdm_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!tcdm_base) {
dev_err(&pdev->dev,
"failed to ioremap prcmu-tcdm register memory\n");
return -ENOENT;
}
/* Clean up the mailbox interrupts after pre-kernel code. */
writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev, "no prcmu irq provided\n");
return -ENOENT;
}
err = request_threaded_irq(irq, prcmu_irq_handler,
prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
if (err < 0) {
pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
err = -EBUSY;
goto no_irq_return;
}
db8500_irq_init(np, pdata->irq_base);
prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
db8500_prcmu_update_cpufreq();
err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs,
ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain);
if (err) {
pr_err("prcmu: Failed to add subdevices\n");
return err;
}
/* TODO: Remove restriction when clk definitions are available. */
if (!of_machine_is_compatible("st-ericsson,u8540")) {
err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
ARRAY_SIZE(db8500_prcmu_devs), NULL, 0,
db8500_irq_domain);
if (err) {
mfd_remove_devices(&pdev->dev);
pr_err("prcmu: Failed to add subdevices\n");
goto no_irq_return;
}
}
err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata,
pdata->ab_irq);
if (err) {
mfd_remove_devices(&pdev->dev);
pr_err("prcmu: Failed to add ab8500 subdevice\n");
goto no_irq_return;
}
pr_info("DB8500 PRCMU initialized\n");
no_irq_return:
return err;
}
static const struct of_device_id db8500_prcmu_match[] = {
{ .compatible = "stericsson,db8500-prcmu"},
{ },
};
static struct platform_driver db8500_prcmu_driver = {
.driver = {
.name = "db8500-prcmu",
.owner = THIS_MODULE,
.of_match_table = db8500_prcmu_match,
},
.probe = db8500_prcmu_probe,
};
static int __init db8500_prcmu_init(void)
{
return platform_driver_register(&db8500_prcmu_driver);
}
core_initcall(db8500_prcmu_init);
MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
derekcentrico/m6.kernel.3.x | drivers/staging/serqt_usb2/serqt_usb2.c | 2185 | 42375 | /*
* This code was developed for the Quatech USB line for Linux; it uses
* much of the code developed by Greg Kroah-Hartman for USB serial devices.
*
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
static int debug;
/* Version Information */
#define DRIVER_VERSION "v2.14"
#define DRIVER_AUTHOR "Tim Gobeli, Quatech, Inc"
#define DRIVER_DESC "Quatech USB to Serial Driver"
#define USB_VENDOR_ID_QUATECH 0x061d /* Quatech VID */
#define QUATECH_SSU200 0xC030 /* SSU200 */
#define QUATECH_DSU100 0xC040 /* DSU100 */
#define QUATECH_DSU200 0xC050 /* DSU200 */
#define QUATECH_QSU100 0xC060 /* QSU100 */
#define QUATECH_QSU200 0xC070 /* QSU200 */
#define QUATECH_ESU100A 0xC080 /* ESU100A */
#define QUATECH_ESU100B 0xC081 /* ESU100B */
#define QUATECH_ESU200A 0xC0A0 /* ESU200A */
#define QUATECH_ESU200B 0xC0A1 /* ESU200B */
#define QUATECH_HSU100A 0xC090 /* HSU100A */
#define QUATECH_HSU100B 0xC091 /* HSU100B */
#define QUATECH_HSU100C 0xC092 /* HSU100C */
#define QUATECH_HSU100D 0xC093 /* HSU100D */
#define QUATECH_HSU200A 0xC0B0 /* HSU200A */
#define QUATECH_HSU200B 0xC0B1 /* HSU200B */
#define QUATECH_HSU200C 0xC0B2 /* HSU200C */
#define QUATECH_HSU200D 0xC0B3 /* HSU200D */
#define QUATECH_SSU100_2 0xC120 /* SSU100_2 */
#define QUATECH_DSU100_2 0xC140 /* DSU100_2 */
#define QUATECH_DSU400_2 0xC150 /* DSU400_2 */
#define QUATECH_QSU100_2 0xC160 /* QSU100_2 */
#define QUATECH_QSU400_2 0xC170 /* QSU400_2 */
#define QUATECH_ESU400_2 0xC180 /* ESU400_2 */
#define QUATECH_ESU100_2 0xC1A0 /* ESU100_2 */
#define QT_SET_GET_DEVICE 0xc2
#define QT_OPEN_CLOSE_CHANNEL 0xca
#define QT_GET_SET_PREBUF_TRIG_LVL 0xcc
#define QT_SET_ATF 0xcd
#define QT_GET_SET_REGISTER 0xc0
#define QT_GET_SET_UART 0xc1
#define QT_HW_FLOW_CONTROL_MASK 0xc5
#define QT_SW_FLOW_CONTROL_MASK 0xc6
#define QT_SW_FLOW_CONTROL_DISABLE 0xc7
#define QT_BREAK_CONTROL 0xc8
#define USBD_TRANSFER_DIRECTION_IN 0xc0
#define USBD_TRANSFER_DIRECTION_OUT 0x40
#define MAX_BAUD_RATE 460800
#define MAX_BAUD_REMAINDER 4608
#define DIV_LATCH_LS 0x00
#define XMT_HOLD_REGISTER 0x00
#define XVR_BUFFER_REGISTER 0x00
#define DIV_LATCH_MS 0x01
#define FIFO_CONTROL_REGISTER 0x02
#define LINE_CONTROL_REGISTER 0x03
#define MODEM_CONTROL_REGISTER 0x04
#define LINE_STATUS_REGISTER 0x05
#define MODEM_STATUS_REGISTER 0x06
#define SERIAL_MCR_DTR 0x01
#define SERIAL_MCR_RTS 0x02
#define SERIAL_MCR_LOOP 0x10
#define SERIAL_MSR_CTS 0x10
#define SERIAL_MSR_CD 0x80
#define SERIAL_MSR_RI 0x40
#define SERIAL_MSR_DSR 0x20
#define SERIAL_MSR_MASK 0xf0
#define SERIAL_8_DATA 0x03
#define SERIAL_7_DATA 0x02
#define SERIAL_6_DATA 0x01
#define SERIAL_5_DATA 0x00
#define SERIAL_ODD_PARITY 0X08
#define SERIAL_EVEN_PARITY 0X18
#define SERIAL_TWO_STOPB 0x04
#define SERIAL_ONE_STOPB 0x00
#define DEFAULT_DIVISOR 0x30 /* gives 9600 baud rate */
#define DEFAULT_LCR SERIAL_8_DATA /* 8, none , 1 */
#define FULLPWRBIT 0x00000080
#define NEXT_BOARD_POWER_BIT 0x00000004
#define SERIAL_LSR_OE 0x02
#define SERIAL_LSR_PE 0x04
#define SERIAL_LSR_FE 0x08
#define SERIAL_LSR_BI 0x10
#define SERIAL_MSR_CTS 0x10
#define SERIAL_MSR_CD 0x80
#define SERIAL_MSR_RI 0x40
#define SERIAL_MSR_DSR 0x20
#define SERIAL_MSR_MASK 0xf0
#define PREFUFF_LEVEL_CONSERVATIVE 128
#define ATC_DISABLED 0x0
#define RR_BITS 0x03 /* for clearing clock bits */
#define DUPMODE_BITS 0xc0
#define CLKS_X4 0x02
#define LOOPMODE_BITS 0x41 /* LOOP1 = b6, LOOP0 = b0 (PORT B) */
#define ALL_LOOPBACK 0x01
#define MODEM_CTRL 0x40
#define RS232_MODE 0x00
static const struct usb_device_id serqt_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU200)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU200)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU200)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU100A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU100B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU200A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU200B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100C)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100D)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200C)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200D)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU100_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU400_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU100_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU400_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU400_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU100_2)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, serqt_id_table);
struct qt_get_device_data {
__u8 porta;
__u8 portb;
__u8 portc;
};
struct qt_open_channel_data {
__u8 line_status;
__u8 modem_status;
};
struct quatech_port {
int port_num; /* number of the port */
struct urb *write_urb; /* write URB for this port */
struct urb *read_urb; /* read URB for this port */
struct urb *int_urb;
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
__u8 shadowMSR; /* last MSR value received */
__u8 shadowLSR; /* last LSR value received */
char open_ports;
/* Used for TIOCMIWAIT */
wait_queue_head_t msr_wait;
char prev_status, diff_status;
wait_queue_head_t wait;
struct async_icount icount;
struct usb_serial_port *port; /* owner of this object */
struct qt_get_device_data DeviceData;
spinlock_t lock;
bool read_urb_busy;
int RxHolding;
int ReadBulkStopped;
char closePending;
};
static struct usb_driver serqt_usb_driver = {
.name = "quatech-usb-serial",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = serqt_id_table,
.no_dynamic_id = 1,
};
static int port_paranoia_check(struct usb_serial_port *port,
const char *function)
{
if (!port) {
dbg("%s - port == NULL", function);
return -1;
}
if (!port->serial) {
dbg("%s - port->serial == NULL\n", function);
return -1;
}
return 0;
}
static int serial_paranoia_check(struct usb_serial *serial,
const char *function)
{
if (!serial) {
dbg("%s - serial == NULL\n", function);
return -1;
}
if (!serial->type) {
dbg("%s - serial->type == NULL!", function);
return -1;
}
return 0;
}
static inline struct quatech_port *qt_get_port_private(struct usb_serial_port
*port)
{
return (struct quatech_port *)usb_get_serial_port_data(port);
}
static inline void qt_set_port_private(struct usb_serial_port *port,
struct quatech_port *data)
{
usb_set_serial_port_data(port, (void *)data);
}
static struct usb_serial *get_usb_serial(struct usb_serial_port *port,
const char *function)
{
/* if no port was specified, or it fails a paranoia check */
if (!port ||
port_paranoia_check(port, function) ||
serial_paranoia_check(port->serial, function)) {
/*
* then say that we don't have a valid usb_serial thing,
* which will end up generating -ENODEV return values
*/
return NULL;
}
return port->serial;
}
static void ProcessLineStatus(struct quatech_port *qt_port,
unsigned char line_status)
{
qt_port->shadowLSR =
line_status & (SERIAL_LSR_OE | SERIAL_LSR_PE | SERIAL_LSR_FE |
SERIAL_LSR_BI);
}
static void ProcessModemStatus(struct quatech_port *qt_port,
unsigned char modem_status)
{
qt_port->shadowMSR = modem_status;
wake_up_interruptible(&qt_port->wait);
}
static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
unsigned char data)
{
struct urb *urb = port->read_urb;
if (urb->actual_length)
tty_insert_flip_char(tty, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
{
struct tty_struct *tty;
int status;
struct quatech_port *quatech_port;
status = urb->status;
if (status) {
dbg("nonzero write bulk status received:%d\n", status);
return;
}
quatech_port = urb->context;
dbg("%s - port %d\n", __func__, quatech_port->port_num);
tty = tty_port_tty_get(&quatech_port->port->port);
if (tty)
tty_wakeup(tty);
tty_kref_put(tty);
}
static void qt_interrupt_callback(struct urb *urb)
{
/* FIXME */
}
static void qt_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
unsigned char *data;
struct tty_struct *tty;
unsigned int index;
unsigned int RxCount;
int i, result;
int flag, flag_data;
if (urb->status) {
qt_port->ReadBulkStopped = 1;
dbg("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
return;
}
tty = tty_port_tty_get(&port->port);
if (!tty) {
dbg("%s - bad tty pointer - exiting", __func__);
return;
}
/* Check the serial pointer before it is dereferenced below. */
if (!serial) {
dbg("%s - bad serial pointer, exiting\n", __func__);
goto exit;
}
data = urb->transfer_buffer;
RxCount = urb->actual_length;
/* index = MINOR(port->tty->device) - serial->minor; */
index = tty->index - serial->minor;
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
if (port_paranoia_check(port, __func__) != 0) {
dbg("%s - port_paranoia_check, exiting\n", __func__);
qt_port->ReadBulkStopped = 1;
goto exit;
}
if (qt_port->closePending == 1) {
/* We're closing, stop reading */
dbg("%s - closePending == 1\n", __func__);
qt_port->ReadBulkStopped = 1;
goto exit;
}
/*
* RxHolding is asserted by throttle; if it is set, we are not
* receiving any more characters, and we let the box handle the
* flow control.
*/
if (qt_port->RxHolding == 1) {
qt_port->ReadBulkStopped = 1;
goto exit;
}
if (urb->status) {
qt_port->ReadBulkStopped = 1;
dbg("%s - nonzero read bulk status received: %d\n",
__func__, urb->status);
goto exit;
}
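/*
 * Incoming data may carry in-band status escape sequences of the form
 * 0x1b 0x1b <type> <value>: type 0x00 is followed by a line status byte,
 * type 0x01 by a modem status byte, and type 0xff means the two 0x1b
 * bytes are literal data. Everything else is passed to the tty layer
 * unchanged. The loop below implements this parsing.
 */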
if (tty && RxCount) {
flag_data = 0;
for (i = 0; i < RxCount; ++i) {
/* Look ahead code here */
if ((i <= (RxCount - 3)) && (data[i] == 0x1b)
&& (data[i + 1] == 0x1b)) {
flag = 0;
switch (data[i + 2]) {
case 0x00:
/* line status change, 4th byte must follow */
if (i > (RxCount - 4)) {
dbg("Illegal escape seuences in received data\n");
break;
}
ProcessLineStatus(qt_port, data[i + 3]);
i += 3;
flag = 1;
break;
case 0x01:
/* modem status change, 4th byte must follow */
dbg("Modem status change.\n");
if (i > (RxCount - 4)) {
dbg("Illegal escape sequences in received data\n");
break;
}
ProcessModemStatus(qt_port,
data[i + 3]);
i += 3;
flag = 1;
break;
case 0xff:
dbg("No status sequence.\n");
if (tty) {
ProcessRxChar(tty, port, data[i]);
ProcessRxChar(tty, port, data[i + 1]);
}
i += 2;
break;
}
if (flag == 1)
continue;
}
if (tty && urb->actual_length)
tty_insert_flip_char(tty, data[i], TTY_NORMAL);
}
tty_flip_buffer_push(tty);
}
/* Continue trying to always read */
usb_fill_bulk_urb(port->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer,
port->read_urb->transfer_buffer_length,
qt_read_bulk_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dbg("%s - failed resubmitting read urb, error %d",
__func__, result);
else {
if (tty && RxCount) {
tty_flip_buffer_push(tty);
tty_schedule_flip(tty);
}
}
schedule_work(&port->work);
exit:
tty_kref_put(tty);
}
/*
* qt_get_device
* Issue a GET_DEVICE vendor-specific request on the default control pipe.
* If successful, fills in the qt_get_device_data structure pointed to by
* device_data; otherwise returns a negative error number describing the problem.
*/
static int qt_get_device(struct usb_serial *serial,
struct qt_get_device_data *device_data)
{
int result;
unsigned char *transfer_buffer;
transfer_buffer =
kmalloc(sizeof(struct qt_get_device_data), GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_SET_GET_DEVICE, 0xc0, 0, 0,
transfer_buffer,
sizeof(struct qt_get_device_data), 300);
if (result > 0)
memcpy(device_data, transfer_buffer,
sizeof(struct qt_get_device_data));
kfree(transfer_buffer);
return result;
}
/****************************************************************************
* BoxSetPrebufferLevel
* Tells the box when to assert flow control
****************************************************************************/
static int BoxSetPrebufferLevel(struct usb_serial *serial)
{
int result;
__u16 buffer_length;
buffer_length = PREFUFF_LEVEL_CONSERVATIVE;
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_GET_SET_PREBUF_TRIG_LVL, 0x40,
buffer_length, 0, NULL, 0, 300);
return result;
}
/****************************************************************************
* BoxSetATC
* Tells the box when to assert automatic transmitter control
****************************************************************************/
static int BoxSetATC(struct usb_serial *serial, __u16 n_Mode)
{
int result;
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SET_ATF, 0x40, n_Mode, 0, NULL, 0, 300);
return result;
}
/**
* qt_set_device
* Issue a SET_DEVICE vendor-specific request on the default control pipe.
* If successful, returns the number of bytes written; otherwise it returns
* a negative error number describing the problem.
*/
static int qt_set_device(struct usb_serial *serial,
struct qt_get_device_data *device_data)
{
int result;
__u16 PortSettings;
PortSettings = ((__u16) (device_data->portb));
PortSettings = (PortSettings << 8);
PortSettings += ((__u16) (device_data->porta));
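/*
 * Illustrative packing (example values, not from the original code):
 * portb = 0x41, porta = 0x02 gives PortSettings = (0x41 << 8) | 0x02 =
 * 0x4102, i.e. portb rides in the high byte and porta in the low byte
 * of the control request's wValue.
 */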
dbg("%s - PortSettings = 0x%x\n", __func__, PortSettings);
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SET_GET_DEVICE, 0x40, PortSettings,
0, NULL, 0, 300);
return result;
}
static int qt_open_channel(struct usb_serial *serial, __u16 Uart_Number,
struct qt_open_channel_data *pDeviceData)
{
int result;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
USBD_TRANSFER_DIRECTION_IN, 1, Uart_Number,
pDeviceData,
sizeof(struct qt_open_channel_data), 300);
return result;
}
static int qt_close_channel(struct usb_serial *serial, __u16 Uart_Number)
{
int result;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
USBD_TRANSFER_DIRECTION_OUT, 0, Uart_Number,
NULL, 0, 300);
return result;
}
/****************************************************************************
* BoxGetRegister
* Issue a GET_REGISTER vendor-specific request on the default control pipe.
* If successful, fills in pValue with the value of the register asked for.
****************************************************************************/
static int BoxGetRegister(struct usb_serial *serial, unsigned short Uart_Number,
unsigned short Register_Num, __u8 *pValue)
{
int result;
result =
usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_GET_SET_REGISTER, 0xC0, Register_Num,
Uart_Number, (void *)pValue, sizeof(*pValue), 300);
return result;
}
/****************************************************************************
* BoxSetRegister
* Issue a SET_REGISTER vendor-specific request on the default control pipe.
* If successful, sets the named register to the requested value.
****************************************************************************/
static int BoxSetRegister(struct usb_serial *serial, unsigned short Uart_Number,
unsigned short Register_Num, unsigned short Value)
{
int result;
unsigned short RegAndByte;
RegAndByte = Value;
RegAndByte = RegAndByte << 8;
RegAndByte = RegAndByte + Register_Num;
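/*
 * Illustrative packing (example values): writing Value = 0x03 to
 * Register_Num = 0x04 (MODEM_CONTROL_REGISTER) gives RegAndByte =
 * (0x03 << 8) + 0x04 = 0x0304, so the data byte travels in the high
 * byte of wValue.
 */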
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_GET_SET_REGISTER, 0x40, RegAndByte, Uart_Number,
NULL, 0, 300);
return result;
}
/*
* qt_setuart
* Issue a SET_UART vendor-specific request on the default control pipe.
* If successful, sets the baud rate divisor and LCR value.
*/
static int qt_setuart(struct usb_serial *serial, unsigned short Uart_Number,
unsigned short default_divisor, unsigned char default_LCR)
{
int result;
unsigned short UartNumandLCR;
UartNumandLCR = (default_LCR << 8) + Uart_Number;
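/*
 * UartNumandLCR carries the LCR image in the high byte and the UART
 * number in the low byte. The divisor divides the base rate: e.g.
 * DEFAULT_DIVISOR 0x30 (48) gives MAX_BAUD_RATE / 48 = 460800 / 48 =
 * 9600 baud.
 */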
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_GET_SET_UART, 0x40, default_divisor,
UartNumandLCR, NULL, 0, 300);
return result;
}
static int BoxSetHW_FlowCtrl(struct usb_serial *serial, unsigned int index,
int bSet)
{
__u8 mcr = 0;
__u8 msr = 0;
__u16 MOUT_Value = 0; /* 16 bits wide: mcr is shifted into the high byte below */
unsigned int status;
if (bSet == 1) {
/*
 * flow control: the box will clear the RTS line to prevent the
 * remote device from transmitting more characters
 */
mcr = SERIAL_MCR_RTS;
} else {
/* no flow control to remote device */
mcr = 0;
}
MOUT_Value = mcr << 8;
if (bSet == 1) {
/* flow control, box will inhibit xmit data if CTS line is
* asserted */
msr = SERIAL_MSR_CTS;
} else {
/* the box will not inhibit xmit data due to the CTS line */
msr = 0;
}
MOUT_Value |= msr;
status =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_HW_FLOW_CONTROL_MASK, 0x40, MOUT_Value,
index, NULL, 0, 300);
return status;
}
static int BoxSetSW_FlowCtrl(struct usb_serial *serial, __u16 index,
unsigned char stop_char, unsigned char start_char)
{
__u16 nSWflowout;
int result;
nSWflowout = start_char << 8;
nSWflowout |= (unsigned short)stop_char;
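/*
 * Illustrative packing (example values): the conventional XON/XOFF
 * pair start_char = 0x11 (XON) and stop_char = 0x13 (XOFF) packs to
 * nSWflowout = 0x1113.
 */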
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SW_FLOW_CONTROL_MASK, 0x40, nSWflowout,
index, NULL, 0, 300);
return result;
}
static int BoxDisable_SW_FlowCtrl(struct usb_serial *serial, __u16 index)
{
int result;
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SW_FLOW_CONTROL_DISABLE, 0x40, 0, index,
NULL, 0, 300);
return result;
}
static int qt_startup(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct quatech_port *qt_port;
struct qt_get_device_data DeviceData;
int i;
int status;
dbg("enterting %s", __func__);
/* Now setup per port private data */
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
qt_port = kzalloc(sizeof(*qt_port), GFP_KERNEL);
if (!qt_port) {
dbg("%s: kmalloc for quatech_port (%d) failed!.",
__func__, i);
for (--i; i >= 0; i--) {
port = serial->port[i];
kfree(usb_get_serial_port_data(port));
usb_set_serial_port_data(port, NULL);
}
return -ENOMEM;
}
spin_lock_init(&qt_port->lock);
usb_set_serial_port_data(port, qt_port);
}
status = qt_get_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "box_get_device failed");
goto startup_error;
}
dbg(__FILE__ "DeviceData.portb = 0x%x", DeviceData.portb);
DeviceData.portb &= ~FULLPWRBIT;
dbg(__FILE__ "Changing DeviceData.portb to 0x%x", DeviceData.portb);
status = qt_set_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "qt_set_device failed\n");
goto startup_error;
}
status = qt_get_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "qt_get_device failed");
goto startup_error;
}
switch (serial->dev->descriptor.idProduct) {
case QUATECH_DSU100:
case QUATECH_QSU100:
case QUATECH_ESU100A:
case QUATECH_ESU100B:
case QUATECH_HSU100A:
case QUATECH_HSU100B:
case QUATECH_HSU100C:
case QUATECH_HSU100D:
DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
DeviceData.porta |= CLKS_X4;
DeviceData.portb &= ~(LOOPMODE_BITS);
DeviceData.portb |= RS232_MODE;
break;
case QUATECH_SSU200:
case QUATECH_DSU200:
case QUATECH_QSU200:
case QUATECH_ESU200A:
case QUATECH_ESU200B:
case QUATECH_HSU200A:
case QUATECH_HSU200B:
case QUATECH_HSU200C:
case QUATECH_HSU200D:
DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
DeviceData.porta |= CLKS_X4;
DeviceData.portb &= ~(LOOPMODE_BITS);
DeviceData.portb |= ALL_LOOPBACK;
break;
default:
DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
DeviceData.porta |= CLKS_X4;
DeviceData.portb &= ~(LOOPMODE_BITS);
DeviceData.portb |= RS232_MODE;
break;
}
status = BoxSetPrebufferLevel(serial); /* sets to default value */
if (status < 0) {
dbg(__FILE__ "BoxSetPrebufferLevel failed\n");
goto startup_error;
}
status = BoxSetATC(serial, ATC_DISABLED);
if (status < 0) {
dbg(__FILE__ "BoxSetATC failed\n");
goto startup_error;
}
dbg(__FILE__ "DeviceData.portb = 0x%x", DeviceData.portb);
DeviceData.portb |= NEXT_BOARD_POWER_BIT;
dbg(__FILE__ "Changing DeviceData.portb to 0x%x", DeviceData.portb);
status = qt_set_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "qt_set_device failed\n");
goto startup_error;
}
dbg("Exit Success %s\n", __func__);
return 0;
startup_error:
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
qt_port = qt_get_port_private(port);
kfree(qt_port);
usb_set_serial_port_data(port, NULL);
}
dbg("Exit fail %s\n", __func__);
return -EIO;
}
static void qt_release(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct quatech_port *qt_port;
int i;
dbg("enterting %s", __func__);
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!port)
continue;
qt_port = usb_get_serial_port_data(port);
kfree(qt_port);
usb_set_serial_port_data(port, NULL);
}
}
static int qt_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_serial *serial;
struct quatech_port *quatech_port;
struct quatech_port *port0;
struct qt_open_channel_data ChannelData;
int result;
if (port_paranoia_check(port, __func__))
return -ENODEV;
dbg("%s - port %d\n", __func__, port->number);
serial = port->serial;
if (serial_paranoia_check(serial, __func__))
return -ENODEV;
quatech_port = qt_get_port_private(port);
port0 = qt_get_port_private(serial->port[0]);
if (quatech_port == NULL || port0 == NULL)
return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
port0->open_ports++;
result = qt_get_device(serial, &port0->DeviceData);
/* Port specific setups */
result = qt_open_channel(serial, port->number, &ChannelData);
if (result < 0) {
dbg(__FILE__ "qt_open_channel failed\n");
return result;
}
dbg(__FILE__ "qt_open_channel completed.\n");
/* FIXME: are these needed? Does it even do anything useful? */
quatech_port->shadowLSR = ChannelData.line_status &
(SERIAL_LSR_OE | SERIAL_LSR_PE | SERIAL_LSR_FE | SERIAL_LSR_BI);
quatech_port->shadowMSR = ChannelData.modem_status &
(SERIAL_MSR_CTS | SERIAL_MSR_DSR | SERIAL_MSR_RI | SERIAL_MSR_CD);
/* Set baud rate to default and turn off (default) flow control here */
result = qt_setuart(serial, port->number, DEFAULT_DIVISOR, DEFAULT_LCR);
if (result < 0) {
dbg(__FILE__ "qt_setuart failed\n");
return result;
}
dbg(__FILE__ "qt_setuart completed.\n");
/*
* Put this here to make it responsive to stty and defaults set by
* the tty layer
*/
/* FIXME: is this needed? */
/* qt_set_termios(tty, port, NULL); */
/* Check to see if we've set up our endpoint info yet */
if (port0->open_ports == 1) {
if (serial->port[0]->interrupt_in_buffer == NULL) {
/* set up interrupt urb */
usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
serial->dev,
usb_rcvintpipe(serial->dev,
serial->port[0]->interrupt_in_endpointAddress),
serial->port[0]->interrupt_in_buffer,
serial->port[0]->
interrupt_in_urb->transfer_buffer_length,
qt_interrupt_callback, serial,
serial->port[0]->
interrupt_in_urb->interval);
result =
usb_submit_urb(serial->port[0]->interrupt_in_urb,
GFP_KERNEL);
if (result) {
dev_err(&port->dev,
"%s - Error %d submitting "
"interrupt urb\n", __func__, result);
}
}
}
dbg("port number is %d\n", port->number);
dbg("serial number is %d\n", port->serial->minor);
dbg("Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
dbg("BulkOut endpoint is %d\n", port->bulk_out_endpointAddress);
dbg("Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress);
dbg("port's number in the device is %d\n", quatech_port->port_num);
quatech_port->read_urb = port->read_urb;
/* set up our bulk in urb */
usb_fill_bulk_urb(quatech_port->read_urb,
serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->bulk_in_buffer,
quatech_port->read_urb->transfer_buffer_length,
qt_read_bulk_callback, quatech_port);
dbg("qt_open: bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
quatech_port->read_urb_busy = true;
result = usb_submit_urb(quatech_port->read_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev,
"%s - Error %d submitting control urb\n",
__func__, result);
quatech_port->read_urb_busy = false;
}
/* initialize our wait queues */
init_waitqueue_head(&quatech_port->wait);
init_waitqueue_head(&quatech_port->msr_wait);
/* initialize our icount structure */
memset(&(quatech_port->icount), 0x00, sizeof(quatech_port->icount));
return 0;
}
static int qt_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial;
int chars = 0;
serial = get_usb_serial(port, __func__);
dbg("%s - port %d\n", __func__, port->number);
if (serial->num_bulk_out) {
if (port->write_urb->status == -EINPROGRESS)
chars = port->write_urb->transfer_buffer_length;
}
dbg("%s - returns %d\n", __func__, chars);
return chars;
}
static void qt_block_until_empty(struct tty_struct *tty,
struct quatech_port *qt_port)
{
int timeout = HZ / 10;
int wait = 30;
int count;
while (1) {
count = qt_chars_in_buffer(tty);
if (count <= 0)
return;
interruptible_sleep_on_timeout(&qt_port->wait, timeout);
wait--;
if (wait == 0) {
dbg("%s - TIMEOUT", __func__);
return;
}
}
}
static void qt_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct quatech_port *qt_port;
struct quatech_port *port0;
struct tty_struct *tty;
int status;
unsigned int index;
status = 0;
dbg("%s - port %d\n", __func__, port->number);
tty = tty_port_tty_get(&port->port);
if (tty)
index = tty->index - serial->minor;
else
index = port->number - serial->minor;
qt_port = qt_get_port_private(port);
port0 = qt_get_port_private(serial->port[0]);
/* shutdown any bulk reads that might be going on */
if (serial->num_bulk_out)
usb_unlink_urb(port->write_urb);
if (serial->num_bulk_in)
usb_unlink_urb(port->read_urb);
/* wait for the transmitter to empty */
if (serial->dev && tty)
qt_block_until_empty(tty, qt_port);
tty_kref_put(tty);
/* Close uart channel */
status = qt_close_channel(serial, index);
if (status < 0)
dbg("%s - port %d qt_close_channel failed.\n",
__func__, port->number);
port0->open_ports--;
dbg("qt_num_open_ports in close%d:in port%d\n",
port0->open_ports, port->number);
if (port0->open_ports == 0) {
if (serial->port[0]->interrupt_in_urb) {
dbg("%s", "Shutdown interrupt_in_urb\n");
usb_kill_urb(serial->port[0]->interrupt_in_urb);
}
}
if (qt_port->write_urb) {
/* if this urb had a transfer buffer already (old tx) free it */
kfree(qt_port->write_urb->transfer_buffer);
usb_free_urb(qt_port->write_urb);
}
}
static int qt_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int result;
struct usb_serial *serial = get_usb_serial(port, __func__);
if (serial == NULL)
return -ENODEV;
dbg("%s - port %d\n", __func__, port->number);
if (count == 0) {
dbg("%s - write request of 0 bytes\n", __func__);
return 0;
}
/* only do something if we have a bulk out endpoint */
if (serial->num_bulk_out) {
if (port->write_urb->status == -EINPROGRESS) {
dbg("%s - already writing\n", __func__);
return 0;
}
count =
(count > port->bulk_out_size) ? port->bulk_out_size : count;
memcpy(port->write_urb->transfer_buffer, buf, count);
/* set up our urb */
usb_fill_bulk_urb(port->write_urb, serial->dev,
usb_sndbulkpipe(serial->dev,
port->
bulk_out_endpointAddress),
port->write_urb->transfer_buffer, count,
qt_write_bulk_callback, port);
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result)
dbg("%s - failed submitting write urb, error %d\n",
__func__, result);
else
result = count;
return result;
}
/* no bulk out, so return 0 bytes written */
return 0;
}
static int qt_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial;
struct quatech_port *qt_port;
unsigned long flags;
int retval = -EINVAL;
if (port_paranoia_check(port, __func__)) {
dbg("%s", "Invalid port\n");
return -1;
}
serial = get_usb_serial(port, __func__);
if (!serial)
return -ENODEV;
qt_port = qt_get_port_private(port);
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
if (serial->num_bulk_out) {
if (port->write_urb->status != -EINPROGRESS)
retval = port->bulk_out_size;
}
spin_unlock_irqrestore(&qt_port->lock, flags);
return retval;
}
static int qt_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct quatech_port *qt_port = qt_get_port_private(port);
struct usb_serial *serial = get_usb_serial(port, __func__);
unsigned int index;
dbg("%s cmd 0x%04x", __func__, cmd);
index = tty->index - serial->minor;
if (cmd == TIOCMIWAIT) {
while (qt_port != NULL) {
interruptible_sleep_on(&qt_port->msr_wait);
if (signal_pending(current))
return -ERESTARTSYS;
else {
char diff = qt_port->diff_status;
if (diff == 0)
return -EIO; /* no change => error */
/* Consume all events */
qt_port->diff_status = 0;
if (((arg & TIOCM_RNG)
&& (diff & SERIAL_MSR_RI))
|| ((arg & TIOCM_DSR)
&& (diff & SERIAL_MSR_DSR))
|| ((arg & TIOCM_CD)
&& (diff & SERIAL_MSR_CD))
|| ((arg & TIOCM_CTS)
&& (diff & SERIAL_MSR_CTS))) {
return 0;
}
}
}
return 0;
}
dbg("%s -No ioctl for that one. port = %d\n", __func__, port->number);
return -ENOIOCTLCMD;
}
static void qt_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
struct ktermios *termios = tty->termios;
unsigned char new_LCR = 0;
unsigned int cflag = termios->c_cflag;
unsigned int index;
int baud, divisor, remainder;
int status;
dbg("%s", __func__);
index = tty->index - port->serial->minor;
switch (cflag & CSIZE) {
case CS5:
new_LCR |= SERIAL_5_DATA;
break;
case CS6:
new_LCR |= SERIAL_6_DATA;
break;
case CS7:
new_LCR |= SERIAL_7_DATA;
break;
default:
case CS8:
new_LCR |= SERIAL_8_DATA;
break;
}
/* Parity stuff */
if (cflag & PARENB) {
if (cflag & PARODD)
new_LCR |= SERIAL_ODD_PARITY;
else
new_LCR |= SERIAL_EVEN_PARITY;
}
if (cflag & CSTOPB)
new_LCR |= SERIAL_TWO_STOPB;
else
new_LCR |= SERIAL_ONE_STOPB;
dbg("%s - 4\n", __func__);
/* That's the LCR stuff, go ahead and set it */
baud = tty_get_baud_rate(tty);
if (!baud)
/* pick a default, any default... */
baud = 9600;
dbg("%s - got baud = %d\n", __func__, baud);
divisor = MAX_BAUD_RATE / baud;
remainder = MAX_BAUD_RATE % baud;
/* Round to nearest divisor */
if (((remainder * 2) >= baud) && (baud != 110))
divisor++;
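/*
 * Worked arithmetic (example values): baud = 9600 gives divisor
 * 460800 / 9600 = 48 exactly; baud = 134 gives divisor 3438 with
 * remainder 108, and since 108 * 2 >= 134 the divisor is rounded up
 * to 3439. 110 baud is explicitly excluded from the rounding.
 */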
/*
 * Set the baud rate divisor and the LCR value computed above
 */
status =
qt_setuart(port->serial, index, (unsigned short)divisor, new_LCR);
if (status < 0) {
dbg(__FILE__ "qt_setuart failed\n");
return;
}
/* Now determine flow control */
if (cflag & CRTSCTS) {
dbg("%s - Enabling HW flow control port %d\n", __func__,
port->number);
/* Enable RTS/CTS flow control */
status = BoxSetHW_FlowCtrl(port->serial, index, 1);
if (status < 0) {
dbg(__FILE__ "BoxSetHW_FlowCtrl failed\n");
return;
}
} else {
/* Disable RTS/CTS flow control */
dbg("%s - disabling HW flow control port %d\n", __func__,
port->number);
status = BoxSetHW_FlowCtrl(port->serial, index, 0);
if (status < 0) {
dbg(__FILE__ "BoxSetHW_FlowCtrl failed\n");
return;
}
}
/* if we are implementing XON/XOFF, set the start and stop character in
* the device */
if (I_IXOFF(tty) || I_IXON(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
unsigned char start_char = START_CHAR(tty);
status =
BoxSetSW_FlowCtrl(port->serial, index, stop_char,
start_char);
if (status < 0)
dbg(__FILE__ "BoxSetSW_FlowCtrl (enabled) failed\n");
} else {
/* disable SW flow control */
status = BoxDisable_SW_FlowCtrl(port->serial, index);
if (status < 0)
dbg(__FILE__ "BoxSetSW_FlowCtrl (diabling) failed\n");
}
tty->termios->c_cflag &= ~CMSPAR;
/* FIXME: Error cases should be returning the actual bits changed only */
}
static void qt_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
u16 index, onoff;
unsigned int result;
unsigned long flags;
index = tty->index - serial->minor;
qt_port = qt_get_port_private(port);
if (break_state == -1)
onoff = 1;
else
onoff = 0;
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_BREAK_CONTROL, 0x40, onoff, index, NULL, 0, 300);
spin_unlock_irqrestore(&qt_port->lock, flags);
}
static inline int qt_real_tiocmget(struct tty_struct *tty,
struct usb_serial_port *port,
struct usb_serial *serial)
{
u8 mcr;
u8 msr;
unsigned int result = 0;
int status;
unsigned int index;
dbg("%s - port %d, tty =0x%p\n", __func__, port->number, tty);
index = tty->index - serial->minor;
status =
BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
if (status >= 0) {
status =
BoxGetRegister(port->serial, index,
MODEM_STATUS_REGISTER, &msr);
}
if (status >= 0) {
result = ((mcr & SERIAL_MCR_DTR) ? TIOCM_DTR : 0)
/* DTR IS SET */
| ((mcr & SERIAL_MCR_RTS) ? TIOCM_RTS : 0)
/* RTS IS SET */
| ((msr & SERIAL_MSR_CTS) ? TIOCM_CTS : 0)
/* CTS is set */
| ((msr & SERIAL_MSR_CD) ? TIOCM_CAR : 0)
/* Carrier detect is set */
| ((msr & SERIAL_MSR_RI) ? TIOCM_RI : 0)
/* Ring indicator set */
| ((msr & SERIAL_MSR_DSR) ? TIOCM_DSR : 0);
/* DSR is set */
return result;
} else
return -ESPIPE;
}
static inline int qt_real_tiocmset(struct tty_struct *tty,
struct usb_serial_port *port,
struct usb_serial *serial,
unsigned int value)
{
u8 mcr;
int status;
unsigned int index;
dbg("%s - port %d\n", __func__, port->number);
index = tty->index - serial->minor;
status =
BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
if (status < 0)
return -ESPIPE;
/*
 * Turn off RTS, DTR and loopback, then turn on only what was
 * asked for
 */
mcr &= ~(SERIAL_MCR_RTS | SERIAL_MCR_DTR | SERIAL_MCR_LOOP);
if (value & TIOCM_RTS)
mcr |= SERIAL_MCR_RTS;
if (value & TIOCM_DTR)
mcr |= SERIAL_MCR_DTR;
if (value & TIOCM_LOOP)
mcr |= SERIAL_MCR_LOOP;
status =
BoxSetRegister(port->serial, index, MODEM_CONTROL_REGISTER, mcr);
if (status < 0)
return -ESPIPE;
else
return 0;
}
static int qt_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
int retval = -ENODEV;
unsigned long flags;
dbg("In %s\n", __func__);
if (!serial)
return -ENODEV;
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
retval = qt_real_tiocmget(tty, port, serial);
spin_unlock_irqrestore(&qt_port->lock, flags);
return retval;
}
static int qt_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
unsigned long flags;
int retval = -ENODEV;
dbg("In %s\n", __func__);
if (!serial)
return -ENODEV;
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - qt_port->RxHolding = %d\n", __func__, qt_port->RxHolding);
retval = qt_real_tiocmset(tty, port, serial, set);
spin_unlock_irqrestore(&qt_port->lock, flags);
return retval;
}
static void qt_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
unsigned long flags;
dbg("%s - port %d\n", __func__, port->number);
if (!serial)
return;
qt_port = qt_get_port_private(port);
spin_lock_irqsave(&qt_port->lock, flags);
/* pass on to the driver specific version of this function */
qt_port->RxHolding = 1;
dbg("%s - port->RxHolding = 1\n", __func__);
spin_unlock_irqrestore(&qt_port->lock, flags);
return;
}
static void qt_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
unsigned long flags;
unsigned int result;
if (!serial)
return;
qt_port = qt_get_port_private(port);
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
if (qt_port->RxHolding == 1) {
dbg("%s -qt_port->RxHolding == 1\n", __func__);
qt_port->RxHolding = 0;
dbg("%s - qt_port->RxHolding = 0\n", __func__);
/* if we have a bulk endpoint, start it up */
if ((serial->num_bulk_in) && (qt_port->ReadBulkStopped == 1)) {
/* Start reading from the device */
usb_fill_bulk_urb(port->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer,
port->read_urb->
transfer_buffer_length,
qt_read_bulk_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
err("%s - failed restarting read urb, error %d",
__func__, result);
}
}
spin_unlock_irqrestore(&qt_port->lock, flags);
return;
}
static int qt_calc_num_ports(struct usb_serial *serial)
{
int num_ports;
dbg("numberofendpoints: %d\n",
(int)serial->interface->cur_altsetting->desc.bNumEndpoints);
dbg("numberofendpoints: %d\n",
(int)serial->interface->altsetting->desc.bNumEndpoints);
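/*
 * The count below assumes one shared interrupt-in endpoint plus a
 * bulk-in/bulk-out pair per port, hence (endpoints - 1) / 2; this
 * reading is inferred from how qt_open() uses the endpoints.
 */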
num_ports =
(serial->interface->cur_altsetting->desc.bNumEndpoints - 1) / 2;
return num_ports;
}
static struct usb_serial_driver quatech_device = {
.driver = {
.owner = THIS_MODULE,
.name = "serqt",
},
.description = DRIVER_DESC,
.usb_driver = &serqt_usb_driver,
.id_table = serqt_id_table,
.num_ports = 8,
.open = qt_open,
.close = qt_close,
.write = qt_write,
.write_room = qt_write_room,
.chars_in_buffer = qt_chars_in_buffer,
.throttle = qt_throttle,
.unthrottle = qt_unthrottle,
.calc_num_ports = qt_calc_num_ports,
.ioctl = qt_ioctl,
.set_termios = qt_set_termios,
.break_ctl = qt_break,
.tiocmget = qt_tiocmget,
.tiocmset = qt_tiocmset,
.attach = qt_startup,
.release = qt_release,
};
static int __init serqt_usb_init(void)
{
int retval;
dbg("%s\n", __func__);
/* register with usb-serial */
retval = usb_serial_register(&quatech_device);
if (retval)
goto failed_usb_serial_register;
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
/* register with usb */
retval = usb_register(&serqt_usb_driver);
if (retval == 0)
return 0;
/* if we're here, usb_register() failed */
usb_serial_deregister(&quatech_device);
failed_usb_serial_register:
return retval;
}
static void __exit serqt_usb_exit(void)
{
usb_deregister(&serqt_usb_driver);
usb_serial_deregister(&quatech_device);
}
module_init(serqt_usb_init);
module_exit(serqt_usb_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
troth/linux-kernel | drivers/input/touchscreen/htcpen.c | 2185 | 5823 | /*
* HTC Shift touchscreen driver
*
* Copyright (C) 2008 Pau Oliva Fora <pof@eslack.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/ioport.h>
#include <linux/dmi.h>
MODULE_AUTHOR("Pau Oliva Fora <pau@eslack.org>");
MODULE_DESCRIPTION("HTC Shift touchscreen driver");
MODULE_LICENSE("GPL");
#define HTCPEN_PORT_IRQ_CLEAR 0x068
#define HTCPEN_PORT_INIT 0x06c
#define HTCPEN_PORT_INDEX 0x0250
#define HTCPEN_PORT_DATA 0x0251
#define HTCPEN_IRQ 3
#define DEVICE_ENABLE 0xa2
#define DEVICE_DISABLE 0xa3
#define X_INDEX 3
#define Y_INDEX 5
#define TOUCH_INDEX 0xb
#define LSB_XY_INDEX 0xc
#define X_AXIS_MAX 2040
#define Y_AXIS_MAX 2040
static bool invert_x;
module_param(invert_x, bool, 0644);
MODULE_PARM_DESC(invert_x, "If set, X axis is inverted");
static bool invert_y;
module_param(invert_y, bool, 0644);
MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted");
static irqreturn_t htcpen_interrupt(int irq, void *handle)
{
struct input_dev *htcpen_dev = handle;
unsigned short x, y, xy;
/* 0 = press; 1 = release */
outb_p(TOUCH_INDEX, HTCPEN_PORT_INDEX);
if (inb_p(HTCPEN_PORT_DATA)) {
input_report_key(htcpen_dev, BTN_TOUCH, 0);
} else {
outb_p(X_INDEX, HTCPEN_PORT_INDEX);
x = inb_p(HTCPEN_PORT_DATA);
outb_p(Y_INDEX, HTCPEN_PORT_INDEX);
y = inb_p(HTCPEN_PORT_DATA);
outb_p(LSB_XY_INDEX, HTCPEN_PORT_INDEX);
xy = inb_p(HTCPEN_PORT_DATA);
/* get high resolution value of X and Y using LSB */
x = X_AXIS_MAX - ((x * 8) + ((xy >> 4) & 0xf));
y = (y * 8) + (xy & 0xf);
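/*
 * Worked example (illustrative values): with raw readings x = 100,
 * y = 50 and xy = 0x53, the lines above yield x = 2040 - (100 * 8 + 5)
 * = 1235 and y = 50 * 8 + 3 = 403; the index registers hold the upper
 * 8 bits of each axis and the LSB_XY register holds the low 4 bits
 * (X in the high nibble, Y in the low nibble).
 */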
if (invert_x)
x = X_AXIS_MAX - x;
if (invert_y)
y = Y_AXIS_MAX - y;
if (x != X_AXIS_MAX && x != 0) {
input_report_key(htcpen_dev, BTN_TOUCH, 1);
input_report_abs(htcpen_dev, ABS_X, x);
input_report_abs(htcpen_dev, ABS_Y, y);
}
}
input_sync(htcpen_dev);
inb_p(HTCPEN_PORT_IRQ_CLEAR);
return IRQ_HANDLED;
}
static int htcpen_open(struct input_dev *dev)
{
outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT);
return 0;
}
static void htcpen_close(struct input_dev *dev)
{
outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT);
synchronize_irq(HTCPEN_IRQ);
}
static int htcpen_isa_probe(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev;
int err = -EBUSY;
if (!request_region(HTCPEN_PORT_IRQ_CLEAR, 1, "htcpen")) {
printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n",
HTCPEN_PORT_IRQ_CLEAR);
goto request_region1_failed;
}
if (!request_region(HTCPEN_PORT_INIT, 1, "htcpen")) {
printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n",
HTCPEN_PORT_INIT);
goto request_region2_failed;
}
if (!request_region(HTCPEN_PORT_INDEX, 2, "htcpen")) {
printk(KERN_ERR "htcpen: unable to get IO region 0x%x\n",
HTCPEN_PORT_INDEX);
goto request_region3_failed;
}
htcpen_dev = input_allocate_device();
if (!htcpen_dev) {
printk(KERN_ERR "htcpen: can't allocate device\n");
err = -ENOMEM;
goto input_alloc_failed;
}
htcpen_dev->name = "HTC Shift EC TouchScreen";
htcpen_dev->id.bustype = BUS_ISA;
htcpen_dev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
htcpen_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
input_set_abs_params(htcpen_dev, ABS_X, 0, X_AXIS_MAX, 0, 0);
input_set_abs_params(htcpen_dev, ABS_Y, 0, Y_AXIS_MAX, 0, 0);
htcpen_dev->open = htcpen_open;
htcpen_dev->close = htcpen_close;
err = request_irq(HTCPEN_IRQ, htcpen_interrupt, 0, "htcpen",
htcpen_dev);
if (err) {
printk(KERN_ERR "htcpen: irq busy\n");
goto request_irq_failed;
}
inb_p(HTCPEN_PORT_IRQ_CLEAR);
err = input_register_device(htcpen_dev);
if (err)
goto input_register_failed;
dev_set_drvdata(dev, htcpen_dev);
return 0;
input_register_failed:
free_irq(HTCPEN_IRQ, htcpen_dev);
request_irq_failed:
input_free_device(htcpen_dev);
input_alloc_failed:
release_region(HTCPEN_PORT_INDEX, 2);
request_region3_failed:
release_region(HTCPEN_PORT_INIT, 1);
request_region2_failed:
release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
request_region1_failed:
return err;
}
static int htcpen_isa_remove(struct device *dev, unsigned int id)
{
struct input_dev *htcpen_dev = dev_get_drvdata(dev);
input_unregister_device(htcpen_dev);
free_irq(HTCPEN_IRQ, htcpen_dev);
release_region(HTCPEN_PORT_INDEX, 2);
release_region(HTCPEN_PORT_INIT, 1);
release_region(HTCPEN_PORT_IRQ_CLEAR, 1);
return 0;
}
#ifdef CONFIG_PM
static int htcpen_isa_suspend(struct device *dev, unsigned int n,
pm_message_t state)
{
outb_p(DEVICE_DISABLE, HTCPEN_PORT_INIT);
return 0;
}
static int htcpen_isa_resume(struct device *dev, unsigned int n)
{
outb_p(DEVICE_ENABLE, HTCPEN_PORT_INIT);
return 0;
}
#endif
static struct isa_driver htcpen_isa_driver = {
.probe = htcpen_isa_probe,
.remove = htcpen_isa_remove,
#ifdef CONFIG_PM
.suspend = htcpen_isa_suspend,
.resume = htcpen_isa_resume,
#endif
.driver = {
.owner = THIS_MODULE,
.name = "htcpen",
}
};
static struct dmi_system_id htcshift_dmi_table[] __initdata = {
{
.ident = "Shift",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "High Tech Computer Corp"),
DMI_MATCH(DMI_PRODUCT_NAME, "Shift"),
},
},
{ }
};
MODULE_DEVICE_TABLE(dmi, htcshift_dmi_table);
static int __init htcpen_isa_init(void)
{
if (!dmi_check_system(htcshift_dmi_table))
return -ENODEV;
return isa_register_driver(&htcpen_isa_driver, 1);
}
static void __exit htcpen_isa_exit(void)
{
isa_unregister_driver(&htcpen_isa_driver);
}
module_init(htcpen_isa_init);
module_exit(htcpen_isa_exit);
| gpl-2.0 |
Haxynox/kernel_samsung_n7100 | drivers/staging/serqt_usb2/serqt_usb2.c | 2185 | 42375 | /*
* This code was developed for the Quatech USB line for linux, it used
* much of the code developed by Greg Kroah-Hartman for USB serial devices
*
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
static int debug;
/* Version Information */
#define DRIVER_VERSION "v2.14"
#define DRIVER_AUTHOR "Tim Gobeli, Quatech, Inc"
#define DRIVER_DESC "Quatech USB to Serial Driver"
#define USB_VENDOR_ID_QUATECH 0x061d /* Quatech VID */
#define QUATECH_SSU200 0xC030 /* SSU200 */
#define QUATECH_DSU100 0xC040 /* DSU100 */
#define QUATECH_DSU200 0xC050 /* DSU200 */
#define QUATECH_QSU100 0xC060 /* QSU100 */
#define QUATECH_QSU200 0xC070 /* QSU200 */
#define QUATECH_ESU100A 0xC080 /* ESU100A */
#define QUATECH_ESU100B 0xC081 /* ESU100B */
#define QUATECH_ESU200A 0xC0A0 /* ESU200A */
#define QUATECH_ESU200B 0xC0A1 /* ESU200B */
#define QUATECH_HSU100A 0xC090 /* HSU100A */
#define QUATECH_HSU100B 0xC091 /* HSU100B */
#define QUATECH_HSU100C 0xC092 /* HSU100C */
#define QUATECH_HSU100D 0xC093 /* HSU100D */
#define QUATECH_HSU200A 0xC0B0 /* HSU200A */
#define QUATECH_HSU200B 0xC0B1 /* HSU200B */
#define QUATECH_HSU200C 0xC0B2 /* HSU200C */
#define QUATECH_HSU200D 0xC0B3 /* HSU200D */
#define QUATECH_SSU100_2 0xC120 /* SSU100_2 */
#define QUATECH_DSU100_2 0xC140 /* DSU100_2 */
#define QUATECH_DSU400_2 0xC150 /* DSU400_2 */
#define QUATECH_QSU100_2 0xC160 /* QSU100_2 */
#define QUATECH_QSU400_2 0xC170 /* QSU400_2 */
#define QUATECH_ESU400_2 0xC180 /* ESU400_2 */
#define QUATECH_ESU100_2 0xC1A0 /* ESU100_2 */
#define QT_SET_GET_DEVICE 0xc2
#define QT_OPEN_CLOSE_CHANNEL 0xca
#define QT_GET_SET_PREBUF_TRIG_LVL 0xcc
#define QT_SET_ATF 0xcd
#define QT_GET_SET_REGISTER 0xc0
#define QT_GET_SET_UART 0xc1
#define QT_HW_FLOW_CONTROL_MASK 0xc5
#define QT_SW_FLOW_CONTROL_MASK 0xc6
#define QT_SW_FLOW_CONTROL_DISABLE 0xc7
#define QT_BREAK_CONTROL 0xc8
#define USBD_TRANSFER_DIRECTION_IN 0xc0
#define USBD_TRANSFER_DIRECTION_OUT 0x40
#define MAX_BAUD_RATE 460800
#define MAX_BAUD_REMAINDER 4608
#define DIV_LATCH_LS 0x00
#define XMT_HOLD_REGISTER 0x00
#define XVR_BUFFER_REGISTER 0x00
#define DIV_LATCH_MS 0x01
#define FIFO_CONTROL_REGISTER 0x02
#define LINE_CONTROL_REGISTER 0x03
#define MODEM_CONTROL_REGISTER 0x04
#define LINE_STATUS_REGISTER 0x05
#define MODEM_STATUS_REGISTER 0x06
#define SERIAL_MCR_DTR 0x01
#define SERIAL_MCR_RTS 0x02
#define SERIAL_MCR_LOOP 0x10
#define SERIAL_MSR_CTS 0x10
#define SERIAL_MSR_CD 0x80
#define SERIAL_MSR_RI 0x40
#define SERIAL_MSR_DSR 0x20
#define SERIAL_MSR_MASK 0xf0
#define SERIAL_8_DATA 0x03
#define SERIAL_7_DATA 0x02
#define SERIAL_6_DATA 0x01
#define SERIAL_5_DATA 0x00
#define SERIAL_ODD_PARITY 0X08
#define SERIAL_EVEN_PARITY 0X18
#define SERIAL_TWO_STOPB 0x04
#define SERIAL_ONE_STOPB 0x00
#define DEFAULT_DIVISOR 0x30 /* gives 9600 baud rate */
#define DEFAULT_LCR SERIAL_8_DATA /* 8, none, 1 */
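/*
 * Illustrative arithmetic: the divisor divides the base rate, so
 * MAX_BAUD_RATE / DEFAULT_DIVISOR = 460800 / 0x30 (48) = 9600 baud,
 * matching the comment above.
 */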
#define FULLPWRBIT 0x00000080
#define NEXT_BOARD_POWER_BIT 0x00000004
#define SERIAL_LSR_OE 0x02
#define SERIAL_LSR_PE 0x04
#define SERIAL_LSR_FE 0x08
#define SERIAL_LSR_BI 0x10
#define SERIAL_MSR_CTS 0x10
#define SERIAL_MSR_CD 0x80
#define SERIAL_MSR_RI 0x40
#define SERIAL_MSR_DSR 0x20
#define SERIAL_MSR_MASK 0xf0
#define PREFUFF_LEVEL_CONSERVATIVE 128
#define ATC_DISABLED 0x0
#define RR_BITS 0x03 /* for clearing clock bits */
#define DUPMODE_BITS 0xc0
#define CLKS_X4 0x02
#define LOOPMODE_BITS 0x41 /* LOOP1 = b6, LOOP0 = b0 (PORT B) */
#define ALL_LOOPBACK 0x01
#define MODEM_CTRL 0x40
#define RS232_MODE 0x00
static const struct usb_device_id serqt_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU200)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU200)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU200)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU100A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU100B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU200A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU200B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100C)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU100D)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200A)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200B)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200C)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_HSU200D)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU100_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU100_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU400_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU100_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU400_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU400_2)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU100_2)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, serqt_id_table);
struct qt_get_device_data {
__u8 porta;
__u8 portb;
__u8 portc;
};
struct qt_open_channel_data {
__u8 line_status;
__u8 modem_status;
};
struct quatech_port {
int port_num; /* number of the port */
struct urb *write_urb; /* write URB for this port */
struct urb *read_urb; /* read URB for this port */
struct urb *int_urb;
__u8 shadowLCR; /* last LCR value received */
__u8 shadowMCR; /* last MCR value received */
__u8 shadowMSR; /* last MSR value received */
__u8 shadowLSR; /* last LSR value received */
char open_ports;
/* Used for TIOCMIWAIT */
wait_queue_head_t msr_wait;
char prev_status, diff_status;
wait_queue_head_t wait;
struct async_icount icount;
struct usb_serial_port *port; /* owner of this object */
struct qt_get_device_data DeviceData;
spinlock_t lock;
bool read_urb_busy;
int RxHolding;
int ReadBulkStopped;
char closePending;
};
static struct usb_driver serqt_usb_driver = {
.name = "quatech-usb-serial",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = serqt_id_table,
.no_dynamic_id = 1,
};
static int port_paranoia_check(struct usb_serial_port *port,
const char *function)
{
if (!port) {
dbg("%s - port == NULL", function);
return -1;
}
if (!port->serial) {
dbg("%s - port->serial == NULL\n", function);
return -1;
}
return 0;
}
static int serial_paranoia_check(struct usb_serial *serial,
const char *function)
{
if (!serial) {
dbg("%s - serial == NULL\n", function);
return -1;
}
if (!serial->type) {
dbg("%s - serial->type == NULL!", function);
return -1;
}
return 0;
}
static inline struct quatech_port *qt_get_port_private(struct usb_serial_port
*port)
{
return (struct quatech_port *)usb_get_serial_port_data(port);
}
static inline void qt_set_port_private(struct usb_serial_port *port,
struct quatech_port *data)
{
usb_set_serial_port_data(port, (void *)data);
}
static struct usb_serial *get_usb_serial(struct usb_serial_port *port,
const char *function)
{
/* if no port was specified, or it fails a paranoia check */
if (!port ||
port_paranoia_check(port, function) ||
serial_paranoia_check(port->serial, function)) {
/*
* then say that we don't have a valid usb_serial thing,
* which will end up generating -ENODEV return values
*/
return NULL;
}
return port->serial;
}
static void ProcessLineStatus(struct quatech_port *qt_port,
unsigned char line_status)
{
qt_port->shadowLSR =
line_status & (SERIAL_LSR_OE | SERIAL_LSR_PE | SERIAL_LSR_FE |
SERIAL_LSR_BI);
return;
}
static void ProcessModemStatus(struct quatech_port *qt_port,
unsigned char modem_status)
{
qt_port->shadowMSR = modem_status;
wake_up_interruptible(&qt_port->wait);
return;
}
static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
unsigned char data)
{
struct urb *urb = port->read_urb;
if (urb->actual_length)
tty_insert_flip_char(tty, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
{
struct tty_struct *tty;
int status;
struct quatech_port *quatech_port;
status = urb->status;
if (status) {
dbg("nonzero write bulk status received:%d\n", status);
return;
}
quatech_port = urb->context;
dbg("%s - port %d\n", __func__, quatech_port->port_num);
tty = tty_port_tty_get(&quatech_port->port->port);
if (tty)
tty_wakeup(tty);
tty_kref_put(tty);
}
static void qt_interrupt_callback(struct urb *urb)
{
/* FIXME */
}
static void qt_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
unsigned char *data;
struct tty_struct *tty;
unsigned int RxCount;
int i, result;
int flag;
if (urb->status) {
qt_port->ReadBulkStopped = 1;
dbg("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
return;
}
tty = tty_port_tty_get(&port->port);
if (!tty) {
dbg("%s - bad tty pointer - exiting", __func__);
return;
}
data = urb->transfer_buffer;
RxCount = urb->actual_length;
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
if (port_paranoia_check(port, __func__) != 0) {
dbg("%s - port_paranoia_check, exiting\n", __func__);
qt_port->ReadBulkStopped = 1;
goto exit;
}
if (!serial) {
dbg("%s - bad serial pointer, exiting\n", __func__);
goto exit;
}
if (qt_port->closePending == 1) {
/* We're closing, stop reading */
dbg("%s - qt_port->closePending == 1\n", __func__);
qt_port->ReadBulkStopped = 1;
goto exit;
}
/*
* RxHolding is asserted by throttle, if we assert it, we're not
* receiving any more characters and let the box handle the flow
* control
*/
if (qt_port->RxHolding == 1) {
qt_port->ReadBulkStopped = 1;
goto exit;
}
if (urb->status) {
qt_port->ReadBulkStopped = 1;
dbg("%s - nonzero read bulk status received: %d\n",
__func__, urb->status);
goto exit;
}
if (tty && RxCount) {
for (i = 0; i < RxCount; ++i) {
/* Look ahead code here */
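/*
 * In-band status framing (reading inferred from the cases handled
 * below): the box inserts a two-byte 0x1b 0x1b escape plus an opcode
 * into the receive stream:
 *   0x1b 0x1b 0x00 <LSR>  line status change, one status byte follows
 *   0x1b 0x1b 0x01 <MSR>  modem status change, one status byte follows
 *   0x1b 0x1b 0xff        escaped literal pair of 0x1b 0x1b data bytes
 */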
if ((i <= (RxCount - 3)) && (data[i] == 0x1b)
&& (data[i + 1] == 0x1b)) {
flag = 0;
switch (data[i + 2]) {
case 0x00:
/* line status change, 4th byte must follow */
if (i > (RxCount - 4)) {
dbg("Illegal escape seuences in received data\n");
break;
}
ProcessLineStatus(qt_port, data[i + 3]);
i += 3;
flag = 1;
break;
case 0x01:
/* modem status change, 4th byte must follow */
dbg("Modem status change.\n");
if (i > (RxCount - 4)) {
dbg("Illegal escape sequences in received data\n");
break;
}
ProcessModemStatus(qt_port,
data[i + 3]);
i += 3;
flag = 1;
break;
case 0xff:
dbg("No status sequence.\n");
if (tty) {
ProcessRxChar(tty, port, data[i]);
ProcessRxChar(tty, port, data[i + 1]);
}
i += 2;
break;
}
if (flag == 1)
continue;
}
if (tty && urb->actual_length)
tty_insert_flip_char(tty, data[i], TTY_NORMAL);
}
tty_flip_buffer_push(tty);
}
/* Continue trying to always read */
usb_fill_bulk_urb(port->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer,
port->read_urb->transfer_buffer_length,
qt_read_bulk_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dbg("%s - failed resubmitting read urb, error %d",
__func__, result);
else {
if (tty && RxCount) {
tty_flip_buffer_push(tty);
tty_schedule_flip(tty);
}
}
schedule_work(&port->work);
exit:
tty_kref_put(tty);
}
/*
* qt_get_device
* Issue a GET_DEVICE vendor-specific request on the default control pipe.
* If successful, fills in the qt_get_device_data structure pointed to by
* device_data; otherwise returns a negative error number describing the problem.
*/
static int qt_get_device(struct usb_serial *serial,
struct qt_get_device_data *device_data)
{
int result;
unsigned char *transfer_buffer;
transfer_buffer =
kmalloc(sizeof(struct qt_get_device_data), GFP_KERNEL);
if (!transfer_buffer)
return -ENOMEM;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_SET_GET_DEVICE, 0xc0, 0, 0,
transfer_buffer,
sizeof(struct qt_get_device_data), 300);
if (result > 0)
memcpy(device_data, transfer_buffer,
sizeof(struct qt_get_device_data));
kfree(transfer_buffer);
return result;
}
/****************************************************************************
* BoxSetPrebufferLevel
* Tells the box when to assert flow control
****************************************************************************/
static int BoxSetPrebufferLevel(struct usb_serial *serial)
{
int result;
__u16 buffer_length;
buffer_length = PREFUFF_LEVEL_CONSERVATIVE;
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_GET_SET_PREBUF_TRIG_LVL, 0x40,
buffer_length, 0, NULL, 0, 300);
return result;
}
/****************************************************************************
* BoxSetATC
* Tells the box when to assert automatic transmitter control
****************************************************************************/
static int BoxSetATC(struct usb_serial *serial, __u16 n_Mode)
{
int result;
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SET_ATF, 0x40, n_Mode, 0, NULL, 0, 300);
return result;
}
/**
* qt_set_device
* Issue a SET_DEVICE vendor-specific request on the default control pipe.
* If successful, returns the number of bytes written; otherwise it returns
* a negative error number describing the problem.
*/
static int qt_set_device(struct usb_serial *serial,
struct qt_get_device_data *device_data)
{
int result;
__u16 PortSettings;
PortSettings = ((__u16) (device_data->portb));
PortSettings = (PortSettings << 8);
PortSettings += ((__u16) (device_data->porta));
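/*
 * Illustrative packing (example values, not from the original code):
 * portb = 0x41, porta = 0x02 gives PortSettings = (0x41 << 8) | 0x02 =
 * 0x4102, i.e. portb rides in the high byte and porta in the low byte
 * of the control request's wValue.
 */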
dbg("%s - PortSettings = 0x%x\n", __func__, PortSettings);
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SET_GET_DEVICE, 0x40, PortSettings,
0, NULL, 0, 300);
return result;
}
static int qt_open_channel(struct usb_serial *serial, __u16 Uart_Number,
struct qt_open_channel_data *pDeviceData)
{
int result;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
USBD_TRANSFER_DIRECTION_IN, 1, Uart_Number,
pDeviceData,
sizeof(struct qt_open_channel_data), 300);
return result;
}
static int qt_close_channel(struct usb_serial *serial, __u16 Uart_Number)
{
int result;
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_OPEN_CLOSE_CHANNEL,
USBD_TRANSFER_DIRECTION_OUT, 0, Uart_Number,
NULL, 0, 300);
return result;
}
/****************************************************************************
* BoxGetRegister
* Issue a GET_REGISTER vendor-specific request on the default control pipe.
* If successful, fills in pValue with the value of the register asked for.
****************************************************************************/
static int BoxGetRegister(struct usb_serial *serial, unsigned short Uart_Number,
unsigned short Register_Num, __u8 *pValue)
{
int result;
result =
usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
QT_GET_SET_REGISTER, 0xC0, Register_Num,
Uart_Number, (void *)pValue, sizeof(*pValue), 300);
return result;
}
/****************************************************************************
* BoxSetRegister
* Issue a SET_REGISTER vendor-specific request on the default control pipe.
* If successful, sets the named register to the requested value.
****************************************************************************/
static int BoxSetRegister(struct usb_serial *serial, unsigned short Uart_Number,
unsigned short Register_Num, unsigned short Value)
{
int result;
unsigned short RegAndByte;
RegAndByte = Value;
RegAndByte = RegAndByte << 8;
RegAndByte = RegAndByte + Register_Num;
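/*
 * Illustrative packing (example values): writing Value = 0x03 to
 * Register_Num = 0x04 (MODEM_CONTROL_REGISTER) gives RegAndByte =
 * (0x03 << 8) + 0x04 = 0x0304, so the data byte travels in the high
 * byte of wValue.
 */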
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_GET_SET_REGISTER, 0x40, RegAndByte, Uart_Number,
NULL, 0, 300);
return result;
}
/*
* qt_setuart
* Issue a SET_UART vendor-specific request on the default control pipe.
* If successful, sets the baud rate divisor and LCR value.
*/
static int qt_setuart(struct usb_serial *serial, unsigned short Uart_Number,
unsigned short default_divisor, unsigned char default_LCR)
{
int result;
unsigned short UartNumandLCR;
UartNumandLCR = (default_LCR << 8) + Uart_Number;
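/*
 * UartNumandLCR carries the LCR image in the high byte and the UART
 * number in the low byte. The divisor divides the base rate: e.g.
 * DEFAULT_DIVISOR 0x30 (48) gives MAX_BAUD_RATE / 48 = 460800 / 48 =
 * 9600 baud.
 */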
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_GET_SET_UART, 0x40, default_divisor,
UartNumandLCR, NULL, 0, 300);
return result;
}
static int BoxSetHW_FlowCtrl(struct usb_serial *serial, unsigned int index,
int bSet)
{
__u8 mcr = 0;
__u8 msr = 0;
__u16 MOUT_Value = 0; /* 16 bits wide: mcr is shifted into the high byte below */
unsigned int status;
if (bSet == 1) {
/*
 * flow control: the box will clear the RTS line to prevent the
 * remote device from transmitting more characters
 */
mcr = SERIAL_MCR_RTS;
} else {
/* no flow control to remote device */
mcr = 0;
}
MOUT_Value = mcr << 8;
if (bSet == 1) {
/* flow control, box will inhibit xmit data if CTS line is
* asserted */
msr = SERIAL_MSR_CTS;
} else {
/* the box will not inhibit xmit data due to the CTS line */
msr = 0;
}
MOUT_Value |= msr;
status =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_HW_FLOW_CONTROL_MASK, 0x40, MOUT_Value,
index, NULL, 0, 300);
return status;
}
static int BoxSetSW_FlowCtrl(struct usb_serial *serial, __u16 index,
unsigned char stop_char, unsigned char start_char)
{
__u16 nSWflowout;
int result;
nSWflowout = start_char << 8;
nSWflowout |= (unsigned short)stop_char;
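/*
 * Illustrative packing (example values): the conventional XON/XOFF
 * pair start_char = 0x11 (XON) and stop_char = 0x13 (XOFF) packs to
 * nSWflowout = 0x1113.
 */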
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SW_FLOW_CONTROL_MASK, 0x40, nSWflowout,
index, NULL, 0, 300);
return result;
}
static int BoxDisable_SW_FlowCtrl(struct usb_serial *serial, __u16 index)
{
int result;
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_SW_FLOW_CONTROL_DISABLE, 0x40, 0, index,
NULL, 0, 300);
return result;
}
static int qt_startup(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct quatech_port *qt_port;
struct qt_get_device_data DeviceData;
int i;
int status;
dbg("enterting %s", __func__);
/* Now setup per port private data */
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
qt_port = kzalloc(sizeof(*qt_port), GFP_KERNEL);
if (!qt_port) {
dbg("%s: kmalloc for quatech_port (%d) failed!.",
__func__, i);
for (--i; i >= 0; i--) {
port = serial->port[i];
kfree(usb_get_serial_port_data(port));
usb_set_serial_port_data(port, NULL);
}
return -ENOMEM;
}
spin_lock_init(&qt_port->lock);
usb_set_serial_port_data(port, qt_port);
}
status = qt_get_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "box_get_device failed");
goto startup_error;
}
dbg(__FILE__ "DeviceData.portb = 0x%x", DeviceData.portb);
DeviceData.portb &= ~FULLPWRBIT;
dbg(__FILE__ "Changing DeviceData.portb to 0x%x", DeviceData.portb);
status = qt_set_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "qt_set_device failed\n");
goto startup_error;
}
status = qt_get_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "qt_get_device failed");
goto startup_error;
}
switch (serial->dev->descriptor.idProduct) {
case QUATECH_DSU100:
case QUATECH_QSU100:
case QUATECH_ESU100A:
case QUATECH_ESU100B:
case QUATECH_HSU100A:
case QUATECH_HSU100B:
case QUATECH_HSU100C:
case QUATECH_HSU100D:
DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
DeviceData.porta |= CLKS_X4;
DeviceData.portb &= ~(LOOPMODE_BITS);
DeviceData.portb |= RS232_MODE;
break;
case QUATECH_SSU200:
case QUATECH_DSU200:
case QUATECH_QSU200:
case QUATECH_ESU200A:
case QUATECH_ESU200B:
case QUATECH_HSU200A:
case QUATECH_HSU200B:
case QUATECH_HSU200C:
case QUATECH_HSU200D:
DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
DeviceData.porta |= CLKS_X4;
DeviceData.portb &= ~(LOOPMODE_BITS);
DeviceData.portb |= ALL_LOOPBACK;
break;
default:
DeviceData.porta &= ~(RR_BITS | DUPMODE_BITS);
DeviceData.porta |= CLKS_X4;
DeviceData.portb &= ~(LOOPMODE_BITS);
DeviceData.portb |= RS232_MODE;
break;
}
status = BoxSetPrebufferLevel(serial); /* sets to default value */
if (status < 0) {
dbg(__FILE__ "BoxSetPrebufferLevel failed\n");
goto startup_error;
}
status = BoxSetATC(serial, ATC_DISABLED);
if (status < 0) {
dbg(__FILE__ "BoxSetATC failed\n");
goto startup_error;
}
dbg(__FILE__ "DeviceData.portb = 0x%x", DeviceData.portb);
DeviceData.portb |= NEXT_BOARD_POWER_BIT;
dbg(__FILE__ "Changing DeviceData.portb to 0x%x", DeviceData.portb);
status = qt_set_device(serial, &DeviceData);
if (status < 0) {
dbg(__FILE__ "qt_set_device failed\n");
goto startup_error;
}
dbg("Exit Success %s\n", __func__);
return 0;
startup_error:
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
qt_port = qt_get_port_private(port);
kfree(qt_port);
usb_set_serial_port_data(port, NULL);
}
dbg("Exit fail %s\n", __func__);
return -EIO;
}
static void qt_release(struct usb_serial *serial)
{
struct usb_serial_port *port;
struct quatech_port *qt_port;
int i;
dbg("enterting %s", __func__);
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!port)
continue;
qt_port = usb_get_serial_port_data(port);
kfree(qt_port);
usb_set_serial_port_data(port, NULL);
}
}
static int qt_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_serial *serial;
struct quatech_port *quatech_port;
struct quatech_port *port0;
struct qt_open_channel_data ChannelData;
int result;
if (port_paranoia_check(port, __func__))
return -ENODEV;
dbg("%s - port %d\n", __func__, port->number);
serial = port->serial;
if (serial_paranoia_check(serial, __func__))
return -ENODEV;
quatech_port = qt_get_port_private(port);
port0 = qt_get_port_private(serial->port[0]);
if (quatech_port == NULL || port0 == NULL)
return -ENODEV;
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
port0->open_ports++;
result = qt_get_device(serial, &port0->DeviceData);
/* Port specific setups */
result = qt_open_channel(serial, port->number, &ChannelData);
if (result < 0) {
dbg(__FILE__ "qt_open_channel failed\n");
return result;
}
dbg(__FILE__ "qt_open_channel completed.\n");
/* FIXME: are these needed? Do they even do anything useful? */
quatech_port->shadowLSR = ChannelData.line_status &
(SERIAL_LSR_OE | SERIAL_LSR_PE | SERIAL_LSR_FE | SERIAL_LSR_BI);
quatech_port->shadowMSR = ChannelData.modem_status &
(SERIAL_MSR_CTS | SERIAL_MSR_DSR | SERIAL_MSR_RI | SERIAL_MSR_CD);
/* Set Baud rate to default and turn off (default) flow control here */
result = qt_setuart(serial, port->number, DEFAULT_DIVISOR, DEFAULT_LCR);
if (result < 0) {
dbg(__FILE__ "qt_setuart failed\n");
return result;
}
dbg(__FILE__ "qt_setuart completed.\n");
/*
* Put this here to make it responsive to stty and defaults set by
* the tty layer
*/
/* FIXME: is this needed? */
/* qt_set_termios(tty, port, NULL); */
/* Check to see if we've set up our endpoint info yet */
if (port0->open_ports == 1) {
if (serial->port[0]->interrupt_in_buffer == NULL) {
/* set up interrupt urb */
usb_fill_int_urb(serial->port[0]->interrupt_in_urb,
serial->dev,
usb_rcvintpipe(serial->dev,
serial->port[0]->interrupt_in_endpointAddress),
serial->port[0]->interrupt_in_buffer,
serial->port[0]->
interrupt_in_urb->transfer_buffer_length,
qt_interrupt_callback, serial,
serial->port[0]->
interrupt_in_urb->interval);
result =
usb_submit_urb(serial->port[0]->interrupt_in_urb,
GFP_KERNEL);
if (result) {
dev_err(&port->dev,
"%s - Error %d submitting "
"interrupt urb\n", __func__, result);
}
}
}
dbg("port number is %d\n", port->number);
dbg("serial number is %d\n", port->serial->minor);
dbg("Bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
dbg("BulkOut endpoint is %d\n", port->bulk_out_endpointAddress);
dbg("Interrupt endpoint is %d\n", port->interrupt_in_endpointAddress);
dbg("port's number in the device is %d\n", quatech_port->port_num);
quatech_port->read_urb = port->read_urb;
/* set up our bulk in urb */
usb_fill_bulk_urb(quatech_port->read_urb,
serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->bulk_in_buffer,
quatech_port->read_urb->transfer_buffer_length,
qt_read_bulk_callback, quatech_port);
dbg("qt_open: bulkin endpoint is %d\n", port->bulk_in_endpointAddress);
quatech_port->read_urb_busy = true;
result = usb_submit_urb(quatech_port->read_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev,
"%s - Error %d submitting control urb\n",
__func__, result);
quatech_port->read_urb_busy = false;
}
/* initialize our wait queues */
init_waitqueue_head(&quatech_port->wait);
init_waitqueue_head(&quatech_port->msr_wait);
/* initialize our icount structure */
memset(&(quatech_port->icount), 0x00, sizeof(quatech_port->icount));
return 0;
}
static int qt_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial;
int chars = 0;
serial = get_usb_serial(port, __func__);
dbg("%s - port %d\n", __func__, port->number);
if (serial->num_bulk_out) {
if (port->write_urb->status == -EINPROGRESS)
chars = port->write_urb->transfer_buffer_length;
}
dbg("%s - returns %d\n", __func__, chars);
return chars;
}
static void qt_block_until_empty(struct tty_struct *tty,
struct quatech_port *qt_port)
{
int timeout = HZ / 10;
int wait = 30;
int count;
while (1) {
count = qt_chars_in_buffer(tty);
if (count <= 0)
return;
interruptible_sleep_on_timeout(&qt_port->wait, timeout);
wait--;
if (wait == 0) {
/* give up after ~30 sleeps of HZ/10 jiffies (roughly three seconds) */
dbg("%s - TIMEOUT", __func__);
return;
}
}
}
static void qt_close(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct quatech_port *qt_port;
struct quatech_port *port0;
struct tty_struct *tty;
int status;
unsigned int index;
status = 0;
dbg("%s - port %d\n", __func__, port->number);
tty = tty_port_tty_get(&port->port);
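/* FIXME: tty_port_tty_get() can return NULL if the tty is already gone,
which would oops on the tty->index dereference below */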
index = tty->index - serial->minor;
qt_port = qt_get_port_private(port);
port0 = qt_get_port_private(serial->port[0]);
/* shutdown any bulk reads that might be going on */
if (serial->num_bulk_out)
usb_unlink_urb(port->write_urb);
if (serial->num_bulk_in)
usb_unlink_urb(port->read_urb);
/* wait for the transmitter to empty */
if (serial->dev)
qt_block_until_empty(tty, qt_port);
tty_kref_put(tty);
/* Close uart channel */
status = qt_close_channel(serial, index);
if (status < 0)
dbg("%s - port %d qt_close_channel failed.\n",
__func__, port->number);
port0->open_ports--;
dbg("qt_num_open_ports in close%d:in port%d\n",
port0->open_ports, port->number);
if (port0->open_ports == 0) {
if (serial->port[0]->interrupt_in_urb) {
dbg("%s", "Shutdown interrupt_in_urb\n");
usb_kill_urb(serial->port[0]->interrupt_in_urb);
}
}
if (qt_port->write_urb) {
/* if this urb had a transfer buffer already (old tx) free it */
kfree(qt_port->write_urb->transfer_buffer);
usb_free_urb(qt_port->write_urb);
}
}
static int qt_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
int result;
struct usb_serial *serial = get_usb_serial(port, __func__);
if (serial == NULL)
return -ENODEV;
dbg("%s - port %d\n", __func__, port->number);
if (count == 0) {
dbg("%s - write request of 0 bytes\n", __func__);
return 0;
}
/* only do something if we have a bulk out endpoint */
if (serial->num_bulk_out) {
if (port->write_urb->status == -EINPROGRESS) {
dbg("%s - already writing\n", __func__);
return 0;
}
count =
(count > port->bulk_out_size) ? port->bulk_out_size : count;
memcpy(port->write_urb->transfer_buffer, buf, count);
/* set up our urb */
usb_fill_bulk_urb(port->write_urb, serial->dev,
usb_sndbulkpipe(serial->dev,
port->
bulk_out_endpointAddress),
port->write_urb->transfer_buffer, count,
qt_write_bulk_callback, port);
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result)
dbg("%s - failed submitting write urb, error %d\n",
__func__, result);
else
result = count;
return result;
}
/* no bulk out, so return 0 bytes written */
return 0;
}
static int qt_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial;
struct quatech_port *qt_port;
unsigned long flags;
int retval = -EINVAL;
if (port_paranoia_check(port, __func__)) {
dbg("%s", "Invalid port\n");
return -1;
}
serial = get_usb_serial(port, __func__);
if (!serial)
return -ENODEV;
qt_port = qt_get_port_private(port);
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
if (serial->num_bulk_out) {
if (port->write_urb->status != -EINPROGRESS)
retval = port->bulk_out_size;
}
spin_unlock_irqrestore(&qt_port->lock, flags);
return retval;
}
static int qt_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct quatech_port *qt_port = qt_get_port_private(port);
struct usb_serial *serial = get_usb_serial(port, __func__);
unsigned int index;
dbg("%s cmd 0x%04x", __func__, cmd);
index = tty->index - serial->minor;
if (cmd == TIOCMIWAIT) {
while (qt_port != NULL) {
interruptible_sleep_on(&qt_port->msr_wait);
if (signal_pending(current))
return -ERESTARTSYS;
else {
char diff = qt_port->diff_status;
if (diff == 0)
return -EIO; /* no change => error */
/* Consume all events */
qt_port->diff_status = 0;
if (((arg & TIOCM_RNG)
&& (diff & SERIAL_MSR_RI))
|| ((arg & TIOCM_DSR)
&& (diff & SERIAL_MSR_DSR))
|| ((arg & TIOCM_CD)
&& (diff & SERIAL_MSR_CD))
|| ((arg & TIOCM_CTS)
&& (diff & SERIAL_MSR_CTS))) {
return 0;
}
}
}
return 0;
}
dbg("%s -No ioctl for that one. port = %d\n", __func__, port->number);
return -ENOIOCTLCMD;
}
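/*
 * Hypothetical user-space sketch, not part of the driver, exercising the
 * TIOCMIWAIT path handled above: block until CTS or carrier detect
 * changes state on an open serial device.
 */
#if 0
#include <sys/ioctl.h>
static int example_wait_for_modem_change(int fd)
{
return ioctl(fd, TIOCMIWAIT, TIOCM_CTS | TIOCM_CD);
}
#endif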
static void qt_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
struct ktermios *old_termios)
{
struct ktermios *termios = tty->termios;
unsigned char new_LCR = 0;
unsigned int cflag = termios->c_cflag;
unsigned int index;
int baud, divisor, remainder;
int status;
dbg("%s", __func__);
index = tty->index - port->serial->minor;
switch (cflag) {
case CS5:
new_LCR |= SERIAL_5_DATA;
break;
case CS6:
new_LCR |= SERIAL_6_DATA;
break;
case CS7:
new_LCR |= SERIAL_7_DATA;
break;
default:
case CS8:
new_LCR |= SERIAL_8_DATA;
break;
}
/* Parity stuff */
if (cflag & PARENB) {
if (cflag & PARODD)
new_LCR |= SERIAL_ODD_PARITY;
else
new_LCR |= SERIAL_EVEN_PARITY;
}
if (cflag & CSTOPB)
new_LCR |= SERIAL_TWO_STOPB;
else
new_LCR |= SERIAL_ONE_STOPB;
dbg("%s - 4\n", __func__);
/* That's the LCR stuff, go ahead and set it */
baud = tty_get_baud_rate(tty);
if (!baud)
/* pick a default, any default... */
baud = 9600;
dbg("%s - got baud = %d\n", __func__, baud);
divisor = MAX_BAUD_RATE / baud;
remainder = MAX_BAUD_RATE % baud;
/* Round to nearest divisor */
if (((remainder * 2) >= baud) && (baud != 110))
divisor++;
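/*
 * Worked example of the rounding above, assuming (for illustration only)
 * MAX_BAUD_RATE were 460800: baud = 134 gives divisor = 3438 with
 * remainder = 108; since 108 * 2 >= 134 the divisor is bumped to 3439,
 * whose actual rate (~133.99 baud) is the nearer one.  The baud != 110
 * test keeps 110 baud on the truncated divisor.
 */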
/*
* Set Baud rate to default and turn off (default) flow control here
*/
status =
qt_setuart(port->serial, index, (unsigned short)divisor, new_LCR);
if (status < 0) {
dbg(__FILE__ "qt_setuart failed\n");
return;
}
/* Now determine flow control */
if (cflag & CRTSCTS) {
dbg("%s - Enabling HW flow control port %d\n", __func__,
port->number);
/* Enable RTS/CTS flow control */
status = BoxSetHW_FlowCtrl(port->serial, index, 1);
if (status < 0) {
dbg(__FILE__ "BoxSetHW_FlowCtrl failed\n");
return;
}
} else {
/* Disable RTS/CTS flow control */
dbg("%s - disabling HW flow control port %d\n", __func__,
port->number);
status = BoxSetHW_FlowCtrl(port->serial, index, 0);
if (status < 0) {
dbg(__FILE__ "BoxSetHW_FlowCtrl failed\n");
return;
}
}
/* if we are implementing XON/XOFF, set the start and stop character in
* the device */
if (I_IXOFF(tty) || I_IXON(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
unsigned char start_char = START_CHAR(tty);
status =
BoxSetSW_FlowCtrl(port->serial, index, stop_char,
start_char);
if (status < 0)
dbg(__FILE__ "BoxSetSW_FlowCtrl (enabled) failed\n");
} else {
/* disable SW flow control */
status = BoxDisable_SW_FlowCtrl(port->serial, index);
if (status < 0)
dbg(__FILE__ "BoxSetSW_FlowCtrl (diabling) failed\n");
}
tty->termios->c_cflag &= ~CMSPAR;
/* FIXME: Error cases should be returning the actual bits changed only */
}
static void qt_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
u16 index, onoff;
unsigned int result;
unsigned long flags;
index = tty->index - serial->minor;
qt_port = qt_get_port_private(port);
if (break_state == -1)
onoff = 1;
else
onoff = 0;
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
result =
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
QT_BREAK_CONTROL, 0x40, onoff, index, NULL, 0, 300);
spin_unlock_irqrestore(&qt_port->lock, flags);
}
static inline int qt_real_tiocmget(struct tty_struct *tty,
struct usb_serial_port *port,
struct usb_serial *serial)
{
u8 mcr;
u8 msr;
unsigned int result = 0;
int status;
unsigned int index;
dbg("%s - port %d, tty =0x%p\n", __func__, port->number, tty);
index = tty->index - serial->minor;
status =
BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
if (status >= 0) {
status =
BoxGetRegister(port->serial, index,
MODEM_STATUS_REGISTER, &msr);
}
if (status >= 0) {
result = ((mcr & SERIAL_MCR_DTR) ? TIOCM_DTR : 0)
/* DTR IS SET */
| ((mcr & SERIAL_MCR_RTS) ? TIOCM_RTS : 0)
/* RTS IS SET */
| ((msr & SERIAL_MSR_CTS) ? TIOCM_CTS : 0)
/* CTS is set */
| ((msr & SERIAL_MSR_CD) ? TIOCM_CAR : 0)
/* Carrier detect is set */
| ((msr & SERIAL_MSR_RI) ? TIOCM_RI : 0)
/* Ring indicator set */
| ((msr & SERIAL_MSR_DSR) ? TIOCM_DSR : 0);
/* DSR is set */
return result;
} else
return -ESPIPE;
}
static inline int qt_real_tiocmset(struct tty_struct *tty,
struct usb_serial_port *port,
struct usb_serial *serial,
unsigned int value)
{
u8 mcr;
int status;
unsigned int index;
dbg("%s - port %d\n", __func__, port->number);
index = tty->index - serial->minor;
status =
BoxGetRegister(port->serial, index, MODEM_CONTROL_REGISTER, &mcr);
if (status < 0)
return -ESPIPE;
/*
* Turn off the RTS and DTR and loopback, and then only turn on what was
* asked for
*/
mcr &= ~(SERIAL_MCR_RTS | SERIAL_MCR_DTR | SERIAL_MCR_LOOP);
if (value & TIOCM_RTS)
mcr |= SERIAL_MCR_RTS;
if (value & TIOCM_DTR)
mcr |= SERIAL_MCR_DTR;
if (value & TIOCM_LOOP)
mcr |= SERIAL_MCR_LOOP;
status =
BoxSetRegister(port->serial, index, MODEM_CONTROL_REGISTER, mcr);
if (status < 0)
return -ESPIPE;
else
return 0;
}
static int qt_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
int retval = -ENODEV;
unsigned long flags;
dbg("In %s\n", __func__);
if (!serial)
return -ENODEV;
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - port->RxHolding = %d\n", __func__, qt_port->RxHolding);
retval = qt_real_tiocmget(tty, port, serial);
spin_unlock_irqrestore(&qt_port->lock, flags);
return retval;
}
static int qt_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port = qt_get_port_private(port);
unsigned long flags;
int retval = -ENODEV;
dbg("In %s\n", __func__);
if (!serial)
return -ENODEV;
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
dbg("%s - qt_port->RxHolding = %d\n", __func__, qt_port->RxHolding);
retval = qt_real_tiocmset(tty, port, serial, set);
spin_unlock_irqrestore(&qt_port->lock, flags);
return retval;
}
static void qt_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
unsigned long flags;
dbg("%s - port %d\n", __func__, port->number);
if (!serial)
return;
qt_port = qt_get_port_private(port);
spin_lock_irqsave(&qt_port->lock, flags);
/* pass on to the driver specific version of this function */
qt_port->RxHolding = 1;
dbg("%s - port->RxHolding = 1\n", __func__);
spin_unlock_irqrestore(&qt_port->lock, flags);
return;
}
static void qt_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct usb_serial *serial = get_usb_serial(port, __func__);
struct quatech_port *qt_port;
unsigned long flags;
unsigned int result;
if (!serial)
return;
qt_port = qt_get_port_private(port);
spin_lock_irqsave(&qt_port->lock, flags);
dbg("%s - port %d\n", __func__, port->number);
if (qt_port->RxHolding == 1) {
dbg("%s -qt_port->RxHolding == 1\n", __func__);
qt_port->RxHolding = 0;
dbg("%s - qt_port->RxHolding = 0\n", __func__);
/* if we have a bulk endpoint, start it up */
if ((serial->num_bulk_in) && (qt_port->ReadBulkStopped == 1)) {
/* Start reading from the device */
usb_fill_bulk_urb(port->read_urb, serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer,
port->read_urb->
transfer_buffer_length,
qt_read_bulk_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
err("%s - failed restarting read urb, error %d",
__func__, result);
}
}
spin_unlock_irqrestore(&qt_port->lock, flags);
return;
}
static int qt_calc_num_ports(struct usb_serial *serial)
{
int num_ports;
dbg("numberofendpoints: %d\n",
(int)serial->interface->cur_altsetting->desc.bNumEndpoints);
dbg("numberofendpoints: %d\n",
(int)serial->interface->altsetting->desc.bNumEndpoints);
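/*
 * The arithmetic below presumes one bulk-in/bulk-out endpoint pair per
 * port plus a single interrupt-in endpoint shared by all ports (see
 * qt_open(), which only arms the interrupt URB on port 0), hence
 * (bNumEndpoints - 1) / 2.
 */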
num_ports =
(serial->interface->cur_altsetting->desc.bNumEndpoints - 1) / 2;
return num_ports;
}
static struct usb_serial_driver quatech_device = {
.driver = {
.owner = THIS_MODULE,
.name = "serqt",
},
.description = DRIVER_DESC,
.usb_driver = &serqt_usb_driver,
.id_table = serqt_id_table,
.num_ports = 8,
.open = qt_open,
.close = qt_close,
.write = qt_write,
.write_room = qt_write_room,
.chars_in_buffer = qt_chars_in_buffer,
.throttle = qt_throttle,
.unthrottle = qt_unthrottle,
.calc_num_ports = qt_calc_num_ports,
.ioctl = qt_ioctl,
.set_termios = qt_set_termios,
.break_ctl = qt_break,
.tiocmget = qt_tiocmget,
.tiocmset = qt_tiocmset,
.attach = qt_startup,
.release = qt_release,
};
static int __init serqt_usb_init(void)
{
int retval;
dbg("%s\n", __func__);
/* register with usb-serial */
retval = usb_serial_register(&quatech_device);
if (retval)
goto failed_usb_serial_register;
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
/* register with usb */
retval = usb_register(&serqt_usb_driver);
if (retval == 0)
return 0;
/* if we're here, usb_register() failed */
usb_serial_deregister(&quatech_device);
failed_usb_serial_register:
return retval;
}
static void __exit serqt_usb_exit(void)
{
usb_deregister(&serqt_usb_driver);
usb_serial_deregister(&quatech_device);
}
module_init(serqt_usb_init);
module_exit(serqt_usb_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
Raybuntu/linux | fs/hpfs/ea.c | 2953 | 11678 | /*
* linux/fs/hpfs/ea.c
*
* Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
*
* handling extended attributes
*/
#include "hpfs_fn.h"
/* Remove external extended attributes. 'ano' specifies whether 'a' is the
direct sector where the EAs start or an anode */
void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
{
unsigned pos = 0;
while (pos < len) {
char ex[4 + 255 + 1 + 8];
struct extended_attribute *ea = (struct extended_attribute *)ex;
if (pos + 4 > len) {
hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
ano ? "anode" : "sectors", a, len);
return;
}
if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
if (ea_indirect(ea)) {
if (ea_valuelen(ea) != 8) {
hpfs_error(s, "ea_indirect(ea) set while ea->valuelen!=8, %s %08x, pos %08x",
ano ? "anode" : "sectors", a, pos);
return;
}
if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4))
return;
hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
}
pos += ea->namelen + ea_valuelen(ea) + 5;
}
if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9);
else {
struct buffer_head *bh;
struct anode *anode;
if ((anode = hpfs_map_anode(s, a, &bh))) {
hpfs_remove_btree(s, &anode->btree);
brelse(bh);
hpfs_free_sectors(s, a, 1);
}
}
}
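/*
 * Illustrative sketch, not part of the driver: the on-disk EA record
 * layout that the walks in this file assume.  Byte 0 carries the flag
 * bits tested by ea_indirect()/ea_in_anode(), byte 1 the name length,
 * bytes 2-3 the value length (low, high), followed by the NUL-terminated
 * name and then the value -- hence the recurring
 * "namelen + valuelen + 5" stride:
 *
 * +------+---------+-------------+-------------+---------+-------+
 * | flag | namelen | valuelen_lo | valuelen_hi | name \0 | value |
 * +------+---------+-------------+-------------+---------+-------+
 */
#if 0
/* hypothetical helper equivalent to the open-coded stride */
static inline unsigned hpfs_ea_stride(const struct extended_attribute *ea)
{
return ea->namelen + ea_valuelen(ea) + 5; /* 4 header bytes + '\0' */
}
#endif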
static char *get_indirect_ea(struct super_block *s, int ano, secno a, int size)
{
char *ret;
if (!(ret = kmalloc(size + 1, GFP_NOFS))) {
printk("HPFS: out of memory for EA\n");
return NULL;
}
if (hpfs_ea_read(s, a, ano, 0, size, ret)) {
kfree(ret);
return NULL;
}
ret[size] = 0;
return ret;
}
static void set_indirect_ea(struct super_block *s, int ano, secno a,
const char *data, int size)
{
hpfs_ea_write(s, a, ano, 0, size, data);
}
/* Read an extended attribute named 'key' into the provided buffer */
int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
char *buf, int size)
{
unsigned pos;
int ano, len;
secno a;
char ex[4 + 255 + 1 + 8];
struct extended_attribute *ea;
struct extended_attribute *ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
if (!strcmp(ea->name, key)) {
if (ea_indirect(ea))
goto indirect;
if (ea_valuelen(ea) >= size)
return -EINVAL;
memcpy(buf, ea_data(ea), ea_valuelen(ea));
buf[ea_valuelen(ea)] = 0;
return 0;
}
a = le32_to_cpu(fnode->ea_secno);
len = le32_to_cpu(fnode->ea_size_l);
ano = fnode_in_anode(fnode);
pos = 0;
while (pos < len) {
ea = (struct extended_attribute *)ex;
if (pos + 4 > len) {
hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
ano ? "anode" : "sectors", a, len);
return -EIO;
}
if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO;
if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
return -EIO;
if (!strcmp(ea->name, key)) {
if (ea_indirect(ea))
goto indirect;
if (ea_valuelen(ea) >= size)
return -EINVAL;
if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf))
return -EIO;
buf[ea_valuelen(ea)] = 0;
return 0;
}
pos += ea->namelen + ea_valuelen(ea) + 5;
}
return -ENOENT;
indirect:
if (ea_len(ea) >= size)
return -EINVAL;
if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf))
return -EIO;
buf[ea_len(ea)] = 0;
return 0;
}
/* Read an extended attribute named 'key' */
char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *size)
{
char *ret;
unsigned pos;
int ano, len;
secno a;
struct extended_attribute *ea;
struct extended_attribute *ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
if (!strcmp(ea->name, key)) {
if (ea_indirect(ea))
return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
printk("HPFS: out of memory for EA\n");
return NULL;
}
memcpy(ret, ea_data(ea), ea_valuelen(ea));
ret[ea_valuelen(ea)] = 0;
return ret;
}
a = le32_to_cpu(fnode->ea_secno);
len = le32_to_cpu(fnode->ea_size_l);
ano = fnode_in_anode(fnode);
pos = 0;
while (pos < len) {
char ex[4 + 255 + 1 + 8];
ea = (struct extended_attribute *)ex;
if (pos + 4 > len) {
hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
ano ? "anode" : "sectors", a, len);
return NULL;
}
if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL;
if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
return NULL;
if (!strcmp(ea->name, key)) {
if (ea_indirect(ea))
return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
printk("HPFS: out of memory for EA\n");
return NULL;
}
if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) {
kfree(ret);
return NULL;
}
ret[ea_valuelen(ea)] = 0;
return ret;
}
pos += ea->namelen + ea_valuelen(ea) + 5;
}
return NULL;
}
/*
* Update or create extended attribute 'key' with value 'data'. Note that
* when this ea exists, it MUST have the same size as size of data.
* This driver can't change sizes of eas ('cause I just don't need it).
*/
void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
const char *data, int size)
{
fnode_secno fno = inode->i_ino;
struct super_block *s = inode->i_sb;
unsigned pos;
int ano, len;
secno a;
unsigned char h[4];
struct extended_attribute *ea;
struct extended_attribute *ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
if (!strcmp(ea->name, key)) {
if (ea_indirect(ea)) {
if (ea_len(ea) == size)
set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
} else if (ea_valuelen(ea) == size) {
memcpy(ea_data(ea), data, size);
}
return;
}
a = le32_to_cpu(fnode->ea_secno);
len = le32_to_cpu(fnode->ea_size_l);
ano = fnode_in_anode(fnode);
pos = 0;
while (pos < len) {
char ex[4 + 255 + 1 + 8];
ea = (struct extended_attribute *)ex;
if (pos + 4 > len) {
hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
ano ? "anode" : "sectors", a, len);
return;
}
if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
return;
if (!strcmp(ea->name, key)) {
if (ea_indirect(ea)) {
if (ea_len(ea) == size)
set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
}
else {
if (ea_valuelen(ea) == size)
hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data);
}
return;
}
pos += ea->namelen + ea_valuelen(ea) + 5;
}
if (!le16_to_cpu(fnode->ea_offs)) {
/*if (le16_to_cpu(fnode->ea_size_s)) {
hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0",
inode->i_ino, le16_to_cpu(fnode->ea_size_s));
return;
}*/
fnode->ea_offs = cpu_to_le16(0xc4);
}
if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) {
hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
(unsigned long)inode->i_ino,
le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
return;
}
if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) &&
le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5 <= 0x200) {
ea = fnode_end_ea(fnode);
*(char *)ea = 0;
ea->namelen = strlen(key);
ea->valuelen_lo = size;
ea->valuelen_hi = size >> 8;
strcpy(ea->name, key);
memcpy(ea_data(ea), data, size);
fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5);
goto ret;
}
/* Most of the code here is 99.9993422% unused. I hope there are no bugs.
But then, HPFS.IFS has bugs in its ea management too. */
if (le16_to_cpu(fnode->ea_size_s) && !le32_to_cpu(fnode->ea_size_l)) {
secno n;
struct buffer_head *bh;
char *data;
if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return;
if (!(data = hpfs_get_sector(s, n, &bh))) {
hpfs_free_sectors(s, n, 1);
return;
}
memcpy(data, fnode_ea(fnode), le16_to_cpu(fnode->ea_size_s));
fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s));
fnode->ea_size_s = cpu_to_le16(0);
fnode->ea_secno = cpu_to_le32(n);
fnode->flags &= ~FNODE_anode;
mark_buffer_dirty(bh);
brelse(bh);
}
pos = le32_to_cpu(fnode->ea_size_l) + 5 + strlen(key) + size;
len = (le32_to_cpu(fnode->ea_size_l) + 511) >> 9;
if (pos >= 30000) goto bail;
while (((pos + 511) >> 9) > len) {
if (!len) {
secno q = hpfs_alloc_sector(s, fno, 1, 0);
if (!q) goto bail;
fnode->ea_secno = cpu_to_le32(q);
fnode->flags &= ~FNODE_anode;
len++;
} else if (!fnode_in_anode(fnode)) {
if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) {
len++;
} else {
/* Aargh... don't know how to create ea anodes :-( */
/*struct buffer_head *bh;
struct anode *anode;
anode_secno a_s;
if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh)))
goto bail;
anode->up = cpu_to_le32(fno);
anode->btree.fnode_parent = 1;
anode->btree.n_free_nodes--;
anode->btree.n_used_nodes++;
anode->btree.first_free = cpu_to_le16(le16_to_cpu(anode->btree.first_free) + 12);
anode->u.external[0].disk_secno = cpu_to_le32(le32_to_cpu(fnode->ea_secno));
anode->u.external[0].file_secno = cpu_to_le32(0);
anode->u.external[0].length = cpu_to_le32(len);
mark_buffer_dirty(bh);
brelse(bh);
fnode->flags |= FNODE_anode;
fnode->ea_secno = cpu_to_le32(a_s);*/
secno new_sec;
int i;
if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9))))
goto bail;
for (i = 0; i < len; i++) {
struct buffer_head *bh1, *bh2;
void *b1, *b2;
if (!(b1 = hpfs_map_sector(s, le32_to_cpu(fnode->ea_secno) + i, &bh1, len - i - 1))) {
hpfs_free_sectors(s, new_sec, (pos + 511) >> 9);
goto bail;
}
if (!(b2 = hpfs_get_sector(s, new_sec + i, &bh2))) {
brelse(bh1);
hpfs_free_sectors(s, new_sec, (pos + 511) >> 9);
goto bail;
}
memcpy(b2, b1, 512);
brelse(bh1);
mark_buffer_dirty(bh2);
brelse(bh2);
}
hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno), len);
fnode->ea_secno = cpu_to_le32(new_sec);
len = (pos + 511) >> 9;
}
}
if (fnode_in_anode(fnode)) {
if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno),
0, len) != -1) {
len++;
} else {
goto bail;
}
}
}
h[0] = 0;
h[1] = strlen(key);
h[2] = size & 0xff;
h[3] = size >> 8;
if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
fnode->ea_size_l = cpu_to_le32(pos);
ret:
hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
return;
bail:
if (le32_to_cpu(fnode->ea_secno))
if (fnode_in_anode(fnode)) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9));
else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0);
}
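/*
 * Hypothetical caller sketch, not in the driver, illustrating the
 * contract documented above hpfs_set_ea(): an existing EA may only be
 * rewritten with a value of exactly the same size, so a careful caller
 * fetches the current size through hpfs_get_ea() first.
 */
#if 0
static void example_refresh_ea(struct inode *inode, struct fnode *fnode)
{
int size;
char key[] = "EXAMPLE"; /* illustrative EA name */
char *old = hpfs_get_ea(inode->i_sb, fnode, key, &size);
char *fresh;
if (!old)
return;
fresh = kmalloc(size, GFP_NOFS);
if (fresh) {
memset(fresh, 'x', size); /* same size, new bytes */
hpfs_set_ea(inode, fnode, key, fresh, size);
kfree(fresh);
}
kfree(old);
}
#endif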
| gpl-2.0 |
master-j/e210s_cam | drivers/edac/x38_edac.c | 4233 | 12118 | /*
* Intel X38 Memory Controller kernel module
* Copyright (C) 2008 Cluster Computing, Inc.
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* This file is based on i3200_edac.c
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_core.h"
#define X38_REVISION "1.1"
#define EDAC_MOD_STR "x38_edac"
#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
#define X38_RANKS 8
#define X38_RANKS_PER_CHANNEL 4
#define X38_CHANNELS 2
/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */
#define X38_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
#define X38_MCHBAR_HIGH 0x4c
#define X38_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
#define X38_MMR_WINDOW_SIZE 16384
#define X38_TOM 0xa0 /* Top of Memory (16b)
*
* 15:10 reserved
* 9:0 total populated physical memory
*/
#define X38_TOM_MASK 0x3ff /* bits 9:0 */
#define X38_TOM_SHIFT 26 /* 64MiB grain */
#define X38_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15 reserved
* 14 Isochronous TBWRR Run Behind FIFO Full
* (ITCV)
* 13 Isochronous TBWRR Run Behind FIFO Put
* (ITSTV)
* 12 reserved
* 11 MCH Thermal Sensor Event
* for SMI/SCI/SERR (GTSE)
* 10 reserved
* 9 LOCK to non-DRAM Memory Flag (LCKF)
* 8 reserved
* 7 DRAM Throttle Flag (DTF)
* 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
#define X38_ERRSTS_UE 0x0002
#define X38_ERRSTS_CE 0x0001
#define X38_ERRSTS_BITS (X38_ERRSTS_UE | X38_ERRSTS_CE)
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define X38_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
*
* 15:10 reserved
* 9:0 Channel 0 DRAM Rank Boundary Address
*/
#define X38_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
#define X38_DRB_MASK 0x3ff /* bits 9:0 */
#define X38_DRB_SHIFT 26 /* 64MiB grain */
#define X38_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
*
* 63:48 Error Column Address (ERRCOL)
* 47:32 Error Row Address (ERRROW)
* 31:29 Error Bank Address (ERRBANK)
* 28:27 Error Rank Address (ERRRANK)
* 26:24 reserved
* 23:16 Error Syndrome (ERRSYND)
* 15: 2 reserved
* 1 Multiple Bit Error Status (MERRSTS)
* 0 Correctable Error Status (CERRSTS)
*/
#define X38_C1ECCERRLOG 0x680 /* Channel 1 ECC Error Log (64b) */
#define X38_ECCERRLOG_CE 0x1
#define X38_ECCERRLOG_UE 0x2
#define X38_ECCERRLOG_RANK_BITS 0x18000000
#define X38_ECCERRLOG_SYNDROME_BITS 0xff0000
#define X38_CAPID0 0xe0 /* see P.94 of spec for details */
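/*
 * Worked example for the TOM decoding documented above (illustrative raw
 * value): a readout of 0x0010, masked with X38_TOM_MASK, gives 16;
 * 16 << X38_TOM_SHIFT is 16 * 64MiB = 1GiB of populated memory.
 */
#if 0
/* hypothetical helper, not used by the driver */
static inline u64 x38_tom_bytes(u16 tom)
{
return ((u64)(tom & X38_TOM_MASK)) << X38_TOM_SHIFT;
}
#endif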
static int x38_channel_num;
static int how_many_channel(struct pci_dev *pdev)
{
unsigned char capid0_8b; /* 8th byte of CAPID0 */
pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
debugf0("In single channel mode.\n");
x38_channel_num = 1;
} else {
debugf0("In dual channel mode.\n");
x38_channel_num = 2;
}
return x38_channel_num;
}
static unsigned long eccerrlog_syndrome(u64 log)
{
return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
}
static int eccerrlog_row(int channel, u64 log)
{
return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
(channel * X38_RANKS_PER_CHANNEL);
}
enum x38_chips {
X38 = 0,
};
struct x38_dev_info {
const char *ctl_name;
};
struct x38_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[X38_CHANNELS];
};
static const struct x38_dev_info x38_devs[] = {
[X38] = {
.ctl_name = "x38"},
};
static struct pci_dev *mci_pdev;
static int x38_registered = 1;
static void x38_clear_error_info(struct mem_ctl_info *mci)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->dev);
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
X38_ERRSTS_BITS);
}
static u64 x38_readq(const void __iomem *addr)
{
return readl(addr) | (((u64)readl(addr + 4)) << 32);
}
static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
struct x38_error_info *info)
{
struct pci_dev *pdev;
void __iomem *window = mci->pvt_info;
pdev = to_pci_dev(mci->dev);
/*
* This is a mess because there is no atomic way to read all the
* registers at once and the registers can transition from CE being
* overwritten by UE.
*/
pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
if (!(info->errsts & X38_ERRSTS_BITS))
return;
info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
if (x38_channel_num == 2)
info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
/*
* If the error is the same for both reads then the first set
* of reads is valid. If there is a change then there is a CE
* with no info and the second set of reads is valid and
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
if (x38_channel_num == 2)
info->eccerrlog[1] =
x38_readq(window + X38_C1ECCERRLOG);
}
x38_clear_error_info(mci);
}
static void x38_process_error_info(struct mem_ctl_info *mci,
struct x38_error_info *info)
{
int channel;
u64 log;
if (!(info->errsts & X38_ERRSTS_BITS))
return;
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
info->errsts = info->errsts2;
}
for (channel = 0; channel < x38_channel_num; channel++) {
log = info->eccerrlog[channel];
if (log & X38_ECCERRLOG_UE) {
edac_mc_handle_ue(mci, 0, 0,
eccerrlog_row(channel, log), "x38 UE");
} else if (log & X38_ECCERRLOG_CE) {
edac_mc_handle_ce(mci, 0, 0,
eccerrlog_syndrome(log),
eccerrlog_row(channel, log), 0, "x38 CE");
}
}
}
static void x38_check(struct mem_ctl_info *mci)
{
struct x38_error_info info;
debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
x38_get_and_clear_error_info(mci, &info);
x38_process_error_info(mci, &info);
}
void __iomem *x38_map_mchbar(struct pci_dev *pdev)
{
union {
u64 mchbar;
struct {
u32 mchbar_low;
u32 mchbar_high;
};
} u;
void __iomem *window;
pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
u.mchbar &= X38_MCHBAR_MASK;
if (u.mchbar != (resource_size_t)u.mchbar) {
printk(KERN_ERR
"x38: mmio space beyond accessible range (0x%llx)\n",
(unsigned long long)u.mchbar);
return NULL;
}
window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
if (!window)
printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
return window;
}
static void x38_get_drbs(void __iomem *window,
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
int i;
for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
}
}
static bool x38_is_stacked(struct pci_dev *pdev,
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
u16 tom;
pci_read_config_word(pdev, X38_TOM, &tom);
tom &= X38_TOM_MASK;
return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
}
static unsigned long drb_to_nr_pages(
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
bool stacked, int channel, int rank)
{
int n;
n = drbs[channel][rank];
if (rank > 0)
n -= drbs[channel][rank - 1];
if (stacked && (channel == 1) && drbs[channel][rank] ==
drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
}
n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
return n;
}
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
int i;
struct mem_ctl_info *mci = NULL;
unsigned long last_page;
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
debugf0("MC: %s()\n", __func__);
window = x38_map_mchbar(pdev);
if (!window)
return -ENODEV;
x38_get_drbs(window, drbs);
how_many_channel(pdev);
/* FIXME: unconventional pvt_info usage */
mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
if (!mci)
return -ENOMEM;
debugf3("MC: %s(): init mci\n", __func__);
mci->dev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = X38_REVISION;
mci->ctl_name = x38_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = x38_check;
mci->ctl_page_to_phys = NULL;
mci->pvt_info = window;
stacked = x38_is_stacked(pdev, drbs);
/*
* The dram rank boundary (DRB) reg values are boundary addresses
* for each DRAM rank with a granularity of 64MB. DRB regs are
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
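/*
 * Example with illustrative boundaries: drbs[0] = {4, 8, 8, 8} means
 * rank 0 holds 4 * 64MiB and rank 1 (8 - 4) * 64MiB, while ranks 2 and
 * 3 repeat the final boundary and so contribute no pages (see
 * drb_to_nr_pages() above).
 */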
last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
nr_pages = drb_to_nr_pages(drbs, stacked,
i / X38_RANKS_PER_CHANNEL,
i % X38_RANKS_PER_CHANNEL);
if (nr_pages == 0) {
csrow->mtype = MEM_EMPTY;
continue;
}
csrow->first_page = last_page + 1;
last_page += nr_pages;
csrow->last_page = last_page;
csrow->nr_pages = nr_pages;
csrow->grain = nr_pages << PAGE_SHIFT;
csrow->mtype = MEM_DDR2;
csrow->dtype = DEV_UNKNOWN;
csrow->edac_mode = EDAC_UNKNOWN;
}
x38_clear_error_info(mci);
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
goto fail;
}
/* get this far and it's successful */
debugf3("MC: %s(): success\n", __func__);
return 0;
fail:
iounmap(window);
if (mci)
edac_mc_free(mci);
return rc;
}
static int __devinit x38_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rc;
debugf0("MC: %s()\n", __func__);
if (pci_enable_device(pdev) < 0)
return -EIO;
rc = x38_probe1(pdev, ent->driver_data);
if (!mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
}
static void __devexit x38_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
debugf0("%s()\n", __func__);
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
return;
iounmap(mci->pvt_info);
edac_mc_free(mci);
}
static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
{
PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
X38},
{
0,
} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, x38_pci_tbl);
static struct pci_driver x38_driver = {
.name = EDAC_MOD_STR,
.probe = x38_init_one,
.remove = __devexit_p(x38_remove_one),
.id_table = x38_pci_tbl,
};
static int __init x38_init(void)
{
int pci_rc;
debugf3("MC: %s()\n", __func__);
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
pci_rc = pci_register_driver(&x38_driver);
if (pci_rc < 0)
goto fail0;
if (!mci_pdev) {
x38_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_X38_HB, NULL);
if (!mci_pdev) {
debugf0("x38 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
if (pci_rc < 0) {
debugf0("x38 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
}
return 0;
fail1:
pci_unregister_driver(&x38_driver);
fail0:
if (mci_pdev)
pci_dev_put(mci_pdev);
return pci_rc;
}
static void __exit x38_exit(void)
{
debugf3("MC: %s()\n", __func__);
pci_unregister_driver(&x38_driver);
if (!x38_registered) {
x38_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
}
}
module_init(x38_init);
module_exit(x38_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| gpl-2.0 |
pboonstoppel/linux-3.1-nv-rel15r7-cpuquiet | sound/pci/ctxfi/cttimer.c | 4745 | 11342 | /*
* PCM timer handling on ctxfi
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*/
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "ctatc.h"
#include "cthardware.h"
#include "cttimer.h"
static int use_system_timer;
MODULE_PARM_DESC(use_system_timer, "Foce to use system-timer");
module_param(use_system_timer, bool, S_IRUGO);
struct ct_timer_ops {
void (*init)(struct ct_timer_instance *);
void (*prepare)(struct ct_timer_instance *);
void (*start)(struct ct_timer_instance *);
void (*stop)(struct ct_timer_instance *);
void (*free_instance)(struct ct_timer_instance *);
void (*interrupt)(struct ct_timer *);
void (*free_global)(struct ct_timer *);
};
/* timer instance -- assigned to each PCM stream */
struct ct_timer_instance {
spinlock_t lock;
struct ct_timer *timer_base;
struct ct_atc_pcm *apcm;
struct snd_pcm_substream *substream;
struct timer_list timer;
struct list_head instance_list;
struct list_head running_list;
unsigned int position;
unsigned int frag_count;
unsigned int running:1;
unsigned int need_update:1;
};
/* timer instance manager */
struct ct_timer {
spinlock_t lock; /* global timer lock (for xfitimer) */
spinlock_t list_lock; /* lock for instance list */
struct ct_atc *atc;
struct ct_timer_ops *ops;
struct list_head instance_head;
struct list_head running_head;
unsigned int wc; /* current wallclock */
unsigned int irq_handling:1; /* in IRQ handling */
unsigned int reprogram:1; /* need to reprogram the interval */
unsigned int running:1; /* global timer running */
};
/*
* system-timer-based updates
*/
static void ct_systimer_callback(unsigned long data)
{
struct ct_timer_instance *ti = (struct ct_timer_instance *)data;
struct snd_pcm_substream *substream = ti->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
struct ct_atc_pcm *apcm = ti->apcm;
unsigned int period_size = runtime->period_size;
unsigned int buffer_size = runtime->buffer_size;
unsigned long flags;
unsigned int position, dist, interval;
position = substream->ops->pointer(substream);
dist = (position + buffer_size - ti->position) % buffer_size;
if (dist >= period_size ||
position / period_size != ti->position / period_size) {
apcm->interrupt(apcm);
ti->position = position;
}
/* Add extra HZ*5/1000 to avoid overrun issue when recording
* at 8kHz in 8-bit format or at 88kHz in 24-bit format. */
interval = ((period_size - (position % period_size))
* HZ + (runtime->rate - 1)) / runtime->rate + HZ * 5 / 1000;
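/*
 * Illustrative numbers: with HZ = 1000, rate = 48000 and a full
 * 256-frame period remaining, (256 * 1000 + 47999) / 48000 = 6 jiffies,
 * plus the HZ * 5 / 1000 = 5 jiffy margin, re-arms the timer 11 jiffies
 * out.
 */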
spin_lock_irqsave(&ti->lock, flags);
if (ti->running)
mod_timer(&ti->timer, jiffies + interval);
spin_unlock_irqrestore(&ti->lock, flags);
}
static void ct_systimer_init(struct ct_timer_instance *ti)
{
setup_timer(&ti->timer, ct_systimer_callback,
(unsigned long)ti);
}
static void ct_systimer_start(struct ct_timer_instance *ti)
{
struct snd_pcm_runtime *runtime = ti->substream->runtime;
unsigned long flags;
spin_lock_irqsave(&ti->lock, flags);
ti->running = 1;
mod_timer(&ti->timer,
jiffies + (runtime->period_size * HZ +
(runtime->rate - 1)) / runtime->rate);
spin_unlock_irqrestore(&ti->lock, flags);
}
static void ct_systimer_stop(struct ct_timer_instance *ti)
{
unsigned long flags;
spin_lock_irqsave(&ti->lock, flags);
ti->running = 0;
del_timer(&ti->timer);
spin_unlock_irqrestore(&ti->lock, flags);
}
static void ct_systimer_prepare(struct ct_timer_instance *ti)
{
ct_systimer_stop(ti);
try_to_del_timer_sync(&ti->timer);
}
#define ct_systimer_free ct_systimer_prepare
static struct ct_timer_ops ct_systimer_ops = {
.init = ct_systimer_init,
.free_instance = ct_systimer_free,
.prepare = ct_systimer_prepare,
.start = ct_systimer_start,
.stop = ct_systimer_stop,
};
/*
* Handling multiple streams using a global emu20k1 timer irq
*/
#define CT_TIMER_FREQ 48000
#define MIN_TICKS 1
#define MAX_TICKS ((1 << 13) - 1)
static void ct_xfitimer_irq_rearm(struct ct_timer *atimer, int ticks)
{
struct hw *hw = atimer->atc->hw;
if (ticks > MAX_TICKS)
ticks = MAX_TICKS;
hw->set_timer_tick(hw, ticks);
if (!atimer->running)
hw->set_timer_irq(hw, 1);
atimer->running = 1;
}
static void ct_xfitimer_irq_stop(struct ct_timer *atimer)
{
if (atimer->running) {
struct hw *hw = atimer->atc->hw;
hw->set_timer_irq(hw, 0);
hw->set_timer_tick(hw, 0);
atimer->running = 0;
}
}
static inline unsigned int ct_xfitimer_get_wc(struct ct_timer *atimer)
{
struct hw *hw = atimer->atc->hw;
return hw->get_wc(hw);
}
/*
* reprogram the timer interval;
* checks the running instance list and determines the next timer interval.
* also updates each stream's position and returns the number of streams
* to call snd_pcm_period_elapsed() appropriately
*
* call this inside the lock and irq disabled
*/
static int ct_xfitimer_reprogram(struct ct_timer *atimer, int can_update)
{
struct ct_timer_instance *ti;
unsigned int min_intr = (unsigned int)-1;
int updates = 0;
unsigned int wc, diff;
if (list_empty(&atimer->running_head)) {
ct_xfitimer_irq_stop(atimer);
atimer->reprogram = 0; /* clear flag */
return 0;
}
wc = ct_xfitimer_get_wc(atimer);
diff = wc - atimer->wc;
atimer->wc = wc;
list_for_each_entry(ti, &atimer->running_head, running_list) {
if (ti->frag_count > diff)
ti->frag_count -= diff;
else {
unsigned int pos;
unsigned int period_size, rate;
period_size = ti->substream->runtime->period_size;
rate = ti->substream->runtime->rate;
pos = ti->substream->ops->pointer(ti->substream);
if (pos / period_size != ti->position / period_size) {
ti->need_update = 1;
ti->position = pos;
updates++;
}
pos %= period_size;
pos = period_size - pos;
ti->frag_count = div_u64((u64)pos * CT_TIMER_FREQ +
rate - 1, rate);
}
if (ti->need_update && !can_update)
min_intr = 0; /* pending to the next irq */
if (ti->frag_count < min_intr)
min_intr = ti->frag_count;
}
if (min_intr < MIN_TICKS)
min_intr = MIN_TICKS;
ct_xfitimer_irq_rearm(atimer, min_intr);
atimer->reprogram = 0; /* clear flag */
return updates;
}
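/*
 * Worked example for the frag_count arithmetic above (illustrative
 * numbers): at rate = 44100 with pos = 441 frames left in the period,
 * div_u64(441 * 48000 + 44099, 44100) = 480, i.e. this stream next
 * needs service in 480 wallclock ticks at CT_TIMER_FREQ = 48000.
 */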
/* look through the instance list and call period_elapsed if needed */
static void ct_xfitimer_check_period(struct ct_timer *atimer)
{
struct ct_timer_instance *ti;
unsigned long flags;
spin_lock_irqsave(&atimer->list_lock, flags);
list_for_each_entry(ti, &atimer->instance_head, instance_list) {
if (ti->running && ti->need_update) {
ti->need_update = 0;
ti->apcm->interrupt(ti->apcm);
}
}
spin_unlock_irqrestore(&atimer->list_lock, flags);
}
/* Handle timer-interrupt */
static void ct_xfitimer_callback(struct ct_timer *atimer)
{
int update;
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
atimer->irq_handling = 1;
do {
update = ct_xfitimer_reprogram(atimer, 1);
spin_unlock(&atimer->lock);
if (update)
ct_xfitimer_check_period(atimer);
spin_lock(&atimer->lock);
} while (atimer->reprogram);
atimer->irq_handling = 0;
spin_unlock_irqrestore(&atimer->lock, flags);
}
static void ct_xfitimer_prepare(struct ct_timer_instance *ti)
{
ti->frag_count = ti->substream->runtime->period_size;
ti->running = 0;
ti->need_update = 0;
}
/* start/stop the timer */
static void ct_xfitimer_update(struct ct_timer *atimer)
{
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
if (atimer->irq_handling) {
/* reached from IRQ handler; let it handle later */
atimer->reprogram = 1;
spin_unlock_irqrestore(&atimer->lock, flags);
return;
}
ct_xfitimer_irq_stop(atimer);
ct_xfitimer_reprogram(atimer, 0);
spin_unlock_irqrestore(&atimer->lock, flags);
}
static void ct_xfitimer_start(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
if (list_empty(&ti->running_list))
atimer->wc = ct_xfitimer_get_wc(atimer);
ti->running = 1;
ti->need_update = 0;
list_add(&ti->running_list, &atimer->running_head);
spin_unlock_irqrestore(&atimer->lock, flags);
ct_xfitimer_update(atimer);
}
static void ct_xfitimer_stop(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
unsigned long flags;
spin_lock_irqsave(&atimer->lock, flags);
list_del_init(&ti->running_list);
ti->running = 0;
spin_unlock_irqrestore(&atimer->lock, flags);
ct_xfitimer_update(atimer);
}
static void ct_xfitimer_free_global(struct ct_timer *atimer)
{
ct_xfitimer_irq_stop(atimer);
}
static struct ct_timer_ops ct_xfitimer_ops = {
.prepare = ct_xfitimer_prepare,
.start = ct_xfitimer_start,
.stop = ct_xfitimer_stop,
.interrupt = ct_xfitimer_callback,
.free_global = ct_xfitimer_free_global,
};
/*
* timer instance
*/
struct ct_timer_instance *
ct_timer_instance_new(struct ct_timer *atimer, struct ct_atc_pcm *apcm)
{
struct ct_timer_instance *ti;
ti = kzalloc(sizeof(*ti), GFP_KERNEL);
if (!ti)
return NULL;
spin_lock_init(&ti->lock);
INIT_LIST_HEAD(&ti->instance_list);
INIT_LIST_HEAD(&ti->running_list);
ti->timer_base = atimer;
ti->apcm = apcm;
ti->substream = apcm->substream;
if (atimer->ops->init)
atimer->ops->init(ti);
spin_lock_irq(&atimer->list_lock);
list_add(&ti->instance_list, &atimer->instance_head);
spin_unlock_irq(&atimer->list_lock);
return ti;
}
void ct_timer_prepare(struct ct_timer_instance *ti)
{
if (ti->timer_base->ops->prepare)
ti->timer_base->ops->prepare(ti);
ti->position = 0;
ti->running = 0;
}
void ct_timer_start(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->start(ti);
}
void ct_timer_stop(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->stop(ti);
}
void ct_timer_instance_free(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->stop(ti); /* to be sure */
if (atimer->ops->free_instance)
atimer->ops->free_instance(ti);
spin_lock_irq(&atimer->list_lock);
list_del(&ti->instance_list);
spin_unlock_irq(&atimer->list_lock);
kfree(ti);
}
/*
* timer manager
*/
static void ct_timer_interrupt(void *data, unsigned int status)
{
struct ct_timer *timer = data;
/* Interval timer interrupt */
if ((status & IT_INT) && timer->ops->interrupt)
timer->ops->interrupt(timer);
}
struct ct_timer *ct_timer_new(struct ct_atc *atc)
{
struct ct_timer *atimer;
struct hw *hw;
atimer = kzalloc(sizeof(*atimer), GFP_KERNEL);
if (!atimer)
return NULL;
spin_lock_init(&atimer->lock);
spin_lock_init(&atimer->list_lock);
INIT_LIST_HEAD(&atimer->instance_head);
INIT_LIST_HEAD(&atimer->running_head);
atimer->atc = atc;
hw = atc->hw;
if (!use_system_timer && hw->set_timer_irq) {
snd_printd(KERN_INFO "ctxfi: Use xfi-native timer\n");
atimer->ops = &ct_xfitimer_ops;
hw->irq_callback_data = atimer;
hw->irq_callback = ct_timer_interrupt;
} else {
snd_printd(KERN_INFO "ctxfi: Use system timer\n");
atimer->ops = &ct_systimer_ops;
}
return atimer;
}
void ct_timer_free(struct ct_timer *atimer)
{
struct hw *hw = atimer->atc->hw;
hw->irq_callback = NULL;
if (atimer->ops->free_global)
atimer->ops->free_global(atimer);
kfree(atimer);
}
| gpl-2.0 |
nasty007/android_kernel_lge_mako | arch/x86/platform/olpc/olpc-xo15-sci.c | 4745 | 5531 | /*
* Support for OLPC XO-1.5 System Control Interrupts (SCI)
*
* Copyright (C) 2009-2010 One Laptop per Child
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <asm/olpc.h>
#define DRV_NAME "olpc-xo15-sci"
#define PFX DRV_NAME ": "
#define XO15_SCI_CLASS DRV_NAME
#define XO15_SCI_DEVICE_NAME "OLPC XO-1.5 SCI"
static unsigned long xo15_sci_gpe;
static bool lid_wake_on_close;
/*
* The normal ACPI LID wakeup behavior is wake-on-open, but not
* wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
*
* We provide here a sysfs attribute that will additionally enable
* wake-on-close behavior. This is useful (e.g.) when we opportunistically
* suspend with the display running; if the lid is then closed, we want to
* wake up to turn the display off.
*
* This is controlled through a custom method in the XO-1.5 DSDT.
*/
static int set_lid_wake_behavior(bool wake_on_close)
{
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status;
arg_list.count = 1;
arg_list.pointer = &arg;
arg.type = ACPI_TYPE_INTEGER;
arg.integer.value = wake_on_close;
status = acpi_evaluate_object(NULL, "\\_SB.PCI0.LID.LIDW", &arg_list, NULL);
if (ACPI_FAILURE(status)) {
pr_warning(PFX "failed to set lid behavior\n");
return 1;
}
lid_wake_on_close = wake_on_close;
return 0;
}
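/*
 * Hypothetical user-space sketch, not part of the driver, toggling the
 * sysfs attribute defined below.  The exact sysfs path depends on how
 * ACPI enumerates the XO15EC device; the one used here is only an
 * illustration.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
static int example_enable_lid_wake_on_close(void)
{
/* illustrative path -- check it against the running system */
int fd = open("/sys/bus/acpi/devices/XO15EC:00/lid_wake_on_close",
O_WRONLY);
if (fd < 0)
return -1;
if (write(fd, "1", 1) != 1) {
close(fd);
return -1;
}
return close(fd);
}
#endif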
static ssize_t
lid_wake_on_close_show(struct kobject *s, struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", lid_wake_on_close);
}
static ssize_t lid_wake_on_close_store(struct kobject *s,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int val;
if (sscanf(buf, "%u", &val) != 1)
return -EINVAL;
set_lid_wake_behavior(!!val);
return n;
}
static struct kobj_attribute lid_wake_on_close_attr =
__ATTR(lid_wake_on_close, 0644,
lid_wake_on_close_show,
lid_wake_on_close_store);
static void battery_status_changed(void)
{
struct power_supply *psy = power_supply_get_by_name("olpc-battery");
if (psy) {
power_supply_changed(psy);
put_device(psy->dev);
}
}
static void ac_status_changed(void)
{
struct power_supply *psy = power_supply_get_by_name("olpc-ac");
if (psy) {
power_supply_changed(psy);
put_device(psy->dev);
}
}
static void process_sci_queue(void)
{
u16 data;
int r;
do {
r = olpc_ec_sci_query(&data);
if (r || !data)
break;
pr_debug(PFX "SCI 0x%x received\n", data);
switch (data) {
case EC_SCI_SRC_BATERR:
case EC_SCI_SRC_BATSOC:
case EC_SCI_SRC_BATTERY:
case EC_SCI_SRC_BATCRIT:
battery_status_changed();
break;
case EC_SCI_SRC_ACPWR:
ac_status_changed();
break;
}
} while (data);
if (r)
pr_err(PFX "Failed to clear SCI queue");
}
static void process_sci_queue_work(struct work_struct *work)
{
process_sci_queue();
}
static DECLARE_WORK(sci_work, process_sci_queue_work);
static u32 xo15_sci_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
{
schedule_work(&sci_work);
return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}
static int xo15_sci_add(struct acpi_device *device)
{
unsigned long long tmp;
acpi_status status;
int r;
if (!device)
return -EINVAL;
strcpy(acpi_device_name(device), XO15_SCI_DEVICE_NAME);
strcpy(acpi_device_class(device), XO15_SCI_CLASS);
/* Get GPE bit assignment (EC events). */
status = acpi_evaluate_integer(device->handle, "_GPE", NULL, &tmp);
if (ACPI_FAILURE(status))
return -EINVAL;
xo15_sci_gpe = tmp;
status = acpi_install_gpe_handler(NULL, xo15_sci_gpe,
ACPI_GPE_EDGE_TRIGGERED,
xo15_sci_gpe_handler, device);
if (ACPI_FAILURE(status))
return -ENODEV;
dev_info(&device->dev, "Initialized, GPE = 0x%lx\n", xo15_sci_gpe);
r = sysfs_create_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
if (r)
goto err_sysfs;
/* Flush queue, and enable all SCI events */
process_sci_queue();
olpc_ec_mask_write(EC_SCI_SRC_ALL);
acpi_enable_gpe(NULL, xo15_sci_gpe);
/* Enable wake-on-EC */
if (device->wakeup.flags.valid)
device_init_wakeup(&device->dev, true);
return 0;
err_sysfs:
acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
cancel_work_sync(&sci_work);
return r;
}
static int xo15_sci_remove(struct acpi_device *device, int type)
{
acpi_disable_gpe(NULL, xo15_sci_gpe);
acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
cancel_work_sync(&sci_work);
sysfs_remove_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
return 0;
}
static int xo15_sci_resume(struct acpi_device *device)
{
/* Enable all EC events */
olpc_ec_mask_write(EC_SCI_SRC_ALL);
/* Power/battery status might have changed */
battery_status_changed();
ac_status_changed();
return 0;
}
static const struct acpi_device_id xo15_sci_device_ids[] = {
{"XO15EC", 0},
{"", 0},
};
static struct acpi_driver xo15_sci_drv = {
.name = DRV_NAME,
.class = XO15_SCI_CLASS,
.ids = xo15_sci_device_ids,
.ops = {
.add = xo15_sci_add,
.remove = xo15_sci_remove,
.resume = xo15_sci_resume,
},
};
static int __init xo15_sci_init(void)
{
return acpi_bus_register_driver(&xo15_sci_drv);
}
device_initcall(xo15_sci_init);
| gpl-2.0 |
omnirom/android_kernel_oppo_msm8974 | drivers/s390/char/sclp_config.c | 5001 | 1745 | /*
* drivers/s390/char/sclp_config.c
*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_config"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
#include "sclp.h"
struct conf_mgm_data {
u8 reserved;
u8 ev_qualifier;
} __attribute__((packed));
#define EV_QUAL_CPU_CHANGE 1
#define EV_QUAL_CAP_CHANGE 3
static struct work_struct sclp_cpu_capability_work;
static struct work_struct sclp_cpu_change_work;
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
struct device *dev;
s390_adjust_jiffies();
pr_warning("cpu capability changed.\n");
get_online_cpus();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
put_online_cpus();
}
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
smp_rescan_cpus();
}
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
{
struct conf_mgm_data *cdata;
cdata = (struct conf_mgm_data *)(evbuf + 1);
switch (cdata->ev_qualifier) {
case EV_QUAL_CPU_CHANGE:
schedule_work(&sclp_cpu_change_work);
break;
case EV_QUAL_CAP_CHANGE:
schedule_work(&sclp_cpu_capability_work);
break;
}
}
static struct sclp_register sclp_conf_register =
{
.receive_mask = EVTYP_CONFMGMDATA_MASK,
.receiver_fn = sclp_conf_receiver_fn,
};
static int __init sclp_conf_init(void)
{
INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
return sclp_register(&sclp_conf_register);
}
__initcall(sclp_conf_init);
| gpl-2.0 |
ReVolt-ROM/android_kernel_htc_m7 | drivers/gpu/drm/gma500/oaktrail_crtc.c | 5257 | 16725 | /*
* Copyright © 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_display.h"
#include "power.h"
struct psb_intel_range_t {
int min, max;
};
struct oaktrail_limit_t {
struct psb_intel_range_t dot, m, p1;
};
struct oaktrail_clock_t {
/* derived values */
int dot;
int m;
int p1;
};
#define MRST_LIMIT_LVDS_100L 0
#define MRST_LIMIT_LVDS_83 1
#define MRST_LIMIT_LVDS_100 2
#define MRST_DOT_MIN 19750
#define MRST_DOT_MAX 120000
#define MRST_M_MIN_100L 20
#define MRST_M_MIN_100 10
#define MRST_M_MIN_83 12
#define MRST_M_MAX_100L 34
#define MRST_M_MAX_100 17
#define MRST_M_MAX_83 20
#define MRST_P1_MIN 2
#define MRST_P1_MAX_0 7
#define MRST_P1_MAX_1 8
static const struct oaktrail_limit_t oaktrail_limits[] = {
{ /* MRST_LIMIT_LVDS_100L */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
},
{ /* MRST_LIMIT_LVDS_83 */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
},
{ /* MRST_LIMIT_LVDS_100 */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
.p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
},
};
#define MRST_M_MIN 10
static const u32 oaktrail_m_converts[] = {
0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
0x12, 0x09, 0x24, 0x32, 0x39, 0x1C,
};
static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
{
const struct oaktrail_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
switch (dev_priv->core_freq) {
case 100:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
break;
case 166:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
break;
case 200:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
break;
}
} else {
limit = NULL;
dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
}
return limit;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
{
clock->dot = (refclk * clock->m) / (14 * clock->p1);
}
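/*
 * Worked example (illustrative values): with a 100 MHz core clock,
 * refclk = 100000 kHz, so m = 20 and p1 = 2 give
 * dot = (100000 * 20) / (14 * 2) = 71428 kHz, which lies inside the
 * MRST_DOT_MIN..MRST_DOT_MAX window.
 */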
static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
{
pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
prefix, clock->dot, clock->m, clock->p1);
}
/**
* Returns a set of divisors for the desired target clock with the given refclk,
* or false. Divisor values are the actual divisors for the computed clock.
*/
static bool
mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
struct oaktrail_clock_t *best_clock)
{
struct oaktrail_clock_t clock;
const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
clock.p1++) {
int this_err;
oaktrail_clock(refclk, &clock);
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
return err != target;
}
/**
* Sets the power management mode of the pipe and plane.
*
* This code should probably grow support for turning the cursor off and back
* on appropriately at the same time as we're turning the pipe off/on.
*/
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
u32 temp;
if (!gma_power_begin(dev, true))
return;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
temp = REG_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) {
REG_WRITE(dpll_reg, temp);
REG_READ(dpll_reg);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
REG_READ(dpll_reg);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
REG_READ(dpll_reg);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
temp = REG_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) == 0)
REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
/* Enable the plane */
temp = REG_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
REG_WRITE(dspcntr_reg,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
}
psb_intel_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, true); TODO */
break;
case DRM_MODE_DPMS_OFF:
/* Give the overlay scaler a chance to disable
* if it's on this pipe */
/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
temp = REG_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
REG_WRITE(dspcntr_reg,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
REG_READ(dspbase_reg);
}
/* Next, disable display pipes */
temp = REG_READ(pipeconf_reg);
if ((temp & PIPEACONF_ENABLE) != 0) {
REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
REG_READ(pipeconf_reg);
}
/* Wait for the pipe disable to take effect. */
psb_intel_wait_for_vblank(dev);
temp = REG_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) != 0) {
REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
REG_READ(dpll_reg);
}
/* Wait for the clocks to turn off. */
udelay(150);
break;
}
/* Set FIFO Watermarks */
REG_WRITE(DSPARB, 0x3FFF);
REG_WRITE(DSPFW1, 0x3F88080A);
REG_WRITE(DSPFW2, 0x0b060808);
REG_WRITE(DSPFW3, 0x0);
REG_WRITE(DSPFW4, 0x08030404);
REG_WRITE(DSPFW5, 0x04040404);
REG_WRITE(DSPFW6, 0x78);
/* Must write Bit 14 of the Chicken Bit Register */
REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
gma_power_end(dev);
}
/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
{
u32 pfit_control;
pfit_control = REG_READ(PFIT_CONTROL);
/* See if the panel fitter is in use */
if ((pfit_control & PFIT_ENABLE) == 0)
return -1;
return (pfit_control >> 29) & 3;
}
static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
int refclk = 0;
struct oaktrail_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_lvds = false;
bool is_mipi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct psb_intel_encoder *psb_intel_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
if (!gma_power_begin(dev, true))
return 0;
memcpy(&psb_intel_crtc->saved_mode,
mode,
sizeof(struct drm_display_mode));
memcpy(&psb_intel_crtc->saved_adjusted_mode,
adjusted_mode,
sizeof(struct drm_display_mode));
list_for_each_entry(connector, &mode_config->connector_list, head) {
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
psb_intel_encoder = psb_intel_attached_encoder(connector);
switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
is_sdvo = true;
break;
case INTEL_OUTPUT_MIPI:
is_mipi = true;
break;
}
}
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable the panel fitter if it was on our pipe */
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
REG_WRITE(pipesrc_reg,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
if (psb_intel_encoder)
drm_connector_property_get_value(connector,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
/* Moorestown doesn't have register support for centering so
* we need to mess with the h/vblank and h/vsync start and
* ends to get centering */
int offsetX = 0, offsetY = 0;
offsetX = (adjusted_mode->crtc_hdisplay -
mode->crtc_hdisplay) / 2;
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
REG_WRITE(hblank_reg,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
REG_WRITE(hsync_reg,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
REG_WRITE(vblank_reg,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
REG_WRITE(vsync_reg,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
/* Flush the plane changes */
{
struct drm_crtc_helper_funcs *crtc_funcs =
crtc->helper_private;
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
/* setup pipeconf */
pipeconf = REG_READ(pipeconf_reg);
/* Set up the display plane register */
dspcntr = REG_READ(dspcntr_reg);
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (pipe == 0)
dspcntr |= DISPPLANE_SEL_PIPE_A;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
if (is_mipi)
goto oaktrail_crtc_mode_set_exit;
refclk = dev_priv->core_freq * 1000;
dpll = 0; /* BIT16 = 0 for 100MHz reference */
ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
} else {
dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
"m = %x, p1 = %x.\n", clock.dot, clock.m,
clock.p1);
}
fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
dpll |= DPLL_VGA_MODE_DIS;
dpll |= DPLL_VCO_ENABLE;
if (is_lvds)
dpll |= DPLLA_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
int sdvo_pixel_multiply =
adjusted_mode->clock / mode->clock;
dpll |= DPLL_DVO_HIGH_SPEED;
dpll |=
(sdvo_pixel_multiply -
1) << SDVO_MULTIPLIER_SHIFT_HIRES;
}
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 2)) << 17;
dpll |= DPLL_VCO_ENABLE;
mrstPrintPll("chosen", &clock);
if (dpll & DPLL_VCO_ENABLE) {
REG_WRITE(fp_reg, fp);
REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
REG_READ(dpll_reg);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
}
REG_WRITE(fp_reg, fp);
REG_WRITE(dpll_reg, dpll);
REG_READ(dpll_reg);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
REG_WRITE(dpll_reg, dpll);
REG_READ(dpll_reg);
/* Wait for the clocks to stabilize. */
udelay(150);
REG_WRITE(pipeconf_reg, pipeconf);
REG_READ(pipeconf_reg);
psb_intel_wait_for_vblank(dev);
REG_WRITE(dspcntr_reg, dspcntr);
psb_intel_wait_for_vblank(dev);
oaktrail_crtc_mode_set_exit:
gma_power_end(dev);
return 0;
}
static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
unsigned long start, offset;
int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
/* no fb bound */
if (!crtc->fb) {
dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
if (!gma_power_begin(dev, true))
return 0;
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
if (crtc->fb->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
break;
case 24:
case 32:
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
dev_err(dev->dev, "Unknown color depth\n");
ret = -EINVAL;
goto pipe_set_base_exit;
}
REG_WRITE(dspcntr_reg, dspcntr);
REG_WRITE(dspbase, offset);
REG_READ(dspbase);
REG_WRITE(dspsurf, start);
REG_READ(dspsurf);
pipe_set_base_exit:
gma_power_end(dev);
return ret;
}
static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void oaktrail_crtc_commit(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.dpms = oaktrail_crtc_dpms,
.mode_fixup = oaktrail_crtc_mode_fixup,
.mode_set = oaktrail_crtc_mode_set,
.mode_set_base = oaktrail_pipe_set_base,
.prepare = oaktrail_crtc_prepare,
.commit = oaktrail_crtc_commit,
};
| gpl-2.0 |
nychitman1/android_kernel_htc_flounder | arch/powerpc/kernel/io.c | 7049 | 4270 | /*
* I/O string operations
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Copyright (C) 2006 IBM Corporation
*
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
*
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* Rewritten in C by Stephen Rothwell.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/firmware.h>
#include <asm/bug.h>
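/*
 * A note on the barrier pattern used throughout this file: the
 * leading "sync" orders the string operation against earlier storage
 * accesses, the per-iteration eieio() keeps successive device reads
 * in program order, and the trailing "twi 0,%0,0; isync" pair makes
 * later instructions wait until the final I/O load has returned data.
 */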
void _insb(const volatile u8 __iomem *port, void *buf, long count)
{
u8 *tbuf = buf;
u8 tmp;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
tmp = *port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insb);
void _outsb(volatile u8 __iomem *port, const void *buf, long count)
{
const u8 *tbuf = buf;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
*port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
EXPORT_SYMBOL(_outsb);
void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
{
u16 *tbuf = buf;
u16 tmp;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
tmp = *port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insw_ns);
void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
{
const u16 *tbuf = buf;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
*port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
EXPORT_SYMBOL(_outsw_ns);
void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
{
u32 *tbuf = buf;
u32 tmp;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
tmp = *port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
}
EXPORT_SYMBOL(_insl_ns);
void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
{
const u32 *tbuf = buf;
if (unlikely(count <= 0))
return;
asm volatile("sync");
do {
*port = *tbuf++;
} while (--count != 0);
asm volatile("sync");
}
EXPORT_SYMBOL(_outsl_ns);
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
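/*
 * Example: IO_CHECK_ALIGN(0x1003, 4) is 0 (not 4-byte aligned) while
 * IO_CHECK_ALIGN(0x1004, 4) is 1; the copy loops below use this to
 * choose between byte and word accesses.
 */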
notrace void
_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
void *p = (void __force *)addr;
u32 lc = c;
lc |= lc << 8;
lc |= lc << 16;
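/* e.g. c = 0xab yields lc = 0xabababab for the word-at-a-time stores */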
__asm__ __volatile__ ("sync" : : : "memory");
while(n && !IO_CHECK_ALIGN(p, 4)) {
*((volatile u8 *)p) = c;
p++;
n--;
}
while(n >= 4) {
*((volatile u32 *)p) = lc;
p += 4;
n -= 4;
}
while(n) {
*((volatile u8 *)p) = c;
p++;
n--;
}
__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memset_io);
void _memcpy_fromio(void *dest, const volatile void __iomem *src,
unsigned long n)
{
void *vsrc = (void __force *) src;
__asm__ __volatile__ ("sync" : : : "memory");
while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
*((u8 *)dest) = *((volatile u8 *)vsrc);
eieio();
vsrc++;
dest++;
n--;
}
while(n >= 4) {
*((u32 *)dest) = *((volatile u32 *)vsrc);
eieio();
vsrc += 4;
dest += 4;
n -= 4;
}
while(n) {
*((u8 *)dest) = *((volatile u8 *)vsrc);
eieio();
vsrc++;
dest++;
n--;
}
__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_fromio);
void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
void *vdest = (void __force *) dest;
__asm__ __volatile__ ("sync" : : : "memory");
while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
*((volatile u8 *)vdest) = *((u8 *)src);
src++;
vdest++;
n--;
}
while(n >= 4) {
*((volatile u32 *)vdest) = *((volatile u32 *)src);
src += 4;
vdest += 4;
n-=4;
}
while(n) {
*((volatile u8 *)vdest) = *((u8 *)src);
src++;
vdest++;
n--;
}
__asm__ __volatile__ ("sync" : : : "memory");
}
EXPORT_SYMBOL(_memcpy_toio);
| gpl-2.0 |
piasek1906/Piasek-G2 | arch/x86/kernel/pci-swiotlb.c | 7561 | 2635 | /* Glue code to lib/swiotlb.c */
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/xen/swiotlb-xen.h>
#include <asm/iommu_table.h>
int swiotlb __read_mostly;
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
void *vaddr;
vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
attrs);
if (vaddr)
return vaddr;
return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
static struct dma_map_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
.alloc = x86_swiotlb_alloc_coherent,
.free = x86_swiotlb_free_coherent,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.dma_supported = NULL,
};
/*
* pci_swiotlb_detect_override - set swiotlb to 1 if necessary
*
* This returns non-zero if we are forced to use swiotlb (by the boot
* option).
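*
* For example, booting with "swiotlb=force" on the kernel command line
* sets swiotlb_force, so this detection hook reports that the bounce
* buffers must be used.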
*/
int __init pci_swiotlb_detect_override(void)
{
int use_swiotlb = swiotlb | swiotlb_force;
if (swiotlb_force)
swiotlb = 1;
return use_swiotlb;
}
IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
pci_xen_swiotlb_detect,
pci_swiotlb_init,
pci_swiotlb_late_init);
/*
* if 4GB or more detected (and iommu=off not set) return 1
* and set swiotlb to 1.
*/
int __init pci_swiotlb_detect_4gb(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
if (!no_iommu && max_pfn > MAX_DMA32_PFN)
swiotlb = 1;
#endif
return swiotlb;
}
IOMMU_INIT(pci_swiotlb_detect_4gb,
pci_swiotlb_detect_override,
pci_swiotlb_init,
pci_swiotlb_late_init);
void __init pci_swiotlb_init(void)
{
if (swiotlb) {
swiotlb_init(0);
dma_ops = &swiotlb_dma_ops;
}
}
void __init pci_swiotlb_late_init(void)
{
/* An IOMMU turned us off. */
if (!swiotlb)
swiotlb_free();
else {
printk(KERN_INFO "PCI-DMA: "
"Using software bounce buffering for IO (SWIOTLB)\n");
swiotlb_print_info();
}
}
| gpl-2.0 |
qianxiaoxi/NX507J_Lollipop_kernel | sound/pci/asihpi/hpimsgx.c | 8329 | 21940 | /******************************************************************************
AudioScience HPI driver
Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com>
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation;
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Extended Message Function With Response Caching
(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};
static struct hpios_spinlock msgx_lock;
static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
*pci_info)
{
int i;
for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
&& asihpi_pci_tbl[i].vendor !=
pci_info->pci_dev->vendor)
continue;
if (asihpi_pci_tbl[i].device != PCI_ANY_ID
&& asihpi_pci_tbl[i].device !=
pci_info->pci_dev->device)
continue;
if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
&& asihpi_pci_tbl[i].subvendor !=
pci_info->pci_dev->subsystem_vendor)
continue;
if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
&& asihpi_pci_tbl[i].subdevice !=
pci_info->pci_dev->subsystem_device)
continue;
/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
asihpi_pci_tbl[i].driver_data); */
return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
}
return NULL;
}
static inline void hw_entry_point(struct hpi_message *phm,
struct hpi_response *phr)
{
if ((phm->adapter_index < HPI_MAX_ADAPTERS)
&& hpi_entry_points[phm->adapter_index])
hpi_entry_points[phm->adapter_index] (phm, phr);
else
hpi_init_response(phr, phm->object, phm->function,
HPI_ERROR_PROCESSING_MESSAGE);
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner);
static void HPIMSGX__reset(u16 adapter_index);
static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif
struct hpi_subsys_response {
struct hpi_response_header h;
struct hpi_subsys_res s;
};
struct hpi_adapter_response {
struct hpi_response_header h;
struct hpi_adapter_res a;
};
struct hpi_mixer_response {
struct hpi_response_header h;
struct hpi_mixer_res m;
};
struct hpi_stream_response {
struct hpi_response_header h;
struct hpi_stream_res d;
};
struct adapter_info {
u16 type;
u16 num_instreams;
u16 num_outstreams;
};
struct asi_open_state {
int open_flag;
void *h_owner;
};
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif
/* Globals */
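/*
 * These tables implement the response caching described in the file
 * header: adapter_prepare() opens each adapter, mixer and stream once
 * and stores the responses here, and later open requests are answered
 * from the cached copies.
 */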
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];
static struct hpi_stream_response
rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
static struct hpi_stream_response
rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];
static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];
/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
static struct asi_open_state
instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
HPI_DEBUG_LOG(WARNING,
"suspicious adapter index %d in subsys message 0x%x.\n",
phm->adapter_index, phm->function);
switch (phm->function) {
case HPI_SUBSYS_GET_VERSION:
hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_GET_VERSION, 0);
phr->u.s.version = HPI_VER >> 8; /* return major.minor */
phr->u.s.data = HPI_VER; /* return major.minor.release */
break;
case HPI_SUBSYS_OPEN:
/*do not propagate the message down the chain */
hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
break;
case HPI_SUBSYS_CLOSE:
/*do not propagate the message down the chain */
hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
0);
HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
break;
case HPI_SUBSYS_DRIVER_LOAD:
/* Initialize this module's internal state */
hpios_msgxlock_init(&msgx_lock);
memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
/* Init subsys_findadapters response to no-adapters */
HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_DRIVER_LOAD, 0);
/* individual HPIs don't implement driver load */
HPI_COMMON(phm, phr);
break;
case HPI_SUBSYS_DRIVER_UNLOAD:
HPI_COMMON(phm, phr);
HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
HPI_SUBSYS_DRIVER_UNLOAD, 0);
return;
case HPI_SUBSYS_GET_NUM_ADAPTERS:
case HPI_SUBSYS_GET_ADAPTER:
HPI_COMMON(phm, phr);
break;
case HPI_SUBSYS_CREATE_ADAPTER:
HPIMSGX__init(phm, phr);
break;
default:
/* Must explicitly handle every subsys message in this switch */
hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
HPI_ERROR_INVALID_FUNC);
break;
}
}
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
switch (phm->function) {
case HPI_ADAPTER_OPEN:
adapter_open(phm, phr);
break;
case HPI_ADAPTER_CLOSE:
adapter_close(phm, phr);
break;
case HPI_ADAPTER_DELETE:
HPIMSGX__cleanup(phm->adapter_index, h_owner);
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_CLOSE);
hm.adapter_index = phm->adapter_index;
hw_entry_point(&hm, &hr);
}
hw_entry_point(phm, phr);
break;
default:
hw_entry_point(phm, phr);
break;
}
}
static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
switch (phm->function) {
case HPI_MIXER_OPEN:
mixer_open(phm, phr);
break;
case HPI_MIXER_CLOSE:
mixer_close(phm, phr);
break;
default:
hw_entry_point(phm, phr);
break;
}
}
static void outstream_message(struct hpi_message *phm,
struct hpi_response *phr, void *h_owner)
{
if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
HPI_ERROR_INVALID_OBJ_INDEX);
return;
}
switch (phm->function) {
case HPI_OSTREAM_OPEN:
outstream_open(phm, phr, h_owner);
break;
case HPI_OSTREAM_CLOSE:
outstream_close(phm, phr, h_owner);
break;
default:
hw_entry_point(phm, phr);
break;
}
}
static void instream_message(struct hpi_message *phm,
struct hpi_response *phr, void *h_owner)
{
if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
HPI_ERROR_INVALID_OBJ_INDEX);
return;
}
switch (phm->function) {
case HPI_ISTREAM_OPEN:
instream_open(phm, phr, h_owner);
break;
case HPI_ISTREAM_CLOSE:
instream_close(phm, phr, h_owner);
break;
default:
hw_entry_point(phm, phr);
break;
}
}
/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
* HPI_MessageEx so that functions in hpifunc.c compile.
*/
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
HPI_DEBUG_MESSAGE(DEBUG, phm);
if (phm->type != HPI_TYPE_REQUEST) {
hpi_init_response(phr, phm->object, phm->function,
HPI_ERROR_INVALID_TYPE);
return;
}
if (phm->adapter_index >= HPI_MAX_ADAPTERS
&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
hpi_init_response(phr, phm->object, phm->function,
HPI_ERROR_BAD_ADAPTER_NUMBER);
return;
}
switch (phm->object) {
case HPI_OBJ_SUBSYSTEM:
subsys_message(phm, phr, h_owner);
break;
case HPI_OBJ_ADAPTER:
adapter_message(phm, phr, h_owner);
break;
case HPI_OBJ_MIXER:
mixer_message(phm, phr);
break;
case HPI_OBJ_OSTREAM:
outstream_message(phm, phr, h_owner);
break;
case HPI_OBJ_ISTREAM:
instream_message(phm, phr, h_owner);
break;
default:
hw_entry_point(phm, phr);
break;
}
HPI_DEBUG_RESPONSE(phr);
}
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}
static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
sizeof(rESP_HPI_MIXER_OPEN[0]));
}
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);
hpios_msgxlock_lock(&msgx_lock);
if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
[phm->obj_index].h.error)
memcpy(phr,
&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
obj_index],
sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
else {
instream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 1;
hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_RESET);
hm.adapter_index = phm->adapter_index;
hm.obj_index = phm->obj_index;
hw_entry_point(&hm, &hr);
hpios_msgxlock_lock(&msgx_lock);
if (hr.error) {
instream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 0;
phr->error = hr.error;
} else {
instream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 1;
instream_user_open[phm->adapter_index][phm->
obj_index].h_owner = h_owner;
memcpy(phr,
&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
[phm->obj_index],
sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
}
}
hpios_msgxlock_unlock(&msgx_lock);
}
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);
hpios_msgxlock_lock(&msgx_lock);
if (h_owner ==
instream_user_open[phm->adapter_index][phm->
obj_index].h_owner) {
/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
"instream %d owned by %p\n",
phm->wAdapterIndex, phm->wObjIndex, hOwner); */
instream_user_open[phm->adapter_index][phm->
obj_index].h_owner = NULL;
hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_RESET);
hm.adapter_index = phm->adapter_index;
hm.obj_index = phm->obj_index;
hw_entry_point(&hm, &hr);
hpios_msgxlock_lock(&msgx_lock);
if (hr.error) {
instream_user_open[phm->adapter_index][phm->
obj_index].h_owner = h_owner;
phr->error = hr.error;
} else {
instream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 0;
instream_user_open[phm->adapter_index][phm->
obj_index].h_owner = NULL;
}
} else {
HPI_DEBUG_LOG(WARNING,
"%p trying to close %d instream %d owned by %p\n",
h_owner, phm->adapter_index, phm->obj_index,
instream_user_open[phm->adapter_index][phm->
obj_index].h_owner);
phr->error = HPI_ERROR_OBJ_NOT_OPEN;
}
hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);
hpios_msgxlock_lock(&msgx_lock);
if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
[phm->obj_index].h.error)
memcpy(phr,
&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
obj_index],
sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
else {
outstream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 1;
hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_RESET);
hm.adapter_index = phm->adapter_index;
hm.obj_index = phm->obj_index;
hw_entry_point(&hm, &hr);
hpios_msgxlock_lock(&msgx_lock);
if (hr.error) {
outstream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 0;
phr->error = hr.error;
} else {
outstream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 1;
outstream_user_open[phm->adapter_index][phm->
obj_index].h_owner = h_owner;
memcpy(phr,
&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
[phm->obj_index],
sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
}
}
hpios_msgxlock_unlock(&msgx_lock);
}
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
void *h_owner)
{
struct hpi_message hm;
struct hpi_response hr;
hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);
hpios_msgxlock_lock(&msgx_lock);
if (h_owner ==
outstream_user_open[phm->adapter_index][phm->
obj_index].h_owner) {
/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
"outstream %d owned by %p\n",
phm->wAdapterIndex, phm->wObjIndex, hOwner); */
outstream_user_open[phm->adapter_index][phm->
obj_index].h_owner = NULL;
hpios_msgxlock_unlock(&msgx_lock);
/* issue a reset */
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_RESET);
hm.adapter_index = phm->adapter_index;
hm.obj_index = phm->obj_index;
hw_entry_point(&hm, &hr);
hpios_msgxlock_lock(&msgx_lock);
if (hr.error) {
outstream_user_open[phm->adapter_index][phm->
obj_index].h_owner = h_owner;
phr->error = hr.error;
} else {
outstream_user_open[phm->adapter_index][phm->
obj_index].open_flag = 0;
outstream_user_open[phm->adapter_index][phm->
obj_index].h_owner = NULL;
}
} else {
HPI_DEBUG_LOG(WARNING,
"%p trying to close %d outstream %d owned by %p\n",
h_owner, phm->adapter_index, phm->obj_index,
outstream_user_open[phm->adapter_index][phm->
obj_index].h_owner);
phr->error = HPI_ERROR_OBJ_NOT_OPEN;
}
hpios_msgxlock_unlock(&msgx_lock);
}
static u16 adapter_prepare(u16 adapter)
{
struct hpi_message hm;
struct hpi_response hr;
/* Open the adapter and streams */
u16 i;
/* call to HPI_ADAPTER_OPEN */
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_OPEN);
hm.adapter_index = adapter;
hw_entry_point(&hm, &hr);
memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
sizeof(rESP_HPI_ADAPTER_OPEN[0]));
if (hr.error)
return hr.error;
/* call to HPI_ADAPTER_GET_INFO */
hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_GET_INFO);
hm.adapter_index = adapter;
hw_entry_point(&hm, &hr);
if (hr.error)
return hr.error;
aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;
/* call to HPI_OSTREAM_OPEN */
for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_OPEN);
hm.adapter_index = adapter;
hm.obj_index = i;
hw_entry_point(&hm, &hr);
memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
outstream_user_open[adapter][i].open_flag = 0;
outstream_user_open[adapter][i].h_owner = NULL;
}
/* call to HPI_ISTREAM_OPEN */
for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_OPEN);
hm.adapter_index = adapter;
hm.obj_index = i;
hw_entry_point(&hm, &hr);
memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
instream_user_open[adapter][i].open_flag = 0;
instream_user_open[adapter][i].h_owner = NULL;
}
/* call to HPI_MIXER_OPEN */
hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
hm.adapter_index = adapter;
hw_entry_point(&hm, &hr);
memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
sizeof(rESP_HPI_MIXER_OPEN[0]));
return 0;
}
static void HPIMSGX__reset(u16 adapter_index)
{
int i;
u16 adapter;
struct hpi_response hr;
if (adapter_index == HPIMSGX_ALLADAPTERS) {
for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {
hpi_init_response(&hr, HPI_OBJ_ADAPTER,
HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));
hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
HPI_ERROR_INVALID_OBJ);
memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
sizeof(rESP_HPI_MIXER_OPEN[adapter]));
for (i = 0; i < HPI_MAX_STREAMS; i++) {
hpi_init_response(&hr, HPI_OBJ_OSTREAM,
HPI_OSTREAM_OPEN,
HPI_ERROR_INVALID_OBJ);
memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
&hr,
sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
[i]));
hpi_init_response(&hr, HPI_OBJ_ISTREAM,
HPI_ISTREAM_OPEN,
HPI_ERROR_INVALID_OBJ);
memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
&hr,
sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
[i]));
}
}
} else if (adapter_index < HPI_MAX_ADAPTERS) {
rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
HPI_ERROR_BAD_ADAPTER;
rESP_HPI_MIXER_OPEN[adapter_index].h.error =
HPI_ERROR_INVALID_OBJ;
for (i = 0; i < HPI_MAX_STREAMS; i++) {
rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
HPI_ERROR_INVALID_OBJ;
rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
HPI_ERROR_INVALID_OBJ;
}
}
}
static u16 HPIMSGX__init(struct hpi_message *phm,
/* HPI_SUBSYS_CREATE_ADAPTER structure with */
/* resource list or NULL=find all */
struct hpi_response *phr
/* response from HPI_ADAPTER_GET_INFO */
)
{
hpi_handler_func *entry_point_func;
struct hpi_response hr;
/* Init response here so we can pass in previous adapter list */
hpi_init_response(&hr, phm->object, phm->function,
HPI_ERROR_INVALID_OBJ);
entry_point_func =
hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);
if (entry_point_func) {
HPI_DEBUG_MESSAGE(DEBUG, phm);
entry_point_func(phm, &hr);
} else {
phr->error = HPI_ERROR_PROCESSING_MESSAGE;
return phr->error;
}
if (hr.error == 0) {
/* the adapter was created successfully;
save the mapping for future use */
hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
/* prepare adapter (pre-open streams etc.) */
HPI_DEBUG_LOG(DEBUG,
"HPI_SUBSYS_CREATE_ADAPTER successful,"
" preparing adapter\n");
adapter_prepare(hr.u.s.adapter_index);
}
memcpy(phr, &hr, hr.size);
return phr->error;
}
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
int i, adapter, adapter_limit;
if (!h_owner)
return;
if (adapter_index == HPIMSGX_ALLADAPTERS) {
adapter = 0;
adapter_limit = HPI_MAX_ADAPTERS;
} else {
adapter = adapter_index;
adapter_limit = adapter + 1;
}
for (; adapter < adapter_limit; adapter++) {
/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
for (i = 0; i < HPI_MAX_STREAMS; i++) {
if (h_owner ==
outstream_user_open[adapter][i].h_owner) {
struct hpi_message hm;
struct hpi_response hr;
HPI_DEBUG_LOG(DEBUG,
"Close adapter %d ostream %d\n",
adapter, i);
hpi_init_message_response(&hm, &hr,
HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
hm.adapter_index = (u16)adapter;
hm.obj_index = (u16)i;
hw_entry_point(&hm, &hr);
hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
hw_entry_point(&hm, &hr);
hm.function = HPI_OSTREAM_GROUP_RESET;
hw_entry_point(&hm, &hr);
outstream_user_open[adapter][i].open_flag = 0;
outstream_user_open[adapter][i].h_owner =
NULL;
}
if (h_owner == instream_user_open[adapter][i].h_owner) {
struct hpi_message hm;
struct hpi_response hr;
HPI_DEBUG_LOG(DEBUG,
"Close adapter %d istream %d\n",
adapter, i);
hpi_init_message_response(&hm, &hr,
HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
hm.adapter_index = (u16)adapter;
hm.obj_index = (u16)i;
hw_entry_point(&hm, &hr);
hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
hw_entry_point(&hm, &hr);
hm.function = HPI_ISTREAM_GROUP_RESET;
hw_entry_point(&hm, &hr);
instream_user_open[adapter][i].open_flag = 0;
instream_user_open[adapter][i].h_owner = NULL;
}
}
}
}
| gpl-2.0 |
adri360/DarkNoteIII-Kernel | drivers/net/can/sja1000/tscan1.c | 8329 | 5841 | /*
* tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
*
* Copyright 2010 Andre B. Oliveira
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* References:
* - Getting started with TS-CAN1, Technologic Systems, Jun 2009
* http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "sja1000.h"
MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
MODULE_LICENSE("GPL");
/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
#define TSCAN1_MAXDEV 4
/* PLD registers address offsets */
#define TSCAN1_ID1 0
#define TSCAN1_ID2 1
#define TSCAN1_VERSION 2
#define TSCAN1_LED 3
#define TSCAN1_PAGE 4
#define TSCAN1_MODE 5
#define TSCAN1_JUMPERS 6
/* PLD board identifier registers magic values */
#define TSCAN1_ID1_VALUE 0xf6
#define TSCAN1_ID2_VALUE 0xb9
/* PLD mode register SJA1000 IO enable bit */
#define TSCAN1_MODE_ENABLE 0x40
/* PLD jumpers register bits */
#define TSCAN1_JP4 0x10
#define TSCAN1_JP5 0x20
/* PLD IO base addresses start */
#define TSCAN1_PLD_ADDRESS 0x150
/* PLD register space size */
#define TSCAN1_PLD_SIZE 8
/* SJA1000 register space size */
#define TSCAN1_SJA1000_SIZE 32
/* SJA1000 crystal frequency (16MHz) */
#define TSCAN1_SJA1000_XTAL 16000000
/* SJA1000 IO base addresses */
static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
};
/* Read SJA1000 register */
static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
{
return inb((unsigned long)priv->reg_base + reg);
}
/* Write SJA1000 register */
static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
{
outb(val, (unsigned long)priv->reg_base + reg);
}
/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
static int __devinit tscan1_probe(struct device *dev, unsigned id)
{
struct net_device *netdev;
struct sja1000_priv *priv;
unsigned long pld_base, sja1000_base;
int irq, i;
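/* ids 0-3 map to PLD register windows at 0x150, 0x158, 0x160 and 0x168 */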
pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
return -EBUSY;
if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENODEV;
}
switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
case TSCAN1_JP4:
irq = 6;
break;
case TSCAN1_JP5:
irq = 7;
break;
case TSCAN1_JP4 | TSCAN1_JP5:
irq = 5;
break;
default:
dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
release_region(pld_base, TSCAN1_PLD_SIZE);
return -EINVAL;
}
netdev = alloc_sja1000dev(0);
if (!netdev) {
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENOMEM;
}
dev_set_drvdata(dev, netdev);
SET_NETDEV_DEV(netdev, dev);
netdev->base_addr = pld_base;
netdev->irq = irq;
priv = netdev_priv(netdev);
priv->read_reg = tscan1_read;
priv->write_reg = tscan1_write;
priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
priv->cdr = CDR_CBP | CDR_CLK_OFF;
priv->ocr = OCR_TX0_PUSHPULL;
/* Select the first SJA1000 IO address that is free and that works */
for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
sja1000_base = tscan1_sja1000_addresses[i];
if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
dev_name(dev)))
continue;
/* Set SJA1000 IO base address and enable it */
outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
priv->reg_base = (void __iomem *)sja1000_base;
if (!register_sja1000dev(netdev)) {
/* SJA1000 probe succeeded; turn LED off and return */
outb(0, pld_base + TSCAN1_LED);
netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
pld_base, sja1000_base, irq);
return 0;
}
/* SJA1000 probe failed; release and try next address */
outb(0, pld_base + TSCAN1_MODE);
release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
}
dev_err(dev, "failed to assign SJA1000 IO address\n");
dev_set_drvdata(dev, NULL);
free_sja1000dev(netdev);
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENXIO;
}
static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
{
struct net_device *netdev;
struct sja1000_priv *priv;
unsigned long pld_base, sja1000_base;
netdev = dev_get_drvdata(dev);
unregister_sja1000dev(netdev);
dev_set_drvdata(dev, NULL);
priv = netdev_priv(netdev);
pld_base = netdev->base_addr;
sja1000_base = (unsigned long)priv->reg_base;
outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
release_region(pld_base, TSCAN1_PLD_SIZE);
free_sja1000dev(netdev);
return 0;
}
static struct isa_driver tscan1_isa_driver = {
.probe = tscan1_probe,
.remove = __devexit_p(tscan1_remove),
.driver = {
.name = "tscan1",
},
};
static int __init tscan1_init(void)
{
return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
}
module_init(tscan1_init);
static void __exit tscan1_exit(void)
{
isa_unregister_driver(&tscan1_isa_driver);
}
module_exit(tscan1_exit);
| gpl-2.0 |
MassStash/htc_m8_kernel_GPE_6.0 | net/appletalk/dev.c | 12681 | 1232 | /*
* Moved here from drivers/net/net_init.c, which is:
* Written 1993,1994,1995 by Donald Becker.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ltalk.h>
static void ltalk_setup(struct net_device *dev)
{
/* Fill in the fields of the device structure with localtalk-generic values. */
dev->type = ARPHRD_LOCALTLK;
dev->hard_header_len = LTALK_HLEN;
dev->mtu = LTALK_MTU;
dev->addr_len = LTALK_ALEN;
dev->tx_queue_len = 10;
dev->broadcast[0] = 0xFF;
dev->flags = IFF_BROADCAST|IFF_MULTICAST|IFF_NOARP;
}
/**
* alloc_ltalkdev - Allocates and sets up an localtalk device
* @sizeof_priv: Size of additional driver-private structure to be allocated
* for this localtalk device
*
* Fill in the fields of the device structure with localtalk-generic
* values. Basically does everything except registering the device.
*
* Constructs a new net device, complete with a private data area of
* size @sizeof_priv. A 32-byte (not bit) alignment is enforced for
* this private data area.
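*
* A minimal usage sketch (illustrative; struct my_ltalk_priv is a
* hypothetical driver-private type):
*
*	dev = alloc_ltalkdev(sizeof(struct my_ltalk_priv));
*	if (!dev)
*		return -ENOMEM;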
*/
struct net_device *alloc_ltalkdev(int sizeof_priv)
{
return alloc_netdev(sizeof_priv, "lt%d", ltalk_setup);
}
EXPORT_SYMBOL(alloc_ltalkdev);
| gpl-2.0 |
PatrikKT/useless | arch/powerpc/platforms/powermac/udbg_scc.c | 13193 | 4202 | /*
* udbg for zilog scc ports as found on Apple PowerMacs
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pmac_feature.h>
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
#define SCC_TXRDY 4
#define SCC_RXRDY 1
static volatile u8 __iomem *sccc;
static volatile u8 __iomem *sccd;
static void udbg_scc_putc(char c)
{
if (sccc) {
while ((in_8(sccc) & SCC_TXRDY) == 0)
;
out_8(sccd, c);
if (c == '\n')
udbg_scc_putc('\r');
}
}
static int udbg_scc_getc_poll(void)
{
if (sccc) {
if ((in_8(sccc) & SCC_RXRDY) != 0)
return in_8(sccd);
else
return -1;
}
return -1;
}
static int udbg_scc_getc(void)
{
if (sccc) {
while ((in_8(sccc) & SCC_RXRDY) == 0)
;
return in_8(sccd);
}
return -1;
}
static unsigned char scc_inittab[] = {
13, 0, /* set baud rate divisor */
12, 0,
14, 1, /* baud rate gen enable, src=rtxc */
11, 0x50, /* clocks = br gen */
5, 0xea, /* tx 8 bits, assert DTR & RTS */
4, 0x46, /* x16 clock, 1 stop */
3, 0xc1, /* rx enable, 8 bits */
};
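/*
 * The table is consumed as (register number, value) pairs: writing the
 * register number to the SCC command port selects it, and the next
 * write programs the value.
 */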
void udbg_scc_init(int force_scc)
{
const u32 *reg;
unsigned long addr;
struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
const char *path;
int i, x;
escc = of_find_node_by_name(NULL, "escc");
if (escc == NULL)
goto bail;
macio = of_get_parent(escc);
if (macio == NULL)
goto bail;
path = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (path != NULL)
stdout = of_find_node_by_path(path);
for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
if (ch == stdout)
ch_def = of_node_get(ch);
if (strcmp(ch->name, "ch-a") == 0)
ch_a = of_node_get(ch);
}
if (ch_def == NULL && !force_scc)
goto bail;
ch = ch_def ? ch_def : ch_a;
/* Get address within mac-io ASIC */
reg = of_get_property(escc, "reg", NULL);
if (reg == NULL)
goto bail;
addr = reg[0];
/* Get address of mac-io PCI itself */
reg = of_get_property(macio, "assigned-addresses", NULL);
if (reg == NULL)
goto bail;
addr += reg[2];
/* Lock the serial port */
pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
if (ch == ch_a)
addr += 0x20;
sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
sccc += addr & ~PAGE_MASK;
sccd = sccc + 0x10;
mb();
for (i = 20000; i != 0; --i)
x = in_8(sccc);
out_8(sccc, 0x09); /* reset A or B side */
out_8(sccc, 0xc0);
/* If SCC was the OF output port, read the BRG value, else
* Setup for 38400 or 57600 8N1 depending on the machine
*/
if (ch_def != NULL) {
out_8(sccc, 13);
scc_inittab[1] = in_8(sccc);
out_8(sccc, 12);
scc_inittab[3] = in_8(sccc);
} else if (of_machine_is_compatible("RackMac1,1")
|| of_machine_is_compatible("RackMac1,2")
|| of_machine_is_compatible("MacRISC4")) {
/* Xserves and G5s default to 57600 */
scc_inittab[1] = 0;
scc_inittab[3] = 0;
} else {
/* Others default to 38400 */
scc_inittab[1] = 0;
scc_inittab[3] = 1;
}
for (i = 0; i < sizeof(scc_inittab); ++i)
out_8(sccc, scc_inittab[i]);
udbg_putc = udbg_scc_putc;
udbg_getc = udbg_scc_getc;
udbg_getc_poll = udbg_scc_getc_poll;
udbg_puts("Hello World !\n");
bail:
of_node_put(macio);
of_node_put(escc);
of_node_put(stdout);
of_node_put(ch_def);
of_node_put(ch_a);
}
#ifdef CONFIG_PPC64
static void udbg_real_scc_putc(char c)
{
while ((real_readb(sccc) & SCC_TXRDY) == 0)
;
real_writeb(c, sccd);
if (c == '\n')
udbg_real_scc_putc('\r');
}
void __init udbg_init_pmac_realmode(void)
{
sccc = (volatile u8 __iomem *)0x80013020ul;
sccd = (volatile u8 __iomem *)0x80013030ul;
udbg_putc = udbg_real_scc_putc;
udbg_getc = NULL;
udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC64 */
| gpl-2.0 |
futuretekinc/cortina-kernel-2.6.36 | arch/mips/kernel/signal.c | 138 | 18490 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
#include <asm/vdso.h>
#include "signal-common.h"
static int (*save_fp_context)(struct sigcontext __user *sc);
static int (*restore_fp_context)(struct sigcontext __user *sc);
extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
struct sigcontext sf_sc;
sigset_t sf_mask;
};
struct rt_sigframe {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
struct siginfo rs_info;
struct ucontext rs_uc;
};
/*
* Helper routines
*/
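/*
 * The protected_{save,restore}_fp_context() helpers below perform the
 * actual FPU copy with the FPU owner lock held, where faulting on the
 * user sigcontext is not allowed.  If the copy fails, the loop touches
 * the first and last FPU words outside the locked region to fault the
 * page in, then retries.
 */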
static int protected_save_fp_context(struct sigcontext __user *sc)
{
int err;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(1);
err = save_fp_context(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &sc->sc_fpregs[0]) |
__put_user(0, &sc->sc_fpregs[31]) |
__put_user(0, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
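/*
 * The loop above relies on a common pre-fault-and-retry idiom: the FPU
 * save runs with the FPU owned in atomic context and so cannot service
 * page faults, hence on failure the user pages are touched from a
 * context that may sleep and the save is retried.  A minimal generic
 * sketch of the same idiom (illustrative only; not used by this file,
 * and it only pre-faults the first page):
 */
#if 0
static int copy_inatomic_with_prefault(void __user *dst, const void *src,
				       size_t len)
{
	unsigned long left;

	for (;;) {
		pagefault_disable();		/* enter atomic section */
		left = __copy_to_user_inatomic(dst, src, len);
		pagefault_enable();
		if (!left)
			return 0;		/* whole copy succeeded */
		/* Fault the page in from a context that may sleep. */
		if (put_user(0, (char __user *)dst))
			return -EFAULT;		/* genuinely bad address */
	}
}
#endif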
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
int err, tmp;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
err = restore_fp_context(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &sc->sc_fpregs[0]) |
__get_user(tmp, &sc->sc_fpregs[31]) |
__get_user(tmp, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int i;
unsigned int used_math;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __put_user(regs->acx, &sc->sc_acx);
#endif
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
err |= __put_user(mflo2(), &sc->sc_lo2);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
used_math = !!used_math();
err |= __put_user(used_math, &sc->sc_used_math);
if (used_math) {
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
err |= protected_save_fp_context(sc);
}
return err;
}
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
unsigned int csr, enabled;
err = __get_user(csr, fpcsr);
enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
/*
* If the signal handler set some FPU exceptions, clear them and
* send SIGFPE.
*/
if (csr & enabled) {
csr &= ~enabled;
err |= __put_user(csr, fpcsr);
sig = SIGFPE;
}
return err ?: sig;
}
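/*
 * Worked example of the test above (illustrative): on MIPS the FCSR
 * Cause field sits five bits above the Enable field, hence the << 5.
 * If the handler enabled divide-by-zero traps (Enable bit 10) and the
 * corresponding Cause bit (bit 15) is set, then
 *
 *	enabled = FPU_CSR_UNI_X | (BIT(10) << 5)
 *	csr & enabled != 0
 *
 * so the pending cause bits are cleared and SIGFPE is queued.
 */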
static int
check_and_restore_fp_context(struct sigcontext __user *sc)
{
int err, sig;
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= protected_restore_fp_context(sc);
return err ?: sig;
}
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned int used_math;
unsigned long treg;
int err = 0;
int i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __get_user(regs->acx, &sc->sc_acx);
#endif
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
err |= __get_user(used_math, &sc->sc_used_math);
conditional_used_math(used_math);
if (used_math) {
/* restore fpu context if we have used it before */
if (!err)
err = check_and_restore_fp_context(sc);
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);
}
return err;
}
void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp;
/* Default to using normal stack */
sp = regs->regs[29];
/*
* FPU emulator may have its own trampoline active just
* above the user stack, 16-bytes before the next lowest
* 16 byte boundary. Try to avoid trashing it.
*/
sp -= 32;
/* This is the X/Open sanctioned signal stack switching. */
if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}
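/*
 * Illustrative only: on o32, ALMASK rounds the frame base down to an
 * 8-byte boundary, e.g. with sp == 0x7fff1234 and frame_size == 0x98:
 *
 *	sp -= 32;			-> 0x7fff1214
 *	(0x7fff1214 - 0x98) & ~7UL	-> 0x7fff1178
 *
 * With ICACHE_REFILLS_WORKAROUND_WAR the mask instead widens to a full
 * I-cache line.
 */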
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
sigset_t newset;
sigset_t __user *uset;
uset = (sigset_t __user *) regs.regs[4];
if (copy_from_user(&newset, uset, sizeof(sigset_t)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
#endif
asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
sigset_t newset;
sigset_t __user *unewset;
size_t sigsetsize;
/* XXX Don't preclude handling different sized sigset_t's. */
sigsetsize = regs.regs[5];
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
unewset = (sigset_t __user *) regs.regs[4];
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
struct sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int err = 0;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
err |= __put_user(0, &oact->sa_mask.sig[3]);
if (err)
return -EFAULT;
}
return ret;
}
#endif
asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
stack_t __user *uoss = (stack_t __user *) regs.regs[5];
unsigned long usp = regs.regs[29];
return do_sigaltstack(uss, uoss, usp);
}
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct sigframe __user *frame;
sigset_t blocked;
int sig;
frame = (struct sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
goto badframe;
sigdelsetmask(&blocked, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = blocked;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sig = restore_sigcontext(&regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (®s));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe __user *frame;
sigset_t set;
int sig;
frame = (struct rt_sigframe __user *) regs.regs[29];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig, current);
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
do_sigaltstack(&frame->rs_uc.uc_stack, NULL, regs.regs[29]);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
:/* no outputs */
:"r" (®s));
/* Unreached */
badframe:
force_sig(SIGSEGV, current);
}
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct k_sigaction *ka,
struct pt_regs *regs, int signr, sigset_t *set)
{
struct sigframe __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
err |= setup_sigcontext(regs, &frame->sf_sc);
err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
if (err)
goto give_sigsegv;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to struct sigcontext
*
* $25 and c0_epc point to the signal handler, $29 points to the
* struct sigframe.
*/
regs->regs[ 4] = signr;
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
give_sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
#endif
static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
struct pt_regs *regs, int signr, sigset_t *set,
siginfo_t *info)
{
struct rt_sigframe __user *frame;
int err = 0;
frame = get_sigframe(ka, regs, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(NULL, &frame->rs_uc.uc_link);
err |= __put_user((void __user *)current->sas_ss_sp,
&frame->rs_uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->regs[29]),
&frame->rs_uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size,
&frame->rs_uc.uc_stack.ss_size);
err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe.
*/
regs->regs[ 4] = signr;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
give_sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
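/*
 * For reference, a minimal userspace counterpart that consumes the rt
 * frame laid out above (a sketch of plain sigaction(2) usage, not part
 * of this file; error handling elided):
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	/* a1 and a2 set up by setup_rt_frame() arrive here as
	 * info (rs_info) and ucontext (rs_uc). */
	printf("signal %d, si_code %d\n", sig, info->si_code);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;	/* select the rt frame path */
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}
#endif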
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
.setup_frame = setup_frame,
.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
.setup_rt_frame = setup_rt_frame,
.rt_signal_return_offset =
offsetof(struct mips_vdso, rt_signal_trampoline),
.restart = __NR_restart_syscall
};
static int handle_signal(unsigned long sig, siginfo_t *info,
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
int ret;
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
if (regs->regs[0]) {
switch(regs->regs[2]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
regs->regs[2] = EINTR;
break;
case ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->regs[2] = EINTR;
break;
}
/* fallthrough */
case ERESTARTNOINTR:
regs->regs[7] = regs->regs[26];
regs->regs[2] = regs->regs[0];
regs->cp0_epc -= 4;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
if (sig_uses_siginfo(ka))
ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
ka, regs, sig, oldset, info);
else
ret = abi->setup_frame(vdso + abi->signal_return_offset,
ka, regs, sig, oldset);
if (ret)
return ret;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return ret;
}
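/*
 * Worked example of the restart fixup above (illustrative): suppose a
 * read(2) was interrupted by a handler installed with SA_RESTART.
 * regs[0] still holds the original syscall number, so regs[2] = regs[0]
 * restores v0, regs[7] = regs[26] restores a3 from the slot where the
 * syscall entry path stashed it, and cp0_epc -= 4 backs up over the
 * 4-byte syscall instruction so it re-executes once the handler
 * returns.
 */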
static void do_signal(struct pt_regs *regs)
{
struct k_sigaction ka;
sigset_t *oldset;
siginfo_t info;
int signr;
/*
* We want the common case to go fast, which is why we may in certain
* cases get here from kernel mode. Just return without doing anything
* if so.
*/
if (!user_mode(regs))
return;
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
/*
* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TIF_RESTORE_SIGMASK flag.
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
return;
}
if (regs->regs[0]) {
if (regs->regs[2] == ERESTARTNOHAND ||
regs->regs[2] == ERESTARTSYS ||
regs->regs[2] == ERESTARTNOINTR) {
regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
regs->regs[2] = current->thread.abi->restart;
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
/*
* notification of userspace execution resumption
* - triggered by the TIF_WORK_MASK flags
*/
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
if (current->replacement_session_keyring)
key_replace_session_keyring();
}
}
#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _save_fp_context(sc)
: fpu_emulator_save_context(sc);
}
static int smp_restore_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _restore_fp_context(sc)
: fpu_emulator_restore_context(sc);
}
#endif
static int signal_setup(void)
{
#ifdef CONFIG_SMP
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
restore_fp_context = smp_restore_fp_context;
#else
if (cpu_has_fpu) {
save_fp_context = _save_fp_context;
restore_fp_context = _restore_fp_context;
} else {
save_fp_context = fpu_emulator_save_context;
restore_fp_context = fpu_emulator_restore_context;
}
#endif
return 0;
}
arch_initcall(signal_setup);
| gpl-2.0 |
loxK/kernel-hero | arch/x86/mach-generic/bigsmp.c | 138 | 1424 | /*
* APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
* Drives the local APIC in "clustered mode".
*/
#define APIC_DEFINITION 1
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <asm/mpspec.h>
#include <asm/genapic.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <asm/bigsmp/apicdef.h>
#include <linux/smp.h>
#include <asm/bigsmp/apic.h>
#include <asm/bigsmp/ipi.h>
#include <asm/mach-default/mach_mpparse.h>
#include <asm/mach-default/mach_wakecpu.h>
static int dmi_bigsmp; /* can be set by dmi scanners */
static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
dmi_bigsmp = 1;
return 0;
}
static const struct dmi_system_id bigsmp_dmi_table[] = {
{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
},
{ hp_ht_bigsmp, "HP ProLiant DL740",
{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
},
{ }
};
static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{
cpus_clear(*retmask);
cpu_set(cpu, *retmask);
}
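/*
 * Equivalent form using the current cpumask API (a sketch only; this
 * file predates the cpumask_* conversion):
 */
#if 0
static void vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);	/* one CPU per vector domain */
}
#endif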
static int probe_bigsmp(void)
{
if (def_to_bigsmp)
dmi_bigsmp = 1;
else
dmi_check_system(bigsmp_dmi_table);
return dmi_bigsmp;
}
struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp);
| gpl-2.0 |
fledermaus/steamos_kernel | net/bluetooth/rfcomm/sock.c | 650 | 22429 | /*
RFCOMM implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* RFCOMM sockets.
*/
#include <linux/export.h>
#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/rfcomm.h>
static const struct proto_ops rfcomm_sock_ops;
static struct bt_sock_list rfcomm_sk_list = {
.lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock)
};
static void rfcomm_sock_close(struct sock *sk);
static void rfcomm_sock_kill(struct sock *sk);
/* ---- DLC callbacks ----
*
* called under rfcomm_dlc_lock()
*/
static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
{
struct sock *sk = d->owner;
if (!sk)
return;
atomic_add(skb->len, &sk->sk_rmem_alloc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
}
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
struct sock *sk = d->owner, *parent;
unsigned long flags;
if (!sk)
return;
BT_DBG("dlc %p state %ld err %d", d, d->state, err);
local_irq_save(flags);
bh_lock_sock(sk);
if (err)
sk->sk_err = err;
sk->sk_state = d->state;
parent = bt_sk(sk)->parent;
if (parent) {
if (d->state == BT_CLOSED) {
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
}
parent->sk_data_ready(parent, 0);
} else {
if (d->state == BT_CONNECTED)
rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
sk->sk_state_change(sk);
}
bh_unlock_sock(sk);
local_irq_restore(flags);
if (parent && sock_flag(sk, SOCK_ZAPPED)) {
/* We have to drop DLC lock here, otherwise
* rfcomm_sock_destruct() will dead lock. */
rfcomm_dlc_unlock(d);
rfcomm_sock_kill(sk);
rfcomm_dlc_lock(d);
}
}
/* ---- Socket functions ---- */
static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL;
sk_for_each(sk, &rfcomm_sk_list.head) {
if (rfcomm_pi(sk)->channel == channel &&
!bacmp(&bt_sk(sk)->src, src))
break;
}
return sk ? sk : NULL;
}
/* Find socket with channel and source bdaddr.
* Returns closest match.
*/
static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
if (state && sk->sk_state != state)
continue;
if (rfcomm_pi(sk)->channel == channel) {
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
read_unlock(&rfcomm_sk_list.lock);
return sk ? sk : sk1;
}
static void rfcomm_sock_destruct(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p dlc %p", sk, d);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
rfcomm_dlc_lock(d);
rfcomm_pi(sk)->dlc = NULL;
/* Detach DLC if it's owned by this socket */
if (d->owner == sk)
d->owner = NULL;
rfcomm_dlc_unlock(d);
rfcomm_dlc_put(d);
}
static void rfcomm_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted dlcs */
while ((sk = bt_accept_dequeue(parent, NULL))) {
rfcomm_sock_close(sk);
rfcomm_sock_kill(sk);
}
parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void rfcomm_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
static void __rfcomm_sock_close(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
rfcomm_sock_cleanup_listen(sk);
break;
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
case BT_CONNECTED:
rfcomm_dlc_close(d, 0);
default:
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
}
/* Close socket.
* Must be called on unlocked socket.
*/
static void rfcomm_sock_close(struct sock *sk)
{
lock_sock(sk);
__rfcomm_sock_close(sk);
release_sock(sk);
}
static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
{
struct rfcomm_pinfo *pi = rfcomm_pi(sk);
BT_DBG("sk %p", sk);
if (parent) {
sk->sk_type = parent->sk_type;
pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
&bt_sk(parent)->flags);
pi->sec_level = rfcomm_pi(parent)->sec_level;
pi->role_switch = rfcomm_pi(parent)->role_switch;
security_sk_clone(parent, sk);
} else {
pi->dlc->defer_setup = 0;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
}
pi->dlc->sec_level = pi->sec_level;
pi->dlc->role_switch = pi->role_switch;
}
static struct proto rfcomm_proto = {
.name = "RFCOMM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct rfcomm_pinfo)
};
static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
struct rfcomm_dlc *d;
struct sock *sk;
sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
if (!sk)
return NULL;
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
d = rfcomm_dlc_alloc(prio);
if (!d) {
sk_free(sk);
return NULL;
}
d->data_ready = rfcomm_sk_data_ready;
d->state_change = rfcomm_sk_state_change;
rfcomm_pi(sk)->dlc = d;
d->owner = sk;
sk->sk_destruct = rfcomm_sock_destruct;
sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;
sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
bt_sock_link(&rfcomm_sk_list, sk);
BT_DBG("sk %p", sk);
return sk;
}
static int rfcomm_sock_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sock->ops = &rfcomm_sock_ops;
sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
if (!sk)
return -ENOMEM;
rfcomm_sock_init(sk, NULL);
return 0;
}
static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
write_lock(&rfcomm_sk_list.lock);
if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
sk->sk_state = BT_BOUND;
}
write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int err = 0;
BT_DBG("sk %p", sk);
if (alen < sizeof(struct sockaddr_rc) ||
addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
sk->sk_state = BT_CONNECT;
bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = sa->rc_channel;
d->sec_level = rfcomm_pi(sk)->sec_level;
d->role_switch = rfcomm_pi(sk)->role_switch;
err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
if (!err)
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->sk_state != BT_BOUND) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
if (!rfcomm_pi(sk)->channel) {
bdaddr_t *src = &bt_sk(sk)->src;
u8 channel;
err = -EINVAL;
write_lock(&rfcomm_sk_list.lock);
for (channel = 1; channel < 31; channel++)
if (!__rfcomm_get_sock_by_addr(channel, src)) {
rfcomm_pi(sk)->channel = channel;
err = 0;
break;
}
write_unlock(&rfcomm_sk_list.lock);
if (err < 0)
goto done;
}
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (sk->sk_state != BT_LISTEN) {
err = -EBADFD;
break;
}
nsk = bt_accept_dequeue(sk, newsock);
if (nsk)
break;
if (!timeo) {
err = -EAGAIN;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
*len = sizeof(struct sockaddr_rc);
return 0;
}
static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
struct sk_buff *skb;
int sent = 0;
if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
return -ENOTCONN;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
BT_DBG("sock %p, sk %p", sock, sk);
lock_sock(sk);
while (len) {
size_t size = min_t(size_t, len, d->mtu);
int err;
skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
if (sent == 0)
sent = err;
break;
}
skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
skb->priority = sk->sk_priority;
err = rfcomm_dlc_send(d, skb);
if (err < 0) {
kfree_skb(skb);
if (sent == 0)
sent = err;
break;
}
sent += size;
len -= size;
}
release_sock(sk);
return sent;
}
static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int len;
if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
rfcomm_dlc_accept(d);
return 0;
}
len = bt_sock_stream_recvmsg(iocb, sock, msg, size, flags);
lock_sock(sk);
if (!(flags & MSG_PEEK) && len > 0)
atomic_sub(len, &sk->sk_rmem_alloc);
if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
release_sock(sk);
return len;
}
static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt & RFCOMM_LM_AUTH)
rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & RFCOMM_LM_ENCRYPT)
rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & RFCOMM_LM_SECURE)
rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;
rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int err = 0;
size_t len;
u32 opt;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = BT_SECURITY_LOW;
len = min_t(unsigned int, sizeof(sec), optlen);
if (copy_from_user((char *) &sec, optval, len)) {
err = -EFAULT;
break;
}
if (sec.level > BT_SECURITY_HIGH) {
err = -EINVAL;
break;
}
rfcomm_pi(sk)->sec_level = sec.level;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (get_user(opt, (u32 __user *) optval)) {
err = -EFAULT;
break;
}
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
else
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct rfcomm_conninfo cinfo;
struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
int len, err = 0;
u32 opt;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case RFCOMM_LM:
switch (rfcomm_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = RFCOMM_LM_AUTH;
break;
case BT_SECURITY_MEDIUM:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
break;
case BT_SECURITY_HIGH:
opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
RFCOMM_LM_SECURE;
break;
default:
opt = 0;
break;
}
if (rfcomm_pi(sk)->role_switch)
opt |= RFCOMM_LM_MASTER;
if (put_user(opt, (u32 __user *) optval))
err = -EFAULT;
break;
case RFCOMM_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!rfcomm_pi(sk)->dlc->defer_setup) {
err = -ENOTCONN;
break;
}
memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = conn->hcon->handle;
memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct bt_security sec;
int len, err = 0;
BT_DBG("sk %p", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case BT_SECURITY:
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
break;
}
sec.level = rfcomm_pi(sk)->sec_level;
sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
err = -EFAULT;
break;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
err = -EINVAL;
break;
}
if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
(u32 __user *) optval))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
return err;
}
static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk __maybe_unused = sock->sk;
int err;
BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
err = bt_sock_ioctl(sock, cmd, arg);
if (err == -ENOIOCTLCMD) {
#ifdef CONFIG_BT_RFCOMM_TTY
lock_sock(sk);
err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
release_sock(sk);
#else
err = -EOPNOTSUPP;
#endif
}
return err;
}
static int rfcomm_sock_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
__rfcomm_sock_close(sk);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
!(current->flags & PF_EXITING))
err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
}
release_sock(sk);
return err;
}
static int rfcomm_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
err = rfcomm_sock_shutdown(sock, 2);
sock_orphan(sk);
rfcomm_sock_kill(sk);
return err;
}
/* ---- RFCOMM core layer callbacks ----
*
* called under rfcomm_lock()
*/
int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d)
{
struct sock *sk, *parent;
bdaddr_t src, dst;
int result = 0;
BT_DBG("session %p channel %d", s, channel);
rfcomm_session_getaddr(s, &src, &dst);
/* Check if we have socket listening on channel */
parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src);
if (!parent)
return 0;
bh_lock_sock(parent);
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
goto done;
}
sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
if (!sk)
goto done;
bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
rfcomm_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, &src);
bacpy(&bt_sk(sk)->dst, &dst);
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
bt_accept_enqueue(parent, sk);
/* Accept connection and return socket DLC */
*d = rfcomm_pi(sk)->dlc;
result = 1;
done:
bh_unlock_sock(parent);
if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
parent->sk_state_change(parent);
return result;
}
static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
{
struct sock *sk;
read_lock(&rfcomm_sk_list.lock);
sk_for_each(sk, &rfcomm_sk_list.head) {
seq_printf(f, "%pMR %pMR %d %d\n",
&bt_sk(sk)->src, &bt_sk(sk)->dst,
sk->sk_state, rfcomm_pi(sk)->channel);
}
read_unlock(&rfcomm_sk_list.lock);
return 0;
}
static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
}
static const struct file_operations rfcomm_sock_debugfs_fops = {
.open = rfcomm_sock_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static struct dentry *rfcomm_sock_debugfs;
static const struct proto_ops rfcomm_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = rfcomm_sock_release,
.bind = rfcomm_sock_bind,
.connect = rfcomm_sock_connect,
.listen = rfcomm_sock_listen,
.accept = rfcomm_sock_accept,
.getname = rfcomm_sock_getname,
.sendmsg = rfcomm_sock_sendmsg,
.recvmsg = rfcomm_sock_recvmsg,
.shutdown = rfcomm_sock_shutdown,
.setsockopt = rfcomm_sock_setsockopt,
.getsockopt = rfcomm_sock_getsockopt,
.ioctl = rfcomm_sock_ioctl,
.poll = bt_sock_poll,
.socketpair = sock_no_socketpair,
.mmap = sock_no_mmap
};
static const struct net_proto_family rfcomm_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = rfcomm_sock_create
};
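/*
 * Userspace sketch of the API these ops back (illustrative; assumes the
 * BlueZ headers, a reachable peer address and server channel 1, error
 * handling elided):
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

static int rfcomm_client_example(const char *dest)
{
	struct sockaddr_rc addr;
	int s = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

	memset(&addr, 0, sizeof(addr));
	addr.rc_family = AF_BLUETOOTH;
	addr.rc_channel = 1;			/* assumed server channel */
	str2ba(dest, &addr.rc_bdaddr);		/* "XX:XX:XX:XX:XX:XX" */

	/* exercises rfcomm_sock_connect() above */
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -1;
	}
	write(s, "hello", 5);			/* rfcomm_sock_sendmsg() */
	return s;
}
#endif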
int __init rfcomm_init_sockets(void)
{
int err;
err = proto_register(&rfcomm_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
if (err < 0) {
BT_ERR("RFCOMM socket layer registration failed");
goto error;
}
err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL);
if (err < 0) {
BT_ERR("Failed to create RFCOMM proc file");
bt_sock_unregister(BTPROTO_RFCOMM);
goto error;
}
if (bt_debugfs) {
rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
if (!rfcomm_sock_debugfs)
BT_ERR("Failed to create RFCOMM debug file");
}
BT_INFO("RFCOMM socket layer initialized");
return 0;
error:
proto_unregister(&rfcomm_proto);
return err;
}
void __exit rfcomm_cleanup_sockets(void)
{
bt_procfs_cleanup(&init_net, "rfcomm");
debugfs_remove(rfcomm_sock_debugfs);
bt_sock_unregister(BTPROTO_RFCOMM);
proto_unregister(&rfcomm_proto);
}
| gpl-2.0 |
estiko/android_kernel_lenovo_armani | drivers/crypto/msm/ota_crypto.c | 906 | 16488 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* Qualcomm Over the Air (OTA) Crypto driver */
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/qcota.h>
#include "qce.h"
#include "qce_ota.h"
enum qce_ota_oper_enum {
QCE_OTA_F8_OPER = 0,
QCE_OTA_MPKT_F8_OPER = 1,
QCE_OTA_F9_OPER = 2,
QCE_OTA_OPER_LAST
};
struct ota_dev_control;
struct ota_async_req {
struct list_head list;
struct completion complete;
int err;
enum qce_ota_oper_enum op;
union {
struct qce_f9_req f9_req;
struct qce_f8_req f8_req;
struct qce_f8_multi_pkt_req f8_mp_req;
} req;
struct ota_dev_control *podev;
};
/*
* Register ourselves as a misc device to be able to access the ota
* from userspace.
*/
#define QCOTA_DEV "qcota"
struct ota_dev_control {
/* misc device */
struct miscdevice miscdevice;
/* qce handle */
void *qce;
/* platform device */
struct platform_device *pdev;
unsigned magic;
struct list_head ready_commands;
struct ota_async_req *active_command;
spinlock_t lock;
struct tasklet_struct done_tasklet;
};
#define OTA_MAGIC 0x4f544143
static long qcota_ioctl(struct file *file,
unsigned cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_dev_control *podev);
static const struct file_operations qcota_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = qcota_ioctl,
.open = qcota_open,
.release = qcota_release,
};
static struct ota_dev_control qcota_dev[] = {
{
.miscdevice = {
.minor = MISC_DYNAMIC_MINOR,
.name = "qcota0",
.fops = &qcota_fops,
},
.magic = OTA_MAGIC,
},
{
.miscdevice = {
.minor = MISC_DYNAMIC_MINOR,
.name = "qcota1",
.fops = &qcota_fops,
},
.magic = OTA_MAGIC,
},
{
.miscdevice = {
.minor = MISC_DYNAMIC_MINOR,
.name = "qcota2",
.fops = &qcota_fops,
},
.magic = OTA_MAGIC,
}
};
#define MAX_OTA_DEVICE ARRAY_SIZE(qcota_dev)
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024
struct qcota_stat {
u32 f8_req;
u32 f8_mp_req;
u32 f9_req;
u32 f8_op_success;
u32 f8_op_fail;
u32 f8_mp_op_success;
u32 f8_mp_op_fail;
u32 f9_op_success;
u32 f9_op_fail;
};
static struct qcota_stat _qcota_stat[MAX_OTA_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcota[MAX_OTA_DEVICE];
static struct ota_dev_control *qcota_minor_to_control(unsigned n)
{
int i;
for (i = 0; i < MAX_OTA_DEVICE; i++) {
if (qcota_dev[i].miscdevice.minor == n)
return &qcota_dev[i];
}
return NULL;
}
static int qcota_open(struct inode *inode, struct file *file)
{
struct ota_dev_control *podev;
podev = qcota_minor_to_control(MINOR(inode->i_rdev));
if (podev == NULL) {
pr_err("%s: no such device %d\n", __func__,
MINOR(inode->i_rdev));
return -ENOENT;
}
file->private_data = podev;
return 0;
}
static int qcota_release(struct inode *inode, struct file *file)
{
struct ota_dev_control *podev;
podev = file->private_data;
if (podev != NULL && podev->magic != OTA_MAGIC) {
pr_err("%s: invalid handle %p\n",
__func__, podev);
}
file->private_data = NULL;
return 0;
}
static void req_done(unsigned long data)
{
struct ota_dev_control *podev = (struct ota_dev_control *)data;
struct ota_async_req *areq;
unsigned long flags;
struct ota_async_req *new_req = NULL;
int ret = 0;
spin_lock_irqsave(&podev->lock, flags);
areq = podev->active_command;
podev->active_command = NULL;
again:
if (!list_empty(&podev->ready_commands)) {
new_req = container_of(podev->ready_commands.next,
struct ota_async_req, list);
list_del(&new_req->list);
podev->active_command = new_req;
new_req->err = 0;
ret = start_req(podev);
}
spin_unlock_irqrestore(&podev->lock, flags);
if (areq)
complete(&areq->complete);
if (new_req && ret) {
complete(&new_req->complete);
spin_lock_irqsave(&podev->lock, flags);
podev->active_command = NULL;
areq = NULL;
ret = 0;
new_req = NULL;
goto again;
}
return;
}
static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
int ret)
{
struct ota_async_req *areq = (struct ota_async_req *) cookie;
struct ota_dev_control *podev;
struct qcota_stat *pstat;
podev = areq->podev;
pstat = &_qcota_stat[podev->pdev->id];
areq->req.f9_req.mac_i = (uint32_t) icv;
if (ret)
areq->err = -ENXIO;
else
areq->err = 0;
tasklet_schedule(&podev->done_tasklet);
};
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
int ret)
{
struct ota_async_req *areq = (struct ota_async_req *) cookie;
struct ota_dev_control *podev;
struct qcota_stat *pstat;
podev = areq->podev;
pstat = &_qcota_stat[podev->pdev->id];
if (ret)
areq->err = -ENXIO;
else
areq->err = 0;
tasklet_schedule(&podev->done_tasklet);
};
static int start_req(struct ota_dev_control *podev)
{
struct ota_async_req *areq;
struct qce_f9_req *pf9;
struct qce_f8_multi_pkt_req *p_mp_f8;
struct qce_f8_req *pf8;
int ret = 0;
/* start the command on the podev->active_command */
areq = podev->active_command;
areq->podev = podev;
switch (areq->op) {
case QCE_OTA_F8_OPER:
pf8 = &areq->req.f8_req;
ret = qce_f8_req(podev->qce, pf8, areq, f8_cb);
break;
case QCE_OTA_MPKT_F8_OPER:
p_mp_f8 = &areq->req.f8_mp_req;
ret = qce_f8_multi_pkt_req(podev->qce, p_mp_f8, areq, f8_cb);
break;
case QCE_OTA_F9_OPER:
pf9 = &areq->req.f9_req;
ret = qce_f9_req(podev->qce, pf9, areq, f9_cb);
break;
default:
ret = -ENOTSUPP;
break;
};
areq->err = ret;
return ret;
};
static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
{
unsigned long flags;
int ret = 0;
struct qcota_stat *pstat;
areq->err = 0;
spin_lock_irqsave(&podev->lock, flags);
if (podev->active_command == NULL) {
podev->active_command = areq;
ret = start_req(podev);
} else {
list_add_tail(&areq->list, &podev->ready_commands);
}
if (ret != 0)
podev->active_command = NULL;
spin_unlock_irqrestore(&podev->lock, flags);
if (ret == 0)
wait_for_completion(&areq->complete);
pstat = &_qcota_stat[podev->pdev->id];
switch (areq->op) {
case QCE_OTA_F8_OPER:
if (areq->err)
pstat->f8_op_fail++;
else
pstat->f8_op_success++;
break;
case QCE_OTA_MPKT_F8_OPER:
if (areq->err)
pstat->f8_mp_op_fail++;
else
pstat->f8_mp_op_success++;
break;
case QCE_OTA_F9_OPER:
default:
if (areq->err)
pstat->f9_op_fail++;
else
pstat->f9_op_success++;
break;
};
return areq->err;
};
static long qcota_ioctl(struct file *file,
unsigned cmd, unsigned long arg)
{
int err = 0;
struct ota_dev_control *podev;
uint8_t *user_src;
uint8_t *user_dst;
uint8_t *k_buf = NULL;
struct ota_async_req areq;
uint32_t total;
struct qcota_stat *pstat;
podev = file->private_data;
if (podev == NULL || podev->magic != OTA_MAGIC) {
pr_err("%s: invalid handle %p\n",
__func__, podev);
return -ENOENT;
}
/* Verify user arguments. */
if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
return -ENOTTY;
init_completion(&areq.complete);
pstat = &_qcota_stat[podev->pdev->id];
switch (cmd) {
case QCOTA_F9_REQ:
if (!access_ok(VERIFY_WRITE, (void __user *)arg,
sizeof(struct qce_f9_req)))
return -EFAULT;
if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
sizeof(struct qce_f9_req)))
return -EFAULT;
user_src = areq.req.f9_req.message;
if (!access_ok(VERIFY_READ, (void __user *)user_src,
areq.req.f9_req.msize))
return -EFAULT;
k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
if (k_buf == NULL)
return -ENOMEM;
if (__copy_from_user(k_buf, (void __user *)user_src,
areq.req.f9_req.msize)) {
kfree(k_buf);
return -EFAULT;
}
areq.req.f9_req.message = k_buf;
areq.op = QCE_OTA_F9_OPER;
pstat->f9_req++;
err = submit_req(&areq, podev);
areq.req.f9_req.message = user_src;
if (err == 0 && __copy_to_user((void __user *)arg,
&areq.req.f9_req, sizeof(struct qce_f9_req))) {
err = -EFAULT;
}
kfree(k_buf);
break;
case QCOTA_F8_REQ:
if (!access_ok(VERIFY_WRITE, (void __user *)arg,
sizeof(struct qce_f8_req)))
return -EFAULT;
if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
sizeof(struct qce_f8_req)))
return -EFAULT;
total = areq.req.f8_req.data_len;
user_src = areq.req.f8_req.data_in;
if (user_src != NULL) {
if (!access_ok(VERIFY_READ, (void __user *)
user_src, total))
return -EFAULT;
};
user_dst = areq.req.f8_req.data_out;
if (!access_ok(VERIFY_WRITE, (void __user *)
user_dst, total))
return -EFAULT;
k_buf = kmalloc(total, GFP_KERNEL);
if (k_buf == NULL)
return -ENOMEM;
/* k_buf returned from kmalloc should be cache line aligned */
if (user_src && __copy_from_user(k_buf,
(void __user *)user_src, total)) {
kfree(k_buf);
return -EFAULT;
}
if (user_src)
areq.req.f8_req.data_in = k_buf;
else
areq.req.f8_req.data_in = NULL;
areq.req.f8_req.data_out = k_buf;
areq.op = QCE_OTA_F8_OPER;
pstat->f8_req++;
err = submit_req(&areq, podev);
if (err == 0 && __copy_to_user(user_dst, k_buf, total))
err = -EFAULT;
kfree(k_buf);
break;
case QCOTA_F8_MPKT_REQ:
if (!access_ok(VERIFY_WRITE, (void __user *)arg,
sizeof(struct qce_f8_multi_pkt_req)))
return -EFAULT;
if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
sizeof(struct qce_f8_multi_pkt_req)))
return -EFAULT;
total = areq.req.f8_mp_req.num_pkt *
areq.req.f8_mp_req.qce_f8_req.data_len;
user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
if (!access_ok(VERIFY_READ, (void __user *)
user_src, total))
return -EFAULT;
user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
if (!access_ok(VERIFY_WRITE, (void __user *)
user_dst, total))
return -EFAULT;
k_buf = kmalloc(total, GFP_KERNEL);
if (k_buf == NULL)
return -ENOMEM;
/* k_buf returned from kmalloc should be cache line aligned */
if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
kfree(k_buf);
return -EFAULT;
}
areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
areq.op = QCE_OTA_MPKT_F8_OPER;
pstat->f8_mp_req++;
err = submit_req(&areq, podev);
if (err == 0 && __copy_to_user(user_dst, k_buf, total))
err = -EFAULT;
kfree(k_buf);
break;
default:
return -ENOTTY;
}
return err;
}
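/*
 * Userspace sketch of driving the F9 path above (illustrative; the
 * request layout comes from <linux/qcota.h>, the message contents and
 * remaining request fields are placeholders, error handling elided):
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/qcota.h>

static int qcota_f9_example(void)
{
	struct qce_f9_req req;
	unsigned char msg[64] = { 0 };
	int fd = open("/dev/qcota0", O_RDWR);

	memset(&req, 0, sizeof(req));
	req.message = msg;		/* copied into k_buf by the ioctl */
	req.msize = sizeof(msg);
	/* key, fresh, count and direction fields elided */

	return ioctl(fd, QCOTA_F9_REQ, &req);	/* MAC returned in req */
}
#endif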
static int qcota_probe(struct platform_device *pdev)
{
void *handle = NULL;
int rc = 0;
struct ota_dev_control *podev;
struct ce_hw_support ce_support;
if (pdev->id >= MAX_OTA_DEVICE) {
pr_err("%s: device id %d exceeds allowed %d\n",
__func__, pdev->id, MAX_OTA_DEVICE);
return -ENOENT;
}
podev = &qcota_dev[pdev->id];
INIT_LIST_HEAD(&podev->ready_commands);
podev->active_command = NULL;
spin_lock_init(&podev->lock);
tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
/* open qce */
handle = qce_open(pdev, &rc);
if (handle == NULL) {
pr_err("%s: device id %d, can not open qce\n",
__func__, pdev->id);
platform_set_drvdata(pdev, NULL);
return rc;
}
if (qce_hw_support(handle, &ce_support) < 0 ||
ce_support.ota == false) {
pr_err("%s: device id %d, qce does not support ota capability\n",
__func__, pdev->id);
rc = -ENODEV;
goto err;
}
podev->qce = handle;
podev->pdev = pdev;
platform_set_drvdata(pdev, podev);
rc = misc_register(&podev->miscdevice);
if (rc < 0)
goto err;
return 0;
err:
if (handle)
qce_close(handle);
platform_set_drvdata(pdev, NULL);
podev->qce = NULL;
podev->pdev = NULL;
return rc;
};
static int qcota_remove(struct platform_device *pdev)
{
struct ota_dev_control *podev;
podev = platform_get_drvdata(pdev);
if (!podev)
return 0;
if (podev->qce)
qce_close(podev->qce);
if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
misc_deregister(&podev->miscdevice);
tasklet_kill(&podev->done_tasklet);
return 0;
};
static struct platform_driver qcota_plat_driver = {
.probe = qcota_probe,
.remove = qcota_remove,
.driver = {
.name = "qcota",
.owner = THIS_MODULE,
},
};
static int _disp_stats(int id)
{
struct qcota_stat *pstat;
int len = 0;
pstat = &_qcota_stat[id];
len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
"\nQualcomm OTA crypto accelerator %d Statistics:\n",
id + 1);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 request : %d\n",
pstat->f8_req);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 operation success : %d\n",
pstat->f8_op_success);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 operation fail : %d\n",
pstat->f8_op_fail);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP request : %d\n",
pstat->f8_mp_req);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP operation success: %d\n",
pstat->f8_mp_op_success);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP operation fail : %d\n",
pstat->f8_mp_op_fail);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 request : %d\n",
pstat->f9_req);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 operation success : %d\n",
pstat->f9_op_success);
len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 operation fail : %d\n",
pstat->f9_op_fail);
return len;
}
static int _debug_stats_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
int rc = -EINVAL;
int qcota = *((int *) file->private_data);
int len;
len = _disp_stats(qcota);
rc = simple_read_from_buffer((void __user *) buf, len,
ppos, (void *) _debug_read_buf, len);
return rc;
}
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int qcota = *((int *) file->private_data);
memset((char *)&_qcota_stat[qcota], 0, sizeof(struct qcota_stat));
return count;
};
static const struct file_operations _debug_stats_ops = {
.open = _debug_stats_open,
.read = _debug_stats_read,
.write = _debug_stats_write,
};
static int _qcota_debug_init(void)
{
int rc;
char name[DEBUG_MAX_FNAME];
int i;
struct dentry *dent;
_debug_dent = debugfs_create_dir("qcota", NULL);
if (IS_ERR(_debug_dent)) {
pr_err("qcota debugfs_create_dir fail, error %ld\n",
PTR_ERR(_debug_dent));
return PTR_ERR(_debug_dent);
}
for (i = 0; i < MAX_OTA_DEVICE; i++) {
snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
_debug_qcota[i] = i;
dent = debugfs_create_file(name, 0644, _debug_dent,
&_debug_qcota[i], &_debug_stats_ops);
if (dent == NULL) {
pr_err("qcota debugfs_create_file fail, error %ld\n",
PTR_ERR(dent));
rc = PTR_ERR(dent);
goto err;
}
}
return 0;
err:
debugfs_remove_recursive(_debug_dent);
return rc;
}
static int __init qcota_init(void)
{
int rc;
rc = _qcota_debug_init();
if (rc)
return rc;
return platform_driver_register(&qcota_plat_driver);
}
static void __exit qcota_exit(void)
{
debugfs_remove_recursive(_debug_dent);
platform_driver_unregister(&qcota_plat_driver);
}
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm Ota Crypto driver");
MODULE_VERSION("1.01");
module_init(qcota_init);
module_exit(qcota_exit);
| gpl-2.0 |
TheBootloader/android_kernel_samsung_msm8930-common | drivers/net/ethernet/sfc/siena_sriov.c | 3466 | 45705 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2010-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
#include "regs.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
#define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
/**
* enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
* @VF_TX_FILTER_OFF: Disabled
* @VF_TX_FILTER_AUTO: Enabled if MAC address assigned to VF and only
* 2 TX queues allowed per VF.
* @VF_TX_FILTER_ON: Enabled
*/
enum efx_vf_tx_filter_mode {
VF_TX_FILTER_OFF,
VF_TX_FILTER_AUTO,
VF_TX_FILTER_ON,
};
/**
* struct efx_vf - Back-end resource and protocol state for a PCI VF
* @efx: The Efx NIC owning this VF
* @pci_rid: The PCI requester ID for this VF
* @pci_name: The PCI name (formatted address) of this VF
* @index: Index of VF within its port and PF.
* @req: VFDI incoming request work item. Incoming USR_EV events are received
* by the NAPI handler, but must be handled by executing MCDI requests
* inside a work item.
* @req_addr: VFDI incoming request DMA address (in VF's PCI address space).
* @req_type: Expected next incoming (from VF) %VFDI_EV_TYPE member.
* @req_seqno: Expected next incoming (from VF) %VFDI_EV_SEQ member.
* @msg_seqno: Next %VFDI_EV_SEQ member to reply to VF. Protected by
* @status_lock
* @busy: VFDI request queued to be processed or being processed. Receiving
* a VFDI request when @busy is set is an error condition.
* @buf: Incoming VFDI requests are DMA from the VF into this buffer.
* @buftbl_base: Buffer table entries for this VF start at this index.
* @rx_filtering: Receive filtering has been requested by the VF driver.
* @rx_filter_flags: The flags sent in the %VFDI_OP_INSERT_FILTER request.
* @rx_filter_qid: VF relative qid for RX filter requested by VF.
* @rx_filter_id: Receive MAC filter ID. Only one filter per VF is supported.
* @tx_filter_mode: Transmit MAC filtering mode.
* @tx_filter_id: Transmit MAC filter ID.
* @addr: The MAC address and outer vlan tag of the VF.
* @status_addr: VF DMA address of page for &struct vfdi_status updates.
* @status_lock: Mutex protecting @msg_seqno, @status_addr, @addr,
* @peer_page_addrs and @peer_page_count from simultaneous
* updates by the VM and consumption by
* efx_sriov_update_vf_addr()
* @peer_page_addrs: Pointer to an array of guest pages for local addresses.
* @peer_page_count: Number of entries in @peer_page_addrs.
* @evq0_addrs: Array of guest pages backing evq0.
* @evq0_count: Number of entries in @evq0_addrs.
* @flush_waitq: wait queue used by %VFDI_OP_FINI_ALL_QUEUES handler
* to wait for flush completions.
* @txq_lock: Mutex for TX queue allocation.
* @txq_mask: Mask of initialized transmit queues.
* @txq_count: Number of initialized transmit queues.
* @rxq_mask: Mask of initialized receive queues.
* @rxq_count: Number of initialized receive queues.
* @rxq_retry_mask: Mask of receive queues that need to be flushed again
* due to flush failure.
* @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
* @reset_work: Work item to schedule a VF reset.
*/
struct efx_vf {
struct efx_nic *efx;
unsigned int pci_rid;
char pci_name[13]; /* dddd:bb:dd.f */
unsigned int index;
struct work_struct req;
u64 req_addr;
int req_type;
unsigned req_seqno;
unsigned msg_seqno;
bool busy;
struct efx_buffer buf;
unsigned buftbl_base;
bool rx_filtering;
enum efx_filter_flags rx_filter_flags;
unsigned rx_filter_qid;
int rx_filter_id;
enum efx_vf_tx_filter_mode tx_filter_mode;
int tx_filter_id;
struct vfdi_endpoint addr;
u64 status_addr;
struct mutex status_lock;
u64 *peer_page_addrs;
unsigned peer_page_count;
u64 evq0_addrs[EFX_MAX_VF_EVQ_SIZE * sizeof(efx_qword_t) /
EFX_BUF_SIZE];
unsigned evq0_count;
wait_queue_head_t flush_waitq;
struct mutex txq_lock;
unsigned long txq_mask[VI_MASK_LENGTH];
unsigned txq_count;
unsigned long rxq_mask[VI_MASK_LENGTH];
unsigned rxq_count;
unsigned long rxq_retry_mask[VI_MASK_LENGTH];
atomic_t rxq_retry_count;
struct work_struct reset_work;
};
struct efx_memcpy_req {
unsigned int from_rid;
void *from_buf;
u64 from_addr;
unsigned int to_rid;
u64 to_addr;
unsigned length;
};
/**
* struct efx_local_addr - A MAC address on the vswitch without a VF.
*
* Siena does not have a switch, so VFs can't transmit data to each
* other. Instead the VFs must be made aware of the local addresses
* on the vswitch, so that they can arrange for an alternative
* software datapath to be used.
*
* @link: List head for insertion into efx->local_addr_list.
* @addr: Ethernet address
*/
struct efx_local_addr {
struct list_head link;
u8 addr[ETH_ALEN];
};
/**
* struct efx_endpoint_page - Page of vfdi_endpoint structures
*
* @link: List head for insertion into efx->local_page_list.
* @ptr: Pointer to page.
* @addr: DMA address of page.
*/
struct efx_endpoint_page {
struct list_head link;
void *ptr;
dma_addr_t addr;
};
/* Buffer table entries are reserved txq0,rxq0,evq0,txq1,rxq1,evq1 */
#define EFX_BUFTBL_TXQ_BASE(_vf, _qid) \
((_vf)->buftbl_base + EFX_VF_BUFTBL_PER_VI * (_qid))
#define EFX_BUFTBL_RXQ_BASE(_vf, _qid) \
(EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
(EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
#define EFX_BUFTBL_EVQ_BASE(_vf, _qid) \
(EFX_BUFTBL_TXQ_BASE(_vf, _qid) + \
(2 * EFX_MAX_DMAQ_SIZE * sizeof(efx_qword_t) / EFX_BUF_SIZE))
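/* Worked example of the layout above, with assumed constants
 * (EFX_MAX_DMAQ_SIZE = 4096 descriptors, sizeof(efx_qword_t) = 8,
 * EFX_BUF_SIZE = 4096): one TX or RX queue occupies 4096 * 8 / 4096 = 8
 * buffer table entries, so for a given qid the RX queue base sits 8
 * entries past the TX base and the event queue base 16 entries past it,
 * matching the txq,rxq,evq ordering per VI noted above.
 */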
#define EFX_FIELD_MASK(_field) \
((1 << _field ## _WIDTH) - 1)
/* VFs can only use this many transmit channels */
static unsigned int vf_max_tx_channels = 2;
module_param(vf_max_tx_channels, uint, 0444);
MODULE_PARM_DESC(vf_max_tx_channels,
"Limit the number of TX channels VFs can use");
static int max_vfs = -1;
module_param(max_vfs, int, 0444);
MODULE_PARM_DESC(max_vfs,
"Reduce the number of VFs initialized by the driver");
/* Workqueue used by VFDI communication. We can't use the global
* workqueue because it may be running the VF driver's probe()
* routine, which will be blocked there waiting for a VFDI response.
*/
static struct workqueue_struct *vfdi_workqueue;
static unsigned abs_index(struct efx_vf *vf, unsigned index)
{
return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
}
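/* Example mapping for abs_index() (values assumed for illustration): with
 * EFX_VI_BASE = 128 and efx_vf_size() = 8, relative index 2 on VF 3 maps
 * to absolute VI 128 + 3 * 8 + 2 = 154; the hardware tables written below
 * are indexed by this absolute number.
 */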
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
unsigned *vi_scale_out, unsigned *vf_total_out)
{
u8 inbuf[MC_CMD_SRIOV_IN_LEN];
u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
unsigned vi_scale, vf_total;
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, SRIOV_IN_ENABLE, enable ? 1 : 0);
MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
if (rc)
return rc;
if (outlen < MC_CMD_SRIOV_OUT_LEN)
return -EIO;
vf_total = MCDI_DWORD(outbuf, SRIOV_OUT_VF_TOTAL);
vi_scale = MCDI_DWORD(outbuf, SRIOV_OUT_VI_SCALE);
if (vi_scale > EFX_VI_SCALE_MAX)
return -EOPNOTSUPP;
if (vi_scale_out)
*vi_scale_out = vi_scale;
if (vf_total_out)
*vf_total_out = vf_total;
return 0;
}
static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
{
efx_oword_t reg;
EFX_POPULATE_OWORD_2(reg,
FRF_CZ_USREV_DIS, enabled ? 0 : 1,
FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
unsigned int count)
{
u8 *inbuf, *record;
unsigned int used;
u32 from_rid, from_hi, from_lo;
int rc;
mb(); /* Finish writing source/reading dest before DMA starts */
used = MC_CMD_MEMCPY_IN_LEN(count);
if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
return -ENOBUFS;
/* Allocate room for the largest request */
inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
if (inbuf == NULL)
return -ENOMEM;
record = inbuf;
MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
while (count-- > 0) {
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
req->to_rid);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
(u32)req->to_addr);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
(u32)(req->to_addr >> 32));
if (req->from_buf == NULL) {
from_rid = req->from_rid;
from_lo = (u32)req->from_addr;
from_hi = (u32)(req->from_addr >> 32);
} else {
if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
rc = -ENOBUFS;
goto out;
}
from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
from_lo = used;
from_hi = 0;
memcpy(inbuf + used, req->from_buf, req->length);
used += req->length;
}
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
from_lo);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
from_hi);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
req->length);
++req;
record += MC_CMD_MEMCPY_IN_RECORD_LEN;
}
rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
kfree(inbuf);
mb(); /* Don't write source/read dest before DMA is complete */
return rc;
}
/* The TX filter is entirely controlled by this driver, and is modified
* underneath the feet of the VF
*/
static void efx_sriov_reset_tx_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
u16 vlan;
int rc;
if (vf->tx_filter_id != -1) {
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
vf->tx_filter_id);
netif_dbg(efx, hw, efx->net_dev, "Removed vf %s tx filter %d\n",
vf->pci_name, vf->tx_filter_id);
vf->tx_filter_id = -1;
}
if (is_zero_ether_addr(vf->addr.mac_addr))
return;
/* Turn on TX filtering automatically if not explicitly
* enabled or disabled.
*/
if (vf->tx_filter_mode == VF_TX_FILTER_AUTO && vf_max_tx_channels <= 2)
vf->tx_filter_mode = VF_TX_FILTER_ON;
vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
efx_filter_init_tx(&filter, abs_index(vf, 0));
rc = efx_filter_set_eth_local(&filter,
vlan ? vlan : EFX_FILTER_VID_UNSPEC,
vf->addr.mac_addr);
BUG_ON(rc);
rc = efx_filter_insert_filter(efx, &filter, true);
if (rc < 0) {
netif_warn(efx, hw, efx->net_dev,
"Unable to migrate tx filter for vf %s\n",
vf->pci_name);
} else {
netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s tx filter %d\n",
vf->pci_name, rc);
vf->tx_filter_id = rc;
}
}
/* The RX filter is managed here on behalf of the VF driver */
static void efx_sriov_reset_rx_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct efx_filter_spec filter;
u16 vlan;
int rc;
if (vf->rx_filter_id != -1) {
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
vf->rx_filter_id);
netif_dbg(efx, hw, efx->net_dev, "Removed vf %s rx filter %d\n",
vf->pci_name, vf->rx_filter_id);
vf->rx_filter_id = -1;
}
if (!vf->rx_filtering || is_zero_ether_addr(vf->addr.mac_addr))
return;
vlan = ntohs(vf->addr.tci) & VLAN_VID_MASK;
efx_filter_init_rx(&filter, EFX_FILTER_PRI_REQUIRED,
vf->rx_filter_flags,
abs_index(vf, vf->rx_filter_qid));
rc = efx_filter_set_eth_local(&filter,
vlan ? vlan : EFX_FILTER_VID_UNSPEC,
vf->addr.mac_addr);
BUG_ON(rc);
rc = efx_filter_insert_filter(efx, &filter, true);
if (rc < 0) {
netif_warn(efx, hw, efx->net_dev,
"Unable to insert rx filter for vf %s\n",
vf->pci_name);
} else {
netif_dbg(efx, hw, efx->net_dev, "Inserted vf %s rx filter %d\n",
vf->pci_name, rc);
vf->rx_filter_id = rc;
}
}
static void __efx_sriov_update_vf_addr(struct efx_vf *vf)
{
efx_sriov_reset_tx_filter(vf);
efx_sriov_reset_rx_filter(vf);
queue_work(vfdi_workqueue, &vf->efx->peer_work);
}
/* Push the peer list to this VF. The caller must hold status_lock to interlock
* with VFDI requests, and they must be serialised against manipulation of
* local_page_list, either by acquiring local_lock or by running from
* efx_sriov_peer_work()
*/
static void __efx_sriov_push_vf_status(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_status *status = efx->vfdi_status.addr;
struct efx_memcpy_req copy[4];
struct efx_endpoint_page *epp;
unsigned int pos, count;
unsigned data_offset;
efx_qword_t event;
WARN_ON(!mutex_is_locked(&vf->status_lock));
WARN_ON(!vf->status_addr);
status->local = vf->addr;
status->generation_end = ++status->generation_start;
memset(copy, '\0', sizeof(copy));
/* Write generation_start */
copy[0].from_buf = &status->generation_start;
copy[0].to_rid = vf->pci_rid;
copy[0].to_addr = vf->status_addr + offsetof(struct vfdi_status,
generation_start);
copy[0].length = sizeof(status->generation_start);
/* DMA the rest of the structure (excluding the generations). This
* assumes that the non-generation portion of vfdi_status is in
* one chunk starting at the version member.
*/
data_offset = offsetof(struct vfdi_status, version);
copy[1].from_rid = efx->pci_dev->devfn;
copy[1].from_addr = efx->vfdi_status.dma_addr + data_offset;
copy[1].to_rid = vf->pci_rid;
copy[1].to_addr = vf->status_addr + data_offset;
copy[1].length = status->length - data_offset;
/* Copy the peer pages */
pos = 2;
count = 0;
list_for_each_entry(epp, &efx->local_page_list, link) {
if (count == vf->peer_page_count) {
/* The VF driver will know they need to provide more
* pages because peer_count is too large.
*/
break;
}
copy[pos].from_buf = NULL;
copy[pos].from_rid = efx->pci_dev->devfn;
copy[pos].from_addr = epp->addr;
copy[pos].to_rid = vf->pci_rid;
copy[pos].to_addr = vf->peer_page_addrs[count];
copy[pos].length = EFX_PAGE_SIZE;
if (++pos == ARRAY_SIZE(copy)) {
efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
pos = 0;
}
++count;
}
/* Write generation_end */
copy[pos].from_buf = &status->generation_end;
copy[pos].to_rid = vf->pci_rid;
copy[pos].to_addr = vf->status_addr + offsetof(struct vfdi_status,
generation_end);
copy[pos].length = sizeof(status->generation_end);
efx_sriov_memcpy(efx, copy, pos + 1);
/* Notify the guest */
EFX_POPULATE_QWORD_3(event,
FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
++vf->msg_seqno;
efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
&event);
}
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
u64 *addr, unsigned count)
{
efx_qword_t buf;
unsigned pos;
for (pos = 0; pos < count; ++pos) {
EFX_POPULATE_QWORD_3(buf,
FRF_AZ_BUF_ADR_REGION, 0,
FRF_AZ_BUF_ADR_FBUF,
addr ? addr[pos] >> 12 : 0,
FRF_AZ_BUF_OWNER_ID_FBUF, 0);
efx_sram_writeq(efx, efx->membase + FR_BZ_BUF_FULL_TBL,
&buf, offset + pos);
}
}
static bool bad_vf_index(struct efx_nic *efx, unsigned index)
{
return index >= efx_vf_size(efx);
}
static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
{
unsigned max_buf_count = max_entry_count *
sizeof(efx_qword_t) / EFX_BUF_SIZE;
return ((buf_count & (buf_count - 1)) || buf_count > max_buf_count);
}
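/* Note on the power-of-two test above: buf_count & (buf_count - 1) clears
 * the lowest set bit, so it is nonzero exactly when more than one bit is
 * set; e.g. 48 & 47 == 32 (rejected) while 64 & 63 == 0 (accepted,
 * provided 64 does not exceed max_buf_count).
 */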
/* Check that VI specified by per-port index belongs to a VF.
* Optionally set VF index and VI index within the VF.
*/
static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
struct efx_vf **vf_out, unsigned *rel_index_out)
{
unsigned vf_i;
if (abs_index < EFX_VI_BASE)
return true;
vf_i = (abs_index - EFX_VI_BASE) / efx_vf_size(efx);
if (vf_i >= efx->vf_init_count)
return true;
if (vf_out)
*vf_out = efx->vf + vf_i;
if (rel_index_out)
*rel_index_out = abs_index % efx_vf_size(efx);
return false;
}
static int efx_vfdi_init_evq(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
unsigned vf_evq = req->u.init_evq.index;
unsigned buf_count = req->u.init_evq.buf_count;
unsigned abs_evq = abs_index(vf, vf_evq);
unsigned buftbl = EFX_BUFTBL_EVQ_BASE(vf, vf_evq);
efx_oword_t reg;
if (bad_vf_index(efx, vf_evq) ||
bad_buf_count(buf_count, EFX_MAX_VF_EVQ_SIZE)) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Invalid INIT_EVQ from %s: evq %d bufs %d\n",
vf->pci_name, vf_evq, buf_count);
return VFDI_RC_EINVAL;
}
efx_sriov_bufs(efx, buftbl, req->u.init_evq.addr, buf_count);
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
FRF_CZ_HOST_NOTIFY_MODE, 0,
FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
EFX_POPULATE_OWORD_3(reg,
FRF_AZ_EVQ_EN, 1,
FRF_AZ_EVQ_SIZE, __ffs(buf_count),
FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
if (vf_evq == 0) {
memcpy(vf->evq0_addrs, req->u.init_evq.addr,
buf_count * sizeof(u64));
vf->evq0_count = buf_count;
}
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_init_rxq(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
unsigned vf_rxq = req->u.init_rxq.index;
unsigned vf_evq = req->u.init_rxq.evq;
unsigned buf_count = req->u.init_rxq.buf_count;
unsigned buftbl = EFX_BUFTBL_RXQ_BASE(vf, vf_rxq);
unsigned label;
efx_oword_t reg;
if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d "
"buf_count %d\n", vf->pci_name, vf_rxq,
vf_evq, buf_count);
return VFDI_RC_EINVAL;
}
if (__test_and_set_bit(req->u.init_rxq.index, vf->rxq_mask))
++vf->rxq_count;
efx_sriov_bufs(efx, buftbl, req->u.init_rxq.addr, buf_count);
label = req->u.init_rxq.label & EFX_FIELD_MASK(FRF_AZ_RX_DESCQ_LABEL);
EFX_POPULATE_OWORD_6(reg,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, buftbl,
FRF_AZ_RX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
FRF_AZ_RX_DESCQ_LABEL, label,
FRF_AZ_RX_DESCQ_SIZE, __ffs(buf_count),
FRF_AZ_RX_DESCQ_JUMBO,
!!(req->u.init_rxq.flags &
VFDI_RXQ_FLAG_SCATTER_EN),
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
abs_index(vf, vf_rxq));
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_init_txq(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
unsigned vf_txq = req->u.init_txq.index;
unsigned vf_evq = req->u.init_txq.evq;
unsigned buf_count = req->u.init_txq.buf_count;
unsigned buftbl = EFX_BUFTBL_TXQ_BASE(vf, vf_txq);
unsigned label, eth_filt_en;
efx_oword_t reg;
if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_txq) ||
vf_txq >= vf_max_tx_channels ||
bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Invalid INIT_TXQ from %s: txq %d evq %d "
"buf_count %d\n", vf->pci_name, vf_txq,
vf_evq, buf_count);
return VFDI_RC_EINVAL;
}
mutex_lock(&vf->txq_lock);
if (__test_and_set_bit(req->u.init_txq.index, vf->txq_mask))
++vf->txq_count;
mutex_unlock(&vf->txq_lock);
efx_sriov_bufs(efx, buftbl, req->u.init_txq.addr, buf_count);
eth_filt_en = vf->tx_filter_mode == VF_TX_FILTER_ON;
label = req->u.init_txq.label & EFX_FIELD_MASK(FRF_AZ_TX_DESCQ_LABEL);
EFX_POPULATE_OWORD_8(reg,
FRF_CZ_TX_DPT_Q_MASK_WIDTH, min(efx->vi_scale, 1U),
FRF_CZ_TX_DPT_ETH_FILT_EN, eth_filt_en,
FRF_AZ_TX_DESCQ_EN, 1,
FRF_AZ_TX_DESCQ_BUF_BASE_ID, buftbl,
FRF_AZ_TX_DESCQ_EVQ_ID, abs_index(vf, vf_evq),
FRF_AZ_TX_DESCQ_LABEL, label,
FRF_AZ_TX_DESCQ_SIZE, __ffs(buf_count),
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
abs_index(vf, vf_txq));
return VFDI_RC_SUCCESS;
}
/* Returns true when efx_vfdi_fini_all_queues should wake */
static bool efx_vfdi_flush_wake(struct efx_vf *vf)
{
/* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
smp_mb();
return (!vf->txq_count && !vf->rxq_count) ||
atomic_read(&vf->rxq_retry_count);
}
static void efx_vfdi_flush_clear(struct efx_vf *vf)
{
memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
vf->txq_count = 0;
memset(vf->rxq_mask, 0, sizeof(vf->rxq_mask));
vf->rxq_count = 0;
memset(vf->rxq_retry_mask, 0, sizeof(vf->rxq_retry_mask));
atomic_set(&vf->rxq_retry_count, 0);
}
static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
efx_oword_t reg;
unsigned count = efx_vf_size(efx);
unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
unsigned timeout = HZ;
unsigned index, rxqs_count;
__le32 *rxqs;
int rc;
rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
if (rxqs == NULL)
return VFDI_RC_ENOMEM;
rtnl_lock();
if (efx->fc_disable++ == 0)
efx_mcdi_set_mac(efx);
rtnl_unlock();
/* Flush all the initialized queues */
rxqs_count = 0;
for (index = 0; index < count; ++index) {
if (test_bit(index, vf->txq_mask)) {
EFX_POPULATE_OWORD_2(reg,
FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_TX_FLUSH_DESCQ,
vf_offset + index);
efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
}
if (test_bit(index, vf->rxq_mask))
rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
}
atomic_set(&vf->rxq_retry_count, 0);
while (timeout && (vf->rxq_count || vf->txq_count)) {
rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
WARN_ON(rc < 0);
timeout = wait_event_timeout(vf->flush_waitq,
efx_vfdi_flush_wake(vf),
timeout);
rxqs_count = 0;
for (index = 0; index < count; ++index) {
if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
atomic_dec(&vf->rxq_retry_count);
rxqs[rxqs_count++] =
cpu_to_le32(vf_offset + index);
}
}
}
rtnl_lock();
if (--efx->fc_disable == 0)
efx_mcdi_set_mac(efx);
rtnl_unlock();
/* Irrespective of success/failure, fini the queues */
EFX_ZERO_OWORD(reg);
for (index = 0; index < count; ++index) {
efx_writeo_table(efx, &reg, FR_BZ_RX_DESC_PTR_TBL,
vf_offset + index);
efx_writeo_table(efx, &reg, FR_BZ_TX_DESC_PTR_TBL,
vf_offset + index);
efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL,
vf_offset + index);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL,
vf_offset + index);
}
efx_sriov_bufs(efx, vf->buftbl_base, NULL,
EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
kfree(rxqs);
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
return timeout ? 0 : VFDI_RC_ETIMEDOUT;
}
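/* Flush/retry protocol used above: efx_sriov_rx_flush_done() (later in
 * this file) sets bits in rxq_retry_mask for RX queues whose flush failed,
 * and the loop re-issues MC_CMD_FLUSH_RX_QUEUES for exactly those queues
 * until everything has flushed or the HZ-based timeout budget is spent.
 */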
static int efx_vfdi_insert_filter(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
unsigned vf_rxq = req->u.mac_filter.rxq;
unsigned flags;
if (bad_vf_index(efx, vf_rxq) || vf->rx_filtering) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Invalid INSERT_FILTER from %s: rxq %d "
"flags 0x%x\n", vf->pci_name, vf_rxq,
req->u.mac_filter.flags);
return VFDI_RC_EINVAL;
}
flags = 0;
if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_RSS)
flags |= EFX_FILTER_FLAG_RX_RSS;
if (req->u.mac_filter.flags & VFDI_MAC_FILTER_FLAG_SCATTER)
flags |= EFX_FILTER_FLAG_RX_SCATTER;
vf->rx_filter_flags = flags;
vf->rx_filter_qid = vf_rxq;
vf->rx_filtering = true;
efx_sriov_reset_rx_filter(vf);
queue_work(vfdi_workqueue, &efx->peer_work);
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
{
vf->rx_filtering = false;
efx_sriov_reset_rx_filter(vf);
queue_work(vfdi_workqueue, &vf->efx->peer_work);
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_set_status_page(struct efx_vf *vf)
{
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
u64 page_count = req->u.set_status_page.peer_page_count;
u64 max_page_count =
(EFX_PAGE_SIZE -
offsetof(struct vfdi_req, u.set_status_page.peer_page_addr[0]))
/ sizeof(req->u.set_status_page.peer_page_addr[0]);
if (!req->u.set_status_page.dma_addr || page_count > max_page_count) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Invalid SET_STATUS_PAGE from %s\n",
vf->pci_name);
return VFDI_RC_EINVAL;
}
mutex_lock(&efx->local_lock);
mutex_lock(&vf->status_lock);
vf->status_addr = req->u.set_status_page.dma_addr;
kfree(vf->peer_page_addrs);
vf->peer_page_addrs = NULL;
vf->peer_page_count = 0;
if (page_count) {
vf->peer_page_addrs = kcalloc(page_count, sizeof(u64),
GFP_KERNEL);
if (vf->peer_page_addrs) {
memcpy(vf->peer_page_addrs,
req->u.set_status_page.peer_page_addr,
page_count * sizeof(u64));
vf->peer_page_count = page_count;
}
}
__efx_sriov_push_vf_status(vf);
mutex_unlock(&vf->status_lock);
mutex_unlock(&efx->local_lock);
return VFDI_RC_SUCCESS;
}
static int efx_vfdi_clear_status_page(struct efx_vf *vf)
{
mutex_lock(&vf->status_lock);
vf->status_addr = 0;
mutex_unlock(&vf->status_lock);
return VFDI_RC_SUCCESS;
}
typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
[VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
[VFDI_OP_INIT_TXQ] = efx_vfdi_init_txq,
[VFDI_OP_INIT_RXQ] = efx_vfdi_init_rxq,
[VFDI_OP_FINI_ALL_QUEUES] = efx_vfdi_fini_all_queues,
[VFDI_OP_INSERT_FILTER] = efx_vfdi_insert_filter,
[VFDI_OP_REMOVE_ALL_FILTERS] = efx_vfdi_remove_all_filters,
[VFDI_OP_SET_STATUS_PAGE] = efx_vfdi_set_status_page,
[VFDI_OP_CLEAR_STATUS_PAGE] = efx_vfdi_clear_status_page,
};
static void efx_sriov_vfdi(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, req);
struct efx_nic *efx = vf->efx;
struct vfdi_req *req = vf->buf.addr;
struct efx_memcpy_req copy[2];
int rc;
/* Copy this page into the local address space */
memset(copy, '\0', sizeof(copy));
copy[0].from_rid = vf->pci_rid;
copy[0].from_addr = vf->req_addr;
copy[0].to_rid = efx->pci_dev->devfn;
copy[0].to_addr = vf->buf.dma_addr;
copy[0].length = EFX_PAGE_SIZE;
rc = efx_sriov_memcpy(efx, copy, 1);
if (rc) {
/* If we can't get the request, we can't reply to the caller */
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Unable to fetch VFDI request from %s rc %d\n",
vf->pci_name, -rc);
vf->busy = false;
return;
}
if (req->op < VFDI_OP_LIMIT && vfdi_ops[req->op] != NULL) {
rc = vfdi_ops[req->op](vf);
if (rc == 0) {
netif_dbg(efx, hw, efx->net_dev,
"vfdi request %d from %s ok\n",
req->op, vf->pci_name);
}
} else {
netif_dbg(efx, hw, efx->net_dev,
"ERROR: Unrecognised request %d from VF %s addr "
"%llx\n", req->op, vf->pci_name,
(unsigned long long)vf->req_addr);
rc = VFDI_RC_EOPNOTSUPP;
}
/* Allow subsequent VF requests */
vf->busy = false;
smp_wmb();
/* Respond to the request */
req->rc = rc;
req->op = VFDI_OP_RESPONSE;
memset(copy, '\0', sizeof(copy));
copy[0].from_buf = &req->rc;
copy[0].to_rid = vf->pci_rid;
copy[0].to_addr = vf->req_addr + offsetof(struct vfdi_req, rc);
copy[0].length = sizeof(req->rc);
copy[1].from_buf = &req->op;
copy[1].to_rid = vf->pci_rid;
copy[1].to_addr = vf->req_addr + offsetof(struct vfdi_req, op);
copy[1].length = sizeof(req->op);
(void) efx_sriov_memcpy(efx, copy, ARRAY_SIZE(copy));
}
/* After a reset the event queues inside the guests no longer exist. Fill the
* event ring in guest memory with VFDI reset events, then re-initialise the
* event queue to raise an interrupt. The guest driver will then recover.
*/
static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
{
struct efx_nic *efx = vf->efx;
struct efx_memcpy_req copy_req[4];
efx_qword_t event;
unsigned int pos, count, k, buftbl, abs_evq;
efx_oword_t reg;
efx_dword_t ptr;
int rc;
BUG_ON(buffer->len != EFX_PAGE_SIZE);
if (!vf->evq0_count)
return;
BUG_ON(vf->evq0_count & (vf->evq0_count - 1));
mutex_lock(&vf->status_lock);
EFX_POPULATE_QWORD_3(event,
FSF_AZ_EV_CODE, FSE_CZ_EV_CODE_USER_EV,
VFDI_EV_SEQ, vf->msg_seqno,
VFDI_EV_TYPE, VFDI_EV_TYPE_RESET);
vf->msg_seqno++;
for (pos = 0; pos < EFX_PAGE_SIZE; pos += sizeof(event))
memcpy(buffer->addr + pos, &event, sizeof(event));
for (pos = 0; pos < vf->evq0_count; pos += count) {
count = min_t(unsigned, vf->evq0_count - pos,
ARRAY_SIZE(copy_req));
for (k = 0; k < count; k++) {
copy_req[k].from_buf = NULL;
copy_req[k].from_rid = efx->pci_dev->devfn;
copy_req[k].from_addr = buffer->dma_addr;
copy_req[k].to_rid = vf->pci_rid;
copy_req[k].to_addr = vf->evq0_addrs[pos + k];
copy_req[k].length = EFX_PAGE_SIZE;
}
rc = efx_sriov_memcpy(efx, copy_req, count);
if (rc) {
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Unable to notify %s of reset"
": %d\n", vf->pci_name, -rc);
break;
}
}
/* Reinitialise, arm and trigger evq0 */
abs_evq = abs_index(vf, 0);
buftbl = EFX_BUFTBL_EVQ_BASE(vf, 0);
efx_sriov_bufs(efx, buftbl, vf->evq0_addrs, vf->evq0_count);
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
FRF_CZ_HOST_NOTIFY_MODE, 0,
FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, abs_evq);
EFX_POPULATE_OWORD_3(reg,
FRF_AZ_EVQ_EN, 1,
FRF_AZ_EVQ_SIZE, __ffs(vf->evq0_count),
FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);
mutex_unlock(&vf->status_lock);
}
static void efx_sriov_reset_vf_work(struct work_struct *work)
{
struct efx_vf *vf = container_of(work, struct efx_vf, reset_work);
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
efx_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf);
}
}
static void efx_sriov_handle_no_channel(struct efx_nic *efx)
{
netif_err(efx, drv, efx->net_dev,
"ERROR: IOV requires MSI-X and 1 additional interrupt"
"vector. IOV disabled\n");
efx->vf_count = 0;
}
static int efx_sriov_probe_channel(struct efx_channel *channel)
{
channel->efx->vfdi_channel = channel;
return 0;
}
static void
efx_sriov_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
snprintf(buf, len, "%s-iov", channel->efx->name);
}
static const struct efx_channel_type efx_sriov_channel_type = {
.handle_no_channel = efx_sriov_handle_no_channel,
.pre_probe = efx_sriov_probe_channel,
.get_name = efx_sriov_get_channel_name,
/* no copy operation; channel must not be reallocated */
.keep_eventq = true,
};
void efx_sriov_probe(struct efx_nic *efx)
{
unsigned count;
if (!max_vfs)
return;
if (efx_sriov_cmd(efx, false, &efx->vi_scale, &count))
return;
if (count > 0 && count > max_vfs)
count = max_vfs;
/* efx_nic_dimension_resources() will reduce vf_count as appropriate */
efx->vf_count = count;
efx->extra_channel_type[EFX_EXTRA_CHANNEL_IOV] = &efx_sriov_channel_type;
}
/* Copy the list of individual addresses into the vfdi_status.peers
* array and auxiliary pages, protected by %local_lock. Drop that lock
* and then broadcast the address list to every VF.
*/
static void efx_sriov_peer_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, peer_work);
struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
struct efx_vf *vf;
struct efx_local_addr *local_addr;
struct vfdi_endpoint *peer;
struct efx_endpoint_page *epp;
struct list_head pages;
unsigned int peer_space;
unsigned int peer_count;
unsigned int pos;
mutex_lock(&efx->local_lock);
/* Move the existing peer pages off %local_page_list */
INIT_LIST_HEAD(&pages);
list_splice_tail_init(&efx->local_page_list, &pages);
/* Populate the VF addresses starting from entry 1 (entry 0 is
* the PF address)
*/
peer = vfdi_status->peers + 1;
peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
peer_count = 1;
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
*peer++ = vf->addr;
++peer_count;
--peer_space;
BUG_ON(peer_space == 0);
}
mutex_unlock(&vf->status_lock);
}
/* Fill the remaining addresses */
list_for_each_entry(local_addr, &efx->local_addr_list, link) {
memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
peer->tci = 0;
++peer;
++peer_count;
if (--peer_space == 0) {
if (list_empty(&pages)) {
epp = kmalloc(sizeof(*epp), GFP_KERNEL);
if (!epp)
break;
epp->ptr = dma_alloc_coherent(
&efx->pci_dev->dev, EFX_PAGE_SIZE,
&epp->addr, GFP_KERNEL);
if (!epp->ptr) {
kfree(epp);
break;
}
} else {
epp = list_first_entry(
&pages, struct efx_endpoint_page, link);
list_del(&epp->link);
}
list_add_tail(&epp->link, &efx->local_page_list);
peer = (struct vfdi_endpoint *)epp->ptr;
peer_space = EFX_PAGE_SIZE / sizeof(struct vfdi_endpoint);
}
}
vfdi_status->peer_count = peer_count;
mutex_unlock(&efx->local_lock);
/* Free any now unused endpoint pages */
while (!list_empty(&pages)) {
epp = list_first_entry(
&pages, struct efx_endpoint_page, link);
list_del(&epp->link);
dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
epp->ptr, epp->addr);
kfree(epp);
}
/* Finally, push the pages */
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
mutex_lock(&vf->status_lock);
if (vf->status_addr)
__efx_sriov_push_vf_status(vf);
mutex_unlock(&vf->status_lock);
}
}
static void efx_sriov_free_local(struct efx_nic *efx)
{
struct efx_local_addr *local_addr;
struct efx_endpoint_page *epp;
while (!list_empty(&efx->local_addr_list)) {
local_addr = list_first_entry(&efx->local_addr_list,
struct efx_local_addr, link);
list_del(&local_addr->link);
kfree(local_addr);
}
while (!list_empty(&efx->local_page_list)) {
epp = list_first_entry(&efx->local_page_list,
struct efx_endpoint_page, link);
list_del(&epp->link);
dma_free_coherent(&efx->pci_dev->dev, EFX_PAGE_SIZE,
epp->ptr, epp->addr);
kfree(epp);
}
}
static int efx_sriov_vf_alloc(struct efx_nic *efx)
{
unsigned index;
struct efx_vf *vf;
efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
if (!efx->vf)
return -ENOMEM;
for (index = 0; index < efx->vf_count; ++index) {
vf = efx->vf + index;
vf->efx = efx;
vf->index = index;
vf->rx_filter_id = -1;
vf->tx_filter_mode = VF_TX_FILTER_AUTO;
vf->tx_filter_id = -1;
INIT_WORK(&vf->req, efx_sriov_vfdi);
INIT_WORK(&vf->reset_work, efx_sriov_reset_vf_work);
init_waitqueue_head(&vf->flush_waitq);
mutex_init(&vf->status_lock);
mutex_init(&vf->txq_lock);
}
return 0;
}
static void efx_sriov_vfs_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
unsigned int pos;
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
efx_nic_free_buffer(efx, &vf->buf);
kfree(vf->peer_page_addrs);
vf->peer_page_addrs = NULL;
vf->peer_page_count = 0;
vf->evq0_count = 0;
}
}
static int efx_sriov_vfs_init(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
unsigned index, devfn, sriov, buftbl_base;
u16 offset, stride;
struct efx_vf *vf;
int rc;
sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
if (!sriov)
return -ENOENT;
pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);
buftbl_base = efx->vf_buftbl_base;
devfn = pci_dev->devfn + offset;
for (index = 0; index < efx->vf_count; ++index) {
vf = efx->vf + index;
/* Reserve buffer entries */
vf->buftbl_base = buftbl_base;
buftbl_base += EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx);
vf->pci_rid = devfn;
snprintf(vf->pci_name, sizeof(vf->pci_name),
"%04x:%02x:%02x.%d",
pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn));
rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
if (rc)
goto fail;
devfn += stride;
}
return 0;
fail:
efx_sriov_vfs_fini(efx);
return rc;
}
int efx_sriov_init(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
struct vfdi_status *vfdi_status;
int rc;
/* Ensure there's room for vf_channel */
BUILD_BUG_ON(EFX_MAX_CHANNELS + 1 >= EFX_VI_BASE);
/* Ensure that VI_BASE is aligned on VI_SCALE */
BUILD_BUG_ON(EFX_VI_BASE & ((1 << EFX_VI_SCALE_MAX) - 1));
if (efx->vf_count == 0)
return 0;
rc = efx_sriov_cmd(efx, true, NULL, NULL);
if (rc)
goto fail_cmd;
rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
if (rc)
goto fail_status;
vfdi_status = efx->vfdi_status.addr;
memset(vfdi_status, 0, sizeof(*vfdi_status));
vfdi_status->version = 1;
vfdi_status->length = sizeof(*vfdi_status);
vfdi_status->max_tx_channels = vf_max_tx_channels;
vfdi_status->vi_scale = efx->vi_scale;
vfdi_status->rss_rxq_count = efx->rss_spread;
vfdi_status->peer_count = 1 + efx->vf_count;
vfdi_status->timer_quantum_ns = efx->timer_quantum_ns;
rc = efx_sriov_vf_alloc(efx);
if (rc)
goto fail_alloc;
mutex_init(&efx->local_lock);
INIT_WORK(&efx->peer_work, efx_sriov_peer_work);
INIT_LIST_HEAD(&efx->local_addr_list);
INIT_LIST_HEAD(&efx->local_page_list);
rc = efx_sriov_vfs_init(efx);
if (rc)
goto fail_vfs;
rtnl_lock();
memcpy(vfdi_status->peers[0].mac_addr,
net_dev->dev_addr, ETH_ALEN);
efx->vf_init_count = efx->vf_count;
rtnl_unlock();
efx_sriov_usrev(efx, true);
/* At this point we must be ready to accept VFDI requests */
rc = pci_enable_sriov(efx->pci_dev, efx->vf_count);
if (rc)
goto fail_pci;
netif_info(efx, probe, net_dev,
"enabled SR-IOV for %d VFs, %d VI per VF\n",
efx->vf_count, efx_vf_size(efx));
return 0;
fail_pci:
efx_sriov_usrev(efx, false);
rtnl_lock();
efx->vf_init_count = 0;
rtnl_unlock();
efx_sriov_vfs_fini(efx);
fail_vfs:
cancel_work_sync(&efx->peer_work);
efx_sriov_free_local(efx);
kfree(efx->vf);
fail_alloc:
efx_nic_free_buffer(efx, &efx->vfdi_status);
fail_status:
efx_sriov_cmd(efx, false, NULL, NULL);
fail_cmd:
return rc;
}
void efx_sriov_fini(struct efx_nic *efx)
{
struct efx_vf *vf;
unsigned int pos;
if (efx->vf_init_count == 0)
return;
/* Disable all interfaces to reconfiguration */
BUG_ON(efx->vfdi_channel->enabled);
efx_sriov_usrev(efx, false);
rtnl_lock();
efx->vf_init_count = 0;
rtnl_unlock();
/* Flush all reconfiguration work */
for (pos = 0; pos < efx->vf_count; ++pos) {
vf = efx->vf + pos;
cancel_work_sync(&vf->req);
cancel_work_sync(&vf->reset_work);
}
cancel_work_sync(&efx->peer_work);
pci_disable_sriov(efx->pci_dev);
/* Tear down back-end state */
efx_sriov_vfs_fini(efx);
efx_sriov_free_local(efx);
kfree(efx->vf);
efx_nic_free_buffer(efx, &efx->vfdi_status);
efx_sriov_cmd(efx, false, NULL, NULL);
}
void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_vf *vf;
unsigned qid, seq, type, data;
qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
/* USR_EV_REG_VALUE is dword0, so access the VFDI_EV fields directly */
BUILD_BUG_ON(FSF_CZ_USER_EV_REG_VALUE_LBN != 0);
seq = EFX_QWORD_FIELD(*event, VFDI_EV_SEQ);
type = EFX_QWORD_FIELD(*event, VFDI_EV_TYPE);
data = EFX_QWORD_FIELD(*event, VFDI_EV_DATA);
netif_vdbg(efx, hw, efx->net_dev,
"USR_EV event from qid %d seq 0x%x type %d data 0x%x\n",
qid, seq, type, data);
if (map_vi_index(efx, qid, &vf, NULL))
return;
if (vf->busy)
goto error;
if (type == VFDI_EV_TYPE_REQ_WORD0) {
/* Resynchronise */
vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
vf->req_seqno = seq + 1;
vf->req_addr = 0;
} else if (seq != (vf->req_seqno++ & 0xff) || type != vf->req_type)
goto error;
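/* Each REQ_WORDn event carries 16 bits of the request's DMA address.
 * Assuming VFDI_EV_TYPE_REQ_WORD0..3 are encoded as 0..3, the shift
 * (vf->req_type << 4) below places them at bits 0, 16, 32 and 48; e.g. a
 * WORD2 payload of 0xabcd contributes 0xabcd00000000 to vf->req_addr.
 */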
switch (vf->req_type) {
case VFDI_EV_TYPE_REQ_WORD0:
case VFDI_EV_TYPE_REQ_WORD1:
case VFDI_EV_TYPE_REQ_WORD2:
vf->req_addr |= (u64)data << (vf->req_type << 4);
++vf->req_type;
return;
case VFDI_EV_TYPE_REQ_WORD3:
vf->req_addr |= (u64)data << 48;
vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
vf->busy = true;
queue_work(vfdi_workqueue, &vf->req);
return;
}
error:
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"ERROR: Screaming VFDI request from %s\n",
vf->pci_name);
/* Reset the request and sequence number */
vf->req_type = VFDI_EV_TYPE_REQ_WORD0;
vf->req_seqno = seq + 1;
}
void efx_sriov_flr(struct efx_nic *efx, unsigned vf_i)
{
struct efx_vf *vf;
if (vf_i >= efx->vf_init_count)
return;
vf = efx->vf + vf_i;
netif_info(efx, hw, efx->net_dev,
"FLR on VF %s\n", vf->pci_name);
vf->status_addr = 0;
efx_vfdi_remove_all_filters(vf);
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
}
void efx_sriov_mac_address_changed(struct efx_nic *efx)
{
struct vfdi_status *vfdi_status = efx->vfdi_status.addr;
if (!efx->vf_init_count)
return;
memcpy(vfdi_status->peers[0].mac_addr,
efx->net_dev->dev_addr, ETH_ALEN);
queue_work(vfdi_workqueue, &efx->peer_work);
}
void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
unsigned queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
if (map_vi_index(efx, queue, &vf, &qid))
return;
/* Ignore flush completions triggered by an FLR */
if (!test_bit(qid, vf->txq_mask))
return;
__clear_bit(qid, vf->txq_mask);
--vf->txq_count;
if (efx_vfdi_flush_wake(vf))
wake_up(&vf->flush_waitq);
}
void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_vf *vf;
unsigned ev_failed, queue, qid;
queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
ev_failed = EFX_QWORD_FIELD(*event,
FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
if (map_vi_index(efx, queue, &vf, &qid))
return;
if (!test_bit(qid, vf->rxq_mask))
return;
if (ev_failed) {
set_bit(qid, vf->rxq_retry_mask);
atomic_inc(&vf->rxq_retry_count);
} else {
__clear_bit(qid, vf->rxq_mask);
--vf->rxq_count;
}
if (efx_vfdi_flush_wake(vf))
wake_up(&vf->flush_waitq);
}
/* Called from napi. Schedule the reset work item */
void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
{
struct efx_vf *vf;
unsigned int rel;
if (map_vi_index(efx, dmaq, &vf, &rel))
return;
if (net_ratelimit())
netif_err(efx, hw, efx->net_dev,
"VF %d DMA Q %d reports descriptor fetch error.\n",
vf->index, rel);
queue_work(vfdi_workqueue, &vf->reset_work);
}
/* Reset all VFs */
void efx_sriov_reset(struct efx_nic *efx)
{
unsigned int vf_i;
struct efx_buffer buf;
struct efx_vf *vf;
ASSERT_RTNL();
if (efx->vf_init_count == 0)
return;
efx_sriov_usrev(efx, true);
(void)efx_sriov_cmd(efx, true, NULL, NULL);
if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
vf = efx->vf + vf_i;
efx_sriov_reset_vf(vf, &buf);
}
efx_nic_free_buffer(efx, &buf);
}
int efx_init_sriov(void)
{
/* A single threaded workqueue is sufficient. efx_sriov_vfdi() and
* efx_sriov_peer_work() spend almost all their time sleeping for
* MCDI to complete anyway
*/
vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
if (!vfdi_workqueue)
return -ENOMEM;
return 0;
}
void efx_fini_sriov(void)
{
destroy_workqueue(vfdi_workqueue);
}
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
mutex_lock(&vf->status_lock);
memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
__efx_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
return 0;
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
u16 vlan, u8 qos)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
mutex_lock(&vf->status_lock);
tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
vf->addr.tci = htons(tci);
__efx_sriov_update_vf_addr(vf);
mutex_unlock(&vf->status_lock);
return 0;
}
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
int rc;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
mutex_lock(&vf->txq_lock);
if (vf->txq_count == 0) {
vf->tx_filter_mode =
spoofchk ? VF_TX_FILTER_ON : VF_TX_FILTER_OFF;
rc = 0;
} else {
/* This cannot be changed while TX queues are running */
rc = -EBUSY;
}
mutex_unlock(&vf->txq_lock);
return rc;
}
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_vf *vf;
u16 tci;
if (vf_i >= efx->vf_init_count)
return -EINVAL;
vf = efx->vf + vf_i;
ivi->vf = vf_i;
memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
ivi->tx_rate = 0;
tci = ntohs(vf->addr.tci);
ivi->vlan = tci & VLAN_VID_MASK;
ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
ivi->spoofchk = vf->tx_filter_mode == VF_TX_FILTER_ON;
return 0;
}
| gpl-2.0 |
omnirom/android_kernel_oppo_msm8916 | arch/mips/mm/tlb-r8k.c | 6538 | 5474 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
extern void build_tlb_refill_handler(void);
#define TFP_TLB_SIZE 384
#define TFP_TLB_SET_SHIFT 7
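/* Geometry implied by the constants above: 384 entries addressed as
 * 3 sets (entry >> 7) of 128 entries each, matching the
 * c->tlbsize = 3 * 128 reported by probe_tlb() below.
 */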
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
"nop; nop; nop; nop; nop; nop;\n\t" \
".set reorder\n\t")
void local_flush_tlb_all(void)
{
unsigned long flags;
unsigned long old_ctx;
int entry;
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
write_c0_entrylo(0);
for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
write_c0_vaddr(entry << PAGE_SHIFT);
write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
mtc0_tlbw_hazard();
tlb_write();
}
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
local_irq_restore(flags);
}
void local_flush_tlb_mm(struct mm_struct *mm)
{
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0)
drop_mmu_context(mm, cpu);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
int cpu = smp_processor_id();
unsigned long flags;
int oldpid, newpid, size;
if (!cpu_context(cpu, mm))
return;
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
local_irq_save(flags);
if (size > TFP_TLB_SIZE / 2) {
drop_mmu_context(mm, cpu);
goto out_restore;
}
oldpid = read_c0_entryhi();
newpid = cpu_asid(cpu, mm);
write_c0_entrylo(0);
start &= PAGE_MASK;
end += (PAGE_SIZE - 1);
end &= PAGE_MASK;
while (start < end) {
signed long idx;
write_c0_vaddr(start);
write_c0_entryhi(start);
start += PAGE_SIZE;
tlb_probe();
idx = read_c0_tlbset();
if (idx < 0)
continue;
write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
tlb_write();
}
write_c0_entryhi(oldpid);
out_restore:
local_irq_restore(flags);
}
/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long size, flags;
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
if (size > TFP_TLB_SIZE / 2) {
local_flush_tlb_all();
return;
}
local_irq_save(flags);
write_c0_entrylo(0);
start &= PAGE_MASK;
end += (PAGE_SIZE - 1);
end &= PAGE_MASK;
while (start < end) {
signed long idx;
write_c0_vaddr(start);
write_c0_entryhi(start);
start += PAGE_SIZE;
tlb_probe();
idx = read_c0_tlbset();
if (idx < 0)
continue;
write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
tlb_write();
}
local_irq_restore(flags);
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
int cpu = smp_processor_id();
unsigned long flags;
int oldpid, newpid;
signed long idx;
if (!cpu_context(cpu, vma->vm_mm))
return;
newpid = cpu_asid(cpu, vma->vm_mm);
page &= PAGE_MASK;
local_irq_save(flags);
oldpid = read_c0_entryhi();
write_c0_vaddr(page);
write_c0_entryhi(newpid);
tlb_probe();
idx = read_c0_tlbset();
if (idx < 0)
goto finish;
write_c0_entrylo(0);
write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
tlb_write();
finish:
write_c0_entryhi(oldpid);
local_irq_restore(flags);
}
/*
* We will need multiple versions of update_mmu_cache(), one that just
* updates the TLB with the new pte(s), and another which also checks
* for the R4k "end of page" hardware bug and does the needy.
*/
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
unsigned long flags;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
int pid;
/*
* Handle debugger faulting in for debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
pid = read_c0_entryhi() & ASID_MASK;
local_irq_save(flags);
address &= PAGE_MASK;
write_c0_vaddr(address);
write_c0_entryhi(pid);
pgdp = pgd_offset(vma->vm_mm, address);
pmdp = pmd_offset(pgdp, address);
ptep = pte_offset_map(pmdp, address);
tlb_probe();
write_c0_entrylo(pte_val(*ptep++) >> 6);
tlb_write();
write_c0_entryhi(pid);
local_irq_restore(flags);
}
static void __cpuinit probe_tlb(unsigned long config)
{
struct cpuinfo_mips *c = &current_cpu_data;
c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
}
void __cpuinit tlb_init(void)
{
unsigned int config = read_c0_config();
unsigned long status;
probe_tlb(config);
status = read_c0_status();
status &= ~(ST0_UPS | ST0_KPS);
#ifdef CONFIG_PAGE_SIZE_4KB
status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
#elif defined(CONFIG_PAGE_SIZE_8KB)
status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
#elif defined(CONFIG_PAGE_SIZE_16KB)
status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
#elif defined(CONFIG_PAGE_SIZE_64KB)
status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
#endif
write_c0_status(status);
write_c0_wired(0);
local_flush_tlb_all();
build_tlb_refill_handler();
}
| gpl-2.0 |
boa19861105/android_443_KitKat_kernel_htc_dlxub1 | drivers/usb/misc/trancevibrator.c | 7562 | 3891 | /*
* PlayStation 2 Trance Vibrator driver
*
* Copyright (C) 2006 Sam Hocevar <sam@zoy.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Standard include files */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/usb.h>
/* Version Information */
#define DRIVER_VERSION "v1.1"
#define DRIVER_AUTHOR "Sam Hocevar, sam@zoy.org"
#define DRIVER_DESC "PlayStation 2 Trance Vibrator driver"
#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE (usb, id_table);
/* Driver-local specific stuff */
struct trancevibrator {
struct usb_device *udev;
unsigned int speed;
};
static ssize_t show_speed(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
struct trancevibrator *tv = usb_get_intfdata(intf);
return sprintf(buf, "%d\n", tv->speed);
}
static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *intf = to_usb_interface(dev);
struct trancevibrator *tv = usb_get_intfdata(intf);
int temp, retval, old;
temp = simple_strtoul(buf, NULL, 10);
if (temp > 255)
temp = 255;
else if (temp < 0)
temp = 0;
old = tv->speed;
tv->speed = temp;
dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed);
/* Set speed */
retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0),
0x01, /* vendor request: set speed */
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
tv->speed, /* speed value */
0, NULL, 0, USB_CTRL_GET_TIMEOUT);
if (retval) {
tv->speed = old;
dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
return retval;
}
return count;
}
static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed);
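/* Illustrative user-space usage of the attribute above (the sysfs path
 * depends on bus enumeration and is only an example):
 *
 *	echo 255 > /sys/bus/usb/devices/1-1:1.0/speed	# full speed
 *	cat /sys/bus/usb/devices/1-1:1.0/speed		# read it back
 */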
static int tv_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct trancevibrator *dev;
int retval;
dev = kzalloc(sizeof(struct trancevibrator), GFP_KERNEL);
if (dev == NULL) {
dev_err(&interface->dev, "Out of memory\n");
retval = -ENOMEM;
goto error;
}
dev->udev = usb_get_dev(udev);
usb_set_intfdata(interface, dev);
retval = device_create_file(&interface->dev, &dev_attr_speed);
if (retval)
goto error_create_file;
return 0;
error_create_file:
usb_put_dev(udev);
usb_set_intfdata(interface, NULL);
error:
kfree(dev);
return retval;
}
static void tv_disconnect(struct usb_interface *interface)
{
struct trancevibrator *dev;
dev = usb_get_intfdata (interface);
device_remove_file(&interface->dev, &dev_attr_speed);
usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
kfree(dev);
}
/* USB subsystem object */
static struct usb_driver tv_driver = {
.name = "trancevibrator",
.probe = tv_probe,
.disconnect = tv_disconnect,
.id_table = id_table,
};
module_usb_driver(tv_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| gpl-2.0 |
AICP/kernel_xiaomi_cancro | drivers/media/video/videobuf-dma-contig.c | 7818 | 9705 | /*
* helper functions for physically contiguous capture buffers
*
* The functions support hardware lacking scatter gather support
* (i.e. the buffers must be linear in physical memory)
*
* Copyright (c) 2008 Magnus Damm
*
* Based on videobuf-vmalloc.c,
* (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
struct videobuf_dma_contig_memory {
u32 magic;
void *vaddr;
dma_addr_t dma_handle;
unsigned long size;
};
#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should) \
if (unlikely((is) != (should))) { \
pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
BUG(); \
}
static void
videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
map->count++;
}
static void videobuf_vm_close(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
struct videobuf_queue *q = map->q;
int i;
dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
map->count--;
if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
videobuf_queue_cancel(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
if (q->bufs[i]->map != map)
continue;
mem = q->bufs[i]->priv;
if (mem) {
/* This callback is called only if the kernel has
allocated the memory and that memory is mmapped.
In this case the memory must be freed here in
order to complete the unmap.
*/
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
/* vfree is not atomic - can't be
called with IRQ's disabled
*/
dev_dbg(q->dev, "buf[%d] freeing %p\n",
i, mem->vaddr);
dma_free_coherent(q->dev, mem->size,
mem->vaddr, mem->dma_handle);
mem->vaddr = NULL;
}
q->bufs[i]->map = NULL;
q->bufs[i]->baddr = 0;
}
kfree(map);
videobuf_queue_unlock(q);
}
}
static const struct vm_operations_struct videobuf_vm_ops = {
.open = videobuf_vm_open,
.close = videobuf_vm_close,
};
/**
* videobuf_dma_contig_user_put() - reset pointer to user space buffer
* @mem: per-buffer private videobuf-dma-contig data
*
* This function resets the user space pointer
*/
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
mem->dma_handle = 0;
mem->size = 0;
}
/**
* videobuf_dma_contig_user_get() - setup user space memory pointer
* @mem: per-buffer private videobuf-dma-contig data
* @vb: video buffer to map
*
* This function validates and sets up a pointer to user space memory.
* Only physically contiguous pfn-mapped memory is accepted.
*
* Returns 0 if successful.
*/
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
struct videobuf_buffer *vb)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long prev_pfn, this_pfn;
unsigned long pages_done, user_address;
unsigned int offset;
int ret;
offset = vb->baddr & ~PAGE_MASK;
mem->size = PAGE_ALIGN(vb->size + offset);
ret = -EINVAL;
down_read(&mm->mmap_sem);
vma = find_vma(mm, vb->baddr);
if (!vma)
goto out_up;
if ((vb->baddr + mem->size) > vma->vm_end)
goto out_up;
pages_done = 0;
prev_pfn = 0; /* kill warning */
user_address = vb->baddr;
while (pages_done < (mem->size >> PAGE_SHIFT)) {
ret = follow_pfn(vma, user_address, &this_pfn);
if (ret)
break;
if (pages_done == 0)
mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
else if (this_pfn != (prev_pfn + 1))
ret = -EFAULT;
if (ret)
break;
prev_pfn = this_pfn;
user_address += PAGE_SIZE;
pages_done++;
}
out_up:
up_read(&current->mm->mmap_sem);
return ret;
}
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
if (vb) {
mem = vb->priv = ((char *)vb) + size;
mem->magic = MAGIC_DC_MEM;
}
return vb;
}
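/* Layout note for the helper above: the videobuf_buffer and its private
 * videobuf_dma_contig_memory come from a single kzalloc(), with vb->priv
 * pointing 'size' bytes past the start, so freeing the buffer releases
 * both allocations at once.
 */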
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
return mem->vaddr;
}
static int __videobuf_iolock(struct videobuf_queue *q,
struct videobuf_buffer *vb,
struct v4l2_framebuffer *fbuf)
{
struct videobuf_dma_contig_memory *mem = vb->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
switch (vb->memory) {
case V4L2_MEMORY_MMAP:
dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
/* All handling should be done by __videobuf_mmap_mapper() */
if (!mem->vaddr) {
dev_err(q->dev, "memory is not alloced/mmapped.\n");
return -EINVAL;
}
break;
case V4L2_MEMORY_USERPTR:
dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
/* handle pointer from user space */
if (vb->baddr)
return videobuf_dma_contig_user_get(mem, vb);
/* allocate memory for the read() method */
mem->size = PAGE_ALIGN(vb->size);
mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
&mem->dma_handle, GFP_KERNEL);
if (!mem->vaddr) {
dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
mem->size);
return -ENOMEM;
}
dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
mem->vaddr, mem->size);
break;
case V4L2_MEMORY_OVERLAY:
default:
dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
__func__);
return -EINVAL;
}
return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_mapping *map;
int retval;
unsigned long size;
dev_dbg(q->dev, "%s\n", __func__);
/* create mapping + update buffer list */
map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
if (!map)
return -ENOMEM;
buf->map = map;
map->q = q;
buf->baddr = vma->vm_start;
mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
mem->size = PAGE_ALIGN(buf->bsize);
mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
&mem->dma_handle, GFP_KERNEL);
if (!mem->vaddr) {
dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
mem->size);
goto error;
}
dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
mem->vaddr, mem->size);
/* Try to remap memory */
size = vma->vm_end - vma->vm_start;
size = (size < mem->size) ? size : mem->size;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
retval = remap_pfn_range(vma, vma->vm_start,
mem->dma_handle >> PAGE_SHIFT,
size, vma->vm_page_prot);
if (retval) {
dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
dma_free_coherent(q->dev, mem->size,
mem->vaddr, mem->dma_handle);
goto error;
}
vma->vm_ops = &videobuf_vm_ops;
vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
map, q, vma->vm_start, vma->vm_end,
(long int)buf->bsize,
vma->vm_pgoff, buf->i);
videobuf_vm_open(vma);
return 0;
error:
kfree(map);
return -ENOMEM;
}
static struct videobuf_qtype_ops qops = {
.magic = MAGIC_QTYPE_OPS,
.alloc_vb = __videobuf_alloc_vb,
.iolock = __videobuf_iolock,
.mmap_mapper = __videobuf_mmap_mapper,
.vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
void *priv,
struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
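/* Hypothetical caller sketch (the register name and device layout are made
 * up for illustration): once a buffer has been iolocked, a capture driver
 * can hand the contiguous DMA address straight to its hardware:
 *
 *	dma_addr_t addr = videobuf_to_dma_contig(vb);
 *	writel(addr, dev->base + REG_DMA_START);
 */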
void videobuf_dma_contig_free(struct videobuf_queue *q,
struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
/* mmapped memory can't be freed here, otherwise the mmapped region
would be released while it is still needed. In that case, the memory
release should happen inside videobuf_vm_close().
So, free the memory only if it was allocated for the read()
operation.
*/
if (buf->memory != V4L2_MEMORY_USERPTR)
return;
if (!mem)
return;
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
/* handle user space pointer case */
if (buf->baddr) {
videobuf_dma_contig_user_put(mem);
return;
}
/* read() method */
if (mem->vaddr) {
dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
mem->vaddr = NULL;
}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
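/*
 * Lifecycle note (illustrative): a driver's buf_release() callback would
 * normally call videobuf_dma_contig_free(); as the check above shows, the
 * call is a no-op for mmap()ed buffers, whose memory is only released from
 * videobuf_vm_close() once the last userspace mapping goes away.
 */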
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
| gpl-2.0 |
2nd-yacuken/android_kernel_samsung_espresso10 | drivers/mfd/ucb1400_core.c | 8330 | 3601 | /*
* Core functions for:
* Philips UCB1400 multifunction chip
*
* Based on ucb1400_ts.c:
* Author: Nicolas Pitre
* Created: September 25, 2006
* Copyright: MontaVista Software, Inc.
*
* Splitting done by: Marek Vasut <marek.vasut@gmail.com>
* If something doesn't work and it worked before the split, e-mail me,
* don't bother Nicolas please ;-)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This code is heavily based on ucb1x00-*.c copyrighted by Russell King
* covering the UCB1100, UCB1200 and UCB1300. Support for the UCB1400 has
* been made separate from ucb1x00-core/ucb1x00-ts on Russell's request.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ucb1400.h>
unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
int adcsync)
{
unsigned int val;
if (adcsync)
adc_channel |= UCB_ADC_SYNC_ENA;
ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel);
ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel |
UCB_ADC_START);
while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA))
& UCB_ADC_DAT_VALID))
schedule_timeout_uninterruptible(1);
return val & UCB_ADC_DAT_MASK;
}
EXPORT_SYMBOL_GPL(ucb1400_adc_read);
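/*
 * Usage sketch (illustrative): the touchscreen driver reads one sample per
 * channel, e.g. the X plate voltage; UCB_ADC_INP_TSPX here is assumed to
 * be one of the ADC input selectors from <linux/ucb1400.h>.
 */
#if 0
u16 x = ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPX, adcsync);
#endif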
static int ucb1400_core_probe(struct device *dev)
{
int err;
struct ucb1400 *ucb;
struct ucb1400_ts ucb_ts;
struct ucb1400_gpio ucb_gpio;
struct snd_ac97 *ac97;
struct ucb1400_pdata *pdata = dev->platform_data;
memset(&ucb_ts, 0, sizeof(ucb_ts));
memset(&ucb_gpio, 0, sizeof(ucb_gpio));
ucb = kzalloc(sizeof(struct ucb1400), GFP_KERNEL);
if (!ucb) {
err = -ENOMEM;
goto err;
}
dev_set_drvdata(dev, ucb);
ac97 = to_ac97_t(dev);
ucb_ts.id = ucb1400_reg_read(ac97, UCB_ID);
if (ucb_ts.id != UCB_ID_1400) {
err = -ENODEV;
goto err0;
}
/* GPIO */
ucb_gpio.ac97 = ac97;
ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
if (!ucb->ucb1400_gpio) {
err = -ENOMEM;
goto err0;
}
err = platform_device_add_data(ucb->ucb1400_gpio, &ucb_gpio,
sizeof(ucb_gpio));
if (err)
goto err1;
err = platform_device_add(ucb->ucb1400_gpio);
if (err)
goto err1;
/* TOUCHSCREEN */
ucb_ts.ac97 = ac97;
if (pdata != NULL && pdata->irq >= 0)
ucb_ts.irq = pdata->irq;
else
ucb_ts.irq = -1;
ucb->ucb1400_ts = platform_device_alloc("ucb1400_ts", -1);
if (!ucb->ucb1400_ts) {
err = -ENOMEM;
goto err2;
}
err = platform_device_add_data(ucb->ucb1400_ts, &ucb_ts,
sizeof(ucb_ts));
if (err)
goto err3;
err = platform_device_add(ucb->ucb1400_ts);
if (err)
goto err3;
return 0;
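/*
 * Error unwind below runs in reverse order of setup: err3 drops the
 * allocated-but-not-added ts device, err2/err1 unwind the already added
 * gpio device (del, then put), and err0 frees the core structure.
 */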
err3:
platform_device_put(ucb->ucb1400_ts);
err2:
platform_device_del(ucb->ucb1400_gpio);
err1:
platform_device_put(ucb->ucb1400_gpio);
err0:
kfree(ucb);
err:
return err;
}
static int ucb1400_core_remove(struct device *dev)
{
struct ucb1400 *ucb = dev_get_drvdata(dev);
platform_device_unregister(ucb->ucb1400_ts);
platform_device_unregister(ucb->ucb1400_gpio);
kfree(ucb);
return 0;
}
static struct device_driver ucb1400_core_driver = {
.name = "ucb1400_core",
.bus = &ac97_bus_type,
.probe = ucb1400_core_probe,
.remove = ucb1400_core_remove,
};
static int __init ucb1400_core_init(void)
{
return driver_register(&ucb1400_core_driver);
}
static void __exit ucb1400_core_exit(void)
{
driver_unregister(&ucb1400_core_driver);
}
module_init(ucb1400_core_init);
module_exit(ucb1400_core_exit);
MODULE_DESCRIPTION("Philips UCB1400 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
SlimRoms/kernel_samsung_aries | drivers/media/video/samsung/tv20/s5pc100/sdout_s5pc100.c | 139 | 50274 | /* linux/drivers/media/video/samsung/tv20/s5pc100/sdout_s5pc100.c
*
* TV encoder raw function file for the Samsung TVOut driver
*
* Copyright (c) 2009 Samsung Electronics
* http://www.samsungsemi.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/memory.h>
#include "tv_out_s5pc100.h"
#include "regs/regs-sdaout.h"
#ifdef CONFIG_TVOUT_RAW_DBG
#define S5P_SDAOUT_DEBUG 1
#endif
#ifdef S5P_SDAOUT_DEBUG
#define SDPRINTK(fmt, args...) \
printk(KERN_INFO "\t\t[SDOUT] %s: " fmt, __func__ , ## args)
#else
#define SDPRINTK(fmt, args...)
#endif
static struct resource *sdout_mem;
void __iomem *sdout_base;
/*
* initialization - initialization functions are only called while SDOUT is stopped
*/
enum s5p_tv_sd_err __s5p_sdout_init_video_scale_cfg(
enum s5p_sd_level component_level,
enum s5p_sd_vsync_ratio component_ratio,
enum s5p_sd_level composite_level,
enum s5p_sd_vsync_ratio composite_ratio)
{
u32 temp_reg = 0;
SDPRINTK("%d, %d, %d, %d\n\r", component_level, component_ratio,
composite_level, composite_ratio);
switch (component_level) {
case S5P_TV_SD_LEVEL_0IRE:
temp_reg = SDO_COMPONENT_LEVEL_SEL_0IRE;
break;
case S5P_TV_SD_LEVEL_75IRE:
temp_reg = SDO_COMPONENT_LEVEL_SEL_75IRE;
break;
default:
SDPRINTK("invalid component_level parameter(%d)\n\r",
component_level);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (component_ratio) {
case SDOUT_VTOS_RATIO_10_4:
temp_reg |= SDO_COMPONENT_VTOS_RATIO_10_4;
break;
case SDOUT_VTOS_RATIO_7_3:
temp_reg |= SDO_COMPONENT_VTOS_RATIO_7_3;
break;
default:
SDPRINTK(" invalid composite_level parameter(%d)\n\r",
composite_level);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (composite_level) {
case S5P_TV_SD_LEVEL_0IRE:
temp_reg |= SDO_COMPOSITE_LEVEL_SEL_0IRE;
break;
case S5P_TV_SD_LEVEL_75IRE:
temp_reg |= SDO_COMPOSITE_LEVEL_SEL_75IRE;
break;
default:
SDPRINTK("invalid composite_ratio parameter(%d)\n\r",
composite_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (composite_ratio) {
case SDOUT_VTOS_RATIO_10_4:
temp_reg |= SDO_COMPOSITE_VTOS_RATIO_10_4;
break;
case SDOUT_VTOS_RATIO_7_3:
temp_reg |= SDO_COMPOSITE_VTOS_RATIO_7_3;
break;
default:
SDPRINTK("invalid component_ratio parameter(%d)\n\r",
component_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
writel(temp_reg, sdout_base + S5P_SDO_SCALE);
SDPRINTK("0x%08x)\n\r", readl(sdout_base + S5P_SDO_SCALE));
return SDOUT_NO_ERROR;
}
enum s5p_tv_sd_err __s5p_sdout_init_sync_signal_pin(
enum s5p_sd_sync_sig_pin pin)
{
SDPRINTK("%d\n\r", pin);
switch (pin) {
case SDOUT_SYNC_SIG_NO:
writel(SDO_COMPONENT_SYNC_ABSENT, sdout_base + S5P_SDO_SYNC);
break;
case SDOUT_SYNC_SIG_YG:
writel(SDO_COMPONENT_SYNC_YG, sdout_base + S5P_SDO_SYNC);
break;
case SDOUT_SYNC_SIG_ALL:
writel(SDO_COMPONENT_SYNC_ALL, sdout_base + S5P_SDO_SYNC);
break;
default:
SDPRINTK("invalid pin parameter(%d)\n\r", pin);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_SYNC));
return SDOUT_NO_ERROR;
}
enum s5p_tv_sd_err __s5p_sdout_init_vbi(bool wss_cvbs,
enum s5p_sd_closed_caption_type caption_cvbs,
bool wss_y_sideo,
enum s5p_sd_closed_caption_type caption_y_sideo,
bool cgmsa_rgb,
bool wss_rgb,
enum s5p_sd_closed_caption_type caption_rgb,
bool cgmsa_y_ppr,
bool wss_y_ppr,
enum s5p_sd_closed_caption_type caption_y_ppr)
{
u32 temp_reg = 0;
SDPRINTK(" %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n\r",
wss_cvbs, caption_cvbs, wss_y_sideo, caption_y_sideo,
cgmsa_rgb, wss_rgb, caption_rgb, cgmsa_y_ppr, wss_y_ppr,
caption_y_ppr);
if (wss_cvbs)
temp_reg = SDO_CVBS_WSS_INS;
else
temp_reg = SDO_CVBS_NO_WSS;
switch (caption_cvbs) {
case SDOUT_NO_INS:
temp_reg |= SDO_CVBS_NO_CLOSED_CAPTION;
break;
case SDOUT_INS_1:
temp_reg |= SDO_CVBS_21H_CLOSED_CAPTION;
break;
case SDOUT_INS_2:
temp_reg |= SDO_CVBS_21H_284H_CLOSED_CAPTION;
break;
case SDOUT_INS_OTHERS:
temp_reg |= SDO_CVBS_USE_OTHERS;
break;
default:
SDPRINTK(" invalid caption_cvbs parameter(%d)\n\r",
caption_cvbs);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (wss_y_sideo)
temp_reg |= SDO_SVIDEO_WSS_INS;
else
temp_reg |= SDO_SVIDEO_NO_WSS;
switch (caption_y_sideo) {
case SDOUT_NO_INS:
temp_reg |= SDO_SVIDEO_NO_CLOSED_CAPTION;
break;
case SDOUT_INS_1:
temp_reg |= SDO_SVIDEO_21H_CLOSED_CAPTION;
break;
case SDOUT_INS_2:
temp_reg |= SDO_SVIDEO_21H_284H_CLOSED_CAPTION;
break;
case SDOUT_INS_OTHERS:
temp_reg |= SDO_SVIDEO_USE_OTHERS;
break;
default:
SDPRINTK("invalid caption_y_sideo parameter(%d)\n\r",
caption_y_sideo);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (cgmsa_rgb)
temp_reg |= SDO_RGB_CGMSA_INS;
else
temp_reg |= SDO_RGB_NO_CGMSA;
if (wss_rgb)
temp_reg |= SDO_RGB_WSS_INS;
else
temp_reg |= SDO_RGB_NO_WSS;
switch (caption_rgb) {
case SDOUT_NO_INS:
temp_reg |= SDO_RGB_NO_CLOSED_CAPTION;
break;
case SDOUT_INS_1:
temp_reg |= SDO_RGB_21H_CLOSED_CAPTION;
break;
case SDOUT_INS_2:
temp_reg |= SDO_RGB_21H_284H_CLOSED_CAPTION;
break;
case SDOUT_INS_OTHERS:
temp_reg |= SDO_RGB_USE_OTHERS;
break;
default:
SDPRINTK(" invalid caption_rgb parameter(%d)\n\r",
caption_rgb);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (cgmsa_y_ppr)
temp_reg |= SDO_YPBPR_CGMSA_INS;
else
temp_reg |= SDO_YPBPR_NO_CGMSA;
if (wss_y_ppr)
temp_reg |= SDO_YPBPR_WSS_INS;
else
temp_reg |= SDO_YPBPR_NO_WSS;
switch (caption_y_ppr) {
case SDOUT_NO_INS:
temp_reg |= SDO_YPBPR_NO_CLOSED_CAPTION;
break;
case SDOUT_INS_1:
temp_reg |= SDO_YPBPR_21H_CLOSED_CAPTION;
break;
case SDOUT_INS_2:
temp_reg |= SDO_YPBPR_21H_284H_CLOSED_CAPTION;
break;
case SDOUT_INS_OTHERS:
temp_reg |= SDO_YPBPR_USE_OTHERS;
break;
default:
SDPRINTK("invalid caption_y_ppr parameter(%d)\n\r",
caption_y_ppr);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
writel(temp_reg, sdout_base + S5P_SDO_VBI);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_VBI));
return SDOUT_NO_ERROR;
}
enum s5p_tv_sd_err __s5p_sdout_init_offset_gain(
enum s5p_sd_channel_sel channel,
u32 offset, u32 gain)
{
SDPRINTK("%d, %d, %d\n\r", channel, offset, gain);
switch (channel) {
case SDOUT_CHANNEL_0:
writel(SDO_SCALE_CONV_OFFSET(offset) |
SDO_SCALE_CONV_GAIN(gain),
sdout_base + S5P_SDO_SCALE_CH0);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_SCALE_CH0));
break;
case SDOUT_CHANNEL_1:
writel(SDO_SCALE_CONV_OFFSET(offset) |
SDO_SCALE_CONV_GAIN(gain),
sdout_base + S5P_SDO_SCALE_CH1);
SDPRINTK(" 0x%08x\n\r", readl(sdout_base + S5P_SDO_SCALE_CH1));
break;
case SDOUT_CHANNEL_2:
writel(SDO_SCALE_CONV_OFFSET(offset) |
SDO_SCALE_CONV_GAIN(gain),
sdout_base + S5P_SDO_SCALE_CH2);
SDPRINTK(" 0x%08x\n\r", readl(sdout_base + S5P_SDO_SCALE_CH2));
break;
default:
SDPRINTK(" invalid channel parameter(%d)\n\r", channel);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
return SDOUT_NO_ERROR;
}
void __s5p_sdout_init_delay(u32 delay_y,
u32 offset_video_start,
u32 offset_video_end)
{
SDPRINTK("%d, %d, %d\n\r", delay_y,
offset_video_start, offset_video_end);
writel(SDO_DELAY_YTOC(delay_y) |
SDO_ACTIVE_START_OFFSET(offset_video_start) |
SDO_ACTIVE_END_OFFSET(offset_video_end),
sdout_base + S5P_SDO_YCDELAY);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_YCDELAY));
}
void __s5p_sdout_init_schlock(bool color_sucarrier_pha_adj)
{
SDPRINTK("%d\n\r", color_sucarrier_pha_adj);
if (color_sucarrier_pha_adj)
writel(SDO_COLOR_SC_PHASE_ADJ, sdout_base + S5P_SDO_SCHLOCK);
else
writel(SDO_COLOR_SC_PHASE_NOADJ, sdout_base + S5P_SDO_SCHLOCK);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_SCHLOCK));
}
enum s5p_tv_sd_err __s5p_sdout_init_dac_power_onoff(
enum s5p_sd_channel_sel channel, bool dac_on)
{
u32 temp_on_off;
SDPRINTK("%d, %d)\n\r", channel, dac_on);
switch (channel) {
case SDOUT_CHANNEL_0:
temp_on_off = SDO_POWER_ON_DAC0;
break;
case SDOUT_CHANNEL_1:
temp_on_off = SDO_POWER_ON_DAC1;
break;
case SDOUT_CHANNEL_2:
temp_on_off = SDO_POWER_ON_DAC2;
break;
default:
SDPRINTK("invalid channel parameter(%d)\n\r", channel);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (dac_on)
writel(readl(sdout_base + S5P_SDO_DAC) | temp_on_off,
sdout_base + S5P_SDO_DAC);
else
writel(readl(sdout_base + S5P_SDO_DAC) & ~temp_on_off,
sdout_base + S5P_SDO_DAC);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_DAC));
return SDOUT_NO_ERROR;
}
void __s5p_sdout_init_color_compensaton_onoff(bool bright_hue_saturation_adj,
bool y_ppr_color_compensation,
bool rgcolor_compensation,
bool y_c_color_compensation,
bool y_cvbs_color_compensation)
{
u32 temp_reg = 0;
SDPRINTK("%d, %d, %d, %d, %d)\n\r", bright_hue_saturation_adj,
y_ppr_color_compensation, rgcolor_compensation,
y_c_color_compensation, y_cvbs_color_compensation);
if (bright_hue_saturation_adj)
temp_reg &= ~SDO_COMPONENT_BHS_ADJ_OFF;
else
temp_reg |= SDO_COMPONENT_BHS_ADJ_OFF;
if (y_ppr_color_compensation)
temp_reg &= ~SDO_COMPONENT_YPBPR_COMP_OFF;
else
temp_reg |= SDO_COMPONENT_YPBPR_COMP_OFF;
if (rgcolor_compensation)
temp_reg &= ~SDO_COMPONENT_RGB_COMP_OFF;
else
temp_reg |= SDO_COMPONENT_RGB_COMP_OFF;
if (y_c_color_compensation)
temp_reg &= ~SDO_COMPONENT_YC_COMP_OFF;
else
temp_reg |= SDO_COMPONENT_YC_COMP_OFF;
if (y_cvbs_color_compensation)
temp_reg &= ~SDO_COMPONENT_CVBS_COMP_OFF;
else
temp_reg |= SDO_COMPONENT_CVBS_COMP_OFF;
writel(temp_reg, sdout_base + S5P_SDO_CCCON);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_CCCON));
}
void __s5p_sdout_init_brightness_hue_saturation(u32 gain_brightness,
u32 offset_brightness,
u32 gain0_cb_hue_saturation,
u32 gain1_cb_hue_saturation,
u32 gain0_cr_hue_saturation,
u32 gain1_cr_hue_saturation,
u32 offset_cb_hue_saturation,
u32 offset_cr_hue_saturation)
{
SDPRINTK(" %d, %d, %d, %d, %d, %d, %d, %d)\n\r", gain_brightness,
offset_brightness, gain0_cb_hue_saturation,
gain1_cb_hue_saturation, gain0_cr_hue_saturation,
gain1_cr_hue_saturation, offset_cb_hue_saturation,
offset_cr_hue_saturation);
writel(SDO_BRIGHTNESS_GAIN(gain_brightness) |
SDO_BRIGHTNESS_OFFSET(offset_brightness),
sdout_base + S5P_SDO_YSCALE);
writel(SDO_HS_CB_GAIN0(gain0_cb_hue_saturation) |
SDO_HS_CB_GAIN1(gain1_cb_hue_saturation),
sdout_base + S5P_SDO_CBSCALE);
writel(SDO_HS_CR_GAIN0(gain0_cr_hue_saturation) |
SDO_HS_CR_GAIN1(gain1_cr_hue_saturation),
sdout_base + S5P_SDO_CRSCALE);
writel(SDO_HS_CR_OFFSET(offset_cr_hue_saturation) |
SDO_HS_CB_OFFSET(offset_cb_hue_saturation),
sdout_base + S5P_SDO_CB_CR_OFFSET);
SDPRINTK("0x%08x, 0x%08x, 0x%08x, 0x%08x)\n\r",
readl(sdout_base + S5P_SDO_YSCALE),
readl(sdout_base + S5P_SDO_CBSCALE),
readl(sdout_base + S5P_SDO_CRSCALE),
readl(sdout_base + S5P_SDO_CB_CR_OFFSET));
}
void __s5p_sdout_init_rgb_color_compensation(u32 max_rgbcube,
u32 min_rgbcube)
{
SDPRINTK("0x%08x, 0x%08x\n\r", max_rgbcube, min_rgbcube);
writel(SDO_MAX_RGB_CUBE(max_rgbcube) | SDO_MIN_RGB_CUBE(min_rgbcube),
sdout_base + S5P_SDO_RGB_CC);
SDPRINTK("0x%08x)\n\r", readl(sdout_base + S5P_SDO_RGB_CC));
}
void __s5p_sdout_init_cvbs_color_compensation(u32 y_lower_mid,
u32 y_bottom,
u32 y_top,
u32 y_upper_mid,
u32 radius)
{
SDPRINTK("%d, %d, %d, %d, %d\n\r", y_lower_mid, y_bottom,
y_top, y_upper_mid, radius);
writel(SDO_Y_LOWER_MID_CVBS_CORN(y_lower_mid) |
SDO_Y_BOTTOM_CVBS_CORN(y_bottom),
sdout_base + S5P_SDO_CVBS_CC_Y1);
writel(SDO_Y_TOP_CVBS_CORN(y_top) |
SDO_Y_UPPER_MID_CVBS_CORN(y_upper_mid),
sdout_base + S5P_SDO_CVBS_CC_Y2);
writel(SDO_RADIUS_CVBS_CORN(radius), sdout_base + S5P_SDO_CVBS_CC_C);
SDPRINTK("0x%08x, 0x%08x, 0x%08x)\n\r",
readl(sdout_base + S5P_SDO_CVBS_CC_Y1),
readl(sdout_base + S5P_SDO_CVBS_CC_Y2),
readl(sdout_base + S5P_SDO_CVBS_CC_C));
}
void __s5p_sdout_init_svideo_color_compensation(u32 y_top,
u32 y_bottom,
u32 y_c_cylinder)
{
SDPRINTK(" %d, %d, %d)\n\r", y_top, y_bottom, y_c_cylinder);
writel(SDO_Y_TOP_YC_CYLINDER(y_top) |
SDO_Y_BOTOM_YC_CYLINDER(y_bottom),
sdout_base + S5P_SDO_YC_CC_Y);
writel(SDO_RADIUS_YC_CYLINDER(y_c_cylinder),
sdout_base + S5P_SDO_YC_CC_C);
SDPRINTK("0x%08x, 0x%08x)\n\r", readl(sdout_base + S5P_SDO_YC_CC_Y),
readl(sdout_base + S5P_SDO_YC_CC_C));
}
void __s5p_sdout_init_component_porch(u32 back_525,
u32 front_525,
u32 back_625,
u32 front_625)
{
SDPRINTK(" %d, %d, %d, %d)\n\r", back_525,
front_525, back_625, front_625);
writel(SDO_COMPONENT_525_BP(back_525) |
SDO_COMPONENT_525_FP(front_525),
sdout_base + S5P_SDO_CSC_525_PORCH);
writel(SDO_COMPONENT_625_BP(back_625) |
SDO_COMPONENT_625_FP(front_625),
sdout_base + S5P_SDO_CSC_625_PORCH);
SDPRINTK(" 0x%08x, 0x%08x)\n\r",
readl(sdout_base + S5P_SDO_CSC_525_PORCH),
readl(sdout_base + S5P_SDO_CSC_625_PORCH));
}
enum s5p_tv_sd_err __s5p_sdout_init_vesa_rgb_sync(
enum s5p_sd_vesa_rgb_sync_type sync_type,
enum s5p_tv_active_polarity v_sync_active,
enum s5p_tv_active_polarity h_sync_active)
{
u32 temp_reg = 0;
SDPRINTK("%d, %d, %d\n\r", sync_type, v_sync_active, h_sync_active);
switch (sync_type) {
case SDOUT_VESA_RGB_SYNC_COMPOSITE:
temp_reg |= SDO_RGB_SYNC_COMPOSITE;
break;
case SDOUT_VESA_RGB_SYNC_SEPARATE:
temp_reg |= SDO_RGB_SYNC_SEPERATE;
break;
default:
SDPRINTK(" invalid sync_type parameter(%d)\n\r", sync_type);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (v_sync_active) {
case TVOUT_POL_ACTIVE_LOW:
temp_reg |= SDO_RGB_VSYNC_LOW_ACT;
break;
case TVOUT_POL_ACTIVE_HIGH:
temp_reg |= SDO_RGB_VSYNC_HIGH_ACT;
break;
default:
SDPRINTK(" invalid v_sync_active parameter(%d)\n\r",
v_sync_active);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (h_sync_active) {
case TVOUT_POL_ACTIVE_LOW:
temp_reg |= SDO_RGB_HSYNC_LOW_ACT;
break;
case TVOUT_POL_ACTIVE_HIGH:
temp_reg |= SDO_RGB_HSYNC_HIGH_ACT;
break;
default:
SDPRINTK(" invalid h_sync_active parameter(%d)\n\r",
h_sync_active);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
writel(temp_reg, sdout_base + S5P_SDO_RGBSYNC);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_RGBSYNC));
return SDOUT_NO_ERROR;
}
void __s5p_sdout_init_oversampling_filter_coeff(u32 size,
u32 *coeff,
u32 *coeff1,
u32 *coeff2)
{
SDPRINTK(" %d, 0x%x, 0x%x, 0x%x\n\r", (u32)size, (u32)coeff,
(u32)coeff1, (u32)coeff2);
/* the coefficient registers form a contiguous block, so the tables
can be copied straight into I/O memory */
if (coeff != NULL)
memcpy_toio(sdout_base + S5P_SDO_OSFC00_0, coeff, size * 4);
if (coeff1 != NULL)
memcpy_toio(sdout_base + S5P_SDO_OSFC00_1, coeff1, size * 4);
if (coeff2 != NULL)
memcpy_toio(sdout_base + S5P_SDO_OSFC00_2, coeff2, size * 4);
SDPRINTK(" ()\n\r");
}
enum s5p_tv_sd_err __s5p_sdout_init_ch_xtalk_cancel_coef(
enum s5p_sd_channel_sel channel,
u32 coeff2, u32 coeff1)
{
SDPRINTK(" %d, %d, %d\n\r", channel, coeff2, coeff1);
switch (channel) {
case SDOUT_CHANNEL_0:
writel(SDO_XTALK_COEF02(coeff2) | SDO_XTALK_COEF01(coeff1),
sdout_base + S5P_SDO_XTALK0);
SDPRINTK(" 0x%08x)\n\r", readl(sdout_base + S5P_SDO_XTALK0));
break;
case SDOUT_CHANNEL_1:
writel(SDO_XTALK_COEF02(coeff2) | SDO_XTALK_COEF01(coeff1),
sdout_base + S5P_SDO_XTALK1);
SDPRINTK(" 0x%08x)\n\r", readl(sdout_base + S5P_SDO_XTALK1));
break;
case SDOUT_CHANNEL_2:
writel(SDO_XTALK_COEF02(coeff2) | SDO_XTALK_COEF01(coeff1),
sdout_base + S5P_SDO_XTALK2);
SDPRINTK("0x%08x)\n\r", readl(sdout_base + S5P_SDO_XTALK2));
break;
default:
SDPRINTK(" invalid channel parameter(%d)\n\r", channel);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
return SDOUT_NO_ERROR;
}
void __s5p_sdout_init_closed_caption(u32 display_cc, u32 non_display_cc)
{
SDPRINTK("%d, %d\n\r", display_cc, non_display_cc);
writel(SDO_DISPLAY_CC_CAPTION(display_cc) |
SDO_NON_DISPLAY_CC_CAPTION(non_display_cc),
sdout_base + S5P_SDO_ARMCC);
SDPRINTK("0x%x\n\r", readl(sdout_base + S5P_SDO_ARMCC));
}
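/*
 * CRC helper for the WSS525/CGMS525 payload words programmed below: a
 * 6-bit LFSR (taps matching the CGMS-A generator polynomial x^6 + x + 1,
 * preset to all ones) is clocked over the low 14 payload bits and the
 * resulting check bits are merged into the returned value.
 */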
static u32 __s5p_sdout_init_wss_cgms_crc(u32 value)
{
u8 i;
u8 CGMS[14], CRC[6], OLD_CRC;
u32 temp_in;
temp_in = value;
for (i = 0; i < 14; i++)
CGMS[i] = (u8)(temp_in >> i) & 0x1 ;
for (i = 0; i < 6; i++)
CRC[i] = 0x1;
for (i = 0; i < 14; i++) {
OLD_CRC = CRC[0];
CRC[0] = CRC[1];
CRC[1] = CRC[2];
CRC[2] = CRC[3];
CRC[3] = CRC[4];
CRC[4] = OLD_CRC ^ CGMS[i] ^ CRC[5];
CRC[5] = OLD_CRC ^ CGMS[i];
}
temp_in &= 0x3fff;
for (i = 0; i < 6; i++)
temp_in |= ((u32)(CRC[i] & 0x1) << i);
return temp_in;
}
enum s5p_tv_sd_err __s5p_sdout_init_wss525_data(
enum s5p_sd_525_copy_permit copy_permit,
enum s5p_sd_525_mv_psp mv_psp,
enum s5p_sd_525_copy_info copy_info,
bool analog_on,
enum s5p_sd_525_aspect_ratio display_ratio)
{
u32 temp_reg = 0;
SDPRINTK("%d, %d, %d, %d\n\r", copy_permit, mv_psp, copy_info,
display_ratio);
switch (copy_permit) {
case SDO_525_COPY_PERMIT:
temp_reg = SDO_WORD2_WSS525_COPY_PERMIT;
break;
case SDO_525_ONECOPY_PERMIT:
temp_reg = SDO_WORD2_WSS525_ONECOPY_PERMIT;
break;
case SDO_525_NOCOPY_PERMIT:
temp_reg = SDO_WORD2_WSS525_NOCOPY_PERMIT;
break;
default:
SDPRINTK(" invalid copy_permit parameter(%d)\n\r",
copy_permit);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (mv_psp) {
case SDO_525_MV_PSP_OFF:
temp_reg |= SDO_WORD2_WSS525_MV_PSP_OFF;
break;
case SDO_525_MV_PSP_ON_2LINE_BURST:
temp_reg |= SDO_WORD2_WSS525_MV_PSP_ON_2LINE_BURST;
break;
case SDO_525_MV_PSP_ON_BURST_OFF:
temp_reg |= SDO_WORD2_WSS525_MV_PSP_ON_BURST_OFF;
break;
case SDO_525_MV_PSP_ON_4LINE_BURST:
temp_reg |= SDO_WORD2_WSS525_MV_PSP_ON_4LINE_BURST;
break;
default:
SDPRINTK(" invalid mv_psp parameter(%d)\n\r", mv_psp);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (copy_info) {
case SDO_525_COPY_INFO:
temp_reg |= SDO_WORD1_WSS525_COPY_INFO;
break;
case SDO_525_DEFAULT:
temp_reg |= SDO_WORD1_WSS525_DEFAULT;
break;
default:
SDPRINTK(" invalid copy_info parameter(%d)\n\r", copy_info);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (analog_on)
temp_reg |= SDO_WORD2_WSS525_ANALOG_ON;
else
temp_reg |= SDO_WORD2_WSS525_ANALOG_OFF;
switch (display_ratio) {
case SDO_525_COPY_PERMIT:
temp_reg |= SDO_WORD0_WSS525_4_3_NORMAL;
break;
case SDO_525_ONECOPY_PERMIT:
temp_reg |= SDO_WORD0_WSS525_16_9_ANAMORPIC;
break;
case SDO_525_NOCOPY_PERMIT:
temp_reg |= SDO_WORD0_WSS525_4_3_LETTERBOX;
break;
default:
SDPRINTK(" invalid display_ratio parameter(%d)\n\r",
display_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
writel(temp_reg | SDO_CRC_WSS525(
__s5p_sdout_init_wss_cgms_crc(temp_reg)),
sdout_base + S5P_SDO_WSS525);
SDPRINTK("0x%08x)\n\r", readl(sdout_base + S5P_SDO_WSS525));
return SDOUT_NO_ERROR;
}
enum s5p_tv_sd_err __s5p_sdout_init_wss625_data(bool surround_sound,
bool copyright,
bool copy_protection,
bool text_subtitles,
enum s5p_sd_625_subtitles open_subtitles,
enum s5p_sd_625_camera_film camera_film,
enum s5p_sd_625_color_encoding color_encoding,
bool helper_signal,
enum s5p_sd_625_aspect_ratio display_ratio)
{
u32 temp_reg = 0;
SDPRINTK("%d, %d, %d, %d, %d, %d, %d, %d, %d\n\r",
surround_sound, copyright, copy_protection,
text_subtitles, open_subtitles, camera_film,
color_encoding, helper_signal, display_ratio);
if (surround_sound)
temp_reg = SDO_WSS625_SURROUND_SOUND_ENABLE;
else
temp_reg = SDO_WSS625_SURROUND_SOUND_DISABLE;
if (copyright)
temp_reg |= SDO_WSS625_COPYRIGHT;
else
temp_reg |= SDO_WSS625_NO_COPYRIGHT;
if (copy_protection)
temp_reg |= SDO_WSS625_COPY_RESTRICTED;
else
temp_reg |= SDO_WSS625_COPY_NOT_RESTRICTED;
if (text_subtitles)
temp_reg |= SDO_WSS625_TELETEXT_SUBTITLES;
else
temp_reg |= SDO_WSS625_TELETEXT_NO_SUBTITLES;
switch (open_subtitles) {
case SDO_625_NO_OPEN_SUBTITLES:
temp_reg |= SDO_WSS625_NO_OPEN_SUBTITLES;
break;
case SDO_625_INACT_OPEN_SUBTITLES:
temp_reg |= SDO_WSS625_INACT_OPEN_SUBTITLES;
break;
case SDO_625_OUTACT_OPEN_SUBTITLES:
temp_reg |= SDO_WSS625_OUTACT_OPEN_SUBTITLES;
break;
default:
SDPRINTK(" invalid open_subtitles parameter(%d)\n\r",
open_subtitles);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (camera_film) {
case SDO_625_CAMERA:
temp_reg |= SDO_WSS625_CAMERA;
break;
case SDO_625_FILM:
temp_reg |= SDO_WSS625_FILM;
break;
default:
SDPRINTK("invalid camera_film parameter(%d)\n\r", camera_film);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (color_encoding) {
case SDO_625_NORMAL_PAL:
temp_reg |= SDO_WSS625_NORMAL_PAL;
break;
case SDO_625_MOTION_ADAPTIVE_COLORPLUS:
temp_reg |= SDO_WSS625_MOTION_ADAPTIVE_COLORPLUS;
break;
default:
SDPRINTK("invalid color_encoding parameter(%d)\n\r",
color_encoding);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (helper_signal)
temp_reg |= SDO_WSS625_HELPER_SIG;
else
temp_reg |= SDO_WSS625_HELPER_NO_SIG;
switch (display_ratio) {
case SDO_625_4_3_FULL_576:
temp_reg |= SDO_WSS625_4_3_FULL_576;
break;
case SDO_625_14_9_LETTERBOX_CENTER_504:
temp_reg |= SDO_WSS625_14_9_LETTERBOX_CENTER_504;
break;
case SDO_625_14_9_LETTERBOX_TOP_504:
temp_reg |= SDO_WSS625_14_9_LETTERBOX_TOP_504;
break;
case SDO_625_16_9_LETTERBOX_CENTER_430:
temp_reg |= SDO_WSS625_16_9_LETTERBOX_CENTER_430;
break;
case SDO_625_16_9_LETTERBOX_TOP_430:
temp_reg |= SDO_WSS625_16_9_LETTERBOX_TOP_430;
break;
case SDO_625_16_9_LETTERBOX_CENTER:
temp_reg |= SDO_WSS625_16_9_LETTERBOX_CENTER;
break;
case SDO_625_14_9_FULL_CENTER_576:
temp_reg |= SDO_WSS625_14_9_FULL_CENTER_576;
break;
case SDO_625_16_9_ANAMORPIC_576:
temp_reg |= SDO_WSS625_16_9_ANAMORPIC_576;
break;
default:
SDPRINTK("invalid display_ratio parameter(%d)\n\r",
display_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
writel(temp_reg, sdout_base + S5P_SDO_WSS625);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_WSS625));
return SDOUT_NO_ERROR;
}
enum s5p_tv_sd_err __s5p_sdout_init_cgmsa525_data(
enum s5p_sd_525_copy_permit copy_permit,
enum s5p_sd_525_mv_psp mv_psp,
enum s5p_sd_525_copy_info copy_info,
bool analog_on,
enum s5p_sd_525_aspect_ratio display_ratio)
{
u32 temp_reg = 0;
SDPRINTK("%d, %d, %d, %d)\n\r", copy_permit, mv_psp,
copy_info, display_ratio);
switch (copy_permit) {
case SDO_525_COPY_PERMIT:
temp_reg = SDO_WORD2_CGMS525_COPY_PERMIT;
break;
case SDO_525_ONECOPY_PERMIT:
temp_reg = SDO_WORD2_CGMS525_ONECOPY_PERMIT;
break;
case SDO_525_NOCOPY_PERMIT:
temp_reg = SDO_WORD2_CGMS525_NOCOPY_PERMIT;
break;
default:
SDPRINTK("invalid copy_permit parameter(%d)\n\r", copy_permit);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (mv_psp) {
case SDO_525_MV_PSP_OFF:
temp_reg |= SDO_WORD2_CGMS525_MV_PSP_OFF;
break;
case SDO_525_MV_PSP_ON_2LINE_BURST:
temp_reg |= SDO_WORD2_CGMS525_MV_PSP_ON_2LINE_BURST;
break;
case SDO_525_MV_PSP_ON_BURST_OFF:
temp_reg |= SDO_WORD2_CGMS525_MV_PSP_ON_BURST_OFF;
break;
case SDO_525_MV_PSP_ON_4LINE_BURST:
temp_reg |= SDO_WORD2_CGMS525_MV_PSP_ON_4LINE_BURST;
break;
default:
SDPRINTK(" invalid mv_psp parameter(%d)\n\r", mv_psp);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (copy_info) {
case SDO_525_COPY_INFO:
temp_reg |= SDO_WORD1_CGMS525_COPY_INFO;
break;
case SDO_525_DEFAULT:
temp_reg |= SDO_WORD1_CGMS525_DEFAULT;
break;
default:
SDPRINTK("invalid copy_info parameter(%d)\n\r", copy_info);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (analog_on)
temp_reg |= SDO_WORD2_CGMS525_ANALOG_ON;
else
temp_reg |= SDO_WORD2_CGMS525_ANALOG_OFF;
switch (display_ratio) {
case SDO_525_COPY_PERMIT:
temp_reg |= SDO_WORD0_CGMS525_4_3_NORMAL;
break;
case SDO_525_ONECOPY_PERMIT:
temp_reg |= SDO_WORD0_CGMS525_16_9_ANAMORPIC;
break;
case SDO_525_NOCOPY_PERMIT:
temp_reg |= SDO_WORD0_CGMS525_4_3_LETTERBOX;
break;
default:
SDPRINTK(" invalid display_ratio parameter(%d)\n\r",
display_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
writel(temp_reg |
SDO_CRC_CGMS525(__s5p_sdout_init_wss_cgms_crc(temp_reg)),
sdout_base + S5P_SDO_CGMS525);
SDPRINTK(" 0x%08x)\n\r", readl(sdout_base + S5P_SDO_CGMS525));
return SDOUT_NO_ERROR;
}
enum s5p_tv_sd_err __s5p_sdout_init_cgmsa625_data(bool surround_sound,
bool copyright,
bool copy_protection,
bool text_subtitles,
enum s5p_sd_625_subtitles open_subtitles,
enum s5p_sd_625_camera_film camera_film,
enum s5p_sd_625_color_encoding color_encoding,
bool helper_signal,
enum s5p_sd_625_aspect_ratio display_ratio)
{
u32 temp_reg = 0;
SDPRINTK("%d, %d, %d, %d, %d, %d, %d, %d, %d)\n\r",
surround_sound, copyright, copy_protection,
text_subtitles, open_subtitles, camera_film,
color_encoding, helper_signal, display_ratio);
if (surround_sound)
temp_reg = SDO_CGMS625_SURROUND_SOUND_ENABLE;
else
temp_reg = SDO_CGMS625_SURROUND_SOUND_DISABLE;
if (copyright)
temp_reg |= SDO_CGMS625_COPYRIGHT;
else
temp_reg |= SDO_CGMS625_NO_COPYRIGHT;
if (copy_protection)
temp_reg |= SDO_CGMS625_COPY_RESTRICTED;
else
temp_reg |= SDO_CGMS625_COPY_NOT_RESTRICTED;
if (text_subtitles)
temp_reg |= SDO_CGMS625_TELETEXT_SUBTITLES;
else
temp_reg |= SDO_CGMS625_TELETEXT_NO_SUBTITLES;
switch (open_subtitles) {
case SDO_625_NO_OPEN_SUBTITLES:
temp_reg |= SDO_CGMS625_NO_OPEN_SUBTITLES;
break;
case SDO_625_INACT_OPEN_SUBTITLES:
temp_reg |= SDO_CGMS625_INACT_OPEN_SUBTITLES;
break;
case SDO_625_OUTACT_OPEN_SUBTITLES:
temp_reg |= SDO_CGMS625_OUTACT_OPEN_SUBTITLES;
break;
default:
SDPRINTK("invalid open_subtitles parameter(%d)\n\r",
open_subtitles);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (camera_film) {
case SDO_625_CAMERA:
temp_reg |= SDO_CGMS625_CAMERA;
break;
case SDO_625_FILM:
temp_reg |= SDO_CGMS625_FILM;
break;
default:
SDPRINTK(" invalid camera_film parameter(%d)\n\r",
camera_film);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (color_encoding) {
case SDO_625_NORMAL_PAL:
temp_reg |= SDO_CGMS625_NORMAL_PAL;
break;
case SDO_625_MOTION_ADAPTIVE_COLORPLUS:
temp_reg |= SDO_CGMS625_MOTION_ADAPTIVE_COLORPLUS;
break;
default:
SDPRINTK(" invalid color_encoding parameter(%d)\n\r",
color_encoding);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
if (helper_signal)
temp_reg |= SDO_CGMS625_HELPER_SIG;
else
temp_reg |= SDO_CGMS625_HELPER_NO_SIG;
switch (display_ratio) {
case SDO_625_4_3_FULL_576:
temp_reg |= SDO_CGMS625_4_3_FULL_576;
break;
case SDO_625_14_9_LETTERBOX_CENTER_504:
temp_reg |= SDO_CGMS625_14_9_LETTERBOX_CENTER_504;
break;
case SDO_625_14_9_LETTERBOX_TOP_504:
temp_reg |= SDO_CGMS625_14_9_LETTERBOX_TOP_504;
break;
case SDO_625_16_9_LETTERBOX_CENTER_430:
temp_reg |= SDO_CGMS625_16_9_LETTERBOX_CENTER_430;
break;
case SDO_625_16_9_LETTERBOX_TOP_430:
temp_reg |= SDO_CGMS625_16_9_LETTERBOX_TOP_430;
break;
case SDO_625_16_9_LETTERBOX_CENTER:
temp_reg |= SDO_CGMS625_16_9_LETTERBOX_CENTER;
break;
case SDO_625_14_9_FULL_CENTER_576:
temp_reg |= SDO_CGMS625_14_9_FULL_CENTER_576;
break;
case SDO_625_16_9_ANAMORPIC_576:
temp_reg |= SDO_CGMS625_16_9_ANAMORPIC_576;
break;
default:
SDPRINTK("invalid display_ratio parameter(%d)\n\r",
display_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
writel(temp_reg, sdout_base + S5P_SDO_CGMS625);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_CGMS625));
return SDOUT_NO_ERROR;
}
static enum s5p_tv_sd_err __s5p_sdout_init_antialias_filter_coeff_default(
enum s5p_sd_level composite_level,
enum s5p_sd_vsync_ratio composite_ratio,
enum s5p_tv_o_mode out_mode)
{
SDPRINTK("%d, %d, %d\n\r", composite_level, composite_ratio, out_mode);
switch (composite_level) {
case S5P_TV_SD_LEVEL_0IRE:
switch (composite_ratio) {
case SDOUT_VTOS_RATIO_10_4:
switch (out_mode) {
case TVOUT_OUTPUT_COMPOSITE:
case TVOUT_OUTPUT_SVIDEO:
writel(0x00000000 , sdout_base + S5P_SDO_Y3);
writel(0x00000000 , sdout_base + S5P_SDO_Y4);
writel(0x00000000 , sdout_base + S5P_SDO_Y5);
writel(0x00000000 , sdout_base + S5P_SDO_Y6);
writel(0x00000000 , sdout_base + S5P_SDO_Y7);
writel(0x00000000 , sdout_base + S5P_SDO_Y8);
writel(0x00000000 , sdout_base + S5P_SDO_Y9);
writel(0x00000000 , sdout_base + S5P_SDO_Y10);
writel(0x0000029a , sdout_base + S5P_SDO_Y11);
writel(0x00000000 , sdout_base + S5P_SDO_CB0);
writel(0x00000000 , sdout_base + S5P_SDO_CB1);
writel(0x00000000 , sdout_base + S5P_SDO_CB2);
writel(0x00000000 , sdout_base + S5P_SDO_CB3);
writel(0x00000000 , sdout_base + S5P_SDO_CB4);
writel(0x00000001 , sdout_base + S5P_SDO_CB5);
writel(0x00000007 , sdout_base + S5P_SDO_CB6);
writel(0x00000015 , sdout_base + S5P_SDO_CB7);
writel(0x0000002b , sdout_base + S5P_SDO_CB8);
writel(0x00000045 , sdout_base + S5P_SDO_CB9);
writel(0x00000059 , sdout_base + S5P_SDO_CB10);
writel(0x00000061 , sdout_base + S5P_SDO_CB11);
writel(0x00000000 , sdout_base + S5P_SDO_CR1);
writel(0x00000000 , sdout_base + S5P_SDO_CR2);
writel(0x00000000 , sdout_base + S5P_SDO_CR3);
writel(0x00000000 , sdout_base + S5P_SDO_CR4);
writel(0x00000002 , sdout_base + S5P_SDO_CR5);
writel(0x0000000a , sdout_base + S5P_SDO_CR6);
writel(0x0000001e , sdout_base + S5P_SDO_CR7);
writel(0x0000003d , sdout_base + S5P_SDO_CR8);
writel(0x00000061 , sdout_base + S5P_SDO_CR9);
writel(0x0000007a , sdout_base + S5P_SDO_CR10);
writel(0x0000008f , sdout_base + S5P_SDO_CR11);
break;
case TVOUT_OUTPUT_COMPONENT_YPBPR_INERLACED:
case TVOUT_OUTPUT_COMPONENT_YPBPR_PROGRESSIVE:
case TVOUT_OUTPUT_COMPONENT_RGB_PROGRESSIVE:
writel(0x00000000, sdout_base + S5P_SDO_Y0);
writel(0x00000000, sdout_base + S5P_SDO_Y1);
writel(0x00000000, sdout_base + S5P_SDO_Y2);
writel(0x00000000, sdout_base + S5P_SDO_Y3);
writel(0x00000000, sdout_base + S5P_SDO_Y4);
writel(0x00000000, sdout_base + S5P_SDO_Y5);
writel(0x00000000, sdout_base + S5P_SDO_Y6);
writel(0x00000000, sdout_base + S5P_SDO_Y7);
writel(0x00000000, sdout_base + S5P_SDO_Y8);
writel(0x00000000, sdout_base + S5P_SDO_Y9);
writel(0x00000000, sdout_base + S5P_SDO_Y10);
writel(0x0000029a, sdout_base + S5P_SDO_Y11);
writel(0x00000000, sdout_base + S5P_SDO_CB0);
writel(0x00000000, sdout_base + S5P_SDO_CB1);
writel(0x00000000, sdout_base + S5P_SDO_CB2);
writel(0x00000000, sdout_base + S5P_SDO_CB3);
writel(0x00000000, sdout_base + S5P_SDO_CB4);
writel(0x00000001, sdout_base + S5P_SDO_CB5);
writel(0x00000007, sdout_base + S5P_SDO_CB6);
writel(0x00000015, sdout_base + S5P_SDO_CB7);
writel(0x0000002b, sdout_base + S5P_SDO_CB8);
writel(0x00000045, sdout_base + S5P_SDO_CB9);
writel(0x00000059, sdout_base + S5P_SDO_CB10);
writel(0x00000061, sdout_base + S5P_SDO_CB11);
writel(0x00000000, sdout_base + S5P_SDO_CR1);
writel(0x00000000, sdout_base + S5P_SDO_CR2);
writel(0x00000000, sdout_base + S5P_SDO_CR3);
writel(0x00000000, sdout_base + S5P_SDO_CR4);
writel(0x00000002, sdout_base + S5P_SDO_CR5);
writel(0x0000000a, sdout_base + S5P_SDO_CR6);
writel(0x0000001e, sdout_base + S5P_SDO_CR7);
writel(0x0000003d, sdout_base + S5P_SDO_CR8);
writel(0x00000061, sdout_base + S5P_SDO_CR9);
writel(0x0000007a, sdout_base + S5P_SDO_CR10);
writel(0x0000008f, sdout_base + S5P_SDO_CR11);
break;
default:
SDPRINTK("invalid out_mode parameter(%d)\n\r",
out_mode);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
break;
case SDOUT_VTOS_RATIO_7_3:
writel(0x00000000, sdout_base + S5P_SDO_Y0);
writel(0x00000000, sdout_base + S5P_SDO_Y1);
writel(0x00000000, sdout_base + S5P_SDO_Y2);
writel(0x00000000, sdout_base + S5P_SDO_Y3);
writel(0x00000000, sdout_base + S5P_SDO_Y4);
writel(0x00000000, sdout_base + S5P_SDO_Y5);
writel(0x00000000, sdout_base + S5P_SDO_Y6);
writel(0x00000000, sdout_base + S5P_SDO_Y7);
writel(0x00000000, sdout_base + S5P_SDO_Y8);
writel(0x00000000, sdout_base + S5P_SDO_Y9);
writel(0x00000000, sdout_base + S5P_SDO_Y10);
writel(0x00000281, sdout_base + S5P_SDO_Y11);
writel(0x00000000, sdout_base + S5P_SDO_CB0);
writel(0x00000000, sdout_base + S5P_SDO_CB1);
writel(0x00000000, sdout_base + S5P_SDO_CB2);
writel(0x00000000, sdout_base + S5P_SDO_CB3);
writel(0x00000000, sdout_base + S5P_SDO_CB4);
writel(0x00000001, sdout_base + S5P_SDO_CB5);
writel(0x00000007, sdout_base + S5P_SDO_CB6);
writel(0x00000015, sdout_base + S5P_SDO_CB7);
writel(0x0000002a, sdout_base + S5P_SDO_CB8);
writel(0x00000044, sdout_base + S5P_SDO_CB9);
writel(0x00000057, sdout_base + S5P_SDO_CB10);
writel(0x0000005f, sdout_base + S5P_SDO_CB11);
writel(0x00000000, sdout_base + S5P_SDO_CR1);
writel(0x00000000, sdout_base + S5P_SDO_CR2);
writel(0x00000000, sdout_base + S5P_SDO_CR3);
writel(0x00000000, sdout_base + S5P_SDO_CR4);
writel(0x00000002, sdout_base + S5P_SDO_CR5);
writel(0x0000000a, sdout_base + S5P_SDO_CR6);
writel(0x0000001d, sdout_base + S5P_SDO_CR7);
writel(0x0000003c, sdout_base + S5P_SDO_CR8);
writel(0x0000005f, sdout_base + S5P_SDO_CR9);
writel(0x0000007b, sdout_base + S5P_SDO_CR10);
writel(0x00000086, sdout_base + S5P_SDO_CR11);
break;
default:
SDPRINTK("invalid composite_ratio parameter(%d)\n\r",
composite_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
break;
case S5P_TV_SD_LEVEL_75IRE:
switch (composite_ratio) {
case SDOUT_VTOS_RATIO_10_4:
writel(0x00000000, sdout_base + S5P_SDO_Y0);
writel(0x00000000, sdout_base + S5P_SDO_Y1);
writel(0x00000000, sdout_base + S5P_SDO_Y2);
writel(0x00000000, sdout_base + S5P_SDO_Y3);
writel(0x00000000, sdout_base + S5P_SDO_Y4);
writel(0x00000000, sdout_base + S5P_SDO_Y5);
writel(0x00000000, sdout_base + S5P_SDO_Y6);
writel(0x00000000, sdout_base + S5P_SDO_Y7);
writel(0x00000000, sdout_base + S5P_SDO_Y8);
writel(0x00000000, sdout_base + S5P_SDO_Y9);
writel(0x00000000, sdout_base + S5P_SDO_Y10);
writel(0x0000025d, sdout_base + S5P_SDO_Y11);
writel(0x00000000, sdout_base + S5P_SDO_CB0);
writel(0x00000000, sdout_base + S5P_SDO_CB1);
writel(0x00000000, sdout_base + S5P_SDO_CB2);
writel(0x00000000, sdout_base + S5P_SDO_CB3);
writel(0x00000000, sdout_base + S5P_SDO_CB4);
writel(0x00000001, sdout_base + S5P_SDO_CB5);
writel(0x00000007, sdout_base + S5P_SDO_CB6);
writel(0x00000014, sdout_base + S5P_SDO_CB7);
writel(0x00000028, sdout_base + S5P_SDO_CB8);
writel(0x0000003f, sdout_base + S5P_SDO_CB9);
writel(0x00000052, sdout_base + S5P_SDO_CB10);
writel(0x0000005a, sdout_base + S5P_SDO_CB11);
writel(0x00000000, sdout_base + S5P_SDO_CR1);
writel(0x00000000, sdout_base + S5P_SDO_CR2);
writel(0x00000000, sdout_base + S5P_SDO_CR3);
writel(0x00000000, sdout_base + S5P_SDO_CR4);
writel(0x00000001, sdout_base + S5P_SDO_CR5);
writel(0x00000009, sdout_base + S5P_SDO_CR6);
writel(0x0000001c, sdout_base + S5P_SDO_CR7);
writel(0x00000039, sdout_base + S5P_SDO_CR8);
writel(0x0000005a, sdout_base + S5P_SDO_CR9);
writel(0x00000074, sdout_base + S5P_SDO_CR10);
writel(0x0000007e, sdout_base + S5P_SDO_CR11);
break;
case SDOUT_VTOS_RATIO_7_3:
writel(0x00000000, sdout_base + S5P_SDO_Y0);
writel(0x00000000, sdout_base + S5P_SDO_Y1);
writel(0x00000000, sdout_base + S5P_SDO_Y2);
writel(0x00000000, sdout_base + S5P_SDO_Y3);
writel(0x00000000, sdout_base + S5P_SDO_Y4);
writel(0x00000000, sdout_base + S5P_SDO_Y5);
writel(0x00000000, sdout_base + S5P_SDO_Y6);
writel(0x00000000, sdout_base + S5P_SDO_Y7);
writel(0x00000000, sdout_base + S5P_SDO_Y8);
writel(0x00000000, sdout_base + S5P_SDO_Y9);
writel(0x00000000, sdout_base + S5P_SDO_Y10);
writel(0x00000251, sdout_base + S5P_SDO_Y11);
writel(0x00000000, sdout_base + S5P_SDO_CB0);
writel(0x00000000, sdout_base + S5P_SDO_CB1);
writel(0x00000000, sdout_base + S5P_SDO_CB2);
writel(0x00000000, sdout_base + S5P_SDO_CB3);
writel(0x00000000, sdout_base + S5P_SDO_CB4);
writel(0x00000001, sdout_base + S5P_SDO_CB5);
writel(0x00000006, sdout_base + S5P_SDO_CB6);
writel(0x00000013, sdout_base + S5P_SDO_CB7);
writel(0x00000028, sdout_base + S5P_SDO_CB8);
writel(0x0000003f, sdout_base + S5P_SDO_CB9);
writel(0x00000051, sdout_base + S5P_SDO_CB10);
writel(0x00000056, sdout_base + S5P_SDO_CB11);
writel(0x00000000, sdout_base + S5P_SDO_CR1);
writel(0x00000000, sdout_base + S5P_SDO_CR2);
writel(0x00000000, sdout_base + S5P_SDO_CR3);
writel(0x00000000, sdout_base + S5P_SDO_CR4);
writel(0x00000002, sdout_base + S5P_SDO_CR5);
writel(0x00000005, sdout_base + S5P_SDO_CR6);
writel(0x00000018, sdout_base + S5P_SDO_CR7);
writel(0x00000037, sdout_base + S5P_SDO_CR8);
writel(0x0000005A, sdout_base + S5P_SDO_CR9);
writel(0x00000076, sdout_base + S5P_SDO_CR10);
writel(0x0000007e, sdout_base + S5P_SDO_CR11);
break;
default:
SDPRINTK(" invalid composite_ratio parameter(%d)\n\r",
composite_ratio);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
break;
default:
SDPRINTK(" invalid composite_level parameter(%d)\n\r",
composite_level);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
SDPRINTK("()\n\r");
return SDOUT_NO_ERROR;
}
static enum s5p_tv_sd_err __s5p_sdout_init_oversampling_filter_coeff_default(
enum s5p_tv_o_mode out_mode)
{
u32 __iomem *temp_reg;
u8 i;
SDPRINTK("%d\n\r", out_mode);
switch (out_mode) {
case TVOUT_OUTPUT_COMPOSITE:
case TVOUT_OUTPUT_SVIDEO:
case TVOUT_OUTPUT_COMPONENT_YPBPR_INERLACED:
for (i = 0; i < 3; i++) {
/* pointer arithmetic below is in 32-bit words, so
temp_reg + n addresses the n-th coefficient register */
temp_reg = (i == 0) ?
sdout_base + S5P_SDO_OSFC00_0 :
(i == 1) ?
sdout_base + S5P_SDO_OSFC00_1 :
sdout_base + S5P_SDO_OSFC00_2;
writel(((-2&0xfff) << 0) | ((-3&0xfff) << 0),
temp_reg + 0);
writel(0,
temp_reg + 1);
writel((4 << 0) | (5 << 16),
temp_reg + 2);
writel(((-1&0xfff) << 0) | (0 << 16),
temp_reg + 3);
writel(((-6&0xfff) << 0) | ((-9&0xfff) << 16),
temp_reg + 4);
writel((1 << 0) | (0 << 16),
temp_reg + 5);
writel((10 << 0) | (14 << 16),
temp_reg + 6);
writel(((-1&0xfff) << 0) | (0 << 16),
temp_reg + 7);
writel(((-14&0xfff) << 0) | ((-20&0xfff) << 16),
temp_reg + 8);
writel((1 << 0) | (0 << 16),
temp_reg + 9);
writel((20 << 0) | (29 << 16),
temp_reg + 10);
writel(((-2&0xfff) << 0) | (0 << 16),
temp_reg + 11);
writel(((-28&0xfff) << 0) | ((-40&0xfff) << 16),
temp_reg + 12);
writel((2 << 0) | (0 << 16),
temp_reg + 13);
writel((40 << 0) | (56 << 16),
temp_reg + 14);
writel(((-3&0xfff) << 0) | (0 << 16),
temp_reg + 15);
writel(((-57&0xfff) << 0) | ((-80&0xfff) << 16),
temp_reg + 16);
writel((5 << 0) | (0 << 16),
temp_reg + 17);
writel((86 << 0) | (121 << 16),
temp_reg + 18);
writel(((-10&0xfff) << 0) | (0 << 16),
temp_reg + 19);
writel(((-154&0xfff) << 0) | ((-212&0xfff) << 16),
temp_reg + 20);
writel((27 << 0) | (0 << 16),
temp_reg + 21);
writel((613 << 0) | (651 << 16),
temp_reg + 22);
writel(((-308&0xfff) << 0) | (1024 << 16),
temp_reg + 23);
}
break;
default:
SDPRINTK("invalid out_mode parameter(%d)\n\r", out_mode);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
return SDOUT_NO_ERROR;
}
/*
* initialization
* - initialization functions are only called while SDOUT is stopped
*/
enum s5p_tv_sd_err __s5p_sdout_init_display_mode(
enum s5p_tv_disp_mode disp_mode,
enum s5p_tv_o_mode out_mode,
enum s5p_sd_order order)
{
u32 temp_reg = 0;
SDPRINTK(" %d, %d, %d\n\r", disp_mode, out_mode, order);
switch (disp_mode) {
case TVOUT_NTSC_M:
temp_reg |= SDO_NTSC_M;
__s5p_sdout_init_video_scale_cfg(S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3,
S5P_TV_SD_LEVEL_75IRE,
SDOUT_VTOS_RATIO_10_4);
__s5p_sdout_init_antialias_filter_coeff_default(
S5P_TV_SD_LEVEL_75IRE,
SDOUT_VTOS_RATIO_10_4,
out_mode);
break;
case TVOUT_PAL_BDGHI:
temp_reg |= SDO_PAL_BGHID;
__s5p_sdout_init_video_scale_cfg(S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3, S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3);
__s5p_sdout_init_antialias_filter_coeff_default(
S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3,
out_mode);
break;
case TVOUT_PAL_M:
temp_reg |= SDO_PAL_M;
__s5p_sdout_init_video_scale_cfg(S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3, S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3);
__s5p_sdout_init_antialias_filter_coeff_default(
S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3,
out_mode);
break;
case TVOUT_PAL_N:
temp_reg |= SDO_PAL_N;
__s5p_sdout_init_video_scale_cfg(S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3, S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3);
__s5p_sdout_init_antialias_filter_coeff_default(
S5P_TV_SD_LEVEL_75IRE,
SDOUT_VTOS_RATIO_10_4,
out_mode);
break;
case TVOUT_PAL_NC:
temp_reg |= SDO_PAL_NC;
__s5p_sdout_init_video_scale_cfg(S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3, S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3);
__s5p_sdout_init_antialias_filter_coeff_default(
S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3,
out_mode);
break;
case TVOUT_PAL_60:
temp_reg |= SDO_PAL_60;
__s5p_sdout_init_video_scale_cfg(S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3, S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3);
__s5p_sdout_init_antialias_filter_coeff_default(
S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3,
out_mode);
break;
case TVOUT_NTSC_443:
temp_reg |= SDO_NTSC_443;
__s5p_sdout_init_video_scale_cfg(S5P_TV_SD_LEVEL_0IRE,
SDOUT_VTOS_RATIO_7_3, S5P_TV_SD_LEVEL_75IRE,
SDOUT_VTOS_RATIO_10_4);
__s5p_sdout_init_antialias_filter_coeff_default(
S5P_TV_SD_LEVEL_75IRE,
SDOUT_VTOS_RATIO_10_4,
out_mode);
break;
default:
SDPRINTK("invalid disp_mode parameter(%d)\n\r", disp_mode);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
switch (out_mode) {
case TVOUT_OUTPUT_COMPOSITE:
case TVOUT_OUTPUT_SVIDEO:
temp_reg |= SDO_COMPOSITE | SDO_INTERLACED;
switch (order) {
case S5P_TV_SD_O_ORDER_COMPOSITE_CVBS_Y_C:
temp_reg |= SDO_DAC2_CVBS | SDO_DAC1_Y | SDO_DAC0_C;
break;
case S5P_TV_SD_O_ORDER_COMPOSITE_CVBS_C_Y:
temp_reg |= SDO_DAC2_CVBS | SDO_DAC1_C | SDO_DAC0_Y;
break;
case S5P_TV_SD_O_ORDER_COMPOSITE_Y_C_CVBS:
temp_reg |= SDO_DAC2_Y | SDO_DAC1_C | SDO_DAC0_CVBS;
break;
case S5P_TV_SD_O_ORDER_COMPOSITE_Y_CVBS_C:
temp_reg |= SDO_DAC2_Y | SDO_DAC1_CVBS | SDO_DAC0_C;
break;
case S5P_TV_SD_O_ORDER_COMPOSITE_C_CVBS_Y:
temp_reg |= SDO_DAC2_C | SDO_DAC1_CVBS | SDO_DAC0_Y;
break;
case S5P_TV_SD_O_ORDER_COMPOSITE_C_Y_CVBS:
temp_reg |= SDO_DAC2_C | SDO_DAC1_Y | SDO_DAC0_CVBS;
break;
default:
SDPRINTK(" invalid order parameter(%d)\n\r", order);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
break;
case TVOUT_OUTPUT_COMPONENT_YPBPR_INERLACED:
temp_reg |= SDO_COMPONENT | SDO_YPBPR | SDO_INTERLACED;
switch (order) {
case S5P_TV_SD_O_ORDER_COMPONENT_RGB_PRYPB:
temp_reg |= SDO_DAC2_PR_R | SDO_DAC1_Y_G |
SDO_DAC0_PB_B;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_RBG_PRPBY:
temp_reg |= SDO_DAC2_PR_R | SDO_DAC1_PB_B |
SDO_DAC0_Y_G;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_BGR_PBYPR:
temp_reg |= SDO_DAC2_PB_B | SDO_DAC1_Y_G |
SDO_DAC0_PR_R;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_BRG_PBPRY:
temp_reg |= SDO_DAC2_PB_B | SDO_DAC1_PR_R |
SDO_DAC0_Y_G;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_GRB_YPRPB:
temp_reg |= SDO_DAC2_Y_G | SDO_DAC1_PR_R |
SDO_DAC0_PB_B;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_GBR_YPBPR:
temp_reg |= SDO_DAC2_Y_G | SDO_DAC1_PB_B |
SDO_DAC0_PR_R;
break;
default:
SDPRINTK(" invalid order parameter(%d)\n\r", order);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
break;
case TVOUT_OUTPUT_COMPONENT_YPBPR_PROGRESSIVE:
temp_reg |= SDO_COMPONENT | SDO_YPBPR | SDO_PROGRESSIVE;
switch (order) {
case S5P_TV_SD_O_ORDER_COMPONENT_RGB_PRYPB:
temp_reg |= SDO_DAC2_PR_R | SDO_DAC1_Y_G |
SDO_DAC0_PB_B;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_RBG_PRPBY:
temp_reg |= SDO_DAC2_PR_R | SDO_DAC1_PB_B |
SDO_DAC0_Y_G;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_BGR_PBYPR:
temp_reg |= SDO_DAC2_PB_B | SDO_DAC1_Y_G |
SDO_DAC0_PR_R;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_BRG_PBPRY:
temp_reg |= SDO_DAC2_PB_B | SDO_DAC1_PR_R |
SDO_DAC0_Y_G;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_GRB_YPRPB:
temp_reg |= SDO_DAC2_Y_G | SDO_DAC1_PR_R |
SDO_DAC0_PB_B;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_GBR_YPBPR:
temp_reg |= SDO_DAC2_Y_G | SDO_DAC1_PB_B |
SDO_DAC0_PR_R;
break;
default:
SDPRINTK(" invalid order parameter(%d)\n\r", order);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
break;
case TVOUT_OUTPUT_COMPONENT_RGB_PROGRESSIVE:
temp_reg |= SDO_COMPONENT | SDO_RGB | SDO_PROGRESSIVE;
switch (order) {
case S5P_TV_SD_O_ORDER_COMPONENT_RGB_PRYPB:
temp_reg |= SDO_DAC2_PR_R | SDO_DAC1_Y_G |
SDO_DAC0_PB_B;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_RBG_PRPBY:
temp_reg |= SDO_DAC2_PR_R | SDO_DAC1_PB_B |
SDO_DAC0_Y_G;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_BGR_PBYPR:
temp_reg |= SDO_DAC2_PB_B | SDO_DAC1_Y_G |
SDO_DAC0_PR_R;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_BRG_PBPRY:
temp_reg |= SDO_DAC2_PB_B | SDO_DAC1_PR_R |
SDO_DAC0_Y_G;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_GRB_YPRPB:
temp_reg |= SDO_DAC2_Y_G | SDO_DAC1_PR_R |
SDO_DAC0_PB_B;
break;
case S5P_TV_SD_O_ORDER_COMPONENT_GBR_YPBPR:
temp_reg |= SDO_DAC2_Y_G | SDO_DAC1_PB_B |
SDO_DAC0_PR_R;
break;
default:
SDPRINTK("invalid order parameter(%d)\n\r", order);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
break;
default:
SDPRINTK(" invalid out_mode parameter(%d)\n\r", out_mode);
return S5P_TV_SD_ERR_INVALID_PARAM;
break;
}
__s5p_sdout_init_oversampling_filter_coeff_default(out_mode);
writel(temp_reg, sdout_base + S5P_SDO_CONFIG);
SDPRINTK("0x%08x\n\r", readl(sdout_base + S5P_SDO_CONFIG));
return SDOUT_NO_ERROR;
}
/*
* start - start functions are only called while SDOUT is stopped
*/
void __s5p_sdout_start(void)
{
SDPRINTK("()\n\r");
writel(SDO_TVOUT_CLOCK_ON, sdout_base + S5P_SDO_CLKCON);
SDPRINTK("0x%x\n\r", readl(sdout_base + S5P_SDO_CLKCON));
}
/*
* stop - stop functions are only called while SDOUT is running
*/
void __s5p_sdout_stop(void)
{
SDPRINTK("()\n\r");
writel(SDO_TVOUT_CLOCK_OFF, sdout_base + S5P_SDO_CLKCON);
SDPRINTK(" 0x%x)\n\r", readl(sdout_base + S5P_SDO_CLKCON));
}
/*
* reset
* - reset function
*/
void __s5p_sdout_sw_reset(bool active)
{
SDPRINTK("%d\n\r", active);
if (active)
writel(readl(sdout_base + S5P_SDO_CLKCON) | SDO_TVOUT_SW_RESET,
sdout_base + S5P_SDO_CLKCON);
else
writel(readl(sdout_base + S5P_SDO_CLKCON) & ~SDO_TVOUT_SW_RESET,
sdout_base + S5P_SDO_CLKCON);
SDPRINTK(" 0x%x\n\r", readl(sdout_base + S5P_SDO_CLKCON));
}
void __s5p_sdout_set_interrupt_enable(bool vsync_intr_en)
{
SDPRINTK("%d)\n\r", vsync_intr_en);
if (vsync_intr_en)
writel(readl(sdout_base + S5P_SDO_IRQMASK) &
~SDO_VSYNC_IRQ_DISABLE,
sdout_base + S5P_SDO_IRQMASK);
else
writel(readl(sdout_base + S5P_SDO_IRQMASK) |
SDO_VSYNC_IRQ_DISABLE,
sdout_base + S5P_SDO_IRQMASK);
SDPRINTK("0x%x)\n\r", readl(sdout_base + S5P_SDO_IRQMASK));
}
void __s5p_sdout_clear_interrupt_pending(void)
{
SDPRINTK("0x%x\n\r", readl(sdout_base + S5P_SDO_IRQ));
writel(readl(sdout_base + S5P_SDO_IRQ) | SDO_VSYNC_IRQ_PEND,
sdout_base + S5P_SDO_IRQ);
SDPRINTK("0x%x\n\r", readl(sdout_base + S5P_SDO_IRQ));
}
bool __s5p_sdout_get_interrupt_pending(void)
{
SDPRINTK(" 0x%x\n\r", readl(sdout_base + S5P_SDO_IRQ));
return (readl(sdout_base + S5P_SDO_IRQ) &
SDO_VSYNC_IRQ_PEND) ? 1 : 0;
}
int __init __s5p_sdout_probe(struct platform_device *pdev, u32 res_num)
{
struct resource *res;
size_t size;
res = platform_get_resource(pdev, IORESOURCE_MEM, res_num);
if (res == NULL) {
dev_err(&pdev->dev,
"failed to get memory region resource\n");
return -ENOENT;
}
size = (res->end - res->start) + 1;
sdout_mem = request_mem_region(res->start, size, pdev->name);
if (sdout_mem == NULL) {
dev_err(&pdev->dev,
"failed to get memory region\n");
return -ENOENT;
}
sdout_base = ioremap(res->start, size);
if (sdout_base == NULL) {
dev_err(&pdev->dev,
"failed to ioremap address region\n");
return -ENOENT;
}
return 0;
}
int __init __s5p_sdout_release(struct platform_device *pdev)
{
iounmap(sdout_base);
/* remove memory region */
if (sdout_mem != NULL) {
if (release_resource(sdout_mem))
dev_err(&pdev->dev,
"Can't remove tvout drv !!\n");
kfree(sdout_mem);
sdout_mem = NULL;
}
return 0;
}
| gpl-2.0 |
ezterry/kernel-biff-testing | net/mac80211/work.c | 139 | 30514 | /*
* mac80211 work implementation
*
* Copyright 2003-2008, Jouni Malinen <j@w1.fi>
* Copyright 2004, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "rate.h"
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
#define IEEE80211_ASSOC_MAX_TRIES 3
#define IEEE80211_MAX_PROBE_TRIES 5
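/*
 * Return codes for the work frame handlers (meaning inferred from their
 * use): WORK_ACT_MISMATCH - the frame does not belong to this work item;
 * WORK_ACT_NONE - the frame was consumed, no state change;
 * WORK_ACT_TIMEOUT - re-run the timeout state machine; WORK_ACT_DONE -
 * the work item has completed and can be freed.
 */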
enum work_action {
WORK_ACT_MISMATCH,
WORK_ACT_NONE,
WORK_ACT_TIMEOUT,
WORK_ACT_DONE,
};
/* utils */
static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
{
WARN_ON(!mutex_is_locked(&local->work_mtx));
}
/*
* We can have multiple work items (and connection probing)
* scheduling this timer, but we need to take care to only
* reschedule it when it should fire _earlier_ than it was
* asked for before, or if it's not pending right now. This
* function ensures that. Note that it then is required to
* run this function for all timeouts after the first one
* has happened -- the work that runs from this timer will
* do that.
*/
static void run_again(struct ieee80211_local *local,
unsigned long timeout)
{
ASSERT_WORK_MTX(local);
if (!timer_pending(&local->work_timer) ||
time_before(timeout, local->work_timer.expires))
mod_timer(&local->work_timer, timeout);
}
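/*
 * Worked example: with the timer already pending at jiffies + 10,
 * run_again(local, jiffies + 5) moves it earlier, while run_again(local,
 * jiffies + 20) leaves it alone -- the later timeout is then picked up
 * when the timer work calls run_again() after the first one fires.
 */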
static void work_free_rcu(struct rcu_head *head)
{
struct ieee80211_work *wk =
container_of(head, struct ieee80211_work, rcu_head);
kfree(wk);
}
void free_work(struct ieee80211_work *wk)
{
call_rcu(&wk->rcu_head, work_free_rcu);
}
static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
struct ieee80211_supported_band *sband,
u32 *rates)
{
int i, j, count;
*rates = 0;
count = 0;
for (i = 0; i < supp_rates_len; i++) {
int rate = (supp_rates[i] & 0x7F) * 5;
for (j = 0; j < sband->n_bitrates; j++)
if (sband->bitrates[j].bitrate == rate) {
*rates |= BIT(j);
count++;
break;
}
}
return count;
}
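/*
 * Worked example (illustrative): a supported-rates octet of 0x82 encodes
 * a basic rate of 2 * 500 kb/s = 1 Mb/s; (0x82 & 0x7F) * 5 = 10, matching
 * a bitrate table entry of 10 (mac80211 bitrates are in units of
 * 100 kb/s), so the corresponding bit is set in *rates.
 */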
/* frame sending functions */
static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps)
{
struct ieee80211_ht_info *ht_info;
u8 *pos;
u32 flags = channel->flags;
u16 cap = sband->ht_cap.cap;
__le16 tmp;
if (!sband->ht_cap.ht_supported)
return;
if (!ht_info_ie)
return;
if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
return;
ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
/* determine capability flags */
if (ieee80211_disable_40mhz_24ghz &&
sband->band == IEEE80211_BAND_2GHZ) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
break;
case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
cap &= ~IEEE80211_HT_CAP_SGI_40;
}
break;
}
/* set SM PS mode properly */
cap &= ~IEEE80211_HT_CAP_SM_PS;
switch (smps) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
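/* fall through */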
case IEEE80211_SMPS_OFF:
cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
break;
case IEEE80211_SMPS_STATIC:
cap |= WLAN_HT_CAP_SM_PS_STATIC <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
break;
case IEEE80211_SMPS_DYNAMIC:
cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
break;
}
/* reserve and fill IE */
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
memset(pos, 0, sizeof(struct ieee80211_ht_cap));
/* capability flags */
tmp = cpu_to_le16(cap);
memcpy(pos, &tmp, sizeof(u16));
pos += sizeof(u16);
/* AMPDU parameters */
*pos++ = sband->ht_cap.ampdu_factor |
(sband->ht_cap.ampdu_density <<
IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
/* MCS set */
memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
pos += sizeof(sband->ht_cap.mcs);
/* extended capabilities */
pos += sizeof(__le16);
/* BF capabilities */
pos += sizeof(__le32);
/* antenna selection */
pos += sizeof(u8);
}
static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_work *wk)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
u8 *pos, qos_info;
const u8 *ies;
size_t offset = 0, noffset;
int i, len, count, rates_len, supp_rates_len;
u16 capab;
struct ieee80211_supported_band *sband;
u32 rates = 0;
sband = local->hw.wiphy->bands[wk->chan->band];
if (wk->assoc.supp_rates_len) {
/*
* Get all rates supported by the device and the AP as
* some APs don't like getting a superset of their rates
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
wk->assoc.supp_rates_len,
sband, &rates);
} else {
/*
* In case the AP does not provide any supported rates information
* before association, we send information element(s) with
* all rates that we support.
*/
rates = ~0;
rates_len = sband->n_bitrates;
}
skb = alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + /* a bit too much, but that doesn't matter */
2 + wk->assoc.ssid_len + /* SSID */
4 + rates_len + /* (extended) rates */
4 + /* power capability */
2 + 2 * sband->n_channels + /* supported channels */
2 + sizeof(struct ieee80211_ht_cap) + /* HT */
wk->ie_len + /* extra IEs */
9, /* WMM */
GFP_KERNEL);
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
"frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
capab = WLAN_CAPABILITY_ESS;
if (sband->band == IEEE80211_BAND_2GHZ) {
if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
}
if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
capab |= WLAN_CAPABILITY_PRIVACY;
if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
(local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
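/* 24 bytes is the fixed 802.11 management frame header
* (frame_control through seq_ctrl) */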
memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
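/* reassoc fixed fields: capab_info (2) + listen_interval (2) +
* current_ap (ETH_ALEN) == 10 bytes */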
skb_put(skb, 10);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_REASSOC_REQ);
mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
mgmt->u.reassoc_req.listen_interval =
cpu_to_le16(local->hw.conf.listen_interval);
memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
ETH_ALEN);
} else {
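/* plain assoc fixed fields: capab_info (2) + listen_interval (2)
* == 4 bytes */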
skb_put(skb, 4);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ASSOC_REQ);
mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
mgmt->u.assoc_req.listen_interval =
cpu_to_le16(local->hw.conf.listen_interval);
}
/* SSID */
ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len);
*pos++ = WLAN_EID_SSID;
*pos++ = wk->assoc.ssid_len;
memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
/* add all rates which were marked to be used above */
supp_rates_len = rates_len;
if (supp_rates_len > 8)
supp_rates_len = 8;
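/* the Supported Rates IE carries at most eight rates; any
* remainder is emitted below in an Extended Supported Rates IE */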
len = sband->n_bitrates;
pos = skb_put(skb, supp_rates_len + 2);
*pos++ = WLAN_EID_SUPP_RATES;
*pos++ = supp_rates_len;
count = 0;
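/* bitrates are stored in units of 100 kb/s while the rates IE
* uses 500 kb/s units, hence the division by 5 below */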
for (i = 0; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
int rate = sband->bitrates[i].bitrate;
*pos++ = (u8) (rate / 5);
if (++count == 8)
break;
}
}
if (rates_len > count) {
pos = skb_put(skb, rates_len - count + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = rates_len - count;
for (i++; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
int rate = sband->bitrates[i].bitrate;
*pos++ = (u8) (rate / 5);
}
}
}
if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
/* 1. power capabilities */
pos = skb_put(skb, 4);
*pos++ = WLAN_EID_PWR_CAPABILITY;
*pos++ = 2;
*pos++ = 0; /* min tx power */
*pos++ = wk->chan->max_power; /* max tx power */
/* 2. supported channels */
/* TODO: get this in reg domain format */
pos = skb_put(skb, 2 * sband->n_channels + 2);
*pos++ = WLAN_EID_SUPPORTED_CHANNELS;
*pos++ = 2 * sband->n_channels;
for (i = 0; i < sband->n_channels; i++) {
*pos++ = ieee80211_frequency_to_channel(
sband->channels[i].center_freq);
*pos++ = 1; /* one channel in the subband */
}
}
/* if present, add any custom IEs that go before HT */
if (wk->ie_len && wk->ie) {
static const u8 before_ht[] = {
WLAN_EID_SSID,
WLAN_EID_SUPP_RATES,
WLAN_EID_EXT_SUPP_RATES,
WLAN_EID_PWR_CAPABILITY,
WLAN_EID_SUPPORTED_CHANNELS,
WLAN_EID_RSN,
WLAN_EID_QOS_CAPA,
WLAN_EID_RRM_ENABLED_CAPABILITIES,
WLAN_EID_MOBILITY_DOMAIN,
WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
};
noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
before_ht, ARRAY_SIZE(before_ht),
offset);
pos = skb_put(skb, noffset - offset);
memcpy(pos, wk->ie + offset, noffset - offset);
offset = noffset;
}
if (wk->assoc.use_11n && wk->assoc.wmm_used &&
local->hw.queues >= 4)
ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
sband, wk->chan, wk->assoc.smps);
/* if present, add any custom non-vendor IEs that go after HT */
if (wk->ie_len && wk->ie) {
noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
offset);
pos = skb_put(skb, noffset - offset);
memcpy(pos, wk->ie + offset, noffset - offset);
offset = noffset;
}
if (wk->assoc.wmm_used && local->hw.queues >= 4) {
if (wk->assoc.uapsd_used) {
qos_info = local->uapsd_queues;
qos_info |= (local->uapsd_max_sp_len <<
IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
} else {
qos_info = 0;
}
pos = skb_put(skb, 9);
*pos++ = WLAN_EID_VENDOR_SPECIFIC;
*pos++ = 7; /* len */
*pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
*pos++ = 0x50;
*pos++ = 0xf2;
*pos++ = 2; /* WME */
*pos++ = 0; /* WME info */
*pos++ = 1; /* WME ver */
*pos++ = qos_info;
}
/* add any remaining custom (i.e. vendor specific here) IEs */
if (wk->ie_len && wk->ie) {
noffset = wk->ie_len;
pos = skb_put(skb, noffset - offset);
memcpy(pos, wk->ie + offset, noffset - offset);
}
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
struct ieee80211_work *wk)
{
struct cfg80211_bss *cbss;
u16 capa_val = WLAN_CAPABILITY_ESS;
if (wk->probe_auth.privacy)
capa_val |= WLAN_CAPABILITY_PRIVACY;
cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
wk->probe_auth.ssid, wk->probe_auth.ssid_len,
WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
capa_val);
if (!cbss)
return;
cfg80211_unlink_bss(local->hw.wiphy, cbss);
cfg80211_put_bss(cbss);
}
static enum work_action __must_check
ieee80211_direct_probe(struct ieee80211_work *wk)
{
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
wk->probe_auth.tries++;
if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
sdata->name, wk->filter_ta);
/*
* Most likely the AP is out of range, so remove the
* bss struct for that AP.
*/
ieee80211_remove_auth_bss(local, wk);
return WORK_ACT_TIMEOUT;
}
printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
sdata->name, wk->filter_ta, wk->probe_auth.tries);
/*
* The direct probe is sent to the broadcast address, as some
* APs will not answer directed probes while unassociated.
*/
ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
wk->probe_auth.ssid_len, NULL, 0);
wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
run_again(local, wk->timeout);
return WORK_ACT_NONE;
}
static enum work_action __must_check
ieee80211_authenticate(struct ieee80211_work *wk)
{
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
wk->probe_auth.tries++;
if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
printk(KERN_DEBUG "%s: authentication with %pM"
" timed out\n", sdata->name, wk->filter_ta);
/*
* Most likely the AP is out of range, so remove the
* bss struct for that AP.
*/
ieee80211_remove_auth_bss(local, wk);
return WORK_ACT_TIMEOUT;
}
printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
sdata->name, wk->filter_ta, wk->probe_auth.tries);
ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
wk->ie_len, wk->filter_ta, NULL, 0, 0);
wk->probe_auth.transaction = 2;
wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
run_again(local, wk->timeout);
return WORK_ACT_NONE;
}
static enum work_action __must_check
ieee80211_associate(struct ieee80211_work *wk)
{
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
wk->assoc.tries++;
if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
printk(KERN_DEBUG "%s: association with %pM"
" timed out\n",
sdata->name, wk->filter_ta);
/*
* Most likely the AP is out of range, so remove the
* bss struct for that AP.
*/
if (wk->assoc.bss)
cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
return WORK_ACT_TIMEOUT;
}
printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
sdata->name, wk->filter_ta, wk->assoc.tries);
ieee80211_send_assoc(sdata, wk);
wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
run_again(local, wk->timeout);
return WORK_ACT_NONE;
}
static enum work_action __must_check
ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
{
/*
* First time we run, do nothing -- the generic code will
* have switched to the right channel etc.
*/
if (!wk->started) {
wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
wk->chan, wk->chan_type,
wk->remain.duration, GFP_KERNEL);
return WORK_ACT_NONE;
}
return WORK_ACT_TIMEOUT;
}
static enum work_action __must_check
ieee80211_assoc_beacon_wait(struct ieee80211_work *wk)
{
if (wk->started)
return WORK_ACT_TIMEOUT;
/*
* Wait up to one beacon interval ...
* should this be more if we miss one?
*/
printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
wk->sdata->name, wk->filter_ta);
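/* beacon_interval is in TUs (1 TU = 1024 us); TU_TO_EXP_TIME
* converts it into a jiffies deadline */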
wk->timeout = TU_TO_EXP_TIME(wk->assoc.bss->beacon_interval);
return WORK_ACT_NONE;
}
static void ieee80211_auth_challenge(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt,
size_t len)
{
struct ieee80211_sub_if_data *sdata = wk->sdata;
u8 *pos;
struct ieee802_11_elems elems;
pos = mgmt->u.auth.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
if (!elems.challenge)
return;
ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
elems.challenge - 2, elems.challenge_len + 2,
wk->filter_ta, wk->probe_auth.key,
wk->probe_auth.key_len, wk->probe_auth.key_idx);
wk->probe_auth.transaction = 4;
}
static enum work_action __must_check
ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt, size_t len)
{
u16 auth_alg, auth_transaction, status_code;
if (wk->type != IEEE80211_WORK_AUTH)
return WORK_ACT_MISMATCH;
if (len < 24 + 6)
return WORK_ACT_NONE;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
status_code = le16_to_cpu(mgmt->u.auth.status_code);
if (auth_alg != wk->probe_auth.algorithm ||
auth_transaction != wk->probe_auth.transaction)
return WORK_ACT_NONE;
if (status_code != WLAN_STATUS_SUCCESS) {
printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
wk->sdata->name, mgmt->sa, status_code);
return WORK_ACT_DONE;
}
switch (wk->probe_auth.algorithm) {
case WLAN_AUTH_OPEN:
case WLAN_AUTH_LEAP:
case WLAN_AUTH_FT:
break;
case WLAN_AUTH_SHARED_KEY:
if (wk->probe_auth.transaction != 4) {
ieee80211_auth_challenge(wk, mgmt, len);
/* need another frame */
return WORK_ACT_NONE;
}
break;
default:
WARN_ON(1);
return WORK_ACT_NONE;
}
printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
return WORK_ACT_DONE;
}
static enum work_action __must_check
ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt, size_t len,
bool reassoc)
{
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
u16 capab_info, status_code, aid;
struct ieee802_11_elems elems;
u8 *pos;
if (wk->type != IEEE80211_WORK_ASSOC)
return WORK_ACT_MISMATCH;
/*
* AssocResp and ReassocResp have identical structure, so process both
* of them in this function.
*/
if (len < 24 + 6)
return WORK_ACT_NONE;
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
"status=%d aid=%d)\n",
sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
pos = mgmt->u.assoc_resp.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
elems.timeout_int && elems.timeout_int_len == 5 &&
elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
u32 tu, ms;
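/* the comeback duration is given in TUs (1 TU = 1024 us),
* hence the tu * 1024 / 1000 conversion to ms below */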
tu = get_unaligned_le32(elems.timeout_int + 1);
ms = tu * 1024 / 1000;
printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
"comeback duration %u TU (%u ms)\n",
sdata->name, mgmt->sa, tu, ms);
wk->timeout = jiffies + msecs_to_jiffies(ms);
if (ms > IEEE80211_ASSOC_TIMEOUT)
run_again(local, wk->timeout);
return WORK_ACT_NONE;
}
if (status_code != WLAN_STATUS_SUCCESS)
printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
sdata->name, mgmt->sa, status_code);
else
printk(KERN_DEBUG "%s: associated\n", sdata->name);
return WORK_ACT_DONE;
}
static enum work_action __must_check
ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
size_t baselen;
ASSERT_WORK_MTX(local);
if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
return WORK_ACT_MISMATCH;
if (len < 24 + 12)
return WORK_ACT_NONE;
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
if (baselen > len)
return WORK_ACT_NONE;
printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
return WORK_ACT_DONE;
}
static enum work_action __must_check
ieee80211_rx_mgmt_beacon(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_local *local = sdata->local;
ASSERT_WORK_MTX(local);
if (wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
return WORK_ACT_MISMATCH;
if (len < 24 + 12)
return WORK_ACT_NONE;
printk(KERN_DEBUG "%s: beacon received\n", sdata->name);
return WORK_ACT_DONE;
}
static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
struct ieee80211_work *wk;
enum work_action rma = WORK_ACT_NONE;
u16 fc;
rx_status = (struct ieee80211_rx_status *) skb->cb;
mgmt = (struct ieee80211_mgmt *) skb->data;
fc = le16_to_cpu(mgmt->frame_control);
mutex_lock(&local->work_mtx);
list_for_each_entry(wk, &local->work_list, list) {
const u8 *bssid = NULL;
switch (wk->type) {
case IEEE80211_WORK_DIRECT_PROBE:
case IEEE80211_WORK_AUTH:
case IEEE80211_WORK_ASSOC:
case IEEE80211_WORK_ASSOC_BEACON_WAIT:
bssid = wk->filter_ta;
break;
default:
continue;
}
/*
* Before queuing, we already verified mgmt->sa,
* so this is needed just for matching.
*/
if (compare_ether_addr(bssid, mgmt->bssid))
continue;
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_BEACON:
rma = ieee80211_rx_mgmt_beacon(wk, mgmt, skb->len);
break;
case IEEE80211_STYPE_PROBE_RESP:
rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
rx_status);
break;
case IEEE80211_STYPE_AUTH:
rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
break;
case IEEE80211_STYPE_ASSOC_RESP:
rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
skb->len, false);
break;
case IEEE80211_STYPE_REASSOC_RESP:
rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
skb->len, true);
break;
default:
WARN_ON(1);
rma = WORK_ACT_NONE;
}
/*
* We've either received an unexpected frame, or we have
* multiple work items and need to match the frame to the
* right one.
*/
if (rma == WORK_ACT_MISMATCH)
continue;
/*
* We've processed this frame for that work, so it can't
* belong to another work struct.
* NB: this is also required for correctness for 'rma'!
*/
break;
}
switch (rma) {
case WORK_ACT_MISMATCH:
/* ignore this unmatched frame */
break;
case WORK_ACT_NONE:
break;
case WORK_ACT_DONE:
list_del_rcu(&wk->list);
break;
default:
WARN(1, "unexpected: %d", rma);
}
mutex_unlock(&local->work_mtx);
if (rma != WORK_ACT_DONE)
goto out;
switch (wk->done(wk, skb)) {
case WORK_DONE_DESTROY:
free_work(wk);
break;
case WORK_DONE_REQUEUE:
synchronize_rcu();
wk->started = false; /* restart */
mutex_lock(&local->work_mtx);
list_add_tail(&wk->list, &local->work_list);
mutex_unlock(&local->work_mtx);
}
out:
kfree_skb(skb);
}
static void ieee80211_work_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
if (local->quiescing)
return;
ieee80211_queue_work(&local->hw, &local->work_work);
}
static void ieee80211_work_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, work_work);
struct sk_buff *skb;
struct ieee80211_work *wk, *tmp;
LIST_HEAD(free_work);
enum work_action rma;
bool remain_off_channel = false;
if (local->scanning)
return;
/*
* ieee80211_queue_work() should have picked up most cases,
* here we'll pick the rest.
*/
if (WARN(local->suspended, "work scheduled while going to suspend\n"))
return;
/* first process frames to avoid timing out while a frame is pending */
while ((skb = skb_dequeue(&local->work_skb_queue)))
ieee80211_work_rx_queued_mgmt(local, skb);
ieee80211_recalc_idle(local);
mutex_lock(&local->work_mtx);
list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
bool started = wk->started;
/* mark work as started if it's on the current off-channel */
if (!started && local->tmp_channel &&
wk->chan == local->tmp_channel &&
wk->chan_type == local->tmp_channel_type) {
started = true;
wk->timeout = jiffies;
}
if (!started && !local->tmp_channel) {
/*
* TODO: could optimize this by leaving the
* station vifs in awake mode if they
* happen to be on the same channel as
* the requested channel
*/
ieee80211_offchannel_stop_beaconing(local);
ieee80211_offchannel_stop_station(local);
local->tmp_channel = wk->chan;
local->tmp_channel_type = wk->chan_type;
ieee80211_hw_config(local, 0);
started = true;
wk->timeout = jiffies;
}
/* don't try to work with items that aren't started */
if (!started)
continue;
if (time_is_after_jiffies(wk->timeout)) {
/*
* This work item isn't supposed to be worked on
* right now, but take care to adjust the timer
* properly.
*/
run_again(local, wk->timeout);
continue;
}
switch (wk->type) {
default:
WARN_ON(1);
/* nothing */
rma = WORK_ACT_NONE;
break;
case IEEE80211_WORK_ABORT:
rma = WORK_ACT_TIMEOUT;
break;
case IEEE80211_WORK_DIRECT_PROBE:
rma = ieee80211_direct_probe(wk);
break;
case IEEE80211_WORK_AUTH:
rma = ieee80211_authenticate(wk);
break;
case IEEE80211_WORK_ASSOC:
rma = ieee80211_associate(wk);
break;
case IEEE80211_WORK_REMAIN_ON_CHANNEL:
rma = ieee80211_remain_on_channel_timeout(wk);
break;
case IEEE80211_WORK_ASSOC_BEACON_WAIT:
rma = ieee80211_assoc_beacon_wait(wk);
break;
}
wk->started = started;
switch (rma) {
case WORK_ACT_NONE:
/* might have changed the timeout */
run_again(local, wk->timeout);
break;
case WORK_ACT_TIMEOUT:
list_del_rcu(&wk->list);
synchronize_rcu();
list_add(&wk->list, &free_work);
break;
default:
WARN(1, "unexpected: %d", rma);
}
}
list_for_each_entry(wk, &local->work_list, list) {
if (!wk->started)
continue;
if (wk->chan != local->tmp_channel)
continue;
if (wk->chan_type != local->tmp_channel_type)
continue;
remain_off_channel = true;
}
if (!remain_off_channel && local->tmp_channel) {
local->tmp_channel = NULL;
ieee80211_hw_config(local, 0);
ieee80211_offchannel_return(local, true);
/* give connection some time to breathe */
run_again(local, jiffies + HZ/2);
}
mutex_lock(&local->scan_mtx);
if (list_empty(&local->work_list) && local->scan_req &&
!local->scanning)
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
round_jiffies_relative(0));
mutex_unlock(&local->scan_mtx);
mutex_unlock(&local->work_mtx);
ieee80211_recalc_idle(local);
list_for_each_entry_safe(wk, tmp, &free_work, list) {
wk->done(wk, NULL);
list_del(&wk->list);
kfree(wk);
}
}
void ieee80211_add_work(struct ieee80211_work *wk)
{
struct ieee80211_local *local;
if (WARN_ON(!wk->chan))
return;
if (WARN_ON(!wk->sdata))
return;
if (WARN_ON(!wk->done))
return;
if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
return;
wk->started = false;
local = wk->sdata->local;
mutex_lock(&local->work_mtx);
list_add_tail(&wk->list, &local->work_list);
mutex_unlock(&local->work_mtx);
ieee80211_queue_work(&local->hw, &local->work_work);
}
void ieee80211_work_init(struct ieee80211_local *local)
{
mutex_init(&local->work_mtx);
INIT_LIST_HEAD(&local->work_list);
setup_timer(&local->work_timer, ieee80211_work_timer,
(unsigned long)local);
INIT_WORK(&local->work_work, ieee80211_work_work);
skb_queue_head_init(&local->work_skb_queue);
}
void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_work *wk;
mutex_lock(&local->work_mtx);
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
continue;
wk->type = IEEE80211_WORK_ABORT;
wk->started = true;
wk->timeout = jiffies;
}
mutex_unlock(&local->work_mtx);
/* run cleanups etc. */
ieee80211_work_work(&local->work_work);
mutex_lock(&local->work_mtx);
list_for_each_entry(wk, &local->work_list, list) {
if (wk->sdata != sdata)
continue;
WARN_ON(1);
break;
}
mutex_unlock(&local->work_mtx);
}
ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_mgmt *mgmt;
struct ieee80211_work *wk;
u16 fc;
if (skb->len < 24)
return RX_DROP_MONITOR;
mgmt = (struct ieee80211_mgmt *) skb->data;
fc = le16_to_cpu(mgmt->frame_control);
list_for_each_entry_rcu(wk, &local->work_list, list) {
if (sdata != wk->sdata)
continue;
if (compare_ether_addr(wk->filter_ta, mgmt->sa))
continue;
if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
continue;
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_AUTH:
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
case IEEE80211_STYPE_BEACON:
skb_queue_tail(&local->work_skb_queue, skb);
ieee80211_queue_work(&local->hw, &local->work_work);
return RX_QUEUED;
}
}
return RX_CONTINUE;
}
static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
struct sk_buff *skb)
{
/*
* We are done serving the remain-on-channel command.
*/
cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
wk->chan, wk->chan_type,
GFP_KERNEL);
return WORK_DONE_DESTROY;
}
int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, u64 *cookie)
{
struct ieee80211_work *wk;
wk = kzalloc(sizeof(*wk), GFP_KERNEL);
if (!wk)
return -ENOMEM;
wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
wk->chan = chan;
wk->chan_type = channel_type;
wk->sdata = sdata;
wk->done = ieee80211_remain_done;
wk->remain.duration = duration;
*cookie = (unsigned long) wk;
ieee80211_add_work(wk);
return 0;
}
int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
u64 cookie)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_work *wk, *tmp;
bool found = false;
mutex_lock(&local->work_mtx);
list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
if ((unsigned long) wk == cookie) {
wk->timeout = jiffies;
found = true;
break;
}
}
mutex_unlock(&local->work_mtx);
if (!found)
return -ENOENT;
ieee80211_queue_work(&local->hw, &local->work_work);
return 0;
}
| gpl-2.0 |
tluyou/openwrtbb-tluyou | target/linux/ar71xx/files/drivers/mtd/wrt160nl_part.c | 139 | 5644 | /*
* Copyright (C) 2009 Christian Daniel <cd@maintech.de>
* Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* TRX flash partition table.
* Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
struct cybertan_header {
char magic[4];
u8 res1[4];
char fw_date[3];
char fw_ver[3];
char id[4];
char hw_ver;
char unused;
u8 flags[2];
u8 res2[10];
};
#define TRX_PARTS 6
#define TRX_MAGIC 0x30524448
#define TRX_MAX_OFFSET 3
struct trx_header {
uint32_t magic; /* "HDR0" */
uint32_t len; /* Length of file including header */
uint32_t crc32; /* 32-bit CRC from flag_version to end of file */
uint32_t flag_version; /* 0:15 flags, 16:31 version */
uint32_t offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
};
#define IH_MAGIC 0x27051956 /* Image Magic Number */
#define IH_NMLEN 32 /* Image Name Length */
struct uimage_header {
uint32_t ih_magic; /* Image Header Magic Number */
uint32_t ih_hcrc; /* Image Header CRC Checksum */
uint32_t ih_time; /* Image Creation Timestamp */
uint32_t ih_size; /* Image Data Size */
uint32_t ih_load; /* Data Load Address */
uint32_t ih_ep; /* Entry Point Address */
uint32_t ih_dcrc; /* Image Data CRC Checksum */
uint8_t ih_os; /* Operating System */
uint8_t ih_arch; /* CPU architecture */
uint8_t ih_type; /* Image Type */
uint8_t ih_comp; /* Compression Type */
uint8_t ih_name[IH_NMLEN]; /* Image Name */
};
struct wrt160nl_header {
struct cybertan_header cybertan;
struct trx_header trx;
struct uimage_header uimage;
} __attribute__ ((packed));
#define WRT160NL_UBOOT_LEN 0x40000
#define WRT160NL_ART_LEN 0x10000
#define WRT160NL_NVRAM_LEN 0x10000
static int wrt160nl_parse_partitions(struct mtd_info *master,
struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
struct wrt160nl_header *header;
struct trx_header *theader;
struct uimage_header *uheader;
struct mtd_partition *trx_parts;
size_t retlen;
unsigned int kernel_len;
unsigned int uboot_len;
unsigned int nvram_len;
unsigned int art_len;
int ret;
uboot_len = max_t(unsigned int, master->erasesize, WRT160NL_UBOOT_LEN);
nvram_len = max_t(unsigned int, master->erasesize, WRT160NL_NVRAM_LEN);
art_len = max_t(unsigned int, master->erasesize, WRT160NL_ART_LEN);
trx_parts = kzalloc(TRX_PARTS * sizeof(struct mtd_partition),
GFP_KERNEL);
if (!trx_parts) {
ret = -ENOMEM;
goto out;
}
header = vmalloc(sizeof(*header));
if (!header) {
ret = -ENOMEM;
goto free_parts;
}
ret = mtd_read(master, uboot_len, sizeof(*header),
&retlen, (void *) header);
if (ret)
goto free_hdr;
if (retlen != sizeof(*header)) {
ret = -EIO;
goto free_hdr;
}
if (strncmp(header->cybertan.magic, "NL16", 4) != 0) {
printk(KERN_NOTICE "%s: no WRT160NL signature found\n",
master->name);
goto free_hdr;
}
theader = &header->trx;
if (le32_to_cpu(theader->magic) != TRX_MAGIC) {
printk(KERN_NOTICE "%s: no TRX header found\n", master->name);
goto free_hdr;
}
uheader = &header->uimage;
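/* Note: uImage header fields are stored big-endian on flash; on
* big-endian ar71xx targets this compare works without swapping. */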
if (uheader->ih_magic != IH_MAGIC) {
printk(KERN_NOTICE "%s: no uImage found\n", master->name);
goto free_hdr;
}
kernel_len = le32_to_cpu(theader->offsets[1]) +
sizeof(struct cybertan_header);
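/* Resulting layout: u-boot | kernel | rootfs | nvram | art, with
* the "firmware" entry below spanning the kernel + rootfs region. */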
trx_parts[0].name = "u-boot";
trx_parts[0].offset = 0;
trx_parts[0].size = uboot_len;
trx_parts[0].mask_flags = MTD_WRITEABLE;
trx_parts[1].name = "kernel";
trx_parts[1].offset = trx_parts[0].offset + trx_parts[0].size;
trx_parts[1].size = kernel_len;
trx_parts[1].mask_flags = 0;
trx_parts[2].name = "rootfs";
trx_parts[2].offset = trx_parts[1].offset + trx_parts[1].size;
trx_parts[2].size = master->size - uboot_len - nvram_len - art_len -
trx_parts[1].size;
trx_parts[2].mask_flags = 0;
trx_parts[3].name = "nvram";
trx_parts[3].offset = master->size - nvram_len - art_len;
trx_parts[3].size = nvram_len;
trx_parts[3].mask_flags = MTD_WRITEABLE;
trx_parts[4].name = "art";
trx_parts[4].offset = master->size - art_len;
trx_parts[4].size = art_len;
trx_parts[4].mask_flags = MTD_WRITEABLE;
trx_parts[5].name = "firmware";
trx_parts[5].offset = uboot_len;
trx_parts[5].size = master->size - uboot_len - nvram_len - art_len;
trx_parts[5].mask_flags = 0;
vfree(header);
*pparts = trx_parts;
return TRX_PARTS;
free_hdr:
vfree(header);
free_parts:
kfree(trx_parts);
out:
return ret;
}
static struct mtd_part_parser wrt160nl_parser = {
.owner = THIS_MODULE,
.parse_fn = wrt160nl_parse_partitions,
.name = "wrt160nl",
};
static int __init wrt160nl_parser_init(void)
{
register_mtd_parser(&wrt160nl_parser);
return 0;
}
module_init(wrt160nl_parser_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Daniel <cd@maintech.de>");
| gpl-2.0 |
eugene373/Nexus_S_ICS | drivers/net/wireless/bcmdhd/dhd_common.c | 395 | 59086 | /*
* Broadcom Dongle Host Driver (DHD), common DHD core.
*
* Copyright (C) 1999-2011, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: dhd_common.c 290546 2011-10-19 01:55:21Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <dngl_stats.h>
#include <wlioctl.h>
#include <dhd.h>
#include <proto/bcmevent.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <msgtrace.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
#include <proto/bt_amp_hci.h>
#include <dhd_bta.h>
#ifdef SET_RANDOM_MAC_SOFTAP
#include <linux/random.h>
#include <linux/jiffies.h>
#endif
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif
#ifdef WLMEDIA_HTSF
extern void htsf_update(struct dhd_info *dhd, void *data);
#endif
int dhd_msg_level = DHD_ERROR_VAL;
#include <wl_iw.h>
char fw_path[MOD_PARAM_PATHLEN];
char nv_path[MOD_PARAM_PATHLEN];
#ifdef SOFTAP
char fw_path2[MOD_PARAM_PATHLEN];
extern bool softap_enabled;
#endif
/* Last connection success/failure status */
uint32 dhd_conn_event;
uint32 dhd_conn_status;
uint32 dhd_conn_reason;
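/* host/dongle byte-order conversion is a no-op here: host and
* dongle are assumed to share endianness */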
#define htod32(i) i
#define htod16(i) i
#define dtoh32(i) i
#define dtoh16(i) i
extern int dhd_iscan_request(void * dhdp, uint16 action);
extern void dhd_ind_scan_confirm(void *h, bool status);
extern int dhd_iscan_in_progress(void *h);
void dhd_iscan_lock(void);
void dhd_iscan_unlock(void);
extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
bool ap_cfg_running = FALSE;
bool ap_fw_loaded = FALSE;
#ifdef DHD_DEBUG
const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
__DATE__ " at " __TIME__;
#else
const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
#endif
void dhd_set_timer(void *bus, uint wdtick);
/* IOVar table */
enum {
IOV_VERSION = 1,
IOV_MSGLEVEL,
IOV_BCMERRORSTR,
IOV_BCMERROR,
IOV_WDTICK,
IOV_DUMP,
IOV_CLEARCOUNTS,
IOV_LOGDUMP,
IOV_LOGCAL,
IOV_LOGSTAMP,
IOV_GPIOOB,
IOV_IOCTLTIMEOUT,
IOV_HCI_CMD, /* HCI command */
IOV_HCI_ACL_DATA, /* HCI data packet */
#if defined(DHD_DEBUG)
IOV_CONS,
IOV_DCONSOLE_POLL,
#endif /* defined(DHD_DEBUG) */
#ifdef PROP_TXSTATUS
IOV_PROPTXSTATUS_ENABLE,
IOV_PROPTXSTATUS_MODE,
#endif
IOV_BUS_TYPE,
#ifdef WLMEDIA_HTSF
IOV_WLPKTDLYSTAT_SZ,
#endif
IOV_CHANGEMTU,
IOV_LAST
};
const bcm_iovar_t dhd_iovars[] = {
{"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) },
#ifdef DHD_DEBUG
{"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
#endif /* DHD_DEBUG */
{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN },
{"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0 },
{"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0 },
{"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
#ifdef DHD_DEBUG
{"cons", IOV_CONS, 0, IOVT_BUFFER, 0 },
{"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0 },
#endif
{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0 },
{"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0 },
{"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, IOVT_UINT32, 0 },
{"HCI_cmd", IOV_HCI_CMD, 0, IOVT_BUFFER, 0},
{"HCI_ACL_data", IOV_HCI_ACL_DATA, 0, IOVT_BUFFER, 0},
#ifdef PROP_TXSTATUS
{"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_UINT32, 0 },
/*
set the proptxtstatus operation mode:
0 - Do not do any proptxtstatus flow control
1 - Use implied credit from a packet status
2 - Use explicit credit
*/
{"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 },
#endif
{"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
#ifdef WLMEDIA_HTSF
{"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, IOVT_UINT8, 0 },
#endif
{"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
{NULL, 0, 0, 0, 0 }
};
struct dhd_cmn *
dhd_common_init(osl_t *osh)
{
dhd_cmn_t *cmn;
/* Init global variables at run-time, not as part of the declaration.
* This is required to support init/de-init of the driver. Initialization
* of globals as part of the declaration results in non-deterministic
* behavior since the value of the globals may be different on the
* first time that the driver is initialized vs subsequent initializations.
*/
/* Allocate private bus interface state */
if (!(cmn = MALLOC(osh, sizeof(dhd_cmn_t)))) {
DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
return NULL;
}
memset(cmn, 0, sizeof(dhd_cmn_t));
cmn->osh = osh;
#ifdef CONFIG_BCMDHD_FW_PATH
bcm_strncpy_s(fw_path, sizeof(fw_path), CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
#else /* CONFIG_BCMDHD_FW_PATH */
fw_path[0] = '\0';
#endif /* CONFIG_BCMDHD_FW_PATH */
#ifdef CONFIG_BCMDHD_NVRAM_PATH
bcm_strncpy_s(nv_path, sizeof(nv_path), CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
#else /* CONFIG_BCMDHD_NVRAM_PATH */
nv_path[0] = '\0';
#endif /* CONFIG_BCMDHD_NVRAM_PATH */
#ifdef SOFTAP
fw_path2[0] = '\0';
#endif
return cmn;
}
void
dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn)
{
osl_t *osh;
dhd_cmn_t *cmn;
if (dhd_pub != NULL)
cmn = dhd_pub->cmn;
else
cmn = sa_cmn;
if (!cmn)
return;
osh = cmn->osh;
if (dhd_pub != NULL)
dhd_pub->cmn = NULL;
MFREE(osh, cmn, sizeof(dhd_cmn_t));
}
static int
dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
{
char eabuf[ETHER_ADDR_STR_LEN];
struct bcmstrbuf b;
struct bcmstrbuf *strbuf = &b;
bcm_binit(strbuf, buf, buflen);
/* Base DHD info */
bcm_bprintf(strbuf, "%s\n", dhd_version);
bcm_bprintf(strbuf, "\n");
bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
dhdp->up, dhdp->txoff, dhdp->busstate);
bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror, dhdp->tickcnt);
bcm_bprintf(strbuf, "dongle stats:\n");
bcm_bprintf(strbuf, "tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
bcm_bprintf(strbuf, "rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
bcm_bprintf(strbuf, "bus stats:\n");
bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld \n",
dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
bcm_bprintf(strbuf, "rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld\n",
dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
bcm_bprintf(strbuf, "rx_readahead_cnt %ld tx_realloc %ld\n",
dhdp->rx_readahead_cnt, dhdp->tx_realloc);
bcm_bprintf(strbuf, "\n");
/* Add any prot info */
dhd_prot_dump(dhdp, strbuf);
bcm_bprintf(strbuf, "\n");
/* Add any bus info */
dhd_bus_dump(dhdp, strbuf);
return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
}
int
dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex)
{
wl_ioctl_t ioc;
ioc.cmd = cmd;
ioc.buf = arg;
ioc.len = len;
ioc.set = set;
return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len);
}
int
dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
{
int ret;
dhd_os_proto_block(dhd_pub);
ret = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len);
if (!ret)
dhd_os_check_hang(dhd_pub, ifindex, ret);
dhd_os_proto_unblock(dhd_pub);
return ret;
}
static int
dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
void *params, int plen, void *arg, int len, int val_size)
{
int bcmerror = 0;
int32 int_val = 0;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
goto exit;
if (plen >= (int)sizeof(int_val))
bcopy(params, &int_val, sizeof(int_val));
switch (actionid) {
case IOV_GVAL(IOV_VERSION):
/* Need to have checked buffer length */
bcm_strncpy_s((char*)arg, len, dhd_version, len);
break;
case IOV_GVAL(IOV_MSGLEVEL):
int_val = (int32)dhd_msg_level;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_MSGLEVEL):
dhd_msg_level = int_val;
break;
case IOV_GVAL(IOV_BCMERRORSTR):
bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
((char *)arg)[BCME_STRLEN - 1] = 0x00;
break;
case IOV_GVAL(IOV_BCMERROR):
int_val = (int32)dhd_pub->bcmerror;
bcopy(&int_val, arg, val_size);
break;
case IOV_GVAL(IOV_WDTICK):
int_val = (int32)dhd_watchdog_ms;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_WDTICK):
if (!dhd_pub->up) {
bcmerror = BCME_NOTUP;
break;
}
dhd_os_wd_timer(dhd_pub, (uint)int_val);
break;
case IOV_GVAL(IOV_DUMP):
bcmerror = dhd_dump(dhd_pub, arg, len);
break;
#ifdef DHD_DEBUG
case IOV_GVAL(IOV_DCONSOLE_POLL):
int_val = (int32)dhd_console_ms;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_DCONSOLE_POLL):
dhd_console_ms = (uint)int_val;
break;
case IOV_SVAL(IOV_CONS):
if (len > 0)
bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
break;
#endif /* DHD_DEBUG */
case IOV_SVAL(IOV_CLEARCOUNTS):
dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
dhd_pub->rx_dropped = 0;
dhd_pub->rx_readahead_cnt = 0;
dhd_pub->tx_realloc = 0;
dhd_pub->wd_dpc_sched = 0;
memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
dhd_bus_clearcounts(dhd_pub);
#ifdef PROP_TXSTATUS
/* clear proptxstatus related counters */
if (dhd_pub->wlfc_state) {
athost_wl_status_info_t *wlfc =
(athost_wl_status_info_t*)dhd_pub->wlfc_state;
wlfc_hanger_t* hanger;
memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
hanger = (wlfc_hanger_t*)wlfc->hanger;
hanger->pushed = 0;
hanger->popped = 0;
hanger->failed_slotfind = 0;
hanger->failed_to_pop = 0;
hanger->failed_to_push = 0;
}
#endif /* PROP_TXSTATUS */
break;
case IOV_GVAL(IOV_IOCTLTIMEOUT): {
int_val = (int32)dhd_os_get_ioctl_resp_timeout();
bcopy(&int_val, arg, sizeof(int_val));
break;
}
case IOV_SVAL(IOV_IOCTLTIMEOUT): {
if (int_val <= 0)
bcmerror = BCME_BADARG;
else
dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
break;
}
case IOV_SVAL(IOV_HCI_CMD): {
amp_hci_cmd_t *cmd = (amp_hci_cmd_t *)arg;
/* sanity check: command preamble present */
if (len < HCI_CMD_PREAMBLE_SIZE)
return BCME_BUFTOOSHORT;
/* sanity check: command parameters are present */
if (len < (int)(HCI_CMD_PREAMBLE_SIZE + cmd->plen))
return BCME_BUFTOOSHORT;
dhd_bta_docmd(dhd_pub, cmd, len);
break;
}
case IOV_SVAL(IOV_HCI_ACL_DATA): {
amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)arg;
/* sanity check: HCI header present */
if (len < HCI_ACL_DATA_PREAMBLE_SIZE)
return BCME_BUFTOOSHORT;
/* sanity check: ACL data is present */
if (len < (int)(HCI_ACL_DATA_PREAMBLE_SIZE + ACL_data->dlen))
return BCME_BUFTOOSHORT;
dhd_bta_tx_hcidata(dhd_pub, ACL_data, len);
break;
}
#ifdef PROP_TXSTATUS
case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE):
int_val = dhd_pub->wlfc_enabled? 1 : 0;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE):
dhd_pub->wlfc_enabled = int_val? 1 : 0;
break;
case IOV_GVAL(IOV_PROPTXSTATUS_MODE): {
athost_wl_status_info_t *wlfc =
(athost_wl_status_info_t*)dhd_pub->wlfc_state;
int_val = dhd_pub->wlfc_state ? (int32)wlfc->proptxstatus_mode : 0;
bcopy(&int_val, arg, val_size);
break;
}
case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
if (dhd_pub->wlfc_state) {
athost_wl_status_info_t *wlfc =
(athost_wl_status_info_t*)dhd_pub->wlfc_state;
wlfc->proptxstatus_mode = int_val & 0xff;
}
break;
#endif /* PROP_TXSTATUS */
case IOV_GVAL(IOV_BUS_TYPE):
/* The dhd application queries the driver to check if its usb or sdio. */
#ifdef BCMDHDUSB
int_val = BUS_TYPE_USB;
#else
int_val = BUS_TYPE_SDIO;
#endif
bcopy(&int_val, arg, val_size);
break;
#ifdef WLMEDIA_HTSF
case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
int_val = dhd_pub->htsfdlystat_sz;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
dhd_pub->htsfdlystat_sz = int_val & 0xff;
printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
break;
#endif
case IOV_SVAL(IOV_CHANGEMTU):
int_val &= 0xffff;
bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
break;
default:
bcmerror = BCME_UNSUPPORTED;
break;
}
exit:
DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
return bcmerror;
}
/* Store the status of a connection attempt for later retrieval by an iovar */
void
dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
{
/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
* because an encryption/rsn mismatch results in both events, and
* the important information is in the WLC_E_PRUNE.
*/
if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
dhd_conn_event == WLC_E_PRUNE)) {
dhd_conn_event = event;
dhd_conn_status = status;
dhd_conn_reason = reason;
}
}
bool
dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
{
void *p;
int eprec = -1; /* precedence to evict from */
bool discard_oldest;
/* Fast case, precedence queue is not full and we are also not
* exceeding total queue length
*/
if (!pktq_pfull(q, prec) && !pktq_full(q)) {
pktq_penq(q, prec, pkt);
return TRUE;
}
/* Determine precedence from which to evict packet, if any */
if (pktq_pfull(q, prec))
eprec = prec;
else if (pktq_full(q)) {
p = pktq_peek_tail(q, &eprec);
ASSERT(p);
if (eprec > prec || eprec < 0)
return FALSE;
}
/* Evict if needed */
if (eprec >= 0) {
/* Detect queueing to unconfigured precedence */
ASSERT(!pktq_pempty(q, eprec));
discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
if (eprec == prec && !discard_oldest)
return FALSE; /* refuse newer (incoming) packet */
/* Evict packet according to discard policy */
p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
ASSERT(p);
PKTFREE(dhdp->osh, p, TRUE);
}
/* Enqueue */
p = pktq_penq(q, prec, pkt);
ASSERT(p);
return TRUE;
}
static int
dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
void *params, int plen, void *arg, int len, bool set)
{
int bcmerror = 0;
int val_size;
const bcm_iovar_t *vi = NULL;
uint32 actionid;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
ASSERT(name);
ASSERT(len >= 0);
/* Get MUST have return space */
ASSERT(set || (arg && len));
/* Set does NOT take qualifiers */
ASSERT(!set || (!params && !plen));
if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
bcmerror = BCME_UNSUPPORTED;
goto exit;
}
DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
name, (set ? "set" : "get"), len, plen));
/* set up 'params' pointer in case this is a set command so that
* the convenience int and bool code can be common to set and get
*/
if (params == NULL) {
params = arg;
plen = len;
}
if (vi->type == IOVT_VOID)
val_size = 0;
else if (vi->type == IOVT_BUFFER)
val_size = len;
else
/* all other types are integer sized */
val_size = sizeof(int);
actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
exit:
return bcmerror;
}
int
dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void * buf, uint buflen)
{
int bcmerror = 0;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (!buf) {
return BCME_BADARG;
}
switch (ioc->cmd) {
case DHD_GET_MAGIC:
if (buflen < sizeof(int))
bcmerror = BCME_BUFTOOSHORT;
else
*(int*)buf = DHD_IOCTL_MAGIC;
break;
case DHD_GET_VERSION:
if (buflen < sizeof(int))
bcmerror = -BCME_BUFTOOSHORT;
else
*(int*)buf = DHD_IOCTL_VERSION;
break;
case DHD_GET_VAR:
case DHD_SET_VAR: {
char *arg;
uint arglen;
/* scan past the name to any arguments */
for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
;
if (*arg) {
bcmerror = BCME_BUFTOOSHORT;
break;
}
/* account for the NUL terminator */
arg++, arglen--;
/* call with the appropriate arguments */
if (ioc->cmd == DHD_GET_VAR)
bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
buf, buflen, IOV_GET);
else
bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0, arg, arglen, IOV_SET);
if (bcmerror != BCME_UNSUPPORTED)
break;
/* not in generic table, try protocol module */
if (ioc->cmd == DHD_GET_VAR)
bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
arglen, buf, buflen, IOV_GET);
else
bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
NULL, 0, arg, arglen, IOV_SET);
if (bcmerror != BCME_UNSUPPORTED)
break;
/* if still not found, try bus module */
if (ioc->cmd == DHD_GET_VAR) {
bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
arg, arglen, buf, buflen, IOV_GET);
} else {
bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
NULL, 0, arg, arglen, IOV_SET);
}
break;
}
default:
bcmerror = BCME_UNSUPPORTED;
}
return bcmerror;
}
#ifdef SHOW_EVENTS
static void
wl_show_host_event(wl_event_msg_t *event, void *event_data)
{
uint i, status, reason;
bool group = FALSE, flush_txq = FALSE, link = FALSE;
const char *auth_str;
const char *event_name;
uchar *buf;
char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
uint event_type, flags, auth_type, datalen;
event_type = ntoh32(event->event_type);
flags = ntoh16(event->flags);
status = ntoh32(event->status);
reason = ntoh32(event->reason);
auth_type = ntoh32(event->auth_type);
datalen = ntoh32(event->datalen);
/* debug dump of event messages */
sprintf(eabuf, "%02x:%02x:%02x:%02x:%02x:%02x",
(uchar)event->addr.octet[0]&0xff,
(uchar)event->addr.octet[1]&0xff,
(uchar)event->addr.octet[2]&0xff,
(uchar)event->addr.octet[3]&0xff,
(uchar)event->addr.octet[4]&0xff,
(uchar)event->addr.octet[5]&0xff);
event_name = "UNKNOWN";
for (i = 0; i < (uint)bcmevent_names_size; i++)
if (bcmevent_names[i].event == event_type)
event_name = bcmevent_names[i].name;
if (flags & WLC_EVENT_MSG_LINK)
link = TRUE;
if (flags & WLC_EVENT_MSG_GROUP)
group = TRUE;
if (flags & WLC_EVENT_MSG_FLUSHTXQ)
flush_txq = TRUE;
switch (event_type) {
case WLC_E_START:
case WLC_E_DEAUTH:
case WLC_E_DISASSOC:
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
break;
case WLC_E_ASSOC_IND:
case WLC_E_REASSOC_IND:
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
break;
case WLC_E_ASSOC:
case WLC_E_REASSOC:
if (status == WLC_E_STATUS_SUCCESS) {
DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
} else if (status == WLC_E_STATUS_TIMEOUT) {
DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
} else if (status == WLC_E_STATUS_FAIL) {
DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
event_name, eabuf, (int)reason));
} else {
DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
event_name, eabuf, (int)status));
}
break;
case WLC_E_DEAUTH_IND:
case WLC_E_DISASSOC_IND:
DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
break;
case WLC_E_AUTH:
case WLC_E_AUTH_IND:
if (auth_type == DOT11_OPEN_SYSTEM)
auth_str = "Open System";
else if (auth_type == DOT11_SHARED_KEY)
auth_str = "Shared Key";
else {
sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
auth_str = err_msg;
}
if (event_type == WLC_E_AUTH_IND) {
DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
} else if (status == WLC_E_STATUS_SUCCESS) {
DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
event_name, eabuf, auth_str));
} else if (status == WLC_E_STATUS_TIMEOUT) {
DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
event_name, eabuf, auth_str));
} else if (status == WLC_E_STATUS_FAIL) {
DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
event_name, eabuf, auth_str, (int)reason));
}
break;
case WLC_E_JOIN:
case WLC_E_ROAM:
case WLC_E_SET_SSID:
if (status == WLC_E_STATUS_SUCCESS) {
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
} else if (status == WLC_E_STATUS_FAIL) {
DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
} else if (status == WLC_E_STATUS_NO_NETWORKS) {
DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
} else {
DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
event_name, (int)status));
}
break;
case WLC_E_BEACON_RX:
if (status == WLC_E_STATUS_SUCCESS) {
DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
} else if (status == WLC_E_STATUS_FAIL) {
DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
} else {
DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
}
break;
case WLC_E_LINK:
DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
break;
case WLC_E_MIC_ERROR:
DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
event_name, eabuf, group, flush_txq));
break;
case WLC_E_ICV_ERROR:
case WLC_E_UNICAST_DECODE_ERROR:
case WLC_E_MULTICAST_DECODE_ERROR:
DHD_EVENT(("MACEVENT: %s, MAC %s\n",
event_name, eabuf));
break;
case WLC_E_TXFAIL:
DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
break;
case WLC_E_SCAN_COMPLETE:
case WLC_E_PMKID_CACHE:
DHD_EVENT(("MACEVENT: %s\n", event_name));
break;
case WLC_E_PFN_NET_FOUND:
case WLC_E_PFN_NET_LOST:
case WLC_E_PFN_SCAN_COMPLETE:
case WLC_E_PFN_SCAN_NONE:
case WLC_E_PFN_SCAN_ALLGONE:
DHD_EVENT(("PNOEVENT: %s\n", event_name));
break;
case WLC_E_PSK_SUP:
case WLC_E_PRUNE:
DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
event_name, (int)status, (int)reason));
break;
#ifdef WIFI_ACT_FRAME
case WLC_E_ACTION_FRAME:
DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
break;
#endif /* WIFI_ACT_FRAME */
case WLC_E_TRACE: {
static uint32 seqnum_prev = 0;
msgtrace_hdr_t hdr;
uint32 nblost;
char *s, *p;
buf = (uchar *) event_data;
memcpy(&hdr, buf, MSGTRACE_HDRLEN);
if (hdr.version != MSGTRACE_VERSION) {
printf("\nMACEVENT: %s [unsupported version --> "
"dhd version:%d dongle version:%d]\n",
event_name, MSGTRACE_VERSION, hdr.version);
/* Reset datalen to avoid display below */
datalen = 0;
break;
}
/* There are 2 bytes available at the end of data */
buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
"discarded_bytes %d discarded_printf %d]\n",
ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
}
nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
if (nblost > 0) {
printf("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
ntoh32(hdr.seqnum), nblost);
}
seqnum_prev = ntoh32(hdr.seqnum);
/* Display the trace buffer. Advance from \n to \n to avoid
* printing one oversized string (an issue with Linux printk)
*/
p = (char *)&buf[MSGTRACE_HDRLEN];
while ((s = strstr(p, "\n")) != NULL) {
*s = '\0';
printf("%s\n", p);
p = s+1;
}
printf("%s\n", p);
/* Reset datalen to avoid display below */
datalen = 0;
break;
}
case WLC_E_RSSI:
DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
break;
default:
DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
event_name, event_type, eabuf, (int)status, (int)reason,
(int)auth_type));
break;
}
/* show any appended data */
if (datalen) {
buf = (uchar *) event_data;
DHD_EVENT((" data (%d) : ", datalen));
for (i = 0; i < datalen; i++)
DHD_EVENT((" 0x%02x ", *buf++));
DHD_EVENT(("\n"));
}
}
#endif /* SHOW_EVENTS */
int
wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
wl_event_msg_t *event, void **data_ptr)
{
/* check whether packet is a BRCM event pkt */
bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
uint8 *event_data;
uint32 type, status, reason, datalen;
uint16 flags;
int evlen;
if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
return (BCME_ERROR);
}
/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
if (ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype) != BCMILCP_BCM_SUBTYPE_EVENT) {
DHD_ERROR(("%s: mismatched subtype, bailing\n", __FUNCTION__));
return (BCME_ERROR);
}
*data_ptr = &pvt_data[1];
event_data = *data_ptr;
/* memcpy since BRCM event pkt may be unaligned. */
memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
type = ntoh32_ua((void *)&event->event_type);
flags = ntoh16_ua((void *)&event->flags);
status = ntoh32_ua((void *)&event->status);
reason = ntoh32_ua((void *)&event->reason);
datalen = ntoh32_ua((void *)&event->datalen);
evlen = datalen + sizeof(bcm_event_t);
switch (type) {
#ifdef PROP_TXSTATUS
case WLC_E_FIFO_CREDIT_MAP:
dhd_wlfc_event(dhd_pub->info);
dhd_wlfc_FIFOcreditmap_event(dhd_pub->info, event_data);
WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
event_data[2],
event_data[3], event_data[4], event_data[5]));
break;
#endif
case WLC_E_IF:
{
dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
#ifdef PROP_TXSTATUS
{
uint8* ea = pvt_data->eth.ether_dhost;
WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
"[%02x:%02x:%02x:%02x:%02x:%02x]\n",
ifevent->ifidx,
((ifevent->action == WLC_E_IF_ADD) ? "ADD":"DEL"),
((ifevent->is_AP == 0) ? "STA":"AP "),
ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
(void)ea;
dhd_wlfc_interface_event(dhd_pub->info,
((ifevent->action == WLC_E_IF_ADD) ?
eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
ifevent->ifidx, ifevent->is_AP, ea);
/* dhd already has created an interface by default, for 0 */
if (ifevent->ifidx == 0)
break;
}
#endif /* PROP_TXSTATUS */
#ifdef WL_CFG80211
if (wl_cfg80211_is_progress_ifchange()) {
DHD_ERROR(("%s: ifidx %d for %s action %d\n",
__FUNCTION__, ifevent->ifidx,
event->ifname, ifevent->action));
if (ifevent->action == WLC_E_IF_ADD)
wl_cfg80211_notify_ifchange();
return (BCME_OK);
}
#endif /* WL_CFG80211 */
if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
if (ifevent->action == WLC_E_IF_ADD) {
if (dhd_add_if(dhd_pub->info, ifevent->ifidx,
NULL, event->ifname,
event->addr.octet,
ifevent->flags, ifevent->bssidx)) {
DHD_ERROR(("%s: dhd_add_if failed!!"
" ifidx: %d for %s\n",
__FUNCTION__,
ifevent->ifidx,
event->ifname));
return (BCME_ERROR);
}
}
else
dhd_del_if(dhd_pub->info, ifevent->ifidx);
} else {
#ifndef PROP_TXSTATUS
DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
__FUNCTION__, ifevent->ifidx, event->ifname));
#endif /* !PROP_TXSTATUS */
}
}
/* send up the if event: btamp user needs it */
*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
/* push up to external supp/auth */
dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
break;
#ifdef WLMEDIA_HTSF
case WLC_E_HTSFSYNC:
htsf_update(dhd_pub->info, event_data);
break;
#endif /* WLMEDIA_HTSF */
case WLC_E_NDIS_LINK: {
uint32 temp = hton32(WLC_E_LINK);
memcpy((void *)(&pvt_data->event.event_type), &temp,
sizeof(pvt_data->event.event_type));
}
/* These are what external supplicant/authenticator wants */
/* fall through */
case WLC_E_LINK:
case WLC_E_DEAUTH:
case WLC_E_DEAUTH_IND:
case WLC_E_DISASSOC:
case WLC_E_DISASSOC_IND:
DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
__FUNCTION__, type, flags, status));
/* fall through */
default:
*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
/* push up to external supp/auth */
dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
__FUNCTION__, type, flags, status));
/* put it back to WLC_E_NDIS_LINK */
if (type == WLC_E_NDIS_LINK) {
uint32 temp;
temp = ntoh32_ua((void *)&event->event_type);
DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
temp = ntoh32(WLC_E_NDIS_LINK);
memcpy((void *)(&pvt_data->event.event_type), &temp,
sizeof(pvt_data->event.event_type));
}
break;
}
#ifdef SHOW_EVENTS
wl_show_host_event(event, (void *)event_data);
#endif /* SHOW_EVENTS */
return (BCME_OK);
}
void
wl_event_to_host_order(wl_event_msg_t * evt)
{
/* Event struct members passed from dongle to host are stored in network
* byte order. Convert all members to host-order.
*/
evt->event_type = ntoh32(evt->event_type);
evt->flags = ntoh16(evt->flags);
evt->status = ntoh32(evt->status);
evt->reason = ntoh32(evt->reason);
evt->auth_type = ntoh32(evt->auth_type);
evt->datalen = ntoh32(evt->datalen);
evt->version = ntoh16(evt->version);
}
void
dhd_print_buf(void *pbuf, int len, int bytes_per_line)
{
#ifdef DHD_DEBUG
int i, j = 0;
unsigned char *buf = pbuf;
if (bytes_per_line == 0) {
bytes_per_line = len;
}
for (i = 0; i < len; i++) {
printf("%2.2x", *buf++);
j++;
if (j == bytes_per_line) {
printf("\n");
j = 0;
} else {
printf(":");
}
}
printf("\n");
#endif /* DHD_DEBUG */
}
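/*
* Illustrative sketch (not from the original source): with bytes_per_line = 6,
* a 6-byte MAC address prints as one colon-separated line:
*
* unsigned char mac[6] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
* dhd_print_buf(mac, sizeof(mac), 6); -> "00:90:4c:11:22:33"
*
* Passing bytes_per_line = 0 dumps the whole buffer on a single line.
*/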
#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
/* Convert user's input in hex pattern to byte-size mask */
static int
wl_pattern_atoh(char *src, char *dst)
{
int i;
if (strncmp(src, "0x", 2) != 0 &&
strncmp(src, "0X", 2) != 0) {
DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
return -1;
}
src = src + 2; /* Skip past 0x */
if (strlen(src) % 2 != 0) {
DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
return -1;
}
for (i = 0; *src != '\0'; i++) {
char num[3];
bcm_strncpy_s(num, sizeof(num), src, 2);
num[2] = '\0';
dst[i] = (uint8)strtoul(num, NULL, 16);
src += 2;
}
return i;
}
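/*
* Illustrative sketch: the parser accepts an even-length hex string with a
* "0x"/"0X" prefix and returns the number of bytes written, e.g.
*
* char mask[4];
* int n = wl_pattern_atoh("0xFF00FF00", mask);
* -> n == 4, mask == { 0xff, 0x00, 0xff, 0x00 }
*
* "FF00" (missing prefix) and "0xFFF" (odd length) both return -1.
*/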
void
dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
{
char *argv[8];
int i = 0;
const char *str;
int buf_len;
int str_len;
char *arg_save = 0, *arg_org = 0;
int rc;
char buf[128];
wl_pkt_filter_enable_t enable_parm;
wl_pkt_filter_enable_t * pkt_filterp;
if (!arg)
return;
if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
goto fail;
}
arg_org = arg_save;
memcpy(arg_save, arg, strlen(arg) + 1);
argv[i] = bcmstrtok(&arg_save, " ", 0);
i = 0;
if (argv[i] == NULL) {
DHD_ERROR(("No args provided\n"));
goto fail;
}
str = "pkt_filter_enable";
str_len = strlen(str);
bcm_strncpy_s(buf, sizeof(buf), str, str_len);
buf[str_len] = '\0';
buf_len = str_len + 1;
pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
/* Parse packet filter id. */
enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
/* Parse enable/disable value. */
enable_parm.enable = htod32(enable);
buf_len += sizeof(enable_parm);
memcpy((char *)pkt_filterp,
&enable_parm,
sizeof(enable_parm));
/* Enable/disable the specified filter. */
rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
rc = rc >= 0 ? 0 : rc;
if (rc)
DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
__FUNCTION__, arg, rc));
else
DHD_TRACE(("%s: successfully added pktfilter %s\n",
__FUNCTION__, arg));
/* Control the master mode */
bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
rc = rc >= 0 ? 0 : rc;
if (rc)
DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
__FUNCTION__, arg, rc));
fail:
if (arg_org)
MFREE(dhd->osh, arg_org, strlen(arg) + 1);
}
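/*
* Buffer layout sketch (illustrative): the iovar name and its argument are
* packed back to back in buf, so for filter id 100 the WLC_SET_VAR payload is
*
* "pkt_filter_enable\0" | wl_pkt_filter_enable_t { id = 100, enable = 1 }
*
* and buf_len covers the string (plus NUL) and sizeof(enable_parm). The same
* buffer is then reused for the "pkt_filter_mode" iovar.
*/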
void
dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
{
const char *str;
wl_pkt_filter_t pkt_filter;
wl_pkt_filter_t *pkt_filterp;
int buf_len;
int str_len;
int rc;
uint32 mask_size;
uint32 pattern_size;
char *argv[8], * buf = 0;
int i = 0;
char *arg_save = 0, *arg_org = 0;
#define BUF_SIZE 2048
if (!arg)
return;
if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
goto fail;
}
arg_org = arg_save;
if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
goto fail;
}
memcpy(arg_save, arg, strlen(arg) + 1);
if (strlen(arg) > BUF_SIZE) {
DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
goto fail;
}
argv[i] = bcmstrtok(&arg_save, " ", 0);
while (argv[i++])
argv[i] = bcmstrtok(&arg_save, " ", 0);
i = 0;
if (argv[i] == NULL) {
DHD_ERROR(("No args provided\n"));
goto fail;
}
str = "pkt_filter_add";
str_len = strlen(str);
bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
buf[ str_len ] = '\0';
buf_len = str_len + 1;
pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
/* Parse packet filter id. */
pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
if (argv[++i] == NULL) {
DHD_ERROR(("Polarity not provided\n"));
goto fail;
}
/* Parse filter polarity. */
pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
if (argv[++i] == NULL) {
DHD_ERROR(("Filter type not provided\n"));
goto fail;
}
/* Parse filter type. */
pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
if (argv[++i] == NULL) {
DHD_ERROR(("Offset not provided\n"));
goto fail;
}
/* Parse pattern filter offset. */
pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
if (argv[++i] == NULL) {
DHD_ERROR(("Bitmask not provided\n"));
goto fail;
}
/* Parse pattern filter mask. */
mask_size =
htod32(wl_pattern_atoh(argv[i], (char *) pkt_filterp->u.pattern.mask_and_pattern));
if (argv[++i] == NULL) {
DHD_ERROR(("Pattern not provided\n"));
goto fail;
}
/* Parse pattern filter pattern. */
pattern_size =
htod32(wl_pattern_atoh(argv[i],
(char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
if (mask_size != pattern_size) {
DHD_ERROR(("Mask and pattern not the same size\n"));
goto fail;
}
pkt_filter.u.pattern.size_bytes = mask_size;
buf_len += WL_PKT_FILTER_FIXED_LEN;
buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
/* Keep-alive attributes are set in local variable (keep_alive_pkt), and
** then memcpy'ed into buffer (keep_alive_pktp) since there is no
** guarantee that the buffer is properly aligned.
*/
memcpy((char *)pkt_filterp,
&pkt_filter,
WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
rc = rc >= 0 ? 0 : rc;
if (rc)
DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
__FUNCTION__, arg, rc));
else
DHD_TRACE(("%s: successfully added pktfilter %s\n",
__FUNCTION__, arg));
fail:
if (arg_org)
MFREE(dhd->osh, arg_org, strlen(arg) + 1);
if (buf)
MFREE(dhd->osh, buf, BUF_SIZE);
}
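/*
* Buffer layout sketch (illustrative): "pkt_filter_add" is followed by the
* fixed wl_pkt_filter_t header and then the mask and pattern back to back,
* e.g. for the argument string "100 0 0 0 0xff 0x80":
*
* "pkt_filter_add\0" | fixed fields | mask[0] = 0xff | pattern[0] = 0x80
*
* which is why buf_len adds WL_PKT_FILTER_FIXED_LEN +
* WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size.
*/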
/* ========================== */
/* ==== ARP OFFLOAD SUPPORT = */
/* ========================== */
#ifdef ARP_OFFLOAD_SUPPORT
void
dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
{
char iovbuf[32];
int retcode;
bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
__FUNCTION__, arp_mode, retcode));
else
DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
__FUNCTION__, arp_mode));
}
void
dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
{
char iovbuf[32];
int retcode;
bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
__FUNCTION__, arp_enable, retcode));
else
DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
__FUNCTION__, arp_enable));
}
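/*
* Typical call sequence (illustrative; the arp_ol mode bits are
* firmware-defined and 0xf is only an assumed example):
*
* dhd_arp_offload_set(dhd, 0xf);
* dhd_arp_offload_enable(dhd, TRUE);
* dhd_arp_offload_add_ip(dhd, ipv4_addr); -> host IP pushed to the dongle
*/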
void
dhd_aoe_arp_clr(dhd_pub_t *dhd)
{
int ret = 0;
int iov_len = 0;
char iovbuf[128];
if (dhd == NULL) return;
iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
}
void
dhd_aoe_hostip_clr(dhd_pub_t *dhd)
{
int ret = 0;
int iov_len = 0;
char iovbuf[128];
if (dhd == NULL) return;
iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0)) < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
}
void
dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr)
{
int iov_len = 0;
char iovbuf[32];
int retcode;
iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr, 4, iovbuf, sizeof(iovbuf));
retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
if (retcode)
DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
__FUNCTION__, retcode));
else
DHD_TRACE(("%s: sARP H ipaddr entry added \n",
__FUNCTION__));
}
int
dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen)
{
int retcode, i;
int iov_len = 0;
uint32 *ptr32 = buf;
bool clr_bottom = FALSE;
if (!buf)
return -1;
iov_len = bcm_mkiovar("arp_hostip", 0, 0, buf, buflen);
retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, TRUE, 0);
if (retcode) {
DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
__FUNCTION__, retcode));
return -1;
}
/* clean up the remainder of the buf */
for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
if (!clr_bottom) {
if (*ptr32 == 0)
clr_bottom = TRUE;
} else {
*ptr32 = 0;
}
ptr32++;
}
return 0;
}
#endif /* ARP_OFFLOAD_SUPPORT */
/* send up locally generated event */
void
dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
switch (ntoh32(event->event_type)) {
case WLC_E_BTA_HCI_EVENT:
break;
default:
break;
}
/* Call per-port handler. */
dhd_sendup_event(dhdp, event, data);
}
#ifdef SIMPLE_ISCAN
uint iscan_thread_id = 0;
iscan_buf_t * iscan_chain = 0;
iscan_buf_t *
dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
{
iscan_buf_t *iscanbuf_alloc = 0;
iscan_buf_t *iscanbuf_head;
DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
dhd_iscan_lock();
iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
if (iscanbuf_alloc == NULL)
goto fail;
iscanbuf_alloc->next = NULL;
iscanbuf_head = *iscanbuf;
DHD_ISCAN(("%s: addr of allocated node = 0x%X"
"addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
__FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
if (iscanbuf_head == NULL) {
*iscanbuf = iscanbuf_alloc;
DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
goto fail;
}
while (iscanbuf_head->next)
iscanbuf_head = iscanbuf_head->next;
iscanbuf_head->next = iscanbuf_alloc;
fail:
dhd_iscan_unlock();
return iscanbuf_alloc;
}
void
dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
{
iscan_buf_t *iscanbuf_free = 0;
iscan_buf_t *iscanbuf_prv = 0;
iscan_buf_t *iscanbuf_cur;
dhd_pub_t *dhd = dhd_bus_pub(dhdp);
DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
dhd_iscan_lock();
iscanbuf_cur = iscan_chain;
/* If iscan_delete is null then delete the entire
* chain or else delete specific one provided
*/
if (!iscan_delete) {
while (iscanbuf_cur) {
iscanbuf_free = iscanbuf_cur;
iscanbuf_cur = iscanbuf_cur->next;
iscanbuf_free->next = 0;
MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
}
iscan_chain = 0;
} else {
while (iscanbuf_cur) {
if (iscanbuf_cur == iscan_delete)
break;
iscanbuf_prv = iscanbuf_cur;
iscanbuf_cur = iscanbuf_cur->next;
}
if (iscanbuf_prv)
iscanbuf_prv->next = iscan_delete->next;
iscan_delete->next = 0;
MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
if (!iscanbuf_prv)
iscan_chain = 0;
}
dhd_iscan_unlock();
}
iscan_buf_t *
dhd_iscan_result_buf(void)
{
return iscan_chain;
}
int
dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
{
int rc = -1;
dhd_pub_t *dhd = dhd_bus_pub(dhdp);
char *buf = NULL;
char iovar[] = "iscan";
uint32 allocSize = 0;
wl_ioctl_t ioctl;
if (pParams) {
allocSize = (size + strlen(iovar) + 1);
if ((allocSize < size) || (allocSize < strlen(iovar)))
{
DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
__FUNCTION__, allocSize, size, strlen(iovar)));
goto cleanUp;
}
buf = MALLOC(dhd->osh, allocSize);
if (buf == NULL)
{
DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
goto cleanUp;
}
ioctl.cmd = WLC_SET_VAR;
bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, allocSize);
}
cleanUp:
if (buf) {
MFREE(dhd->osh, buf, allocSize);
}
return rc;
}
static int
dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
{
wl_iscan_results_t *list_buf;
wl_iscan_results_t list;
wl_scan_results_t *results;
iscan_buf_t *iscan_cur;
int status = -1;
dhd_pub_t *dhd = dhd_bus_pub(dhdp);
int rc;
wl_ioctl_t ioctl;
DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
if (!iscan_cur) {
DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
dhd_iscan_free_buf(dhdp, 0);
dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
dhd_ind_scan_confirm(dhdp, FALSE);
goto fail;
}
dhd_iscan_lock();
memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
results = &list_buf->results;
results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
results->version = 0;
results->count = 0;
memset(&list, 0, sizeof(list));
list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
ioctl.cmd = WLC_GET_VAR;
ioctl.set = FALSE;
rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
results->buflen = dtoh32(results->buflen);
results->version = dtoh32(results->version);
*scan_count = results->count = dtoh32(results->count);
status = dtoh32(list_buf->status);
DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));
dhd_iscan_unlock();
if (!(*scan_count)) {
/* TODO: race condition when FLUSH already called */
dhd_iscan_free_buf(dhdp, 0);
}
fail:
return status;
}
#endif /* SIMPLE_ISCAN */
/*
* returns = TRUE if associated, FALSE if not associated
*/
bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf)
{
char bssid[6], zbuf[6];
int ret = -1;
bzero(bssid, 6);
bzero(zbuf, 6);
ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid, ETHER_ADDR_LEN, FALSE, 0);
DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
if (ret == BCME_NOTASSOCIATED) {
DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
}
if (ret < 0)
return FALSE;
if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) != 0)) {
/* STA is associated: BSSID is non-zero */
if (bss_buf) {
/* return bss if caller provided buf */
memcpy(bss_buf, bssid, ETHER_ADDR_LEN);
}
return TRUE;
} else {
DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
return FALSE;
}
}
/* Function to estimate possible DTIM_SKIP value */
int
dhd_get_dtim_skip(dhd_pub_t *dhd)
{
int bcn_li_dtim;
int ret = -1;
int dtim_assoc = 0;
if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
bcn_li_dtim = 3;
else
bcn_li_dtim = dhd->dtim_skip;
/* Check if associated */
if (dhd_is_associated(dhd, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
goto exit;
}
/* if assoc grab ap's dtim value */
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
&dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) {
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
goto exit;
}
DHD_ERROR(("%s bcn_li_dtim=%d DTIM=%d Listen=%d\n",
__FUNCTION__, bcn_li_dtim, dtim_assoc, LISTEN_INTERVAL));
/* if not associated just exit */
if (dtim_assoc == 0) {
goto exit;
}
/* check if sta listen interval fits into AP dtim */
if (dtim_assoc > LISTEN_INTERVAL) {
/* AP DTIM too big for our Listen Interval : no dtim skipping */
bcn_li_dtim = 1;
DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
__FUNCTION__, dtim_assoc, LISTEN_INTERVAL));
goto exit;
}
if ((bcn_li_dtim * dtim_assoc) > LISTEN_INTERVAL) {
/* Round up dtim_skip to fit into STAs Listen Interval */
bcn_li_dtim = (int)(LISTEN_INTERVAL / dtim_assoc);
DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
}
exit:
return bcn_li_dtim;
}
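/*
* Worked example (illustrative, assuming LISTEN_INTERVAL == 10) with the
* default bcn_li_dtim of 3:
*
* dtim_assoc = 3: 3 * 3 = 9 <= 10, keep bcn_li_dtim = 3
* dtim_assoc = 4: 3 * 4 = 12 > 10, round down to 10 / 4 = 2
* dtim_assoc = 12: 12 > 10, no skipping possible, bcn_li_dtim = 1
*/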
/* Check if HostAPD or WFD mode setup */
bool dhd_check_ap_wfd_mode_set(dhd_pub_t *dhd)
{
#ifdef WL_CFG80211
if (((dhd->op_mode & HOSTAPD_MASK) == HOSTAPD_MASK) ||
((dhd->op_mode & WFD_MASK) == WFD_MASK))
return TRUE;
else
#endif /* WL_CFG80211 */
return FALSE;
}
#ifdef PNO_SUPPORT
int
dhd_pno_clean(dhd_pub_t *dhd)
{
char iovbuf[128];
int pfn_enabled = 0;
int iov_len = 0;
int ret;
/* Disable pfn */
iov_len = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) >= 0) {
/* clear pfn */
iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
if (iov_len) {
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
iov_len, TRUE, 0)) < 0) {
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
}
}
else {
ret = -1;
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, iov_len));
}
}
else
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
return ret;
}
int
dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
{
char iovbuf[128];
int ret = -1;
if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) {
DHD_ERROR(("%s error exit\n", __FUNCTION__));
return ret;
}
if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
return (ret);
memset(iovbuf, 0, sizeof(iovbuf));
if ((pfn_enabled) && (dhd_is_associated(dhd, NULL) == TRUE)) {
DHD_ERROR(("%s pno is NOT enable : called in assoc mode , ignore\n", __FUNCTION__));
return ret;
}
/* Enable/disable PNO */
if ((ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf))) > 0) {
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
DHD_ERROR(("%s failed for error=%d\n", __FUNCTION__, ret));
return ret;
}
else {
dhd->pno_enable = pfn_enabled;
DHD_TRACE(("%s set pno as %s\n",
__FUNCTION__, dhd->pno_enable ? "Enable" : "Disable"));
}
}
else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, ret));
return ret;
}
/* Function to execute combined scan */
int
dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr,
int pno_repeat, int pno_freq_expo_max)
{
int err = -1;
char iovbuf[128];
int k, i;
wl_pfn_param_t pfn_param;
wl_pfn_t pfn_element;
uint len = 0;
DHD_TRACE(("%s nssid=%d nchan=%d\n", __FUNCTION__, nssid, scan_fr));
if ((!dhd) && (!ssids_local)) {
DHD_ERROR(("%s error exit\n", __FUNCTION__));
err = -1;
}
if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
return (err);
/* Check for broadcast ssid */
for (k = 0; k < nssid; k++) {
if (!ssids_local[k].SSID_len) {
DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO setting\n", k));
return err;
}
}
/* #define PNO_DUMP 1 */
#ifdef PNO_DUMP
{
int j;
for (j = 0; j < nssid; j++) {
DHD_ERROR(("%d: scan for %s size =%d\n", j,
ssids_local[j].SSID, ssids_local[j].SSID_len));
}
}
#endif /* PNO_DUMP */
/* clean up everything */
if ((err = dhd_pno_clean(dhd)) < 0) {
DHD_ERROR(("%s failed error=%d\n", __FUNCTION__, err));
return err;
}
memset(iovbuf, 0, sizeof(iovbuf));
memset(&pfn_param, 0, sizeof(pfn_param));
memset(&pfn_element, 0, sizeof(pfn_element));
/* set pfn parameters */
pfn_param.version = htod32(PFN_VERSION);
pfn_param.flags = htod16((PFN_LIST_ORDER << SORT_CRITERIA_BIT));
/* check and set extra pno params */
if ((pno_repeat != 0) || (pno_freq_expo_max != 0)) {
pfn_param.flags |= htod16(ENABLE << ENABLE_ADAPTSCAN_BIT);
pfn_param.repeat = (uchar) (pno_repeat);
pfn_param.exp = (uchar) (pno_freq_expo_max);
}
/* set up pno scan fr */
if (scan_fr != 0)
pfn_param.scan_freq = htod32(scan_fr);
if (pfn_param.scan_freq > PNO_SCAN_MAX_FW_SEC) {
DHD_ERROR(("%s pno freq above %d sec\n", __FUNCTION__, PNO_SCAN_MAX_FW_SEC));
return err;
}
if (pfn_param.scan_freq < PNO_SCAN_MIN_FW_SEC) {
DHD_ERROR(("%s pno freq less %d sec\n", __FUNCTION__, PNO_SCAN_MIN_FW_SEC));
return err;
}
len = bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf, sizeof(iovbuf));
if ((err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
DHD_ERROR(("%s pfn_set failed for error=%d\n",
__FUNCTION__, err));
return err;
}
/* set all pfn ssid */
for (i = 0; i < nssid; i++) {
pfn_element.infra = htod32(DOT11_BSSTYPE_INFRASTRUCTURE);
pfn_element.auth = (DOT11_OPEN_SYSTEM);
pfn_element.wpa_auth = htod32(WPA_AUTH_PFN_ANY);
pfn_element.wsec = htod32(0);
pfn_element.infra = htod32(1);
pfn_element.flags = htod32(ENABLE << WL_PFN_HIDDEN_BIT);
memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID, ssids_local[i].SSID_len);
pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
if ((len =
bcm_mkiovar("pfn_add", (char *)&pfn_element,
sizeof(pfn_element), iovbuf, sizeof(iovbuf))) > 0) {
if ((err =
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0)) < 0) {
DHD_ERROR(("%s failed for i=%d error=%d\n",
__FUNCTION__, i, err));
return err;
}
else
DHD_TRACE(("%s set OK with PNO time=%d repeat=%d max_adjust=%d\n",
__FUNCTION__, pfn_param.scan_freq,
pfn_param.repeat, pfn_param.exp));
}
else DHD_ERROR(("%s failed err=%d\n", __FUNCTION__, err));
}
/* Enable PNO */
/* dhd_pno_enable(dhd, 1); */
return err;
}
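/*
* Usage sketch (illustrative only): program two SSIDs with a 60 s scan period
* and default repeat/backoff, then turn PNO on explicitly:
*
* wlc_ssid_t ssids[2]; (SSID/SSID_len filled in by the caller)
* if (dhd_pno_set(dhd, ssids, 2, 60, 0, 0) >= 0)
* dhd_pno_enable(dhd, 1);
*
* The enable step is separate because dhd_pno_set() leaves PNO disabled, as
* the commented-out dhd_pno_enable() call above shows.
*/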
int
dhd_pno_get_status(dhd_pub_t *dhd)
{
int ret = -1;
if (!dhd)
return ret;
else
return (dhd->pno_enable);
}
#endif /* PNO_SUPPORT */
#if defined(KEEP_ALIVE)
int dhd_keep_alive_onoff(dhd_pub_t *dhd)
{
char buf[256];
const char *str;
wl_mkeep_alive_pkt_t mkeep_alive_pkt;
wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
int buf_len;
int str_len;
int res = -1;
if (dhd_check_ap_wfd_mode_set(dhd) == TRUE)
return (res);
DHD_TRACE(("%s execution\n", __FUNCTION__));
str = "mkeep_alive";
str_len = strlen(str);
strncpy(buf, str, str_len);
buf[ str_len ] = '\0';
mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
mkeep_alive_pkt.period_msec = KEEP_ALIVE_PERIOD;
buf_len = str_len + 1;
mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
/* Setup keep alive zero for null packet generation */
mkeep_alive_pkt.keep_alive_id = 0;
mkeep_alive_pkt.len_bytes = 0;
buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
/* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
* then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
* guarantee that the buffer is properly aligned.
*/
memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
return res;
}
#endif /* defined(KEEP_ALIVE) */
/* Android ComboSCAN support */
/*
* data parsing from ComboScan tlv list
*/
int
wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
int input_size, int *bytes_left)
{
char* str = *list_str;
uint16 short_temp;
uint32 int_temp;
if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
DHD_ERROR(("%s error parameters\n", __FUNCTION__));
return -1;
}
/* Clean all dest bytes */
memset(dst, 0, dst_size);
while (*bytes_left > 0) {
if (str[0] != token) {
DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
__FUNCTION__, token, str[0], *bytes_left));
return -1;
}
*bytes_left -= 1;
str += 1;
if (input_size == 1) {
memcpy(dst, str, input_size);
}
else if (input_size == 2) {
/* copy out, byte-swap, then copy into the (possibly unaligned) dst */
memcpy(&short_temp, str, input_size);
short_temp = htod16(short_temp);
memcpy(dst, &short_temp, input_size);
}
else if (input_size == 4) {
memcpy(&int_temp, str, input_size);
int_temp = htod32(int_temp);
memcpy(dst, &int_temp, input_size);
}
*bytes_left -= input_size;
str += input_size;
*list_str = str;
return 1;
}
return 1;
}
/*
* channel list parsing from cscan tlv list
*/
int
wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
int channel_num, int *bytes_left)
{
char* str = *list_str;
int idx = 0;
if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
DHD_ERROR(("%s error parameters\n", __FUNCTION__));
return -1;
}
while (*bytes_left > 0) {
if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
*list_str = str;
DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
return idx;
}
/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
*bytes_left -= 1;
str += 1;
if (str[0] == 0) {
/* All channels */
channel_list[idx] = 0x0;
}
else {
channel_list[idx] = (uint16)str[0];
DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
}
*bytes_left -= 1;
str += 1;
if (idx++ > 255) {
DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
return -1;
}
}
*list_str = str;
return idx;
}
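/*
* Illustrative TLV stream (assuming CSCAN_TLV_TYPE_CHANNEL_IE == 'C'): the
* byte sequence
*
* 'C', 1, 'C', 6, 'C', 11
*
* parses into channel_list = { 1, 6, 11 } and returns 3, while an entry of
* 'C', 0 selects all channels.
*/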
/*
* SSIDs list parsing from cscan tlv list
*/
int
wl_iw_parse_ssid_list_tlv(char** list_str, wlc_ssid_t* ssid, int max, int *bytes_left)
{
char* str = *list_str;
int idx = 0;
if ((list_str == NULL) || (*list_str == NULL) || (bytes_left == NULL) || (*bytes_left < 0)) {
DHD_ERROR(("%s error parameters\n", __FUNCTION__));
return -1;
}
while (*bytes_left > 0) {
if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
*list_str = str;
DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
return idx;
}
/* Get proper CSCAN_TLV_TYPE_SSID_IE */
*bytes_left -= 1;
str += 1;
if (str[0] == 0) {
/* Broadcast SSID */
ssid[idx].SSID_len = 0;
memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
*bytes_left -= 1;
str += 1;
DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
}
else if (str[0] <= DOT11_MAX_SSID_LEN) {
/* Get proper SSID size */
ssid[idx].SSID_len = str[0];
*bytes_left -= 1;
str += 1;
/* Get SSID */
if (ssid[idx].SSID_len > *bytes_left) {
DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
return -1;
}
memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
*bytes_left -= ssid[idx].SSID_len;
str += ssid[idx].SSID_len;
DHD_TRACE(("%s :size=%d left=%d\n",
(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
}
else {
DHD_ERROR(("### SSID size more that %d\n", str[0]));
return -1;
}
if (idx++ > max) {
DHD_ERROR(("%s number of SSIDs more that %d\n", __FUNCTION__, idx));
return -1;
}
}
*list_str = str;
return idx;
}
/* Parse a comma-separated list from list_str into ssid array, starting
* at index idx. Max specifies size of the ssid array. Parses ssids
* and returns updated idx; if idx >= max not all fit, the excess have
* not been copied. Returns -1 on empty string, or on ssid too long.
*/
int
wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
{
char* str, *ptr;
if ((list_str == NULL) || (*list_str == NULL))
return -1;
for (str = *list_str; str != NULL; str = ptr) {
/* check for next TAG */
if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
*list_str = str + strlen(GET_CHANNEL);
return idx;
}
if ((ptr = strchr(str, ',')) != NULL) {
*ptr++ = '\0';
}
if (strlen(str) > DOT11_MAX_SSID_LEN) {
DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
return -1;
}
if (strlen(str) == 0)
ssid[idx].SSID_len = 0;
if (idx < max) {
bcm_strcpy_s((char*)ssid[idx].SSID, sizeof(ssid[idx].SSID), str);
ssid[idx].SSID_len = strlen(str);
}
idx++;
}
return idx;
}
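/*
* Illustrative sketch (GET_CHANNEL is whatever tag starts the channel
* section): entries are split on ',' and copied until the tag is hit, e.g.
*
* list_str -> "home,guest" followed by the channel tag
* int n = wl_iw_parse_ssid_list(&list_str, ssids, 0, 4);
* -> n == 2, ssids[0] = "home" (len 4), ssids[1] = "guest" (len 5)
*/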
/*
* Parse channel list from iwpriv CSCAN
*/
int
wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
{
int num;
int val;
char* str;
char* endptr = NULL;
if ((list_str == NULL)||(*list_str == NULL))
return -1;
str = *list_str;
num = 0;
while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
val = (int)strtoul(str, &endptr, 0);
if (endptr == str) {
printf("could not parse channel number starting at"
" substring \"%s\" in list:\n%s\n",
str, *list_str);
return -1;
}
str = endptr + strspn(endptr, " ,");
if (num == channel_num) {
DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
channel_num, *list_str));
return -1;
}
channel_list[num++] = (uint16)val;
}
*list_str = str;
return num;
}
| gpl-2.0 |
GoinsWithTheWind/drm-prime-sync | drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 395 | 14565 | /*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
* Licensed under GPLv2
*
* ATPX support for both Intel/ATI
*/
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include "amdgpu_acpi.h"
struct amdgpu_atpx_functions {
bool px_params;
bool power_cntl;
bool disp_mux_cntl;
bool i2c_mux_cntl;
bool switch_start;
bool switch_end;
bool disp_connectors_mapping;
bool disp_detection_ports;
};
struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
};
static struct amdgpu_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
struct amdgpu_atpx atpx;
} amdgpu_atpx_priv;
struct atpx_verify_interface {
u16 size; /* structure size in bytes (includes size field) */
u16 version; /* version */
u32 function_bits; /* supported functions bit vector */
} __packed;
struct atpx_px_params {
u16 size; /* structure size in bytes (includes size field) */
u32 valid_flags; /* which flags are valid */
u32 flags; /* flags */
} __packed;
struct atpx_power_control {
u16 size;
u8 dgpu_state;
} __packed;
struct atpx_mux {
u16 size;
u16 mux;
} __packed;
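/*
* Wire layout sketch (illustrative): these packed structs mirror the raw ACPI
* buffers, so a dGPU power-up request built from atpx_power_control is the
* three bytes
*
* { 0x03, 0x00, 0x01 } (size = 3 as LE u16, dgpu_state = 1)
*
* which is why callers set input.size to the total byte count rather than
* using sizeof() on the struct.
*/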
bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
/**
* amdgpu_atpx_call - call an ATPX method
*
* @handle: acpi handle
* @function: the ATPX function to execute
* @params: ATPX function params
*
* Executes the requested ATPX function (all asics).
* Returns a pointer to the acpi output buffer.
*/
static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atpx_arg_elements[2];
struct acpi_object_list atpx_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
atpx_arg.count = 2;
atpx_arg.pointer = &atpx_arg_elements[0];
atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
atpx_arg_elements[0].integer.value = function;
if (params) {
atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
atpx_arg_elements[1].buffer.length = params->length;
atpx_arg_elements[1].buffer.pointer = params->pointer;
} else {
/* We need a second fake parameter */
atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
atpx_arg_elements[1].integer.value = 0;
}
status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk("failed to evaluate ATPX got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
}
return buffer.pointer;
}
/**
* amdgpu_atpx_parse_functions - parse supported functions
*
* @f: supported functions struct
* @mask: supported functions mask from ATPX
*
* Use the supported functions mask from ATPX function
* ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
* are supported (all asics).
*/
static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mask)
{
f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
}
/**
* amdgpu_atpx_validate_functions - validate ATPX functions
*
* @atpx: amdgpu atpx struct
*
* Validate that required functions are enabled (all asics).
* returns 0 on success, error on failure.
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
atpx->functions.power_cntl = true;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
u32 valid_bits;
info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
return -EIO;
memset(&output, 0, sizeof(output));
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
printk("ATPX buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
size = min(sizeof(output), size);
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
/* if separate mux flag is set, mux controls are required */
if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
atpx->functions.i2c_mux_cntl = true;
atpx->functions.disp_mux_cntl = true;
}
/* if any outputs are muxed, mux controls are required */
if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
ATPX_TV_SIGNAL_MUXED |
ATPX_DFP_SIGNAL_MUXED))
atpx->functions.disp_mux_cntl = true;
kfree(info);
}
return 0;
}
/**
* amdgpu_atpx_verify_interface - verify ATPX
*
* @atpx: amdgpu atpx struct
*
* Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
* to initialize ATPX and determine what features are supported
* (all asics).
* returns 0 on success, error on failure.
*/
static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
{
union acpi_object *info;
struct atpx_verify_interface output;
size_t size;
int err = 0;
info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
memset(&output, 0, sizeof(output));
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
printk("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
size = min(sizeof(output), size);
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
printk("ATPX version %u, functions 0x%08x\n",
output.version, output.function_bits);
amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);
out:
kfree(info);
return err;
}
/**
* amdgpu_atpx_set_discrete_state - power up/down discrete GPU
*
* @atpx: atpx info struct
* @state: discrete GPU state (0 = power down, 1 = power up)
*
* Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
* power down/up the discrete GPU (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_power_control input;
if (atpx->functions.power_cntl) {
input.size = 3;
input.dgpu_state = state;
params.length = input.size;
params.pointer = &input;
info = amdgpu_atpx_call(atpx->handle,
ATPX_FUNCTION_POWER_CONTROL,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* amdgpu_atpx_switch_disp_mux - switch display mux
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
* switch the display mux between the discrete GPU and integrated GPU
* (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_switch_disp_mux(struct amdgpu_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.disp_mux_cntl) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = amdgpu_atpx_call(atpx->handle,
ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* amdgpu_atpx_switch_i2c_mux - switch i2c/hpd mux
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
* switch the i2c/hpd mux between the discrete GPU and integrated GPU
* (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_switch_i2c_mux(struct amdgpu_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.i2c_mux_cntl) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = amdgpu_atpx_call(atpx->handle,
ATPX_FUNCTION_I2C_MUX_CONTROL,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* amdgpu_atpx_switch_start - notify the sbios of a GPU switch
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
* function to notify the sbios that a switch between the discrete GPU and
* integrated GPU has begun (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_switch_start(struct amdgpu_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.switch_start) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = amdgpu_atpx_call(atpx->handle,
ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* amdgpu_atpx_switch_end - notify the sbios of a GPU switch
*
* @atpx: atpx info struct
* @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
*
* Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
* function to notify the sbios that a switch between the discrete GPU and
* integrated GPU has ended (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_switch_end(struct amdgpu_atpx *atpx, u16 mux_id)
{
struct acpi_buffer params;
union acpi_object *info;
struct atpx_mux input;
if (atpx->functions.switch_end) {
input.size = 4;
input.mux = mux_id;
params.length = input.size;
params.pointer = &input;
info = amdgpu_atpx_call(atpx->handle,
ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
¶ms);
if (!info)
return -EIO;
kfree(info);
}
return 0;
}
/**
* amdgpu_atpx_switchto - switch to the requested GPU
*
* @id: GPU to switch to
*
* Execute the necessary ATPX functions to switch between the discrete GPU and
* integrated GPU (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_switchto(enum vga_switcheroo_client_id id)
{
u16 gpu_id;
if (id == VGA_SWITCHEROO_IGD)
gpu_id = ATPX_INTEGRATED_GPU;
else
gpu_id = ATPX_DISCRETE_GPU;
amdgpu_atpx_switch_start(&amdgpu_atpx_priv.atpx, gpu_id);
amdgpu_atpx_switch_disp_mux(&amdgpu_atpx_priv.atpx, gpu_id);
amdgpu_atpx_switch_i2c_mux(&amdgpu_atpx_priv.atpx, gpu_id);
amdgpu_atpx_switch_end(&amdgpu_atpx_priv.atpx, gpu_id);
return 0;
}
/**
* amdgpu_atpx_power_state - power down/up the requested GPU
*
* @id: GPU to power down/up
* @state: requested power state (0 = off, 1 = on)
*
* Execute the necessary ATPX function to power down/up the discrete GPU
* (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state)
{
/* on w500 ACPI can't change intel gpu state */
if (id == VGA_SWITCHEROO_IGD)
return 0;
amdgpu_atpx_set_discrete_state(&amdgpu_atpx_priv.atpx, state);
return 0;
}
/**
* amdgpu_atpx_pci_probe_handle - look up the ATPX handle
*
* @pdev: pci device
*
* Look up the ATPX handles (all asics).
* Returns true if the handles are found, false if not.
*/
static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
{
acpi_handle dhandle, atpx_handle;
acpi_status status;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
if (ACPI_FAILURE(status)) {
amdgpu_atpx_priv.other_handle = dhandle;
return false;
}
amdgpu_atpx_priv.dhandle = dhandle;
amdgpu_atpx_priv.atpx.handle = atpx_handle;
return true;
}
/**
* amdgpu_atpx_init - verify the ATPX interface
*
* Verify the ATPX interface (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atpx_init(void)
{
int r;
/* set up the ATPX handle */
r = amdgpu_atpx_verify_interface(&amdgpu_atpx_priv.atpx);
if (r)
return r;
/* validate the atpx setup */
r = amdgpu_atpx_validate(&amdgpu_atpx_priv.atpx);
if (r)
return r;
return 0;
}
/**
* amdgpu_atpx_get_client_id - get the client id
*
* @pdev: pci device
*
* look up whether we are the integrated or discrete GPU (all asics).
* Returns the client id.
*/
static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
{
if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
else
return VGA_SWITCHEROO_DIS;
}
static struct vga_switcheroo_handler amdgpu_atpx_handler = {
.switchto = amdgpu_atpx_switchto,
.power_state = amdgpu_atpx_power_state,
.init = amdgpu_atpx_init,
.get_client_id = amdgpu_atpx_get_client_id,
};
/**
* amdgpu_atpx_detect - detect whether we have PX
*
* Check if we have a PX system (all asics).
* Returns true if we have a PX system, false if not.
*/
static bool amdgpu_atpx_detect(void)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
bool has_atpx = false;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
}
if (has_atpx && vga_count == 2) {
acpi_get_name(amdgpu_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
return true;
}
return false;
}
/**
* amdgpu_register_atpx_handler - register with vga_switcheroo
*
* Register the PX callbacks with vga_switcheroo (all asics).
*/
void amdgpu_register_atpx_handler(void)
{
bool r;
/* detect if we have any ATPX + 2 VGA in the system */
r = amdgpu_atpx_detect();
if (!r)
return;
vga_switcheroo_register_handler(&amdgpu_atpx_handler);
}
/**
* amdgpu_unregister_atpx_handler - unregister with vga_switcheroo
*
* Unregister the PX callbacks with vga_switcheroo (all asics).
*/
void amdgpu_unregister_atpx_handler(void)
{
vga_switcheroo_unregister_handler();
}
| gpl-2.0 |
doungni/linux | arch/x86/kernel/fpu/regset.c | 395 | 9045 | /*
* FPU register's regset abstraction, for ptrace, core dumps, etc.
*/
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
/*
* The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
* as the "regset->n" for the xstate regset will be updated based on the feature
* capabilities supported by the xsave.
*/
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
return target_fpu->fpstate_active ? regset->n : 0;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
struct fpu *target_fpu = &target->thread.fpu;
return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
if (!cpu_has_fxsr)
return -ENODEV;
fpu__activate_fpstate_read(fpu);
fpstate_sanitize_xstate(fpu);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&fpu->state.fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
int ret;
if (!cpu_has_fxsr)
return -ENODEV;
fpu__activate_fpstate_write(fpu);
fpstate_sanitize_xstate(fpu);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpu->state.fxsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
/*
* update the header bits in the xsave header, indicating the
* presence of FP and SSE state.
*/
if (cpu_has_xsave)
fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;
return ret;
}
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct xregs_state *xsave;
int ret;
if (!cpu_has_xsave)
return -ENODEV;
fpu__activate_fpstate_read(fpu);
xsave = &fpu->state.xsave;
/*
* Copy the 48bytes defined by the software first into the xstate
* memory layout in the thread struct, so that we can copy the entire
* xstateregs to the user using one user_regset_copyout().
*/
memcpy(&xsave->i387.sw_reserved,
xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
/*
* Copy the xstate memory layout.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct xregs_state *xsave;
int ret;
if (!cpu_has_xsave)
return -ENODEV;
fpu__activate_fpstate_write(fpu);
xsave = &fpu->state.xsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
/*
* mxcsr reserved bits must be masked to zero for security reasons.
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
xsave->header.xfeatures &= xfeatures_mask;
/*
* These bits must be zero.
*/
memset(&xsave->header.reserved, 0, 48);
return ret;
}
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
* FPU tag word conversions.
*/
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
unsigned int tmp; /* to avoid 16 bit prefixes in the code */
/* Transform each pair of bits into 01 (valid) or 00 (empty) */
tmp = ~twd;
tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
/* and move the valid bits to the lower byte. */
tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
return tmp;
}
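/*
* Worked example (illustrative): i387 tag words carry two bits per register
* (00 valid, 01 zero, 10 special, 11 empty). For twd = 0xfffc (st0 valid,
* st1-st7 empty):
*
* tmp = ~twd = 0x0003
* (tmp | (tmp >> 1)) & 0x5555 = 0x0001
* ...pair-folding steps... = 0x0001
*
* i.e. one bit per register, set for every non-empty register, which is the
* FXSR tag format.
*/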
#define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID 0
#define FP_EXP_TAG_ZERO 1
#define FP_EXP_TAG_SPECIAL 2
#define FP_EXP_TAG_EMPTY 3
static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
struct _fpxreg *st;
u32 tos = (fxsave->swd >> 11) & 7;
u32 twd = (unsigned long) fxsave->twd;
u32 tag;
u32 ret = 0xffff0000u;
int i;
for (i = 0; i < 8; i++, twd >>= 1) {
if (twd & 0x1) {
st = FPREG_ADDR(fxsave, (i - tos) & 7);
switch (st->exponent & 0x7fff) {
case 0x7fff:
tag = FP_EXP_TAG_SPECIAL;
break;
case 0x0000:
if (!st->significand[0] &&
!st->significand[1] &&
!st->significand[2] &&
!st->significand[3])
tag = FP_EXP_TAG_ZERO;
else
tag = FP_EXP_TAG_SPECIAL;
break;
default:
if (st->significand[3] & 0x8000)
tag = FP_EXP_TAG_VALID;
else
tag = FP_EXP_TAG_SPECIAL;
break;
}
} else {
tag = FP_EXP_TAG_EMPTY;
}
ret |= tag << (2 * i);
}
return ret;
}
/*
* FXSR floating point environment conversions.
*/
void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
int i;
env->cwd = fxsave->cwd | 0xffff0000u;
env->swd = fxsave->swd | 0xffff0000u;
env->twd = twd_fxsr_to_i387(fxsave);
#ifdef CONFIG_X86_64
env->fip = fxsave->rip;
env->foo = fxsave->rdp;
/*
* should be actually ds/cs at fpu exception time, but
* that information is not available in 64bit mode.
*/
env->fcs = task_pt_regs(tsk)->cs;
if (tsk == current) {
savesegment(ds, env->fos);
} else {
env->fos = tsk->thread.ds;
}
env->fos |= 0xffff0000;
#else
env->fip = fxsave->fip;
env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
env->foo = fxsave->foo;
env->fos = fxsave->fos;
#endif
for (i = 0; i < 8; ++i)
memcpy(&to[i], &from[i], sizeof(to[0]));
}
void convert_to_fxsr(struct task_struct *tsk,
const struct user_i387_ia32_struct *env)
{
struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
fxsave->cwd = env->cwd;
fxsave->swd = env->swd;
fxsave->twd = twd_i387_to_fxsr(env->twd);
fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
fxsave->rip = env->fip;
fxsave->rdp = env->foo;
/* cs and ds ignored */
#else
fxsave->fip = env->fip;
fxsave->fcs = (env->fcs & 0xffff);
fxsave->foo = env->foo;
fxsave->fos = env->fos;
#endif
for (i = 0; i < 8; ++i)
memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
fpu__activate_fpstate_read(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
if (!cpu_has_fxsr)
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&fpu->state.fsave, 0,
-1);
fpstate_sanitize_xstate(fpu);
if (kbuf && pos == 0 && count == sizeof(env)) {
convert_from_fxsr(kbuf, target);
return 0;
}
convert_from_fxsr(&env, target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
struct user_i387_ia32_struct env;
int ret;
fpu__activate_fpstate_write(fpu);
fpstate_sanitize_xstate(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
if (!cpu_has_fxsr)
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpu->state.fsave, 0,
-1);
if (pos > 0 || count < sizeof(env))
convert_from_fxsr(&env, target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
if (!ret)
convert_to_fxsr(target, &env);
/*
* update the header bit in the xsave header, indicating the
* presence of FP.
*/
if (cpu_has_xsave)
fpu->state.xsave.header.xfeatures |= XSTATE_FP;
return ret;
}
/*
* FPU state for core dumps.
* This is only used for a.out dumps now.
* It is declared generically using elf_fpregset_t (which is
* struct user_i387_struct) but is in fact only used for 32-bit
* dumps, so on 64-bit it is really struct user_i387_ia32_struct.
*/
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
fpvalid = fpu->fpstate_active;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),
ufpu, NULL);
return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
| gpl-2.0 |
project-voodoo/linux_gt-i9000 | arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 907 | 1994 | /*
* arch/sh/kernel/cpu/sh2a/clock-sh7201.c
*
* SH7201 support for the clock framework
*
* Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk>
*
* Based on clock-sh4.c
* Copyright (C) 2005 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static const int pll1rate[]={1,2,3,4,6,8};
static const int pfc_divisors[]={1,2,3,4,6,8,12};
#define ifc_divisors pfc_divisors
#if (CONFIG_SH_CLK_MD == 0)
#define PLL2 (4)
#elif (CONFIG_SH_CLK_MD == 2)
#define PLL2 (2)
#elif (CONFIG_SH_CLK_MD == 3)
#define PLL2 (1)
#else
#error "Illegal Clock Mode!"
#endif
static void master_clk_init(struct clk *clk)
{
clk->rate = 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
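/*
* Worked example (illustrative, clock mode 0 so PLL2 == 4): if FREQCR[10:8]
* reads 2 then pll1rate[2] == 3 and the master clock is
*
* 10 MHz * 4 * 3 = 120 MHz
*
* The module/bus/CPU clocks below divide that rate by the pfc_divisors[] or
* ifc_divisors[] entry selected by the corresponding FREQCR field.
*/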
static struct clk_ops sh7201_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct clk_ops sh7201_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct clk_ops sh7201_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
return clk->parent->rate / ifc_divisors[idx];
}
static struct clk_ops sh7201_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct clk_ops *sh7201_clk_ops[] = {
&sh7201_master_clk_ops,
&sh7201_module_clk_ops,
&sh7201_bus_clk_ops,
&sh7201_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7201_clk_ops))
*ops = sh7201_clk_ops[idx];
}
| gpl-2.0 |
martyborya/N3-CM-Unified | drivers/platform/msm/qpnp-clkdiv.c | 1675 | 6987 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/types.h>
#include <linux/spmi.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/qpnp/clkdiv.h>
#define Q_MAX_DT_PROP_SIZE 32
#define Q_REG_ADDR(q_clkdiv, reg_offset) \
((q_clkdiv)->offset + reg_offset)
#define Q_REG_DIV_CTL1 0x43
#define Q_REG_EN_CTL 0x46
#define Q_SET_EN BIT(7)
#define Q_CXO_PERIOD_NS(_cxo_clk) (NSEC_PER_SEC / _cxo_clk)
#define Q_DIV_PERIOD_NS(_cxo_clk, _div) (NSEC_PER_SEC / (_cxo_clk / _div))
#define Q_ENABLE_DELAY_NS(_cxo_clk, _div) (2 * Q_CXO_PERIOD_NS(_cxo_clk) + \
3 * Q_DIV_PERIOD_NS(_cxo_clk, _div))
#define Q_DISABLE_DELAY_NS(_cxo_clk, _div) (3 * Q_DIV_PERIOD_NS(_cxo_clk, _div))
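/*
* Worked example (illustrative, assuming a 19.2 MHz CXO and a divide ratio
* of 2): one CXO period is ~52 ns and one divided period ~104 ns, so
*
* Q_ENABLE_DELAY_NS(19200000, 2) = 2 * 52 + 3 * 104 ~= 416 ns
* Q_DISABLE_DELAY_NS(19200000, 2) = 3 * 104 ~= 312 ns
*
* i.e. the settling delays scale with the divided clock period.
*/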
struct q_clkdiv {
uint32_t cxo_hz;
enum q_clkdiv_cfg cxo_div;
struct device_node *node;
uint16_t offset;
struct spmi_controller *ctrl;
bool enabled;
struct mutex lock;
struct list_head list;
uint8_t slave;
};
static LIST_HEAD(qpnp_clkdiv_devs);
/**
* qpnp_clkdiv_get - get a clkdiv handle
* @dev: client device pointer.
* @name: client specific name for the clock in question.
*
* Return a clkdiv handle given a client specific name. This name be a prefix
* for a property naming that takes a phandle to the actual clkdiv device.
*/
struct q_clkdiv *qpnp_clkdiv_get(struct device *dev, const char *name)
{
struct q_clkdiv *q_clkdiv;
struct device_node *divclk_node;
char prop_name[Q_MAX_DT_PROP_SIZE];
int n;
n = snprintf(prop_name, Q_MAX_DT_PROP_SIZE, "%s-clk", name);
if (n == Q_MAX_DT_PROP_SIZE)
return ERR_PTR(-EINVAL);
divclk_node = of_parse_phandle(dev->of_node, prop_name, 0);
if (divclk_node == NULL)
return ERR_PTR(-ENODEV);
list_for_each_entry(q_clkdiv, &qpnp_clkdiv_devs, list)
if (q_clkdiv->node == divclk_node)
return q_clkdiv;
return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL(qpnp_clkdiv_get);
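/*
* Usage sketch (illustrative; the node and property names are assumptions):
* a client wanting the divider described by &pm8941_div1 would carry
*
* sleep-clk = <&pm8941_div1>;
*
* in its DT node and resolve it at probe time with
*
* struct q_clkdiv *div = qpnp_clkdiv_get(dev, "sleep");
*
* since this helper appends "-clk" to the supplied name before the phandle
* lookup.
*/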
static int __clkdiv_enable(struct q_clkdiv *q_clkdiv, bool enable)
{
int rc;
char buf[1];
buf[0] = enable ? Q_SET_EN : 0;
mutex_lock(&q_clkdiv->lock);
rc = spmi_ext_register_writel(q_clkdiv->ctrl, q_clkdiv->slave,
Q_REG_ADDR(q_clkdiv, Q_REG_EN_CTL),
&buf[0], 1);
if (!rc)
q_clkdiv->enabled = enable;
mutex_unlock(&q_clkdiv->lock);
if (enable)
ndelay(Q_ENABLE_DELAY_NS(q_clkdiv->cxo_hz, q_clkdiv->cxo_div));
else
ndelay(Q_DISABLE_DELAY_NS(q_clkdiv->cxo_hz, q_clkdiv->cxo_div));
return rc;
}
/**
* qpnp_clkdiv_enable - enable a clkdiv
* @q_clkdiv: pointer to clkdiv handle
*/
int qpnp_clkdiv_enable(struct q_clkdiv *q_clkdiv)
{
return __clkdiv_enable(q_clkdiv, true);
}
EXPORT_SYMBOL(qpnp_clkdiv_enable);
/**
* qpnp_clkdiv_disable - disable a clkdiv
* @q_clkdiv: pointer to clkdiv handle
*/
int qpnp_clkdiv_disable(struct q_clkdiv *q_clkdiv)
{
return __clkdiv_enable(q_clkdiv, false);
}
EXPORT_SYMBOL(qpnp_clkdiv_disable);
/**
* @q_clkdiv: pointer to clkdiv handle
* @cfg: setting used to configure the output frequency
*
* Given a q_clkdiv_cfg setting, configure the corresponding clkdiv device
* for the desired output frequency.
*/
int qpnp_clkdiv_config(struct q_clkdiv *q_clkdiv, enum q_clkdiv_cfg cfg)
{
int rc;
char buf[1];
if (cfg < 0 || cfg >= Q_CLKDIV_INVALID)
return -EINVAL;
buf[0] = cfg;
mutex_lock(&q_clkdiv->lock);
if (q_clkdiv->enabled) {
rc = __clkdiv_enable(q_clkdiv, false);
if (rc) {
pr_err("unable to disable clock\n");
goto cfg_err;
}
}
rc = spmi_ext_register_writel(q_clkdiv->ctrl, q_clkdiv->slave,
Q_REG_ADDR(q_clkdiv, Q_REG_DIV_CTL1), &buf[0], 1);
if (rc) {
pr_err("enable to write config\n");
q_clkdiv->enabled = 0;
goto cfg_err;
}
q_clkdiv->cxo_div = cfg;
if (q_clkdiv->enabled) {
rc = __clkdiv_enable(q_clkdiv, true);
if (rc) {
pr_err("unable to re-enable clock\n");
goto cfg_err;
}
}
cfg_err:
mutex_unlock(&q_clkdiv->lock);
return rc;
}
EXPORT_SYMBOL(qpnp_clkdiv_config);
static int __devinit qpnp_clkdiv_probe(struct spmi_device *spmi)
{
struct q_clkdiv *q_clkdiv;
struct device_node *node = spmi->dev.of_node;
int rc;
uint32_t en;
struct resource *res;
q_clkdiv = devm_kzalloc(&spmi->dev, sizeof(*q_clkdiv), GFP_ATOMIC);
if (!q_clkdiv)
return -ENOMEM;
rc = of_property_read_u32(node, "qcom,cxo-freq",
&q_clkdiv->cxo_hz);
if (rc) {
dev_err(&spmi->dev,
"%s: unable to get qcom,cxo-freq property\n", __func__);
return rc;
}
res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&spmi->dev, "%s: unable to get device reg resource\n",
__func__);
return -EINVAL;
}
q_clkdiv->slave = spmi->sid;
q_clkdiv->offset = res->start;
q_clkdiv->ctrl = spmi->ctrl;
q_clkdiv->node = node;
mutex_init(&q_clkdiv->lock);
rc = of_property_read_u32(node, "qcom,cxo-div",
&q_clkdiv->cxo_div);
if (rc && rc != -EINVAL) {
dev_err(&spmi->dev,
"%s: error getting qcom,cxo-div property\n",
__func__);
return rc;
}
if (!rc) {
rc = qpnp_clkdiv_config(q_clkdiv, q_clkdiv->cxo_div);
if (rc) {
dev_err(&spmi->dev,
"%s: unable to set default divide config\n",
__func__);
return rc;
}
}
rc = of_property_read_u32(node, "qcom,enable", &en);
if (rc && rc != -EINVAL) {
dev_err(&spmi->dev,
"%s: error getting qcom,enable property\n", __func__);
return rc;
}
if (!rc) {
rc = __clkdiv_enable(q_clkdiv, en);
if (rc) {
dev_err(&spmi->dev,
"%s: unable to set default config\n", __func__);
return rc;
}
}
dev_set_drvdata(&spmi->dev, q_clkdiv);
list_add(&q_clkdiv->list, &qpnp_clkdiv_devs);
return 0;
}
static int __devexit qpnp_clkdiv_remove(struct spmi_device *spmi)
{
struct q_clkdiv *q_clkdiv = dev_get_drvdata(&spmi->dev);
list_del(&q_clkdiv->list);
return 0;
}
static struct of_device_id spmi_match_table[] = {
{ .compatible = "qcom,qpnp-clkdiv",
},
{}
};
static struct spmi_driver qpnp_clkdiv_driver = {
.driver = {
.name = "qcom,qpnp-clkdiv",
.of_match_table = spmi_match_table,
},
.probe = qpnp_clkdiv_probe,
.remove = __devexit_p(qpnp_clkdiv_remove),
};
static int __init qpnp_clkdiv_init(void)
{
return spmi_driver_register(&qpnp_clkdiv_driver);
}
static void __exit qpnp_clkdiv_exit(void)
{
spmi_driver_unregister(&qpnp_clkdiv_driver);
}
MODULE_DESCRIPTION("QPNP PMIC clkdiv driver");
MODULE_LICENSE("GPL v2");
module_init(qpnp_clkdiv_init);
module_exit(qpnp_clkdiv_exit);
| gpl-2.0 |
tcreech/tilegx-linux-3.4.68-politestackrehome | drivers/net/ethernet/intel/e1000e/82571.c | 1931 | 55856 | /*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/*
* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
* 82571EB Gigabit Ethernet Controller (Fiber)
* 82571EB Dual Port Gigabit Mezzanine Adapter
* 82571EB Quad Port Gigabit Mezzanine Adapter
* 82571PT Gigabit PT Quad Port Server ExpressModule
* 82572EI Gigabit Ethernet Controller (Copper)
* 82572EI Gigabit Ethernet Controller (Fiber)
* 82572EI Gigabit Ethernet Controller
* 82573V Gigabit Ethernet Controller (Copper)
* 82573E Gigabit Ethernet Controller (Copper)
* 82573L Gigabit Ethernet Controller
* 82574L Gigabit Network Connection
* 82583V Gigabit Network Connection
*/
#include "e1000.h"
#define ID_LED_RESERVED_F746 0xF746
#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_ON2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
#define E1000_BASE1000T_STATUS 10
#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
#define E1000_RECEIVE_ERROR_COUNTER 21
#define E1000_RECEIVE_ERROR_MAX 0xFFFF
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
static s32 e1000_setup_link_82571(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
static void e1000_clear_vfta_82571(struct e1000_hw *hw);
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
if (hw->phy.media_type != e1000_media_type_copper) {
phy->type = e1000_phy_none;
return 0;
}
phy->addr = 1;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->reset_delay_us = 100;
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_82571;
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
phy->type = e1000_phy_igp_2;
break;
case e1000_82573:
phy->type = e1000_phy_m88;
break;
case e1000_82574:
case e1000_82583:
phy->type = e1000_phy_bm;
phy->ops.acquire = e1000_get_hw_semaphore_82574;
phy->ops.release = e1000_put_hw_semaphore_82574;
phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
break;
default:
return -E1000_ERR_PHY;
break;
}
/* This can only be done after all function pointers are setup. */
ret_val = e1000_get_phy_id_82571(hw);
if (ret_val) {
e_dbg("Error getting PHY ID\n");
return ret_val;
}
/* Verify phy id */
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
if (phy->id != IGP01E1000_I_PHY_ID)
ret_val = -E1000_ERR_PHY;
break;
case e1000_82573:
if (phy->id != M88E1111_I_PHY_ID)
ret_val = -E1000_ERR_PHY;
break;
case e1000_82574:
case e1000_82583:
if (phy->id != BME1000_E_PHY_ID_R2)
ret_val = -E1000_ERR_PHY;
break;
default:
ret_val = -E1000_ERR_PHY;
break;
}
if (ret_val)
e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
return ret_val;
}
/**
* e1000_init_nvm_params_82571 - Init NVM func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
u16 size;
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
switch (nvm->override) {
case e1000_nvm_override_spi_large:
nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
break;
}
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (((eecd >> 15) & 0x3) == 0x3) {
nvm->type = e1000_nvm_flash_hw;
nvm->word_size = 2048;
/*
* Autonomous Flash update bit must be cleared due
* to Flash update issue.
*/
eecd &= ~E1000_EECD_AUPDEN;
ew32(EECD, eecd);
break;
}
/* Fall Through */
default:
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
/*
* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
nvm->word_size = 1 << size;
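/*
 * Worked example (assuming the usual NVM_WORD_SIZE_BASE_SHIFT of 6):
 * a size field of 2 gives word_size = 1 << (2 + 6) = 256 words.
 */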
break;
}
/* Function Pointers */
switch (hw->mac.type) {
case e1000_82574:
case e1000_82583:
nvm->ops.acquire = e1000_get_hw_semaphore_82574;
nvm->ops.release = e1000_put_hw_semaphore_82574;
break;
default:
break;
}
return 0;
}
/**
* e1000_init_mac_params_82571 - Init MAC func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 swsm = 0;
u32 swsm2 = 0;
bool force_clear_smbi = false;
/* Set media type and media-dependent function pointers */
switch (hw->adapter->pdev->device) {
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82572EI_FIBER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
hw->phy.media_type = e1000_media_type_fiber;
mac->ops.setup_physical_interface =
e1000_setup_fiber_serdes_link_82571;
mac->ops.check_for_link = e1000e_check_for_fiber_link;
mac->ops.get_link_up_info =
e1000e_get_speed_and_duplex_fiber_serdes;
break;
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82571EB_SERDES_DUAL:
case E1000_DEV_ID_82571EB_SERDES_QUAD:
case E1000_DEV_ID_82572EI_SERDES:
hw->phy.media_type = e1000_media_type_internal_serdes;
mac->ops.setup_physical_interface =
e1000_setup_fiber_serdes_link_82571;
mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
mac->ops.get_link_up_info =
e1000e_get_speed_and_duplex_fiber_serdes;
break;
default:
hw->phy.media_type = e1000_media_type_copper;
mac->ops.setup_physical_interface =
e1000_setup_copper_link_82571;
mac->ops.check_for_link = e1000e_check_for_copper_link;
mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper;
break;
}
/* Set mta register count */
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
/* MAC-specific function pointers */
switch (hw->mac.type) {
case e1000_82573:
mac->ops.set_lan_id = e1000_set_lan_id_single_port;
mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
mac->ops.led_on = e1000e_led_on_generic;
mac->ops.blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
/*
* ARC supported; valid only if manageability features are
* enabled.
*/
mac->arc_subsystem_valid =
(er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
break;
case e1000_82574:
case e1000_82583:
mac->ops.set_lan_id = e1000_set_lan_id_single_port;
mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
mac->ops.led_on = e1000_led_on_82574;
break;
default:
mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
mac->ops.led_on = e1000e_led_on_generic;
mac->ops.blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
break;
}
/*
* Ensure that the inter-port SWSM.SMBI lock bit is clear before
* first NVM or PHY access. This should be done for single-port
* devices, and for one port only on dual-port devices so that
* for those devices we can still use the SMBI lock to synchronize
* inter-port accesses to the PHY & NVM.
*/
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
swsm2 = er32(SWSM2);
if (!(swsm2 & E1000_SWSM2_LOCK)) {
/* Only do this for the first interface on this card */
ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK);
force_clear_smbi = true;
} else {
force_clear_smbi = false;
}
break;
default:
force_clear_smbi = true;
break;
}
if (force_clear_smbi) {
/* Make sure SWSM.SMBI is clear */
swsm = er32(SWSM);
if (swsm & E1000_SWSM_SMBI) {
/* This bit should not be set on a first interface, and
* indicates that the bootagent or EFI code has
* improperly left this bit enabled
*/
e_dbg("Please update your 82571 Bootagent\n");
}
ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
}
/*
* Initialize device specific counter of SMBI acquisition
* timeouts.
*/
hw->dev_spec.e82571.smb_counter = 0;
return 0;
}
static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
static int global_quad_port_a; /* global port a indication */
struct pci_dev *pdev = adapter->pdev;
int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
s32 rc;
rc = e1000_init_mac_params_82571(hw);
if (rc)
return rc;
rc = e1000_init_nvm_params_82571(hw);
if (rc)
return rc;
rc = e1000_init_phy_params_82571(hw);
if (rc)
return rc;
/* tag quad port adapters first, it's used below */
switch (pdev->device) {
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
case E1000_DEV_ID_82571PT_QUAD_COPPER:
adapter->flags |= FLAG_IS_QUAD_PORT;
/* mark the first port */
if (global_quad_port_a == 0)
adapter->flags |= FLAG_IS_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
global_quad_port_a++;
if (global_quad_port_a == 4)
global_quad_port_a = 0;
break;
default:
break;
}
switch (adapter->hw.mac.type) {
case e1000_82571:
/* these dual ports don't have WoL on port B at all */
if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
(pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
(pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
(is_port_b))
adapter->flags &= ~FLAG_HAS_WOL;
/* quad ports only support WoL on port A */
if (adapter->flags & FLAG_IS_QUAD_PORT &&
(!(adapter->flags & FLAG_IS_QUAD_PORT_A)))
adapter->flags &= ~FLAG_HAS_WOL;
/* Does not support WoL on any port */
if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
adapter->flags &= ~FLAG_HAS_WOL;
break;
case e1000_82573:
if (pdev->device == E1000_DEV_ID_82573L) {
adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = DEFAULT_JUMBO;
}
break;
default:
break;
}
return 0;
}
/**
* e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
* @hw: pointer to the HW structure
*
* Reads the PHY registers and stores the PHY ID and possibly the PHY
* revision in the hardware structure.
**/
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_id = 0;
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
/*
* The 82571 firmware may still be configuring the PHY.
* In this case, we cannot access the PHY until the
* configuration is done. So we explicitly set the
* PHY ID.
*/
phy->id = IGP01E1000_I_PHY_ID;
break;
case e1000_82573:
return e1000e_get_phy_id(hw);
break;
case e1000_82574:
case e1000_82583:
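/*
 * The 32-bit PHY ID is assembled from two 16-bit MDIO registers:
 * PHY_ID1 supplies the upper half, PHY_ID2 the lower half, and the
 * low nibble of PHY_ID2 carries the revision.
 */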
ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
if (ret_val)
return ret_val;
phy->id |= (u32)(phy_id);
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
break;
default:
return -E1000_ERR_PHY;
break;
}
return 0;
}
/**
* e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
s32 sw_timeout = hw->nvm.word_size + 1;
s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
/*
* If we have timed out 3 times trying to acquire
* the inter-port SMBI semaphore, there is old code
* operating on the other port, and it is not
* releasing SMBI. Modify the number of times that
* we try for the semaphore to interwork with this
* older code.
*/
if (hw->dev_spec.e82571.smb_counter > 2)
sw_timeout = 1;
/* Get the SW semaphore */
while (i < sw_timeout) {
swsm = er32(SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
udelay(50);
i++;
}
if (i == sw_timeout) {
e_dbg("Driver can't access device - SMBI bit is set.\n");
hw->dev_spec.e82571.smb_counter++;
}
/* Get the FW semaphore. */
for (i = 0; i < fw_timeout; i++) {
swsm = er32(SWSM);
ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
udelay(50);
}
if (i == fw_timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_82571(hw);
e_dbg("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return 0;
}
/**
* e1000_put_hw_semaphore_82571 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
swsm = er32(SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
/**
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore during reset.
*
**/
static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
s32 i = 0;
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
do {
ew32(EXTCNF_CTRL, extcnf_ctrl);
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
break;
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
usleep_range(2000, 4000);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
if (i == MDIO_OWNERSHIP_TIMEOUT) {
/* Release semaphores */
e1000_put_hw_semaphore_82573(hw);
e_dbg("Driver can't access the PHY\n");
return -E1000_ERR_PHY;
}
return 0;
}
/**
* e1000_put_hw_semaphore_82573 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used during reset.
*
**/
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
ew32(EXTCNF_CTRL, extcnf_ctrl);
}
static DEFINE_MUTEX(swflag_mutex);
/**
* e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM.
*
**/
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
{
s32 ret_val;
mutex_lock(&swflag_mutex);
ret_val = e1000_get_hw_semaphore_82573(hw);
if (ret_val)
mutex_unlock(&swflag_mutex);
return ret_val;
}
/**
* e1000_put_hw_semaphore_82574 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
*
**/
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
{
e1000_put_hw_semaphore_82573(hw);
mutex_unlock(&swflag_mutex);
}
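/*
 * Illustrative pairing (sketch only): on 82574/82583 these helpers are
 * installed as the PHY/NVM acquire and release ops, so callers usually
 * go through the ops table rather than calling them directly:
 *
 *	ret = hw->phy.ops.acquire(hw);
 *	if (!ret) {
 *		... PHY or NVM access ...
 *		hw->phy.ops.release(hw);
 *	}
 */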
/**
* e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: true to enable LPLU, false to disable
*
* Sets the LPLU D0 state according to the active flag.
* LPLU will not be activated unless the
* device autonegotiation advertisement meets standards of
* either 10 or 10/100 or 10/100/1000 at all duplexes.
* This is a function pointer entry point only called by
* PHY setup routines.
**/
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
{
u32 data = er32(POEMB); /* POEMB is a 32-bit register; don't truncate */
if (active)
data |= E1000_PHY_CTRL_D0A_LPLU;
else
data &= ~E1000_PHY_CTRL_D0A_LPLU;
ew32(POEMB, data);
return 0;
}
/**
* e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
* @hw: pointer to the HW structure
* @active: boolean used to enable/disable lplu
*
* The low power link up (lplu) state is set to the power management level D3
* when active is true, else clear lplu for D3. LPLU
* is used during Dx states where the power conservation is most important.
* During driver activity, SmartSpeed should be enabled so performance is
* maintained.
**/
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
{
u32 data = er32(POEMB); /* POEMB is a 32-bit register; don't truncate */
if (!active) {
data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
(hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
(hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
data |= E1000_PHY_CTRL_NOND0A_LPLU;
}
ew32(POEMB, data);
return 0;
}
/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
* @hw: pointer to the HW structure
*
* To gain access to the EEPROM, first we must obtain a hardware semaphore.
* Then for non-82573 hardware, set the EEPROM access request bit and wait
* for EEPROM access grant bit. If the access grant bit is not set, release
* hardware semaphore.
**/
static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
{
s32 ret_val;
ret_val = e1000_get_hw_semaphore_82571(hw);
if (ret_val)
return ret_val;
switch (hw->mac.type) {
case e1000_82573:
break;
default:
ret_val = e1000e_acquire_nvm(hw);
break;
}
if (ret_val)
e1000_put_hw_semaphore_82571(hw);
return ret_val;
}
/**
* e1000_release_nvm_82571 - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
**/
static void e1000_release_nvm_82571(struct e1000_hw *hw)
{
e1000e_release_nvm(hw);
e1000_put_hw_semaphore_82571(hw);
}
/**
* e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
* @hw: pointer to the HW structure
* @offset: offset within the EEPROM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the EEPROM
*
* For non-82573 silicon, write data to EEPROM at offset using SPI interface.
*
* If e1000e_update_nvm_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 ret_val;
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
break;
case e1000_82571:
case e1000_82572:
ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
break;
default:
ret_val = -E1000_ERR_NVM;
break;
}
return ret_val;
}
/**
* e1000_update_nvm_checksum_82571 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
{
u32 eecd;
s32 ret_val;
u16 i;
ret_val = e1000e_update_nvm_checksum_generic(hw);
if (ret_val)
return ret_val;
/*
* If our nvm is an EEPROM, then we're done
* otherwise, commit the checksum to the flash NVM.
*/
if (hw->nvm.type != e1000_nvm_flash_hw)
return 0;
/* Check for pending operations. */
for (i = 0; i < E1000_FLASH_UPDATES; i++) {
usleep_range(1000, 2000);
if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
break;
}
if (i == E1000_FLASH_UPDATES)
return -E1000_ERR_NVM;
/* Reset the firmware if using STM opcode. */
if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
/*
* The enabling of and the actual reset must be done
* in two write cycles.
*/
ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
e1e_flush();
ew32(HICR, E1000_HICR_FW_RESET);
}
/* Commit the write to flash */
eecd = er32(EECD) | E1000_EECD_FLUPD;
ew32(EECD, eecd);
for (i = 0; i < E1000_FLASH_UPDATES; i++) {
usleep_range(1000, 2000);
if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
break;
}
if (i == E1000_FLASH_UPDATES)
return -E1000_ERR_NVM;
return 0;
}
/**
* e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
{
if (hw->nvm.type == e1000_nvm_flash_hw)
e1000_fix_nvm_checksum_82571(hw);
return e1000e_validate_nvm_checksum_generic(hw);
}
/**
* e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
* @hw: pointer to the HW structure
* @offset: offset within the EEPROM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the EEPROM
*
* After checking for invalid values, poll the EEPROM to ensure the previous
* command has completed before trying to write the next word. After write
* poll for completion.
*
* If e1000e_update_nvm_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, eewr = 0;
s32 ret_val = 0;
/*
* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
e_dbg("nvm parameter(s) out of bounds\n");
return -E1000_ERR_NVM;
}
for (i = 0; i < words; i++) {
eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
E1000_NVM_RW_REG_START;
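/*
 * EEWR layout as composed above, assuming the usual field definitions:
 * write data in bits 31:16 (E1000_NVM_RW_REG_DATA == 16), the word
 * address from bit 2 (E1000_NVM_RW_ADDR_SHIFT), and START at bit 0.
 */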
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
break;
ew32(EEWR, eewr);
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
break;
}
return ret_val;
}
/**
* e1000_get_cfg_done_82571 - Poll for configuration done
* @hw: pointer to the HW structure
*
* Reads the management control register for the config done bit to be set.
**/
static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
while (timeout) {
if (er32(EEMNGCTL) &
E1000_NVM_CFG_DONE_PORT_0)
break;
usleep_range(1000, 2000);
timeout--;
}
if (!timeout) {
e_dbg("MNG configuration cycle has not completed.\n");
return -E1000_ERR_RESET;
}
return 0;
}
/**
* e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: true to enable LPLU, false to disable
*
* Sets the LPLU D0 state according to the active flag. When activating LPLU
* this function also disables smart speed and vice versa. LPLU will not be
* activated unless the device autonegotiation advertisement meets standards
* of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
* pointer entry point only called by PHY setup routines.
**/
static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
if (ret_val)
return ret_val;
if (active) {
data |= IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
if (ret_val)
return ret_val;
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
return ret_val;
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
}
}
return 0;
}
/**
* e1000_reset_hw_82571 - Reset hardware
* @hw: pointer to the HW structure
*
* This resets the hardware into a known state.
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
u32 ctrl, ctrl_ext;
s32 ret_val;
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
if (ret_val)
e_dbg("PCI-E Master disable polling has failed.\n");
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
ew32(RCTL, 0);
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
usleep_range(10000, 20000);
/*
* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
*/
switch (hw->mac.type) {
case e1000_82573:
ret_val = e1000_get_hw_semaphore_82573(hw);
break;
case e1000_82574:
case e1000_82583:
ret_val = e1000_get_hw_semaphore_82574(hw);
break;
default:
break;
}
if (ret_val)
e_dbg("Cannot acquire MDIO ownership\n");
ctrl = er32(CTRL);
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
/* Must release MDIO ownership and mutex after MAC reset. */
switch (hw->mac.type) {
case e1000_82574:
case e1000_82583:
e1000_put_hw_semaphore_82574(hw);
break;
default:
break;
}
if (hw->nvm.type == e1000_nvm_flash_hw) {
udelay(10);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
}
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val)
/* We don't want to continue accessing MAC registers. */
return ret_val;
/*
* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
* Need to wait for Phy configuration completion before accessing
* NVM and Phy.
*/
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
msleep(25);
break;
default:
break;
}
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
er32(ICR);
if (hw->mac.type == e1000_82571) {
/* Install any alternate MAC address into RAR0 */
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
return ret_val;
e1000e_set_laa_state_82571(hw, true);
}
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
hw->mac.serdes_link_state = e1000_serdes_link_down;
return 0;
}
/**
* e1000_init_hw_82571 - Initialize hardware
* @hw: pointer to the HW structure
*
* This inits the hardware readying it for operation.
**/
static s32 e1000_init_hw_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 reg_data;
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
e1000_initialize_hw_bits_82571(hw);
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
if (ret_val)
e_dbg("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
mac->ops.clear_vfta(hw);
/* Setup the receive address. */
/*
* If, however, a locally administered address was assigned to the
* 82571, we must reserve a RAR for it to work around an issue where
* resetting one port will reload the MAC on the other port.
*/
if (e1000e_get_laa_state_82571(hw))
rar_count--;
e1000e_init_rx_addrs(hw, rar_count);
/* Zero out the Multicast HASH table */
e_dbg("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
/* Setup link and flow control */
ret_val = mac->ops.setup_link(hw);
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
switch (mac->type) {
case e1000_82573:
e1000e_enable_tx_pkt_filtering(hw);
/* fall through */
case e1000_82574:
case e1000_82583:
reg_data = er32(GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
ew32(GCR, reg_data);
break;
default:
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL(1), reg_data);
break;
}
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
*/
e1000_clear_hw_cntrs_82571(hw);
return ret_val;
}
/**
* e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
* @hw: pointer to the HW structure
*
* Initializes required hardware-dependent bits needed for normal operation.
**/
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
{
u32 reg;
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
reg |= (1 << 22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
reg |= (1 << 22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
break;
case e1000_82574:
case e1000_82583:
reg |= (1 << 26);
break;
default:
break;
}
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
reg &= ~((1 << 29) | (1 << 30));
reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
if (er32(TCTL) & E1000_TCTL_MULR)
reg &= ~(1 << 28);
else
reg |= (1 << 28);
ew32(TARC(1), reg);
break;
default:
break;
}
/* Device Control */
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
reg = er32(CTRL);
reg &= ~(1 << 29);
ew32(CTRL, reg);
break;
default:
break;
}
/* Extended Device Control */
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
reg = er32(CTRL_EXT);
reg &= ~(1 << 23);
reg |= (1 << 22);
ew32(CTRL_EXT, reg);
break;
default:
break;
}
if (hw->mac.type == e1000_82571) {
reg = er32(PBA_ECC);
reg |= E1000_PBA_ECC_CORR_EN;
ew32(PBA_ECC, reg);
}
/*
* Workaround for hardware errata.
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
*/
if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
reg = er32(CTRL_EXT);
reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
ew32(CTRL_EXT, reg);
}
/* PCI-Ex Control Registers */
switch (hw->mac.type) {
case e1000_82574:
case e1000_82583:
reg = er32(GCR);
reg |= (1 << 22);
ew32(GCR, reg);
/*
* Workaround for hardware errata.
* apply workaround for hardware errata documented in errata
* docs Fixes issue where some error prone or unreliable PCIe
* completions are occurring, particularly with ASPM enabled.
* Without fix, issue can cause Tx timeouts.
*/
reg = er32(GCR2);
reg |= 1;
ew32(GCR2, reg);
break;
default:
break;
}
}
/**
* e1000_clear_vfta_82571 - Clear VLAN filter table
* @hw: pointer to the HW structure
*
* Clears the register array which contains the VLAN filter table by
* setting all the values to 0.
**/
static void e1000_clear_vfta_82571(struct e1000_hw *hw)
{
u32 offset;
u32 vfta_value = 0;
u32 vfta_offset = 0;
u32 vfta_bit_in_reg = 0;
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (hw->mng_cookie.vlan_id != 0) {
/*
* The VFTA is a 4096-bit field; each bit identifies
* a single VLAN ID. The following operations
* determine which 32b entry (i.e. offset) into the
* array we want to set the VLAN ID (i.e. bit) of
* the manageability unit.
*/
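/*
 * Worked example (assuming the usual shift/mask values of 5 and
 * 0x1f): for manageability VLAN ID 100, vfta_offset = 100 >> 5 = 3
 * and vfta_bit_in_reg = 1 << (100 & 0x1f) = 1 << 4.
 */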
vfta_offset = (hw->mng_cookie.vlan_id >>
E1000_VFTA_ENTRY_SHIFT) &
E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
break;
}
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/*
* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of
* the manageability unit.
*/
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
e1e_flush();
}
}
/**
* e1000_check_mng_mode_82574 - Check manageability is enabled
* @hw: pointer to the HW structure
*
* Reads the NVM Initialization Control Word 2 and returns true
* (>0) if any manageability is enabled, else false (0).
**/
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
{
u16 data;
e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
}
/**
* e1000_led_on_82574 - Turn LED on
* @hw: pointer to the HW structure
*
* Turn LED on.
**/
static s32 e1000_led_on_82574(struct e1000_hw *hw)
{
u32 ctrl;
u32 i;
ctrl = hw->mac.ledctl_mode2;
if (!(E1000_STATUS_LU & er32(STATUS))) {
/*
* If no link, then turn LED on by setting the invert bit
* for each LED that's "on" (0x0E) in ledctl_mode2.
*/
for (i = 0; i < 4; i++)
if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
E1000_LEDCTL_MODE_LED_ON)
ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
}
ew32(LEDCTL, ctrl);
return 0;
}
/**
* e1000_check_phy_82574 - check 82574 phy hung state
* @hw: pointer to the HW structure
*
* Returns whether phy is hung or not
**/
bool e1000_check_phy_82574(struct e1000_hw *hw)
{
u16 status_1kbt = 0;
u16 receive_errors = 0;
s32 ret_val = 0;
/*
* Read the PHY Receive Error counter first. If it is at its max
* (all F's), read the Base1000T status register; if both are at
* max, the PHY is hung.
*/
ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
if (ret_val)
return false;
if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
if (ret_val)
return false;
if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
E1000_IDLE_ERROR_COUNT_MASK)
return true;
}
return false;
}
/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
* Determines which flow control settings to use, then configures flow
* control. Calls the appropriate media-specific link configuration
* function. Assuming the adapter has a valid link partner, a valid link
* should be established. Assumes the hardware has previously been reset
* and the transmitter and receiver are not enabled.
**/
static s32 e1000_setup_link_82571(struct e1000_hw *hw)
{
/*
* 82573 does not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (hw->fc.requested_mode == e1000_fc_default)
hw->fc.requested_mode = e1000_fc_full;
break;
default:
break;
}
return e1000e_setup_link_generic(hw);
}
/**
* e1000_setup_copper_link_82571 - Configure copper link settings
* @hw: pointer to the HW structure
*
* Configures the link for auto-neg or forced speed and duplex. Then we check
* for link, once link is established calls to configure collision distance
* and flow control are called.
**/
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
switch (hw->phy.type) {
case e1000_phy_m88:
case e1000_phy_bm:
ret_val = e1000e_copper_link_setup_m88(hw);
break;
case e1000_phy_igp_2:
ret_val = e1000e_copper_link_setup_igp(hw);
break;
default:
return -E1000_ERR_PHY;
break;
}
if (ret_val)
return ret_val;
return e1000e_setup_copper_link(hw);
}
/**
* e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
* @hw: pointer to the HW structure
*
* Configures collision distance and flow control for fiber and serdes links.
* Upon successful setup, poll for link.
**/
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
{
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
/*
* If SerDes loopback mode is entered, there is no form
* of reset to take the adapter out of that mode. So we
* have to explicitly take the adapter out of loopback
* mode. This prevents drivers from twiddling their thumbs
* if another tool failed to take it out of loopback mode.
*/
ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
break;
default:
break;
}
return e1000e_setup_fiber_serdes_link(hw);
}
/**
* e1000_check_for_serdes_link_82571 - Check for link (Serdes)
* @hw: pointer to the HW structure
*
* Reports the link state as up or down.
*
* If autonegotiation is supported by the link partner, the link state is
* determined by the result of autonegotiation. This is the most likely case.
* If autonegotiation is not supported by the link partner, and the link
* has a valid signal, force the link up.
*
* The link state is represented internally here by 4 states:
*
* 1) down
* 2) autoneg_progress
* 3) autoneg_complete (the link successfully autonegotiated)
* 4) forced_up (the link has been forced up, it did not autonegotiate)
*
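* Transitions implemented below: DOWN -> AN_PROG once the receiver
* gains sync; AN_PROG -> AN_UP when /C/ ordered sets arrive and LU is
* set (or -> DOWN if LU stays clear); AN_PROG -> FORCED_UP when the
* partner does not send /C/; FORCED_UP -> AN_PROG if /C/ reappears;
* AN_UP -> AN_PROG if LU is lost; any state -> DOWN on loss of sync.
*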
**/
static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 rxcw;
u32 ctrl;
u32 status;
u32 txcw;
u32 i;
s32 ret_val = 0;
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
/* SYNCH bit and IV bit are sticky */
udelay(10);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
/* Receiver is synchronized with no invalid bits. */
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
if (!(status & E1000_STATUS_LU)) {
/*
* We have lost link, retry autoneg before
* reporting link failure
*/
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("AN_UP -> AN_PROG\n");
} else {
mac->serdes_has_link = true;
}
break;
case e1000_serdes_link_forced_up:
/*
* If we are receiving /C/ ordered sets, re-enable
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
*/
if (rxcw & E1000_RXCW_C) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("FORCED_UP -> AN_PROG\n");
} else {
mac->serdes_has_link = true;
}
break;
case e1000_serdes_link_autoneg_progress:
if (rxcw & E1000_RXCW_C) {
/*
* We received /C/ ordered sets, meaning the
* link partner has autonegotiated, and we can
* trust the Link Up (LU) status bit.
*/
if (status & E1000_STATUS_LU) {
mac->serdes_link_state =
e1000_serdes_link_autoneg_complete;
e_dbg("AN_PROG -> AN_UP\n");
mac->serdes_has_link = true;
} else {
/* Autoneg completed, but failed. */
mac->serdes_link_state =
e1000_serdes_link_down;
e_dbg("AN_PROG -> DOWN\n");
}
} else {
/*
* The link partner did not autoneg.
* Force link up and full duplex, and change
* state to forced.
*/
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
ew32(CTRL, ctrl);
/* Configure Flow Control after link up. */
ret_val = e1000e_config_fc_after_link_up(hw);
if (ret_val) {
e_dbg("Error config flow control\n");
break;
}
mac->serdes_link_state =
e1000_serdes_link_forced_up;
mac->serdes_has_link = true;
e_dbg("AN_PROG -> FORCED_UP\n");
}
break;
case e1000_serdes_link_down:
default:
/*
* The link was down but the receiver has now gained
* valid sync, so let's see if we can bring the link
* up.
*/
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("DOWN -> AN_PROG\n");
break;
}
} else {
if (!(rxcw & E1000_RXCW_SYNCH)) {
mac->serdes_has_link = false;
mac->serdes_link_state = e1000_serdes_link_down;
e_dbg("ANYSTATE -> DOWN\n");
} else {
/*
* Check several times, if Sync and Config
* both are consistently 1 then simply ignore
* the Invalid bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
udelay(10);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_IV) &&
!((rxcw & E1000_RXCW_SYNCH) &&
(rxcw & E1000_RXCW_C))) {
mac->serdes_has_link = false;
mac->serdes_link_state =
e1000_serdes_link_down;
e_dbg("ANYSTATE -> DOWN\n");
break;
}
}
if (i == AN_RETRY_COUNT) {
txcw = er32(TXCW);
txcw |= E1000_TXCW_ANE;
ew32(TXCW, txcw);
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("ANYSTATE -> AN_PROG\n");
}
}
}
return ret_val;
}
/**
* e1000_valid_led_default_82571 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
*
* Read the EEPROM for the current default LED configuration. If the
* LED configuration is not valid, set to a valid LED configuration.
**/
static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
{
s32 ret_val;
ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
if (ret_val) {
e_dbg("NVM Read Error\n");
return ret_val;
}
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (*data == ID_LED_RESERVED_F746)
*data = ID_LED_DEFAULT_82573;
break;
default:
if (*data == ID_LED_RESERVED_0000 ||
*data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT;
break;
}
return 0;
}
/**
* e1000e_get_laa_state_82571 - Get locally administered address state
* @hw: pointer to the HW structure
*
* Retrieve and return the current locally administered address state.
**/
bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
{
if (hw->mac.type != e1000_82571)
return false;
return hw->dev_spec.e82571.laa_is_present;
}
/**
* e1000e_set_laa_state_82571 - Set locally administered address state
* @hw: pointer to the HW structure
* @state: enable/disable locally administered address
*
* Enable/Disable the current locally administered address state.
**/
void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
{
if (hw->mac.type != e1000_82571)
return;
hw->dev_spec.e82571.laa_is_present = state;
/* If workaround is activated... */
if (state)
/*
* Hold a copy of the LAA in RAR[14]. This is done so that
* between the time RAR[0] gets clobbered and the time it
* gets fixed, the actual LAA is in one of the RARs and no
* incoming packets directed to this port are dropped.
* Eventually the LAA will be in RAR[0] and RAR[14].
*/
e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
}
/**
* e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
* @hw: pointer to the HW structure
*
* Verifies that the EEPROM has completed the update. After updating the
* EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If
* the checksum fix is not implemented, we need to set the bit and update
* the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
* we need to return bad checksum.
**/
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
s32 ret_val;
u16 data;
if (nvm->type != e1000_nvm_flash_hw)
return 0;
/*
* Check bit 4 of word 10h. If it is 0, firmware is done updating
* 10h-12h. Checksum may need to be fixed.
*/
ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
if (ret_val)
return ret_val;
if (!(data & 0x10)) {
/*
* Read 0x23 and check bit 15. This bit is a 1
* when the checksum has already been fixed. If
* the checksum is still wrong and this bit is a
* 1, we need to return bad checksum. Otherwise,
* we need to set this bit to a 1 and update the
* checksum.
*/
ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
if (ret_val)
return ret_val;
if (!(data & 0x8000)) {
data |= 0x8000;
ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
}
}
return 0;
}
/**
* e1000_read_mac_addr_82571 - Read device MAC address
* @hw: pointer to the HW structure
**/
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
if (hw->mac.type == e1000_82571) {
s32 ret_val = 0;
/*
* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
return ret_val;
}
return e1000_read_mac_addr_generic(hw);
}
/**
* e1000_power_down_phy_copper_82571 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
* Remove the link when the PHY is powered down to save power, when link is
* turned off during a driver unload, or when wake-on-LAN is not enabled.
**/
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
struct e1000_mac_info *mac = &hw->mac;
if (!phy->ops.check_reset_block)
return;
/* If the management interface is not enabled, then power down */
if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
}
/**
* e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
* @hw: pointer to the HW structure
*
* Clears the hardware counters by reading the counter registers.
**/
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
{
e1000e_clear_hw_cntrs_base(hw);
er32(PRC64);
er32(PRC127);
er32(PRC255);
er32(PRC511);
er32(PRC1023);
er32(PRC1522);
er32(PTC64);
er32(PTC127);
er32(PTC255);
er32(PTC511);
er32(PTC1023);
er32(PTC1522);
er32(ALGNERRC);
er32(RXERRC);
er32(TNCRS);
er32(CEXTERR);
er32(TSCTC);
er32(TSCTFC);
er32(MGTPRC);
er32(MGTPDC);
er32(MGTPTC);
er32(IAC);
er32(ICRXOC);
er32(ICRXPTC);
er32(ICRXATC);
er32(ICTXPTC);
er32(ICTXATC);
er32(ICTXQEC);
er32(ICTXQMTC);
er32(ICRXDMTC);
}
static const struct e1000_mac_operations e82571_mac_ops = {
/* .check_mng_mode: mac type dependent */
/* .check_for_link: media type dependent */
.id_led_init = e1000e_id_led_init_generic,
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
.set_lan_id = e1000_set_lan_id_multi_port_pcie,
/* .get_link_up_info: media type dependent */
/* .led_on: mac type dependent */
.led_off = e1000e_led_off_generic,
.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.write_vfta = e1000_write_vfta_generic,
.clear_vfta = e1000_clear_vfta_82571,
.reset_hw = e1000_reset_hw_82571,
.init_hw = e1000_init_hw_82571,
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
.setup_led = e1000e_setup_led_generic,
.config_collision_dist = e1000e_config_collision_dist_generic,
.read_mac_addr = e1000_read_mac_addr_82571,
};
static const struct e1000_phy_operations e82_phy_ops_igp = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_igp,
.check_reset_block = e1000e_check_reset_block_generic,
.commit = NULL,
.force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
.get_cfg_done = e1000_get_cfg_done_82571,
.get_cable_length = e1000e_get_cable_length_igp_2,
.get_info = e1000e_get_phy_info_igp,
.read_reg = e1000e_read_phy_reg_igp,
.release = e1000_put_hw_semaphore_82571,
.reset = e1000e_phy_hw_reset_generic,
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_igp,
.cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_m88 = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
.get_cfg_done = e1000e_get_cfg_done,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_m88,
.release = e1000_put_hw_semaphore_82571,
.reset = e1000e_phy_hw_reset_generic,
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_m88,
.cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_bm = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
.get_cfg_done = e1000e_get_cfg_done,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_bm2,
.release = e1000_put_hw_semaphore_82571,
.reset = e1000e_phy_hw_reset_generic,
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_bm2,
.cfg_on_link_up = NULL,
};
static const struct e1000_nvm_operations e82571_nvm_ops = {
.acquire = e1000_acquire_nvm_82571,
.read = e1000e_read_nvm_eerd,
.release = e1000_release_nvm_82571,
.reload = e1000e_reload_nvm_generic,
.update = e1000_update_nvm_checksum_82571,
.valid_led_default = e1000_valid_led_default_82571,
.validate = e1000_validate_nvm_checksum_82571,
.write = e1000_write_nvm_82571,
};
const struct e1000_info e1000_82571_info = {
.mac = e1000_82571,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_RESET_OVERWRITES_LAA /* errata */
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
.flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
| FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
.nvm_ops = &e82571_nvm_ops,
};
const struct e1000_info e1000_82572_info = {
.mac = e1000_82572,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
.flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
| FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
.nvm_ops = &e82571_nvm_ops,
};
const struct e1000_info e1000_82573_info = {
.mac = e1000_82573,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L1
| FLAG2_DISABLE_ASPM_L0S,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_m88,
.nvm_ops = &e82571_nvm_ops,
};
const struct e1000_info e1000_82574_info = {
.mac = e1000_82574,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_MSIX
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG
| FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
.nvm_ops = &e82571_nvm_ops,
};
const struct e1000_info e1000_82583_info = {
.mac = e1000_82583,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
| FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
.nvm_ops = &e82571_nvm_ops,
};
| gpl-2.0 |
percy-g2/android_kernel_sony_u8500_OLD | drivers/char/generic_nvram.c | 2699 | 3460 | /*
* Generic /dev/nvram driver for architectures providing some
* "generic" hooks, that is :
*
* nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size
*
* Note that an additional hook is supported for PowerMac only
* for getting the nvram "partition" information
*
*/
#define NVRAM_VERSION "1.1"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#endif
#define NVRAM_SIZE 8192
static DEFINE_MUTEX(nvram_mutex);
static ssize_t nvram_len;
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
switch (origin) {
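/* origin follows lseek(2): 1 == SEEK_CUR, 2 == SEEK_END; 0 (SEEK_SET) leaves the offset absolute. */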
case 1:
offset += file->f_pos;
break;
case 2:
offset += nvram_len;
break;
}
if (offset < 0)
return -EINVAL;
file->f_pos = offset;
return file->f_pos;
}
static ssize_t read_nvram(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
unsigned int i;
char __user *p = buf;
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
if (*ppos >= nvram_len)
return 0;
for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count)
if (__put_user(nvram_read_byte(i), p))
return -EFAULT;
*ppos = i;
return p - buf;
}
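/*
 * Illustrative userspace access (not part of this driver): the char
 * device is byte-addressable, so, assuming the usual /dev/nvram node,
 *
 *	dd if=/dev/nvram bs=1 count=16 2>/dev/null | xxd
 *
 * dumps the first 16 bytes.
 */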
static ssize_t write_nvram(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned int i;
const char __user *p = buf;
char c;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
if (*ppos >= nvram_len)
return 0;
for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) {
if (__get_user(c, p))
return -EFAULT;
nvram_write_byte(c, i);
}
*ppos = i;
return p - buf;
}
static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch(cmd) {
#ifdef CONFIG_PPC_PMAC
case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
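/* fall through to the current handler */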
case IOC_NVRAM_GET_OFFSET: {
int part, offset;
if (!machine_is(powermac))
return -EINVAL;
if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
return -EFAULT;
if (part < pmac_nvram_OF || part > pmac_nvram_NR)
return -EINVAL;
offset = pmac_get_partition(part);
if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0)
return -EFAULT;
break;
}
#endif /* CONFIG_PPC_PMAC */
case IOC_NVRAM_SYNC:
nvram_sync();
break;
default:
return -EINVAL;
}
return 0;
}
static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int ret;
mutex_lock(&nvram_mutex);
ret = nvram_ioctl(file, cmd, arg);
mutex_unlock(&nvram_mutex);
return ret;
}
const struct file_operations nvram_fops = {
.owner = THIS_MODULE,
.llseek = nvram_llseek,
.read = read_nvram,
.write = write_nvram,
.unlocked_ioctl = nvram_unlocked_ioctl,
};
static struct miscdevice nvram_dev = {
NVRAM_MINOR,
"nvram",
&nvram_fops
};
int __init nvram_init(void)
{
int ret = 0;
printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
NVRAM_VERSION);
ret = misc_register(&nvram_dev);
if (ret != 0)
goto out;
nvram_len = nvram_get_size();
if (nvram_len < 0)
nvram_len = NVRAM_SIZE;
out:
return ret;
}
void __exit nvram_cleanup(void)
{
misc_deregister(&nvram_dev);
}
module_init(nvram_init);
module_exit(nvram_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
jackyh/qt210_kernel | drivers/staging/westbridge/astoria/api/src/cyasintr.c | 2699 | 4083 | /* Cypress West Bridge API source file (cyasintr.c)
## ===========================
## Copyright (C) 2010 Cypress Semiconductor
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor
## Boston, MA 02110-1301, USA.
## ===========================
*/
#include "../../include/linux/westbridge/cyashal.h"
#include "../../include/linux/westbridge/cyasdevice.h"
#include "../../include/linux/westbridge/cyasregs.h"
#include "../../include/linux/westbridge/cyaserr.h"
extern void cy_as_mail_box_interrupt_handler(cy_as_device *);
void
cy_as_mcu_interrupt_handler(cy_as_device *dev_p)
{
/* Read the status register to clear the interrupt; the value is unused. */
(void)cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_MCU_STAT);
}
void
cy_as_power_management_interrupt_handler(cy_as_device *dev_p)
{
/* Read the status register to clear the interrupt; the value is unused. */
(void)cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_PWR_MAGT_STAT);
}
void
cy_as_pll_lock_loss_interrupt_handler(cy_as_device *dev_p)
{
/* Read the status register to clear the interrupt; the value is unused. */
(void)cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_PLL_LOCK_LOSS_STAT);
}
uint32_t cy_as_intr_start(cy_as_device *dev_p, cy_bool dmaintr)
{
uint16_t v;
cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
if (cy_as_device_is_intr_running(dev_p) != 0)
return CY_AS_ERROR_ALREADY_RUNNING;
v = CY_AS_MEM_P0_INT_MASK_REG_MMCUINT |
CY_AS_MEM_P0_INT_MASK_REG_MMBINT |
CY_AS_MEM_P0_INT_MASK_REG_MPMINT;
if (dmaintr)
v |= CY_AS_MEM_P0_INT_MASK_REG_MDRQINT;
/* Enable the interrupts of interest */
cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, v);
/* Mark the interrupt module as initialized */
cy_as_device_set_intr_running(dev_p);
return CY_AS_ERROR_SUCCESS;
}
uint32_t cy_as_intr_stop(cy_as_device *dev_p)
{
cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
if (cy_as_device_is_intr_running(dev_p) == 0)
return CY_AS_ERROR_NOT_RUNNING;
cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, 0);
cy_as_device_set_intr_stopped(dev_p);
return CY_AS_ERROR_SUCCESS;
}
void cy_as_intr_service_interrupt(cy_as_hal_device_tag tag)
{
uint16_t v;
cy_as_device *dev_p;
dev_p = cy_as_device_find_from_tag(tag);
/*
* only power management interrupts can occur before the
* antioch API setup is complete. if this is a PM interrupt
* handle it here; otherwise output a warning message.
*/
if (dev_p == 0) {
v = cy_as_hal_read_register(tag, CY_AS_MEM_P0_INTR_REG);
if (v == CY_AS_MEM_P0_INTR_REG_PMINT) {
/* Read the PWR_MAGT_STAT register
* to clear this interrupt. */
v = cy_as_hal_read_register(tag,
CY_AS_MEM_PWR_MAGT_STAT);
} else
cy_as_hal_print_message("stray antioch "
"interrupt detected"
", tag not associated "
"with any created device.");
return;
}
/* Make sure we got a valid object from CyAsDeviceFindFromTag */
cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG);
if (v & CY_AS_MEM_P0_INTR_REG_MCUINT)
cy_as_mcu_interrupt_handler(dev_p);
if (v & CY_AS_MEM_P0_INTR_REG_PMINT)
cy_as_power_management_interrupt_handler(dev_p);
if (v & CY_AS_MEM_P0_INTR_REG_PLLLOCKINT)
cy_as_pll_lock_loss_interrupt_handler(dev_p);
/* If the interrupt module is not running, no mailbox
* interrupts are expected from the west bridge. */
if (cy_as_device_is_intr_running(dev_p) == 0)
return;
if (v & CY_AS_MEM_P0_INTR_REG_MBINT)
cy_as_mail_box_interrupt_handler(dev_p);
}
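/*
 * Illustrative HAL glue (not part of this file): a platform interrupt
 * handler is expected to forward into cy_as_intr_service_interrupt()
 * with the tag it registered with the HAL. The request_irq()-style
 * handler below is a sketch; the name and dev_id plumbing are
 * hypothetical and assume <linux/interrupt.h>.
 */
#if 0
static irqreturn_t cy_as_platform_isr(int irq, void *dev_id)
{
cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_id;

/* dispatches the MCU/PM/PLL/mailbox handlers from P0_INTR_REG */
cy_as_intr_service_interrupt(tag);
return IRQ_HANDLED;
}
#endif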
| gpl-2.0 |
gimmeitorilltell/slim_kernel_samsung_msm8660 | drivers/staging/iio/adc/ad7298_core.c | 2699 | 7101 | /*
* AD7298 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/delay.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../ring_generic.h"
#include "adc.h"
#include "ad7298.h"
static struct iio_chan_spec ad7298_channels[] = {
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
9, AD7298_CH_TEMP, IIO_ST('s', 32, 32, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
1, 1, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
2, 2, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
3, 3, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
4, 4, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
5, 5, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 6, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
6, 6, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 7, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
7, 7, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(8),
};
static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
{
int ret;
st->tx_buf[0] = cpu_to_be16(AD7298_WRITE | st->ext_ref |
(AD7298_CH(0) >> ch));
ret = spi_sync(st->spi, &st->scan_single_msg);
if (ret)
return ret;
return be16_to_cpu(st->rx_buf[0]);
}
static int ad7298_scan_temp(struct ad7298_state *st, int *val)
{
int tmp, ret;
tmp = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
AD7298_TAVG | st->ext_ref);
ret = spi_write(st->spi, (u8 *)&tmp, 2);
if (ret)
return ret;
tmp = 0;
ret = spi_write(st->spi, (u8 *)&tmp, 2);
if (ret)
return ret;
usleep_range(101, 1000); /* sleep > 100us */
ret = spi_read(st->spi, (u8 *)&tmp, 2);
if (ret)
return ret;
tmp = be16_to_cpu(tmp) & RES_MASK(AD7298_BITS);
/*
* One LSB of the ADC corresponds to 0.25 deg C.
* The temperature reading is in 12-bit twos complement format
*/
if (tmp & (1 << (AD7298_BITS - 1))) {
/* negative reading: undo the two's complement encoding */
tmp = -((4096 - tmp) * 250);
} else {
tmp *= 250; /* temperature in milli degrees Celsius */
}
*val = tmp;
return 0;
}
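/*
 * Worked example of the sign handling above (illustrative only): the
 * raw 12-bit result is two's complement at 0.25 degC per LSB, so
 * 0x001 -> 250 milli degC and 0xfff -> -250 milli degC. A standalone
 * helper equivalent to the branch above might look like:
 */
#if 0
static int ad7298_temp_mdegc(u16 raw12)
{
int t = raw12 & 0xfff;

if (t & 0x800) /* negative reading */
return -(4096 - t) * 250;
return t * 250; /* 250 milli degC per LSB */
}
#endif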
static int ad7298_read_raw(struct iio_dev *dev_info,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
int ret;
struct ad7298_state *st = iio_priv(dev_info);
unsigned int scale_uv;
switch (m) {
case 0:
mutex_lock(&dev_info->mlock);
if (iio_ring_enabled(dev_info)) {
if (chan->address == AD7298_CH_TEMP)
ret = -ENODEV;
else
ret = ad7298_scan_from_ring(dev_info,
chan->address);
} else {
if (chan->address == AD7298_CH_TEMP)
ret = ad7298_scan_temp(st, val);
else
ret = ad7298_scan_direct(st, chan->address);
}
mutex_unlock(&dev_info->mlock);
if (ret < 0)
return ret;
if (chan->address != AD7298_CH_TEMP)
*val = ret & RES_MASK(AD7298_BITS);
return IIO_VAL_INT;
case (1 << IIO_CHAN_INFO_SCALE_SHARED):
scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
*val = scale_uv / 1000;
*val2 = (scale_uv % 1000) * 1000;
return IIO_VAL_INT_PLUS_MICRO;
case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
*val = 1;
*val2 = 0;
return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
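/*
 * Example of the scale math above (illustrative): with the part's 2.5 V
 * internal reference and 12-bit resolution, scale_uv = (2500 * 1000) >> 12
 * = 610, i.e. the core reports 0.000610 V per LSB. A consumer converting
 * a raw code could then do (hypothetical helper name):
 */
#if 0
static unsigned int ad7298_code_to_uv(unsigned int code, unsigned int vref_mv)
{
/* mirrors scale_uv = (int_vref_mv * 1000) >> AD7298_BITS above */
return code * ((vref_mv * 1000) >> 12);
}
#endif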
static const struct iio_info ad7298_info = {
.read_raw = &ad7298_read_raw,
.driver_module = THIS_MODULE,
};
static int __devinit ad7298_probe(struct spi_device *spi)
{
struct ad7298_platform_data *pdata = spi->dev.platform_data;
struct ad7298_state *st;
int ret, regdone = 0;
struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
st->reg = regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
goto error_put_reg;
}
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ad7298_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7298_channels);
indio_dev->info = &ad7298_info;
/* Setup default message */
st->scan_single_xfer[0].tx_buf = &st->tx_buf[0];
st->scan_single_xfer[0].len = 2;
st->scan_single_xfer[0].cs_change = 1;
st->scan_single_xfer[1].tx_buf = &st->tx_buf[1];
st->scan_single_xfer[1].len = 2;
st->scan_single_xfer[1].cs_change = 1;
st->scan_single_xfer[2].rx_buf = &st->rx_buf[0];
st->scan_single_xfer[2].len = 2;
spi_message_init(&st->scan_single_msg);
spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg);
spi_message_add_tail(&st->scan_single_xfer[1], &st->scan_single_msg);
spi_message_add_tail(&st->scan_single_xfer[2], &st->scan_single_msg);
if (pdata && pdata->vref_mv) {
st->int_vref_mv = pdata->vref_mv;
st->ext_ref = AD7298_EXTREF;
} else {
st->int_vref_mv = AD7298_INTREF_mV;
}
ret = ad7298_register_ring_funcs_and_init(indio_dev);
if (ret)
goto error_disable_reg;
ret = iio_device_register(indio_dev);
if (ret)
goto error_disable_reg;
regdone = 1;
ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
&ad7298_channels[1], /* skip temp0 */
ARRAY_SIZE(ad7298_channels) - 1);
if (ret)
goto error_cleanup_ring;
return 0;
error_cleanup_ring:
ad7298_ring_cleanup(indio_dev);
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
error_put_reg:
if (!IS_ERR(st->reg))
regulator_put(st->reg);
if (regdone)
iio_device_unregister(indio_dev);
else
iio_free_device(indio_dev);
return ret;
}
static int __devexit ad7298_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
struct ad7298_state *st = iio_priv(indio_dev);
iio_ring_buffer_unregister(indio_dev->ring);
ad7298_ring_cleanup(indio_dev);
iio_device_unregister(indio_dev);
if (!IS_ERR(st->reg)) {
regulator_disable(st->reg);
regulator_put(st->reg);
}
return 0;
}
static const struct spi_device_id ad7298_id[] = {
{"ad7298", 0},
{}
};
static struct spi_driver ad7298_driver = {
.driver = {
.name = "ad7298",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7298_probe,
.remove = __devexit_p(ad7298_remove),
.id_table = ad7298_id,
};
static int __init ad7298_init(void)
{
return spi_register_driver(&ad7298_driver);
}
module_init(ad7298_init);
static void __exit ad7298_exit(void)
{
spi_unregister_driver(&ad7298_driver);
}
module_exit(ad7298_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7298 ADC");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:ad7298");
| gpl-2.0 |
mzueger/linux-phycore-mpc5200b | drivers/gpu/drm/ttm/ttm_memory.c | 3723 | 14204 | /**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_page_alloc.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_zone {
struct kobject kobj;
struct ttm_mem_global *glob;
const char *name;
uint64_t zone_mem;
uint64_t emer_mem;
uint64_t max_mem;
uint64_t swap_limit;
uint64_t used_mem;
};
static struct attribute ttm_mem_sys = {
.name = "zone_memory",
.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
.name = "emergency_memory",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
.name = "available_memory",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
.name = "swap_limit",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
.name = "used_memory",
.mode = S_IRUGO
};
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
printk(KERN_INFO TTM_PFX
"Zone %7s: Used memory at exit: %llu kiB.\n",
zone->name, (unsigned long long) zone->used_mem >> 10);
kfree(zone);
}
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
uint64_t val = 0;
spin_lock(&zone->glob->lock);
if (attr == &ttm_mem_sys)
val = zone->zone_mem;
else if (attr == &ttm_mem_emer)
val = zone->emer_mem;
else if (attr == &ttm_mem_max)
val = zone->max_mem;
else if (attr == &ttm_mem_swap)
val = zone->swap_limit;
else if (attr == &ttm_mem_used)
val = zone->used_mem;
spin_unlock(&zone->glob->lock);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(unsigned long long) val >> 10);
}
static void ttm_check_swapping(struct ttm_mem_global *glob);
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer,
size_t size)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
int chars;
unsigned long val;
uint64_t val64;
chars = sscanf(buffer, "%lu", &val);
if (chars == 0)
return size;
val64 = val;
val64 <<= 10;
spin_lock(&zone->glob->lock);
if (val64 > zone->zone_mem)
val64 = zone->zone_mem;
if (attr == &ttm_mem_emer) {
zone->emer_mem = val64;
if (zone->max_mem > val64)
zone->max_mem = val64;
} else if (attr == &ttm_mem_max) {
zone->max_mem = val64;
if (zone->emer_mem < val64)
zone->emer_mem = val64;
} else if (attr == &ttm_mem_swap)
zone->swap_limit = val64;
spin_unlock(&zone->glob->lock);
ttm_check_swapping(zone->glob);
return size;
}
static struct attribute *ttm_mem_zone_attrs[] = {
&ttm_mem_sys,
&ttm_mem_emer,
&ttm_mem_max,
&ttm_mem_swap,
&ttm_mem_used,
NULL
};
static const struct sysfs_ops ttm_mem_zone_ops = {
.show = &ttm_mem_zone_show,
.store = &ttm_mem_zone_store
};
static struct kobj_type ttm_mem_zone_kobj_type = {
.release = &ttm_mem_zone_kobj_release,
.sysfs_ops = &ttm_mem_zone_ops,
.default_attrs = ttm_mem_zone_attrs,
};
static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
struct ttm_mem_global *glob =
container_of(kobj, struct ttm_mem_global, kobj);
kfree(glob);
}
static struct kobj_type ttm_mem_glob_kobj_type = {
.release = &ttm_mem_global_kobj_release,
};
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
bool from_wq, uint64_t extra)
{
unsigned int i;
struct ttm_mem_zone *zone;
uint64_t target;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (from_wq)
target = zone->swap_limit;
else if (capable(CAP_SYS_ADMIN))
target = zone->emer_mem;
else
target = zone->max_mem;
target = (extra > target) ? 0ULL : target;
if (zone->used_mem > target)
return true;
}
return false;
}
/**
* At this point we only support a single shrink callback.
* Extend this if needed, perhaps using a linked list of callbacks.
* Note that this function is reentrant:
* many threads may try to swap out at any given time.
*/
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
uint64_t extra)
{
int ret;
struct ttm_mem_shrink *shrink;
spin_lock(&glob->lock);
if (glob->shrink == NULL)
goto out;
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
shrink = glob->shrink;
spin_unlock(&glob->lock);
ret = shrink->do_shrink(shrink);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
goto out;
}
out:
spin_unlock(&glob->lock);
}
static void ttm_shrink_work(struct work_struct *work)
{
struct ttm_mem_global *glob =
container_of(work, struct ttm_mem_global, work);
ttm_shrink(glob, true, 0ULL);
}
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
uint64_t mem;
int ret;
if (unlikely(!zone))
return -ENOMEM;
mem = si->totalram - si->totalhigh;
mem *= si->mem_unit;
zone->name = "kernel";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_kernel = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
}
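/*
 * The watermark arithmetic above encodes fixed fractions of the zone
 * size: max_mem = 1/2, emer_mem = 3/4 and swap_limit = 1/2 - 1/8 = 3/8.
 * For a 2 GiB zone that is 1 GiB / 1.5 GiB / 768 MiB. A sketch of the
 * same math, for illustration only:
 */
#if 0
static void ttm_zone_watermarks_example(uint64_t mem)
{
uint64_t max = mem >> 1; /* 1/2 */
uint64_t emer = (mem >> 1) + (mem >> 2); /* 3/4 */
uint64_t swap = (mem >> 1) - (mem >> 3); /* 3/8 */

pr_info("max %llu emer %llu swap %llu\n",
(unsigned long long)max,
(unsigned long long)emer,
(unsigned long long)swap);
}
#endif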
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
if (si->totalhigh == 0)
return 0;
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
if (unlikely(!zone))
return -ENOMEM;
mem = si->totalram;
mem *= si->mem_unit;
zone->name = "highmem";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_highmem = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
uint64_t mem;
int ret;
if (unlikely(!zone))
return -ENOMEM;
mem = si->totalram;
mem *= si->mem_unit;
/**
* No special dma32 zone needed.
*/
if (mem <= ((uint64_t) 1ULL << 32)) {
kfree(zone);
return 0;
}
/*
* Limit max dma32 memory to 4GB for now
* until we can figure out how big this
* zone really is.
*/
mem = ((uint64_t) 1ULL << 32);
zone->name = "dma32";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_dma32 = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
}
#endif
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
struct sysinfo si;
int ret;
int i;
struct ttm_mem_zone *zone;
spin_lock_init(&glob->lock);
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
init_waitqueue_head(&glob->queue);
ret = kobject_init_and_add(
&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
kobject_put(&glob->kobj);
return ret;
}
si_meminfo(&si);
ret = ttm_mem_init_kernel_zone(glob, &si);
if (unlikely(ret != 0))
goto out_no_zone;
#ifdef CONFIG_HIGHMEM
ret = ttm_mem_init_highmem_zone(glob, &si);
if (unlikely(ret != 0))
goto out_no_zone;
#else
ret = ttm_mem_init_dma32_zone(glob, &si);
if (unlikely(ret != 0))
goto out_no_zone;
#endif
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
printk(KERN_INFO TTM_PFX
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
unsigned int i;
struct ttm_mem_zone *zone;
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
}
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
bool needs_swapping = false;
unsigned int i;
struct ttm_mem_zone *zone;
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (zone->used_mem > zone->swap_limit) {
needs_swapping = true;
break;
}
}
spin_unlock(&glob->lock);
if (unlikely(needs_swapping))
(void)queue_work(glob->swap_queue, &glob->work);
}
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount)
{
unsigned int i;
struct ttm_mem_zone *zone;
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem -= amount;
}
spin_unlock(&glob->lock);
}
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount, bool reserve)
{
uint64_t limit;
int ret = -ENOMEM;
unsigned int i;
struct ttm_mem_zone *zone;
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
limit = (capable(CAP_SYS_ADMIN)) ?
zone->emer_mem : zone->max_mem;
if (zone->used_mem > limit)
goto out_unlock;
}
if (reserve) {
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem += amount;
}
}
ret = 0;
out_unlock:
spin_unlock(&glob->lock);
ttm_check_swapping(glob);
return ret;
}
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t memory,
bool no_wait, bool interruptible)
{
int count = TTM_MEMORY_ALLOC_RETRIES;
while (unlikely(ttm_mem_global_reserve(glob,
single_zone,
memory, true)
!= 0)) {
if (no_wait)
return -ENOMEM;
if (unlikely(count-- == 0))
return -ENOMEM;
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
}
return 0;
}
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible)
{
/**
* Normal allocations of kernel memory are registered in
* all zones.
*/
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
bool no_wait, bool interruptible)
{
struct ttm_mem_zone *zone = NULL;
/**
* Page allocations may be registed in a single zone
* only if highmem or !dma32.
*/
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page) && glob->zone_highmem != NULL)
zone = glob->zone_highmem;
#else
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
interruptible);
}
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
struct ttm_mem_zone *zone = NULL;
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page) && glob->zone_highmem != NULL)
zone = glob->zone_highmem;
#else
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
return size;
else if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
else {
size_t tmp_size = 4;
while (tmp_size < size)
tmp_size <<= 1;
return tmp_size;
}
}
EXPORT_SYMBOL(ttm_round_pot);
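/*
 * Behaviour of ttm_round_pot() for a few inputs (illustrative): powers
 * of two are returned unchanged, sub-page sizes round up to the next
 * power of two, and anything larger than a page rounds up to whole
 * pages. A sketch of a self-check built on those cases:
 */
#if 0
static void ttm_round_pot_selftest(void)
{
BUG_ON(ttm_round_pot(96) != 128); /* next power of two */
BUG_ON(ttm_round_pot(4096) != 4096); /* already a power of two */
BUG_ON(ttm_round_pot(PAGE_SIZE + 1) != 2 * PAGE_SIZE); /* page aligned */
}
#endif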
| gpl-2.0 |
Mr-AW/Kernel_TeLo_LP_LenovoA6000 | net/netfilter/xt_conntrack.c | 4235 | 9883 | /*
* xt_conntrack - Netfilter module to match connection tracking
* information. (Superset of Rusty's minimalistic state match.)
*
* (C) 2001 Marc Boucher (marc@mbsi.ca).
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_conntrack.h>
#include <net/netfilter/nf_conntrack.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: connection tracking state match");
MODULE_ALIAS("ipt_conntrack");
MODULE_ALIAS("ip6t_conntrack");
static bool
conntrack_addrcmp(const union nf_inet_addr *kaddr,
const union nf_inet_addr *uaddr,
const union nf_inet_addr *umask, unsigned int l3proto)
{
if (l3proto == NFPROTO_IPV4)
return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0;
else if (l3proto == NFPROTO_IPV6)
return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6,
&uaddr->in6) == 0;
else
return false;
}
static inline bool
conntrack_mt_origsrc(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
&info->origsrc_addr, &info->origsrc_mask, family);
}
static inline bool
conntrack_mt_origdst(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
&info->origdst_addr, &info->origdst_mask, family);
}
static inline bool
conntrack_mt_replsrc(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
&info->replsrc_addr, &info->replsrc_mask, family);
}
static inline bool
conntrack_mt_repldst(const struct nf_conn *ct,
const struct xt_conntrack_mtinfo2 *info,
u_int8_t family)
{
return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
&info->repldst_addr, &info->repldst_mask, family);
}
static inline bool
ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
const struct nf_conn *ct)
{
const struct nf_conntrack_tuple *tuple;
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
if ((info->match_flags & XT_CONNTRACK_PROTO) &&
(nf_ct_protonum(ct) == info->l4proto) ^
!(info->invert_flags & XT_CONNTRACK_PROTO))
return false;
/* Shortcut to match all recognized protocols by using ->src.all. */
if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
(tuple->src.u.all == info->origsrc_port) ^
!(info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
(tuple->dst.u.all == info->origdst_port) ^
!(info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
return false;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
(tuple->src.u.all == info->replsrc_port) ^
!(info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
(tuple->dst.u.all == info->repldst_port) ^
!(info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
return false;
return true;
}
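/*
 * The "(test) ^ !(invert flag)" idiom used throughout this file boils
 * down to: the rule matches when the test outcome differs from the
 * invert bit. An equivalent one-flag helper, for illustration only:
 */
#if 0
static bool ct_flag_matches(bool test_result, bool invert)
{
/* invert clear: match when the test succeeds;
 * invert set: match when the test fails. */
return test_result ^ invert;
}
#endif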
static inline bool
port_match(u16 min, u16 max, u16 port, bool invert)
{
return (port >= min && port <= max) ^ invert;
}
static inline bool
ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
const struct nf_conn *ct)
{
const struct nf_conntrack_tuple *tuple;
tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
if ((info->match_flags & XT_CONNTRACK_PROTO) &&
(nf_ct_protonum(ct) == info->l4proto) ^
!(info->invert_flags & XT_CONNTRACK_PROTO))
return false;
/* Shortcut to match all recognized protocols by using ->src.all. */
if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
!port_match(info->origsrc_port, info->origsrc_port_high,
ntohs(tuple->src.u.all),
info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
!port_match(info->origdst_port, info->origdst_port_high,
ntohs(tuple->dst.u.all),
info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
return false;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
!port_match(info->replsrc_port, info->replsrc_port_high,
ntohs(tuple->src.u.all),
info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
return false;
if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
!port_match(info->repldst_port, info->repldst_port_high,
ntohs(tuple->dst.u.all),
info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
return false;
return true;
}
static bool
conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
u16 state_mask, u16 status_mask)
{
const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
enum ip_conntrack_info ctinfo;
const struct nf_conn *ct;
unsigned int statebit;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
if (nf_ct_is_untracked(ct))
statebit = XT_CONNTRACK_STATE_UNTRACKED;
else
statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
} else
statebit = XT_CONNTRACK_STATE_INVALID;
if (info->match_flags & XT_CONNTRACK_STATE) {
if (ct != NULL) {
if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
statebit |= XT_CONNTRACK_STATE_SNAT;
if (test_bit(IPS_DST_NAT_BIT, &ct->status))
statebit |= XT_CONNTRACK_STATE_DNAT;
}
if (!!(state_mask & statebit) ^
!(info->invert_flags & XT_CONNTRACK_STATE))
return false;
}
if (ct == NULL)
return info->match_flags & XT_CONNTRACK_STATE;
if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
(CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
!(info->invert_flags & XT_CONNTRACK_DIRECTION))
return false;
if (info->match_flags & XT_CONNTRACK_ORIGSRC)
if (conntrack_mt_origsrc(ct, info, par->family) ^
!(info->invert_flags & XT_CONNTRACK_ORIGSRC))
return false;
if (info->match_flags & XT_CONNTRACK_ORIGDST)
if (conntrack_mt_origdst(ct, info, par->family) ^
!(info->invert_flags & XT_CONNTRACK_ORIGDST))
return false;
if (info->match_flags & XT_CONNTRACK_REPLSRC)
if (conntrack_mt_replsrc(ct, info, par->family) ^
!(info->invert_flags & XT_CONNTRACK_REPLSRC))
return false;
if (info->match_flags & XT_CONNTRACK_REPLDST)
if (conntrack_mt_repldst(ct, info, par->family) ^
!(info->invert_flags & XT_CONNTRACK_REPLDST))
return false;
if (par->match->revision != 3) {
if (!ct_proto_port_check(info, ct))
return false;
} else {
if (!ct_proto_port_check_v3(par->matchinfo, ct))
return false;
}
if ((info->match_flags & XT_CONNTRACK_STATUS) &&
(!!(status_mask & ct->status) ^
!(info->invert_flags & XT_CONNTRACK_STATUS)))
return false;
if (info->match_flags & XT_CONNTRACK_EXPIRES) {
unsigned long expires = 0;
if (timer_pending(&ct->timeout))
expires = (ct->timeout.expires - jiffies) / HZ;
if ((expires >= info->expires_min &&
expires <= info->expires_max) ^
!(info->invert_flags & XT_CONNTRACK_EXPIRES))
return false;
}
return true;
}
static bool
conntrack_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo1 *info = par->matchinfo;
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
static bool
conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo2 *info = par->matchinfo;
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
static bool
conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
int ret;
ret = nf_ct_l3proto_try_module_get(par->family);
if (ret < 0)
pr_info("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void conntrack_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_l3proto_module_put(par->family);
}
static struct xt_match conntrack_mt_reg[] __read_mostly = {
{
.name = "conntrack",
.revision = 1,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo1),
.match = conntrack_mt_v1,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
{
.name = "conntrack",
.revision = 2,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo2),
.match = conntrack_mt_v2,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
{
.name = "conntrack",
.revision = 3,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo3),
.match = conntrack_mt_v3,
.checkentry = conntrack_mt_check,
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
};
static int __init conntrack_mt_init(void)
{
return xt_register_matches(conntrack_mt_reg,
ARRAY_SIZE(conntrack_mt_reg));
}
static void __exit conntrack_mt_exit(void)
{
xt_unregister_matches(conntrack_mt_reg, ARRAY_SIZE(conntrack_mt_reg));
}
module_init(conntrack_mt_init);
module_exit(conntrack_mt_exit);
| gpl-2.0 |
uileyar/fastsocket | kernel/drivers/infiniband/hw/qib/qib_uc.c | 4235 | 13983 | /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "qib.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
/**
* qib_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
*
* Return 1 if constructed; otherwise, return 0.
*/
int qib_make_uc_req(struct qib_qp *qp)
{
struct qib_other_headers *ohdr;
struct qib_swqe *wqe;
unsigned long flags;
u32 hwords;
u32 bth0;
u32 len;
u32 pmtu = qp->pmtu;
int ret = 0;
spin_lock_irqsave(&qp->s_lock, flags);
if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
if (qp->s_last == qp->s_head)
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&qp->s_dma_busy)) {
qp->s_flags |= QIB_S_WAIT_DMA;
goto bail;
}
wqe = get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
ohdr = &qp->s_hdr->u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
ohdr = &qp->s_hdr->u.l.oth;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
bth0 = 0;
/* Get the next send request. */
wqe = get_swqe_ptr(qp, qp->s_cur);
qp->s_wqe = NULL;
switch (qp->s_state) {
default:
if (!(ib_qib_state_ops[qp->state] &
QIB_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
if (qp->s_cur == qp->s_head)
goto bail;
/*
* Start a new request.
*/
wqe->psn = qp->s_next_psn;
qp->s_psn = qp->s_next_psn;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
qp->s_sge.total_len = wqe->length;
len = wqe->length;
qp->s_len = len;
switch (wqe->wr.opcode) {
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
if (len > pmtu) {
qp->s_state = OP(SEND_FIRST);
len = pmtu;
break;
}
if (wqe->wr.opcode == IB_WR_SEND)
qp->s_state = OP(SEND_ONLY);
else {
qp->s_state =
OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
qp->s_wqe = wqe;
if (++qp->s_cur >= qp->s_size)
qp->s_cur = 0;
break;
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->wr.wr.rdma.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / 4;
if (len > pmtu) {
qp->s_state = OP(RDMA_WRITE_FIRST);
len = pmtu;
break;
}
if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
qp->s_state = OP(RDMA_WRITE_ONLY);
else {
qp->s_state =
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the RETH */
ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
}
qp->s_wqe = wqe;
if (++qp->s_cur >= qp->s_size)
qp->s_cur = 0;
break;
default:
goto bail;
}
break;
case OP(SEND_FIRST):
qp->s_state = OP(SEND_MIDDLE);
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
len = pmtu;
break;
}
if (wqe->wr.opcode == IB_WR_SEND)
qp->s_state = OP(SEND_LAST);
else {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
qp->s_wqe = wqe;
if (++qp->s_cur >= qp->s_size)
qp->s_cur = 0;
break;
case OP(RDMA_WRITE_FIRST):
qp->s_state = OP(RDMA_WRITE_MIDDLE);
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
len = qp->s_len;
if (len > pmtu) {
len = pmtu;
break;
}
if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
qp->s_state = OP(RDMA_WRITE_LAST);
else {
qp->s_state =
OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
}
qp->s_wqe = wqe;
if (++qp->s_cur >= qp->s_size)
qp->s_cur = 0;
break;
}
qp->s_len -= len;
qp->s_hdrwords = hwords;
qp->s_cur_sge = &qp->s_sge;
qp->s_cur_size = len;
qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
qp->s_next_psn++ & QIB_PSN_MASK);
done:
ret = 1;
goto unlock;
bail:
qp->s_flags &= ~QIB_S_BUSY;
unlock:
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
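/*
 * Segmentation sketch (illustrative): each call above emits at most one
 * packet, walking the SEND/RDMA_WRITE FIRST -> MIDDLE -> LAST states
 * for payloads larger than the path MTU, so a message of len bytes
 * needs roughly this many calls:
 */
#if 0
static u32 uc_packets_for_msg(u32 len, u32 pmtu)
{
/* a zero-length send still takes one ONLY packet */
return len ? DIV_ROUND_UP(len, pmtu) : 1;
}
#endif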
/**
* qib_uc_rcv - handle an incoming UC packet
* @ibp: the port the packet came in on
* @hdr: the header of the packet
* @has_grh: true if the packet has a GRH
* @data: the packet data
* @tlen: the length of the packet
* @qp: the QP for this packet.
*
* This is called from qib_qp_rcv() to process an incoming UC packet
* for the given QP.
* Called at interrupt level.
*/
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
struct qib_other_headers *ohdr;
u32 opcode;
u32 hdrsize;
u32 psn;
u32 pad;
struct ib_wc wc;
u32 pmtu = qp->pmtu;
struct ib_reth *reth;
int ret;
/* Check for GRH */
if (!has_grh) {
ohdr = &hdr->u.oth;
hdrsize = 8 + 12; /* LRH + BTH */
} else {
ohdr = &hdr->u.l.oth;
hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
}
opcode = be32_to_cpu(ohdr->bth[0]);
if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
return;
psn = be32_to_cpu(ohdr->bth[2]);
opcode >>= 24;
/* Compare the PSN verses the expected PSN. */
if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
/*
* Handle a sequence error.
* Silently drop any current message.
*/
qp->r_psn = psn;
inv:
if (qp->r_state == OP(SEND_FIRST) ||
qp->r_state == OP(SEND_MIDDLE)) {
set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
} else
qib_put_ss(&qp->r_sge);
qp->r_state = OP(SEND_LAST);
switch (opcode) {
case OP(SEND_FIRST):
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
goto send_first;
case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_ONLY):
case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
goto rdma_first;
default:
goto drop;
}
}
/* Check for opcode sequence errors. */
switch (qp->r_state) {
case OP(SEND_FIRST):
case OP(SEND_MIDDLE):
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
opcode == OP(SEND_LAST_WITH_IMMEDIATE))
break;
goto inv;
case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_MIDDLE):
if (opcode == OP(RDMA_WRITE_MIDDLE) ||
opcode == OP(RDMA_WRITE_LAST) ||
opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
break;
goto inv;
default:
if (opcode == OP(SEND_FIRST) ||
opcode == OP(SEND_ONLY) ||
opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
opcode == OP(RDMA_WRITE_FIRST) ||
opcode == OP(RDMA_WRITE_ONLY) ||
opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
break;
goto inv;
}
if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
qp->r_flags |= QIB_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_COMM_EST;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge;
else {
ret = qib_get_rwqe(qp, 0);
if (ret < 0)
goto op_err;
if (!ret)
goto drop;
/*
* qp->s_rdma_read_sge will be the owner
* of the mr references.
*/
qp->s_rdma_read_sge = qp->r_sge;
}
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
goto send_last_imm;
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4)))
goto rewind;
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
qib_copy_sge(&qp->r_sge, data, pmtu, 0);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
case OP(SEND_LAST):
no_immediate_data:
wc.ex.imm_data = 0;
wc.wc_flags = 0;
send_last:
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
/* Check for invalid length. */
/* XXX LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4)))
goto rewind;
/* Don't count the CRC. */
tlen -= (hdrsize + pad + 4);
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
qib_copy_sge(&qp->r_sge, data, tlen, 0);
qib_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
wc.slid = qp->remote_ah_attr.dlid;
wc.sl = qp->remote_ah_attr.sl;
/* zero fields that are N/A */
wc.vendor_err = 0;
wc.pkey_index = 0;
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
break;
case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_ONLY):
case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
if (unlikely(!(qp->qp_access_flags &
IB_ACCESS_REMOTE_WRITE))) {
goto drop;
}
reth = &ohdr->u.rc.reth;
hdrsize += sizeof(*reth);
qp->r_len = be32_to_cpu(reth->length);
qp->r_rcv_len = 0;
qp->r_sge.sg_list = NULL;
if (qp->r_len != 0) {
u32 rkey = be32_to_cpu(reth->rkey);
u64 vaddr = be64_to_cpu(reth->vaddr);
int ok;
/* Check rkey */
ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto drop;
qp->r_sge.num_sge = 1;
} else {
qp->r_sge.num_sge = 0;
qp->r_sge.sge.mr = NULL;
qp->r_sge.sge.vaddr = NULL;
qp->r_sge.sge.length = 0;
qp->r_sge.sge.sge_length = 0;
}
if (opcode == OP(RDMA_WRITE_ONLY))
goto rdma_last;
else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
wc.ex.imm_data = ohdr->u.rc.imm_data;
goto rdma_last_imm;
}
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
if (unlikely(tlen != (hdrsize + pmtu + 4)))
goto drop;
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
qib_copy_sge(&qp->r_sge, data, pmtu, 1);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
/* Check for invalid length. */
/* XXX LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4)))
goto drop;
/* Don't count the CRC. */
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
qib_put_ss(&qp->s_rdma_read_sge);
else {
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto op_err;
if (!ret)
goto drop;
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
qib_put_ss(&qp->r_sge);
goto last_imm;
case OP(RDMA_WRITE_LAST):
rdma_last:
/* Get the number of bytes the message was padded by. */
pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
/* Check for invalid length. */
/* XXX LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4)))
goto drop;
/* Don't count the CRC. */
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
qib_put_ss(&qp->r_sge);
break;
default:
/* Drop packet for unknown opcodes. */
goto drop;
}
qp->r_psn++;
qp->r_state = opcode;
return;
rewind:
set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
drop:
ibp->n_pkt_drops++;
return;
op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
}
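/*
 * PSN comparison note (illustrative): IB packet sequence numbers are 24
 * bits wide (QIB_PSN_MASK above), so the in-order check at the top of
 * qib_uc_rcv() compares modulo 2^24. A sketch assuming the same
 * convention as qib_cmp24():
 */
#if 0
static int psn_cmp24_example(u32 a, u32 b)
{
/* negative/zero/positive like memcmp, with 24-bit wraparound */
return (((int) a - (int) b) << 8) >> 8;
}
#endif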
| gpl-2.0 |
infectedmushi/kernel-sony-copyleft | arch/arm/mach-footbridge/cats-pci.c | 4747 | 1397 | /*
* linux/arch/arm/mach-footbridge/cats-pci.c
*
* PCI bios-type initialisation for PCI machines
*
* Bits taken from various places.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
/* cats host-specific stuff */
static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
{
return 0;
}
static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->irq >= 255)
return -1; /* not a valid interrupt. */
if (dev->irq >= 128)
return dev->irq & 0x1f;
if (dev->irq >= 1 && dev->irq <= 4)
return irqmap_cats[dev->irq - 1];
if (dev->irq != 0)
printk("PCI: device %02x:%02x has unknown irq line %x\n",
dev->bus->number, dev->devfn, dev->irq);
return -1;
}
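/*
 * Mapping rules above, summarised (illustrative values):
 * dev->irq 1-4 -> irqmap_cats[dev->irq - 1], e.g. 2 -> IRQ_IN0
 * dev->irq 128-254 -> dev->irq & 0x1f, e.g. 130 -> 2
 * dev->irq >= 255 -> -1 (not a valid interrupt)
 */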
/*
* why not the standard PCI swizzle? does this prevent 4-port tulip
* cards being used (ie, pci-pci bridge based cards)?
*/
static struct hw_pci cats_pci __initdata = {
.swizzle = cats_no_swizzle,
.map_irq = cats_map_irq,
.nr_controllers = 1,
.ops = &dc21285_ops,
.setup = dc21285_setup,
.preinit = dc21285_preinit,
.postinit = dc21285_postinit,
};
static int __init cats_pci_init(void)
{
if (machine_is_cats())
pci_common_init(&cats_pci);
return 0;
}
subsys_initcall(cats_pci_init);
| gpl-2.0 |
EuphoriaOS/android_kernel_samsung_exynos5410 | drivers/gpu/drm/radeon/r200.c | 5259 | 15434 | /*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "r200_reg_safe.h"
#include "r100_track.h"
static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
{
int vtx_size, i;
vtx_size = 2;
if (vtx_fmt_0 & R200_VTX_Z0)
vtx_size++;
if (vtx_fmt_0 & R200_VTX_W0)
vtx_size++;
/* blend weight */
if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
vtx_size++;
if (vtx_fmt_0 & R200_VTX_N0)
vtx_size += 3;
if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
vtx_size++;
if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
vtx_size++;
if (vtx_fmt_0 & R200_VTX_SHININESS_0)
vtx_size++;
if (vtx_fmt_0 & R200_VTX_SHININESS_1)
vtx_size++;
for (i = 0; i < 8; i++) {
int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
switch (color_size) {
case 0: break;
case 1: vtx_size++; break;
case 2: vtx_size += 3; break;
case 3: vtx_size += 4; break;
}
}
if (vtx_fmt_0 & R200_VTX_XY1)
vtx_size += 2;
if (vtx_fmt_0 & R200_VTX_Z1)
vtx_size++;
if (vtx_fmt_0 & R200_VTX_W1)
vtx_size++;
if (vtx_fmt_0 & R200_VTX_N1)
vtx_size += 3;
return vtx_size;
}
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
int r = 0;
/* radeon pitch is /64 */
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
radeon_ring_write(ring, PACKET0(0x720, 2));
radeon_ring_write(ring, src_offset);
radeon_ring_write(ring, dst_offset);
radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
}
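/*
 * Ring-budget example (illustrative): the blitter above moves at most
 * 0x1FFFFF bytes (2 MiB - 1) per DMA packet of 4 dwords, which is what
 * the radeon_ring_lock() reservation accounts for:
 */
#if 0
static void r200_dma_budget_example(void)
{
u32 bytes = 16 << 20; /* 16 MiB of pages */
int loops = DIV_ROUND_UP(bytes, 0x1FFFFF); /* = 9, since 0x1FFFFF < 2 MiB */
unsigned ndw = loops * 4 + 64; /* per-loop packets plus fence slack */
}
#endif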
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
int vtx_size, i, tex_size;
vtx_size = 0;
for (i = 0; i < 6; i++) {
tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
if (tex_size > 4)
continue;
vtx_size += tex_size;
}
return vtx_size;
}
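/*
 * Example (illustrative): with two texture units enabled, tex0 sending
 * 2 coordinates and tex1 sending 3, the helper above counts 5 dwords;
 * per-unit fields larger than 4 are treated as sending nothing.
 */
#if 0
static void r200_vtx_fmt1_example(void)
{
u32 fmt1 = (2 << 0) | (3 << 3); /* tex0 = 2 coords, tex1 = 3 coords */
int dwords = r200_get_vtx_size_1(fmt1); /* = 5 */
}
#endif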
int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
int r;
int i;
int face;
u32 tile_flags = 0;
u32 idx_value;
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
break;
/* FIXME: only allow PACKET3 blit? easier to check for out of
* range access */
case RADEON_DST_PITCH_OFFSET:
case RADEON_SRC_PITCH_OFFSET:
r = r100_reloc_pitch_offset(p, pkt, idx, reg);
if (r)
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1:
case R200_PP_TXOFFSET_2:
case R200_PP_TXOFFSET_3:
case R200_PP_TXOFFSET_4:
case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE;
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
case R200_PP_CUBIC_OFFSET_F1_0:
case R200_PP_CUBIC_OFFSET_F2_0:
case R200_PP_CUBIC_OFFSET_F3_0:
case R200_PP_CUBIC_OFFSET_F4_0:
case R200_PP_CUBIC_OFFSET_F5_0:
case R200_PP_CUBIC_OFFSET_F1_1:
case R200_PP_CUBIC_OFFSET_F2_1:
case R200_PP_CUBIC_OFFSET_F3_1:
case R200_PP_CUBIC_OFFSET_F4_1:
case R200_PP_CUBIC_OFFSET_F5_1:
case R200_PP_CUBIC_OFFSET_F1_2:
case R200_PP_CUBIC_OFFSET_F2_2:
case R200_PP_CUBIC_OFFSET_F3_2:
case R200_PP_CUBIC_OFFSET_F4_2:
case R200_PP_CUBIC_OFFSET_F5_2:
case R200_PP_CUBIC_OFFSET_F1_3:
case R200_PP_CUBIC_OFFSET_F2_3:
case R200_PP_CUBIC_OFFSET_F3_3:
case R200_PP_CUBIC_OFFSET_F4_3:
case R200_PP_CUBIC_OFFSET_F5_3:
case R200_PP_CUBIC_OFFSET_F1_4:
case R200_PP_CUBIC_OFFSET_F2_4:
case R200_PP_CUBIC_OFFSET_F3_4:
case R200_PP_CUBIC_OFFSET_F4_4:
case R200_PP_CUBIC_OFFSET_F5_4:
case R200_PP_CUBIC_OFFSET_F1_5:
case R200_PP_CUBIC_OFFSET_F2_5:
case R200_PP_CUBIC_OFFSET_F3_5:
case R200_PP_CUBIC_OFFSET_F4_5:
case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((idx_value >> 16) & 0x7FF);
track->cb_dirty = true;
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
} else
ib[idx] = idx_value;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
track->zb_dirty = true;
break;
case RADEON_RB3D_CNTL:
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
case 7:
case 8:
case 9:
case 11:
case 12:
track->cb[0].cpp = 1;
break;
case 3:
case 4:
case 15:
track->cb[0].cpp = 2;
break;
case 6:
track->cb[0].cpp = 4;
break;
default:
DRM_ERROR("Invalid color buffer format (%d) !\n",
((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
DRM_ERROR("No support for depth xy offset in kms\n");
return -EINVAL;
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
track->cb_dirty = true;
track->zb_dirty = true;
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (idx_value & 0xf) {
case 0:
track->zb.cpp = 2;
break;
case 2:
case 3:
case 4:
case 5:
case 9:
case 11:
track->zb.cpp = 4;
break;
default:
break;
}
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_CNTL:
{
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
track->tex_dirty = true;
}
break;
case RADEON_SE_VF_CNTL:
track->vap_vf_cntl = idx_value;
break;
case 0x210c:
/* VAP_VF_MAX_VTX_INDX */
track->max_indx = idx_value & 0x00FFFFFFUL;
break;
case R200_SE_VTX_FMT_0:
track->vtx_size = r200_get_vtx_size_0(idx_value);
break;
case R200_SE_VTX_FMT_1:
track->vtx_size += r200_get_vtx_size_1(idx_value);
break;
case R200_PP_TXSIZE_0:
case R200_PP_TXSIZE_1:
case R200_PP_TXSIZE_2:
case R200_PP_TXSIZE_3:
case R200_PP_TXSIZE_4:
case R200_PP_TXSIZE_5:
i = (reg - R200_PP_TXSIZE_0) / 32;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
track->tex_dirty = true;
break;
case R200_PP_TXPITCH_0:
case R200_PP_TXPITCH_1:
case R200_PP_TXPITCH_2:
case R200_PP_TXPITCH_3:
case R200_PP_TXPITCH_4:
case R200_PP_TXPITCH_5:
i = (reg - R200_PP_TXPITCH_0) / 32;
track->textures[i].pitch = idx_value + 32;
track->tex_dirty = true;
break;
case R200_PP_TXFILTER_0:
case R200_PP_TXFILTER_1:
case R200_PP_TXFILTER_2:
case R200_PP_TXFILTER_3:
case R200_PP_TXFILTER_4:
case R200_PP_TXFILTER_5:
i = (reg - R200_PP_TXFILTER_0) / 32;
track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
>> R200_MAX_MIP_LEVEL_SHIFT);
tmp = (idx_value >> 23) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_w = false;
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
track->tex_dirty = true;
break;
case R200_PP_TXMULTI_CTL_0:
case R200_PP_TXMULTI_CTL_1:
case R200_PP_TXMULTI_CTL_2:
case R200_PP_TXMULTI_CTL_3:
case R200_PP_TXMULTI_CTL_4:
case R200_PP_TXMULTI_CTL_5:
i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
break;
case R200_PP_TXFORMAT_X_0:
case R200_PP_TXFORMAT_X_1:
case R200_PP_TXFORMAT_X_2:
case R200_PP_TXFORMAT_X_3:
case R200_PP_TXFORMAT_X_4:
case R200_PP_TXFORMAT_X_5:
i = (reg - R200_PP_TXFORMAT_X_0) / 32;
track->textures[i].txdepth = idx_value & 0x7;
tmp = (idx_value >> 16) & 0x3;
/* 2D, 3D, CUBE */
switch (tmp) {
case 0:
case 3:
case 4:
case 5:
case 6:
case 7:
/* 1D/2D */
track->textures[i].tex_coord_type = 0;
break;
case 1:
/* CUBE */
track->textures[i].tex_coord_type = 2;
break;
case 2:
/* 3D */
track->textures[i].tex_coord_type = 1;
break;
}
track->tex_dirty = true;
break;
case R200_PP_TXFORMAT_0:
case R200_PP_TXFORMAT_1:
case R200_PP_TXFORMAT_2:
case R200_PP_TXFORMAT_3:
case R200_PP_TXFORMAT_4:
case R200_PP_TXFORMAT_5:
i = (reg - R200_PP_TXFORMAT_0) / 32;
if (idx_value & R200_TXFORMAT_NON_POWER2) {
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
}
if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
track->textures[i].lookup_disable = true;
switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case R200_TXFORMAT_I8:
case R200_TXFORMAT_RGB332:
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
case R200_TXFORMAT_RGB565:
case R200_TXFORMAT_ARGB4444:
case R200_TXFORMAT_VYUY422:
case R200_TXFORMAT_YVYU422:
case R200_TXFORMAT_LDVDU655:
case R200_TXFORMAT_DVDU88:
case R200_TXFORMAT_AVYU4444:
track->textures[i].cpp = 2;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_ARGB8888:
case R200_TXFORMAT_RGBA8888:
case R200_TXFORMAT_ABGR8888:
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
track->textures[i].cpp = 4;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
break;
case R200_TXFORMAT_DXT23:
case R200_TXFORMAT_DXT45:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
break;
}
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
track->tex_dirty = true;
break;
case R200_PP_CUBIC_FACES_0:
case R200_PP_CUBIC_FACES_1:
case R200_PP_CUBIC_FACES_2:
case R200_PP_CUBIC_FACES_3:
case R200_PP_CUBIC_FACES_4:
case R200_PP_CUBIC_FACES_5:
tmp = idx_value;
i = (reg - R200_PP_CUBIC_FACES_0) / 32;
for (face = 0; face < 4; face++) {
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
}
track->tex_dirty = true;
break;
default:
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
reg, idx);
return -EINVAL;
}
return 0;
}
void r200_set_safe_registers(struct radeon_device *rdev)
{
rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
}
| gpl-2.0 |
profglavcho/tesr1 | drivers/gpu/drm/i915/i915_gem_evict.c | 5259 | 6367 | /*
* Copyright © 2008-2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
list_add(&obj->exec_list, unwind);
return drm_mm_scan_add_block(obj->gtt_space);
}
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj;
int ret = 0;
trace_i915_gem_evict(dev, min_size, alignment, mappable);
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
* retirement order. The next objects to retire are those on the (per
* ring) active list that do not have an outstanding flush. Once the
* hardware reports completion (the seqno is updated after the
* batchbuffer has been finished) the clean buffer objects would
* be retired to the inactive list. Any dirty objects would be added
* to the tail of the flushing list. So after processing the clean
* active objects we need to emit a MI_FLUSH to retire the flushing
* list, hence the retirement order of the flushing list is in
* advance of the dirty objects on the active lists.
*
* The retirement sequence is thus:
* 1. Inactive objects (already retired)
* 2. Clean active objects
* 3. Flushing list
* 4. Dirty active objects.
*
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
INIT_LIST_HEAD(&unwind_list);
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
alignment, 0,
dev_priv->mm.gtt_mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
if (obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (obj->pin_count)
continue;
if (mark_free(obj, &unwind_list))
goto found;
}
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (!obj->base.write_domain || obj->pin_count)
continue;
if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
list_del_init(&obj->exec_list);
}
/* We expect the caller to unpin, evict all and try again, or give up.
* So calling i915_gem_evict_everything() is unnecessary.
*/
return -ENOSPC;
found:
/* drm_mm doesn't allow any other operations while
* scanning, therefore store to be evicted objects on a
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
continue;
}
list_del_init(&obj->exec_list);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
obj = list_first_entry(&eviction_list,
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
ret = i915_gem_object_unbind(obj);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
return ret;
}
int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
bool lists_empty;
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
trace_i915_gem_evict_everything(dev, purgeable_only);
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev, true);
if (ret)
return ret;
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
int ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
}
return 0;
}
| gpl-2.0 |
justin0406/MiRaGe-GEE | net/llc/sysctl_net_llc.c | 8075 | 2008 | /*
* sysctl_net_llc.c: sysctl interface to LLC net subsystem.
*
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/llc.h>
#ifndef CONFIG_SYSCTL
#error This file should not be compiled without CONFIG_SYSCTL defined
#endif
static struct ctl_table llc2_timeout_table[] = {
{
.procname = "ack",
.data = &sysctl_llc2_ack_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "busy",
.data = &sysctl_llc2_busy_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "p",
.data = &sysctl_llc2_p_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "rej",
.data = &sysctl_llc2_rej_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ },
};
static struct ctl_table llc_station_table[] = {
{
.procname = "ack_timeout",
.data = &sysctl_llc_station_ack_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ },
};
static struct ctl_table llc2_dir_timeout_table[] = {
{
.procname = "timeout",
.mode = 0555,
.child = llc2_timeout_table,
},
{ },
};
static struct ctl_table llc_table[] = {
{
.procname = "llc2",
.mode = 0555,
.child = llc2_dir_timeout_table,
},
{
.procname = "station",
.mode = 0555,
.child = llc_station_table,
},
{ },
};
static struct ctl_path llc_path[] = {
{ .procname = "net", },
{ .procname = "llc", },
{ }
};
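/* The tables above yield the following writable (mode 0644) jiffies
 * knobs: /proc/sys/net/llc/llc2/timeout/{ack,busy,p,rej} and
 * /proc/sys/net/llc/station/ack_timeout, all handled by
 * proc_dointvec_jiffies. */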
static struct ctl_table_header *llc_table_header;
int __init llc_sysctl_init(void)
{
llc_table_header = register_sysctl_paths(llc_path, llc_table);
return llc_table_header ? 0 : -ENOMEM;
}
void llc_sysctl_exit(void)
{
if (llc_table_header) {
unregister_sysctl_table(llc_table_header);
llc_table_header = NULL;
}
}
| gpl-2.0 |
Jackeagle/android_kernel_htc_dlxub1 | net/llc/sysctl_net_llc.c | 8075 | 2008 | /*
* sysctl_net_llc.c: sysctl interface to LLC net subsystem.
*
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <net/llc.h>
#ifndef CONFIG_SYSCTL
#error This file should not be compiled without CONFIG_SYSCTL defined
#endif
static struct ctl_table llc2_timeout_table[] = {
{
.procname = "ack",
.data = &sysctl_llc2_ack_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "busy",
.data = &sysctl_llc2_busy_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "p",
.data = &sysctl_llc2_p_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "rej",
.data = &sysctl_llc2_rej_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ },
};
static struct ctl_table llc_station_table[] = {
{
.procname = "ack_timeout",
.data = &sysctl_llc_station_ack_timeout,
.maxlen = sizeof(long),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{ },
};
static struct ctl_table llc2_dir_timeout_table[] = {
{
.procname = "timeout",
.mode = 0555,
.child = llc2_timeout_table,
},
{ },
};
static struct ctl_table llc_table[] = {
{
.procname = "llc2",
.mode = 0555,
.child = llc2_dir_timeout_table,
},
{
.procname = "station",
.mode = 0555,
.child = llc_station_table,
},
{ },
};
static struct ctl_path llc_path[] = {
{ .procname = "net", },
{ .procname = "llc", },
{ }
};
static struct ctl_table_header *llc_table_header;
int __init llc_sysctl_init(void)
{
llc_table_header = register_sysctl_paths(llc_path, llc_table);
return llc_table_header ? 0 : -ENOMEM;
}
void llc_sysctl_exit(void)
{
if (llc_table_header) {
unregister_sysctl_table(llc_table_header);
llc_table_header = NULL;
}
}
| gpl-2.0 |
gouwa/linux-rk3288 | drivers/parport/ieee1284.c | 12683 | 23096 | /*
* IEEE-1284 implementation for parport.
*
* Authors: Phil Blundell <philb@gnu.org>
* Carsten Gross <carsten@sol.wohnheim.uni-ulm.de>
* Jose Renau <renau@acm.org>
* Tim Waugh <tim@cyberelk.demon.co.uk> (largely rewritten)
*
* This file is responsible for IEEE 1284 negotiation, and for handing
* read/write requests to low-level drivers.
*
* Any part of this program may be used in documents licensed under
* the GNU Free Documentation License, Version 1.1 or any later version
* published by the Free Software Foundation.
*
* Various hacks, Fred Barnes <frmb2@ukc.ac.uk>, 04/2000
*/
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/sched.h>
#undef DEBUG /* undef me for production */
#ifdef CONFIG_LP_CONSOLE
#undef DEBUG /* Don't want a garbled console */
#endif
#ifdef DEBUG
#define DPRINTK(stuff...) printk (stuff)
#else
#define DPRINTK(stuff...)
#endif
/* Make parport_wait_peripheral wake up.
* It will be useful to call this from an interrupt handler. */
static void parport_ieee1284_wakeup (struct parport *port)
{
up (&port->physport->ieee1284.irq);
}
static struct parport *port_from_cookie[PARPORT_MAX];
static void timeout_waiting_on_port (unsigned long cookie)
{
parport_ieee1284_wakeup (port_from_cookie[cookie % PARPORT_MAX]);
}
/**
* parport_wait_event - wait for an event on a parallel port
* @port: port to wait on
* @timeout: time to wait (in jiffies)
*
* This function waits for up to @timeout jiffies for an
* interrupt to occur on a parallel port. If the port timeout is
* set to zero, it returns immediately.
*
* If an interrupt occurs before the timeout period elapses, this
* function returns zero immediately. If it times out, it returns
* one. An error code less than zero indicates an error (most
* likely a pending signal), and the calling code should finish
* what it's doing as soon as it can.
*/
int parport_wait_event (struct parport *port, signed long timeout)
{
int ret;
struct timer_list timer;
if (!port->physport->cad->timeout)
/* Zero timeout is special, and we can't down() the
semaphore. */
return 1;
init_timer_on_stack(&timer);
timer.expires = jiffies + timeout;
timer.function = timeout_waiting_on_port;
port_from_cookie[port->number % PARPORT_MAX] = port;
timer.data = port->number;
add_timer (&timer);
ret = down_interruptible (&port->physport->ieee1284.irq);
if (!del_timer_sync(&timer) && !ret)
/* Timed out. */
ret = 1;
destroy_timer_on_stack(&timer);
return ret;
}
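/* Illustrative sketch (an assumed call site, not from this file): a
 * caller typically distinguishes all three documented outcomes of
 * parport_wait_event(); the helper names are hypothetical.
 *
 *	ret = parport_wait_event (port, msecs_to_jiffies (10));
 *	if (ret < 0)
 *		return ret;		// signal pending -- finish up quickly
 *	else if (ret == 0)
 *		check_status (port);	// interrupt arrived (hypothetical helper)
 *	else
 *		retry_or_give_up ();	// timed out (hypothetical helper)
 */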
/**
* parport_poll_peripheral - poll status lines
* @port: port to watch
* @mask: status lines to watch
* @result: desired values of chosen status lines
* @usec: timeout
*
* This function busy-waits until the masked status lines have
* the desired values, or until the timeout period elapses. The
* @mask and @result parameters are bitmasks, with the bits
* defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
* and so on.
*
* This function does not call schedule(); instead it busy-waits
* using udelay(). It currently has a resolution of 5usec.
*
* If the status lines take on the desired values before the
* timeout period elapses, parport_poll_peripheral() returns zero
* immediately. A return value greater than zero indicates
* a timeout. An error code (less than zero) indicates an error,
* most likely a signal that arrived, and the caller should
* finish what it is doing as soon as possible.
*/
int parport_poll_peripheral(struct parport *port,
unsigned char mask,
unsigned char result,
int usec)
{
/* Zero return code is success, >0 is timeout. */
int count = usec / 5 + 2;
int i;
unsigned char status;
for (i = 0; i < count; i++) {
status = parport_read_status (port);
if ((status & mask) == result)
return 0;
if (signal_pending (current))
return -EINTR;
if (need_resched())
break;
if (i >= 2)
udelay (5);
}
return 1;
}
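/* Illustrative sketch (assumed call site, not from this file):
 * busy-wait up to 100us for the masked status bits to reach the
 * desired value, per the mask/result convention documented above.
 *
 *	if (parport_poll_peripheral (port, PARPORT_STATUS_ACK,
 *				     PARPORT_STATUS_ACK, 100) == 0)
 *		; // nAck status bit reads as set within 100us
 */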
/**
* parport_wait_peripheral - wait for status lines to change in 35ms
* @port: port to watch
* @mask: status lines to watch
* @result: desired values of chosen status lines
*
* This function waits until the masked status lines have the
* desired values, or until 35ms have elapsed (see IEEE 1284-1994
* page 24 to 25 for why this value in particular is hardcoded).
* The @mask and @result parameters are bitmasks, with the bits
* defined by the constants in parport.h: %PARPORT_STATUS_BUSY,
* and so on.
*
* The port is polled quickly to start off with, in anticipation
* of a fast response from the peripheral. This fast polling
* time is configurable (using /proc), and defaults to 500usec.
* If the timeout for this port (see parport_set_timeout()) is
* zero, the fast polling time is 35ms, and this function does
* not call schedule().
*
* If the timeout for this port is non-zero, after the fast
* polling fails it uses parport_wait_event() to wait for up to
* 10ms, waking up if an interrupt occurs.
*/
int parport_wait_peripheral(struct parport *port,
unsigned char mask,
unsigned char result)
{
int ret;
int usec;
unsigned long deadline;
unsigned char status;
usec = port->physport->spintime; /* usecs of fast polling */
if (!port->physport->cad->timeout)
/* A zero timeout is "special": busy wait for the
entire 35ms. */
usec = 35000;
/* Fast polling.
*
* This should be adjustable.
* How about making a note (in the device structure) of how long
* it takes, so we know for next time?
*/
ret = parport_poll_peripheral (port, mask, result, usec);
if (ret != 1)
return ret;
if (!port->physport->cad->timeout)
/* We may be in an interrupt handler, so we can't poll
* slowly anyway. */
return 1;
/* 40ms of slow polling. */
deadline = jiffies + msecs_to_jiffies(40);
while (time_before (jiffies, deadline)) {
if (signal_pending (current))
return -EINTR;
/* Wait for 10ms (or until an interrupt occurs if
* the handler is set) */
if ((ret = parport_wait_event (port, msecs_to_jiffies(10))) < 0)
return ret;
status = parport_read_status (port);
if ((status & mask) == result)
return 0;
if (!ret) {
/* parport_wait_event didn't time out, but the
* peripheral wasn't actually ready either.
* Wait for another 10ms. */
schedule_timeout_interruptible(msecs_to_jiffies(10));
}
}
return 1;
}
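/* Illustrative sketch: the same mask/result pair applies here, e.g.
 * waiting within the IEEE 1284 35ms window for nAck to go high (an
 * assumed call site, not from this file):
 *
 *	if (parport_wait_peripheral (port, PARPORT_STATUS_ACK,
 *				     PARPORT_STATUS_ACK) == 0)
 *		; // event seen in time
 */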
#ifdef CONFIG_PARPORT_1284
/* Terminate a negotiated mode. */
static void parport_ieee1284_terminate (struct parport *port)
{
int r;
port = port->physport;
/* EPP terminates differently. */
switch (port->ieee1284.mode) {
case IEEE1284_MODE_EPP:
case IEEE1284_MODE_EPPSL:
case IEEE1284_MODE_EPPSWE:
/* Terminate from EPP mode. */
/* Event 68: Set nInit low */
parport_frob_control (port, PARPORT_CONTROL_INIT, 0);
udelay (50);
/* Event 69: Set nInit high, nSelectIn low */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_INIT,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_INIT);
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
case IEEE1284_MODE_ECPSWE:
/* In ECP we can only terminate from fwd idle phase. */
if (port->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
/* Event 47: Set nInit high */
parport_frob_control (port,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_INIT
| PARPORT_CONTROL_AUTOFD);
/* Event 49: PError goes high */
r = parport_wait_peripheral (port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
DPRINTK (KERN_INFO "%s: Timeout at event 49\n",
port->name);
parport_data_forward (port);
DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
/* fall-through.. */
default:
/* Terminate from all other modes. */
/* Event 22: Set nSelectIn low, nAutoFd high */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_SELECT);
/* Event 24: nAck goes low */
r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
if (r)
DPRINTK (KERN_INFO "%s: Timeout at event 24\n",
port->name);
/* Event 25: Set nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 27: nAck goes high */
r = parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK);
if (r)
DPRINTK (KERN_INFO "%s: Timeout at event 27\n",
port->name);
/* Event 29: Set nAutoFd high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
}
port->ieee1284.mode = IEEE1284_MODE_COMPAT;
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
DPRINTK (KERN_DEBUG "%s: In compatibility (forward idle) mode\n",
port->name);
}
#endif /* IEEE1284 support */
/**
* parport_negotiate - negotiate an IEEE 1284 mode
* @port: port to use
* @mode: mode to negotiate to
*
* Use this to negotiate to a particular IEEE 1284 transfer mode.
* The @mode parameter should be one of the constants in
* parport.h starting %IEEE1284_MODE_xxx.
*
* The return value is 0 if the peripheral has accepted the
* negotiation to the mode specified, -1 if the peripheral is not
* IEEE 1284 compliant (or not present), or 1 if the peripheral
* has rejected the negotiation.
*/
int parport_negotiate (struct parport *port, int mode)
{
#ifndef CONFIG_PARPORT_1284
if (mode == IEEE1284_MODE_COMPAT)
return 0;
printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
return -1;
#else
int m = mode & ~IEEE1284_ADDR;
int r;
unsigned char xflag;
port = port->physport;
/* Is there anything to do? */
if (port->ieee1284.mode == mode)
return 0;
/* Is the difference just an address-or-not bit? */
if ((port->ieee1284.mode & ~IEEE1284_ADDR) == (mode & ~IEEE1284_ADDR)){
port->ieee1284.mode = mode;
return 0;
}
/* Go to compatibility forward idle mode */
if (port->ieee1284.mode != IEEE1284_MODE_COMPAT)
parport_ieee1284_terminate (port);
if (mode == IEEE1284_MODE_COMPAT)
/* Compatibility mode: no negotiation. */
return 0;
switch (mode) {
case IEEE1284_MODE_ECPSWE:
m = IEEE1284_MODE_ECP;
break;
case IEEE1284_MODE_EPPSL:
case IEEE1284_MODE_EPPSWE:
m = IEEE1284_MODE_EPP;
break;
case IEEE1284_MODE_BECP:
return -ENOSYS; /* FIXME (implement BECP) */
}
if (mode & IEEE1284_EXT_LINK)
m = 1<<7; /* request extensibility link */
port->ieee1284.phase = IEEE1284_PH_NEGOTIATION;
/* Start off with nStrobe and nAutoFd high, and nSelectIn low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_AUTOFD
| PARPORT_CONTROL_SELECT,
PARPORT_CONTROL_SELECT);
udelay(1);
/* Event 0: Set data */
parport_data_forward (port);
parport_write_data (port, m);
udelay (400); /* Shouldn't need to wait this long. */
/* Event 1: Set nSelectIn high, nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 2: PError, Select, nFault go high, nAck goes low */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ERROR
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_ACK,
PARPORT_STATUS_ERROR
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_PAPEROUT)) {
/* Timeout */
parport_frob_control (port,
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_SELECT);
DPRINTK (KERN_DEBUG
"%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
port->name, parport_read_status (port));
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return -1; /* Not IEEE1284 compliant */
}
/* Event 3: Set nStrobe low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
/* Event 4: Set nStrobe and nAutoFd high */
udelay (5);
parport_frob_control (port,
PARPORT_CONTROL_STROBE
| PARPORT_CONTROL_AUTOFD,
0);
/* Event 6: nAck goes high */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant device. */
DPRINTK (KERN_DEBUG
"%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode, port->ops->read_status (port));
parport_ieee1284_terminate (port);
return 1;
}
xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;
/* xflag should be high for all modes other than nibble (0). */
if (mode && !xflag) {
/* Mode not supported. */
DPRINTK (KERN_DEBUG "%s: Mode 0x%02x rejected by peripheral\n",
port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
/* More to do if we've requested extensibility link. */
if (mode & IEEE1284_EXT_LINK) {
m = mode & 0x7f;
udelay (1);
parport_write_data (port, m);
udelay (1);
/* Event 51: Set nStrobe low */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
/* Event 52: nAck goes low */
if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
/* This peripheral is _very_ slow. */
DPRINTK (KERN_DEBUG
"%s: Event 52 didn't happen\n",
port->name);
parport_ieee1284_terminate (port);
return 1;
}
/* Event 53: Set nStrobe high */
parport_frob_control (port,
PARPORT_CONTROL_STROBE,
0);
/* Event 55: nAck goes high */
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant
* device. */
DPRINTK (KERN_DEBUG
"%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode,
port->ops->read_status (port));
parport_ieee1284_terminate (port);
return 1;
}
/* Event 54: Peripheral sets XFlag to reflect support */
xflag = parport_read_status (port) & PARPORT_STATUS_SELECT;
/* xflag should be high. */
if (!xflag) {
/* Extended mode not supported. */
DPRINTK (KERN_DEBUG "%s: Extended mode 0x%02x not "
"supported\n", port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
/* Any further setup is left to the caller. */
}
/* Mode is supported */
DPRINTK (KERN_DEBUG "%s: In mode 0x%02x\n", port->name, mode);
port->ieee1284.mode = mode;
/* But ECP is special */
if (!(mode & IEEE1284_EXT_LINK) && (m & IEEE1284_MODE_ECP)) {
port->ieee1284.phase = IEEE1284_PH_ECP_SETUP;
/* Event 30: Set nAutoFd low */
parport_frob_control (port,
PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_AUTOFD);
/* Event 31: PError goes high. */
r = parport_wait_peripheral (port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
DPRINTK (KERN_INFO "%s: Timeout at event 31\n",
port->name);
}
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
port->name);
} else switch (mode) {
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
break;
default:
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
return 0;
#endif /* IEEE1284 support */
}
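/* Illustrative negotiation sketch (assumed caller, not from this
 * file), mapping the three documented return values; the error codes
 * chosen here are assumptions:
 *
 *	switch (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
 *	case 0:		// peripheral accepted nibble mode
 *		break;
 *	case -1:	// not IEEE 1284 compliant, or absent
 *		return -ENODEV;
 *	case 1:		// compliant, but this mode was rejected
 *		return -EIO;
 *	}
 */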
/* Acknowledge that the peripheral has data available.
* Events 18-20, in order to get from Reverse Idle phase
* to Host Busy Data Available.
* This will most likely be called from an interrupt.
* Returns zero if data was available.
*/
#ifdef CONFIG_PARPORT_1284
static int parport_ieee1284_ack_data_avail (struct parport *port)
{
if (parport_read_status (port) & PARPORT_STATUS_ERROR)
/* Event 18 didn't happen. */
return -1;
/* Event 20: nAutoFd goes high. */
port->ops->frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
return 0;
}
#endif /* IEEE1284 support */
/* Handle an interrupt. */
void parport_ieee1284_interrupt (void *handle)
{
struct parport *port = handle;
parport_ieee1284_wakeup (port);
#ifdef CONFIG_PARPORT_1284
if (port->ieee1284.phase == IEEE1284_PH_REV_IDLE) {
/* An interrupt in this phase means that data
* is now available. */
DPRINTK (KERN_DEBUG "%s: Data available\n", port->name);
parport_ieee1284_ack_data_avail (port);
}
#endif /* IEEE1284 support */
}
/**
* parport_write - write a block of data to a parallel port
* @port: port to write to
* @buffer: data buffer (in kernel space)
* @len: number of bytes of data to transfer
*
* This will write up to @len bytes of @buffer to the port
* specified, using the IEEE 1284 transfer mode most recently
* negotiated to (using parport_negotiate()), as long as that
* mode supports forward transfers (host to peripheral).
*
* It is the caller's responsibility to ensure that the first
* @len bytes of @buffer are valid.
*
* This function returns the number of bytes transferred (if zero
* or positive), or else an error code.
*/
ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
return port->ops->compat_write_data (port, buffer, len, 0);
#else
ssize_t retval;
int mode = port->ieee1284.mode;
int addr = mode & IEEE1284_ADDR;
size_t (*fn) (struct parport *, const void *, size_t, int);
/* Ignore the device-ID-request bit and the address bit. */
mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
/* Use the mode we're in. */
switch (mode) {
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
parport_negotiate (port, IEEE1284_MODE_COMPAT);
case IEEE1284_MODE_COMPAT:
DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
port->name);
fn = port->ops->compat_write_data;
break;
case IEEE1284_MODE_EPP:
DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_write_addr;
} else {
fn = port->ops->epp_write_data;
}
break;
case IEEE1284_MODE_EPPSWE:
DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
port->name);
if (addr) {
fn = parport_ieee1284_epp_write_addr;
} else {
fn = parport_ieee1284_epp_write_data;
}
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
if (addr) {
fn = port->ops->ecp_write_addr;
} else {
fn = port->ops->ecp_write_data;
}
break;
case IEEE1284_MODE_ECPSWE:
DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
port->name);
/* The caller has specified that it must be emulated,
* even if we have ECP hardware! */
if (addr) {
fn = parport_ieee1284_ecp_write_addr;
} else {
fn = parport_ieee1284_ecp_write_data;
}
break;
default:
DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
port->ieee1284.mode);
return -ENOSYS;
}
retval = (*fn) (port, buffer, len, 0);
DPRINTK (KERN_DEBUG "%s: wrote %d/%d bytes\n", port->name, retval, len);
return retval;
#endif /* IEEE1284 support */
}
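/* Illustrative forward-transfer sketch (assumed buffer and length
 * names; not from this file):
 *
 *	ssize_t wrote = parport_write (port, buf, len);
 *	if (wrote < 0)
 *		return wrote;	// error code from the selected mode
 *	// a short count is possible; the caller decides whether to retry
 */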
/**
* parport_read - read a block of data from a parallel port
* @port: port to read from
* @buffer: data buffer (in kernel space)
* @len: number of bytes of data to transfer
*
* This will read up to @len bytes of @buffer to the port
* specified, using the IEEE 1284 transfer mode most recently
* negotiated to (using parport_negotiate()), as long as that
* mode supports reverse transfers (peripheral to host).
*
* It is the caller's responsibility to ensure that the first
* @len bytes of @buffer are available to write to.
*
* This function returns the number of bytes transferred (if zero
* or positive), or else an error code.
*/
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
return -ENODEV;
#else
int mode = port->physport->ieee1284.mode;
int addr = mode & IEEE1284_ADDR;
size_t (*fn) (struct parport *, void *, size_t, int);
/* Ignore the device-ID-request bit and the address bit. */
mode &= ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
/* Use the mode we're in. */
switch (mode) {
case IEEE1284_MODE_COMPAT:
/* if we can tri-state use BYTE mode instead of NIBBLE mode,
* if that fails, revert to NIBBLE mode -- ought to store somewhere
* the device's ability to do BYTE mode reverse transfers, so we don't
* end up needlessly calling negotiate(BYTE) repeatedly.. (fb)
*/
if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
!parport_negotiate (port, IEEE1284_MODE_BYTE)) {
/* got into BYTE mode OK */
DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
}
if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
return -EIO;
}
/* fall through to NIBBLE */
case IEEE1284_MODE_NIBBLE:
DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
break;
case IEEE1284_MODE_BYTE:
DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
case IEEE1284_MODE_EPP:
DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_read_addr;
} else {
fn = port->ops->epp_read_data;
}
break;
case IEEE1284_MODE_EPPSWE:
DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
port->name);
if (addr) {
fn = parport_ieee1284_epp_read_addr;
} else {
fn = parport_ieee1284_epp_read_data;
}
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
fn = port->ops->ecp_read_data;
break;
case IEEE1284_MODE_ECPSWE:
DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
port->name);
fn = parport_ieee1284_ecp_read_data;
break;
default:
DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
port->physport->ieee1284.mode);
return -ENOSYS;
}
return (*fn) (port, buffer, len, 0);
#endif /* IEEE1284 support */
}
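/* Illustrative reverse-transfer sketch (assumed names, not from this
 * file), pairing parport_read() with a prior negotiation to a
 * reverse-capable mode:
 *
 *	if (parport_negotiate (port, IEEE1284_MODE_NIBBLE) == 0) {
 *		ssize_t got = parport_read (port, buf, len);
 *		if (got < 0)
 *			return got;	// error code from the selected mode
 *	}
 */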
/**
* parport_set_timeout - set the inactivity timeout for a device
* @dev: device on a port
* @inactivity: inactivity timeout (in jiffies)
*
* This sets the inactivity timeout for a particular device on a
* port. This affects functions like parport_wait_peripheral().
* The special value 0 means not to call schedule() while dealing
* with this device.
*
* The return value is the previous inactivity timeout.
*
* Any callers of parport_wait_event() for this device are woken
* up.
*/
long parport_set_timeout (struct pardevice *dev, long inactivity)
{
long int old = dev->timeout;
dev->timeout = inactivity;
if (dev->port->physport->cad == dev)
parport_ieee1284_wakeup (dev->port);
return old;
}
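/* Illustrative save/restore pattern (assumed device pointer, not from
 * this file):
 *
 *	long old = parport_set_timeout (dev, msecs_to_jiffies (500));
 *	// ... perform transfers ...
 *	parport_set_timeout (dev, old);	// restore the previous timeout
 */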
/* Exported symbols for modules. */
EXPORT_SYMBOL(parport_negotiate);
EXPORT_SYMBOL(parport_write);
EXPORT_SYMBOL(parport_read);
EXPORT_SYMBOL(parport_wait_peripheral);
EXPORT_SYMBOL(parport_wait_event);
EXPORT_SYMBOL(parport_set_timeout);
EXPORT_SYMBOL(parport_ieee1284_interrupt);
| gpl-2.0 |
htc-mirror/jewel-ics-crc-3.0.8-3fd0422 | drivers/usb/host/ohci-mem.c | 12939 | 3398 | /*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
*
* This file is licenced under the GPL.
*/
/*-------------------------------------------------------------------------*/
/*
* OHCI deals with three types of memory:
* - data used only by the HCD ... kmalloc is fine
* - async and periodic schedules, shared by HC and HCD ... these
* need to use dma_pool or dma_alloc_coherent
* - driver buffers, read/written by HC ... the hcd glue or the
* device driver provides us with dma addresses
*
* There's also "register" data, which is memory mapped.
* No memory seen by this driver (or any HCD) may be paged out.
*/
/*-------------------------------------------------------------------------*/
static void ohci_hcd_init (struct ohci_hcd *ohci)
{
ohci->next_statechange = jiffies;
spin_lock_init (&ohci->lock);
INIT_LIST_HEAD (&ohci->pending);
}
/*-------------------------------------------------------------------------*/
static int ohci_mem_init (struct ohci_hcd *ohci)
{
ohci->td_cache = dma_pool_create ("ohci_td",
ohci_to_hcd(ohci)->self.controller,
sizeof (struct td),
32 /* byte alignment */,
0 /* no page-crossing issues */);
if (!ohci->td_cache)
return -ENOMEM;
ohci->ed_cache = dma_pool_create ("ohci_ed",
ohci_to_hcd(ohci)->self.controller,
sizeof (struct ed),
16 /* byte alignment */,
0 /* no page-crossing issues */);
if (!ohci->ed_cache) {
dma_pool_destroy (ohci->td_cache);
return -ENOMEM;
}
return 0;
}
static void ohci_mem_cleanup (struct ohci_hcd *ohci)
{
if (ohci->td_cache) {
dma_pool_destroy (ohci->td_cache);
ohci->td_cache = NULL;
}
if (ohci->ed_cache) {
dma_pool_destroy (ohci->ed_cache);
ohci->ed_cache = NULL;
}
}
/*-------------------------------------------------------------------------*/
/* ohci "done list" processing needs this mapping */
static inline struct td *
dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma)
{
struct td *td;
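	/* Strip the low flag/alignment bits from the done-list pointer
	 * (TD_MASK), then walk this hash bucket's collision chain until
	 * the TD whose dma address matches is found (or NULL). */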
td_dma &= TD_MASK;
td = hc->td_hash [TD_HASH_FUNC(td_dma)];
while (td && td->td_dma != td_dma)
td = td->td_hash;
return td;
}
/* TDs ... */
static struct td *
td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
dma_addr_t dma;
struct td *td;
td = dma_pool_alloc (hc->td_cache, mem_flags, &dma);
if (td) {
/* in case hc fetches it, make it look dead */
memset (td, 0, sizeof *td);
td->hwNextTD = cpu_to_hc32 (hc, dma);
td->td_dma = dma;
/* hashed in td_fill */
}
return td;
}
static void
td_free (struct ohci_hcd *hc, struct td *td)
{
struct td **prev = &hc->td_hash [TD_HASH_FUNC (td->td_dma)];
while (*prev && *prev != td)
prev = &(*prev)->td_hash;
if (*prev)
*prev = td->td_hash;
else if ((td->hwINFO & cpu_to_hc32(hc, TD_DONE)) != 0)
ohci_dbg (hc, "no hash for td %p\n", td);
dma_pool_free (hc->td_cache, td, td->td_dma);
}
/*-------------------------------------------------------------------------*/
/* EDs ... */
static struct ed *
ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
{
dma_addr_t dma;
struct ed *ed;
ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma);
if (ed) {
memset (ed, 0, sizeof (*ed));
INIT_LIST_HEAD (&ed->td_list);
ed->dma = dma;
}
return ed;
}
static void
ed_free (struct ohci_hcd *hc, struct ed *ed)
{
dma_pool_free (hc->ed_cache, ed, ed->dma);
}
| gpl-2.0 |
chraso/GT-I5500_ | GT-I5500_OpenSource_Kernel/kernel/drivers/rapidio/rio-driver.c | 140 | 6092 | /*
* RapidIO driver support
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_ids.h>
#include "rio.h"
/**
* rio_match_device - Tell if a RIO device has a matching RIO device id structure
* @id: the RIO device id structure to match against
* @rdev: the RIO device structure to match against
*
* Used from driver probe and bus matching to check whether a RIO device
* matches a device id structure provided by a RIO driver. Returns the
* matching &struct rio_device_id or %NULL if there is no match.
*/
static const struct rio_device_id *rio_match_device(const struct rio_device_id
*id,
const struct rio_dev *rdev)
{
while (id->vid || id->asm_vid) {
if (((id->vid == RIO_ANY_ID) || (id->vid == rdev->vid)) &&
((id->did == RIO_ANY_ID) || (id->did == rdev->did)) &&
((id->asm_vid == RIO_ANY_ID)
|| (id->asm_vid == rdev->asm_vid))
&& ((id->asm_did == RIO_ANY_ID)
|| (id->asm_did == rdev->asm_did)))
return id;
id++;
}
return NULL;
}
/**
* rio_dev_get - Increments the reference count of the RIO device structure
*
* @rdev: RIO device being referenced
*
* Each live reference to a device should be refcounted.
*
* Drivers for RIO devices should normally record such references in
* their probe() methods, when they bind to a device, and release
* them by calling rio_dev_put(), in their disconnect() methods.
*/
struct rio_dev *rio_dev_get(struct rio_dev *rdev)
{
if (rdev)
get_device(&rdev->dev);
return rdev;
}
/**
* rio_dev_put - Release a use of the RIO device structure
*
* @rdev: RIO device being disconnected
*
* Must be called when a user of a device is finished with it.
* When the last user of the device calls this function, the
* memory of the device is freed.
*/
void rio_dev_put(struct rio_dev *rdev)
{
if (rdev)
put_device(&rdev->dev);
}
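/* Illustrative refcount pairing (hypothetical driver callbacks, not
 * from this file), following the probe()/disconnect() rule documented
 * above:
 *
 *	static int foo_probe(struct rio_dev *rdev,
 *			     const struct rio_device_id *id)
 *	{
 *		rio_dev_get(rdev);	// hold a reference while bound
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct rio_dev *rdev)
 *	{
 *		rio_dev_put(rdev);	// drop it on unbind
 *	}
 */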
/**
* rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure
* @dev: the RIO device structure to match against
*
* return 0 and set rio_dev->driver when drv claims rio_dev, else error
*/
static int rio_device_probe(struct device *dev)
{
struct rio_driver *rdrv = to_rio_driver(dev->driver);
struct rio_dev *rdev = to_rio_dev(dev);
int error = -ENODEV;
const struct rio_device_id *id;
if (!rdev->driver && rdrv->probe) {
if (!rdrv->id_table)
return error;
id = rio_match_device(rdrv->id_table, rdev);
rio_dev_get(rdev);
if (id)
error = rdrv->probe(rdev, id);
if (error >= 0) {
rdev->driver = rdrv;
error = 0;
} else
rio_dev_put(rdev);
}
return error;
}
/**
* rio_device_remove - Remove a RIO device from the system
*
* @dev: the RIO device structure to match against
*
* Remove a RIO device from the system. If it has an associated
* driver, then run the driver remove() method. Then update
* the reference count.
*/
static int rio_device_remove(struct device *dev)
{
struct rio_dev *rdev = to_rio_dev(dev);
struct rio_driver *rdrv = rdev->driver;
if (rdrv) {
if (rdrv->remove)
rdrv->remove(rdev);
rdev->driver = NULL;
}
rio_dev_put(rdev);
return 0;
}
/**
* rio_register_driver - register a new RIO driver
* @rdrv: the RIO driver structure to register
*
* Adds a &struct rio_driver to the list of registered drivers.
* Returns a negative value on error, otherwise 0. If no error
* occurred, the driver remains registered even if no device
* was claimed during registration.
*/
int rio_register_driver(struct rio_driver *rdrv)
{
/* initialize common driver fields */
rdrv->driver.name = rdrv->name;
rdrv->driver.bus = &rio_bus_type;
/* register with core */
return driver_register(&rdrv->driver);
}
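/* Illustrative registration sketch (hypothetical "foo" driver; the
 * ids and callbacks are assumptions, not from this file):
 *
 *	static const struct rio_device_id foo_id_table[] = {
 *		{ .did = 0x1234, .vid = 0x5678,
 *		  .asm_did = RIO_ANY_ID, .asm_vid = RIO_ANY_ID },
 *		{ 0, }	// terminator checked by rio_match_device()
 *	};
 *
 *	static struct rio_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_id_table,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 *	// in module init: return rio_register_driver(&foo_driver);
 */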
/**
* rio_unregister_driver - unregister a RIO driver
* @rdrv: the RIO driver structure to unregister
*
* Deletes the &struct rio_driver from the list of registered RIO
* drivers, gives it a chance to clean up by calling its remove()
* function for each device it was responsible for, and marks those
* devices as driverless.
*/
void rio_unregister_driver(struct rio_driver *rdrv)
{
driver_unregister(&rdrv->driver);
}
/**
* rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
* @dev: the standard device structure to match against
* @drv: the standard driver structure containing the ids to match against
*
* Used by a driver to check whether a RIO device present in the
* system is in its list of supported devices. Returns 1 if
* there is a matching &struct rio_device_id or 0 if there is
* no match.
*/
static int rio_match_bus(struct device *dev, struct device_driver *drv)
{
struct rio_dev *rdev = to_rio_dev(dev);
struct rio_driver *rdrv = to_rio_driver(drv);
const struct rio_device_id *id = rdrv->id_table;
const struct rio_device_id *found_id;
if (!id)
goto out;
found_id = rio_match_device(id, rdev);
if (found_id)
return 1;
out:
	return 0;
}
static struct device rio_bus = {
.bus_id = "rapidio",
};
struct bus_type rio_bus_type = {
.name = "rapidio",
.match = rio_match_bus,
.dev_attrs = rio_dev_attrs,
.probe = rio_device_probe,
.remove = rio_device_remove,
};
/**
* rio_bus_init - Register the RapidIO bus with the device model
*
* Registers the RIO bus device and RIO bus type with the Linux
* device model.
*/
static int __init rio_bus_init(void)
{
if (device_register(&rio_bus) < 0)
printk("RIO: failed to register RIO bus device\n");
return bus_register(&rio_bus_type);
}
postcore_initcall(rio_bus_init);
EXPORT_SYMBOL_GPL(rio_register_driver);
EXPORT_SYMBOL_GPL(rio_unregister_driver);
EXPORT_SYMBOL_GPL(rio_bus_type);
EXPORT_SYMBOL_GPL(rio_dev_get);
EXPORT_SYMBOL_GPL(rio_dev_put);
| gpl-2.0 |
derekzhuo/android-goldfish-2.6.29 | drivers/usb/host/whci/asl.c | 140 | 8932 | /*
* Wireless Host Controller (WHC) asynchronous schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>
#include "../../wusbcore/wusbhc.h"
#include "whcd.h"
static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
struct whc_qset **next, struct whc_qset **prev)
{
struct list_head *n, *p;
BUG_ON(list_empty(&whc->async_list));
n = qset->list_node.next;
if (n == &whc->async_list)
n = n->next;
p = qset->list_node.prev;
if (p == &whc->async_list)
p = p->prev;
*next = container_of(n, struct whc_qset, list_node);
*prev = container_of(p, struct whc_qset, list_node);
}
static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
{
list_move(&qset->list_node, &whc->async_list);
qset->in_sw_list = true;
}
static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
{
struct whc_qset *next, *prev;
qset_clear(whc, qset);
/* Link into ASL. */
qset_get_next_prev(whc, qset, &next, &prev);
whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
qset->in_hw_list = true;
}
static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
{
struct whc_qset *prev, *next;
qset_get_next_prev(whc, qset, &next, &prev);
list_move(&qset->list_node, &whc->async_removed_list);
qset->in_sw_list = false;
/*
* No more qsets in the ASL? The caller must stop the ASL as
* it's no longer valid.
*/
if (list_empty(&whc->async_list))
return;
/* Remove from ASL. */
whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
qset->in_hw_list = false;
}
/**
* process_qset - process any recently inactivated or halted qTDs in a
* qset.
*
* After inactive qTDs are removed, new qTDs can be added if the
* urb queue still contains URBs.
*
* Returns any additional WUSBCMD bits for the ASL sync command (i.e.,
* WUSBCMD_ASYNC_QSET_RM if a halted qset was removed).
*/
static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
{
enum whc_update update = 0;
uint32_t status = 0;
while (qset->ntds) {
struct whc_qtd *td;
int t;
t = qset->td_start;
td = &qset->qtd[qset->td_start];
status = le32_to_cpu(td->status);
/*
* Nothing to do with a still active qTD.
*/
if (status & QTD_STS_ACTIVE)
break;
if (status & QTD_STS_HALTED) {
/* Ug, an error. */
process_halted_qtd(whc, qset, td);
goto done;
}
/* Mmm, a completed qTD. */
process_inactive_qtd(whc, qset, td);
}
update |= qset_add_qtds(whc, qset);
done:
/*
* Remove this qset from the ASL if requested, but only if it has
* no qTDs.
*/
if (qset->remove && qset->ntds == 0) {
asl_qset_remove(whc, qset);
update |= WHC_UPDATE_REMOVED;
}
return update;
}
void asl_start(struct whc *whc)
{
struct whc_qset *qset;
qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
1000, "start ASL");
}
void asl_stop(struct whc *whc)
{
whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_ASYNC_SCHED, 0,
1000, "stop ASL");
}
/**
* asl_update - request an ASL update and wait for the hardware to be synced
* @whc: the WHCI HC
* @wusbcmd: WUSBCMD value to start the update.
*
* If the WUSB HC is inactive (i.e., the ASL is stopped) then the
* update must be skipped as the hardware may not respond to update
* requests.
*/
void asl_update(struct whc *whc, uint32_t wusbcmd)
{
struct wusbhc *wusbhc = &whc->wusbhc;
long t;
mutex_lock(&wusbhc->mutex);
if (wusbhc->active) {
whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
t = wait_event_timeout(
whc->async_list_wq,
(le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
msecs_to_jiffies(1000));
if (t == 0)
whc_hw_error(whc, "ASL update timeout");
}
mutex_unlock(&wusbhc->mutex);
}
/**
* scan_async_work - scan the ASL for qsets to process.
*
* Process each qset in the ASL in turn and then signal the WHC that
* the ASL has been updated.
*
* Then start, stop or update the asynchronous schedule as required.
*/
void scan_async_work(struct work_struct *work)
{
struct whc *whc = container_of(work, struct whc, async_work);
struct whc_qset *qset, *t;
enum whc_update update = 0;
spin_lock_irq(&whc->lock);
/*
* Traverse the software list backwards so new qsets can be
* safely inserted into the ASL without making it non-circular.
*/
list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
if (!qset->in_hw_list) {
asl_qset_insert(whc, qset);
update |= WHC_UPDATE_ADDED;
}
update |= process_qset(whc, qset);
}
spin_unlock_irq(&whc->lock);
if (update) {
uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
if (update & WHC_UPDATE_REMOVED)
wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
asl_update(whc, wusbcmd);
}
/*
* Now that the ASL is updated, complete the removal of any
* removed qsets.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
qset_remove_complete(whc, qset);
}
spin_unlock_irq(&whc->lock);
}
/**
* asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
* @whc: the WHCI host controller
* @urb: the URB to enqueue
* @mem_flags: flags for any memory allocations
*
* The qset for the endpoint is obtained and the urb queued on to it.
*
* Work is scheduled to update the hardware's view of the ASL.
*/
int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
{
struct whc_qset *qset;
int err;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
qset = get_qset(whc, urb, GFP_ATOMIC);
if (qset == NULL)
err = -ENOMEM;
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
if (!qset->in_sw_list)
asl_qset_insert_begin(whc, qset);
}
spin_unlock_irqrestore(&whc->lock, flags);
if (!err)
queue_work(whc->workqueue, &whc->async_work);
return err;
}
/**
* asl_urb_dequeue - remove an URB (qset) from the async list.
* @whc: the WHCI host controller
* @urb: the URB to dequeue
* @status: the current status of the URB
*
* URBs that do not yet have qTDs can simply be removed from the software
* queue, otherwise the qset must be removed from the ASL so the qTDs
* can be removed.
*/
int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
struct whc_urb *wurb = urb->hcpriv;
struct whc_qset *qset = wurb->qset;
struct whc_std *std, *t;
int ret;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
if (ret < 0)
goto out;
list_for_each_entry_safe(std, t, &qset->stds, list_node) {
if (std->urb == urb)
qset_free_std(whc, std);
else
std->qtd = NULL; /* so this std is re-added when the qset is */
}
asl_qset_remove(whc, qset);
wurb->status = status;
wurb->is_async = true;
queue_work(whc->workqueue, &wurb->dequeue_work);
out:
spin_unlock_irqrestore(&whc->lock, flags);
return ret;
}
/**
* asl_qset_delete - delete a qset from the ASL
*/
void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
qset->remove = 1;
queue_work(whc->workqueue, &whc->async_work);
qset_delete(whc, qset);
}
/**
* asl_init - initialize the asynchronous schedule list
*
* A dummy qset with no qTDs is added to the ASL to simplify removing
* qsets (no need to stop the ASL when the last qset is removed).
*/
int asl_init(struct whc *whc)
{
struct whc_qset *qset;
qset = qset_alloc(whc, GFP_KERNEL);
if (qset == NULL)
return -ENOMEM;
asl_qset_insert_begin(whc, qset);
asl_qset_insert(whc, qset);
return 0;
}
/**
* asl_clean_up - free ASL resources
*
* The ASL is stopped and empty except for the dummy qset.
*/
void asl_clean_up(struct whc *whc)
{
struct whc_qset *qset;
if (!list_empty(&whc->async_list)) {
qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
list_del(&qset->list_node);
qset_free(whc, qset);
}
}
| gpl-2.0 |
kernel13D/linux-rpi | sound/pci/ctxfi/cthw20k2.c | 652 | 52694 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File cthw20k2.c
*
* @Brief
* This file contains the implementation of hardware access method for 20k2.
*
* @Author Liu Chun
* @Date May 14 2008
*
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include "cthw20k2.h"
#include "ct20k2reg.h"
#if BITS_PER_LONG == 32
#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */
#else
#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */
#endif
struct hw20k2 {
struct hw hw;
/* for i2c */
unsigned char dev_id;
unsigned char addr_size;
unsigned char data_size;
int mic_source;
};
static u32 hw_read_20kx(struct hw *hw, u32 reg);
static void hw_write_20kx(struct hw *hw, u32 reg, u32 data);
/*
* Type definition block.
* The layout of control structures can be directly applied on 20k2 chip.
*/
/*
* SRC control block definitions.
*/
/* SRC resource control block */
#define SRCCTL_STATE 0x00000007
#define SRCCTL_BM 0x00000008
#define SRCCTL_RSR 0x00000030
#define SRCCTL_SF 0x000001C0
#define SRCCTL_WR 0x00000200
#define SRCCTL_PM 0x00000400
#define SRCCTL_ROM 0x00001800
#define SRCCTL_VO 0x00002000
#define SRCCTL_ST 0x00004000
#define SRCCTL_IE 0x00008000
#define SRCCTL_ILSZ 0x000F0000
#define SRCCTL_BP 0x00100000
#define SRCCCR_CISZ 0x000007FF
#define SRCCCR_CWA 0x001FF800
#define SRCCCR_D 0x00200000
#define SRCCCR_RS 0x01C00000
#define SRCCCR_NAL 0x3E000000
#define SRCCCR_RA 0xC0000000
#define SRCCA_CA 0x0FFFFFFF
#define SRCCA_RS 0xE0000000
#define SRCSA_SA 0x0FFFFFFF
#define SRCLA_LA 0x0FFFFFFF
/* Mixer Parameter Ring RAM Low and High registers.
* Fixed-point value in 8.24 format for parameter channel */
#define MPRLH_PITCH 0xFFFFFFFF
/* SRC resource register dirty flags */
union src_dirty {
struct {
u16 ctl:1;
u16 ccr:1;
u16 sa:1;
u16 la:1;
u16 ca:1;
u16 mpr:1;
u16 czbfs:1; /* Clear Z-Buffers */
u16 rsv:9;
} bf;
u16 data;
};
struct src_rsc_ctrl_blk {
unsigned int ctl;
unsigned int ccr;
unsigned int ca;
unsigned int sa;
unsigned int la;
unsigned int mpr;
union src_dirty dirty;
};
/* SRC manager control block */
union src_mgr_dirty {
struct {
u16 enb0:1;
u16 enb1:1;
u16 enb2:1;
u16 enb3:1;
u16 enb4:1;
u16 enb5:1;
u16 enb6:1;
u16 enb7:1;
u16 enbsa:1;
u16 rsv:7;
} bf;
u16 data;
};
struct src_mgr_ctrl_blk {
unsigned int enbsa;
unsigned int enb[8];
union src_mgr_dirty dirty;
};
/* SRCIMP manager control block */
#define SRCAIM_ARC 0x00000FFF
#define SRCAIM_NXT 0x00FF0000
#define SRCAIM_SRC 0xFF000000
struct srcimap {
unsigned int srcaim;
unsigned int idx;
};
/* SRCIMP manager register dirty flags */
union srcimp_mgr_dirty {
struct {
u16 srcimap:1;
u16 rsv:15;
} bf;
u16 data;
};
struct srcimp_mgr_ctrl_blk {
struct srcimap srcimap;
union srcimp_mgr_dirty dirty;
};
/*
* Function implementation block.
*/
static int src_get_rsc_ctrl_blk(void **rblk)
{
struct src_rsc_ctrl_blk *blk;
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk)
return -ENOMEM;
*rblk = blk;
return 0;
}
static int src_put_rsc_ctrl_blk(void *blk)
{
kfree(blk);
return 0;
}
static int src_set_state(void *blk, unsigned int state)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_STATE, state);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_bm(void *blk, unsigned int bm)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_BM, bm);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_rsr(void *blk, unsigned int rsr)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_RSR, rsr);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_sf(void *blk, unsigned int sf)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_SF, sf);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_wr(void *blk, unsigned int wr)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_WR, wr);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_pm(void *blk, unsigned int pm)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_PM, pm);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_rom(void *blk, unsigned int rom)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_ROM, rom);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_vo(void *blk, unsigned int vo)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_VO, vo);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_st(void *blk, unsigned int st)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_ST, st);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_ie(void *blk, unsigned int ie)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_IE, ie);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_ilsz(void *blk, unsigned int ilsz)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_ILSZ, ilsz);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_bp(void *blk, unsigned int bp)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ctl, SRCCTL_BP, bp);
ctl->dirty.bf.ctl = 1;
return 0;
}
static int src_set_cisz(void *blk, unsigned int cisz)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ccr, SRCCCR_CISZ, cisz);
ctl->dirty.bf.ccr = 1;
return 0;
}
static int src_set_ca(void *blk, unsigned int ca)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->ca, SRCCA_CA, ca);
ctl->dirty.bf.ca = 1;
return 0;
}
static int src_set_sa(void *blk, unsigned int sa)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->sa, SRCSA_SA, sa);
ctl->dirty.bf.sa = 1;
return 0;
}
static int src_set_la(void *blk, unsigned int la)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->la, SRCLA_LA, la);
ctl->dirty.bf.la = 1;
return 0;
}
static int src_set_pitch(void *blk, unsigned int pitch)
{
struct src_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->mpr, MPRLH_PITCH, pitch);
ctl->dirty.bf.mpr = 1;
return 0;
}
static int src_set_clear_zbufs(void *blk, unsigned int clear)
{
((struct src_rsc_ctrl_blk *)blk)->dirty.bf.czbfs = (clear ? 1 : 0);
return 0;
}
static int src_set_dirty(void *blk, unsigned int flags)
{
((struct src_rsc_ctrl_blk *)blk)->dirty.data = (flags & 0xffff);
return 0;
}
static int src_set_dirty_all(void *blk)
{
((struct src_rsc_ctrl_blk *)blk)->dirty.data = ~(0x0);
return 0;
}
#define AR_SLOT_SIZE 4096
#define AR_SLOT_BLOCK_SIZE 16
#define AR_PTS_PITCH 6
#define AR_PARAM_SRC_OFFSET 0x60
static unsigned int src_param_pitch_mixer(unsigned int src_idx)
{
return ((src_idx << 4) + AR_PTS_PITCH + AR_SLOT_SIZE
- AR_PARAM_SRC_OFFSET) % AR_SLOT_SIZE;
}
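/*
 * Worked example (illustrative): for src_idx == 1 the slot is
 * ((1 << 4) + 6 + 4096 - 0x60) % 4096 = (16 + 6 + 4096 - 96) % 4096
 * = 4022; i.e. the pitch slot for src idx lives at
 * (idx * 16 + AR_PTS_PITCH - AR_PARAM_SRC_OFFSET) wrapped within the
 * 4096-entry parameter ring.
 */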
static int src_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
struct src_rsc_ctrl_blk *ctl = blk;
int i;
if (ctl->dirty.bf.czbfs) {
/* Clear Z-Buffer registers */
for (i = 0; i < 8; i++)
hw_write_20kx(hw, SRC_UPZ+idx*0x100+i*0x4, 0);
for (i = 0; i < 4; i++)
hw_write_20kx(hw, SRC_DN0Z+idx*0x100+i*0x4, 0);
for (i = 0; i < 8; i++)
hw_write_20kx(hw, SRC_DN1Z+idx*0x100+i*0x4, 0);
ctl->dirty.bf.czbfs = 0;
}
if (ctl->dirty.bf.mpr) {
		/* For simplicity, take the parameter mixer resource in
		 * the same group as the idx src.  Unlike src resources,
		 * all conjugate parameter mixer resources must be
		 * programmed for their corresponding conjugate src
		 * resources. */
unsigned int pm_idx = src_param_pitch_mixer(idx);
hw_write_20kx(hw, MIXER_PRING_LO_HI+4*pm_idx, ctl->mpr);
hw_write_20kx(hw, MIXER_PMOPLO+8*pm_idx, 0x3);
hw_write_20kx(hw, MIXER_PMOPHI+8*pm_idx, 0x0);
ctl->dirty.bf.mpr = 0;
}
if (ctl->dirty.bf.sa) {
hw_write_20kx(hw, SRC_SA+idx*0x100, ctl->sa);
ctl->dirty.bf.sa = 0;
}
if (ctl->dirty.bf.la) {
hw_write_20kx(hw, SRC_LA+idx*0x100, ctl->la);
ctl->dirty.bf.la = 0;
}
if (ctl->dirty.bf.ca) {
hw_write_20kx(hw, SRC_CA+idx*0x100, ctl->ca);
ctl->dirty.bf.ca = 0;
}
/* Write srccf register */
hw_write_20kx(hw, SRC_CF+idx*0x100, 0x0);
if (ctl->dirty.bf.ccr) {
hw_write_20kx(hw, SRC_CCR+idx*0x100, ctl->ccr);
ctl->dirty.bf.ccr = 0;
}
if (ctl->dirty.bf.ctl) {
hw_write_20kx(hw, SRC_CTL+idx*0x100, ctl->ctl);
ctl->dirty.bf.ctl = 0;
}
return 0;
}
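/*
 * Illustrative sketch, not driver code: the cache-then-commit pattern
 * used by the setters and src_commit_write() above.  Setters touch
 * only a shadow copy and mark a dirty bit; a single commit pass then
 * writes just the registers that actually changed.  The helper names
 * below are hypothetical.
 */
struct shadow_reg {
	unsigned int val;
	unsigned int dirty;	/* set by writers, cleared on commit */
};

static void shadow_set(struct shadow_reg *s, unsigned int val)
{
	s->val = val;
	s->dirty = 1;
}

static void shadow_commit(struct shadow_reg *s, unsigned int reg,
			  void (*write_reg)(unsigned int reg,
					    unsigned int val))
{
	if (s->dirty) {
		write_reg(reg, s->val);
		s->dirty = 0;
	}
}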
static int src_get_ca(struct hw *hw, unsigned int idx, void *blk)
{
struct src_rsc_ctrl_blk *ctl = blk;
ctl->ca = hw_read_20kx(hw, SRC_CA+idx*0x100);
ctl->dirty.bf.ca = 0;
return get_field(ctl->ca, SRCCA_CA);
}
static unsigned int src_get_dirty(void *blk)
{
return ((struct src_rsc_ctrl_blk *)blk)->dirty.data;
}
static unsigned int src_dirty_conj_mask(void)
{
return 0x20;
}
static int src_mgr_enbs_src(void *blk, unsigned int idx)
{
((struct src_mgr_ctrl_blk *)blk)->enbsa |= (0x1 << ((idx%128)/4));
((struct src_mgr_ctrl_blk *)blk)->dirty.bf.enbsa = 1;
((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] |= (0x1 << (idx%32));
return 0;
}
static int src_mgr_enb_src(void *blk, unsigned int idx)
{
((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] |= (0x1 << (idx%32));
((struct src_mgr_ctrl_blk *)blk)->dirty.data |= (0x1 << (idx/32));
return 0;
}
static int src_mgr_dsb_src(void *blk, unsigned int idx)
{
((struct src_mgr_ctrl_blk *)blk)->enb[idx/32] &= ~(0x1 << (idx%32));
((struct src_mgr_ctrl_blk *)blk)->dirty.data |= (0x1 << (idx/32));
return 0;
}
static int src_mgr_commit_write(struct hw *hw, void *blk)
{
struct src_mgr_ctrl_blk *ctl = blk;
int i;
unsigned int ret;
if (ctl->dirty.bf.enbsa) {
do {
ret = hw_read_20kx(hw, SRC_ENBSTAT);
} while (ret & 0x1);
hw_write_20kx(hw, SRC_ENBSA, ctl->enbsa);
ctl->dirty.bf.enbsa = 0;
}
for (i = 0; i < 8; i++) {
if ((ctl->dirty.data & (0x1 << i))) {
hw_write_20kx(hw, SRC_ENB+(i*0x100), ctl->enb[i]);
ctl->dirty.data &= ~(0x1 << i);
}
}
return 0;
}
static int src_mgr_get_ctrl_blk(void **rblk)
{
struct src_mgr_ctrl_blk *blk;
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk)
return -ENOMEM;
*rblk = blk;
return 0;
}
static int src_mgr_put_ctrl_blk(void *blk)
{
kfree(blk);
return 0;
}
static int srcimp_mgr_get_ctrl_blk(void **rblk)
{
struct srcimp_mgr_ctrl_blk *blk;
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk)
return -ENOMEM;
*rblk = blk;
return 0;
}
static int srcimp_mgr_put_ctrl_blk(void *blk)
{
kfree(blk);
return 0;
}
static int srcimp_mgr_set_imaparc(void *blk, unsigned int slot)
{
struct srcimp_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->srcimap.srcaim, SRCAIM_ARC, slot);
ctl->dirty.bf.srcimap = 1;
return 0;
}
static int srcimp_mgr_set_imapuser(void *blk, unsigned int user)
{
struct srcimp_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->srcimap.srcaim, SRCAIM_SRC, user);
ctl->dirty.bf.srcimap = 1;
return 0;
}
static int srcimp_mgr_set_imapnxt(void *blk, unsigned int next)
{
struct srcimp_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->srcimap.srcaim, SRCAIM_NXT, next);
ctl->dirty.bf.srcimap = 1;
return 0;
}
static int srcimp_mgr_set_imapaddr(void *blk, unsigned int addr)
{
((struct srcimp_mgr_ctrl_blk *)blk)->srcimap.idx = addr;
((struct srcimp_mgr_ctrl_blk *)blk)->dirty.bf.srcimap = 1;
return 0;
}
static int srcimp_mgr_commit_write(struct hw *hw, void *blk)
{
struct srcimp_mgr_ctrl_blk *ctl = blk;
if (ctl->dirty.bf.srcimap) {
hw_write_20kx(hw, SRC_IMAP+ctl->srcimap.idx*0x100,
ctl->srcimap.srcaim);
ctl->dirty.bf.srcimap = 0;
}
return 0;
}
/*
* AMIXER control block definitions.
*/
#define AMOPLO_M 0x00000003
#define AMOPLO_IV 0x00000004
#define AMOPLO_X 0x0003FFF0
#define AMOPLO_Y 0xFFFC0000
#define AMOPHI_SADR 0x000000FF
#define AMOPHI_SE 0x80000000
/* AMIXER resource register dirty flags */
union amixer_dirty {
struct {
u16 amoplo:1;
u16 amophi:1;
u16 rsv:14;
} bf;
u16 data;
};
/* AMIXER resource control block */
struct amixer_rsc_ctrl_blk {
unsigned int amoplo;
unsigned int amophi;
union amixer_dirty dirty;
};
static int amixer_set_mode(void *blk, unsigned int mode)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->amoplo, AMOPLO_M, mode);
ctl->dirty.bf.amoplo = 1;
return 0;
}
static int amixer_set_iv(void *blk, unsigned int iv)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->amoplo, AMOPLO_IV, iv);
ctl->dirty.bf.amoplo = 1;
return 0;
}
static int amixer_set_x(void *blk, unsigned int x)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->amoplo, AMOPLO_X, x);
ctl->dirty.bf.amoplo = 1;
return 0;
}
static int amixer_set_y(void *blk, unsigned int y)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->amoplo, AMOPLO_Y, y);
ctl->dirty.bf.amoplo = 1;
return 0;
}
static int amixer_set_sadr(void *blk, unsigned int sadr)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->amophi, AMOPHI_SADR, sadr);
ctl->dirty.bf.amophi = 1;
return 0;
}
static int amixer_set_se(void *blk, unsigned int se)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
set_field(&ctl->amophi, AMOPHI_SE, se);
ctl->dirty.bf.amophi = 1;
return 0;
}
static int amixer_set_dirty(void *blk, unsigned int flags)
{
((struct amixer_rsc_ctrl_blk *)blk)->dirty.data = (flags & 0xffff);
return 0;
}
static int amixer_set_dirty_all(void *blk)
{
((struct amixer_rsc_ctrl_blk *)blk)->dirty.data = ~(0x0);
return 0;
}
static int amixer_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
if (ctl->dirty.bf.amoplo || ctl->dirty.bf.amophi) {
hw_write_20kx(hw, MIXER_AMOPLO+idx*8, ctl->amoplo);
ctl->dirty.bf.amoplo = 0;
hw_write_20kx(hw, MIXER_AMOPHI+idx*8, ctl->amophi);
ctl->dirty.bf.amophi = 0;
}
return 0;
}
static int amixer_get_y(void *blk)
{
struct amixer_rsc_ctrl_blk *ctl = blk;
return get_field(ctl->amoplo, AMOPLO_Y);
}
static unsigned int amixer_get_dirty(void *blk)
{
return ((struct amixer_rsc_ctrl_blk *)blk)->dirty.data;
}
static int amixer_rsc_get_ctrl_blk(void **rblk)
{
struct amixer_rsc_ctrl_blk *blk;
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk)
return -ENOMEM;
*rblk = blk;
return 0;
}
static int amixer_rsc_put_ctrl_blk(void *blk)
{
kfree(blk);
return 0;
}
static int amixer_mgr_get_ctrl_blk(void **rblk)
{
*rblk = NULL;
return 0;
}
static int amixer_mgr_put_ctrl_blk(void *blk)
{
return 0;
}
/*
* DAIO control block definitions.
*/
/* Receiver Sample Rate Tracker Control register */
#define SRTCTL_SRCO 0x000000FF
#define SRTCTL_SRCM 0x0000FF00
#define SRTCTL_RSR 0x00030000
#define SRTCTL_DRAT 0x00300000
#define SRTCTL_EC 0x01000000
#define SRTCTL_ET 0x10000000
/* DAIO Receiver register dirty flags */
union dai_dirty {
struct {
u16 srt:1;
u16 rsv:15;
} bf;
u16 data;
};
/* DAIO Receiver control block */
struct dai_ctrl_blk {
unsigned int srt;
union dai_dirty dirty;
};
/* Audio Input Mapper RAM */
#define AIM_ARC 0x00000FFF
#define AIM_NXT 0x007F0000
struct daoimap {
unsigned int aim;
unsigned int idx;
};
/* Audio Transmitter Control and Status register */
#define ATXCTL_EN 0x00000001
#define ATXCTL_MODE 0x00000010
#define ATXCTL_CD 0x00000020
#define ATXCTL_RAW 0x00000100
#define ATXCTL_MT 0x00000200
#define ATXCTL_NUC 0x00003000
#define ATXCTL_BEN 0x00010000
#define ATXCTL_BMUX 0x00700000
#define ATXCTL_B24 0x01000000
#define ATXCTL_CPF 0x02000000
#define ATXCTL_RIV 0x10000000
#define ATXCTL_LIV 0x20000000
#define ATXCTL_RSAT 0x40000000
#define ATXCTL_LSAT 0x80000000
/* XDIF Transmitter register dirty flags */
union dao_dirty {
struct {
u16 atxcsl:1;
u16 rsv:15;
} bf;
u16 data;
};
/* XDIF Transmitter control block */
struct dao_ctrl_blk {
/* XDIF Transmitter Channel Status Low Register */
unsigned int atxcsl;
union dao_dirty dirty;
};
/* Audio Receiver Control register */
#define ARXCTL_EN 0x00000001
/* DAIO manager register dirty flags */
union daio_mgr_dirty {
struct {
u32 atxctl:8;
u32 arxctl:8;
u32 daoimap:1;
u32 rsv:15;
} bf;
u32 data;
};
/* DAIO manager control block */
struct daio_mgr_ctrl_blk {
struct daoimap daoimap;
unsigned int txctl[8];
unsigned int rxctl[8];
union daio_mgr_dirty dirty;
};
static int dai_srt_set_srco(void *blk, unsigned int src)
{
struct dai_ctrl_blk *ctl = blk;
set_field(&ctl->srt, SRTCTL_SRCO, src);
ctl->dirty.bf.srt = 1;
return 0;
}
static int dai_srt_set_srcm(void *blk, unsigned int src)
{
struct dai_ctrl_blk *ctl = blk;
set_field(&ctl->srt, SRTCTL_SRCM, src);
ctl->dirty.bf.srt = 1;
return 0;
}
static int dai_srt_set_rsr(void *blk, unsigned int rsr)
{
struct dai_ctrl_blk *ctl = blk;
set_field(&ctl->srt, SRTCTL_RSR, rsr);
ctl->dirty.bf.srt = 1;
return 0;
}
static int dai_srt_set_drat(void *blk, unsigned int drat)
{
struct dai_ctrl_blk *ctl = blk;
set_field(&ctl->srt, SRTCTL_DRAT, drat);
ctl->dirty.bf.srt = 1;
return 0;
}
static int dai_srt_set_ec(void *blk, unsigned int ec)
{
struct dai_ctrl_blk *ctl = blk;
set_field(&ctl->srt, SRTCTL_EC, ec ? 1 : 0);
ctl->dirty.bf.srt = 1;
return 0;
}
static int dai_srt_set_et(void *blk, unsigned int et)
{
struct dai_ctrl_blk *ctl = blk;
set_field(&ctl->srt, SRTCTL_ET, et ? 1 : 0);
ctl->dirty.bf.srt = 1;
return 0;
}
static int dai_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
struct dai_ctrl_blk *ctl = blk;
if (ctl->dirty.bf.srt) {
hw_write_20kx(hw, AUDIO_IO_RX_SRT_CTL+0x40*idx, ctl->srt);
ctl->dirty.bf.srt = 0;
}
return 0;
}
static int dai_get_ctrl_blk(void **rblk)
{
struct dai_ctrl_blk *blk;
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk)
return -ENOMEM;
*rblk = blk;
return 0;
}
static int dai_put_ctrl_blk(void *blk)
{
kfree(blk);
return 0;
}
static int dao_set_spos(void *blk, unsigned int spos)
{
((struct dao_ctrl_blk *)blk)->atxcsl = spos;
((struct dao_ctrl_blk *)blk)->dirty.bf.atxcsl = 1;
return 0;
}
static int dao_commit_write(struct hw *hw, unsigned int idx, void *blk)
{
struct dao_ctrl_blk *ctl = blk;
if (ctl->dirty.bf.atxcsl) {
if (idx < 4) {
/* S/PDIF SPOSx */
hw_write_20kx(hw, AUDIO_IO_TX_CSTAT_L+0x40*idx,
ctl->atxcsl);
}
ctl->dirty.bf.atxcsl = 0;
}
return 0;
}
static int dao_get_spos(void *blk, unsigned int *spos)
{
*spos = ((struct dao_ctrl_blk *)blk)->atxcsl;
return 0;
}
static int dao_get_ctrl_blk(void **rblk)
{
struct dao_ctrl_blk *blk;
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk)
return -ENOMEM;
*rblk = blk;
return 0;
}
static int dao_put_ctrl_blk(void *blk)
{
kfree(blk);
return 0;
}
static int daio_mgr_enb_dai(void *blk, unsigned int idx)
{
struct daio_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->rxctl[idx], ARXCTL_EN, 1);
ctl->dirty.bf.arxctl |= (0x1 << idx);
return 0;
}
static int daio_mgr_dsb_dai(void *blk, unsigned int idx)
{
struct daio_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->rxctl[idx], ARXCTL_EN, 0);
ctl->dirty.bf.arxctl |= (0x1 << idx);
return 0;
}
static int daio_mgr_enb_dao(void *blk, unsigned int idx)
{
struct daio_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->txctl[idx], ATXCTL_EN, 1);
ctl->dirty.bf.atxctl |= (0x1 << idx);
return 0;
}
static int daio_mgr_dsb_dao(void *blk, unsigned int idx)
{
struct daio_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->txctl[idx], ATXCTL_EN, 0);
ctl->dirty.bf.atxctl |= (0x1 << idx);
return 0;
}
static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)
{
struct daio_mgr_ctrl_blk *ctl = blk;
if (idx < 4) {
/* S/PDIF output */
switch ((conf & 0x7)) {
case 1:
set_field(&ctl->txctl[idx], ATXCTL_NUC, 0);
break;
case 2:
set_field(&ctl->txctl[idx], ATXCTL_NUC, 1);
break;
case 4:
set_field(&ctl->txctl[idx], ATXCTL_NUC, 2);
break;
case 8:
set_field(&ctl->txctl[idx], ATXCTL_NUC, 3);
break;
default:
break;
}
/* CDIF */
set_field(&ctl->txctl[idx], ATXCTL_CD, (!(conf & 0x7)));
/* Non-audio */
set_field(&ctl->txctl[idx], ATXCTL_LIV, (conf >> 4) & 0x1);
/* Non-audio */
set_field(&ctl->txctl[idx], ATXCTL_RIV, (conf >> 4) & 0x1);
		/* RAW (non-PCM) pass-through stays disabled regardless
		 * of conf bit 3 */
		set_field(&ctl->txctl[idx], ATXCTL_RAW, 0);
ctl->dirty.bf.atxctl |= (0x1 << idx);
} else {
/* I2S output */
/*idx %= 4; */
}
return 0;
}
static int daio_mgr_set_imaparc(void *blk, unsigned int slot)
{
struct daio_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->daoimap.aim, AIM_ARC, slot);
ctl->dirty.bf.daoimap = 1;
return 0;
}
static int daio_mgr_set_imapnxt(void *blk, unsigned int next)
{
struct daio_mgr_ctrl_blk *ctl = blk;
set_field(&ctl->daoimap.aim, AIM_NXT, next);
ctl->dirty.bf.daoimap = 1;
return 0;
}
static int daio_mgr_set_imapaddr(void *blk, unsigned int addr)
{
((struct daio_mgr_ctrl_blk *)blk)->daoimap.idx = addr;
((struct daio_mgr_ctrl_blk *)blk)->dirty.bf.daoimap = 1;
return 0;
}
static int daio_mgr_commit_write(struct hw *hw, void *blk)
{
struct daio_mgr_ctrl_blk *ctl = blk;
unsigned int data;
int i;
for (i = 0; i < 8; i++) {
if ((ctl->dirty.bf.atxctl & (0x1 << i))) {
data = ctl->txctl[i];
hw_write_20kx(hw, (AUDIO_IO_TX_CTL+(0x40*i)), data);
ctl->dirty.bf.atxctl &= ~(0x1 << i);
mdelay(1);
}
if ((ctl->dirty.bf.arxctl & (0x1 << i))) {
data = ctl->rxctl[i];
hw_write_20kx(hw, (AUDIO_IO_RX_CTL+(0x40*i)), data);
ctl->dirty.bf.arxctl &= ~(0x1 << i);
mdelay(1);
}
}
if (ctl->dirty.bf.daoimap) {
hw_write_20kx(hw, AUDIO_IO_AIM+ctl->daoimap.idx*4,
ctl->daoimap.aim);
ctl->dirty.bf.daoimap = 0;
}
return 0;
}
static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
{
struct daio_mgr_ctrl_blk *blk;
int i;
*rblk = NULL;
blk = kzalloc(sizeof(*blk), GFP_KERNEL);
if (!blk)
return -ENOMEM;
for (i = 0; i < 8; i++) {
blk->txctl[i] = hw_read_20kx(hw, AUDIO_IO_TX_CTL+(0x40*i));
blk->rxctl[i] = hw_read_20kx(hw, AUDIO_IO_RX_CTL+(0x40*i));
}
*rblk = blk;
return 0;
}
static int daio_mgr_put_ctrl_blk(void *blk)
{
kfree(blk);
return 0;
}
/* Timer interrupt */
static int set_timer_irq(struct hw *hw, int enable)
{
hw_write_20kx(hw, GIE, enable ? IT_INT : 0);
return 0;
}
static int set_timer_tick(struct hw *hw, unsigned int ticks)
{
if (ticks)
ticks |= TIMR_IE | TIMR_IP;
hw_write_20kx(hw, TIMR, ticks);
return 0;
}
static unsigned int get_wc(struct hw *hw)
{
return hw_read_20kx(hw, WC);
}
/* Card hardware initialization block */
struct dac_conf {
unsigned int msr; /* master sample rate in rsrs */
};
struct adc_conf {
unsigned int msr; /* master sample rate in rsrs */
unsigned char input; /* the input source of ADC */
unsigned char mic20db; /* boost mic by 20db if input is microphone */
};
struct daio_conf {
unsigned int msr; /* master sample rate in rsrs */
};
struct trn_conf {
unsigned long vm_pgt_phys;
};
static int hw_daio_init(struct hw *hw, const struct daio_conf *info)
{
u32 data;
int i;
/* Program I2S with proper sample rate and enable the correct I2S
* channel. ED(0/8/16/24): Enable all I2S/I2X master clock output */
if (1 == info->msr) {
hw_write_20kx(hw, AUDIO_IO_MCLK, 0x01010101);
hw_write_20kx(hw, AUDIO_IO_TX_BLRCLK, 0x01010101);
hw_write_20kx(hw, AUDIO_IO_RX_BLRCLK, 0);
} else if (2 == info->msr) {
if (hw->model != CTSB1270) {
hw_write_20kx(hw, AUDIO_IO_MCLK, 0x11111111);
} else {
/* PCM4220 on Titanium HD is different. */
hw_write_20kx(hw, AUDIO_IO_MCLK, 0x11011111);
}
		/* Set all outputs to play at 96 kHz:
* EA [0] - Enabled
* RTA [4:5] - 96kHz
* EB [8] - Enabled
* RTB [12:13] - 96kHz
* EC [16] - Enabled
* RTC [20:21] - 96kHz
* ED [24] - Enabled
* RTD [28:29] - 96kHz */
hw_write_20kx(hw, AUDIO_IO_TX_BLRCLK, 0x11111111);
hw_write_20kx(hw, AUDIO_IO_RX_BLRCLK, 0);
} else if ((4 == info->msr) && (hw->model == CTSB1270)) {
hw_write_20kx(hw, AUDIO_IO_MCLK, 0x21011111);
hw_write_20kx(hw, AUDIO_IO_TX_BLRCLK, 0x21212121);
hw_write_20kx(hw, AUDIO_IO_RX_BLRCLK, 0);
} else {
dev_alert(hw->card->dev,
"ERROR!!! Invalid sampling rate!!!\n");
return -EINVAL;
}
for (i = 0; i < 8; i++) {
if (i <= 3) {
			/* Channels 0-3 are S/PDIF outputs (SB0960);
			 * the emu20k2 supports 4 S/PDIF IOs. */
if (i == 3)
data = 0x1001001;
else
data = 0x1000001;
hw_write_20kx(hw, (AUDIO_IO_TX_CTL+(0x40*i)), data);
hw_write_20kx(hw, (AUDIO_IO_RX_CTL+(0x40*i)), data);
/* Initialize the SPDIF Out Channel status registers.
* The value specified here is based on the typical
* values provided in the specification, namely: Clock
* Accuracy of 1000ppm, Sample Rate of 48KHz,
* unspecified source number, Generation status = 1,
* Category code = 0x12 (Digital Signal Mixer),
* Mode = 0, Emph = 0, Copy Permitted, AN = 0
			 * (indicating that we're transmitting digital audio),
			 * and the Professional Use bit is 0. */
hw_write_20kx(hw, AUDIO_IO_TX_CSTAT_L+(0x40*i),
0x02109204); /* Default to 48kHz */
hw_write_20kx(hw, AUDIO_IO_TX_CSTAT_H+(0x40*i), 0x0B);
} else {
			/* Channels 4-7 are I2S (SB0960). */
data = 0x11;
hw_write_20kx(hw, AUDIO_IO_RX_CTL+(0x40*i), data);
if (2 == info->msr) {
/* Four channels per sample period */
data |= 0x1000;
} else if (4 == info->msr) {
/* FIXME: check this against the chip spec */
data |= 0x2000;
}
hw_write_20kx(hw, AUDIO_IO_TX_CTL+(0x40*i), data);
}
}
return 0;
}
/* TRANSPORT operations */
static int hw_trn_init(struct hw *hw, const struct trn_conf *info)
{
u32 vmctl, data;
u32 ptp_phys_low, ptp_phys_high;
int i;
/* Set up device page table */
if ((~0UL) == info->vm_pgt_phys) {
dev_alert(hw->card->dev,
"Wrong device page table page address!!!\n");
return -1;
}
vmctl = 0x80000C0F; /* 32-bit, 4k-size page */
ptp_phys_low = (u32)info->vm_pgt_phys;
ptp_phys_high = upper_32_bits(info->vm_pgt_phys);
if (sizeof(void *) == 8) /* 64bit address */
vmctl |= (3 << 8);
/* Write page table physical address to all PTPAL registers */
for (i = 0; i < 64; i++) {
hw_write_20kx(hw, VMEM_PTPAL+(16*i), ptp_phys_low);
hw_write_20kx(hw, VMEM_PTPAH+(16*i), ptp_phys_high);
}
/* Enable virtual memory transfer */
hw_write_20kx(hw, VMEM_CTL, vmctl);
/* Enable transport bus master and queueing of request */
hw_write_20kx(hw, TRANSPORT_CTL, 0x03);
hw_write_20kx(hw, TRANSPORT_INT, 0x200c01);
/* Enable transport ring */
data = hw_read_20kx(hw, TRANSPORT_ENB);
hw_write_20kx(hw, TRANSPORT_ENB, (data | 0x03));
return 0;
}
/* Card initialization */
#define GCTL_AIE 0x00000001
#define GCTL_UAA 0x00000002
#define GCTL_DPC 0x00000004
#define GCTL_DBP 0x00000008
#define GCTL_ABP 0x00000010
#define GCTL_TBP 0x00000020
#define GCTL_SBP 0x00000040
#define GCTL_FBP 0x00000080
#define GCTL_ME 0x00000100
#define GCTL_AID 0x00001000
#define PLLCTL_SRC 0x00000007
#define PLLCTL_SPE 0x00000008
#define PLLCTL_RD 0x000000F0
#define PLLCTL_FD 0x0001FF00
#define PLLCTL_OD 0x00060000
#define PLLCTL_B 0x00080000
#define PLLCTL_AS 0x00100000
#define PLLCTL_LF 0x03E00000
#define PLLCTL_SPS 0x1C000000
#define PLLCTL_AD 0x60000000
#define PLLSTAT_CCS 0x00000007
#define PLLSTAT_SPL 0x00000008
#define PLLSTAT_CRD 0x000000F0
#define PLLSTAT_CFD 0x0001FF00
#define PLLSTAT_SL 0x00020000
#define PLLSTAT_FAS 0x00040000
#define PLLSTAT_B 0x00080000
#define PLLSTAT_PD 0x00100000
#define PLLSTAT_OCA 0x00200000
#define PLLSTAT_NCA 0x00400000
static int hw_pll_init(struct hw *hw, unsigned int rsr)
{
unsigned int pllenb;
unsigned int pllctl;
unsigned int pllstat;
int i;
pllenb = 0xB;
hw_write_20kx(hw, PLL_ENB, pllenb);
pllctl = 0x20C00000;
set_field(&pllctl, PLLCTL_B, 0);
set_field(&pllctl, PLLCTL_FD, 48000 == rsr ? 16 - 4 : 147 - 4);
set_field(&pllctl, PLLCTL_RD, 48000 == rsr ? 1 - 1 : 10 - 1);
hw_write_20kx(hw, PLL_CTL, pllctl);
mdelay(40);
pllctl = hw_read_20kx(hw, PLL_CTL);
set_field(&pllctl, PLLCTL_FD, 48000 == rsr ? 16 - 2 : 147 - 2);
hw_write_20kx(hw, PLL_CTL, pllctl);
mdelay(40);
for (i = 0; i < 1000; i++) {
pllstat = hw_read_20kx(hw, PLL_STAT);
if (get_field(pllstat, PLLSTAT_PD))
continue;
if (get_field(pllstat, PLLSTAT_B) !=
get_field(pllctl, PLLCTL_B))
continue;
if (get_field(pllstat, PLLSTAT_CCS) !=
get_field(pllctl, PLLCTL_SRC))
continue;
if (get_field(pllstat, PLLSTAT_CRD) !=
get_field(pllctl, PLLCTL_RD))
continue;
if (get_field(pllstat, PLLSTAT_CFD) !=
get_field(pllctl, PLLCTL_FD))
continue;
break;
}
if (i >= 1000) {
dev_alert(hw->card->dev,
"PLL initialization failed!!!\n");
return -EBUSY;
}
return 0;
}
static int hw_auto_init(struct hw *hw)
{
unsigned int gctl;
int i;
gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
set_field(&gctl, GCTL_AIE, 0);
hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
set_field(&gctl, GCTL_AIE, 1);
hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
mdelay(10);
for (i = 0; i < 400000; i++) {
gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
if (get_field(gctl, GCTL_AID))
break;
}
if (!get_field(gctl, GCTL_AID)) {
dev_alert(hw->card->dev, "Card Auto-init failed!!!\n");
return -EBUSY;
}
return 0;
}
/* DAC operations */
#define CS4382_MC1 0x1
#define CS4382_MC2 0x2
#define CS4382_MC3 0x3
#define CS4382_FC 0x4
#define CS4382_IC 0x5
#define CS4382_XC1 0x6
#define CS4382_VCA1 0x7
#define CS4382_VCB1 0x8
#define CS4382_XC2 0x9
#define CS4382_VCA2 0xA
#define CS4382_VCB2 0xB
#define CS4382_XC3 0xC
#define CS4382_VCA3 0xD
#define CS4382_VCB3 0xE
#define CS4382_XC4 0xF
#define CS4382_VCA4 0x10
#define CS4382_VCB4 0x11
#define CS4382_CREV 0x12
/* I2C status */
#define STATE_LOCKED 0x00
#define STATE_UNLOCKED 0xAA
#define DATA_READY 0x800000 /* Used with I2C_IF_STATUS */
#define DATA_ABORT 0x10000 /* Used with I2C_IF_STATUS */
#define I2C_STATUS_DCM 0x00000001
#define I2C_STATUS_BC 0x00000006
#define I2C_STATUS_APD 0x00000008
#define I2C_STATUS_AB 0x00010000
#define I2C_STATUS_DR 0x00800000
#define I2C_ADDRESS_PTAD 0x0000FFFF
#define I2C_ADDRESS_SLAD 0x007F0000
struct regs_cs4382 {
u32 mode_control_1;
u32 mode_control_2;
u32 mode_control_3;
u32 filter_control;
u32 invert_control;
u32 mix_control_P1;
u32 vol_control_A1;
u32 vol_control_B1;
u32 mix_control_P2;
u32 vol_control_A2;
u32 vol_control_B2;
u32 mix_control_P3;
u32 vol_control_A3;
u32 vol_control_B3;
u32 mix_control_P4;
u32 vol_control_A4;
u32 vol_control_B4;
};
static int hw20k2_i2c_unlock_full_access(struct hw *hw)
{
u8 UnlockKeySequence_FLASH_FULLACCESS_MODE[2] = {0xB3, 0xD4};
/* Send keys for forced BIOS mode */
hw_write_20kx(hw, I2C_IF_WLOCK,
UnlockKeySequence_FLASH_FULLACCESS_MODE[0]);
hw_write_20kx(hw, I2C_IF_WLOCK,
UnlockKeySequence_FLASH_FULLACCESS_MODE[1]);
/* Check whether the chip is unlocked */
if (hw_read_20kx(hw, I2C_IF_WLOCK) == STATE_UNLOCKED)
return 0;
return -1;
}
static int hw20k2_i2c_lock_chip(struct hw *hw)
{
/* Write twice */
hw_write_20kx(hw, I2C_IF_WLOCK, STATE_LOCKED);
hw_write_20kx(hw, I2C_IF_WLOCK, STATE_LOCKED);
if (hw_read_20kx(hw, I2C_IF_WLOCK) == STATE_LOCKED)
return 0;
return -1;
}
static int hw20k2_i2c_init(struct hw *hw, u8 dev_id, u8 addr_size, u8 data_size)
{
struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
int err;
unsigned int i2c_status;
unsigned int i2c_addr;
err = hw20k2_i2c_unlock_full_access(hw);
if (err < 0)
return err;
hw20k2->addr_size = addr_size;
hw20k2->data_size = data_size;
hw20k2->dev_id = dev_id;
i2c_addr = 0;
set_field(&i2c_addr, I2C_ADDRESS_SLAD, dev_id);
hw_write_20kx(hw, I2C_IF_ADDRESS, i2c_addr);
i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);
set_field(&i2c_status, I2C_STATUS_DCM, 1); /* Direct control mode */
hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);
return 0;
}
static int hw20k2_i2c_uninit(struct hw *hw)
{
unsigned int i2c_status;
unsigned int i2c_addr;
i2c_addr = 0;
set_field(&i2c_addr, I2C_ADDRESS_SLAD, 0x57); /* I2C id */
hw_write_20kx(hw, I2C_IF_ADDRESS, i2c_addr);
i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);
set_field(&i2c_status, I2C_STATUS_DCM, 0); /* I2C mode */
hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);
return hw20k2_i2c_lock_chip(hw);
}
static int hw20k2_i2c_wait_data_ready(struct hw *hw)
{
int i = 0x400000;
unsigned int ret;
do {
ret = hw_read_20kx(hw, I2C_IF_STATUS);
} while ((!(ret & DATA_READY)) && --i);
return i;
}
static int hw20k2_i2c_read(struct hw *hw, u16 addr, u32 *datap)
{
struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
unsigned int i2c_status;
i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);
set_field(&i2c_status, I2C_STATUS_BC,
(4 == hw20k2->addr_size) ? 0 : hw20k2->addr_size);
hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);
if (!hw20k2_i2c_wait_data_ready(hw))
return -1;
hw_write_20kx(hw, I2C_IF_WDATA, addr);
if (!hw20k2_i2c_wait_data_ready(hw))
return -1;
/* Force a read operation */
hw_write_20kx(hw, I2C_IF_RDATA, 0);
if (!hw20k2_i2c_wait_data_ready(hw))
return -1;
*datap = hw_read_20kx(hw, I2C_IF_RDATA);
return 0;
}
static int hw20k2_i2c_write(struct hw *hw, u16 addr, u32 data)
{
struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
unsigned int i2c_data = (data << (hw20k2->addr_size * 8)) | addr;
unsigned int i2c_status;
i2c_status = hw_read_20kx(hw, I2C_IF_STATUS);
set_field(&i2c_status, I2C_STATUS_BC,
(4 == (hw20k2->addr_size + hw20k2->data_size)) ?
0 : (hw20k2->addr_size + hw20k2->data_size));
hw_write_20kx(hw, I2C_IF_STATUS, i2c_status);
hw20k2_i2c_wait_data_ready(hw);
/* Dummy write to trigger the write operation */
hw_write_20kx(hw, I2C_IF_WDATA, 0);
hw20k2_i2c_wait_data_ready(hw);
/* This is the real data */
hw_write_20kx(hw, I2C_IF_WDATA, i2c_data);
hw20k2_i2c_wait_data_ready(hw);
return 0;
}
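/*
 * Worked example (illustrative): with addr_size == 1 and
 * data_size == 1 (as used for the CS4382 below), writing data 0x80
 * to register 0x01 packs into i2c_data = (0x80 << 8) | 0x01 = 0x8001,
 * and the byte count programmed into I2C_STATUS_BC is
 * addr_size + data_size = 2.
 */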
static void hw_dac_stop(struct hw *hw)
{
u32 data;
data = hw_read_20kx(hw, GPIO_DATA);
data &= 0xFFFFFFFD;
hw_write_20kx(hw, GPIO_DATA, data);
mdelay(10);
}
static void hw_dac_start(struct hw *hw)
{
u32 data;
data = hw_read_20kx(hw, GPIO_DATA);
data |= 0x2;
hw_write_20kx(hw, GPIO_DATA, data);
mdelay(50);
}
static void hw_dac_reset(struct hw *hw)
{
hw_dac_stop(hw);
hw_dac_start(hw);
}
static int hw_dac_init(struct hw *hw, const struct dac_conf *info)
{
int err;
u32 data;
int i;
struct regs_cs4382 cs_read = {0};
struct regs_cs4382 cs_def = {
0x00000001, /* Mode Control 1 */
0x00000000, /* Mode Control 2 */
0x00000084, /* Mode Control 3 */
0x00000000, /* Filter Control */
0x00000000, /* Invert Control */
0x00000024, /* Mixing Control Pair 1 */
0x00000000, /* Vol Control A1 */
0x00000000, /* Vol Control B1 */
0x00000024, /* Mixing Control Pair 2 */
0x00000000, /* Vol Control A2 */
0x00000000, /* Vol Control B2 */
0x00000024, /* Mixing Control Pair 3 */
0x00000000, /* Vol Control A3 */
0x00000000, /* Vol Control B3 */
0x00000024, /* Mixing Control Pair 4 */
0x00000000, /* Vol Control A4 */
0x00000000 /* Vol Control B4 */
};
if (hw->model == CTSB1270) {
hw_dac_stop(hw);
data = hw_read_20kx(hw, GPIO_DATA);
data &= ~0x0600;
if (1 == info->msr)
data |= 0x0000; /* Single Speed Mode 0-50kHz */
else if (2 == info->msr)
data |= 0x0200; /* Double Speed Mode 50-100kHz */
else
data |= 0x0600; /* Quad Speed Mode 100-200kHz */
hw_write_20kx(hw, GPIO_DATA, data);
hw_dac_start(hw);
return 0;
}
/* Set DAC reset bit as output */
data = hw_read_20kx(hw, GPIO_CTRL);
data |= 0x02;
hw_write_20kx(hw, GPIO_CTRL, data);
err = hw20k2_i2c_init(hw, 0x18, 1, 1);
if (err < 0)
goto End;
for (i = 0; i < 2; i++) {
		/* Reset the DAC twice, in case the chip
		 * didn't initialize properly */
hw_dac_reset(hw);
hw_dac_reset(hw);
if (hw20k2_i2c_read(hw, CS4382_MC1, &cs_read.mode_control_1))
continue;
if (hw20k2_i2c_read(hw, CS4382_MC2, &cs_read.mode_control_2))
continue;
if (hw20k2_i2c_read(hw, CS4382_MC3, &cs_read.mode_control_3))
continue;
if (hw20k2_i2c_read(hw, CS4382_FC, &cs_read.filter_control))
continue;
if (hw20k2_i2c_read(hw, CS4382_IC, &cs_read.invert_control))
continue;
if (hw20k2_i2c_read(hw, CS4382_XC1, &cs_read.mix_control_P1))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCA1, &cs_read.vol_control_A1))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCB1, &cs_read.vol_control_B1))
continue;
if (hw20k2_i2c_read(hw, CS4382_XC2, &cs_read.mix_control_P2))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCA2, &cs_read.vol_control_A2))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCB2, &cs_read.vol_control_B2))
continue;
if (hw20k2_i2c_read(hw, CS4382_XC3, &cs_read.mix_control_P3))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCA3, &cs_read.vol_control_A3))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCB3, &cs_read.vol_control_B3))
continue;
if (hw20k2_i2c_read(hw, CS4382_XC4, &cs_read.mix_control_P4))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCA4, &cs_read.vol_control_A4))
continue;
if (hw20k2_i2c_read(hw, CS4382_VCB4, &cs_read.vol_control_B4))
continue;
if (memcmp(&cs_read, &cs_def, sizeof(cs_read)))
continue;
else
break;
}
if (i >= 2)
goto End;
	/* Note: every I2C write should be followed by some delay.
	 * This is not a documented requirement, but the delays work
	 * here... */
hw20k2_i2c_write(hw, CS4382_MC1, 0x80);
hw20k2_i2c_write(hw, CS4382_MC2, 0x10);
if (1 == info->msr) {
hw20k2_i2c_write(hw, CS4382_XC1, 0x24);
hw20k2_i2c_write(hw, CS4382_XC2, 0x24);
hw20k2_i2c_write(hw, CS4382_XC3, 0x24);
hw20k2_i2c_write(hw, CS4382_XC4, 0x24);
} else if (2 == info->msr) {
hw20k2_i2c_write(hw, CS4382_XC1, 0x25);
hw20k2_i2c_write(hw, CS4382_XC2, 0x25);
hw20k2_i2c_write(hw, CS4382_XC3, 0x25);
hw20k2_i2c_write(hw, CS4382_XC4, 0x25);
} else {
hw20k2_i2c_write(hw, CS4382_XC1, 0x26);
hw20k2_i2c_write(hw, CS4382_XC2, 0x26);
hw20k2_i2c_write(hw, CS4382_XC3, 0x26);
hw20k2_i2c_write(hw, CS4382_XC4, 0x26);
}
return 0;
End:
hw20k2_i2c_uninit(hw);
return -1;
}
/* ADC operations */
#define MAKE_WM8775_ADDR(addr, data) (u32)(((addr<<1)&0xFE)|((data>>8)&0x1))
#define MAKE_WM8775_DATA(data) (u32)(data&0xFF)
#define WM8775_IC 0x0B
#define WM8775_MMC 0x0C
#define WM8775_AADCL 0x0E
#define WM8775_AADCR 0x0F
#define WM8775_ADCMC 0x15
#define WM8775_RESET 0x17
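/*
 * Worked example (illustrative): the WM8775 takes 16-bit frames made
 * of a 7-bit register address and a 9-bit value.  Writing 0x26 to
 * register WM8775_IC (0x0B) gives
 *   MAKE_WM8775_ADDR(0x0B, 0x26) = ((0x0B << 1) & 0xFE) | 0 = 0x16
 *   MAKE_WM8775_DATA(0x26) = 0x26
 * i.e. bit 8 of the value rides in the low bit of the address byte.
 */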
static int hw_is_adc_input_selected(struct hw *hw, enum ADCSRC type)
{
u32 data;
if (hw->model == CTSB1270) {
/* Titanium HD has two ADC chips, one for line in and one */
/* for MIC. We don't need to switch the ADC input. */
return 1;
}
data = hw_read_20kx(hw, GPIO_DATA);
switch (type) {
case ADC_MICIN:
data = (data & (0x1 << 14)) ? 1 : 0;
break;
case ADC_LINEIN:
data = (data & (0x1 << 14)) ? 0 : 1;
break;
default:
data = 0;
}
return data;
}
#define MIC_BOOST_0DB 0xCF
#define MIC_BOOST_STEPS_PER_DB 2
static void hw_wm8775_input_select(struct hw *hw, u8 input, s8 gain_in_db)
{
u32 adcmc, gain;
if (input > 3)
input = 3;
adcmc = ((u32)1 << input) | 0x100; /* Link L+R gain... */
hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_ADCMC, adcmc),
MAKE_WM8775_DATA(adcmc));
if (gain_in_db < -103)
gain_in_db = -103;
if (gain_in_db > 24)
gain_in_db = 24;
gain = gain_in_db * MIC_BOOST_STEPS_PER_DB + MIC_BOOST_0DB;
hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCL, gain),
MAKE_WM8775_DATA(gain));
/* ...so there should be no need for the following. */
hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_AADCR, gain),
MAKE_WM8775_DATA(gain));
}
static int hw_adc_input_select(struct hw *hw, enum ADCSRC type)
{
u32 data;
data = hw_read_20kx(hw, GPIO_DATA);
switch (type) {
case ADC_MICIN:
data |= (0x1 << 14);
hw_write_20kx(hw, GPIO_DATA, data);
hw_wm8775_input_select(hw, 0, 20); /* Mic, 20dB */
break;
case ADC_LINEIN:
data &= ~(0x1 << 14);
hw_write_20kx(hw, GPIO_DATA, data);
hw_wm8775_input_select(hw, 1, 0); /* Line-in, 0dB */
break;
default:
break;
}
return 0;
}
static int hw_adc_init(struct hw *hw, const struct adc_conf *info)
{
int err;
u32 data, ctl;
/* Set ADC reset bit as output */
data = hw_read_20kx(hw, GPIO_CTRL);
data |= (0x1 << 15);
hw_write_20kx(hw, GPIO_CTRL, data);
/* Initialize I2C */
err = hw20k2_i2c_init(hw, 0x1A, 1, 1);
if (err < 0) {
dev_alert(hw->card->dev, "Failure to acquire I2C!!!\n");
goto error;
}
/* Reset the ADC (reset is active low). */
data = hw_read_20kx(hw, GPIO_DATA);
data &= ~(0x1 << 15);
hw_write_20kx(hw, GPIO_DATA, data);
if (hw->model == CTSB1270) {
/* Set up the PCM4220 ADC on Titanium HD */
data &= ~0x0C;
if (1 == info->msr)
data |= 0x00; /* Single Speed Mode 32-50kHz */
else if (2 == info->msr)
data |= 0x08; /* Double Speed Mode 50-108kHz */
else
data |= 0x04; /* Quad Speed Mode 108kHz-216kHz */
hw_write_20kx(hw, GPIO_DATA, data);
}
mdelay(10);
/* Return the ADC to normal operation. */
data |= (0x1 << 15);
hw_write_20kx(hw, GPIO_DATA, data);
mdelay(50);
	/* I2C write to register offset 0x0B to set the ADC LRCLK
	 * polarity invert bit, interface format to I2S, word length to
	 * 24-bit, and enable the ADC high pass filter.  Fixes bug 5323? */
hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_IC, 0x26),
MAKE_WM8775_DATA(0x26));
	/* Program the master mode control register (256fs) */
if (1 == info->msr) {
/* slave mode, 128x oversampling 256fs */
hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_MMC, 0x02),
MAKE_WM8775_DATA(0x02));
} else if ((2 == info->msr) || (4 == info->msr)) {
/* slave mode, 64x oversampling, 256fs */
hw20k2_i2c_write(hw, MAKE_WM8775_ADDR(WM8775_MMC, 0x0A),
MAKE_WM8775_DATA(0x0A));
} else {
dev_alert(hw->card->dev,
"Invalid master sampling rate (msr %d)!!!\n",
info->msr);
err = -EINVAL;
goto error;
}
if (hw->model != CTSB1270) {
/* Configure GPIO bit 14 change to line-in/mic-in */
ctl = hw_read_20kx(hw, GPIO_CTRL);
ctl |= 0x1 << 14;
hw_write_20kx(hw, GPIO_CTRL, ctl);
hw_adc_input_select(hw, ADC_LINEIN);
} else {
hw_wm8775_input_select(hw, 0, 0);
}
return 0;
error:
hw20k2_i2c_uninit(hw);
return err;
}
static struct capabilities hw_capabilities(struct hw *hw)
{
struct capabilities cap;
cap.digit_io_switch = 0;
cap.dedicated_mic = hw->model == CTSB1270;
cap.output_switch = hw->model == CTSB1270;
cap.mic_source_switch = hw->model == CTSB1270;
return cap;
}
static int hw_output_switch_get(struct hw *hw)
{
u32 data = hw_read_20kx(hw, GPIO_EXT_DATA);
switch (data & 0x30) {
case 0x00:
return 0;
case 0x10:
return 1;
case 0x20:
return 2;
default:
return 3;
}
}
static int hw_output_switch_put(struct hw *hw, int position)
{
u32 data;
if (position == hw_output_switch_get(hw))
return 0;
/* Mute line and headphones (intended for anti-pop). */
data = hw_read_20kx(hw, GPIO_DATA);
data |= (0x03 << 11);
hw_write_20kx(hw, GPIO_DATA, data);
data = hw_read_20kx(hw, GPIO_EXT_DATA) & ~0x30;
switch (position) {
case 0:
break;
case 1:
data |= 0x10;
break;
default:
data |= 0x20;
}
hw_write_20kx(hw, GPIO_EXT_DATA, data);
/* Unmute line and headphones. */
data = hw_read_20kx(hw, GPIO_DATA);
data &= ~(0x03 << 11);
hw_write_20kx(hw, GPIO_DATA, data);
return 1;
}
static int hw_mic_source_switch_get(struct hw *hw)
{
struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
return hw20k2->mic_source;
}
static int hw_mic_source_switch_put(struct hw *hw, int position)
{
struct hw20k2 *hw20k2 = (struct hw20k2 *)hw;
if (position == hw20k2->mic_source)
return 0;
switch (position) {
case 0:
hw_wm8775_input_select(hw, 0, 0); /* Mic, 0dB */
break;
case 1:
hw_wm8775_input_select(hw, 1, 0); /* FP Mic, 0dB */
break;
case 2:
hw_wm8775_input_select(hw, 3, 0); /* Aux Ext, 0dB */
break;
default:
return 0;
}
hw20k2->mic_source = position;
return 1;
}
static irqreturn_t ct_20k2_interrupt(int irq, void *dev_id)
{
struct hw *hw = dev_id;
unsigned int status;
status = hw_read_20kx(hw, GIP);
if (!status)
return IRQ_NONE;
if (hw->irq_callback)
hw->irq_callback(hw->irq_callback_data, status);
hw_write_20kx(hw, GIP, status);
return IRQ_HANDLED;
}
static int hw_card_start(struct hw *hw)
{
int err = 0;
struct pci_dev *pci = hw->pci;
unsigned int gctl;
err = pci_enable_device(pci);
if (err < 0)
return err;
/* Set DMA transfer mask */
if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 ||
dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) {
dev_err(hw->card->dev,
"architecture does not support PCI busmaster DMA with mask 0x%llx\n",
CT_XFI_DMA_MASK);
err = -ENXIO;
goto error1;
}
if (!hw->io_base) {
err = pci_request_regions(pci, "XFi");
if (err < 0)
goto error1;
hw->io_base = pci_resource_start(hw->pci, 2);
hw->mem_base = ioremap(hw->io_base,
pci_resource_len(hw->pci, 2));
if (!hw->mem_base) {
err = -ENOENT;
goto error2;
}
}
/* Switch to 20k2 mode from UAA mode. */
gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
set_field(&gctl, GCTL_UAA, 0);
hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
if (hw->irq < 0) {
err = request_irq(pci->irq, ct_20k2_interrupt, IRQF_SHARED,
KBUILD_MODNAME, hw);
if (err < 0) {
dev_err(hw->card->dev,
"XFi: Cannot get irq %d\n", pci->irq);
goto error2;
}
hw->irq = pci->irq;
}
pci_set_master(pci);
return 0;
/*error3:
iounmap((void *)hw->mem_base);
hw->mem_base = (unsigned long)NULL;*/
error2:
pci_release_regions(pci);
hw->io_base = 0;
error1:
pci_disable_device(pci);
return err;
}
static int hw_card_stop(struct hw *hw)
{
unsigned int data;
/* disable transport bus master and queueing of request */
hw_write_20kx(hw, TRANSPORT_CTL, 0x00);
/* disable pll */
data = hw_read_20kx(hw, PLL_ENB);
hw_write_20kx(hw, PLL_ENB, (data & (~0x07)));
/* TODO: Disable interrupt and so on... */
return 0;
}
static int hw_card_shutdown(struct hw *hw)
{
if (hw->irq >= 0)
free_irq(hw->irq, hw);
hw->irq = -1;
iounmap(hw->mem_base);
hw->mem_base = NULL;
if (hw->io_base)
pci_release_regions(hw->pci);
hw->io_base = 0;
pci_disable_device(hw->pci);
return 0;
}
static int hw_card_init(struct hw *hw, struct card_conf *info)
{
int err;
unsigned int gctl;
u32 data = 0;
struct dac_conf dac_info = {0};
struct adc_conf adc_info = {0};
struct daio_conf daio_info = {0};
struct trn_conf trn_info = {0};
/* Get PCI io port/memory base address and
* do 20kx core switch if needed. */
err = hw_card_start(hw);
if (err)
return err;
/* PLL init */
err = hw_pll_init(hw, info->rsr);
if (err < 0)
return err;
/* kick off auto-init */
err = hw_auto_init(hw);
if (err < 0)
return err;
gctl = hw_read_20kx(hw, GLOBAL_CNTL_GCTL);
set_field(&gctl, GCTL_DBP, 1);
set_field(&gctl, GCTL_TBP, 1);
set_field(&gctl, GCTL_FBP, 1);
set_field(&gctl, GCTL_DPC, 0);
hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
/* Reset all global pending interrupts */
hw_write_20kx(hw, GIE, 0);
/* Reset all SRC pending interrupts */
hw_write_20kx(hw, SRC_IP, 0);
if (hw->model != CTSB1270) {
/* TODO: detect the card ID and configure GPIO accordingly. */
/* Configures GPIO (0xD802 0x98028) */
/*hw_write_20kx(hw, GPIO_CTRL, 0x7F07);*/
/* Configures GPIO (SB0880) */
/*hw_write_20kx(hw, GPIO_CTRL, 0xFF07);*/
hw_write_20kx(hw, GPIO_CTRL, 0xD802);
} else {
hw_write_20kx(hw, GPIO_CTRL, 0x9E5F);
}
/* Enable audio ring */
hw_write_20kx(hw, MIXER_AR_ENABLE, 0x01);
trn_info.vm_pgt_phys = info->vm_pgt_phys;
err = hw_trn_init(hw, &trn_info);
if (err < 0)
return err;
daio_info.msr = info->msr;
err = hw_daio_init(hw, &daio_info);
if (err < 0)
return err;
dac_info.msr = info->msr;
err = hw_dac_init(hw, &dac_info);
if (err < 0)
return err;
adc_info.msr = info->msr;
adc_info.input = ADC_LINEIN;
adc_info.mic20db = 0;
err = hw_adc_init(hw, &adc_info);
if (err < 0)
return err;
data = hw_read_20kx(hw, SRC_MCTL);
data |= 0x1; /* Enables input from the audio ring */
hw_write_20kx(hw, SRC_MCTL, data);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int hw_suspend(struct hw *hw)
{
hw_card_stop(hw);
return 0;
}
static int hw_resume(struct hw *hw, struct card_conf *info)
{
/* Re-initialize card hardware. */
return hw_card_init(hw, info);
}
#endif
static u32 hw_read_20kx(struct hw *hw, u32 reg)
{
return readl(hw->mem_base + reg);
}
static void hw_write_20kx(struct hw *hw, u32 reg, u32 data)
{
writel(data, hw->mem_base + reg);
}
static struct hw ct20k2_preset = {
.irq = -1,
.card_init = hw_card_init,
.card_stop = hw_card_stop,
.pll_init = hw_pll_init,
.is_adc_source_selected = hw_is_adc_input_selected,
.select_adc_source = hw_adc_input_select,
.capabilities = hw_capabilities,
.output_switch_get = hw_output_switch_get,
.output_switch_put = hw_output_switch_put,
.mic_source_switch_get = hw_mic_source_switch_get,
.mic_source_switch_put = hw_mic_source_switch_put,
#ifdef CONFIG_PM_SLEEP
.suspend = hw_suspend,
.resume = hw_resume,
#endif
.src_rsc_get_ctrl_blk = src_get_rsc_ctrl_blk,
.src_rsc_put_ctrl_blk = src_put_rsc_ctrl_blk,
.src_mgr_get_ctrl_blk = src_mgr_get_ctrl_blk,
.src_mgr_put_ctrl_blk = src_mgr_put_ctrl_blk,
.src_set_state = src_set_state,
.src_set_bm = src_set_bm,
.src_set_rsr = src_set_rsr,
.src_set_sf = src_set_sf,
.src_set_wr = src_set_wr,
.src_set_pm = src_set_pm,
.src_set_rom = src_set_rom,
.src_set_vo = src_set_vo,
.src_set_st = src_set_st,
.src_set_ie = src_set_ie,
.src_set_ilsz = src_set_ilsz,
.src_set_bp = src_set_bp,
.src_set_cisz = src_set_cisz,
.src_set_ca = src_set_ca,
.src_set_sa = src_set_sa,
.src_set_la = src_set_la,
.src_set_pitch = src_set_pitch,
.src_set_dirty = src_set_dirty,
.src_set_clear_zbufs = src_set_clear_zbufs,
.src_set_dirty_all = src_set_dirty_all,
.src_commit_write = src_commit_write,
.src_get_ca = src_get_ca,
.src_get_dirty = src_get_dirty,
.src_dirty_conj_mask = src_dirty_conj_mask,
.src_mgr_enbs_src = src_mgr_enbs_src,
.src_mgr_enb_src = src_mgr_enb_src,
.src_mgr_dsb_src = src_mgr_dsb_src,
.src_mgr_commit_write = src_mgr_commit_write,
.srcimp_mgr_get_ctrl_blk = srcimp_mgr_get_ctrl_blk,
.srcimp_mgr_put_ctrl_blk = srcimp_mgr_put_ctrl_blk,
.srcimp_mgr_set_imaparc = srcimp_mgr_set_imaparc,
.srcimp_mgr_set_imapuser = srcimp_mgr_set_imapuser,
.srcimp_mgr_set_imapnxt = srcimp_mgr_set_imapnxt,
.srcimp_mgr_set_imapaddr = srcimp_mgr_set_imapaddr,
.srcimp_mgr_commit_write = srcimp_mgr_commit_write,
.amixer_rsc_get_ctrl_blk = amixer_rsc_get_ctrl_blk,
.amixer_rsc_put_ctrl_blk = amixer_rsc_put_ctrl_blk,
.amixer_mgr_get_ctrl_blk = amixer_mgr_get_ctrl_blk,
.amixer_mgr_put_ctrl_blk = amixer_mgr_put_ctrl_blk,
.amixer_set_mode = amixer_set_mode,
.amixer_set_iv = amixer_set_iv,
.amixer_set_x = amixer_set_x,
.amixer_set_y = amixer_set_y,
.amixer_set_sadr = amixer_set_sadr,
.amixer_set_se = amixer_set_se,
.amixer_set_dirty = amixer_set_dirty,
.amixer_set_dirty_all = amixer_set_dirty_all,
.amixer_commit_write = amixer_commit_write,
.amixer_get_y = amixer_get_y,
.amixer_get_dirty = amixer_get_dirty,
.dai_get_ctrl_blk = dai_get_ctrl_blk,
.dai_put_ctrl_blk = dai_put_ctrl_blk,
.dai_srt_set_srco = dai_srt_set_srco,
.dai_srt_set_srcm = dai_srt_set_srcm,
.dai_srt_set_rsr = dai_srt_set_rsr,
.dai_srt_set_drat = dai_srt_set_drat,
.dai_srt_set_ec = dai_srt_set_ec,
.dai_srt_set_et = dai_srt_set_et,
.dai_commit_write = dai_commit_write,
.dao_get_ctrl_blk = dao_get_ctrl_blk,
.dao_put_ctrl_blk = dao_put_ctrl_blk,
.dao_set_spos = dao_set_spos,
.dao_commit_write = dao_commit_write,
.dao_get_spos = dao_get_spos,
.daio_mgr_get_ctrl_blk = daio_mgr_get_ctrl_blk,
.daio_mgr_put_ctrl_blk = daio_mgr_put_ctrl_blk,
.daio_mgr_enb_dai = daio_mgr_enb_dai,
.daio_mgr_dsb_dai = daio_mgr_dsb_dai,
.daio_mgr_enb_dao = daio_mgr_enb_dao,
.daio_mgr_dsb_dao = daio_mgr_dsb_dao,
.daio_mgr_dao_init = daio_mgr_dao_init,
.daio_mgr_set_imaparc = daio_mgr_set_imaparc,
.daio_mgr_set_imapnxt = daio_mgr_set_imapnxt,
.daio_mgr_set_imapaddr = daio_mgr_set_imapaddr,
.daio_mgr_commit_write = daio_mgr_commit_write,
.set_timer_irq = set_timer_irq,
.set_timer_tick = set_timer_tick,
.get_wc = get_wc,
};
int create_20k2_hw_obj(struct hw **rhw)
{
struct hw20k2 *hw20k2;
*rhw = NULL;
hw20k2 = kzalloc(sizeof(*hw20k2), GFP_KERNEL);
if (!hw20k2)
return -ENOMEM;
hw20k2->hw = ct20k2_preset;
*rhw = &hw20k2->hw;
return 0;
}
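/*
 * Illustrative sketch, not driver code: the preset-copy pattern used
 * by create_20k2_hw_obj() above.  A statically initialized template
 * of function pointers is copied into each newly allocated object,
 * giving per-instance state plus shared "vtable" behaviour.  All
 * names below are hypothetical.
 */
struct ops_obj {
	int (*start)(struct ops_obj *obj);
	int instance_state;
};

static int ops_generic_start(struct ops_obj *obj)
{
	return obj->instance_state;	/* stand-in for real work */
}

static const struct ops_obj ops_preset = {
	.start = ops_generic_start,
};

static struct ops_obj *ops_obj_create(void)
{
	struct ops_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		*obj = ops_preset;	/* clone template, then customize */
	return obj;
}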
int destroy_20k2_hw_obj(struct hw *hw)
{
if (hw->io_base)
hw_card_shutdown(hw);
kfree(hw);
return 0;
}
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_aries_KOR | arch/arm/mach-omap2/clkt2xxx_dpllcore.c | 908 | 4517 | /*
* DPLL + CORE_CLK composite clock functions
*
* Copyright (C) 2005-2008 Texas Instruments, Inc.
* Copyright (C) 2004-2010 Nokia Corporation
*
* Contacts:
* Richard Woodruff <r-woodruff2@ti.com>
* Paul Walmsley
*
* Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
* Gordon McNutt and RidgeRun, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* XXX The DPLL and CORE clocks should be split into two separate clock
* types.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <plat/clock.h>
#include <plat/sram.h>
#include <plat/sdrc.h>
#include "clock.h"
#include "clock2xxx.h"
#include "opp2xxx.h"
#include "cm.h"
#include "cm-regbits-24xx.h"
/* #define DOWN_VARIABLE_DPLL 1 */ /* Experimental */
/**
* omap2xxx_clk_get_core_rate - return the CORE_CLK rate
* @clk: pointer to the combined dpll_ck + core_ck (currently "dpll_ck")
*
* Returns the CORE_CLK rate. CORE_CLK can have one of three rate
* sources on OMAP2xxx: the DPLL CLKOUT rate, DPLL CLKOUTX2, or 32KHz
* (the latter is unusual). This currently should be called with
* struct clk *dpll_ck, which is a composite clock of dpll_ck and
* core_ck.
*/
unsigned long omap2xxx_clk_get_core_rate(struct clk *clk)
{
long long core_clk;
u32 v;
core_clk = omap2_get_dpll_rate(clk);
v = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
v &= OMAP24XX_CORE_CLK_SRC_MASK;
if (v == CORE_CLK_SRC_32K)
core_clk = 32768;
else
core_clk *= v;
return core_clk;
}
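/*
 * Worked example (illustrative): if omap2_get_dpll_rate() returns
 * 300 MHz and CM_CLKSEL2 selects the DPLL x2 clockout (v == 2),
 * CORE_CLK is 300 MHz * 2 = 600 MHz; with the 32K source the DPLL
 * rate is ignored and 32768 Hz is returned instead.
 */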
/*
* Uses the current prcm set to tell if a rate is valid.
* You can go slower, but not faster within a given rate set.
*/
static long omap2_dpllcore_round_rate(unsigned long target_rate)
{
u32 high, low, core_clk_src;
core_clk_src = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
core_clk_src &= OMAP24XX_CORE_CLK_SRC_MASK;
if (core_clk_src == CORE_CLK_SRC_DPLL) { /* DPLL clockout */
high = curr_prcm_set->dpll_speed * 2;
low = curr_prcm_set->dpll_speed;
} else { /* DPLL clockout x 2 */
high = curr_prcm_set->dpll_speed;
low = curr_prcm_set->dpll_speed / 2;
}
#ifdef DOWN_VARIABLE_DPLL
if (target_rate > high)
return high;
else
return target_rate;
#else
if (target_rate > low)
return high;
else
return low;
#endif
}
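/*
 * Worked example (illustrative): with dpll_speed == 300000000 and the
 * DPLL x2 clockout selected, low == 150 MHz and high == 300 MHz;
 * without DOWN_VARIABLE_DPLL a 200 MHz request therefore rounds up to
 * 300 MHz, while anything at or below 150 MHz rounds to 150 MHz.
 */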
unsigned long omap2_dpllcore_recalc(struct clk *clk)
{
return omap2xxx_clk_get_core_rate(clk);
}
int omap2_reprogram_dpllcore(struct clk *clk, unsigned long rate)
{
u32 cur_rate, low, mult, div, valid_rate, done_rate;
u32 bypass = 0;
struct prcm_config tmpset;
const struct dpll_data *dd;
cur_rate = omap2xxx_clk_get_core_rate(dclk);
mult = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
mult &= OMAP24XX_CORE_CLK_SRC_MASK;
if ((rate == (cur_rate / 2)) && (mult == 2)) {
omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL, 1);
} else if ((rate == (cur_rate * 2)) && (mult == 1)) {
omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
} else if (rate != cur_rate) {
valid_rate = omap2_dpllcore_round_rate(rate);
if (valid_rate != rate)
return -EINVAL;
if (mult == 1)
low = curr_prcm_set->dpll_speed;
else
low = curr_prcm_set->dpll_speed / 2;
dd = clk->dpll_data;
if (!dd)
return -EINVAL;
tmpset.cm_clksel1_pll = __raw_readl(dd->mult_div1_reg);
tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
dd->div1_mask);
div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
tmpset.cm_clksel2_pll = cm_read_mod_reg(PLL_MOD, CM_CLKSEL2);
tmpset.cm_clksel2_pll &= ~OMAP24XX_CORE_CLK_SRC_MASK;
if (rate > low) {
tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL_X2;
mult = ((rate / 2) / 1000000);
done_rate = CORE_CLK_SRC_DPLL_X2;
} else {
tmpset.cm_clksel2_pll |= CORE_CLK_SRC_DPLL;
mult = (rate / 1000000);
done_rate = CORE_CLK_SRC_DPLL;
}
tmpset.cm_clksel1_pll |= (div << __ffs(dd->mult_mask));
tmpset.cm_clksel1_pll |= (mult << __ffs(dd->div1_mask));
/* Worst case */
tmpset.base_sdrc_rfr = SDRC_RFR_CTRL_BYPASS;
if (rate == curr_prcm_set->xtal_speed) /* If asking for 1-1 */
bypass = 1;
/* For omap2xxx_sdrc_init_params() */
omap2xxx_sdrc_reprogram(CORE_CLK_SRC_DPLL_X2, 1);
/* Force dll lock mode */
omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr,
bypass);
/* Errata: ret dll entry state */
omap2xxx_sdrc_init_params(omap2xxx_sdrc_dll_is_unlocked());
omap2xxx_sdrc_reprogram(done_rate, 0);
}
return 0;
}
| gpl-2.0 |
spacecaker/Stock_spacecaker_kernel | drivers/media/video/gspca/spca505.c | 908 | 19468 | /*
* SPCA505 chip based cameras initialization data
*
* V4L2 by Jean-Francis Moine <http://moinejf.free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define MODULE_NAME "spca505"
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/SPCA505 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
u8 brightness;
u8 subtype;
#define IntelPCCameraPro 0
#define Nxultra 1
};
/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
.minimum = 0,
.maximum = 255,
.step = 1,
#define BRIGHTNESS_DEF 127
.default_value = BRIGHTNESS_DEF,
},
.set = sd_setbrightness,
.get = sd_getbrightness,
},
};
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 160 * 120 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 4},
{176, 144, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 3},
{320, 240, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
{352, 288, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{640, 480, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
};
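/*
 * Note (illustrative): sizeimage is width * height * 3 / 2 because
 * these frames carry a full-resolution Y plane plus
 * quarter-resolution U and V (YUV 4:2:0 style), e.g.
 * 640 * 480 * 3 / 2 = 460800 bytes for the VGA mode.
 */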
#define SPCA50X_OFFSET_DATA 10
#define SPCA50X_REG_USB 0x02 /* spca505 501 */
#define SPCA50X_USB_CTRL 0x00 /* spca505 */
#define SPCA50X_CUSB_ENABLE 0x01 /* spca505 */
#define SPCA50X_REG_GLOBAL 0x03 /* spca505 */
#define SPCA50X_GMISC0_IDSEL 0x01 /* Global control device ID select spca505 */
#define SPCA50X_GLOBAL_MISC0 0x00 /* Global control miscellaneous 0 spca505 */
#define SPCA50X_GLOBAL_MISC1 0x01 /* 505 */
#define SPCA50X_GLOBAL_MISC3 0x03 /* 505 */
#define SPCA50X_GMISC3_SAA7113RST 0x20 /* Not sure about this one spca505 */
/* Image format and compression control */
#define SPCA50X_REG_COMPRESS 0x04
/*
* Data to initialize a SPCA505. Common to the CCD and external modes
*/
static const u8 spca505_init_data[][3] = {
/* bmRequest,value,index */
{SPCA50X_REG_GLOBAL, SPCA50X_GMISC3_SAA7113RST, SPCA50X_GLOBAL_MISC3},
/* Sensor reset */
{SPCA50X_REG_GLOBAL, 0x00, SPCA50X_GLOBAL_MISC3},
{SPCA50X_REG_GLOBAL, 0x00, SPCA50X_GLOBAL_MISC1},
/* Block USB reset */
{SPCA50X_REG_GLOBAL, SPCA50X_GMISC0_IDSEL, SPCA50X_GLOBAL_MISC0},
{0x05, 0x01, 0x10},
/* Maybe power down some stuff */
{0x05, 0x0f, 0x11},
/* Setup internal CCD ? */
{0x06, 0x10, 0x08},
{0x06, 0x00, 0x09},
{0x06, 0x00, 0x0a},
{0x06, 0x00, 0x0b},
{0x06, 0x10, 0x0c},
{0x06, 0x00, 0x0d},
{0x06, 0x00, 0x0e},
{0x06, 0x00, 0x0f},
{0x06, 0x10, 0x10},
{0x06, 0x02, 0x11},
{0x06, 0x00, 0x12},
{0x06, 0x04, 0x13},
{0x06, 0x02, 0x14},
{0x06, 0x8a, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0xb6, 0x53},
{0x06, 0x3d, 0x54},
{}
};
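/*
 * Illustrative sketch, not the driver's actual helper: tables such as
 * spca505_init_data[] ({request, value, index} triples, terminated by
 * an all-zero entry) are typically replayed by a loop that issues one
 * register write per entry.  reg_write() and its argument order are
 * assumptions here, standing in for the driver's USB control-transfer
 * helper.
 */
static int example_write_vector(struct gspca_dev *gspca_dev,
				const u8 data[][3])
{
	int i, ret;

	for (i = 0; data[i][0] || data[i][1] || data[i][2]; i++) {
		ret = reg_write(gspca_dev, data[i][0],	/* request */
				data[i][2],		/* index */
				data[i][1]);		/* value */
		if (ret < 0)
			return ret;
	}
	return 0;
}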
/*
* Data to initialize the camera using the internal CCD
*/
static const u8 spca505_open_data_ccd[][3] = {
/* bmRequest,value,index */
/* Internal CCD data set */
{0x03, 0x04, 0x01},
/* This could be a reset */
{0x03, 0x00, 0x01},
/* Setup compression and image registers.  0x6 and 0x7 seem to be
 * related to H&V hold, and are resolution mode specific */
{0x04, 0x10, 0x01},
/* DIFF(0x50), was (0x10) */
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x20, 0x06},
{0x04, 0x20, 0x07},
{0x08, 0x0a, 0x00},
/* DIFF (0x4a), was (0xa) */
{0x05, 0x00, 0x10},
{0x05, 0x00, 0x11},
{0x05, 0x00, 0x00},
/* DIFF not written */
{0x05, 0x00, 0x01},
/* DIFF not written */
{0x05, 0x00, 0x02},
/* DIFF not written */
{0x05, 0x00, 0x03},
/* DIFF not written */
{0x05, 0x00, 0x04},
/* DIFF not written */
{0x05, 0x80, 0x05},
/* DIFF not written */
{0x05, 0xe0, 0x06},
/* DIFF not written */
{0x05, 0x20, 0x07},
/* DIFF not written */
{0x05, 0xa0, 0x08},
/* DIFF not written */
{0x05, 0x0, 0x12},
/* DIFF not written */
{0x05, 0x02, 0x0f},
/* DIFF not written */
{0x05, 0x10, 0x46},
/* DIFF not written */
{0x05, 0x8, 0x4a},
/* DIFF not written */
{0x03, 0x08, 0x03},
/* DIFF (0x3,0x28,0x3) */
{0x03, 0x08, 0x01},
{0x03, 0x0c, 0x03},
/* DIFF not written */
{0x03, 0x21, 0x00},
/* DIFF (0x39) */
/* Extra block copied from init to hopefully ensure CCD is in a sane state */
{0x06, 0x10, 0x08},
{0x06, 0x00, 0x09},
{0x06, 0x00, 0x0a},
{0x06, 0x00, 0x0b},
{0x06, 0x10, 0x0c},
{0x06, 0x00, 0x0d},
{0x06, 0x00, 0x0e},
{0x06, 0x00, 0x0f},
{0x06, 0x10, 0x10},
{0x06, 0x02, 0x11},
{0x06, 0x00, 0x12},
{0x06, 0x04, 0x13},
{0x06, 0x02, 0x14},
{0x06, 0x8a, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0xb6, 0x53},
{0x06, 0x3d, 0x54},
/* End of extra block */
{0x06, 0x3f, 0x1},
/* Block skipped */
{0x06, 0x10, 0x02},
{0x06, 0x64, 0x07},
{0x06, 0x10, 0x08},
{0x06, 0x00, 0x09},
{0x06, 0x00, 0x0a},
{0x06, 0x00, 0x0b},
{0x06, 0x10, 0x0c},
{0x06, 0x00, 0x0d},
{0x06, 0x00, 0x0e},
{0x06, 0x00, 0x0f},
{0x06, 0x10, 0x10},
{0x06, 0x02, 0x11},
{0x06, 0x00, 0x12},
{0x06, 0x04, 0x13},
{0x06, 0x02, 0x14},
{0x06, 0x8a, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0xb6, 0x53},
{0x06, 0x3d, 0x54},
{0x06, 0x60, 0x57},
{0x06, 0x20, 0x58},
{0x06, 0x15, 0x59},
{0x06, 0x05, 0x5a},
{0x05, 0x01, 0xc0},
{0x05, 0x10, 0xcb},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x0, 0xc2},
/* 4 was 0 */
{0x05, 0x00, 0xca},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x04, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x0, 0xc1},
/* */
{0x05, 0x00, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x40, 0xc1},
/* */
{0x05, 0x17, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x06, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x04, 0xc2},
{0x05, 0x00, 0xca},
{0x03, 0x4c, 0x3},
{0x03, 0x18, 0x1},
{0x06, 0x70, 0x51},
{0x06, 0xbe, 0x53},
{0x06, 0x71, 0x57},
{0x06, 0x20, 0x58},
{0x06, 0x05, 0x59},
{0x06, 0x15, 0x5a},
{0x04, 0x00, 0x08},
/* Compress = OFF (0x1 to turn on) */
{0x04, 0x12, 0x09},
{0x04, 0x21, 0x0a},
{0x04, 0x10, 0x0b},
{0x04, 0x21, 0x0c},
{0x04, 0x05, 0x00},
/* was 5 (Image Type ? ) */
{0x04, 0x00, 0x01},
{0x06, 0x3f, 0x01},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x40, 0x06},
{0x04, 0x40, 0x07},
{0x06, 0x1c, 0x17},
{0x06, 0xe2, 0x19},
{0x06, 0x1c, 0x1b},
{0x06, 0xe2, 0x1d},
{0x06, 0xaa, 0x1f},
{0x06, 0x70, 0x20},
{0x05, 0x01, 0x10},
{0x05, 0x00, 0x11},
{0x05, 0x01, 0x00},
{0x05, 0x05, 0x01},
{0x05, 0x00, 0xc1},
/* */
{0x05, 0x00, 0xc2},
{0x05, 0x00, 0xca},
{0x06, 0x70, 0x51},
{0x06, 0xbe, 0x53},
{}
};
/*
* Made by Tomasz Zablocki (skalamandra@poczta.onet.pl)
* SPCA505b chip based cameras initialization data
*/
/* jfm */
#define initial_brightness 0x7f /* 0x0(white)-0xff(black) */
/* #define initial_brightness 0x0 //0x0(white)-0xff(black) */
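/*
 * Illustrative sketch, not part of the original driver: the 8-bit
 * brightness value is split across two bridge registers, the top two
 * bits going to register 0x00 and the low six bits, left-justified, to
 * register 0x01. For initial_brightness 0x7f that is 0x7f >> 6 = 0x01
 * and (0x7f << 2) & 0xff = 0xfc, as used in the open table below.
 * A hypothetical helper:
 */
static inline void spca505b_split_brightness(u8 val, u8 *hi, u8 *lo)
{
*hi = val >> 6; /* top 2 bits, written to register 0x00 */
*lo = (val << 2) & 0xff; /* low 6 bits, left-justified, register 0x01 */
}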
/*
* Data to initialize an SPCA505b. Common to the CCD and external modes
*/
static const u8 spca505b_init_data[][3] = {
/* start */
{0x02, 0x00, 0x00}, /* init */
{0x02, 0x00, 0x01},
{0x02, 0x00, 0x02},
{0x02, 0x00, 0x03},
{0x02, 0x00, 0x04},
{0x02, 0x00, 0x05},
{0x02, 0x00, 0x06},
{0x02, 0x00, 0x07},
{0x02, 0x00, 0x08},
{0x02, 0x00, 0x09},
{0x03, 0x00, 0x00},
{0x03, 0x00, 0x01},
{0x03, 0x00, 0x02},
{0x03, 0x00, 0x03},
{0x03, 0x00, 0x04},
{0x03, 0x00, 0x05},
{0x03, 0x00, 0x06},
{0x04, 0x00, 0x00},
{0x04, 0x00, 0x02},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x00, 0x06},
{0x04, 0x00, 0x07},
{0x04, 0x00, 0x08},
{0x04, 0x00, 0x09},
{0x04, 0x00, 0x0a},
{0x04, 0x00, 0x0b},
{0x04, 0x00, 0x0c},
{0x07, 0x00, 0x00},
{0x07, 0x00, 0x03},
{0x08, 0x00, 0x00},
{0x08, 0x00, 0x01},
{0x08, 0x00, 0x02},
{0x00, 0x01, 0x00},
{0x00, 0x01, 0x01},
{0x00, 0x01, 0x34},
{0x00, 0x01, 0x35},
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
{0x06, 0xfc, 0x0b},
{0x06, 0x18, 0x0c},
{0x06, 0xfc, 0x0d},
{0x06, 0xfc, 0x0e},
{0x06, 0xfc, 0x0f},
{0x06, 0x18, 0x10},
{0x06, 0xfe, 0x12},
{0x06, 0x00, 0x11},
{0x06, 0x00, 0x14},
{0x06, 0x00, 0x13},
{0x06, 0x28, 0x51},
{0x06, 0xff, 0x53},
{0x02, 0x00, 0x08},
{0x03, 0x00, 0x03},
{0x03, 0x10, 0x03},
{}
};
/*
* Data to initialize the camera using the internal CCD
*/
static const u8 spca505b_open_data_ccd[][3] = {
/* {0x02,0x00,0x00}, */
{0x03, 0x04, 0x01}, /* rst */
{0x03, 0x00, 0x01},
{0x03, 0x00, 0x00},
{0x03, 0x21, 0x00},
{0x03, 0x00, 0x04},
{0x03, 0x00, 0x03},
{0x03, 0x18, 0x03},
{0x03, 0x08, 0x01},
{0x03, 0x1c, 0x03},
{0x03, 0x5c, 0x03},
{0x03, 0x5c, 0x03},
{0x03, 0x18, 0x01},
/* same as 505 */
{0x04, 0x10, 0x01},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x20, 0x06},
{0x04, 0x20, 0x07},
{0x08, 0x0a, 0x00},
{0x05, 0x00, 0x10},
{0x05, 0x00, 0x11},
{0x05, 0x00, 0x12},
{0x05, 0x6f, 0x00},
{0x05, initial_brightness >> 6, 0x00},
{0x05, (initial_brightness << 2) & 0xff, 0x01},
{0x05, 0x00, 0x02},
{0x05, 0x01, 0x03},
{0x05, 0x00, 0x04},
{0x05, 0x03, 0x05},
{0x05, 0xe0, 0x06},
{0x05, 0x20, 0x07},
{0x05, 0xa0, 0x08},
{0x05, 0x00, 0x12},
{0x05, 0x02, 0x0f},
{0x05, 0x80, 0x14}, /* max exposure off (0=on) */
{0x05, 0x01, 0xb0},
{0x05, 0x01, 0xbf},
{0x03, 0x02, 0x06},
{0x05, 0x10, 0x46},
{0x05, 0x08, 0x4a},
{0x06, 0x00, 0x01},
{0x06, 0x10, 0x02},
{0x06, 0x64, 0x07},
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
{0x06, 0xfc, 0x0b},
{0x04, 0x00, 0x01},
{0x06, 0x18, 0x0c},
{0x06, 0xfc, 0x0d},
{0x06, 0xfc, 0x0e},
{0x06, 0xfc, 0x0f},
{0x06, 0x11, 0x10}, /* contrast */
{0x06, 0x00, 0x11},
{0x06, 0xfe, 0x12},
{0x06, 0x00, 0x13},
{0x06, 0x00, 0x14},
{0x06, 0x9d, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0x7c, 0x53},
{0x06, 0x40, 0x54},
{0x06, 0x02, 0x57},
{0x06, 0x03, 0x58},
{0x06, 0x15, 0x59},
{0x06, 0x05, 0x5a},
{0x06, 0x03, 0x56},
{0x06, 0x02, 0x3f},
{0x06, 0x00, 0x40},
{0x06, 0x39, 0x41},
{0x06, 0x69, 0x42},
{0x06, 0x87, 0x43},
{0x06, 0x9e, 0x44},
{0x06, 0xb1, 0x45},
{0x06, 0xbf, 0x46},
{0x06, 0xcc, 0x47},
{0x06, 0xd5, 0x48},
{0x06, 0xdd, 0x49},
{0x06, 0xe3, 0x4a},
{0x06, 0xe8, 0x4b},
{0x06, 0xed, 0x4c},
{0x06, 0xf2, 0x4d},
{0x06, 0xf7, 0x4e},
{0x06, 0xfc, 0x4f},
{0x06, 0xff, 0x50},
{0x05, 0x01, 0xc0},
{0x05, 0x10, 0xcb},
{0x05, 0x40, 0xc1},
{0x05, 0x04, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x40, 0xc1},
{0x05, 0x09, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0xc0, 0xc1},
{0x05, 0x09, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x40, 0xc1},
{0x05, 0x59, 0xc2},
{0x05, 0x00, 0xca},
{0x04, 0x00, 0x01},
{0x05, 0x80, 0xc1},
{0x05, 0xec, 0xc2},
{0x05, 0x0, 0xca},
{0x06, 0x02, 0x57},
{0x06, 0x01, 0x58},
{0x06, 0x15, 0x59},
{0x06, 0x0a, 0x5a},
{0x06, 0x01, 0x57},
{0x06, 0x8a, 0x03},
{0x06, 0x0a, 0x6c},
{0x06, 0x30, 0x01},
{0x06, 0x20, 0x02},
{0x06, 0x00, 0x03},
{0x05, 0x8c, 0x25},
{0x06, 0x4d, 0x51}, /* maybe saturation (4d) */
{0x06, 0x84, 0x53}, /* making green (84) */
{0x06, 0x00, 0x57}, /* sharpness (1) */
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
{0x06, 0xfc, 0x0b},
{0x06, 0x18, 0x0c}, /* maybe hue (18) */
{0x06, 0xfc, 0x0d},
{0x06, 0xfc, 0x0e},
{0x06, 0xfc, 0x0f},
{0x06, 0x18, 0x10}, /* maybe contrast (18) */
{0x05, 0x01, 0x02},
{0x04, 0x00, 0x08}, /* compression */
{0x04, 0x12, 0x09},
{0x04, 0x21, 0x0a},
{0x04, 0x10, 0x0b},
{0x04, 0x21, 0x0c},
{0x04, 0x1d, 0x00}, /* imagetype (1d) */
{0x04, 0x41, 0x01}, /* hardware snapcontrol */
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x10, 0x06},
{0x04, 0x10, 0x07},
{0x04, 0x40, 0x06},
{0x04, 0x40, 0x07},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x06, 0x1c, 0x17},
{0x06, 0xe2, 0x19},
{0x06, 0x1c, 0x1b},
{0x06, 0xe2, 0x1d},
{0x06, 0x5f, 0x1f},
{0x06, 0x32, 0x20},
{0x05, initial_brightness >> 6, 0x00},
{0x05, (initial_brightness << 2) & 0xff, 0x01},
{0x05, 0x06, 0xc1},
{0x05, 0x58, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x00, 0x11},
{}
};
static int reg_write(struct usb_device *dev,
u16 req, u16 index, u16 value)
{
int ret;
ret = usb_control_msg(dev,
usb_sndctrlpipe(dev, 0),
req,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 500);
PDEBUG(D_USBO, "reg write: 0x%02x,0x%02x:0x%02x, %d",
req, index, value, ret);
if (ret < 0)
PDEBUG(D_ERR, "reg write: error %d", ret);
return ret;
}
/* returns: negative is error, pos or zero is data */
static int reg_read(struct gspca_dev *gspca_dev,
u16 req, /* bRequest */
u16 index) /* wIndex */
{
int ret;
ret = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, /* value */
index,
gspca_dev->usb_buf, 2,
500); /* timeout */
if (ret < 0)
return ret;
return (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0];
}
static int write_vector(struct gspca_dev *gspca_dev,
const u8 data[][3])
{
struct usb_device *dev = gspca_dev->dev;
int ret, i = 0;
while (data[i][0] != 0) {
ret = reg_write(dev, data[i][0], data[i][2], data[i][1]);
if (ret < 0)
return ret;
i++;
}
return 0;
}
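/*
 * Note (illustrative, not part of the original driver): the init tables
 * are laid out as { bRequest, wValue, wIndex } (see the
 * "bmRequest,value,index" comments), while reg_write() takes
 * (req, index, value), hence the data[i][2] / data[i][1] swap above.
 * Writing one table row by hand would look like this hypothetical helper:
 */
static inline int reg_write_row(struct usb_device *dev, const u8 row[3])
{
/* row[0] = bRequest, row[1] = wValue, row[2] = wIndex */
return reg_write(dev, row[0], row[2], row[1]);
}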
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
sd->subtype = id->driver_info;
if (sd->subtype != IntelPCCameraPro)
cam->nmodes = ARRAY_SIZE(vga_mode);
else /* no 640x480 for IntelPCCameraPro */
cam->nmodes = ARRAY_SIZE(vga_mode) - 1;
sd->brightness = BRIGHTNESS_DEF;
return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
if (write_vector(gspca_dev,
sd->subtype == Nxultra
? spca505b_init_data
: spca505_init_data))
return -EIO;
return 0;
}
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 brightness = sd->brightness;
reg_write(gspca_dev->dev, 0x05, 0x00, (255 - brightness) >> 6);
reg_write(gspca_dev->dev, 0x05, 0x01, (255 - brightness) << 2);
}
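/*
 * Worked example (illustrative): the scale is inverted before writing
 * (0x00 is white, 0xff is black on the sensor side, per the jfm comment
 * above spca505b_init_data), then split like initial_brightness: for
 * brightness 0x7f, 255 - 0x7f = 0x80, so register 0x00 gets
 * 0x80 >> 6 = 0x02 and register 0x01 gets the low byte of
 * (0x80 << 2), i.e. 0x00.
 */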
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
struct usb_device *dev = gspca_dev->dev;
int ret, mode;
static u8 mode_tb[][3] = {
/* r00 r06 r07 */
{0x00, 0x10, 0x10}, /* 640x480 */
{0x01, 0x1a, 0x1a}, /* 352x288 */
{0x02, 0x1c, 0x1d}, /* 320x240 */
{0x04, 0x34, 0x34}, /* 176x144 */
{0x05, 0x40, 0x40} /* 160x120 */
};
if (sd->subtype == Nxultra)
write_vector(gspca_dev, spca505b_open_data_ccd);
else
write_vector(gspca_dev, spca505_open_data_ccd);
ret = reg_read(gspca_dev, 0x06, 0x16);
if (ret < 0) {
PDEBUG(D_ERR|D_CONF,
"register read failed err: %d",
ret);
return ret;
}
if (ret != 0x0101) {
PDEBUG(D_ERR|D_CONF,
"After vector read returns 0x%04x should be 0x0101",
ret);
}
ret = reg_write(gspca_dev->dev, 0x06, 0x16, 0x0a);
if (ret < 0)
return ret;
reg_write(gspca_dev->dev, 0x05, 0xc2, 0x12);
/* necessary because without it we can see the stream
* only once after loading the module */
/* stop USB transfers first (Tomasz's change) */
reg_write(dev, 0x02, 0x00, 0x00);
mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
reg_write(dev, SPCA50X_REG_COMPRESS, 0x00, mode_tb[mode][0]);
reg_write(dev, SPCA50X_REG_COMPRESS, 0x06, mode_tb[mode][1]);
reg_write(dev, SPCA50X_REG_COMPRESS, 0x07, mode_tb[mode][2]);
ret = reg_write(dev, SPCA50X_REG_USB,
SPCA50X_USB_CTRL,
SPCA50X_CUSB_ENABLE);
setbrightness(gspca_dev);
return ret;
}
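/*
 * Note (illustrative): .priv in vga_mode doubles as the row index into
 * mode_tb, e.g. 320x240 carries .priv = 2, so registers 0x00/0x06/0x07
 * of the compression block (SPCA50X_REG_COMPRESS) get 0x02/0x1c/0x1d.
 */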
static void sd_stopN(struct gspca_dev *gspca_dev)
{
/* Disable ISO packet machine */
reg_write(gspca_dev->dev, 0x02, 0x00, 0x00);
}
/* called on streamoff with alt 0 and on disconnect */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
if (!gspca_dev->present)
return;
/* This maybe reset or power control */
reg_write(gspca_dev->dev, 0x03, 0x03, 0x20);
reg_write(gspca_dev->dev, 0x03, 0x01, 0x00);
reg_write(gspca_dev->dev, 0x03, 0x00, 0x01);
reg_write(gspca_dev->dev, 0x05, 0x10, 0x01);
reg_write(gspca_dev->dev, 0x05, 0x11, 0x0f);
}
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
switch (data[0]) {
case 0: /* start of frame */
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
data += SPCA50X_OFFSET_DATA;
len -= SPCA50X_OFFSET_DATA;
gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
break;
case 0xff: /* drop */
break;
default:
data += 1;
len -= 1;
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
break;
}
}
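/*
 * Illustrative summary of the isoc framing handled above: a packet whose
 * first byte is 0x00 closes the frame in progress and starts a new one
 * after a 10-byte header (SPCA50X_OFFSET_DATA); packets starting with
 * 0xff are padding and dropped; any other leading byte is a 1-byte
 * sequence header followed by frame payload.
 */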
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
sd->brightness = val;
if (gspca_dev->streaming)
setbrightness(gspca_dev);
return 0;
}
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
struct sd *sd = (struct sd *) gspca_dev;
*val = sd->brightness;
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
.nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra},
{USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro},
/*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
THIS_MODULE);
}
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
#endif
};
/* -- module insert / remove -- */
static int __init sd_mod_init(void)
{
int ret;
ret = usb_register(&sd_driver);
if (ret < 0)
return ret;
PDEBUG(D_PROBE, "registered");
return 0;
}
static void __exit sd_mod_exit(void)
{
usb_deregister(&sd_driver);
PDEBUG(D_PROBE, "deregistered");
}
module_init(sd_mod_init);
module_exit(sd_mod_exit);
| gpl-2.0 |
KonstaT/zte-kernel-msm7x27 | arch/arm/mach-at91/board-at572d940hf_ek.c | 908 | 7288 | /*
* linux/arch/arm/mach-at91/board-at572d940hf_ek.c
*
* Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
* Copyright (C) 2005 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/ds1305.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
#include "generic.h"
static void __init eb_map_io(void)
{
/* Initialize processor: 12.500 MHz crystal */
at572d940hf_initialize(12000000);
/* DBGU on ttyS0. (Rx & Tx only) */
at91_register_uart(0, 0, 0);
/* USART0 on ttyS1. (Rx & Tx only) */
at91_register_uart(AT572D940HF_ID_US0, 1, 0);
/* USART1 on ttyS2. (Rx & Tx only) */
at91_register_uart(AT572D940HF_ID_US1, 2, 0);
/* USART2 on ttyS3. (Tx & Rx only) */
at91_register_uart(AT572D940HF_ID_US2, 3, 0);
/* set serial console to ttyS0 (ie, DBGU) */
at91_set_serial_console(0);
}
static void __init eb_init_irq(void)
{
at572d940hf_init_interrupts(NULL);
}
/*
* USB Host Port
*/
static struct at91_usbh_data __initdata eb_usbh_data = {
.ports = 2,
};
/*
* USB Device Port
*/
static struct at91_udc_data __initdata eb_udc_data = {
.vbus_pin = 0, /* no VBUS detection, UDC always on */
.pullup_pin = 0, /* pull-up driven by UDC */
};
/*
* MCI (SD/MMC)
*/
static struct at91_mmc_data __initdata eb_mmc_data = {
.wire4 = 1,
/* .det_pin = ... not connected */
/* .wp_pin = ... not connected */
/* .vcc_pin = ... not connected */
};
/*
* MACB Ethernet device
*/
static struct at91_eth_data __initdata eb_eth_data = {
.phy_irq_pin = AT91_PIN_PB25,
.is_rmii = 1,
};
/*
* NOR flash
*/
static struct mtd_partition eb_nor_partitions[] = {
{
.name = "Raw Environment",
.offset = 0,
.size = SZ_4M,
.mask_flags = 0,
},
{
.name = "OS FS",
.offset = MTDPART_OFS_APPEND,
.size = 3 * SZ_1M,
.mask_flags = 0,
},
{
.name = "APP FS",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
},
};
static void nor_flash_set_vpp(struct map_info *mi, int i)
{
}
static struct physmap_flash_data nor_flash_data = {
.width = 4,
.parts = eb_nor_partitions,
.nr_parts = ARRAY_SIZE(eb_nor_partitions),
.set_vpp = nor_flash_set_vpp,
};
static struct resource nor_flash_resources[] = {
{
.start = AT91_CHIPSELECT_0,
.end = AT91_CHIPSELECT_0 + SZ_16M - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device nor_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &nor_flash_data,
},
.resource = nor_flash_resources,
.num_resources = ARRAY_SIZE(nor_flash_resources),
};
static struct sam9_smc_config __initdata eb_nor_smc_config = {
.ncs_read_setup = 1,
.nrd_setup = 1,
.ncs_write_setup = 1,
.nwe_setup = 1,
.ncs_read_pulse = 7,
.nrd_pulse = 7,
.ncs_write_pulse = 7,
.nwe_pulse = 7,
.read_cycle = 9,
.write_cycle = 9,
.mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_WRITE | AT91_SMC_DBW_32,
.tdf_cycles = 1,
};
static void __init eb_add_device_nor(void)
{
/* configure chip-select 0 (NOR) */
sam9_smc_configure(0, &eb_nor_smc_config);
platform_device_register(&nor_flash);
}
/*
* NAND flash
*/
static struct mtd_partition __initdata eb_nand_partition[] = {
{
.name = "Partition 1",
.offset = 0,
.size = SZ_16M,
},
{
.name = "Partition 2",
.offset = MTDPART_OFS_NXTBLK,
.size = MTDPART_SIZ_FULL,
}
};
static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
{
*num_partitions = ARRAY_SIZE(eb_nand_partition);
return eb_nand_partition;
}
static struct atmel_nand_data __initdata eb_nand_data = {
.ale = 22,
.cle = 21,
/* .det_pin = ... not connected */
/* .rdy_pin = AT91_PIN_PC16, */
.enable_pin = AT91_PIN_PA15,
.partition_info = nand_partitions,
#if defined(CONFIG_MTD_NAND_AT91_BUSWIDTH_16)
.bus_width_16 = 1,
#else
.bus_width_16 = 0,
#endif
};
static struct sam9_smc_config __initdata eb_nand_smc_config = {
.ncs_read_setup = 0,
.nrd_setup = 0,
.ncs_write_setup = 1,
.nwe_setup = 1,
.ncs_read_pulse = 3,
.nrd_pulse = 3,
.ncs_write_pulse = 3,
.nwe_pulse = 3,
.read_cycle = 5,
.write_cycle = 5,
.mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
.tdf_cycles = 12,
};
static void __init eb_add_device_nand(void)
{
/* setup bus-width (8 or 16) */
if (eb_nand_data.bus_width_16)
eb_nand_smc_config.mode |= AT91_SMC_DBW_16;
else
eb_nand_smc_config.mode |= AT91_SMC_DBW_8;
/* configure chip-select 3 (NAND) */
sam9_smc_configure(3, &eb_nand_smc_config);
at91_add_device_nand(&eb_nand_data);
}
/*
* SPI devices
*/
static struct resource rtc_resources[] = {
[0] = {
.start = AT572D940HF_ID_IRQ1,
.end = AT572D940HF_ID_IRQ1,
.flags = IORESOURCE_IRQ,
},
};
static struct ds1305_platform_data ds1306_data = {
.is_ds1306 = true,
.en_1hz = false,
};
static struct spi_board_info eb_spi_devices[] = {
{ /* RTC Dallas DS1306 */
.modalias = "rtc-ds1305",
.chip_select = 3,
.mode = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA,
.max_speed_hz = 500000,
.bus_num = 0,
.irq = AT572D940HF_ID_IRQ1,
.platform_data = (void *) &ds1306_data,
},
#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
{ /* Dataflash card */
.modalias = "mtd_dataflash",
.chip_select = 0,
.max_speed_hz = 15 * 1000 * 1000,
.bus_num = 0,
},
#endif
};
static void __init eb_board_init(void)
{
/* Serial */
at91_add_device_serial();
/* USB Host */
at91_add_device_usbh(&eb_usbh_data);
/* USB Device */
at91_add_device_udc(&eb_udc_data);
/* I2C */
at91_add_device_i2c(NULL, 0);
/* NOR */
eb_add_device_nor();
/* NAND */
eb_add_device_nand();
/* SPI */
at91_add_device_spi(eb_spi_devices, ARRAY_SIZE(eb_spi_devices));
/* MMC */
at91_add_device_mmc(0, &eb_mmc_data);
/* Ethernet */
at91_add_device_eth(&eb_eth_data);
/* mAgic */
at91_add_device_mAgic();
}
MACHINE_START(AT572D940HFEB, "Atmel AT91D940HF-EB")
/* Maintainer: Atmel <costa.antonior@gmail.com> */
.phys_io = AT91_BASE_SYS,
.io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
.boot_params = AT91_SDRAM_BASE + 0x100,
.timer = &at91sam926x_timer,
.map_io = eb_map_io,
.init_irq = eb_init_irq,
.init_machine = eb_board_init,
MACHINE_END
| gpl-2.0 |
TomGiordano/kernel_huawei_u8220 | arch/arm/mach-h720x/h7201-eval.c | 1676 | 1111 | /*
* linux/arch/arm/mach-h720x/h7201-eval.c
*
* Copyright (C) 2003 Thomas Gleixner <tglx@linutronix.de>
* 2003 Robert Schwebel <r.schwebel@pengutronix.de>
* 2004 Sascha Hauer <s.hauer@pengutronix.de>
*
* Architecture specific stuff for Hynix GMS30C7201 development board
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/device.h>
#include <asm/setup.h>
#include <asm/types.h>
#include <asm/mach-types.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mach/arch.h>
#include <mach/hardware.h>
#include "common.h"
MACHINE_START(H7201, "Hynix GMS30C7201")
/* Maintainer: Robert Schwebel, Pengutronix */
.phys_io = 0x80000000,
.io_pg_offst = ((0xf0000000) >> 18) & 0xfffc,
.boot_params = 0xc0001000,
.map_io = h720x_map_io,
.init_irq = h720x_init_irq,
.timer = &h7201_timer,
MACHINE_END
| gpl-2.0 |
tjstyle/android_kernel_fih_msm7x30 | arch/arm/mach-netx/nxdkn.c | 1676 | 2533 | /*
* arch/arm/mach-netx/nxdkn.c
*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mtd/plat-ram.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/netx-regs.h>
#include <mach/eth.h>
#include "generic.h"
static struct netxeth_platform_data eth0_platform_data = {
.xcno = 0,
};
static struct platform_device nxdkn_eth0_device = {
.name = "netx-eth",
.id = 0,
.num_resources = 0,
.resource = NULL,
.dev = {
.platform_data = ð0_platform_data,
}
};
static struct netxeth_platform_data eth1_platform_data = {
.xcno = 1,
};
static struct platform_device nxdkn_eth1_device = {
.name = "netx-eth",
.id = 1,
.num_resources = 0,
.resource = NULL,
.dev = {
.platform_data = ð1_platform_data,
}
};
static struct resource netx_uart0_resources[] = {
[0] = {
.start = 0x00100A00,
.end = 0x00100A3F,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART0),
.end = (NETX_IRQ_UART0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart0_device = {
.name = "netx-uart",
.id = 0,
.num_resources = ARRAY_SIZE(netx_uart0_resources),
.resource = netx_uart0_resources,
};
static struct platform_device *devices[] __initdata = {
&nxdkn_eth0_device,
&nxdkn_eth1_device,
&netx_uart0_device,
};
static void __init nxdkn_init(void)
{
platform_add_devices(devices, ARRAY_SIZE(devices));
}
MACHINE_START(NXDKN, "Hilscher nxdkn")
.phys_io = 0x00100000,
.io_pg_offst = (io_p2v(0x00100000) >> 18) & 0xfffc,
.boot_params = 0x80000100,
.map_io = netx_map_io,
.init_irq = netx_init_irq,
.timer = &netx_timer,
.init_machine = nxdkn_init,
MACHINE_END
| gpl-2.0 |
TeamEOS/kernel_moto_shamu | net/ipv4/tcp_minisocks.c | 1676 | 24743 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Florian La Roche, <flla@stud.uni-sb.de>
* Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
* Linus Torvalds, <torvalds@cs.helsinki.fi>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Matthew Dillon, <dillon@apollo.west.oic.com>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Jorge Cwik, <jorge@laser.satlink.net>
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);
int sysctl_tcp_abort_on_overflow __read_mostly;
struct inet_timewait_death_row tcp_death_row = {
.sysctl_max_tw_buckets = NR_FILE * 2,
.period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
.death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
.hashinfo = &tcp_hashinfo,
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&tcp_death_row),
.twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
.twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
(unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
if (seq == s_win)
return true;
if (after(end_seq, s_win) && before(seq, e_win))
return true;
return seq == e_win && seq == end_seq;
}
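/*
 * Worked example (illustrative): for a receive window with s_win = 100
 * and e_win = 200, seq 100 is always acceptable; seq 150 with end_seq 180
 * overlaps the window; and a bare segment sitting exactly on the right
 * edge (seq == end_seq == 200) is still accepted by the last test, so a
 * pure ACK at the edge is not discarded.
 */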
/*
* * Main purpose of TIME-WAIT state is to close connection gracefully,
* when one of the ends sits in LAST-ACK or CLOSING, retransmitting FIN
* (and, probably, tail of data) and one or more our ACKs are lost.
* * What is TIME-WAIT timeout? It is associated with maximal packet
* lifetime in the internet, which results in the wrong conclusion that
* it is set to catch "old duplicate segments" wandering out of their path.
* It is not quite correct. This timeout is calculated so that it exceeds
* maximal retransmission timeout enough to allow to lose one (or more)
* segments sent by peer and our ACKs. This time may be calculated from RTO.
* * When TIME-WAIT socket receives RST, it means that another end
* finally closed and we are allowed to kill TIME-WAIT too.
* * Second purpose of TIME-WAIT is catching old duplicate segments.
* Well, certainly it is pure paranoia, but if we load TIME-WAIT
* with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
* * If we invented some more clever way to catch duplicates
* (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
*
* The algorithm below is based on FORMAL INTERPRETATION of RFCs.
* When you compare it to RFCs, please, read section SEGMENT ARRIVES
* from the very beginning.
*
* NOTE. With recycling (and later with fin-wait-2) TW bucket
* is _not_ stateless. It means, that strictly speaking we must
* spinlock it. I do not want! Well, probability of misbehaviour
* is ridiculously low and, it seems, we could use some mb() tricks
* to avoid misread sequence numbers, states etc. --ANK
*
* We don't need to initialize tmp_out.sack_ok as we don't use the results
*/
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
const struct tcphdr *th)
{
struct tcp_options_received tmp_opt;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
bool paws_reject = false;
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
tcp_parse_options(skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
tmp_opt.ts_recent = tcptw->tw_ts_recent;
tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
if (tw->tw_substate == TCP_FIN_WAIT2) {
/* Just repeat all the checks of tcp_rcv_state_process() */
/* Out of window, send ACK */
if (paws_reject ||
!tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
tcptw->tw_rcv_nxt,
tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
return TCP_TW_ACK;
if (th->rst)
goto kill;
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
goto kill_with_rst;
/* Dup ACK? */
if (!th->ack ||
!after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
/* New data or FIN. If new data arrive after half-duplex close,
* reset.
*/
if (!th->fin ||
TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
inet_twsk_deschedule(tw, &tcp_death_row);
inet_twsk_put(tw);
return TCP_TW_RST;
}
/* FIN arrived, enter true time-wait state. */
tw->tw_substate = TCP_TIME_WAIT;
tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent_stamp = get_seconds();
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
}
if (tcp_death_row.sysctl_tw_recycle &&
tcptw->tw_ts_recent_stamp &&
tcp_tw_remember_stamp(tw))
inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
TCP_TIMEWAIT_LEN);
else
inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
TCP_TIMEWAIT_LEN);
return TCP_TW_ACK;
}
/*
* Now real TIME-WAIT state.
*
* RFC 1122:
* "When a connection is [...] on TIME-WAIT state [...]
* [a TCP] MAY accept a new SYN from the remote TCP to
* reopen the connection directly, if it:
*
* (1) assigns its initial sequence number for the new
* connection to be larger than the largest sequence
* number it used on the previous connection incarnation,
* and
*
* (2) returns to TIME-WAIT state if the SYN turns out
* to be an old duplicate".
*/
if (!paws_reject &&
(TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
(TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
/* In window segment, it may be only reset or bare ack. */
if (th->rst) {
/* This is TIME_WAIT assassination, in two flavors.
* Oh well... nobody has a sufficient solution to this
* protocol bug yet.
*/
if (sysctl_tcp_rfc1337 == 0) {
kill:
inet_twsk_deschedule(tw, &tcp_death_row);
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
}
inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
TCP_TIMEWAIT_LEN);
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
tcptw->tw_ts_recent_stamp = get_seconds();
}
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
/* Out of window segment.
All the segments are ACKed immediately.
The only exception is new SYN. We accept it, if it is
not an old duplicate and we are not in danger of being killed
by delayed old duplicates. The RFC check (that it has a
newer sequence number) works at rates <40Mbit/sec.
However, if paws works, it is reliable AND even more,
we even may relax silly seq space cutoff.
RED-PEN: we violate main RFC requirement, if this SYN will appear
old duplicate (i.e. we receive RST in reply to SYN-ACK),
we must return socket to time-wait state. It is not good,
but not fatal yet.
*/
if (th->syn && !th->rst && !th->ack && !paws_reject &&
(after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
(tmp_opt.saw_tstamp &&
(s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
if (isn == 0)
isn++;
TCP_SKB_CB(skb)->when = isn;
return TCP_TW_SYN;
}
if (paws_reject)
NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
if (!th->rst) {
/* In this case we must reset the TIMEWAIT timer.
*
* If it is ACKless SYN it may be both old duplicate
* and new good SYN with random sequence number <rcv_nxt.
* Do not reschedule in the last case.
*/
if (paws_reject || th->ack)
inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
TCP_TIMEWAIT_LEN);
/* Send ACK. Note, we do not put the bucket,
* it will be released by caller.
*/
return TCP_TW_ACK;
}
inet_twsk_put(tw);
return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
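/*
 * Illustrative summary of the return values above: TCP_TW_SUCCESS means
 * the segment was consumed and nothing is sent; TCP_TW_ACK asks the
 * caller to re-ACK the peer; TCP_TW_RST asks for a reset; TCP_TW_SYN
 * flags an acceptable new SYN reopening the connection, with the chosen
 * ISN parked in TCP_SKB_CB(skb)->when for the caller.
 */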
/*
* Move a socket to time-wait or dead fin-wait-2 state.
*/
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
struct inet_timewait_sock *tw = NULL;
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
bool recycle_ok = false;
if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
recycle_ok = tcp_remember_stamp(sk);
if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
tw = inet_twsk_alloc(sk, state);
if (tw != NULL) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
struct inet_sock *inet = inet_sk(sk);
tw->tw_transparent = inet->transparent;
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
tcptw->tw_rcv_wnd = tcp_receive_window(tp);
tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
tcptw->tw_ts_offset = tp->tsoffset;
#if IS_ENABLED(CONFIG_IPV6)
if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet6_timewait_sock *tw6;
tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
tw6 = inet6_twsk((struct sock *)tw);
tw6->tw_v6_daddr = np->daddr;
tw6->tw_v6_rcv_saddr = np->rcv_saddr;
tw->tw_tclass = np->tclass;
tw->tw_ipv6only = np->ipv6only;
}
#endif
#ifdef CONFIG_TCP_MD5SIG
/*
* The timewait bucket does not have the key DB from the
* sock structure. We just make a quick copy of the
* md5 key being used (if indeed we are using one)
* so the timewait ack generating code has the key.
*/
do {
struct tcp_md5sig_key *key;
tcptw->tw_md5_key = NULL;
key = tp->af_specific->md5_lookup(sk, sk);
if (key != NULL) {
tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
BUG();
}
} while (0);
#endif
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
/* Get the TIME_WAIT timeout firing. */
if (timeo < rto)
timeo = rto;
if (recycle_ok) {
tw->tw_timeout = rto;
} else {
tw->tw_timeout = TCP_TIMEWAIT_LEN;
if (state == TCP_TIME_WAIT)
timeo = TCP_TIMEWAIT_LEN;
}
inet_twsk_schedule(tw, &tcp_death_row, timeo,
TCP_TIMEWAIT_LEN);
inet_twsk_put(tw);
} else {
/* Sorry, if we're out of memory, just CLOSE this
* socket up. We've got bigger problems than
* non-graceful socket closings.
*/
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
}
tcp_update_metrics(sk);
tcp_done(sk);
}
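/*
 * Note (illustrative): rto above is (x << 2) - (x >> 1) = 4x - x/2,
 * i.e. 3.5 * icsk_rto, and acts as the floor for the TIME-WAIT timer;
 * with recycling the bucket lives those 3.5 RTOs, otherwise a full
 * TCP_TIMEWAIT_LEN.
 */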
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
if (twsk->tw_md5_key) {
tcp_free_md5sig_pool();
kfree_rcu(twsk->tw_md5_key, rcu);
}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
struct request_sock *req)
{
tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
/* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
*
* Actually, we could avoid lots of memory writes here. tp of listening
* socket contains all necessary default parameters.
*/
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
if (newsk != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_connection_sock *newicsk = inet_csk(newsk);
struct tcp_sock *newtp = tcp_sk(newsk);
/* Now setup tcp_sock */
newtp->pred_flags = 0;
newtp->rcv_wup = newtp->copied_seq =
newtp->rcv_nxt = treq->rcv_isn + 1;
newtp->snd_sml = newtp->snd_una =
newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
tcp_prequeue_init(newtp);
INIT_LIST_HEAD(&newtp->tsq_node);
tcp_init_wl(newtp, treq->rcv_isn);
newtp->srtt = 0;
newtp->mdev = TCP_TIMEOUT_INIT;
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
newtp->retrans_out = 0;
newtp->sacked_out = 0;
newtp->fackets_out = 0;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tcp_enable_early_retrans(newtp);
newtp->tlp_high_seq = 0;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
* algorithms that we must have the following bandaid to talk
* efficiently to them. -DaveM
*/
newtp->snd_cwnd = TCP_INIT_CWND;
newtp->snd_cwnd_cnt = 0;
if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
!try_module_get(newicsk->icsk_ca_ops->owner))
newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
tcp_set_ca_state(newsk, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
skb_queue_head_init(&newtp->out_of_order_queue);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
newtp->rx_opt.dsack = 0;
newtp->rx_opt.num_sacks = 0;
newtp->urg_data = 0;
if (sock_flag(newsk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
if (sysctl_tcp_fack)
tcp_enable_fack(newtp);
}
newtp->window_clamp = req->window_clamp;
newtp->rcv_ssthresh = req->rcv_wnd;
newtp->rcv_wnd = req->rcv_wnd;
newtp->rx_opt.wscale_ok = ireq->wscale_ok;
if (newtp->rx_opt.wscale_ok) {
newtp->rx_opt.snd_wscale = ireq->snd_wscale;
newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
} else {
newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
newtp->window_clamp = min(newtp->window_clamp, 65535U);
}
newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
newtp->rx_opt.snd_wscale);
newtp->max_window = newtp->snd_wnd;
if (newtp->rx_opt.tstamp_ok) {
newtp->rx_opt.ts_recent = req->ts_recent;
newtp->rx_opt.ts_recent_stamp = get_seconds();
newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
} else {
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
newtp->md5sig_info = NULL; /*XXX*/
if (newtp->af_specific->md5_lookup(sk, newsk))
newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
}
return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
/*
* Process an incoming packet for SYN_RECV sockets represented as a
* request_sock. Normally sk is the listener socket but for TFO it
* points to the child socket.
*
* XXX (TFO) - The current impl contains a special check for ack
* validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
*
* We don't need to initialize tmp_opt.sack_ok as we don't use the results
*/
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct request_sock **prev,
bool fastopen)
{
struct tcp_options_received tmp_opt;
struct sock *child;
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
bool paws_reject = false;
BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
tcp_parse_options(skb, &tmp_opt, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
/* We do not store true stamp, but it is not required,
* it can be estimated (approximately)
* from other data.
*/
tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
/* Check for pure retransmitted SYN. */
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
flg == TCP_FLAG_SYN &&
!paws_reject) {
/*
* RFC793 draws (Incorrectly! It was fixed in RFC1122)
* this case on figure 6 and figure 8, but formal
* protocol description says NOTHING.
* To be more exact, it says that we should send ACK,
* because this segment (at least, if it has no data)
* is out of window.
*
* CONCLUSION: RFC793 (even with RFC1122) DOES NOT
* describe SYN-RECV state. All the description
* is wrong, we cannot believe to it and should
* rely only on common sense and implementation
* experience.
*
* Enforce "SYN-ACK" according to figure 8, figure 6
* of RFC793, fixed by RFC1122.
*
* Note that even if there is new data in the SYN packet
* they will be thrown away too.
*
* Reset timer after retransmitting SYNACK, similar to
* the idea of fast retransmit in recovery.
*/
if (!inet_rtx_syn_ack(sk, req))
req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
TCP_RTO_MAX) + jiffies;
return NULL;
}
/* Further reproduces section "SEGMENT ARRIVES"
for state SYN-RECEIVED of RFC793.
It is broken; however, it fails only
when SYNs are crossed.
You would think that SYN crossing is impossible here, since
we should have a SYN_SENT socket (from connect()) on our end,
but this is not true if the crossed SYNs were sent to both
ends by a malicious third party. We must defend against this,
and to do that we first verify the ACK (as per RFC793, page
36) and reset if it is invalid. Is this a true full defense?
To convince ourselves, let us consider a way in which the ACK
test can still pass in this 'malicious crossed SYNs' case.
Malicious sender sends identical SYNs (and thus identical sequence
numbers) to both A and B:
A: gets SYN, seq=7
B: gets SYN, seq=7
By our good fortune, both A and B select the same initial
send sequence number of seven :-)
A: sends SYN|ACK, seq=7, ack_seq=8
B: sends SYN|ACK, seq=7, ack_seq=8
So we are now A eating this SYN|ACK, ACK test passes. So
does sequence test, SYN is truncated, and thus we consider
it a bare ACK.
If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
bare ACK. Otherwise, we create an established connection. Both
ends (listening sockets) accept the new incoming connection and try
to talk to each other. 8-)
Note: This case is both harmless and rare. The probability is about the
same as us discovering intelligent life on another planet tomorrow.
But generally, we should (RFC lies!) accept ACK
from SYNACK both here and in tcp_rcv_state_process().
tcp_rcv_state_process() does not, hence neither do we.
Note that the case is absolutely generic:
we cannot optimize anything here without
violating protocol. All the checks must be made
before attempt to create socket.
*/
/* RFC793 page 36: "If the connection is in any non-synchronized state ...
* and the incoming segment acknowledges something not yet
* sent (the segment carries an unacceptable ACK) ...
* a reset is sent."
*
* Invalid ACK: reset will be sent by listening socket.
* Note that the ACK validity check for a Fast Open socket is done
* elsewhere and is checked directly against the child socket rather
* than req because user data may have been sent out.
*/
if ((flg & TCP_FLAG_ACK) && !fastopen &&
(TCP_SKB_CB(skb)->ack_seq !=
tcp_rsk(req)->snt_isn + 1))
return sk;
/* Also, it would be not so bad idea to check rcv_tsecr, which
* is essentially ACK extension and too early or too late values
* should cause reset in unsynchronized states.
*/
/* RFC793: "first check sequence number". */
if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
/* Out of window: send ACK and drop. */
if (!(flg & TCP_FLAG_RST))
req->rsk_ops->send_ack(sk, skb, req);
if (paws_reject)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
return NULL;
}
/* In sequence, PAWS is OK. */
if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
req->ts_recent = tmp_opt.rcv_tsval;
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
/* Truncate SYN, it is out of window starting
at tcp_rsk(req)->rcv_isn + 1. */
flg &= ~TCP_FLAG_SYN;
}
/* RFC793: "second check the RST bit" and
* "fourth, check the SYN bit"
*/
if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
goto embryonic_reset;
}
/* ACK sequence verified above, just make sure ACK is
* set. If ACK not set, just silently drop the packet.
*
* XXX (TFO) - if we ever allow "data after SYN", the
* following check needs to be removed.
*/
if (!(flg & TCP_FLAG_ACK))
return NULL;
/* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
tcp_rsk(req)->snt_synack = 0;
/* For Fast Open no more processing is needed (sk is the
* child socket).
*/
if (fastopen)
return sk;
/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
inet_rsk(req)->acked = 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
return NULL;
}
/* OK, ACK is valid, create big socket and
* feed this segment to it. It will repeat all
* the tests. THIS SEGMENT MUST MOVE SOCKET TO
* ESTABLISHED STATE. If it will be dropped after
* socket is created, wait for troubles.
*/
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
if (child == NULL)
goto listen_overflow;
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
inet_csk_reqsk_queue_add(sk, req, child);
return child;
listen_overflow:
if (!sysctl_tcp_abort_on_overflow) {
inet_rsk(req)->acked = 1;
return NULL;
}
embryonic_reset:
if (!(flg & TCP_FLAG_RST)) {
/* Received a bad SYN pkt - for TFO We try not to reset
* the local connection unless it's really necessary to
* avoid becoming vulnerable to outside attack aiming at
* resetting legit local connections.
*/
req->rsk_ops->send_reset(sk, skb);
} else if (fastopen) { /* received a valid RST pkt */
reqsk_fastopen_remove(sk, req, true);
tcp_reset(sk);
}
if (!fastopen) {
inet_csk_reqsk_queue_drop(sk, req, prev);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
}
return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
/*
* Queue segment on the new socket if the new socket is active,
* otherwise we just shortcircuit this and continue with
* the new socket.
*
* For the vast majority of cases child->sk_state will be TCP_SYN_RECV
* when entering. But other states are possible due to a race condition
* where after __inet_lookup_established() fails but before the listener
* locked is obtained, other packets cause the same connection to
* be created.
*/
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb)
{
int ret = 0;
int state = child->sk_state;
if (!sock_owned_by_user(child)) {
ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
skb->len);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
parent->sk_data_ready(parent, 0);
} else {
/* Alas, it is possible again, because we do lookup
* in main socket hash table and lock on listening
* socket does not protect us more.
*/
__sk_add_backlog(child, skb);
}
bh_unlock_sock(child);
sock_put(child);
return ret;
}
EXPORT_SYMBOL(tcp_child_process);
| gpl-2.0 |
penhoi/linux-3.13.11.lbrpmu | drivers/rtc/rtc-rs5c348.c | 1676 | 6863 | /*
* A SPI driver for the Ricoh RS5C348 RTC
*
* Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* The board specific init code should provide characteristics of this
* device:
* Mode 1 (High-Active, Shift-Then-Sample), High Active CS
*/
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#define DRV_VERSION "0.2"
#define RS5C348_REG_SECS 0
#define RS5C348_REG_MINS 1
#define RS5C348_REG_HOURS 2
#define RS5C348_REG_WDAY 3
#define RS5C348_REG_DAY 4
#define RS5C348_REG_MONTH 5
#define RS5C348_REG_YEAR 6
#define RS5C348_REG_CTL1 14
#define RS5C348_REG_CTL2 15
#define RS5C348_SECS_MASK 0x7f
#define RS5C348_MINS_MASK 0x7f
#define RS5C348_HOURS_MASK 0x3f
#define RS5C348_WDAY_MASK 0x03
#define RS5C348_DAY_MASK 0x3f
#define RS5C348_MONTH_MASK 0x1f
#define RS5C348_BIT_PM 0x20 /* REG_HOURS */
#define RS5C348_BIT_Y2K 0x80 /* REG_MONTH */
#define RS5C348_BIT_24H 0x20 /* REG_CTL1 */
#define RS5C348_BIT_XSTP 0x10 /* REG_CTL2 */
#define RS5C348_BIT_VDET 0x40 /* REG_CTL2 */
#define RS5C348_CMD_W(addr) (((addr) << 4) | 0x08) /* single write */
#define RS5C348_CMD_R(addr) (((addr) << 4) | 0x0c) /* single read */
#define RS5C348_CMD_MW(addr) (((addr) << 4) | 0x00) /* burst write */
#define RS5C348_CMD_MR(addr) (((addr) << 4) | 0x04) /* burst read */
struct rs5c348_plat_data {
struct rtc_device *rtc;
int rtc_24h;
};
static int
rs5c348_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
struct rs5c348_plat_data *pdata = dev_get_platdata(&spi->dev);
u8 txbuf[5+7], *txp;
int ret;
/* Transfer 5 bytes before writing SEC. This gives 31us for carry. */
txp = txbuf;
txbuf[0] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
txbuf[1] = 0; /* dummy */
txbuf[2] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
txbuf[3] = 0; /* dummy */
txbuf[4] = RS5C348_CMD_MW(RS5C348_REG_SECS); /* cmd, sec, ... */
txp = &txbuf[5];
txp[RS5C348_REG_SECS] = bin2bcd(tm->tm_sec);
txp[RS5C348_REG_MINS] = bin2bcd(tm->tm_min);
if (pdata->rtc_24h) {
txp[RS5C348_REG_HOURS] = bin2bcd(tm->tm_hour);
} else {
/* hour 0 is AM12, noon is PM12 */
txp[RS5C348_REG_HOURS] = bin2bcd((tm->tm_hour + 11) % 12 + 1) |
(tm->tm_hour >= 12 ? RS5C348_BIT_PM : 0);
}
txp[RS5C348_REG_WDAY] = bin2bcd(tm->tm_wday);
txp[RS5C348_REG_DAY] = bin2bcd(tm->tm_mday);
txp[RS5C348_REG_MONTH] = bin2bcd(tm->tm_mon + 1) |
(tm->tm_year >= 100 ? RS5C348_BIT_Y2K : 0);
txp[RS5C348_REG_YEAR] = bin2bcd(tm->tm_year % 100);
/* write in one transfer to avoid data inconsistency */
ret = spi_write_then_read(spi, txbuf, sizeof(txbuf), NULL, 0);
udelay(62); /* Tcsr 62us */
return ret;
}
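/*
 * Worked example (illustrative): in 12-hour mode the expression
 * (tm_hour + 11) % 12 + 1 maps hour 0 to 12 (AM12, PM bit clear),
 * hour 12 to 12 (noon, RS5C348_BIT_PM set) and hour 17 to 5 with the
 * PM bit set.
 */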
static int
rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
struct rs5c348_plat_data *pdata = dev_get_platdata(&spi->dev);
u8 txbuf[5], rxbuf[7];
int ret;
/* Transfer 5 bytes before reading SEC. This gives 31us for carry. */
txbuf[0] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
txbuf[1] = 0; /* dummy */
txbuf[2] = RS5C348_CMD_R(RS5C348_REG_CTL2); /* cmd, ctl2 */
txbuf[3] = 0; /* dummy */
txbuf[4] = RS5C348_CMD_MR(RS5C348_REG_SECS); /* cmd, sec, ... */
/* read in one transfer to avoid data inconsistency */
ret = spi_write_then_read(spi, txbuf, sizeof(txbuf),
rxbuf, sizeof(rxbuf));
udelay(62); /* Tcsr 62us */
if (ret < 0)
return ret;
tm->tm_sec = bcd2bin(rxbuf[RS5C348_REG_SECS] & RS5C348_SECS_MASK);
tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
if (!pdata->rtc_24h) {
if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
tm->tm_hour -= 20;
tm->tm_hour %= 12;
tm->tm_hour += 12;
} else
tm->tm_hour %= 12;
}
tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
tm->tm_mon =
bcd2bin(rxbuf[RS5C348_REG_MONTH] & RS5C348_MONTH_MASK) - 1;
/* year is 1900 + tm->tm_year */
tm->tm_year = bcd2bin(rxbuf[RS5C348_REG_YEAR]) +
((rxbuf[RS5C348_REG_MONTH] & RS5C348_BIT_Y2K) ? 100 : 0);
if (rtc_valid_tm(tm) < 0) {
dev_err(&spi->dev, "retrieved date/time is not valid.\n");
rtc_time_to_tm(0, tm);
}
return 0;
}
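/*
 * Worked example (illustrative): RS5C348_BIT_PM (0x20) lies inside
 * RS5C348_HOURS_MASK (0x3f), so a PM hour reaches bcd2bin() with a bogus
 * tens digit of 2: 5 PM reads back as 0x25 -> 25; subtract 20, take % 12
 * and add 12 to get 17. Noon (0x32) decodes to 12, and midnight (0x12,
 * AM12) to 0 via the plain % 12 branch.
 */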
static const struct rtc_class_ops rs5c348_rtc_ops = {
.read_time = rs5c348_rtc_read_time,
.set_time = rs5c348_rtc_set_time,
};
static struct spi_driver rs5c348_driver;
static int rs5c348_probe(struct spi_device *spi)
{
int ret;
struct rtc_device *rtc;
struct rs5c348_plat_data *pdata;
pdata = devm_kzalloc(&spi->dev, sizeof(struct rs5c348_plat_data),
GFP_KERNEL);
if (!pdata)
return -ENOMEM;
spi->dev.platform_data = pdata;
/* Check D7 of SECOND register */
ret = spi_w8r8(spi, RS5C348_CMD_R(RS5C348_REG_SECS));
if (ret < 0 || (ret & 0x80)) {
dev_err(&spi->dev, "not found.\n");
goto kfree_exit;
}
dev_info(&spi->dev, "chip found, driver version " DRV_VERSION "\n");
dev_info(&spi->dev, "spiclk %u KHz.\n",
(spi->max_speed_hz + 500) / 1000);
/* turn RTC on if it was not on */
ret = spi_w8r8(spi, RS5C348_CMD_R(RS5C348_REG_CTL2));
if (ret < 0)
goto kfree_exit;
if (ret & (RS5C348_BIT_XSTP | RS5C348_BIT_VDET)) {
u8 buf[2];
struct rtc_time tm;
if (ret & RS5C348_BIT_VDET)
dev_warn(&spi->dev, "voltage-low detected.\n");
if (ret & RS5C348_BIT_XSTP)
dev_warn(&spi->dev, "oscillator-stop detected.\n");
rtc_time_to_tm(0, &tm); /* 1970/1/1 */
ret = rs5c348_rtc_set_time(&spi->dev, &tm);
if (ret < 0)
goto kfree_exit;
buf[0] = RS5C348_CMD_W(RS5C348_REG_CTL2);
buf[1] = 0;
ret = spi_write_then_read(spi, buf, sizeof(buf), NULL, 0);
if (ret < 0)
goto kfree_exit;
}
ret = spi_w8r8(spi, RS5C348_CMD_R(RS5C348_REG_CTL1));
if (ret < 0)
goto kfree_exit;
if (ret & RS5C348_BIT_24H)
pdata->rtc_24h = 1;
rtc = devm_rtc_device_register(&spi->dev, rs5c348_driver.driver.name,
&rs5c348_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
goto kfree_exit;
}
pdata->rtc = rtc;
return 0;
kfree_exit:
return ret;
}
static struct spi_driver rs5c348_driver = {
.driver = {
.name = "rtc-rs5c348",
.owner = THIS_MODULE,
},
.probe = rs5c348_probe,
};
module_spi_driver(rs5c348_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Ricoh RS5C348 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("spi:rtc-rs5c348");
| gpl-2.0 |
omnirom/android_kernel_huawei_angler | arch/tile/kernel/ptrace.c | 1932 | 7363 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* Copied from i386: Ross Biro 1/23/92
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>
#include <asm/traps.h>
#include <arch/chip.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
void user_enable_single_step(struct task_struct *child)
{
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
/*
* These two are currently unused, but will be set by arch_ptrace()
* and used in the syscall assembly when we do support them.
*/
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
/*
* Get registers from task and ready the result for userspace.
* Note that we localize the API issues to getregs() and putregs() at
* some cost in performance, e.g. we need a full pt_regs copy for
* PEEKUSR, and two copies for POKEUSR. But in general we expect
* GETREGS/PUTREGS to be the API of choice anyway.
*/
static char *getregs(struct task_struct *child, struct pt_regs *uregs)
{
*uregs = *task_pt_regs(child);
/* Set up flags ABI bits. */
uregs->flags = 0;
#ifdef CONFIG_COMPAT
if (task_thread_info(child)->status & TS_COMPAT)
uregs->flags |= PT_FLAGS_COMPAT;
#endif
return (char *)uregs;
}
/* Put registers back to task. */
static void putregs(struct task_struct *child, struct pt_regs *uregs)
{
struct pt_regs *regs = task_pt_regs(child);
/* Don't allow overwriting the kernel-internal flags word. */
uregs->flags = regs->flags;
/* Only allow setting the ICS bit in the ex1 word. */
uregs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(uregs->ex1));
*regs = *uregs;
}
enum tile_regset {
REGSET_GPR,
};
static int tile_gpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct pt_regs regs;
getregs(target, ®s);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, ®s, 0,
sizeof(regs));
}
static int tile_gpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs regs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
if (ret)
return ret;
putregs(target, &regs);
return 0;
}
static const struct user_regset tile_user_regset[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(elf_greg_t),
.align = sizeof(elf_greg_t),
.get = tile_gpr_get,
.set = tile_gpr_set,
},
};
static const struct user_regset_view tile_user_regset_view = {
.name = CHIP_ARCH_NAME,
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = tile_user_regset,
.n = ARRAY_SIZE(tile_user_regset),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &tile_user_regset_view;
}
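/*
 * The single NT_PRSTATUS regset above serves two consumers: the
 * PTRACE_GETREGS/PTRACE_SETREGS cases below (via
 * copy_regset_to_user()/copy_regset_from_user()) and the ELF core dump
 * code, which walks task_user_regset_view() to emit note sections.
 * A tracer would typically fetch the registers with something like
 *
 *	struct pt_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *
 * (a userspace-side sketch; the exact argument convention is up to the
 * libc ptrace() wrapper).
 */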
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (long __user __force *)data;
unsigned long tmp;
long ret = -EIO;
char *childreg;
struct pt_regs copyregs;
switch (request) {
case PTRACE_PEEKUSR: /* Read register from pt_regs. */
if (addr >= PTREGS_SIZE)
break;
childreg = getregs(child, &copyregs) + addr;
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
if (addr & (sizeof(compat_long_t)-1))
break;
ret = put_user(*(compat_long_t *)childreg,
(compat_long_t __user *)datap);
} else
#endif
{
if (addr & (sizeof(long)-1))
break;
ret = put_user(*(long *)childreg, datap);
}
break;
case PTRACE_POKEUSR: /* Write register in pt_regs. */
if (addr >= PTREGS_SIZE)
break;
childreg = getregs(child, &copyregs) + addr;
#ifdef CONFIG_COMPAT
if (is_compat_task()) {
if (addr & (sizeof(compat_long_t)-1))
break;
*(compat_long_t *)childreg = data;
} else
#endif
{
if (addr & (sizeof(long)-1))
break;
*(long *)childreg = data;
}
putregs(child, &copyregs);
ret = 0;
break;
case PTRACE_GETREGS: /* Get all registers from the child. */
ret = copy_regset_to_user(child, &tile_user_regset_view,
REGSET_GPR, 0,
sizeof(struct pt_regs), datap);
break;
case PTRACE_SETREGS: /* Set all registers in the child. */
ret = copy_regset_from_user(child, &tile_user_regset_view,
REGSET_GPR, 0,
sizeof(struct pt_regs), datap);
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
case PTRACE_SETFPREGS: /* Set the child FPU state. */
break;
case PTRACE_SETOPTIONS:
/* Support TILE-specific ptrace options. */
BUILD_BUG_ON(PTRACE_O_MASK_TILE & PTRACE_O_MASK);
tmp = data & PTRACE_O_MASK_TILE;
data &= ~PTRACE_O_MASK_TILE;
ret = ptrace_request(child, request, addr, data);
if (ret == 0) {
unsigned int flags = child->ptrace;
flags &= ~(PTRACE_O_MASK_TILE << PT_OPT_FLAG_SHIFT);
flags |= (tmp << PT_OPT_FLAG_SHIFT);
child->ptrace = flags;
}
break;
default:
#ifdef CONFIG_COMPAT
if (task_thread_info(current)->status & TS_COMPAT) {
ret = compat_ptrace_request(child, request,
addr, data);
break;
}
#endif
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
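/*
 * A note on the PTRACE_SETOPTIONS case above: the TILE-specific option
 * bits are split out of 'data' before the generic ptrace_request()
 * call and then parked in child->ptrace above PT_OPT_FLAG_SHIFT.  The
 * BUILD_BUG_ON() proves at compile time that PTRACE_O_MASK_TILE cannot
 * collide with the generic PTRACE_O_MASK bits.
 */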
#ifdef CONFIG_COMPAT
/* Not used; we handle compat issues in arch_ptrace() directly. */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data)
{
BUG();
}
#endif
int do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
if (tracehook_report_syscall_entry(regs))
regs->regs[TREG_SYSCALL_NR] = -1;
}
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->regs[TREG_SYSCALL_NR]);
return regs->regs[TREG_SYSCALL_NR];
}
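/*
 * If tracehook_report_syscall_entry() asks for the syscall to be
 * denied, the syscall number register is overwritten with -1 above;
 * the value returned here feeds back into the syscall entry assembly,
 * which is expected to skip the system call when it sees a negative
 * number (the usual convention for architectures using tracehook).
 */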
void do_syscall_trace_exit(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, regs->regs[0]);
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
struct siginfo info;
memset(&info, 0, sizeof(info));
info.si_signo = SIGTRAP;
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *) regs->pc;
/* Send us the fakey SIGTRAP */
force_sig_info(SIGTRAP, &info, tsk);
}
/* Handle synthetic interrupt delivered only by the simulator. */
void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
{
send_sigtrap(current, regs, fault_num);
}
| gpl-2.0 |
TeamMac/android_kernel_huawei_p6-u06 | arch/blackfin/mach-bf537/boards/stamp.c | 2188 | 78675 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/plat-ram.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/i2c.h>
#include <linux/i2c/adp5588.h>
#include <linux/etherdevice.h>
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
#include <linux/spi/mmc_spi.h>
#include <linux/leds.h>
#include <linux/input.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <asm/bfin_sport.h>
#ifdef CONFIG_REGULATOR_FIXED_VOLTAGE
#include <linux/regulator/fixed.h>
#endif
#include <linux/regulator/machine.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/userspace-consumer.h>
/*
Name the board for /proc/cpuinfo
*/
const char bfin_board_name[] = "ADI BF537-STAMP";
/*
* Driver needs to know address, irq and flag pin.
*/
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
#include <linux/usb/isp1760.h>
static struct resource bfin_isp1760_resources[] = {
[0] = {
.start = 0x203C0000,
.end = 0x203C0000 + 0x000fffff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
.dack_polarity_high = 0,
.dreq_polarity_high = 0,
};
static struct platform_device bfin_isp1760_device = {
.name = "isp1760",
.id = 0,
.dev = {
.platform_data = &isp1760_priv,
},
.num_resources = ARRAY_SIZE(bfin_isp1760_resources),
.resource = bfin_isp1760_resources,
};
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/gpio_keys.h>
static struct gpio_keys_button bfin_gpio_keys_table[] = {
{BTN_0, GPIO_PF2, 1, "gpio-keys: BTN0"},
{BTN_1, GPIO_PF3, 1, "gpio-keys: BTN1"},
{BTN_2, GPIO_PF4, 1, "gpio-keys: BTN2"},
{BTN_3, GPIO_PF5, 1, "gpio-keys: BTN3"},
};
static struct gpio_keys_platform_data bfin_gpio_keys_data = {
.buttons = bfin_gpio_keys_table,
.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};
static struct platform_device bfin_device_gpiokeys = {
.name = "gpio-keys",
.dev = {
.platform_data = &bfin_gpio_keys_data,
},
};
#endif
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
static struct resource bfin_pcmcia_cf_resources[] = {
{
.start = 0x20310000, /* IO PORT */
.end = 0x20312000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20311000, /* Attribute Memory */
.end = 0x20311FFF,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF4,
.end = IRQ_PF4,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
}, {
.start = 6, /* Card Detect PF6 */
.end = 6,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pcmcia_cf_device = {
.name = "bfin_cf_pcmcia",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources),
.resource = bfin_pcmcia_cf_resources,
};
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
.name = "rtc-bfin",
.id = -1,
};
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.name = "smc91x-regs",
.start = 0x20300300,
.end = 0x20300300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
static struct resource dm9000_resources[] = {
[0] = {
.start = 0x203FB800,
.end = 0x203FB800 + 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 0x203FB804,
.end = 0x203FB804 + 1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = IRQ_PF9,
.end = IRQ_PF9,
.flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE),
},
};
static struct platform_device dm9000_device = {
.name = "dm9000",
.id = -1,
.num_resources = ARRAY_SIZE(dm9000_resources),
.resource = dm9000_resources,
};
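/*
 * The DM9000 is wired in its usual two-port fashion: the first MEM
 * resource is the address/index port and the second, 4 bytes above it,
 * is the data port.  The driver latches a register number into the
 * first port and then transfers the register contents through the
 * second; the third resource is the interrupt, rising-edge triggered
 * on this board.
 */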
#endif
#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
static struct resource sl811_hcd_resources[] = {
{
.start = 0x20340000,
.end = 0x20340000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20340004,
.end = 0x20340004,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF4,
.end = IRQ_PF4,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
void sl811_port_power(struct device *dev, int is_on)
{
gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS");
gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on);
}
#endif
static struct sl811_platform_data sl811_priv = {
.potpg = 10,
.power = 250, /* == 500mA */
#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS)
.port_power = &sl811_port_power,
#endif
};
static struct platform_device sl811_hcd_device = {
.name = "sl811-hcd",
.id = 0,
.dev = {
.platform_data = &sl811_priv,
},
.num_resources = ARRAY_SIZE(sl811_hcd_resources),
.resource = sl811_hcd_resources,
};
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
static struct resource isp1362_hcd_resources[] = {
{
.start = 0x20360000,
.end = 0x20360000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20360004,
.end = 0x20360004,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF3,
.end = IRQ_PF3,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
static struct isp1362_platform_data isp1362_priv = {
.sel15Kres = 1,
.clknotstop = 0,
.oc_enable = 0,
.int_act_high = 0,
.int_edge_triggered = 0,
.remote_wakeup_connected = 0,
.no_power_switching = 1,
.power_switching_mode = 0,
};
static struct platform_device isp1362_hcd_device = {
.name = "isp1362-hcd",
.id = 0,
.dev = {
.platform_data = &isp1362_priv,
},
.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
.resource = isp1362_hcd_resources,
};
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
static unsigned short bfin_can_peripherals[] = {
P_CAN0_RX, P_CAN0_TX, 0
};
static struct resource bfin_can_resources[] = {
{
.start = 0xFFC02A00,
.end = 0xFFC02FFF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_CAN_RX,
.end = IRQ_CAN_RX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_CAN_TX,
.end = IRQ_CAN_TX,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_CAN_ERROR,
.end = IRQ_CAN_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_can_device = {
.name = "bfin_can",
.num_resources = ARRAY_SIZE(bfin_can_resources),
.resource = bfin_can_resources,
.dev = {
.platform_data = &bfin_can_peripherals, /* Passed to driver */
},
};
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
static const unsigned short bfin_mac_peripherals[] = P_MII0;
static struct bfin_phydev_platform_data bfin_phydev_data[] = {
{
.addr = 1,
.irq = PHY_POLL, /* IRQ_MAC_PHYINT */
},
};
static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
.phydev_number = 1,
.phydev_data = bfin_phydev_data,
.phy_mode = PHY_INTERFACE_MODE_MII,
.mac_peripherals = bfin_mac_peripherals,
};
static struct platform_device bfin_mii_bus = {
.name = "bfin_mii_bus",
.dev = {
.platform_data = &bfin_mii_bus_data,
}
};
static struct platform_device bfin_mac_device = {
.name = "bfin_mac",
.dev = {
.platform_data = &bfin_mii_bus,
}
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF7,
.end = IRQ_PF7,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static struct mtd_partition bfin_plat_nand_partitions[] = {
{
.name = "linux kernel(nand)",
.size = 0x400000,
.offset = 0,
}, {
.name = "file system(nand)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
},
};
#define BFIN_NAND_PLAT_CLE 2
#define BFIN_NAND_PLAT_ALE 1
static void bfin_plat_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
struct nand_chip *this = mtd->priv;
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_CLE));
else
writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_ALE));
}
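/*
 * The cmd_ctrl hook above relies on the NAND latch pins being wired to
 * async-bank address lines: writing the byte to
 * IO_ADDR_W + (1 << BFIN_NAND_PLAT_CLE) drives address line 2 high for
 * that bus cycle and so asserts CLE (address line 1 likewise for ALE).
 * This is also why the MEM resource below spans
 * 0x20212000 + (1 << MAX(CLE, ALE)): the window has to cover both
 * latch addresses.
 */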
#define BFIN_NAND_PLAT_READY GPIO_PF3
static int bfin_plat_nand_dev_ready(struct mtd_info *mtd)
{
return gpio_get_value(BFIN_NAND_PLAT_READY);
}
static struct platform_nand_data bfin_plat_nand_data = {
.chip = {
.nr_chips = 1,
.chip_delay = 30,
.part_probe_types = part_probes,
.partitions = bfin_plat_nand_partitions,
.nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
},
.ctrl = {
.cmd_ctrl = bfin_plat_nand_cmd_ctrl,
.dev_ready = bfin_plat_nand_dev_ready,
},
};
#define MAX(x, y) ((x) > (y) ? (x) : (y))
static struct resource bfin_plat_nand_resources = {
.start = 0x20212000,
.end = 0x20212000 + (1 << MAX(BFIN_NAND_PLAT_CLE, BFIN_NAND_PLAT_ALE)),
.flags = IORESOURCE_MEM,
};
static struct platform_device bfin_async_nand_device = {
.name = "gen_nand",
.id = -1,
.num_resources = 1,
.resource = &bfin_plat_nand_resources,
.dev = {
.platform_data = &bfin_plat_nand_data,
},
};
static void bfin_plat_nand_init(void)
{
gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
}
#else
static void bfin_plat_nand_init(void) {}
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition stamp_partitions[] = {
{
.name = "bootloader(nor)",
.size = 0x40000,
.offset = 0,
}, {
.name = "linux kernel(nor)",
.size = 0x180000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(nor)",
.size = 0x400000 - 0x40000 - 0x180000 - 0x10000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "MAC Address(nor)",
.size = MTDPART_SIZ_FULL,
.offset = 0x3F0000,
.mask_flags = MTD_WRITEABLE,
}
};
static struct physmap_flash_data stamp_flash_data = {
.width = 2,
.parts = stamp_partitions,
.nr_parts = ARRAY_SIZE(stamp_partitions),
#ifdef CONFIG_ROMKERNEL
.probe_type = "map_rom",
#endif
};
static struct resource stamp_flash_resource = {
.start = 0x20000000,
.end = 0x203fffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device stamp_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &stamp_flash_data,
},
.num_resources = 1,
.resource = &stamp_flash_resource,
};
#endif
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = 0x180000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
/* .type = "m25p64", */
};
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* don't use dma transfers with this chip */
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_BFIN_SPI_ADC) \
|| defined(CONFIG_BFIN_SPI_ADC_MODULE)
/* SPI ADC chip */
static struct bfin5xx_spi_chip spi_adc_chip_info = {
.enable_dma = 1, /* use dma transfer with this chip */
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD193X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
static struct bfin5xx_spi_chip ad1938_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) \
|| defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
static struct bfin5xx_spi_chip adav801_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
#include <linux/input/ad714x.h>
static struct bfin5xx_spi_chip ad7147_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
static struct ad714x_slider_plat ad7147_spi_slider_plat[] = {
{
.start_stage = 0,
.end_stage = 7,
.max_coord = 128,
},
};
static struct ad714x_button_plat ad7147_spi_button_plat[] = {
{
.keycode = BTN_FORWARD,
.l_mask = 0,
.h_mask = 0x600,
},
{
.keycode = BTN_LEFT,
.l_mask = 0,
.h_mask = 0x500,
},
{
.keycode = BTN_MIDDLE,
.l_mask = 0,
.h_mask = 0x800,
},
{
.keycode = BTN_RIGHT,
.l_mask = 0x100,
.h_mask = 0x400,
},
{
.keycode = BTN_BACK,
.l_mask = 0x200,
.h_mask = 0x400,
},
};
static struct ad714x_platform_data ad7147_spi_platform_data = {
.slider_num = 1,
.button_num = 5,
.slider = ad7147_spi_slider_plat,
.button = ad7147_spi_button_plat,
.stage_cfg_reg = {
{0xFBFF, 0x1FFF, 0, 0x2626, 1600, 1600, 1600, 1600},
{0xEFFF, 0x1FFF, 0, 0x2626, 1650, 1650, 1650, 1650},
{0xFFFF, 0x1FFE, 0, 0x2626, 1650, 1650, 1650, 1650},
{0xFFFF, 0x1FFB, 0, 0x2626, 1650, 1650, 1650, 1650},
{0xFFFF, 0x1FEF, 0, 0x2626, 1650, 1650, 1650, 1650},
{0xFFFF, 0x1FBF, 0, 0x2626, 1650, 1650, 1650, 1650},
{0xFFFF, 0x1EFF, 0, 0x2626, 1650, 1650, 1650, 1650},
{0xFFFF, 0x1BFF, 0, 0x2626, 1600, 1600, 1600, 1600},
{0xFF7B, 0x3FFF, 0x506, 0x2626, 1100, 1100, 1150, 1150},
{0xFDFE, 0x3FFF, 0x606, 0x2626, 1100, 1100, 1150, 1150},
{0xFEBA, 0x1FFF, 0x1400, 0x2626, 1200, 1200, 1300, 1300},
{0xFFEF, 0x1FFF, 0x0, 0x2626, 1100, 1100, 1150, 1150},
},
.sys_cfg_reg = {0x2B2, 0x0, 0x3233, 0x819, 0x832, 0xCFF, 0xCFF, 0x0},
};
#endif
#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
#include <linux/input/ad714x.h>
static struct ad714x_button_plat ad7142_i2c_button_plat[] = {
{
.keycode = BTN_1,
.l_mask = 0,
.h_mask = 0x1,
},
{
.keycode = BTN_2,
.l_mask = 0,
.h_mask = 0x2,
},
{
.keycode = BTN_3,
.l_mask = 0,
.h_mask = 0x4,
},
{
.keycode = BTN_4,
.l_mask = 0x0,
.h_mask = 0x8,
},
};
static struct ad714x_platform_data ad7142_i2c_platform_data = {
.button_num = 4,
.button = ad7142_i2c_button_plat,
.stage_cfg_reg = {
/* fixme: figure out the right setting for each component according
* to the hardware features of the EVAL-AD7142EB board */
{0xE7FF, 0x3FFF, 0x0005, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
{0xFDBF, 0x3FFF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
{0xFFFF, 0x2DFF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
{0xFFFF, 0x37BF, 0x0001, 0x2626, 0x01F4, 0x01F4, 0x028A, 0x028A},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
{0xFFFF, 0x3FFF, 0x0000, 0x0606, 0x01F4, 0x01F4, 0x0320, 0x0320},
},
.sys_cfg_reg = {0x0B2, 0x0, 0x690, 0x664, 0x290F, 0xF, 0xF, 0x0},
};
#endif
#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
static unsigned short ad2s120x_platform_data[] = {
/* used as SAMPLE and RDVEL */
GPIO_PF5, GPIO_PF6, 0
};
static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
static unsigned short ad2s1210_platform_data[] = {
/* use as SAMPLE, A0, A1 */
GPIO_PF7, GPIO_PF8, GPIO_PF9,
# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
/* the RES0 and RES1 pins */
GPIO_PF4, GPIO_PF5,
# endif
0,
};
static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
static struct bfin5xx_spi_chip ad7314_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE)
static unsigned short ad7816_platform_data[] = {
GPIO_PF4, /* rdwr_pin */
GPIO_PF5, /* convert_pin */
GPIO_PF7, /* busy_pin */
0,
};
static struct bfin5xx_spi_chip ad7816_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE)
static unsigned long adt7310_platform_data[3] = {
/* INT bound temperature alarm event. line 1 */
IRQ_PG4, IRQF_TRIGGER_LOW,
/* CT bound temperature alarm event irq_flags. line 0 */
IRQF_TRIGGER_LOW,
};
static struct bfin5xx_spi_chip adt7310_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE)
static unsigned short ad7298_platform_data[] = {
GPIO_PF7, /* busy_pin */
0,
};
static struct bfin5xx_spi_chip ad7298_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
static unsigned long adt7316_spi_data[2] = {
IRQF_TRIGGER_LOW, /* interrupt flags */
GPIO_PF7, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */
};
static struct bfin5xx_spi_chip adt7316_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
static int bfin_mmc_spi_init(struct device *dev,
irqreturn_t (*detect_int)(int, void *), void *data)
{
return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
}
static void bfin_mmc_spi_exit(struct device *dev, void *data)
{
free_irq(MMC_SPI_CARD_DETECT_INT, data);
}
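/*
 * The init/exit pair hands the card-detect line to the mmc_spi core:
 * .init is called at host probe time with the core's detect handler
 * and requests it as a falling-edge IRQ, .exit releases it again, and
 * .detect_delay (in msecs) debounces card insertion before the bus is
 * rescanned.
 */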
static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
.init = bfin_mmc_spi_init,
.exit = bfin_mmc_spi_exit,
.detect_delay = 100, /* msecs */
};
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
.pio_interrupt = 0,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
#include <linux/spi/ad7877.h>
static struct bfin5xx_spi_chip spi_ad7877_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
.vref_delay_usecs = 50, /* internal, no capacitor */
.x_plate_ohms = 419,
.y_plate_ohms = 486,
.pressure_max = 1000,
.pressure_min = 0,
.stopacq_polarity = 1,
.first_conversion_delay = 3,
.acquisition_time = 1,
.averaging = 1,
.pen_down_acc_interval = 1,
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
static const struct ad7879_platform_data bfin_ad7879_ts_info = {
.model = 7879, /* Model = AD7879 */
.x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */
.pressure_max = 10000,
.pressure_min = 0,
.first_conversion_delay = 3, /* wait 512us before doing the first conversion */
.acquisition_time = 1, /* 4us acquisition time per sample */
.median = 2, /* do 8 measurements */
.averaging = 1, /* take the average of 4 middle samples */
.pen_down_acc_interval = 255, /* 9.4 ms */
.gpio_export = 1, /* Export GPIO to gpiolib */
.gpio_base = -1, /* Dynamic allocation */
};
#endif
#if defined(CONFIG_INPUT_ADXL34X) || defined(CONFIG_INPUT_ADXL34X_MODULE)
#include <linux/input/adxl34x.h>
static const struct adxl34x_platform_data adxl34x_info = {
.x_axis_offset = 0,
.y_axis_offset = 0,
.z_axis_offset = 0,
.tap_threshold = 0x31,
.tap_duration = 0x10,
.tap_latency = 0x60,
.tap_window = 0xF0,
.tap_axis_control = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
.act_axis_control = 0xFF,
.activity_threshold = 5,
.inactivity_threshold = 3,
.inactivity_time = 4,
.free_fall_threshold = 0x7,
.free_fall_time = 0x20,
.data_rate = 0x8,
.data_range = ADXL_FULL_RES,
.ev_type = EV_ABS,
.ev_code_x = ABS_X, /* EV_REL */
.ev_code_y = ABS_Y, /* EV_REL */
.ev_code_z = ABS_Z, /* EV_REL */
.ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY x,y,z */
/* .ev_code_ff = KEY_F,*/ /* EV_KEY */
/* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */
.power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
.fifo_mode = ADXL_FIFO_STREAM,
.orientation_enable = ADXL_EN_ORIENTATION_3D,
.deadzone_angle = ADXL_DEADZONE_ANGLE_10p8,
.divisor_length = ADXL_LP_FILTER_DIVISOR_16,
/* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
.ev_codes_orient_3d = {BTN_Z, BTN_Y, BTN_X, BTN_A, BTN_B, BTN_C},
};
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
static struct bfin5xx_spi_chip spi_ad7879_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
static struct bfin5xx_spi_chip spidev_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
static struct bfin5xx_spi_chip lq035q1_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
static struct bfin5xx_spi_chip enc28j60_spi_chip_info = {
.enable_dma = 1,
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
static struct bfin5xx_spi_chip adf7021_spi_chip_info = {
.bits_per_word = 16,
};
#include <linux/spi/adf702x.h>
#define TXREG 0x0160A470
static const u32 adf7021_regs[] = {
0x09608FA0,
0x00575011,
0x00A7F092,
0x2B141563,
0x81F29E94,
0x00003155,
0x050A4F66,
0x00000007,
0x00000008,
0x000231E9,
0x3296354A,
0x891A2B3B,
0x00000D9C,
0x0000000D,
0x0000000E,
0x0000000F,
};
static struct adf702x_platform_data adf7021_platform_data = {
.regs_base = (void *)SPORT1_TCR1,
.dma_ch_rx = CH_SPORT1_RX,
.dma_ch_tx = CH_SPORT1_TX,
.irq_sport_err = IRQ_SPORT1_ERROR,
.gpio_int_rfs = GPIO_PF8,
.pin_req = {P_SPORT1_DTPRI, P_SPORT1_RFS, P_SPORT1_DRPRI,
P_SPORT1_RSCLK, P_SPORT1_TSCLK, 0},
.adf702x_model = MODEL_ADF7021,
.adf702x_regs = adf7021_regs,
.tx_reg = TXREG,
};
static inline void adf702x_mac_init(void)
{
random_ether_addr(adf7021_platform_data.mac_addr);
}
#else
static inline void adf702x_mac_init(void) {}
#endif
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
#include <linux/spi/ads7846.h>
static struct bfin5xx_spi_chip ad7873_spi_chip_info = {
.bits_per_word = 8,
};
static int ads7873_get_pendown_state(void)
{
return gpio_get_value(GPIO_PF6);
}
static struct ads7846_platform_data __initdata ad7873_pdata = {
.model = 7873, /* AD7873 */
.x_max = 0xfff,
.y_max = 0xfff,
.x_plate_ohms = 620,
.debounce_max = 1,
.debounce_rep = 0,
.debounce_tol = (~0),
.get_pendown_state = ads7873_get_pendown_state,
};
#endif
#if defined(CONFIG_MTD_DATAFLASH) \
|| defined(CONFIG_MTD_DATAFLASH_MODULE)
static struct mtd_partition bfin_spi_dataflash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = 0x180000,
.offset = MTDPART_OFS_APPEND,
}, {
.name = "file system(spi)",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
}
};
static struct flash_platform_data bfin_spi_dataflash_data = {
.name = "SPI Dataflash",
.parts = bfin_spi_dataflash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
};
/* DataFlash chip */
static struct bfin5xx_spi_chip data_flash_chip_info = {
.enable_dma = 0, /* don't use dma transfers with this chip */
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
static struct bfin5xx_spi_chip spi_adxl34x_chip_info = {
.enable_dma = 0, /* don't use dma transfers with this chip */
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_AD7476) || defined(CONFIG_AD7476_MODULE)
static struct bfin5xx_spi_chip spi_ad7476_chip_info = {
.enable_dma = 0, /* don't use dma transfers with this chip */
.bits_per_word = 8,
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MTD_DATAFLASH) \
|| defined(CONFIG_MTD_DATAFLASH_MODULE)
{ /* DataFlash chip */
.modalias = "mtd_dataflash",
.max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_dataflash_data,
.controller_data = &data_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_BFIN_SPI_ADC) \
|| defined(CONFIG_BFIN_SPI_ADC_MODULE)
{
.modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
.max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. */
.platform_data = NULL, /* No spi_driver specific config */
.controller_data = &spi_adc_chip_info,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \
|| defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE)
{
.modalias = "ad183x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
.platform_data = "ad1836", /* only includes chip name for the moment */
.controller_data = &ad1836_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD193X) || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
{
.modalias = "ad193x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5,
.controller_data = &ad1938_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
{
.modalias = "adav80x",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &adav801_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_INPUT_AD714X_SPI) || defined(CONFIG_INPUT_AD714X_SPI_MODULE)
{
.modalias = "ad714x_captouch",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.irq = IRQ_PF4,
.bus_num = 0,
.chip_select = 5,
.mode = SPI_MODE_3,
.platform_data = &ad7147_spi_platform_data,
.controller_data = &ad7147_spi_chip_info,
},
#endif
#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE)
{
.modalias = "ad2s90",
.bus_num = 0,
.chip_select = 3, /* change it for your board */
.mode = SPI_MODE_3,
.platform_data = NULL,
.controller_data = &ad2s90_spi_chip_info,
},
#endif
#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
{
.modalias = "ad2s120x",
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad2s120x_platform_data,
.controller_data = &ad2s120x_spi_chip_info,
},
#endif
#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
{
.modalias = "ad2s1210",
.max_speed_hz = 8192000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad2s1210_platform_data,
.controller_data = &ad2s1210_spi_chip_info,
},
#endif
#if defined(CONFIG_AD7314) || defined(CONFIG_AD7314_MODULE)
{
.modalias = "ad7314",
.max_speed_hz = 1000000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.controller_data = &ad7314_spi_chip_info,
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_AD7816) || defined(CONFIG_AD7816_MODULE)
{
.modalias = "ad7818",
.max_speed_hz = 1000000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad7816_platform_data,
.controller_data = &ad7816_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADT7310) || defined(CONFIG_ADT7310_MODULE)
{
.modalias = "adt7310",
.max_speed_hz = 1000000,
.irq = IRQ_PG5, /* CT alarm event. Line 0 */
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = adt7310_platform_data,
.controller_data = &adt7310_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_AD7298) || defined(CONFIG_AD7298_MODULE)
{
.modalias = "ad7298",
.max_speed_hz = 1000000,
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = ad7298_platform_data,
.controller_data = &ad7298_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADT7316_SPI) || defined(CONFIG_ADT7316_SPI_MODULE)
{
.modalias = "adt7316",
.max_speed_hz = 1000000,
.irq = IRQ_PG5, /* interrupt line */
.bus_num = 0,
.chip_select = 4, /* CS, change it for your board */
.platform_data = adt7316_spi_data,
.controller_data = &adt7316_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 4,
.platform_data = &bfin_mmc_spi_pdata,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
{
.modalias = "ad7877",
.platform_data = &bfin_ad7877_ts_info,
.irq = IRQ_PF6,
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &spi_ad7877_chip_info,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
{
.modalias = "ad7879",
.platform_data = &bfin_ad7879_ts_info,
.irq = IRQ_PF7,
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &spi_ad7879_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
{
.modalias = "spidev",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &spidev_chip_info,
},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
{
.modalias = "bfin-lq035q1-spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
.controller_data = &lq035q1_spi_chip_info,
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
#if defined(CONFIG_ENC28J60) || defined(CONFIG_ENC28J60_MODULE)
{
.modalias = "enc28j60",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.irq = IRQ_PF6,
.bus_num = 0,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
.controller_data = &enc28j60_spi_chip_info,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE)
{
.modalias = "adxl34x",
.platform_data = &adxl34x_info,
.irq = IRQ_PF6,
.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2,
.controller_data = &spi_adxl34x_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADF702X) || defined(CONFIG_ADF702X_MODULE)
{
.modalias = "adf702x",
.max_speed_hz = 16000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
.controller_data = &adf7021_spi_chip_info,
.platform_data = &adf7021_platform_data,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
{
.modalias = "ads7846",
.max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.irq = IRQ_PF6,
.chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */
.controller_data = &ad7873_spi_chip_info,
.platform_data = &ad7873_pdata,
.mode = SPI_MODE_0,
},
#endif
#if defined(CONFIG_AD7476) \
|| defined(CONFIG_AD7476_MODULE)
{
.modalias = "ad7476", /* Name of spi_driver for this device */
.max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. */
.platform_data = NULL, /* No spi_driver specific config */
.controller_data = &spi_ad7476_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADE7753) \
|| defined(CONFIG_ADE7753_MODULE)
{
.modalias = "ade7753",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7754) \
|| defined(CONFIG_ADE7754_MODULE)
{
.modalias = "ade7754",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7758) \
|| defined(CONFIG_ADE7758_MODULE)
{
.modalias = "ade7758",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7759) \
|| defined(CONFIG_ADE7759_MODULE)
{
.modalias = "ade7759",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADE7854_SPI) \
|| defined(CONFIG_ADE7854_SPI_MODULE)
{
.modalias = "ade7854",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADIS16060) \
|| defined(CONFIG_ADIS16060_MODULE)
{
.modalias = "adis16060_r",
.max_speed_hz = 2900000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = MAX_CTRL_CS + 1, /* CS for read, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_0,
},
{
.modalias = "adis16060_w",
.max_speed_hz = 2900000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 2, /* CS for write, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_1,
},
#endif
#if defined(CONFIG_ADIS16130) \
|| defined(CONFIG_ADIS16130_MODULE)
{
.modalias = "adis16130",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS for read, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADIS16201) \
|| defined(CONFIG_ADIS16201_MODULE)
{
.modalias = "adis16201",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16203) \
|| defined(CONFIG_ADIS16203_MODULE)
{
.modalias = "adis16203",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16204) \
|| defined(CONFIG_ADIS16204_MODULE)
{
.modalias = "adis16204",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16209) \
|| defined(CONFIG_ADIS16209_MODULE)
{
.modalias = "adis16209",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16220) \
|| defined(CONFIG_ADIS16220_MODULE)
{
.modalias = "adis16220",
.max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16240) \
|| defined(CONFIG_ADIS16240_MODULE)
{
.modalias = "adis16240",
.max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16260) \
|| defined(CONFIG_ADIS16260_MODULE)
{
.modalias = "adis16260",
.max_speed_hz = 1500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16261) \
|| defined(CONFIG_ADIS16261_MODULE)
{
.modalias = "adis16261",
.max_speed_hz = 2500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_ADIS16300) \
|| defined(CONFIG_ADIS16300_MODULE)
{
.modalias = "adis16300",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16350) \
|| defined(CONFIG_ADIS16350_MODULE)
{
.modalias = "adis16364",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 5, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
.irq = IRQ_PF4,
},
#endif
#if defined(CONFIG_ADIS16400) \
|| defined(CONFIG_ADIS16400_MODULE)
{
.modalias = "adis16400",
.max_speed_hz = 1000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1, /* CS, change it for your board */
.platform_data = NULL, /* No spi_driver specific config */
.mode = SPI_MODE_3,
},
#endif
};
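/*
 * Everything in bfin_spi_board_info[] is a static description, not a
 * device registration; the machine init code is expected to hand the
 * table to the SPI core, after which devices are created when the
 * matching master ("bfin-spi", bus 0 here) probes.  A minimal sketch
 * of that hand-off, using only names from this file:
 */
#if 0	/* illustrative only, normally done from the arch init path */
static int __init stamp_spi_board_sketch(void)
{
	return spi_register_board_info(bfin_spi_board_info,
				       ARRAY_SIZE(bfin_spi_board_info));
}
#endif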
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
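/*
 * Chip-select numbering for the controller above: selects in the range
 * 1..MAX_CTRL_CS map onto the hardware SPISSELx lines, while board
 * info entries such as "GPIO_PF10 + MAX_CTRL_CS" request a GPIO-driven
 * slave select instead; that is why num_chipselect is sized as
 * MAX_CTRL_CS + MAX_BLACKFIN_GPIOS.
 */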
#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE)
/* SPORT SPI controller data */
static struct bfin5xx_spi_master bfin_sport_spi0_info = {
.num_chipselect = 1, /* master only supports one device */
.enable_dma = 0, /* master doesn't support DMA */
.pin_req = {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
P_SPORT0_RSCLK, P_SPORT0_TFS, P_SPORT0_RFS, 0},
};
static struct resource bfin_sport_spi0_resource[] = {
[0] = {
.start = SPORT0_TCR1,
.end = SPORT0_TCR1 + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_sport_spi0_device = {
.name = "bfin-sport-spi",
.id = 1, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_sport_spi0_resource),
.resource = bfin_sport_spi0_resource,
.dev = {
.platform_data = &bfin_sport_spi0_info, /* Passed to driver */
},
};
static struct bfin5xx_spi_master bfin_sport_spi1_info = {
.num_chipselect = 1, /* master only supports one device */
.enable_dma = 0, /* master doesn't support DMA */
.pin_req = {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_DRPRI,
P_SPORT1_RSCLK, P_SPORT1_TFS, P_SPORT1_RFS, 0},
};
static struct resource bfin_sport_spi1_resource[] = {
[0] = {
.start = SPORT1_TCR1,
.end = SPORT1_TCR1 + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_sport_spi1_device = {
.name = "bfin-sport-spi",
.id = 2, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_sport_spi1_resource),
.resource = bfin_sport_spi1_resource,
.dev = {
.platform_data = &bfin_sport_spi1_info, /* Passed to driver */
},
};
#endif /* sport spi master and devices */
#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
static struct platform_device bfin_fb_device = {
.name = "bf537-lq035",
};
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>
static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
.mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
.ppi_mode = USE_RGB565_16_BIT_PPI,
.use_bl = 0, /* let something else control the LCD backlight */
.gpio_bl = GPIO_PF7,
};
static struct resource bfin_lq035q1_resources[] = {
{
.start = IRQ_PPI_ERROR,
.end = IRQ_PPI_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_lq035q1_device = {
.name = "bfin-lq035q1",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_lq035q1_resources),
.resource = bfin_lq035q1_resources,
.dev = {
.platform_data = &bfin_lq035q1_data,
},
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART0_CTSRTS
{ /* CTS pin */
.start = GPIO_PG7,
.end = GPIO_PG7,
.flags = IORESOURCE_IO,
},
{ /* RTS pin */
.start = GPIO_PG6,
.end = GPIO_PG6,
.flags = IORESOURCE_IO,
},
#endif
};
static unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
};
static unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = {
[0] = KEY_GRAVE,
[1] = KEY_1,
[2] = KEY_2,
[3] = KEY_3,
[4] = KEY_4,
[5] = KEY_5,
[6] = KEY_6,
[7] = KEY_7,
[8] = KEY_8,
[9] = KEY_9,
[10] = KEY_0,
[11] = KEY_MINUS,
[12] = KEY_EQUAL,
[13] = KEY_BACKSLASH,
[15] = KEY_KP0,
[16] = KEY_Q,
[17] = KEY_W,
[18] = KEY_E,
[19] = KEY_R,
[20] = KEY_T,
[21] = KEY_Y,
[22] = KEY_U,
[23] = KEY_I,
[24] = KEY_O,
[25] = KEY_P,
[26] = KEY_LEFTBRACE,
[27] = KEY_RIGHTBRACE,
[29] = KEY_KP1,
[30] = KEY_KP2,
[31] = KEY_KP3,
[32] = KEY_A,
[33] = KEY_S,
[34] = KEY_D,
[35] = KEY_F,
[36] = KEY_G,
[37] = KEY_H,
[38] = KEY_J,
[39] = KEY_K,
[40] = KEY_L,
[41] = KEY_SEMICOLON,
[42] = KEY_APOSTROPHE,
[43] = KEY_BACKSLASH,
[45] = KEY_KP4,
[46] = KEY_KP5,
[47] = KEY_KP6,
[48] = KEY_102ND,
[49] = KEY_Z,
[50] = KEY_X,
[51] = KEY_C,
[52] = KEY_V,
[53] = KEY_B,
[54] = KEY_N,
[55] = KEY_M,
[56] = KEY_COMMA,
[57] = KEY_DOT,
[58] = KEY_SLASH,
[60] = KEY_KPDOT,
[61] = KEY_KP7,
[62] = KEY_KP8,
[63] = KEY_KP9,
[64] = KEY_SPACE,
[65] = KEY_BACKSPACE,
[66] = KEY_TAB,
[67] = KEY_KPENTER,
[68] = KEY_ENTER,
[69] = KEY_ESC,
[70] = KEY_DELETE,
[74] = KEY_KPMINUS,
[76] = KEY_UP,
[77] = KEY_DOWN,
[78] = KEY_RIGHT,
[79] = KEY_LEFT,
};
static struct adp5588_kpad_platform_data adp5588_kpad_data = {
.rows = 8,
.cols = 10,
.keymap = adp5588_keymap,
.keymapsize = ARRAY_SIZE(adp5588_keymap),
.repeat = 0,
};
#endif
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
#include <linux/mfd/adp5520.h>
/*
* ADP5520/5501 Backlight Data
*/
static struct adp5520_backlight_platform_data adp5520_backlight_data = {
.fade_in = ADP5520_FADE_T_1200ms,
.fade_out = ADP5520_FADE_T_1200ms,
.fade_led_law = ADP5520_BL_LAW_LINEAR,
.en_ambl_sens = 1,
.abml_filt = ADP5520_BL_AMBL_FILT_640ms,
.l1_daylight_max = ADP5520_BL_CUR_mA(15),
.l1_daylight_dim = ADP5520_BL_CUR_mA(0),
.l2_office_max = ADP5520_BL_CUR_mA(7),
.l2_office_dim = ADP5520_BL_CUR_mA(0),
.l3_dark_max = ADP5520_BL_CUR_mA(3),
.l3_dark_dim = ADP5520_BL_CUR_mA(0),
.l2_trip = ADP5520_L2_COMP_CURR_uA(700),
.l2_hyst = ADP5520_L2_COMP_CURR_uA(50),
.l3_trip = ADP5520_L3_COMP_CURR_uA(80),
.l3_hyst = ADP5520_L3_COMP_CURR_uA(20),
};
/*
* ADP5520/5501 LEDs Data
*/
static struct led_info adp5520_leds[] = {
{
.name = "adp5520-led1",
.default_trigger = "none",
.flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms,
},
#ifdef ADP5520_EN_ALL_LEDS
{
.name = "adp5520-led2",
.default_trigger = "none",
.flags = FLAG_ID_ADP5520_LED2_ADP5501_LED1,
},
{
.name = "adp5520-led3",
.default_trigger = "none",
.flags = FLAG_ID_ADP5520_LED3_ADP5501_LED2,
},
#endif
};
static struct adp5520_leds_platform_data adp5520_leds_data = {
.num_leds = ARRAY_SIZE(adp5520_leds),
.leds = adp5520_leds,
.fade_in = ADP5520_FADE_T_600ms,
.fade_out = ADP5520_FADE_T_600ms,
.led_on_time = ADP5520_LED_ONT_600ms,
};
/*
* ADP5520 GPIO Data
*/
static struct adp5520_gpio_platform_data adp5520_gpio_data = {
.gpio_start = 50,
.gpio_en_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2,
.gpio_pullup_mask = ADP5520_GPIO_C1 | ADP5520_GPIO_C2 | ADP5520_GPIO_R2,
};
/*
* ADP5520 Keypad Data
*/
static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = {
[ADP5520_KEY(0, 0)] = KEY_GRAVE,
[ADP5520_KEY(0, 1)] = KEY_1,
[ADP5520_KEY(0, 2)] = KEY_2,
[ADP5520_KEY(0, 3)] = KEY_3,
[ADP5520_KEY(1, 0)] = KEY_4,
[ADP5520_KEY(1, 1)] = KEY_5,
[ADP5520_KEY(1, 2)] = KEY_6,
[ADP5520_KEY(1, 3)] = KEY_7,
[ADP5520_KEY(2, 0)] = KEY_8,
[ADP5520_KEY(2, 1)] = KEY_9,
[ADP5520_KEY(2, 2)] = KEY_0,
[ADP5520_KEY(2, 3)] = KEY_MINUS,
[ADP5520_KEY(3, 0)] = KEY_EQUAL,
[ADP5520_KEY(3, 1)] = KEY_BACKSLASH,
[ADP5520_KEY(3, 2)] = KEY_BACKSPACE,
[ADP5520_KEY(3, 3)] = KEY_ENTER,
};
static struct adp5520_keys_platform_data adp5520_keys_data = {
.rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0,
.cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0,
.keymap = adp5520_keymap,
.keymapsize = ARRAY_SIZE(adp5520_keymap),
.repeat = 0,
};
/*
* ADP5520/5501 Multifunction Device Init Data
*/
static struct adp5520_platform_data adp5520_pdev_data = {
.backlight = &adp5520_backlight_data,
.leds = &adp5520_leds_data,
.gpio = &adp5520_gpio_data,
.keys = &adp5520_keys_data,
};
#endif
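/*
 * The ADP5520 is a multifunction device behind a single I2C client:
 * the parent platform data above just aggregates pointers to the
 * backlight, LED, GPIO and keypad sub-structures, and the parent
 * driver is expected to pass each pointer along when it registers the
 * corresponding child device.
 */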
#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE)
static struct adp5588_gpio_platform_data adp5588_gpio_data = {
.gpio_start = 50,
.pullup_dis_mask = 0,
};
#endif
#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE)
#include <linux/i2c/adp8870.h>
static struct led_info adp8870_leds[] = {
{
.name = "adp8870-led7",
.default_trigger = "none",
.flags = ADP8870_LED_D7 | ADP8870_LED_OFFT_600ms,
},
};
static struct adp8870_backlight_platform_data adp8870_pdata = {
.bl_led_assign = ADP8870_BL_D1 | ADP8870_BL_D2 | ADP8870_BL_D3 |
ADP8870_BL_D4 | ADP8870_BL_D5 | ADP8870_BL_D6, /* 1 = Backlight 0 = Individual LED */
.pwm_assign = 0, /* 1 = Enables PWM mode */
.bl_fade_in = ADP8870_FADE_T_1200ms, /* Backlight Fade-In Timer */
.bl_fade_out = ADP8870_FADE_T_1200ms, /* Backlight Fade-Out Timer */
.bl_fade_law = ADP8870_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */
.en_ambl_sens = 1, /* 1 = enable ambient light sensor */
.abml_filt = ADP8870_BL_AMBL_FILT_320ms, /* Light sensor filter time */
.l1_daylight_max = ADP8870_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l1_daylight_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_bright_max = ADP8870_BL_CUR_mA(14), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_bright_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_office_max = ADP8870_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_office_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l4_indoor_max = ADP8870_BL_CUR_mA(3), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l4_indor_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l5_dark_max = ADP8870_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l5_dark_dim = ADP8870_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_trip = ADP8870_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l2_hyst = ADP8870_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l3_trip = ADP8870_L3_COMP_CURR_uA(389), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
.l3_hyst = ADP8870_L3_COMP_CURR_uA(54), /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
.l4_trip = ADP8870_L4_COMP_CURR_uA(167), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
.l4_hyst = ADP8870_L4_COMP_CURR_uA(16), /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
.l5_trip = ADP8870_L5_COMP_CURR_uA(43), /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.l5_hyst = ADP8870_L5_COMP_CURR_uA(11), /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.leds = adp8870_leds,
.num_leds = ARRAY_SIZE(adp8870_leds),
.led_fade_law = ADP8870_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */
.led_fade_in = ADP8870_FADE_T_600ms,
.led_fade_out = ADP8870_FADE_T_600ms,
.led_on_time = ADP8870_LED_ONT_200ms,
};
#endif
#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
#include <linux/i2c/adp8860.h>
static struct led_info adp8860_leds[] = {
{
.name = "adp8860-led7",
.default_trigger = "none",
.flags = ADP8860_LED_D7 | ADP8860_LED_OFFT_600ms,
},
};
static struct adp8860_backlight_platform_data adp8860_pdata = {
.bl_led_assign = ADP8860_BL_D1 | ADP8860_BL_D2 | ADP8860_BL_D3 |
ADP8860_BL_D4 | ADP8860_BL_D5 | ADP8860_BL_D6, /* 1 = backlight, 0 = individual LED */
.bl_fade_in = ADP8860_FADE_T_1200ms, /* Backlight Fade-In Timer */
.bl_fade_out = ADP8860_FADE_T_1200ms, /* Backlight Fade-Out Timer */
.bl_fade_law = ADP8860_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */
.en_ambl_sens = 1, /* 1 = enable ambient light sensor */
.abml_filt = ADP8860_BL_AMBL_FILT_320ms, /* Light sensor filter time */
.l1_daylight_max = ADP8860_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l1_daylight_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_office_max = ADP8860_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_office_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_dark_max = ADP8860_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l3_dark_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
.l2_trip = ADP8860_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l2_hyst = ADP8860_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
.l3_trip = ADP8860_L3_COMP_CURR_uA(43), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.l3_hyst = ADP8860_L3_COMP_CURR_uA(11), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
.leds = adp8860_leds,
.num_leds = ARRAY_SIZE(adp8860_leds),
.led_fade_law = ADP8860_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */
.led_fade_in = ADP8860_FADE_T_600ms,
.led_fade_out = ADP8860_FADE_T_600ms,
.led_on_time = ADP8860_LED_ONT_200ms,
};
#endif
#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
static struct regulator_consumer_supply ad5398_consumer = {
.supply = "current",
};
static struct regulator_init_data ad5398_regulator_data = {
.constraints = {
.name = "current range",
.max_uA = 120000,
.valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = 1,
.consumer_supplies = &ad5398_consumer,
};
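/*
 * The AD5398 is a programmable current-sink regulator; the constraints
 * above cap it at 120 mA and allow consumers to change only the current
 * and the enable state.
 */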
#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
static struct platform_device ad5398_virt_consumer_device = {
.name = "reg-virt-consumer",
.id = 0,
.dev = {
.platform_data = "current", /* Passed to driver */
},
};
#endif
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
static struct regulator_bulk_data ad5398_bulk_data = {
.supply = "current",
};
static struct regulator_userspace_consumer_data ad5398_userspace_consumer_data = {
.name = "ad5398",
.num_supplies = 1,
.supplies = &ad5398_bulk_data,
};
static struct platform_device ad5398_userspace_consumer_device = {
.name = "reg-userspace-consumer",
.id = 0,
.dev = {
.platform_data = &ad5398_userspace_consumer_data,
},
};
#endif
#endif
#if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE)
/* INT: out-of-bound temperature alarm event (event line 1) */
static unsigned long adt7410_platform_data[2] = {
IRQ_PG4, IRQF_TRIGGER_LOW,
};
#endif
#if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE)
/* INT: out-of-bound temperature alarm event (event line 1) */
static unsigned long adt7316_i2c_data[2] = {
IRQF_TRIGGER_LOW, /* interrupt flags */
GPIO_PF4, /* ldac_pin, 0 means DAC/LDAC registers control DAC update */
};
#endif
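/*
 * I2C devices on the Blackfin TWI bus.  Each entry is compiled in only
 * when its driver is configured; the whole table is handed to the I2C
 * core via i2c_register_board_info() in stamp_init() below, and clients
 * are instantiated once the bus driver probes.
 */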
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_SND_BF5XX_SOC_AD193X) || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE)
{
I2C_BOARD_INFO("ad1937", 0x04),
},
#endif
#if defined(CONFIG_SND_BF5XX_SOC_ADAV80X) || defined(CONFIG_SND_BF5XX_SOC_ADAV80X_MODULE)
{
I2C_BOARD_INFO("adav803", 0x10),
},
#endif
#if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE)
{
I2C_BOARD_INFO("ad7142_captouch", 0x2C),
.irq = IRQ_PG5,
.platform_data = (void *)&ad7142_i2c_platform_data,
},
#endif
#if defined(CONFIG_AD7150) || defined(CONFIG_AD7150_MODULE)
{
I2C_BOARD_INFO("ad7150", 0x48),
.irq = IRQ_PG5, /* fixme: use real interrupt number */
},
#endif
#if defined(CONFIG_AD7152) || defined(CONFIG_AD7152_MODULE)
{
I2C_BOARD_INFO("ad7152", 0x48),
},
#endif
#if defined(CONFIG_AD774X) || defined(CONFIG_AD774X_MODULE)
{
I2C_BOARD_INFO("ad774x", 0x48),
},
#endif
#if defined(CONFIG_AD7414) || defined(CONFIG_AD7414_MODULE)
{
I2C_BOARD_INFO("ad7414", 0x9),
.irq = IRQ_PG5,
.irq_flags = IRQF_TRIGGER_LOW,
},
#endif
#if defined(CONFIG_AD7416) || defined(CONFIG_AD7416_MODULE)
{
I2C_BOARD_INFO("ad7417", 0xb),
.irq = IRQ_PG5,
.irq_flags = IRQF_TRIGGER_LOW,
.platform_data = (void *)GPIO_PF4,
},
#endif
#if defined(CONFIG_ADE7854_I2C) || defined(CONFIG_ADE7854_I2C_MODULE)
{
I2C_BOARD_INFO("ade7854", 0x38),
},
#endif
#if defined(CONFIG_ADT75) || defined(CONFIG_ADT75_MODULE)
{
I2C_BOARD_INFO("adt75", 0x9),
.irq = IRQ_PG5,
.irq_flags = IRQF_TRIGGER_LOW,
},
#endif
#if defined(CONFIG_ADT7408) || defined(CONFIG_ADT7408_MODULE)
{
I2C_BOARD_INFO("adt7408", 0x18),
.irq = IRQ_PG5,
.irq_flags = IRQF_TRIGGER_LOW,
},
#endif
#if defined(CONFIG_ADT7410) || defined(CONFIG_ADT7410_MODULE)
{
I2C_BOARD_INFO("adt7410", 0x48),
/* CT: critical temperature event (event line 0) */
.irq = IRQ_PG5,
.irq_flags = IRQF_TRIGGER_LOW,
.platform_data = (void *)&adt7410_platform_data,
},
#endif
#if defined(CONFIG_AD7291) || defined(CONFIG_AD7291_MODULE)
{
I2C_BOARD_INFO("ad7291", 0x20),
.irq = IRQ_PG5,
.irq_flags = IRQF_TRIGGER_LOW,
},
#endif
#if defined(CONFIG_ADT7316_I2C) || defined(CONFIG_ADT7316_I2C_MODULE)
{
I2C_BOARD_INFO("adt7316", 0x48),
.irq = IRQ_PG6,
.platform_data = (void *)&adt7316_i2c_data,
},
#endif
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
{
I2C_BOARD_INFO("pcf8574_lcd", 0x22),
},
#endif
#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
{
I2C_BOARD_INFO("pcf8574_keypad", 0x27),
.irq = IRQ_PG6,
},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE)
{
I2C_BOARD_INFO("ad7879", 0x2F),
.irq = IRQ_PG5,
.platform_data = (void *)&bfin_ad7879_ts_info,
},
#endif
#if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE)
{
I2C_BOARD_INFO("adp5588-keys", 0x34),
.irq = IRQ_PG0,
.platform_data = (void *)&adp5588_kpad_data,
},
#endif
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
{
I2C_BOARD_INFO("pmic-adp5520", 0x32),
.irq = IRQ_PG0,
.platform_data = (void *)&adp5520_pdev_data,
},
#endif
#if defined(CONFIG_INPUT_ADXL34X_I2C) || defined(CONFIG_INPUT_ADXL34X_I2C_MODULE)
{
I2C_BOARD_INFO("adxl34x", 0x53),
.irq = IRQ_PG3,
.platform_data = (void *)&adxl34x_info,
},
#endif
#if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE)
{
I2C_BOARD_INFO("adp5588-gpio", 0x34),
.platform_data = (void *)&adp5588_gpio_data,
},
#endif
#if defined(CONFIG_FB_BFIN_7393) || defined(CONFIG_FB_BFIN_7393_MODULE)
{
I2C_BOARD_INFO("bfin-adv7393", 0x2B),
},
#endif
#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
{
I2C_BOARD_INFO("bf537-lq035-ad5280", 0x2F),
},
#endif
#if defined(CONFIG_BACKLIGHT_ADP8870) || defined(CONFIG_BACKLIGHT_ADP8870_MODULE)
{
I2C_BOARD_INFO("adp8870", 0x2B),
.platform_data = (void *)&adp8870_pdata,
},
#endif
#if defined(CONFIG_SND_SOC_ADAU1371) || defined(CONFIG_SND_SOC_ADAU1371_MODULE)
{
I2C_BOARD_INFO("adau1371", 0x1A),
},
#endif
#if defined(CONFIG_SND_SOC_ADAU1761) || defined(CONFIG_SND_SOC_ADAU1761_MODULE)
{
I2C_BOARD_INFO("adau1761", 0x38),
},
#endif
#if defined(CONFIG_SND_SOC_ADAU1361) || defined(CONFIG_SND_SOC_ADAU1361_MODULE)
{
I2C_BOARD_INFO("adau1361", 0x38),
},
#endif
#if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE)
{
I2C_BOARD_INFO("ad5258", 0x18),
},
#endif
#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE)
{
I2C_BOARD_INFO("ssm2602", 0x1b),
},
#endif
#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
{
I2C_BOARD_INFO("ad5398", 0xC),
.platform_data = (void *)&ad5398_regulator_data,
},
#endif
#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE)
{
I2C_BOARD_INFO("adp8860", 0x2A),
.platform_data = (void *)&adp8860_pdata,
},
#endif
#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE)
{
I2C_BOARD_INFO("adau1373", 0x1A),
},
#endif
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
{
I2C_BOARD_INFO("ad5252", 0x2e),
},
#endif
};
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
static unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
#define CF_IDE_NAND_CARD_USE_HDD_INTERFACE
/* #define CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE */
#ifdef CF_IDE_NAND_CARD_USE_HDD_INTERFACE
#define PATA_INT IRQ_PF5
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 1,
.irq_flags = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
};
static struct resource bfin_pata_resources[] = {
{
.start = 0x20314020,
.end = 0x2031403F,
.flags = IORESOURCE_MEM,
},
{
.start = 0x2031401C,
.end = 0x2031401F,
.flags = IORESOURCE_MEM,
},
{
.start = PATA_INT,
.end = PATA_INT,
.flags = IORESOURCE_IRQ,
},
};
#elif defined(CF_IDE_NAND_CARD_USE_CF_IN_COMMON_MEMORY_MODE)
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 0,
};
/* CompactFlash Storage Card Memory Mapped Addressing
* /REG = A11 = 1
*/
static struct resource bfin_pata_resources[] = {
{
.start = 0x20211800,
.end = 0x20211807,
.flags = IORESOURCE_MEM,
},
{
.start = 0x2021180E, /* Device Ctl */
.end = 0x2021180E,
.flags = IORESOURCE_MEM,
},
};
#endif
static struct platform_device bfin_pata_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pata_resources),
.resource = bfin_pata_resources,
.dev = {
.platform_data = &bfin_pata_platform_data,
}
};
#endif
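/*
 * Core-clock/voltage operating points from the datasheet: each VRPAIR()
 * entry pairs the minimum core voltage level with the listed core clock
 * in Hz.  The DPMC driver consults this table when scaling frequency.
 */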
static const unsigned int cclk_vlev_datasheet[] =
{
VRPAIR(VLEV_085, 250000000),
VRPAIR(VLEV_090, 376000000),
VRPAIR(VLEV_095, 426000000),
VRPAIR(VLEV_100, 426000000),
VRPAIR(VLEV_105, 476000000),
VRPAIR(VLEV_110, 476000000),
VRPAIR(VLEV_115, 476000000),
VRPAIR(VLEV_120, 500000000),
VRPAIR(VLEV_125, 533000000),
VRPAIR(VLEV_130, 600000000),
};
static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
.tuple_tab = cclk_vlev_datasheet,
.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
.vr_settling_time = 25 /* us */,
};
static struct platform_device bfin_dpmc = {
.name = "bfin dpmc",
.dev = {
.platform_data = &bfin_dmpc_vreg_data,
},
};
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
#define SPORT_REQ(x) \
[x] = {P_SPORT##x##_TFS, P_SPORT##x##_DTPRI, P_SPORT##x##_TSCLK, \
P_SPORT##x##_RFS, P_SPORT##x##_DRPRI, P_SPORT##x##_RSCLK, 0}
static const u16 bfin_snd_pin[][7] = {
SPORT_REQ(0),
SPORT_REQ(1),
};
static struct bfin_snd_platform_data bfin_snd_data[] = {
{
.pin_req = &bfin_snd_pin[0][0],
},
{
.pin_req = &bfin_snd_pin[1][0],
},
};
#define BFIN_SND_RES(x) \
[x] = { \
{ \
.start = SPORT##x##_TCR1, \
.end = SPORT##x##_TCR1, \
.flags = IORESOURCE_MEM \
}, \
{ \
.start = CH_SPORT##x##_RX, \
.end = CH_SPORT##x##_RX, \
.flags = IORESOURCE_DMA, \
}, \
{ \
.start = CH_SPORT##x##_TX, \
.end = CH_SPORT##x##_TX, \
.flags = IORESOURCE_DMA, \
}, \
{ \
.start = IRQ_SPORT##x##_ERROR, \
.end = IRQ_SPORT##x##_ERROR, \
.flags = IORESOURCE_IRQ, \
} \
}
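/*
 * Each SPORT used for audio needs its register block (MEM), the RX and
 * TX DMA channels, and the SPORT error interrupt; BFIN_SND_RES() builds
 * that four-entry resource array per port.
 */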
static struct resource bfin_snd_resources[][4] = {
BFIN_SND_RES(0),
BFIN_SND_RES(1),
};
static struct platform_device bfin_pcm = {
.name = "bfin-pcm-audio",
.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
static struct platform_device bfin_ad73311_codec_device = {
.name = "ad73311",
.id = -1,
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
static struct platform_device bfin_i2s = {
.name = "bfin-i2s",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
.dev = {
.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
},
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
static struct platform_device bfin_tdm = {
.name = "bfin-tdm",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
.dev = {
.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
},
};
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
static struct platform_device bfin_ac97 = {
.name = "bfin-ac97",
.id = CONFIG_SND_BF5XX_SPORT_NUM,
.num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]),
.resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM],
.dev = {
.platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM],
},
};
#endif
#if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE)
#define REGULATOR_ADP122 "adp122"
#define REGULATOR_ADP122_UV 2500000
static struct regulator_consumer_supply adp122_consumers = {
.supply = REGULATOR_ADP122,
};
static struct regulator_init_data adp_switch_regulator_data = {
.constraints = {
.name = REGULATOR_ADP122,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.min_uV = REGULATOR_ADP122_UV,
.max_uV = REGULATOR_ADP122_UV,
.min_uA = 0,
.max_uA = 300000,
},
.num_consumer_supplies = 1, /* only 1 */
.consumer_supplies = &adp122_consumers,
};
static struct fixed_voltage_config adp_switch_pdata = {
.supply_name = REGULATOR_ADP122,
.microvolts = REGULATOR_ADP122_UV,
.gpio = GPIO_PF2,
.enable_high = 1,
.enabled_at_boot = 0,
.init_data = &adp_switch_regulator_data,
};
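/*
 * The ADP122 LDO is modeled as a GPIO-switched fixed regulator: driving
 * GPIO_PF2 high enables the 2.5 V output, and it is left off at boot.
 */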
static struct platform_device adp_switch_device = {
.name = "reg-fixed-voltage",
.id = 0,
.dev = {
.platform_data = &adp_switch_pdata,
},
};
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
static struct regulator_bulk_data adp122_bulk_data = {
.supply = REGULATOR_ADP122,
};
static struct regulator_userspace_consumer_data adp122_userspace_consumer_data = {
.name = REGULATOR_ADP122,
.num_supplies = 1,
.supplies = &adp122_bulk_data,
};
static struct platform_device adp122_userspace_consumer_device = {
.name = "reg-userspace-consumer",
.id = 0,
.dev = {
.platform_data = &adp122_userspace_consumer_data,
},
};
#endif
#endif
#if defined(CONFIG_IIO_GPIO_TRIGGER) || \
defined(CONFIG_IIO_GPIO_TRIGGER_MODULE)
static struct resource iio_gpio_trigger_resources[] = {
[0] = {
.start = IRQ_PF5,
.end = IRQ_PF5,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
static struct platform_device iio_gpio_trigger = {
.name = "iio_gpio_trigger",
.num_resources = ARRAY_SIZE(iio_gpio_trigger_resources),
.resource = iio_gpio_trigger_resources,
};
#endif
static struct platform_device *stamp_devices[] __initdata = {
&bfin_dpmc,
#if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE)
&bfin_pcmcia_cf_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE)
&sl811_hcd_device,
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
&isp1362_hcd_device,
#endif
#if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE)
&bfin_isp1760_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
&dm9000_device,
#endif
#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
&bfin_can_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_SPI_BFIN_SPORT) || defined(CONFIG_SPI_BFIN_SPORT_MODULE)
&bfin_sport_spi0_device,
&bfin_sport_spi1_device,
#endif
#if defined(CONFIG_FB_BF537_LQ035) || defined(CONFIG_FB_BF537_LQ035_MODULE)
&bfin_fb_device,
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
&bfin_lq035q1_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
&bfin_pata_device,
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
&bfin_async_nand_device,
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
&stamp_flash_device,
#endif
#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \
defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) || \
defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE)
&bfin_pcm,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AD73311) || defined(CONFIG_SND_BF5XX_SOC_AD73311_MODULE)
&bfin_ad73311_codec_device,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_I2S) || defined(CONFIG_SND_BF5XX_SOC_I2S_MODULE)
&bfin_i2s,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_TDM) || defined(CONFIG_SND_BF5XX_SOC_TDM_MODULE)
&bfin_tdm,
#endif
#if defined(CONFIG_SND_BF5XX_SOC_AC97) || defined(CONFIG_SND_BF5XX_SOC_AC97_MODULE)
&bfin_ac97,
#endif
#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE)
#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \
defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE)
&ad5398_virt_consumer_device,
#endif
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
&ad5398_userspace_consumer_device,
#endif
#endif
#if defined(CONFIG_REGULATOR_FIXED_VOLTAGE) || defined(CONFIG_REGULATOR_FIXED_VOLTAGE_MODULE)
&adp_switch_device,
#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \
defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE)
&adp122_userspace_consumer_device,
#endif
#endif
#if defined(CONFIG_IIO_GPIO_TRIGGER) || \
defined(CONFIG_IIO_GPIO_TRIGGER_MODULE)
&iio_gpio_trigger,
#endif
};
static int __init stamp_init(void)
{
printk(KERN_INFO "%s(): registering device resources\n", __func__);
bfin_plat_nand_init();
adf702x_mac_init();
platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
i2c_register_board_info(0, bfin_i2c_board_info,
ARRAY_SIZE(bfin_i2c_board_info));
spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
return 0;
}
arch_initcall(stamp_init);
static struct platform_device *stamp_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
};
void __init native_machine_early_platform_add_devices(void)
{
printk(KERN_INFO "register early platform devices\n");
early_platform_add_devices(stamp_early_devices,
ARRAY_SIZE(stamp_early_devices));
}
void native_machine_restart(char *cmd)
{
/* workaround reboot hang when booting from SPI */
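/* SYSCR[2:0] holds the boot mode; 0x3 is the SPI boot configuration this
 * workaround targets, so release the boot SPI chip select before the
 * reset proper. */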
if ((bfin_read_SYSCR() & 0x7) == 0x3)
bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}
/*
* Currently the MAC address is saved in Flash by U-Boot
*/
#define FLASH_MAC 0x203f0000
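/*
 * Read the 6-byte MAC as one 32-bit and one 16-bit flash access; the
 * caller's buffer must hold at least 6 (ETH_ALEN) bytes.
 */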
void bfin_get_ether_addr(char *addr)
{
*(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
*(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
AndroidDeveloperAlliance/ZenKernel_Grouper | drivers/staging/iio/trigger/iio-trig-bfin-timer.c | 2700 | 5657 | /*
* Copyright 2011 Analog Devices Inc.
*
* Licensed under the GPL-2.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/gptimers.h>
#include "../iio.h"
#include "../trigger.h"
struct bfin_timer {
unsigned short id, bit;
unsigned long irqbit;
int irq;
};
/*
* this covers all hardware timer configurations on
* all Blackfin derivatives out there today
*/
static struct bfin_timer iio_bfin_timer_code[MAX_BLACKFIN_GPTIMERS] = {
{TIMER0_id, TIMER0bit, TIMER_STATUS_TIMIL0, IRQ_TIMER0},
{TIMER1_id, TIMER1bit, TIMER_STATUS_TIMIL1, IRQ_TIMER1},
{TIMER2_id, TIMER2bit, TIMER_STATUS_TIMIL2, IRQ_TIMER2},
#if (MAX_BLACKFIN_GPTIMERS > 3)
{TIMER3_id, TIMER3bit, TIMER_STATUS_TIMIL3, IRQ_TIMER3},
{TIMER4_id, TIMER4bit, TIMER_STATUS_TIMIL4, IRQ_TIMER4},
{TIMER5_id, TIMER5bit, TIMER_STATUS_TIMIL5, IRQ_TIMER5},
{TIMER6_id, TIMER6bit, TIMER_STATUS_TIMIL6, IRQ_TIMER6},
{TIMER7_id, TIMER7bit, TIMER_STATUS_TIMIL7, IRQ_TIMER7},
#endif
#if (MAX_BLACKFIN_GPTIMERS > 8)
{TIMER8_id, TIMER8bit, TIMER_STATUS_TIMIL8, IRQ_TIMER8},
{TIMER9_id, TIMER9bit, TIMER_STATUS_TIMIL9, IRQ_TIMER9},
{TIMER10_id, TIMER10bit, TIMER_STATUS_TIMIL10, IRQ_TIMER10},
#if (MAX_BLACKFIN_GPTIMERS > 11)
{TIMER11_id, TIMER11bit, TIMER_STATUS_TIMIL11, IRQ_TIMER11},
#endif
#endif
};
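/*
 * Table is indexed by timer number; the #if guards compile in only the
 * timers that exist on the target derivative, and every lookup below is
 * bounded by MAX_BLACKFIN_GPTIMERS.
 */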
struct bfin_tmr_state {
struct iio_trigger *trig;
struct bfin_timer *t;
unsigned timer_num;
int irq;
};
static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct iio_trigger *trig = dev_get_drvdata(dev);
struct bfin_tmr_state *st = trig->private_data;
unsigned long val; /* strict_strtoul() takes an unsigned long */
int ret;
ret = strict_strtoul(buf, 10, &val);
if (ret)
goto error_ret;
if (val > 100000) {
ret = -EINVAL;
goto error_ret;
}
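/* The timer is stopped before reprogramming; a written value of 0 leaves
 * it disabled.  Otherwise the rate in Hz is converted to a period in
 * SCLK cycles (period = SCLK / f), and periods of four cycles or fewer
 * are rejected as too fast. */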
disable_gptimers(st->t->bit);
if (!val)
goto error_ret;
val = get_sclk() / val;
if (val <= 4) {
ret = -EINVAL;
goto error_ret;
}
set_gptimer_period(st->t->id, val);
set_gptimer_pwidth(st->t->id, 1);
enable_gptimers(st->t->bit);
error_ret:
return ret ? ret : count;
}
static ssize_t iio_bfin_tmr_frequency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_trigger *trig = dev_get_drvdata(dev);
struct bfin_tmr_state *st = trig->private_data;
return sprintf(buf, "%lu\n",
get_sclk() / get_gptimer_period(st->t->id));
}
static DEVICE_ATTR(frequency, S_IRUGO | S_IWUSR, iio_bfin_tmr_frequency_show,
iio_bfin_tmr_frequency_store);
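/*
 * Userspace sets the sample rate by writing a frequency in Hz to this
 * attribute, e.g. (the sysfs path is an assumption -- the exact trigger
 * directory name depends on registration order and IIO version):
 *
 *   echo 100 > /sys/bus/iio/devices/trigger0/frequency
 *
 * Writing 0 stops the timer; reading back reports SCLK divided by the
 * current timer period.
 */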
static struct attribute *iio_bfin_tmr_trigger_attrs[] = {
&dev_attr_frequency.attr,
NULL,
};
static const struct attribute_group iio_bfin_tmr_trigger_attr_group = {
.attrs = iio_bfin_tmr_trigger_attrs,
};
static const struct attribute_group *iio_bfin_tmr_trigger_attr_groups[] = {
&iio_bfin_tmr_trigger_attr_group,
NULL
};
static irqreturn_t iio_bfin_tmr_trigger_isr(int irq, void *devid)
{
struct bfin_tmr_state *st = devid;
clear_gptimer_intr(st->t->id);
iio_trigger_poll(st->trig, 0);
return IRQ_HANDLED;
}
static int iio_bfin_tmr_get_number(int irq)
{
int i;
for (i = 0; i < MAX_BLACKFIN_GPTIMERS; i++)
if (iio_bfin_timer_code[i].irq == irq)
return i;
return -ENODEV;
}
static int __devinit iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
{
struct bfin_tmr_state *st;
int ret;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
goto out;
}
st->irq = platform_get_irq(pdev, 0);
if (st->irq <= 0) { /* platform_get_irq() returns a negative errno on failure */
dev_err(&pdev->dev, "No IRQs specified\n");
ret = -ENODEV;
goto out1;
}
ret = iio_bfin_tmr_get_number(st->irq);
if (ret < 0)
goto out1;
st->timer_num = ret;
st->t = &iio_bfin_timer_code[st->timer_num];
st->trig = iio_allocate_trigger("bfintmr%d", st->timer_num);
if (!st->trig) {
ret = -ENOMEM;
goto out1;
}
st->trig->private_data = st;
st->trig->owner = THIS_MODULE;
st->trig->dev.groups = iio_bfin_tmr_trigger_attr_groups;
ret = iio_trigger_register(st->trig);
if (ret)
goto out2;
ret = request_irq(st->irq, iio_bfin_tmr_trigger_isr,
0, st->trig->name, st);
if (ret) {
dev_err(&pdev->dev,
"request IRQ-%d failed", st->irq);
goto out4;
}
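/* Output pin disabled: the timer runs in PWM_OUT + PERIOD_CNT mode with
 * IRQ_ENA, i.e. purely as a periodic interrupt source. */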
set_gptimer_config(st->t->id, OUT_DIS | PWM_OUT | PERIOD_CNT | IRQ_ENA);
dev_info(&pdev->dev, "iio trigger Blackfin TMR%d, IRQ-%d\n",
st->timer_num, st->irq);
platform_set_drvdata(pdev, st);
return 0;
out4:
iio_trigger_unregister(st->trig);
out2:
iio_put_trigger(st->trig);
out1:
kfree(st);
out:
return ret;
}
static int __devexit iio_bfin_tmr_trigger_remove(struct platform_device *pdev)
{
struct bfin_tmr_state *st = platform_get_drvdata(pdev);
disable_gptimers(st->t->bit);
free_irq(st->irq, st);
iio_trigger_unregister(st->trig);
iio_put_trigger(st->trig);
kfree(st);
return 0;
}
static struct platform_driver iio_bfin_tmr_trigger_driver = {
.driver = {
.name = "iio_bfin_tmr_trigger",
.owner = THIS_MODULE,
},
.probe = iio_bfin_tmr_trigger_probe,
.remove = __devexit_p(iio_bfin_tmr_trigger_remove),
};
static int __init iio_bfin_tmr_trig_init(void)
{
return platform_driver_register(&iio_bfin_tmr_trigger_driver);
}
module_init(iio_bfin_tmr_trig_init);
static void __exit iio_bfin_tmr_trig_exit(void)
{
platform_driver_unregister(&iio_bfin_tmr_trigger_driver);
}
module_exit(iio_bfin_tmr_trig_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Blackfin system timer based trigger for the iio subsystem");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:iio-trig-bfin-timer");
| gpl-2.0 |