repo_name
string
path
string
copies
string
size
string
content
string
license
string
NamelessRom/android_kernel_nvidia_shieldtablet
arch/arm/mach-pxa/colibri-pxa320.c
4089
6542
/*
 * arch/arm/mach-pxa/colibri-pxa320.c
 *
 * Support for Toradex PXA320/310 based Colibri module
 *
 * Daniel Mack <daniel@caiaq.de>
 * Matthias Meier <matthias.j.meier@gmx.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/usb/gpio_vbus.h>

#include <asm/mach-types.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>

#include <mach/pxa320.h>
#include <mach/colibri.h>
#include <linux/platform_data/video-pxafb.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <mach/audio.h>
#include <mach/pxa27x-udc.h>
#include <mach/udc.h>

#include "generic.h"
#include "devices.h"

#ifdef CONFIG_MACH_COLIBRI_EVALBOARD
/* Pin multiplexing for the Toradex evaluation (carrier) board. */
static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = {
	/* MMC */
	GPIO22_MMC1_CLK,
	GPIO23_MMC1_CMD,
	GPIO18_MMC1_DAT0,
	GPIO19_MMC1_DAT1,
	GPIO20_MMC1_DAT2,
	GPIO21_MMC1_DAT3,
	GPIO28_GPIO,	/* SD detect */

	/* UART 1 configuration (may be set by bootloader) */
	GPIO99_UART1_CTS,
	GPIO104_UART1_RTS,
	GPIO97_UART1_RXD,
	GPIO98_UART1_TXD,
	GPIO101_UART1_DTR,
	GPIO103_UART1_DSR,
	GPIO100_UART1_DCD,
	GPIO102_UART1_RI,

	/* UART 2 configuration */
	GPIO109_UART2_CTS,
	GPIO112_UART2_RTS,
	GPIO110_UART2_RXD,
	GPIO111_UART2_TXD,

	/* UART 3 configuration */
	GPIO30_UART3_RXD,
	GPIO31_UART3_TXD,

	/* UHC */
	GPIO2_2_USBH_PEN,
	GPIO3_2_USBH_PWR,

	/* I2C */
	GPIO32_I2C_SCL,
	GPIO33_I2C_SDA,

	/* PCMCIA */
	MFP_CFG(GPIO59, AF7),	/* PRST ; AF7 to tristate */
	MFP_CFG(GPIO61, AF7),	/* PCE1 ; AF7 to tristate */
	MFP_CFG(GPIO60, AF7),	/* PCE2 ; AF7 to tristate */
	MFP_CFG(GPIO62, AF7),	/* PCD ; AF7 to tristate */
	MFP_CFG(GPIO56, AF7),	/* PSKTSEL ; AF7 to tristate */
	GPIO27_GPIO,		/* RDnWR ; input/tristate */
	GPIO50_GPIO,		/* PREG ; input/tristate */
	GPIO2_RDY,
	GPIO5_NPIOR,
	GPIO6_NPIOW,
	GPIO7_NPIOS16,
	GPIO8_NPWAIT,
	GPIO29_GPIO,		/* PRDY (READY GPIO) */
	GPIO57_GPIO,		/* PPEN (POWER GPIO) */
	GPIO81_GPIO,		/* PCD (DETECT GPIO) */
	GPIO77_GPIO,		/* PRST (RESET GPIO) */
	GPIO53_GPIO,		/* PBVD1 */
	GPIO79_GPIO,		/* PBVD2 */
	GPIO54_GPIO,		/* POE */
};
#else
static mfp_cfg_t colibri_pxa320_evalboard_pin_config[] __initdata = {};
#endif

#if defined(CONFIG_AX88796)
#define COLIBRI_ETH_IRQ_GPIO	mfp_to_gpio(GPIO36_GPIO)

/*
 * Asix AX88796 Ethernet
 */
static struct ax_plat_data colibri_asix_platdata = {
	.flags		= 0, /* defined later */
	.wordlength	= 2,
};

static struct resource colibri_asix_resource[] = {
	[0] = {
		.start	= PXA3xx_CS2_PHYS,
		.end	= PXA3xx_CS2_PHYS + (0x20 * 2) - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
		.end	= PXA_GPIO_TO_IRQ(COLIBRI_ETH_IRQ_GPIO),
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
	}
};

static struct platform_device asix_device = {
	.name		= "ax88796",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(colibri_asix_resource),
	.resource	= colibri_asix_resource,
	.dev		= {
		.platform_data = &colibri_asix_platdata
	}
};

static mfp_cfg_t colibri_pxa320_eth_pin_config[] __initdata = {
	GPIO3_nCS2,			/* AX88796 chip select */
	GPIO36_GPIO | MFP_PULL_HIGH	/* AX88796 IRQ */
};

/* Hook up the on-module AX88796 Ethernet controller. */
static void __init colibri_pxa320_init_eth(void)
{
	colibri_pxa3xx_init_eth(&colibri_asix_platdata);
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_eth_pin_config));
	platform_device_register(&asix_device);
}
#else
static inline void __init colibri_pxa320_init_eth(void) {}
#endif /* CONFIG_AX88796 */

#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
	.gpio_vbus	= mfp_to_gpio(MFP_PIN_GPIO96),
	.gpio_pullup	= -1,
};

static struct platform_device colibri_pxa320_gpio_vbus = {
	.name	= "gpio-vbus",
	.id	= -1,
	.dev	= {
		.platform_data	= &colibri_pxa320_gpio_vbus_info,
	},
};

/* Drive the USB device-side transceiver via the UP2OCR register. */
static void colibri_pxa320_udc_command(int cmd)
{
	switch (cmd) {
	case PXA2XX_UDC_CMD_CONNECT:
		UP2OCR = UP2OCR_HXOE | UP2OCR_DPPUE;
		break;
	case PXA2XX_UDC_CMD_DISCONNECT:
		UP2OCR = UP2OCR_HXOE;
		break;
	}
}

static struct pxa2xx_udc_mach_info colibri_pxa320_udc_info __initdata = {
	.udc_command		= colibri_pxa320_udc_command,
	.gpio_pullup		= -1,
};

static void __init colibri_pxa320_init_udc(void)
{
	pxa_set_udc_info(&colibri_pxa320_udc_info);
	platform_device_register(&colibri_pxa320_gpio_vbus);
}
#else
static inline void colibri_pxa320_init_udc(void) {}
#endif

#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
static mfp_cfg_t colibri_pxa320_lcd_pin_config[] __initdata = {
	GPIO6_2_LCD_LDD_0,
	GPIO7_2_LCD_LDD_1,
	GPIO8_2_LCD_LDD_2,
	GPIO9_2_LCD_LDD_3,
	GPIO10_2_LCD_LDD_4,
	GPIO11_2_LCD_LDD_5,
	GPIO12_2_LCD_LDD_6,
	GPIO13_2_LCD_LDD_7,
	GPIO63_LCD_LDD_8,
	GPIO64_LCD_LDD_9,
	GPIO65_LCD_LDD_10,
	GPIO66_LCD_LDD_11,
	GPIO67_LCD_LDD_12,
	GPIO68_LCD_LDD_13,
	GPIO69_LCD_LDD_14,
	GPIO70_LCD_LDD_15,
	GPIO71_LCD_LDD_16,
	GPIO72_LCD_LDD_17,
	GPIO73_LCD_CS_N,
	GPIO74_LCD_VSYNC,
	GPIO14_2_LCD_FCLK,
	GPIO15_2_LCD_LCLK,
	GPIO16_2_LCD_PCLK,
	GPIO17_2_LCD_BIAS,
};

static void __init colibri_pxa320_init_lcd(void)
{
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_lcd_pin_config));
}
#else
static inline void colibri_pxa320_init_lcd(void) {}
#endif

#if defined(CONFIG_SND_AC97_CODEC) || \
	defined(CONFIG_SND_AC97_CODEC_MODULE)
static mfp_cfg_t colibri_pxa320_ac97_pin_config[] __initdata = {
	GPIO34_AC97_SYSCLK,
	GPIO35_AC97_SDATA_IN_0,
	GPIO37_AC97_SDATA_OUT,
	GPIO38_AC97_SYNC,
	GPIO39_AC97_BITCLK,
	GPIO40_AC97_nACRESET
};

static inline void __init colibri_pxa320_init_ac97(void)
{
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_ac97_pin_config));
	pxa_set_ac97_info(NULL);
}
#else
static inline void colibri_pxa320_init_ac97(void) {}
#endif

/* Board init entry point: bring up each on-module peripheral in turn. */
void __init colibri_pxa320_init(void)
{
	colibri_pxa320_init_eth();
	colibri_pxa3xx_init_nand();
	colibri_pxa320_init_lcd();
	colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO49_GPIO));
	colibri_pxa320_init_ac97();
	colibri_pxa320_init_udc();

	/* Evalboard init */
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_evalboard_pin_config));
	colibri_evalboard_init();
}

MACHINE_START(COLIBRI320, "Toradex Colibri PXA320")
	.atag_offset	= 0x100,
	.init_machine	= colibri_pxa320_init,
	.map_io		= pxa3xx_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa3xx_init_irq,
	.handle_irq	= pxa3xx_handle_irq,
	.init_time	= pxa_timer_init,
	.restart	= pxa_restart,
MACHINE_END
gpl-2.0
yukchou/linux-sunxi-1
drivers/pci/hotplug/pciehp_pci.c
4857
4188
/* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include "../pci.h" #include "pciehp.h" static int __ref pciehp_add_bridge(struct pci_dev *dev) { struct pci_bus *parent = dev->bus; int pass, busnr, start = parent->secondary; int end = parent->subordinate; for (busnr = start; busnr <= end; busnr++) { if (!pci_find_bus(pci_domain_nr(parent), busnr)) break; } if (busnr-- > end) { err("No bus number available for hot-added bridge %s\n", pci_name(dev)); return -1; } for (pass = 0; pass < 2; pass++) busnr = pci_scan_bridge(parent, dev, busnr, pass); if (!dev->subordinate) return -1; return 0; } int pciehp_configure_device(struct slot *p_slot) { struct pci_dev *dev; struct pci_dev *bridge = p_slot->ctrl->pcie->port; struct pci_bus *parent = bridge->subordinate; int num, fn; struct controller *ctrl = p_slot->ctrl; dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); if (dev) { ctrl_err(ctrl, "Device 
%s already exists " "at %04x:%02x:00, cannot hot-add\n", pci_name(dev), pci_domain_nr(parent), parent->number); pci_dev_put(dev); return -EINVAL; } num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); if (num == 0) { ctrl_err(ctrl, "No new device found\n"); return -ENODEV; } for (fn = 0; fn < 8; fn++) { dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); if (!dev) continue; if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { pciehp_add_bridge(dev); } pci_dev_put(dev); } pci_assign_unassigned_bridge_resources(bridge); for (fn = 0; fn < 8; fn++) { dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); if (!dev) continue; if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { pci_dev_put(dev); continue; } pci_configure_slot(dev); pci_dev_put(dev); } pci_bus_add_devices(parent); return 0; } int pciehp_unconfigure_device(struct slot *p_slot) { int ret, rc = 0; int j; u8 bctl = 0; u8 presence = 0; struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; u16 command; struct controller *ctrl = p_slot->ctrl; ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", __func__, pci_domain_nr(parent), parent->number); ret = pciehp_get_adapter_status(p_slot, &presence); if (ret) presence = 0; for (j = 0; j < 8; j++) { struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j)); if (!temp) continue; if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); if (bctl & PCI_BRIDGE_CTL_VGA) { ctrl_err(ctrl, "Cannot remove display device %s\n", pci_name(temp)); pci_dev_put(temp); rc = -EINVAL; break; } } pci_stop_and_remove_bus_device(temp); /* * Ensure that no new Requests will be generated from * the device. */ if (presence) { pci_read_config_word(temp, PCI_COMMAND, &command); command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR); command |= PCI_COMMAND_INTX_DISABLE; pci_write_config_word(temp, PCI_COMMAND, command); } pci_dev_put(temp); } return rc; }
gpl-2.0
android-armv7a-belalang-tempur/android_SpeedKernel_3.4-1
drivers/hwmon/f71805f.c
4857
49650
/* * f71805f.c - driver for the Fintek F71805F/FG and F71872F/FG Super-I/O * chips integrated hardware monitoring features * Copyright (C) 2005-2006 Jean Delvare <khali@linux-fr.org> * * The F71805F/FG is a LPC Super-I/O chip made by Fintek. It integrates * complete hardware monitoring features: voltage, fan and temperature * sensors, and manual and automatic fan speed control. * * The F71872F/FG is almost the same, with two more voltages monitored, * and 6 VID inputs. * * The F71806F/FG is essentially the same as the F71872F/FG. It even has * the same chip ID, so the driver can't differentiate between. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/platform_device.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/io.h> static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); static struct platform_device *pdev; #define DRVNAME "f71805f" enum kinds { f71805f, f71872f }; /* * Super-I/O constants and functions */ #define F71805F_LD_HWM 0x04 #define SIO_REG_LDSEL 0x07 /* Logical device select */ #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ #define SIO_REG_DEVREV 0x22 /* Device revision */ #define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */ #define SIO_REG_FNSEL1 0x29 /* Multi Function Select 1 (F71872F) */ #define SIO_REG_ENABLE 0x30 /* Logical device enable */ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ #define SIO_FINTEK_ID 0x1934 #define SIO_F71805F_ID 0x0406 #define SIO_F71872F_ID 0x0341 static inline int superio_inb(int base, int reg) { outb(reg, base); return inb(base + 1); } static int superio_inw(int base, int reg) { int val; outb(reg++, base); val = inb(base + 1) << 8; outb(reg, base); val |= inb(base + 1); return val; } static inline void superio_select(int base, int ld) { outb(SIO_REG_LDSEL, base); outb(ld, base + 1); } static inline void superio_enter(int base) { outb(0x87, base); outb(0x87, base); } static inline void superio_exit(int base) { outb(0xaa, base); } /* * ISA constants */ #define REGION_LENGTH 8 #define ADDR_REG_OFFSET 5 #define DATA_REG_OFFSET 6 /* * Registers */ /* in nr from 0 to 10 (8-bit values) */ #define F71805F_REG_IN(nr) (0x10 + (nr)) #define F71805F_REG_IN_HIGH(nr) ((nr) < 10 ? 0x40 + 2 * (nr) : 0x2E) #define F71805F_REG_IN_LOW(nr) ((nr) < 10 ? 
0x41 + 2 * (nr) : 0x2F) /* fan nr from 0 to 2 (12-bit values, two registers) */ #define F71805F_REG_FAN(nr) (0x20 + 2 * (nr)) #define F71805F_REG_FAN_LOW(nr) (0x28 + 2 * (nr)) #define F71805F_REG_FAN_TARGET(nr) (0x69 + 16 * (nr)) #define F71805F_REG_FAN_CTRL(nr) (0x60 + 16 * (nr)) #define F71805F_REG_PWM_FREQ(nr) (0x63 + 16 * (nr)) #define F71805F_REG_PWM_DUTY(nr) (0x6B + 16 * (nr)) /* temp nr from 0 to 2 (8-bit values) */ #define F71805F_REG_TEMP(nr) (0x1B + (nr)) #define F71805F_REG_TEMP_HIGH(nr) (0x54 + 2 * (nr)) #define F71805F_REG_TEMP_HYST(nr) (0x55 + 2 * (nr)) #define F71805F_REG_TEMP_MODE 0x01 /* pwm/fan pwmnr from 0 to 2, auto point apnr from 0 to 2 */ /* map Fintek numbers to our numbers as follows: 9->0, 5->1, 1->2 */ #define F71805F_REG_PWM_AUTO_POINT_TEMP(pwmnr, apnr) \ (0xA0 + 0x10 * (pwmnr) + (2 - (apnr))) #define F71805F_REG_PWM_AUTO_POINT_FAN(pwmnr, apnr) \ (0xA4 + 0x10 * (pwmnr) + \ 2 * (2 - (apnr))) #define F71805F_REG_START 0x00 /* status nr from 0 to 2 */ #define F71805F_REG_STATUS(nr) (0x36 + (nr)) /* individual register bits */ #define FAN_CTRL_DC_MODE 0x10 #define FAN_CTRL_LATCH_FULL 0x08 #define FAN_CTRL_MODE_MASK 0x03 #define FAN_CTRL_MODE_SPEED 0x00 #define FAN_CTRL_MODE_TEMPERATURE 0x01 #define FAN_CTRL_MODE_MANUAL 0x02 /* * Data structures and manipulation thereof */ struct f71805f_auto_point { u8 temp[3]; u16 fan[3]; }; struct f71805f_data { unsigned short addr; const char *name; struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ unsigned long last_limits; /* In jiffies */ /* Register values */ u8 in[11]; u8 in_high[11]; u8 in_low[11]; u16 has_in; u16 fan[3]; u16 fan_low[3]; u16 fan_target[3]; u8 fan_ctrl[3]; u8 pwm[3]; u8 pwm_freq[3]; u8 temp[3]; u8 temp_high[3]; u8 temp_hyst[3]; u8 temp_mode; unsigned long alarms; struct f71805f_auto_point auto_points[3]; }; struct f71805f_sio_data { enum kinds kind; u8 fnsel1; }; static inline long 
in_from_reg(u8 reg) { return reg * 8; } /* The 2 least significant bits are not used */ static inline u8 in_to_reg(long val) { if (val <= 0) return 0; if (val >= 2016) return 0xfc; return ((val + 16) / 32) << 2; } /* in0 is downscaled by a factor 2 internally */ static inline long in0_from_reg(u8 reg) { return reg * 16; } static inline u8 in0_to_reg(long val) { if (val <= 0) return 0; if (val >= 4032) return 0xfc; return ((val + 32) / 64) << 2; } /* The 4 most significant bits are not used */ static inline long fan_from_reg(u16 reg) { reg &= 0xfff; if (!reg || reg == 0xfff) return 0; return 1500000 / reg; } static inline u16 fan_to_reg(long rpm) { /* * If the low limit is set below what the chip can measure, * store the largest possible 12-bit value in the registers, * so that no alarm will ever trigger. */ if (rpm < 367) return 0xfff; return 1500000 / rpm; } static inline unsigned long pwm_freq_from_reg(u8 reg) { unsigned long clock = (reg & 0x80) ? 48000000UL : 1000000UL; reg &= 0x7f; if (reg == 0) reg++; return clock / (reg << 8); } static inline u8 pwm_freq_to_reg(unsigned long val) { if (val >= 187500) /* The highest we can do */ return 0x80; if (val >= 1475) /* Use 48 MHz clock */ return 0x80 | (48000000UL / (val << 8)); if (val < 31) /* The lowest we can do */ return 0x7f; else /* Use 1 MHz clock */ return 1000000UL / (val << 8); } static inline int pwm_mode_from_reg(u8 reg) { return !(reg & FAN_CTRL_DC_MODE); } static inline long temp_from_reg(u8 reg) { return reg * 1000; } static inline u8 temp_to_reg(long val) { if (val <= 0) return 0; if (val >= 1000 * 0xff) return 0xff; return (val + 500) / 1000; } /* * Device I/O access */ /* Must be called with data->update_lock held, except during initialization */ static u8 f71805f_read8(struct f71805f_data *data, u8 reg) { outb(reg, data->addr + ADDR_REG_OFFSET); return inb(data->addr + DATA_REG_OFFSET); } /* Must be called with data->update_lock held, except during initialization */ static void 
f71805f_write8(struct f71805f_data *data, u8 reg, u8 val) { outb(reg, data->addr + ADDR_REG_OFFSET); outb(val, data->addr + DATA_REG_OFFSET); } /* * It is important to read the MSB first, because doing so latches the * value of the LSB, so we are sure both bytes belong to the same value. * Must be called with data->update_lock held, except during initialization */ static u16 f71805f_read16(struct f71805f_data *data, u8 reg) { u16 val; outb(reg, data->addr + ADDR_REG_OFFSET); val = inb(data->addr + DATA_REG_OFFSET) << 8; outb(++reg, data->addr + ADDR_REG_OFFSET); val |= inb(data->addr + DATA_REG_OFFSET); return val; } /* Must be called with data->update_lock held, except during initialization */ static void f71805f_write16(struct f71805f_data *data, u8 reg, u16 val) { outb(reg, data->addr + ADDR_REG_OFFSET); outb(val >> 8, data->addr + DATA_REG_OFFSET); outb(++reg, data->addr + ADDR_REG_OFFSET); outb(val & 0xff, data->addr + DATA_REG_OFFSET); } static struct f71805f_data *f71805f_update_device(struct device *dev) { struct f71805f_data *data = dev_get_drvdata(dev); int nr, apnr; mutex_lock(&data->update_lock); /* Limit registers cache is refreshed after 60 seconds */ if (time_after(jiffies, data->last_updated + 60 * HZ) || !data->valid) { for (nr = 0; nr < 11; nr++) { if (!(data->has_in & (1 << nr))) continue; data->in_high[nr] = f71805f_read8(data, F71805F_REG_IN_HIGH(nr)); data->in_low[nr] = f71805f_read8(data, F71805F_REG_IN_LOW(nr)); } for (nr = 0; nr < 3; nr++) { data->fan_low[nr] = f71805f_read16(data, F71805F_REG_FAN_LOW(nr)); data->fan_target[nr] = f71805f_read16(data, F71805F_REG_FAN_TARGET(nr)); data->pwm_freq[nr] = f71805f_read8(data, F71805F_REG_PWM_FREQ(nr)); } for (nr = 0; nr < 3; nr++) { data->temp_high[nr] = f71805f_read8(data, F71805F_REG_TEMP_HIGH(nr)); data->temp_hyst[nr] = f71805f_read8(data, F71805F_REG_TEMP_HYST(nr)); } data->temp_mode = f71805f_read8(data, F71805F_REG_TEMP_MODE); for (nr = 0; nr < 3; nr++) { for (apnr = 0; apnr < 3; apnr++) { 
data->auto_points[nr].temp[apnr] = f71805f_read8(data, F71805F_REG_PWM_AUTO_POINT_TEMP(nr, apnr)); data->auto_points[nr].fan[apnr] = f71805f_read16(data, F71805F_REG_PWM_AUTO_POINT_FAN(nr, apnr)); } } data->last_limits = jiffies; } /* Measurement registers cache is refreshed after 1 second */ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { for (nr = 0; nr < 11; nr++) { if (!(data->has_in & (1 << nr))) continue; data->in[nr] = f71805f_read8(data, F71805F_REG_IN(nr)); } for (nr = 0; nr < 3; nr++) { data->fan[nr] = f71805f_read16(data, F71805F_REG_FAN(nr)); data->fan_ctrl[nr] = f71805f_read8(data, F71805F_REG_FAN_CTRL(nr)); data->pwm[nr] = f71805f_read8(data, F71805F_REG_PWM_DUTY(nr)); } for (nr = 0; nr < 3; nr++) { data->temp[nr] = f71805f_read8(data, F71805F_REG_TEMP(nr)); } data->alarms = f71805f_read8(data, F71805F_REG_STATUS(0)) + (f71805f_read8(data, F71805F_REG_STATUS(1)) << 8) + (f71805f_read8(data, F71805F_REG_STATUS(2)) << 16); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* * Sysfs interface */ static ssize_t show_in0(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71805f_data *data = f71805f_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int nr = attr->index; return sprintf(buf, "%ld\n", in0_from_reg(data->in[nr])); } static ssize_t show_in0_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71805f_data *data = f71805f_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int nr = attr->index; return sprintf(buf, "%ld\n", in0_from_reg(data->in_high[nr])); } static ssize_t show_in0_min(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71805f_data *data = f71805f_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int nr = attr->index; return sprintf(buf, "%ld\n", in0_from_reg(data->in_low[nr])); } 
/*
 * Store the in0 high limit, in millivolts. in0 (and in9/in10 on the
 * F71872F, which reuse these callbacks) has dedicated conversion helpers
 * (in0_to_reg/in0_from_reg), distinct from the other voltage inputs.
 */
static ssize_t set_in0_max(struct device *dev, struct device_attribute
			   *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_high[nr] = in0_to_reg(val);
	f71805f_write8(data, F71805F_REG_IN_HIGH(nr), data->in_high[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* Store the in0 low limit, in millivolts */
static ssize_t set_in0_min(struct device *dev, struct device_attribute
			   *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_low[nr] = in0_to_reg(val);
	f71805f_write8(data, F71805F_REG_IN_LOW(nr), data->in_low[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* Voltage input reading (inputs other than in0), from the register cache */
static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", in_from_reg(data->in[nr]));
}

static ssize_t show_in_max(struct device *dev, struct device_attribute
			   *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", in_from_reg(data->in_high[nr]));
}

static ssize_t show_in_min(struct device *dev, struct device_attribute
			   *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", in_from_reg(data->in_low[nr]));
}

static ssize_t set_in_max(struct device *dev, struct device_attribute
			  *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_high[nr] = in_to_reg(val);
	f71805f_write8(data, F71805F_REG_IN_HIGH(nr), data->in_high[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t set_in_min(struct device *dev, struct device_attribute
			  *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_low[nr] = in_to_reg(val);
	f71805f_write8(data, F71805F_REG_IN_LOW(nr), data->in_low[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* Fan speed reading, in RPM */
static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", fan_from_reg(data->fan[nr]));
}

static ssize_t show_fan_min(struct device *dev, struct device_attribute
			    *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", fan_from_reg(data->fan_low[nr]));
}

/* Target speed used by the "speed mode" closed-loop fan control */
static ssize_t show_fan_target(struct device *dev, struct device_attribute
			       *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", fan_from_reg(data->fan_target[nr]));
}

static ssize_t set_fan_min(struct device *dev, struct device_attribute
			   *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_low[nr] = fan_to_reg(val);
	f71805f_write16(data, F71805F_REG_FAN_LOW(nr), data->fan_low[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t set_fan_target(struct device *dev, struct device_attribute
			      *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->fan_target[nr] = fan_to_reg(val);
	f71805f_write16(data, F71805F_REG_FAN_TARGET(nr),
			data->fan_target[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* Raw PWM duty cycle (0-255) */
static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
			char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%d\n", (int)data->pwm[nr]);
}

/*
 * Fan control mode following the hwmon convention:
 * 1 = manual, 2 = temperature mode, 3 = fan speed mode
 */
static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
			       *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	int mode;

	switch (data->fan_ctrl[nr] & FAN_CTRL_MODE_MASK) {
	case FAN_CTRL_MODE_SPEED:
		mode = 3;
		break;
	case FAN_CTRL_MODE_TEMPERATURE:
		mode = 2;
		break;
	default: /* MANUAL */
		mode = 1;
	}

	return sprintf(buf, "%d\n", mode);
}

static ssize_t show_pwm_freq(struct device *dev, struct device_attribute
			     *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%lu\n", pwm_freq_from_reg(data->pwm_freq[nr]));
}

static ssize_t show_pwm_mode(struct
device *dev, struct device_attribute *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	/* Decoded from the fan control register by pwm_mode_from_reg() */
	return sprintf(buf, "%d\n", pwm_mode_from_reg(data->fan_ctrl[nr]));
}

/*
 * Store the raw PWM duty cycle (0-255) of fan nr. The pwmN file is only
 * writable while the fan is in manual mode (see set_pwm_enable below).
 */
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	/* The duty cycle register is 8 bits wide */
	if (val > 255)
		return -EINVAL;

	mutex_lock(&data->update_lock);
	data->pwm[nr] = val;
	f71805f_write8(data, F71805F_REG_PWM_DUTY(nr), data->pwm[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* Forward declaration, needed to toggle pwmN writability below */
static struct attribute *f71805f_attr_pwm[];

/*
 * Switch fan nr between manual (1), temperature (2) and speed (3) control
 * modes, and adjust the write permission of the matching pwmN file so that
 * the duty cycle can only be set manually in manual mode. A failed chmod
 * is only logged (dev_dbg), it does not abort the mode change.
 */
static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
			      *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	u8 reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	if (val < 1 || val > 3)
		return -EINVAL;

	if (val > 1) { /* Automatic mode, user can't set PWM value */
		if (sysfs_chmod_file(&dev->kobj, f71805f_attr_pwm[nr],
				     S_IRUGO))
			dev_dbg(dev, "chmod -w pwm%d failed\n", nr + 1);
	}

	mutex_lock(&data->update_lock);
	reg = f71805f_read8(data, F71805F_REG_FAN_CTRL(nr))
	    & ~FAN_CTRL_MODE_MASK;
	switch (val) {
	case 1:
		reg |= FAN_CTRL_MODE_MANUAL;
		break;
	case 2:
		reg |= FAN_CTRL_MODE_TEMPERATURE;
		break;
	case 3:
		reg |= FAN_CTRL_MODE_SPEED;
		break;
	}
	data->fan_ctrl[nr] = reg;
	f71805f_write8(data, F71805F_REG_FAN_CTRL(nr), reg);
	mutex_unlock(&data->update_lock);

	if (val == 1) { /* Manual mode, user can set PWM value */
		if (sysfs_chmod_file(&dev->kobj, f71805f_attr_pwm[nr],
				     S_IRUGO | S_IWUSR))
			dev_dbg(dev, "chmod +w pwm%d failed\n", nr + 1);
	}

	return count;
}

static ssize_t
set_pwm_freq(struct device *dev, struct device_attribute *devattr,
	     const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->pwm_freq[nr] = pwm_freq_to_reg(val);
	f71805f_write8(data, F71805F_REG_PWM_FREQ(nr), data->pwm_freq[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Automatic fan control trip points. attr->nr selects the PWM channel,
 * attr->index the trip point. Reads come from the cached copy of the
 * auto point registers (no f71805f_update_device() call here).
 */
static ssize_t show_pwm_auto_point_temp(struct device *dev,
					struct device_attribute *devattr,
					char *buf)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	int pwmnr = attr->nr;
	int apnr = attr->index;

	return sprintf(buf, "%ld\n",
		       temp_from_reg(data->auto_points[pwmnr].temp[apnr]));
}

static ssize_t set_pwm_auto_point_temp(struct device *dev,
				       struct device_attribute *devattr,
				       const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	int pwmnr = attr->nr;
	int apnr = attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->auto_points[pwmnr].temp[apnr] = temp_to_reg(val);
	f71805f_write8(data, F71805F_REG_PWM_AUTO_POINT_TEMP(pwmnr, apnr),
		       data->auto_points[pwmnr].temp[apnr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* Fan speed associated with each automatic control trip point */
static ssize_t show_pwm_auto_point_fan(struct device *dev,
				       struct device_attribute *devattr,
				       char *buf)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	int pwmnr = attr->nr;
	int apnr = attr->index;

	return sprintf(buf, "%ld\n",
		       fan_from_reg(data->auto_points[pwmnr].fan[apnr]));
}

static ssize_t set_pwm_auto_point_fan(struct device *dev,
				      struct device_attribute *devattr,
				      const char *buf, size_t count)
{
	struct f71805f_data *data =
dev_get_drvdata(dev);
	struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr);
	int pwmnr = attr->nr;
	int apnr = attr->index;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->auto_points[pwmnr].fan[apnr] = fan_to_reg(val);
	f71805f_write16(data, F71805F_REG_PWM_AUTO_POINT_FAN(pwmnr, apnr),
			data->auto_points[pwmnr].fan[apnr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/* Temperature reading, converted by temp_from_reg() */
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", temp_from_reg(data->temp[nr]));
}

static ssize_t show_temp_max(struct device *dev, struct device_attribute
			     *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", temp_from_reg(data->temp_high[nr]));
}

static ssize_t show_temp_hyst(struct device *dev, struct device_attribute
			      *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	return sprintf(buf, "%ld\n", temp_from_reg(data->temp_hyst[nr]));
}

/* Sensor type, one bit per channel in temp_mode */
static ssize_t show_temp_type(struct device *dev, struct device_attribute
			      *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;

	/* 3 is diode, 4 is thermistor */
	return sprintf(buf, "%u\n", (data->temp_mode & (1 << nr)) ? 3 : 4);
}

static ssize_t set_temp_max(struct device *dev, struct device_attribute
			    *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_high[nr] = temp_to_reg(val);
	f71805f_write8(data, F71805F_REG_TEMP_HIGH(nr), data->temp_high[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

static ssize_t set_temp_hyst(struct device *dev, struct device_attribute
			     *devattr, const char *buf, size_t count)
{
	struct f71805f_data *data = dev_get_drvdata(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int nr = attr->index;
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_hyst[nr] = temp_to_reg(val);
	f71805f_write8(data, F71805F_REG_TEMP_HYST(nr), data->temp_hyst[nr]);
	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Alarm word layout (see f71805f_update_device): bits 0-10 are voltage
 * alarms, bits 11-13 temperature alarms, bits 16-18 fan alarms.
 */
static ssize_t show_alarms_in(struct device *dev, struct device_attribute
			      *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);

	return sprintf(buf, "%lu\n", data->alarms & 0x7ff);
}

static ssize_t show_alarms_fan(struct device *dev, struct device_attribute
			       *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);

	return sprintf(buf, "%lu\n", (data->alarms >> 16) & 0x07);
}

static ssize_t show_alarms_temp(struct device *dev, struct device_attribute
				*devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);

	return sprintf(buf, "%lu\n", (data->alarms >> 11) & 0x07);
}

/* Single alarm bit; attr->index is the bit position in the alarm word */
static ssize_t show_alarm(struct device *dev, struct device_attribute
			  *devattr, char *buf)
{
	struct f71805f_data *data = f71805f_update_device(dev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	int bitnr = attr->index;

	return sprintf(buf, "%lu\n", (data->alarms >> bitnr) & 1);
}

static
ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct f71805f_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in0, NULL, 0); static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, show_in0_max, set_in0_max, 0); static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, show_in0_min, set_in0_min, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 1); static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 1); static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, show_in_min, set_in_min, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 2); static SENSOR_DEVICE_ATTR(in2_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 2); static SENSOR_DEVICE_ATTR(in2_min, S_IRUGO | S_IWUSR, show_in_min, set_in_min, 2); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 3); static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 3); static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO | S_IWUSR, show_in_min, set_in_min, 3); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 4); static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 4); static SENSOR_DEVICE_ATTR(in4_min, S_IRUGO | S_IWUSR, show_in_min, set_in_min, 4); static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 5); static SENSOR_DEVICE_ATTR(in5_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 5); static SENSOR_DEVICE_ATTR(in5_min, S_IRUGO | S_IWUSR, show_in_min, set_in_min, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 6); static SENSOR_DEVICE_ATTR(in6_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 6); static SENSOR_DEVICE_ATTR(in6_min, S_IRUGO | S_IWUSR, show_in_min, set_in_min, 6); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 7); static SENSOR_DEVICE_ATTR(in7_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 7); static SENSOR_DEVICE_ATTR(in7_min, S_IRUGO | 
S_IWUSR, show_in_min, set_in_min, 7); static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 8); static SENSOR_DEVICE_ATTR(in8_max, S_IRUGO | S_IWUSR, show_in_max, set_in_max, 8); static SENSOR_DEVICE_ATTR(in8_min, S_IRUGO | S_IWUSR, show_in_min, set_in_min, 8); static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_in0, NULL, 9); static SENSOR_DEVICE_ATTR(in9_max, S_IRUGO | S_IWUSR, show_in0_max, set_in0_max, 9); static SENSOR_DEVICE_ATTR(in9_min, S_IRUGO | S_IWUSR, show_in0_min, set_in0_min, 9); static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_in0, NULL, 10); static SENSOR_DEVICE_ATTR(in10_max, S_IRUGO | S_IWUSR, show_in0_max, set_in0_max, 10); static SENSOR_DEVICE_ATTR(in10_min, S_IRUGO | S_IWUSR, show_in0_min, set_in0_min, 10); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0); static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR, show_fan_min, set_fan_min, 0); static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target, set_fan_target, 0); static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1); static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO | S_IWUSR, show_fan_min, set_fan_min, 1); static SENSOR_DEVICE_ATTR(fan2_target, S_IRUGO | S_IWUSR, show_fan_target, set_fan_target, 1); static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2); static SENSOR_DEVICE_ATTR(fan3_min, S_IRUGO | S_IWUSR, show_fan_min, set_fan_min, 2); static SENSOR_DEVICE_ATTR(fan3_target, S_IRUGO | S_IWUSR, show_fan_target, set_fan_target, 2); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max, set_temp_max, 0); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_hyst, set_temp_hyst, 0); static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, set_temp_max, 1); static 
SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_hyst, set_temp_hyst, 1); static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2); static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, set_temp_max, 2); static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_hyst, set_temp_hyst, 2); static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2); /* * pwm (value) files are created read-only, write permission is * then added or removed dynamically as needed */ static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, show_pwm, set_pwm, 0); static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable, set_pwm_enable, 0); static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq, 0); static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO, show_pwm, set_pwm, 1); static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, show_pwm_enable, set_pwm_enable, 1); static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq, 1); static SENSOR_DEVICE_ATTR(pwm2_mode, S_IRUGO, show_pwm_mode, NULL, 1); static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO, show_pwm, set_pwm, 2); static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, show_pwm_enable, set_pwm_enable, 2); static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq, 2); static SENSOR_DEVICE_ATTR(pwm3_mode, S_IRUGO, show_pwm_mode, NULL, 2); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 0, 0); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 0, 0); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 0, 1); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_fan, S_IRUGO 
| S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 0, 1); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 0, 2); static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 0, 2); static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 1, 0); static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 1, 0); static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 1, 1); static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 1, 1); static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 1, 2); static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 1, 2); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 2, 0); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 2, 0); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 2, 1); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 2, 1); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR, show_pwm_auto_point_temp, set_pwm_auto_point_temp, 2, 2); static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_fan, S_IRUGO | S_IWUSR, show_pwm_auto_point_fan, set_pwm_auto_point_fan, 2, 2); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1); static 
SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7); static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 8); static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 9); static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 10); static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 11); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 12); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 16); static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 17); static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 18); static DEVICE_ATTR(alarms_in, S_IRUGO, show_alarms_in, NULL); static DEVICE_ATTR(alarms_fan, S_IRUGO, show_alarms_fan, NULL); static DEVICE_ATTR(alarms_temp, S_IRUGO, show_alarms_temp, NULL); static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct attribute *f71805f_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, 
&sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan1_alarm.dev_attr.attr, &sensor_dev_attr_fan1_target.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan2_alarm.dev_attr.attr, &sensor_dev_attr_fan2_target.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan3_min.dev_attr.attr, &sensor_dev_attr_fan3_alarm.dev_attr.attr, &sensor_dev_attr_fan3_target.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm1_mode.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm2_mode.dev_attr.attr, &sensor_dev_attr_pwm3.dev_attr.attr, &sensor_dev_attr_pwm3_enable.dev_attr.attr, &sensor_dev_attr_pwm3_mode.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &sensor_dev_attr_temp1_type.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, &sensor_dev_attr_temp2_type.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, &sensor_dev_attr_temp3_type.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point1_fan.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point2_fan.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_pwm1_auto_point3_fan.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr, 
&sensor_dev_attr_pwm2_auto_point1_fan.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point2_fan.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_pwm2_auto_point3_fan.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point1_fan.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point2_fan.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_pwm3_auto_point3_fan.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, &sensor_dev_attr_in7_alarm.dev_attr.attr, &dev_attr_alarms_in.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &dev_attr_alarms_temp.attr, &dev_attr_alarms_fan.attr, &dev_attr_name.attr, NULL }; static const struct attribute_group f71805f_group = { .attrs = f71805f_attributes, }; static struct attribute *f71805f_attributes_optin[4][5] = { { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in8_max.dev_attr.attr, &sensor_dev_attr_in8_min.dev_attr.attr, &sensor_dev_attr_in8_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in9_input.dev_attr.attr, &sensor_dev_attr_in9_max.dev_attr.attr, &sensor_dev_attr_in9_min.dev_attr.attr, &sensor_dev_attr_in9_alarm.dev_attr.attr, NULL }, { &sensor_dev_attr_in10_input.dev_attr.attr, &sensor_dev_attr_in10_max.dev_attr.attr, &sensor_dev_attr_in10_min.dev_attr.attr, &sensor_dev_attr_in10_alarm.dev_attr.attr, NULL } }; static const struct 
attribute_group f71805f_group_optin[4] = { { .attrs = f71805f_attributes_optin[0] }, { .attrs = f71805f_attributes_optin[1] }, { .attrs = f71805f_attributes_optin[2] }, { .attrs = f71805f_attributes_optin[3] }, }; /* * We don't include pwm_freq files in the arrays above, because they must be * created conditionally (only if pwm_mode is 1 == PWM) */ static struct attribute *f71805f_attributes_pwm_freq[] = { &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm3_freq.dev_attr.attr, NULL }; static const struct attribute_group f71805f_group_pwm_freq = { .attrs = f71805f_attributes_pwm_freq, }; /* We also need an indexed access to pwmN files to toggle writability */ static struct attribute *f71805f_attr_pwm[] = { &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm3.dev_attr.attr, }; /* * Device registration and initialization */ static void __devinit f71805f_init_device(struct f71805f_data *data) { u8 reg; int i; reg = f71805f_read8(data, F71805F_REG_START); if ((reg & 0x41) != 0x01) { printk(KERN_DEBUG DRVNAME ": Starting monitoring " "operations\n"); f71805f_write8(data, F71805F_REG_START, (reg | 0x01) & ~0x40); } /* * Fan monitoring can be disabled. If it is, we won't be polling * the register values, and won't create the related sysfs files. 
*/ for (i = 0; i < 3; i++) { data->fan_ctrl[i] = f71805f_read8(data, F71805F_REG_FAN_CTRL(i)); /* * Clear latch full bit, else "speed mode" fan speed control * doesn't work */ if (data->fan_ctrl[i] & FAN_CTRL_LATCH_FULL) { data->fan_ctrl[i] &= ~FAN_CTRL_LATCH_FULL; f71805f_write8(data, F71805F_REG_FAN_CTRL(i), data->fan_ctrl[i]); } } } static int __devinit f71805f_probe(struct platform_device *pdev) { struct f71805f_sio_data *sio_data = pdev->dev.platform_data; struct f71805f_data *data; struct resource *res; int i, err; static const char * const names[] = { "f71805f", "f71872f", }; data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL); if (!data) { err = -ENOMEM; pr_err("Out of memory\n"); goto exit; } res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start + ADDR_REG_OFFSET, 2, DRVNAME)) { err = -EBUSY; dev_err(&pdev->dev, "Failed to request region 0x%lx-0x%lx\n", (unsigned long)(res->start + ADDR_REG_OFFSET), (unsigned long)(res->start + ADDR_REG_OFFSET + 1)); goto exit_free; } data->addr = res->start; data->name = names[sio_data->kind]; mutex_init(&data->update_lock); platform_set_drvdata(pdev, data); /* Some voltage inputs depend on chip model and configuration */ switch (sio_data->kind) { case f71805f: data->has_in = 0x1ff; break; case f71872f: data->has_in = 0x6ef; if (sio_data->fnsel1 & 0x01) data->has_in |= (1 << 4); /* in4 */ if (sio_data->fnsel1 & 0x02) data->has_in |= (1 << 8); /* in8 */ break; } /* Initialize the F71805F chip */ f71805f_init_device(data); /* Register sysfs interface files */ err = sysfs_create_group(&pdev->dev.kobj, &f71805f_group); if (err) goto exit_release_region; if (data->has_in & (1 << 4)) { /* in4 */ err = sysfs_create_group(&pdev->dev.kobj, &f71805f_group_optin[0]); if (err) goto exit_remove_files; } if (data->has_in & (1 << 8)) { /* in8 */ err = sysfs_create_group(&pdev->dev.kobj, &f71805f_group_optin[1]); if (err) goto exit_remove_files; } if (data->has_in & (1 << 9)) { /* in9 (F71872F/FG only) 
*/
		err = sysfs_create_group(&pdev->dev.kobj,
					 &f71805f_group_optin[2]);
		if (err)
			goto exit_remove_files;
	}
	if (data->has_in & (1 << 10)) { /* in10 (F71872F/FG only) */
		err = sysfs_create_group(&pdev->dev.kobj,
					 &f71805f_group_optin[3]);
		if (err)
			goto exit_remove_files;
	}
	for (i = 0; i < 3; i++) {
		/* If control mode is PWM, create pwm_freq file */
		if (!(data->fan_ctrl[i] & FAN_CTRL_DC_MODE)) {
			err = sysfs_create_file(&pdev->dev.kobj,
						f71805f_attributes_pwm_freq[i]);
			if (err)
				goto exit_remove_files;
		}
		/* If PWM is in manual mode, add write permission */
		if (data->fan_ctrl[i] & FAN_CTRL_MODE_MANUAL) {
			err = sysfs_chmod_file(&pdev->dev.kobj,
					       f71805f_attr_pwm[i],
					       S_IRUGO | S_IWUSR);
			if (err) {
				dev_err(&pdev->dev,
					"chmod +w pwm%d failed\n", i + 1);
				goto exit_remove_files;
			}
		}
	}

	data->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
		goto exit_remove_files;
	}

	return 0;

exit_remove_files:
	/* Unwind: remove all groups/files we may have created above */
	sysfs_remove_group(&pdev->dev.kobj, &f71805f_group);
	for (i = 0; i < 4; i++)
		sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
	sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
exit_release_region:
	release_region(res->start + ADDR_REG_OFFSET, 2);
exit_free:
	platform_set_drvdata(pdev, NULL);
	kfree(data);
exit:
	return err;
}

/* Tear down everything f71805f_probe() set up, in reverse order */
static int __devexit f71805f_remove(struct platform_device *pdev)
{
	struct f71805f_data *data = platform_get_drvdata(pdev);
	struct resource *res;
	int i;

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &f71805f_group);
	for (i = 0; i < 4; i++)
		sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
	sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
	platform_set_drvdata(pdev, NULL);
	kfree(data);

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	release_region(res->start + ADDR_REG_OFFSET, 2);

	return 0;
}

static struct platform_driver f71805f_driver = {
	.driver = {
		.owner =
THIS_MODULE, .name = DRVNAME, }, .probe = f71805f_probe, .remove = __devexit_p(f71805f_remove), }; static int __init f71805f_device_add(unsigned short address, const struct f71805f_sio_data *sio_data) { struct resource res = { .start = address, .end = address + REGION_LENGTH - 1, .flags = IORESOURCE_IO, }; int err; pdev = platform_device_alloc(DRVNAME, address); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } res.name = pdev->name; err = acpi_check_resource_conflict(&res); if (err) goto exit_device_put; err = platform_device_add_resources(pdev, &res, 1); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add_data(pdev, sio_data, sizeof(struct f71805f_sio_data)); if (err) { pr_err("Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __init f71805f_find(int sioaddr, unsigned short *address, struct f71805f_sio_data *sio_data) { int err = -ENODEV; u16 devid; static const char * const names[] = { "F71805F/FG", "F71872F/FG or F71806F/FG", }; superio_enter(sioaddr); devid = superio_inw(sioaddr, SIO_REG_MANID); if (devid != SIO_FINTEK_ID) goto exit; devid = force_id ? 
force_id : superio_inw(sioaddr, SIO_REG_DEVID); switch (devid) { case SIO_F71805F_ID: sio_data->kind = f71805f; break; case SIO_F71872F_ID: sio_data->kind = f71872f; sio_data->fnsel1 = superio_inb(sioaddr, SIO_REG_FNSEL1); break; default: pr_info("Unsupported Fintek device, skipping\n"); goto exit; } superio_select(sioaddr, F71805F_LD_HWM); if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { pr_warn("Device not activated, skipping\n"); goto exit; } *address = superio_inw(sioaddr, SIO_REG_ADDR); if (*address == 0) { pr_warn("Base address not set, skipping\n"); goto exit; } *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ err = 0; pr_info("Found %s chip at %#x, revision %u\n", names[sio_data->kind], *address, superio_inb(sioaddr, SIO_REG_DEVREV)); exit: superio_exit(sioaddr); return err; } static int __init f71805f_init(void) { int err; unsigned short address; struct f71805f_sio_data sio_data; if (f71805f_find(0x2e, &address, &sio_data) && f71805f_find(0x4e, &address, &sio_data)) return -ENODEV; err = platform_driver_register(&f71805f_driver); if (err) goto exit; /* Sets global pdev as a side effect */ err = f71805f_device_add(address, &sio_data); if (err) goto exit_driver; return 0; exit_driver: platform_driver_unregister(&f71805f_driver); exit: return err; } static void __exit f71805f_exit(void) { platform_device_unregister(pdev); platform_driver_unregister(&f71805f_driver); } MODULE_AUTHOR("Jean Delvare <khali@linux-fr>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("F71805F/F71872F hardware monitoring driver"); module_init(f71805f_init); module_exit(f71805f_exit);
gpl-2.0
sorinstanila/kernel_msm_kk
drivers/hwmon/smsc47m192.c
4857
21295
/* * smsc47m192.c - Support for hardware monitoring block of * SMSC LPC47M192 and compatible Super I/O chips * * Copyright (C) 2006 Hartmut Rick <linux@rick.claranet.de> * * Derived from lm78.c and other chip drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/mutex.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; /* SMSC47M192 registers */ #define SMSC47M192_REG_IN(nr) ((nr) < 6 ? (0x20 + (nr)) : \ (0x50 + (nr) - 6)) #define SMSC47M192_REG_IN_MAX(nr) ((nr) < 6 ? (0x2b + (nr) * 2) : \ (0x54 + (((nr) - 6) * 2))) #define SMSC47M192_REG_IN_MIN(nr) ((nr) < 6 ? (0x2c + (nr) * 2) : \ (0x55 + (((nr) - 6) * 2))) static u8 SMSC47M192_REG_TEMP[3] = { 0x27, 0x26, 0x52 }; static u8 SMSC47M192_REG_TEMP_MAX[3] = { 0x39, 0x37, 0x58 }; static u8 SMSC47M192_REG_TEMP_MIN[3] = { 0x3A, 0x38, 0x59 }; #define SMSC47M192_REG_TEMP_OFFSET(nr) ((nr) == 2 ? 
0x1e : 0x1f) #define SMSC47M192_REG_ALARM1 0x41 #define SMSC47M192_REG_ALARM2 0x42 #define SMSC47M192_REG_VID 0x47 #define SMSC47M192_REG_VID4 0x49 #define SMSC47M192_REG_CONFIG 0x40 #define SMSC47M192_REG_SFR 0x4f #define SMSC47M192_REG_COMPANY_ID 0x3e #define SMSC47M192_REG_VERSION 0x3f /* generalised scaling with integer rounding */ static inline int SCALE(long val, int mul, int div) { if (val < 0) return (val * mul - div / 2) / div; else return (val * mul + div / 2) / div; } /* Conversions */ /* smsc47m192 internally scales voltage measurements */ static const u16 nom_mv[] = { 2500, 2250, 3300, 5000, 12000, 3300, 1500, 1800 }; static inline unsigned int IN_FROM_REG(u8 reg, int n) { return SCALE(reg, nom_mv[n], 192); } static inline u8 IN_TO_REG(unsigned long val, int n) { return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255); } /* * TEMP: 0.001 degC units (-128C to +127C) * REG: 1C/bit, two's complement */ static inline s8 TEMP_TO_REG(int val) { return SENSORS_LIMIT(SCALE(val, 1, 1000), -128000, 127000); } static inline int TEMP_FROM_REG(s8 val) { return val * 1000; } struct smsc47m192_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[8]; /* Register value */ u8 in_max[8]; /* Register value */ u8 in_min[8]; /* Register value */ s8 temp[3]; /* Register value */ s8 temp_max[3]; /* Register value */ s8 temp_min[3]; /* Register value */ s8 temp_offset[3]; /* Register value */ u16 alarms; /* Register encoding, combined */ u8 vid; /* Register encoding, combined */ u8 vrm; }; static int smsc47m192_probe(struct i2c_client *client, const struct i2c_device_id *id); static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info); static int smsc47m192_remove(struct i2c_client *client); static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); static const struct i2c_device_id smsc47m192_id[] = { { "smsc47m192", 0 }, { } 
}; MODULE_DEVICE_TABLE(i2c, smsc47m192_id); static struct i2c_driver smsc47m192_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "smsc47m192", }, .probe = smsc47m192_probe, .remove = smsc47m192_remove, .id_table = smsc47m192_id, .detect = smsc47m192_detect, .address_list = normal_i2c, }; /* Voltages */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr], nr)); } static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr], nr)); } static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr], nr)); } static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val, nr); i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(nr), data->in_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct 
sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val, nr); i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(nr), data->in_max[nr]); mutex_unlock(&data->update_lock); return count; } #define show_in_offset(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ show_in_min, set_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ show_in_max, set_in_max, offset); show_in_offset(0) show_in_offset(1) show_in_offset(2) show_in_offset(3) show_in_offset(4) show_in_offset(5) show_in_offset(6) show_in_offset(7) /* Temperatures */ static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr])); } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, 
const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[nr], data->temp_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[nr], data->temp_max[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr])); } static ssize_t set_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_offset[nr] = TEMP_TO_REG(val); if (nr > 1) 
i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]); else if (data->temp_offset[nr] != 0) { /* * offset[0] and offset[1] share the same register, * SFR bit 4 activates offset[0] */ i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR, (sfr & 0xef) | (nr == 0 ? 0x10 : 0)); data->temp_offset[1-nr] = 0; i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]); } else if ((sfr & 0x10) == (nr == 0 ? 0x10 : 0)) i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(nr), 0); mutex_unlock(&data->update_lock); return count; } #define show_temp_index(index) \ static SENSOR_DEVICE_ATTR(temp##index##_input, S_IRUGO, \ show_temp, NULL, index-1); \ static SENSOR_DEVICE_ATTR(temp##index##_min, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, index-1); \ static SENSOR_DEVICE_ATTR(temp##index##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, index-1); \ static SENSOR_DEVICE_ATTR(temp##index##_offset, S_IRUGO | S_IWUSR, \ show_temp_offset, set_temp_offset, index-1); show_temp_index(1) show_temp_index(2) show_temp_index(3) /* VID */ static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf) { struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL); static ssize_t show_vrm(struct device *dev, struct device_attribute *attr, char *buf) { struct smsc47m192_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", data->vrm); } static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct smsc47m192_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; data->vrm = val; return count; } static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm); /* Alarms */ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char 
*buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%u\n", (data->alarms & nr) ? 1 : 0); } static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 0x0008); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 0x0100); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 0x0200); static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 0x0400); static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 0x0800); static struct attribute *smsc47m192_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, 
&sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in7_alarm.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_offset.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_offset.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, &dev_attr_cpu0_vid.attr, &dev_attr_vrm.attr, NULL }; static const struct attribute_group smsc47m192_group = { .attrs = smsc47m192_attributes, }; static struct attribute *smsc47m192_attributes_in4[] = { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, NULL }; static const struct attribute_group smsc47m192_group_in4 = { .attrs = smsc47m192_attributes_in4, }; static void smsc47m192_init_client(struct i2c_client *client) { int i; u8 config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG); u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR); /* select cycle mode (pause 1 sec between updates) */ i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR, (sfr & 0xfd) | 0x02); if (!(config & 0x01)) { /* initialize alarm 
limits */ for (i = 0; i < 8; i++) { i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(i), 0); i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(i), 0xff); } for (i = 0; i < 3; i++) { i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[i], 0x80); i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[i], 0x7f); } /* start monitoring */ i2c_smbus_write_byte_data(client, SMSC47M192_REG_CONFIG, (config & 0xf7) | 0x01); } } /* Return 0 if detection is successful, -ENODEV otherwise */ static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int version; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Detection criteria from sensors_detect script */ version = i2c_smbus_read_byte_data(client, SMSC47M192_REG_VERSION); if (i2c_smbus_read_byte_data(client, SMSC47M192_REG_COMPANY_ID) == 0x55 && (version & 0xf0) == 0x20 && (i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID) & 0x70) == 0x00 && (i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID4) & 0xfe) == 0x80) { dev_info(&adapter->dev, "found SMSC47M192 or compatible, " "version 2, stepping A%d\n", version & 0x0f); } else { dev_dbg(&adapter->dev, "SMSC47M192 detection failed at 0x%02x\n", client->addr); return -ENODEV; } strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE); return 0; } static int smsc47m192_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct smsc47m192_data *data; int config; int err; data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); data->vrm = vid_which_vrm(); mutex_init(&data->update_lock); /* Initialize the SMSC47M192 chip */ smsc47m192_init_client(client); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group); if (err) goto exit_free; /* Pin 110 is either in4 (+12V) or VID4 */ config = i2c_smbus_read_byte_data(client, 
SMSC47M192_REG_CONFIG); if (!(config & 0x20)) { err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group_in4); if (err) goto exit_remove_files; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); exit_free: kfree(data); exit: return err; } static int smsc47m192_remove(struct i2c_client *client) { struct smsc47m192_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); kfree(data); return 0; } static struct smsc47m192_data *smsc47m192_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); int i, config; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR); dev_dbg(&client->dev, "Starting smsc47m192 update\n"); for (i = 0; i <= 7; i++) { data->in[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_IN(i)); data->in_min[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_IN_MIN(i)); data->in_max[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_IN_MAX(i)); } for (i = 0; i < 3; i++) { data->temp[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP[i]); data->temp_max[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP_MAX[i]); data->temp_min[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP_MIN[i]); } for (i = 1; i < 3; i++) data->temp_offset[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(i)); /* * first offset is temp_offset[0] if SFR bit 4 is set, * temp_offset[1] otherwise */ if (sfr & 0x10) { data->temp_offset[0] = 
data->temp_offset[1]; data->temp_offset[1] = 0; } else data->temp_offset[0] = 0; data->vid = i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID) & 0x0f; config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG); if (config & 0x20) data->vid |= (i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID4) & 0x01) << 4; data->alarms = i2c_smbus_read_byte_data(client, SMSC47M192_REG_ALARM1) | (i2c_smbus_read_byte_data(client, SMSC47M192_REG_ALARM2) << 8); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } module_i2c_driver(smsc47m192_driver); MODULE_AUTHOR("Hartmut Rick <linux@rick.claranet.de>"); MODULE_DESCRIPTION("SMSC47M192 driver"); MODULE_LICENSE("GPL");
gpl-2.0
Pantech-Discover/android_kernel_pantech_magnus
drivers/hwmon/smsc47m192.c
4857
21295
/* * smsc47m192.c - Support for hardware monitoring block of * SMSC LPC47M192 and compatible Super I/O chips * * Copyright (C) 2006 Hartmut Rick <linux@rick.claranet.de> * * Derived from lm78.c and other chip drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/mutex.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; /* SMSC47M192 registers */ #define SMSC47M192_REG_IN(nr) ((nr) < 6 ? (0x20 + (nr)) : \ (0x50 + (nr) - 6)) #define SMSC47M192_REG_IN_MAX(nr) ((nr) < 6 ? (0x2b + (nr) * 2) : \ (0x54 + (((nr) - 6) * 2))) #define SMSC47M192_REG_IN_MIN(nr) ((nr) < 6 ? (0x2c + (nr) * 2) : \ (0x55 + (((nr) - 6) * 2))) static u8 SMSC47M192_REG_TEMP[3] = { 0x27, 0x26, 0x52 }; static u8 SMSC47M192_REG_TEMP_MAX[3] = { 0x39, 0x37, 0x58 }; static u8 SMSC47M192_REG_TEMP_MIN[3] = { 0x3A, 0x38, 0x59 }; #define SMSC47M192_REG_TEMP_OFFSET(nr) ((nr) == 2 ? 
0x1e : 0x1f) #define SMSC47M192_REG_ALARM1 0x41 #define SMSC47M192_REG_ALARM2 0x42 #define SMSC47M192_REG_VID 0x47 #define SMSC47M192_REG_VID4 0x49 #define SMSC47M192_REG_CONFIG 0x40 #define SMSC47M192_REG_SFR 0x4f #define SMSC47M192_REG_COMPANY_ID 0x3e #define SMSC47M192_REG_VERSION 0x3f /* generalised scaling with integer rounding */ static inline int SCALE(long val, int mul, int div) { if (val < 0) return (val * mul - div / 2) / div; else return (val * mul + div / 2) / div; } /* Conversions */ /* smsc47m192 internally scales voltage measurements */ static const u16 nom_mv[] = { 2500, 2250, 3300, 5000, 12000, 3300, 1500, 1800 }; static inline unsigned int IN_FROM_REG(u8 reg, int n) { return SCALE(reg, nom_mv[n], 192); } static inline u8 IN_TO_REG(unsigned long val, int n) { return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255); } /* * TEMP: 0.001 degC units (-128C to +127C) * REG: 1C/bit, two's complement */ static inline s8 TEMP_TO_REG(int val) { return SENSORS_LIMIT(SCALE(val, 1, 1000), -128000, 127000); } static inline int TEMP_FROM_REG(s8 val) { return val * 1000; } struct smsc47m192_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[8]; /* Register value */ u8 in_max[8]; /* Register value */ u8 in_min[8]; /* Register value */ s8 temp[3]; /* Register value */ s8 temp_max[3]; /* Register value */ s8 temp_min[3]; /* Register value */ s8 temp_offset[3]; /* Register value */ u16 alarms; /* Register encoding, combined */ u8 vid; /* Register encoding, combined */ u8 vrm; }; static int smsc47m192_probe(struct i2c_client *client, const struct i2c_device_id *id); static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info); static int smsc47m192_remove(struct i2c_client *client); static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); static const struct i2c_device_id smsc47m192_id[] = { { "smsc47m192", 0 }, { } 
};
MODULE_DEVICE_TABLE(i2c, smsc47m192_id);

/*
 * I2C driver binding. Devices are bound either explicitly through the
 * id table or auto-detected on hwmon-class adapters via
 * smsc47m192_detect().
 */
static struct i2c_driver smsc47m192_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = "smsc47m192",
	},
	.probe = smsc47m192_probe,
	.remove = smsc47m192_remove,
	.id_table = smsc47m192_id,
	.detect = smsc47m192_detect,
	.address_list = normal_i2c,
};

/* Voltages */

/* sysfs show callback for inN_input: current reading, scaled by IN_FROM_REG() */
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr], nr));
}

/* sysfs show callback for inN_min: low alarm limit */
static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr], nr));
}

/* sysfs show callback for inN_max: high alarm limit */
static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr], nr));
}

/*
 * sysfs store callback for inN_min: update the cached limit and write it
 * to the chip under update_lock so cache and register stay in sync.
 */
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct smsc47m192_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_min[nr] = IN_TO_REG(val, nr);
	i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(nr),
			data->in_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* sysfs store callback for inN_max: same pattern as set_in_min() */
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct smsc47m192_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->in_max[nr] = IN_TO_REG(val, nr);
	i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(nr),
			data->in_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* Declare input/min/max attributes for one voltage channel */
#define show_in_offset(offset) \
static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
		show_in, NULL, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
		show_in_min, set_in_min, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
		show_in_max, set_in_max, offset);

show_in_offset(0)
show_in_offset(1)
show_in_offset(2)
show_in_offset(3)
show_in_offset(4)
show_in_offset(5)
show_in_offset(6)
show_in_offset(7)

/* Temperatures */

/* sysfs show callback for tempN_input */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}

/* sysfs show callback for tempN_min */
static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}

/* sysfs show callback for tempN_max */
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}

/*
 * sysfs store callback for tempN_min. Note that the temperature limit
 * registers are addressed through the SMSC47M192_REG_TEMP_MIN[] array,
 * unlike the voltage limits which use a function-like macro.
 */
static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct smsc47m192_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_min[nr] = TEMP_TO_REG(val);
	i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[nr],
			data->temp_min[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* sysfs store callback for tempN_max: same pattern as set_temp_min() */
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct smsc47m192_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_max[nr] = TEMP_TO_REG(val);
	i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[nr],
			data->temp_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

/* sysfs show callback for tempN_offset */
static ssize_t show_temp_offset(struct device *dev, struct device_attribute
		*attr, char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}

/*
 * sysfs store callback for tempN_offset.
 * Channels 0 and 1 share one offset register; SFR bit 4 selects which of
 * the two it applies to, so writing one of them may clear the other.
 */
static ssize_t set_temp_offset(struct device *dev, struct device_attribute
		*attr, const char *buf, size_t count)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct smsc47m192_data *data = i2c_get_clientdata(client);
	u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_offset[nr] = TEMP_TO_REG(val);
	if (nr > 1)
		i2c_smbus_write_byte_data(client,
			SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]);
	else if (data->temp_offset[nr] != 0) {
		/*
		 * offset[0] and offset[1] share the same register,
		 * SFR bit 4 activates offset[0]
		 */
		i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR,
			(sfr & 0xef) | (nr == 0 ? 0x10 : 0));
		/* the shared register now belongs to channel nr */
		data->temp_offset[1-nr] = 0;
		i2c_smbus_write_byte_data(client,
			SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]);
	} else if ((sfr & 0x10) == (nr == 0 ? 0x10 : 0))
		/* clearing the offset currently selected by SFR bit 4 */
		i2c_smbus_write_byte_data(client,
			SMSC47M192_REG_TEMP_OFFSET(nr), 0);
	mutex_unlock(&data->update_lock);
	return count;
}

/*
 * Declare input/min/max/offset attributes for one temperature channel.
 * sysfs names are 1-based while the driver index is 0-based, hence the
 * index-1 below.
 */
#define show_temp_index(index) \
static SENSOR_DEVICE_ATTR(temp##index##_input, S_IRUGO, \
		show_temp, NULL, index-1); \
static SENSOR_DEVICE_ATTR(temp##index##_min, S_IRUGO | S_IWUSR, \
		show_temp_min, set_temp_min, index-1); \
static SENSOR_DEVICE_ATTR(temp##index##_max, S_IRUGO | S_IWUSR, \
		show_temp_max, set_temp_max, index-1); \
static SENSOR_DEVICE_ATTR(temp##index##_offset, S_IRUGO | S_IWUSR, \
		show_temp_offset, set_temp_offset, index-1);

show_temp_index(1)
show_temp_index(2)
show_temp_index(3)

/* VID */

/* sysfs show callback for cpu0_vid: decode VID pins using the vrm value */
static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);

/* sysfs show callback for vrm */
static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct smsc47m192_data *data = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", data->vrm);
}

/*
 * sysfs store callback for vrm.
 * NOTE(review): val is not range-checked before being stored in
 * data->vrm — an out-of-range value is silently truncated; consider
 * rejecting values > 255 (verify against the declared type of vrm).
 */
static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct smsc47m192_data *data = dev_get_drvdata(dev);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	data->vrm = val;
	return count;
}
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);

/* Alarms */

/*
 * Common show callback for all alarm and fault files. The attribute
 * index is the alarm bit mask to test, not a channel number.
 */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
	int nr = sensor_attr->index;
	struct smsc47m192_data *data = smsc47m192_update_device(dev);
	return sprintf(buf, "%u\n", (data->alarms & nr) ? 1 : 0);
}

static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 0x0008);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 0x0100);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 0x0200);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 0x0400);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 0x0800);

/*
 * Attributes registered unconditionally. in4 is deliberately missing
 * here: it is only present when pin 110 is configured as in4 rather
 * than VID4, and lives in smsc47m192_group_in4 below.
 */
static struct attribute *smsc47m192_attributes[] = {
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_min.dev_attr.attr,
	&sensor_dev_attr_in0_max.dev_attr.attr,
	&sensor_dev_attr_in0_alarm.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_min.dev_attr.attr,
	&sensor_dev_attr_in1_max.dev_attr.attr,
	&sensor_dev_attr_in1_alarm.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in2_min.dev_attr.attr,
	&sensor_dev_attr_in2_max.dev_attr.attr,
	&sensor_dev_attr_in2_alarm.dev_attr.attr,
	&sensor_dev_attr_in3_input.dev_attr.attr,
	&sensor_dev_attr_in3_min.dev_attr.attr,
	&sensor_dev_attr_in3_max.dev_attr.attr,
	&sensor_dev_attr_in3_alarm.dev_attr.attr,
	&sensor_dev_attr_in5_input.dev_attr.attr,
	&sensor_dev_attr_in5_min.dev_attr.attr,
	&sensor_dev_attr_in5_max.dev_attr.attr,
	&sensor_dev_attr_in5_alarm.dev_attr.attr,
	&sensor_dev_attr_in6_input.dev_attr.attr,
	&sensor_dev_attr_in6_min.dev_attr.attr,
	&sensor_dev_attr_in6_max.dev_attr.attr,
	&sensor_dev_attr_in6_alarm.dev_attr.attr,
	&sensor_dev_attr_in7_input.dev_attr.attr,
	&sensor_dev_attr_in7_min.dev_attr.attr,
	&sensor_dev_attr_in7_max.dev_attr.attr,
	&sensor_dev_attr_in7_alarm.dev_attr.attr,
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	&sensor_dev_attr_temp1_min.dev_attr.attr,
	&sensor_dev_attr_temp1_offset.dev_attr.attr,
	&sensor_dev_attr_temp1_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_max.dev_attr.attr,
	&sensor_dev_attr_temp2_min.dev_attr.attr,
	&sensor_dev_attr_temp2_offset.dev_attr.attr,
	&sensor_dev_attr_temp2_alarm.dev_attr.attr,
	&sensor_dev_attr_temp2_fault.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_max.dev_attr.attr,
	&sensor_dev_attr_temp3_min.dev_attr.attr,
	&sensor_dev_attr_temp3_offset.dev_attr.attr,
	&sensor_dev_attr_temp3_alarm.dev_attr.attr,
	&sensor_dev_attr_temp3_fault.dev_attr.attr,
	&dev_attr_cpu0_vid.attr,
	&dev_attr_vrm.attr,
	NULL
};

static const struct attribute_group smsc47m192_group = {
	.attrs = smsc47m192_attributes,
};

/* in4 attributes, registered only when pin 110 is configured as in4 */
static struct attribute *smsc47m192_attributes_in4[] = {
	&sensor_dev_attr_in4_input.dev_attr.attr,
	&sensor_dev_attr_in4_min.dev_attr.attr,
	&sensor_dev_attr_in4_max.dev_attr.attr,
	&sensor_dev_attr_in4_alarm.dev_attr.attr,
	NULL
};

static const struct attribute_group smsc47m192_group_in4 = {
	.attrs = smsc47m192_attributes_in4,
};

/*
 * One-time chip setup: select cycle mode, and if monitoring is not yet
 * enabled (config bit 0 clear), program wide-open alarm limits and
 * start monitoring.
 */
static void smsc47m192_init_client(struct i2c_client *client)
{
	int i;
	u8 config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG);
	u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);

	/* select cycle mode (pause 1 sec between updates) */
	i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR,
			(sfr & 0xfd) | 0x02);
	if (!(config & 0x01)) {
		/* initialize alarm limits */
		for (i = 0; i < 8; i++) {
			i2c_smbus_write_byte_data(client,
				SMSC47M192_REG_IN_MIN(i), 0);
			i2c_smbus_write_byte_data(client,
				SMSC47M192_REG_IN_MAX(i), 0xff);
		}
		for (i = 0; i < 3; i++) {
			i2c_smbus_write_byte_data(client,
				SMSC47M192_REG_TEMP_MIN[i], 0x80);
			i2c_smbus_write_byte_data(client,
				SMSC47M192_REG_TEMP_MAX[i], 0x7f);
		}

		/* start monitoring */
		i2c_smbus_write_byte_data(client, SMSC47M192_REG_CONFIG,
			(config & 0xf7) | 0x01);
	}
}

/* Return 0 if detection is successful, -ENODEV otherwise */
static int smsc47m192_detect(struct i2c_client *client,
			     struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int version;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	/* Detection criteria from sensors_detect script */
	version = i2c_smbus_read_byte_data(client, SMSC47M192_REG_VERSION);
	if (i2c_smbus_read_byte_data(client,
				SMSC47M192_REG_COMPANY_ID) == 0x55
	 && (version & 0xf0) == 0x20
	 && (i2c_smbus_read_byte_data(client,
				SMSC47M192_REG_VID) & 0x70) == 0x00
	 && (i2c_smbus_read_byte_data(client,
				SMSC47M192_REG_VID4) & 0xfe) == 0x80) {
		dev_info(&adapter->dev,
			 "found SMSC47M192 or compatible, "
			 "version 2, stepping A%d\n", version & 0x0f);
	} else {
		dev_dbg(&adapter->dev,
			"SMSC47M192 detection failed at 0x%02x\n",
			client->addr);
		return -ENODEV;
	}

	strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE);

	return 0;
}

/*
 * Bind to a detected chip: allocate state, initialize the chip,
 * create the sysfs groups (the in4 group only when pin 110 is in4),
 * and register with the hwmon core. Cleans up with goto-unwinding on
 * any failure.
 */
static int smsc47m192_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	struct smsc47m192_data *data;
	int config;
	int err;

	data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL);
	if (!data) {
		err = -ENOMEM;
		goto exit;
	}

	i2c_set_clientdata(client, data);
	data->vrm = vid_which_vrm();
	mutex_init(&data->update_lock);

	/* Initialize the SMSC47M192 chip */
	smsc47m192_init_client(client);

	/* Register sysfs hooks */
	err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group);
	if (err)
		goto exit_free;

	/* Pin 110 is either in4 (+12V) or VID4 */
	config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG);
	if (!(config & 0x20)) {
		err = sysfs_create_group(&client->dev.kobj,
					 &smsc47m192_group_in4);
		if (err)
			goto exit_remove_files;
	}

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove_files;
	}

	return 0;

exit_remove_files:
	/* removing a group that was never created is harmless */
	sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
	sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
exit_free:
	kfree(data);
exit:
	return err;
}

/* Tear down everything created by smsc47m192_probe() */
static int smsc47m192_remove(struct i2c_client *client)
{
	struct smsc47m192_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
	sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);

	kfree(data);

	return 0;
}

/*
 * Refresh the register cache if it is stale (older than 1.5 s) or not
 * yet valid, under update_lock. Returns the (possibly updated) data.
 */
static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct smsc47m192_data *data = i2c_get_clientdata(client);
	int i, config;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
	 || !data->valid) {
		u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);

		dev_dbg(&client->dev, "Starting smsc47m192 update\n");

		for (i = 0; i <= 7; i++) {
			data->in[i] = i2c_smbus_read_byte_data(client,
						SMSC47M192_REG_IN(i));
			data->in_min[i] = i2c_smbus_read_byte_data(client,
						SMSC47M192_REG_IN_MIN(i));
			data->in_max[i] = i2c_smbus_read_byte_data(client,
						SMSC47M192_REG_IN_MAX(i));
		}
		for (i = 0; i < 3; i++) {
			data->temp[i] = i2c_smbus_read_byte_data(client,
						SMSC47M192_REG_TEMP[i]);
			data->temp_max[i] = i2c_smbus_read_byte_data(client,
						SMSC47M192_REG_TEMP_MAX[i]);
			data->temp_min[i] = i2c_smbus_read_byte_data(client,
						SMSC47M192_REG_TEMP_MIN[i]);
		}
		for (i = 1; i < 3; i++)
			data->temp_offset[i] = i2c_smbus_read_byte_data(client,
						SMSC47M192_REG_TEMP_OFFSET(i));
		/*
		 * first offset is temp_offset[0] if SFR bit 4 is set,
		 * temp_offset[1] otherwise
		 */
		if (sfr & 0x10) {
			data->temp_offset[0] = data->temp_offset[1];
			data->temp_offset[1] = 0;
		} else
			data->temp_offset[0] = 0;

		data->vid = i2c_smbus_read_byte_data(client,
					SMSC47M192_REG_VID) & 0x0f;
		config = i2c_smbus_read_byte_data(client,
					SMSC47M192_REG_CONFIG);
		if (config & 0x20)
			/* pin 110 is VID4: merge it as bit 4 of the VID */
			data->vid |= (i2c_smbus_read_byte_data(client,
					SMSC47M192_REG_VID4) & 0x01) << 4;
		data->alarms = i2c_smbus_read_byte_data(client,
					SMSC47M192_REG_ALARM1)
			     | (i2c_smbus_read_byte_data(client,
					SMSC47M192_REG_ALARM2) << 8);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

module_i2c_driver(smsc47m192_driver);

MODULE_AUTHOR("Hartmut Rick <linux@rick.claranet.de>");
MODULE_DESCRIPTION("SMSC47M192 driver");
MODULE_LICENSE("GPL");
gpl-2.0
vincentmli/linux-stable
drivers/hwmon/pc87427.c
4857
41293
/*
 * pc87427.c - hardware monitoring driver for the
 *             National Semiconductor PC87427 Super-I/O chip
 * Copyright (C) 2006, 2008, 2010  Jean Delvare <khali@linux-fr.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Supports the following chips:
 *
 * Chip        #vin    #fan    #pwm    #temp   devid
 * PC87427     -       8       4       6       0xF2
 *
 * This driver assumes that no more than one chip is present.
 * Only fans are fully supported so far. Temperatures are in read-only
 * mode, and voltages aren't supported at all.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/io.h>

/* Module option to override the device ID read from the chip */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");

static struct platform_device *pdev;

#define DRVNAME "pc87427"

/*
 * The lock mutex protects both the I/O accesses (needed because the
 * device is using banked registers) and the register cache (needed to keep
 * the data in the registers and the cache in sync at any time).
 */
struct pc87427_data {
	struct device *hwmon_dev;
	struct mutex lock;
	int address[2];			/* I/O base of the FMC and HMC logical devices */
	const char *name;
	unsigned long last_updated;	/* in jiffies */
	u8 fan_enabled;			/* bit vector */
	u16 fan[8];			/* register values */
	u16 fan_min[8];			/* register values */
	u8 fan_status[8];		/* register values */
	u8 pwm_enabled;			/* bit vector */
	u8 pwm_auto_ok;			/* bit vector */
	u8 pwm_enable[4];		/* register values */
	u8 pwm[4];			/* register values */
	u8 temp_enabled;		/* bit vector */
	s16 temp[6];			/* register values */
	s8 temp_min[6];			/* register values */
	s8 temp_max[6];			/* register values */
	s8 temp_crit[6];		/* register values */
	u8 temp_status[6];		/* register values */
	u8 temp_type[6];		/* register values */
};

/* Resources discovered during Super-I/O probing, handed to the platform driver */
struct pc87427_sio_data {
	unsigned short address[2];
	u8 has_fanin;
	u8 has_fanout;
};

/*
 * Super-I/O registers and operations
 */

#define SIOREG_LDSEL	0x07	/* Logical device select */
#define SIOREG_DEVID	0x20	/* Device ID */
#define SIOREG_CF2	0x22	/* Configuration 2 */
#define SIOREG_CF3	0x23	/* Configuration 3 */
#define SIOREG_CF4	0x24	/* Configuration 4 */
#define SIOREG_CF5	0x25	/* Configuration 5 */
#define SIOREG_CFB	0x2B	/* Configuration B */
#define SIOREG_CFC	0x2C	/* Configuration C */
#define SIOREG_CFD	0x2D	/* Configuration D */
#define SIOREG_ACT	0x30	/* Device activation */
#define SIOREG_MAP	0x50	/* I/O or memory mapping */
#define SIOREG_IOBASE	0x60	/* I/O base address */

/* Logical device numbers of the two monitoring blocks (FMC and HMC) */
static const u8 logdev[2] = { 0x09, 0x14 };
static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
#define LD_FAN		0
#define LD_IN		1
#define LD_TEMP		1

/* Write one Super-I/O configuration register */
static inline void superio_outb(int sioaddr, int reg, int val)
{
	outb(reg, sioaddr);
	outb(val, sioaddr + 1);
}

/* Read one Super-I/O configuration register */
static inline int superio_inb(int sioaddr, int reg)
{
	outb(reg, sioaddr);
	return inb(sioaddr + 1);
}

/* Leave Super-I/O configuration mode */
static inline void superio_exit(int sioaddr)
{
	outb(0x02, sioaddr);
	outb(0x02, sioaddr + 1);
}

/*
 * Logical devices
 */

#define REGION_LENGTH		32
#define PC87427_REG_BANK	0x0f
/* Bank selectors: fan monitor, fan thresholds, fan control, temp, voltage */
#define BANK_FM(nr)		(nr)
#define BANK_FT(nr)		(0x08 + (nr))
#define BANK_FC(nr)		(0x10 + (nr) * 2)
#define BANK_TM(nr)		(nr)
#define BANK_VM(nr)		(0x08 + (nr))

/*
 * I/O access functions
 */

/* ldi is the logical device index */
static inline int pc87427_read8(struct pc87427_data *data, u8 ldi, u8 reg)
{
	return inb(data->address[ldi] + reg);
}

/* Must be called with data->lock held, except during init */
static inline int pc87427_read8_bank(struct pc87427_data *data, u8 ldi,
				     u8 bank, u8 reg)
{
	outb(bank, data->address[ldi] + PC87427_REG_BANK);
	return inb(data->address[ldi] + reg);
}

/* Must be called with data->lock held, except during init */
static inline void pc87427_write8_bank(struct pc87427_data *data, u8 ldi,
				       u8 bank, u8 reg, u8 value)
{
	outb(bank, data->address[ldi] + PC87427_REG_BANK);
	outb(value, data->address[ldi] + reg);
}

/*
 * Fan registers and conversions
 */

/* fan data registers are 16-bit wide */
#define PC87427_REG_FAN		0x12
#define PC87427_REG_FAN_MIN	0x14
#define PC87427_REG_FAN_STATUS	0x10

#define FAN_STATUS_STALL	(1 << 3)
#define FAN_STATUS_LOSPD	(1 << 1)
#define FAN_STATUS_MONEN	(1 << 0)

/*
 * Dedicated function to read all registers related to a given fan input.
 * This saves us quite a few locks and bank selections.
 * Must be called with data->lock held.
 * nr is from 0 to 7
 */
static void pc87427_readall_fan(struct pc87427_data *data, u8 nr)
{
	int iobase = data->address[LD_FAN];

	outb(BANK_FM(nr), iobase + PC87427_REG_BANK);
	data->fan[nr] = inw(iobase + PC87427_REG_FAN);
	data->fan_min[nr] = inw(iobase + PC87427_REG_FAN_MIN);
	data->fan_status[nr] = inb(iobase + PC87427_REG_FAN_STATUS);
	/* Clear fan alarm bits */
	outb(data->fan_status[nr], iobase + PC87427_REG_FAN_STATUS);
}

/*
 * The 2 LSB of fan speed registers are used for something different.
 * The actual 2 LSB of the measurements are not available.
 */
static inline unsigned long fan_from_reg(u16 reg)
{
	reg &= 0xfffc;
	/* 0x0000 and 0xfffc are "no reading" sentinels */
	if (reg == 0x0000 || reg == 0xfffc)
		return 0;
	return 5400000UL / reg;
}

/* The 2 LSB of the fan speed limit registers are not significant. */
static inline u16 fan_to_reg(unsigned long val)
{
	if (val < 83UL)
		return 0xffff;
	if (val >= 1350000UL)
		return 0x0004;
	return ((1350000UL + val / 2) / val) << 2;
}

/*
 * PWM registers and conversions
 */

#define PC87427_REG_PWM_ENABLE	0x10
#define PC87427_REG_PWM_DUTY	0x12

#define PWM_ENABLE_MODE_MASK	(7 << 4)
#define PWM_ENABLE_CTLEN	(1 << 0)

#define PWM_MODE_MANUAL		(0 << 4)
#define PWM_MODE_AUTO		(1 << 4)
#define PWM_MODE_OFF		(2 << 4)
#define PWM_MODE_ON		(7 << 4)

/*
 * Dedicated function to read all registers related to a given PWM output.
 * This saves us quite a few locks and bank selections.
 * Must be called with data->lock held.
 * nr is from 0 to 3
 */
static void pc87427_readall_pwm(struct pc87427_data *data, u8 nr)
{
	int iobase = data->address[LD_FAN];

	outb(BANK_FC(nr), iobase + PC87427_REG_BANK);
	data->pwm_enable[nr] = inb(iobase + PC87427_REG_PWM_ENABLE);
	data->pwm[nr] = inb(iobase + PC87427_REG_PWM_DUTY);
}

/* Map the hardware PWM mode to the sysfs pwmN_enable convention */
static inline int pwm_enable_from_reg(u8 reg)
{
	switch (reg & PWM_ENABLE_MODE_MASK) {
	case PWM_MODE_ON:
		return 0;
	case PWM_MODE_MANUAL:
	case PWM_MODE_OFF:
		return 1;
	case PWM_MODE_AUTO:
		return 2;
	default:
		return -EPROTO;
	}
}

/*
 * Map a sysfs pwmN_enable value back to a hardware mode; a manual
 * duty cycle of 0 is represented as PWM_MODE_OFF.
 */
static inline u8 pwm_enable_to_reg(unsigned long val, u8 pwmval)
{
	switch (val) {
	default:
		return PWM_MODE_ON;
	case 1:
		return pwmval ? PWM_MODE_MANUAL : PWM_MODE_OFF;
	case 2:
		return PWM_MODE_AUTO;
	}
}

/*
 * Temperature registers and conversions
 */

#define PC87427_REG_TEMP_STATUS		0x10
#define PC87427_REG_TEMP		0x14
#define PC87427_REG_TEMP_MAX		0x18
#define PC87427_REG_TEMP_MIN		0x19
#define PC87427_REG_TEMP_CRIT		0x1a
#define PC87427_REG_TEMP_TYPE		0x1d

#define TEMP_STATUS_CHANEN	(1 << 0)
#define TEMP_STATUS_LOWFLG	(1 << 1)
#define TEMP_STATUS_HIGHFLG	(1 << 2)
#define TEMP_STATUS_CRITFLG	(1 << 3)
#define TEMP_STATUS_SENSERR	(1 << 5)
#define TEMP_TYPE_MASK		(3 << 5)

#define TEMP_TYPE_THERMISTOR	(1 << 5)
#define TEMP_TYPE_REMOTE_DIODE	(2 << 5)
#define TEMP_TYPE_LOCAL_DIODE	(3 << 5)

/*
 * Dedicated function to read all registers related to a given temperature
 * input. This saves us quite a few locks and bank selections.
 * Must be called with data->lock held.
 * nr is from 0 to 5
 */
static void pc87427_readall_temp(struct pc87427_data *data, u8 nr)
{
	int iobase = data->address[LD_TEMP];

	outb(BANK_TM(nr), iobase + PC87427_REG_BANK);
	data->temp[nr] = le16_to_cpu(inw(iobase + PC87427_REG_TEMP));
	data->temp_max[nr] = inb(iobase + PC87427_REG_TEMP_MAX);
	data->temp_min[nr] = inb(iobase + PC87427_REG_TEMP_MIN);
	data->temp_crit[nr] = inb(iobase + PC87427_REG_TEMP_CRIT);
	data->temp_type[nr] = inb(iobase + PC87427_REG_TEMP_TYPE);
	data->temp_status[nr] = inb(iobase + PC87427_REG_TEMP_STATUS);
	/* Clear temperature alarm bits */
	outb(data->temp_status[nr], iobase + PC87427_REG_TEMP_STATUS);
}

/* Map the hardware sensor type to the sysfs tempN_type convention */
static inline unsigned int temp_type_from_reg(u8 reg)
{
	switch (reg & TEMP_TYPE_MASK) {
	case TEMP_TYPE_THERMISTOR:
		return 4;
	case TEMP_TYPE_REMOTE_DIODE:
	case TEMP_TYPE_LOCAL_DIODE:
		return 3;
	default:
		return 0;
	}
}

/*
 * We assume 8-bit thermal sensors; 9-bit thermal sensors are possible
 * too, but I have no idea how to figure out when they are used.
 */
static inline long temp_from_reg(s16 reg)
{
	return reg * 1000 / 256;
}

static inline long temp_from_reg8(s8 reg)
{
	return reg * 1000;
}

/*
 * Data interface
 */

/*
 * Refresh the register cache for all enabled fan, PWM and temperature
 * channels, at most once per second, under data->lock.
 */
static struct pc87427_data *pc87427_update_device(struct device *dev)
{
	struct pc87427_data *data = dev_get_drvdata(dev);
	int i;

	mutex_lock(&data->lock);
	if (!time_after(jiffies, data->last_updated + HZ)
	 && data->last_updated)
		goto done;

	/* Fans */
	for (i = 0; i < 8; i++) {
		if (!(data->fan_enabled & (1 << i)))
			continue;
		pc87427_readall_fan(data, i);
	}

	/* PWM outputs */
	for (i = 0; i < 4; i++) {
		if (!(data->pwm_enabled & (1 << i)))
			continue;
		pc87427_readall_pwm(data, i);
	}

	/* Temperature channels */
	for (i = 0; i < 6; i++) {
		if (!(data->temp_enabled & (1 << i)))
			continue;
		pc87427_readall_temp(data, i);
	}

	data->last_updated = jiffies;

done:
	mutex_unlock(&data->lock);
	return data;
}

/* sysfs show callback for fanN_input */
static ssize_t show_fan_input(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct pc87427_data *data = pc87427_update_device(dev);
	int nr = to_sensor_dev_attr(devattr)->index;

	return sprintf(buf, "%lu\n", fan_from_reg(data->fan[nr]));
}

/* sysfs show callback for fanN_min */
static ssize_t show_fan_min(struct device *dev,
			    struct device_attribute *devattr, char *buf)
{
	struct pc87427_data *data = pc87427_update_device(dev);
	int nr = to_sensor_dev_attr(devattr)->index;

	return sprintf(buf, "%lu\n", fan_from_reg(data->fan_min[nr]));
}

/* sysfs show callback for fanN_alarm (low speed flag) */
static ssize_t show_fan_alarm(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct pc87427_data *data = pc87427_update_device(dev);
	int nr = to_sensor_dev_attr(devattr)->index;

	return sprintf(buf, "%d\n", !!(data->fan_status[nr]
				       & FAN_STATUS_LOSPD));
}

/* sysfs show callback for fanN_fault (stall flag) */
static ssize_t show_fan_fault(struct device *dev,
			      struct device_attribute *devattr, char *buf)
{
	struct pc87427_data *data = pc87427_update_device(dev);
	int nr = to_sensor_dev_attr(devattr)->index;

	return sprintf(buf, "%d\n", !!(data->fan_status[nr]
				       & FAN_STATUS_STALL));
}

/* sysfs store callback for fanN_min */
static ssize_t set_fan_min(struct device *dev,
			   struct device_attribute *devattr,
			   const char *buf, size_t count)
{
	struct pc87427_data *data = dev_get_drvdata(dev);
	int nr = to_sensor_dev_attr(devattr)->index;
	unsigned long val;
	int iobase = data->address[LD_FAN];

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	mutex_lock(&data->lock);
	outb(BANK_FM(nr), iobase + PC87427_REG_BANK);
	/*
	 * The low speed limit registers are read-only while monitoring
	 * is enabled, so we have to disable monitoring, then change the
	 * limit, and finally enable monitoring again.
	 */
	outb(0, iobase + PC87427_REG_FAN_STATUS);
	data->fan_min[nr] = fan_to_reg(val);
	outw(data->fan_min[nr], iobase + PC87427_REG_FAN_MIN);
	outb(FAN_STATUS_MONEN, iobase + PC87427_REG_FAN_STATUS);
	mutex_unlock(&data->lock);

	return count;
}

static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3);
static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan_input, NULL, 4);
static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan_input, NULL, 5);
static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan_input, NULL, 6);
static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan_input, NULL, 7);

static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 0);
static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 2);
static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 3);
static SENSOR_DEVICE_ATTR(fan5_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 4);
static SENSOR_DEVICE_ATTR(fan6_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 5);
static SENSOR_DEVICE_ATTR(fan7_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 6);
static SENSOR_DEVICE_ATTR(fan8_min, S_IWUSR | S_IRUGO,
			  show_fan_min, set_fan_min, 7);

static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_fan_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_fan_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_fan_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_fan_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_fan_alarm, NULL, 7);

static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_fan_fault, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_fault, S_IRUGO, show_fan_fault, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_fault, S_IRUGO, show_fan_fault, NULL, 3);
static SENSOR_DEVICE_ATTR(fan5_fault, S_IRUGO, show_fan_fault, NULL, 4);
static SENSOR_DEVICE_ATTR(fan6_fault, S_IRUGO, show_fan_fault, NULL, 5);
static SENSOR_DEVICE_ATTR(fan7_fault, S_IRUGO, show_fan_fault, NULL, 6);
static SENSOR_DEVICE_ATTR(fan8_fault, S_IRUGO, show_fan_fault, NULL, 7);

/* Per-fan attribute groups; one NULL-terminated list per fan input */
static struct attribute *pc87427_attributes_fan[8][5] = {
	{
		&sensor_dev_attr_fan1_input.dev_attr.attr,
		&sensor_dev_attr_fan1_min.dev_attr.attr,
		&sensor_dev_attr_fan1_alarm.dev_attr.attr,
		&sensor_dev_attr_fan1_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_fan2_input.dev_attr.attr,
		&sensor_dev_attr_fan2_min.dev_attr.attr,
		&sensor_dev_attr_fan2_alarm.dev_attr.attr,
		&sensor_dev_attr_fan2_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_fan3_input.dev_attr.attr,
		&sensor_dev_attr_fan3_min.dev_attr.attr,
		&sensor_dev_attr_fan3_alarm.dev_attr.attr,
		&sensor_dev_attr_fan3_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_fan4_input.dev_attr.attr,
		&sensor_dev_attr_fan4_min.dev_attr.attr,
		&sensor_dev_attr_fan4_alarm.dev_attr.attr,
		&sensor_dev_attr_fan4_fault.dev_attr.attr,
		NULL
	}, {
		&sensor_dev_attr_fan5_input.dev_attr.attr,
&sensor_dev_attr_fan5_min.dev_attr.attr, &sensor_dev_attr_fan5_alarm.dev_attr.attr, &sensor_dev_attr_fan5_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_fan6_input.dev_attr.attr, &sensor_dev_attr_fan6_min.dev_attr.attr, &sensor_dev_attr_fan6_alarm.dev_attr.attr, &sensor_dev_attr_fan6_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_fan7_input.dev_attr.attr, &sensor_dev_attr_fan7_min.dev_attr.attr, &sensor_dev_attr_fan7_alarm.dev_attr.attr, &sensor_dev_attr_fan7_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_fan8_input.dev_attr.attr, &sensor_dev_attr_fan8_min.dev_attr.attr, &sensor_dev_attr_fan8_alarm.dev_attr.attr, &sensor_dev_attr_fan8_fault.dev_attr.attr, NULL } }; static const struct attribute_group pc87427_group_fan[8] = { { .attrs = pc87427_attributes_fan[0] }, { .attrs = pc87427_attributes_fan[1] }, { .attrs = pc87427_attributes_fan[2] }, { .attrs = pc87427_attributes_fan[3] }, { .attrs = pc87427_attributes_fan[4] }, { .attrs = pc87427_attributes_fan[5] }, { .attrs = pc87427_attributes_fan[6] }, { .attrs = pc87427_attributes_fan[7] }, }; /* * Must be called with data->lock held and pc87427_readall_pwm() freshly * called */ static void update_pwm_enable(struct pc87427_data *data, int nr, u8 mode) { int iobase = data->address[LD_FAN]; data->pwm_enable[nr] &= ~PWM_ENABLE_MODE_MASK; data->pwm_enable[nr] |= mode; outb(data->pwm_enable[nr], iobase + PC87427_REG_PWM_ENABLE); } static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; int pwm_enable; pwm_enable = pwm_enable_from_reg(data->pwm_enable[nr]); if (pwm_enable < 0) return pwm_enable; return sprintf(buf, "%d\n", pwm_enable); } static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct pc87427_data *data = dev_get_drvdata(dev); int nr = to_sensor_dev_attr(devattr)->index; unsigned long val; if 
(kstrtoul(buf, 10, &val) < 0 || val > 2) return -EINVAL; /* Can't go to automatic mode if it isn't configured */ if (val == 2 && !(data->pwm_auto_ok & (1 << nr))) return -EINVAL; mutex_lock(&data->lock); pc87427_readall_pwm(data, nr); update_pwm_enable(data, nr, pwm_enable_to_reg(val, data->pwm[nr])); mutex_unlock(&data->lock); return count; } static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", (int)data->pwm[nr]); } static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct pc87427_data *data = dev_get_drvdata(dev); int nr = to_sensor_dev_attr(devattr)->index; unsigned long val; int iobase = data->address[LD_FAN]; u8 mode; if (kstrtoul(buf, 10, &val) < 0 || val > 0xff) return -EINVAL; mutex_lock(&data->lock); pc87427_readall_pwm(data, nr); mode = data->pwm_enable[nr] & PWM_ENABLE_MODE_MASK; if (mode != PWM_MODE_MANUAL && mode != PWM_MODE_OFF) { dev_notice(dev, "Can't set PWM%d duty cycle while not in " "manual mode\n", nr + 1); mutex_unlock(&data->lock); return -EPERM; } /* We may have to change the mode */ if (mode == PWM_MODE_MANUAL && val == 0) { /* Transition from Manual to Off */ update_pwm_enable(data, nr, PWM_MODE_OFF); mode = PWM_MODE_OFF; dev_dbg(dev, "Switching PWM%d from %s to %s\n", nr + 1, "manual", "off"); } else if (mode == PWM_MODE_OFF && val != 0) { /* Transition from Off to Manual */ update_pwm_enable(data, nr, PWM_MODE_MANUAL); mode = PWM_MODE_MANUAL; dev_dbg(dev, "Switching PWM%d from %s to %s\n", nr + 1, "off", "manual"); } data->pwm[nr] = val; if (mode == PWM_MODE_MANUAL) outb(val, iobase + PC87427_REG_PWM_DUTY); mutex_unlock(&data->lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable, set_pwm_enable, 0); static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, 
show_pwm_enable, set_pwm_enable, 1); static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable, set_pwm_enable, 2); static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable, set_pwm_enable, 3); static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0); static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1); static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2); static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 3); static struct attribute *pc87427_attributes_pwm[4][3] = { { &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, NULL }, { &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, NULL }, { &sensor_dev_attr_pwm3_enable.dev_attr.attr, &sensor_dev_attr_pwm3.dev_attr.attr, NULL }, { &sensor_dev_attr_pwm4_enable.dev_attr.attr, &sensor_dev_attr_pwm4.dev_attr.attr, NULL } }; static const struct attribute_group pc87427_group_pwm[4] = { { .attrs = pc87427_attributes_pwm[0] }, { .attrs = pc87427_attributes_pwm[1] }, { .attrs = pc87427_attributes_pwm[2] }, { .attrs = pc87427_attributes_pwm[3] }, }; static ssize_t show_temp_input(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%ld\n", temp_from_reg(data->temp[nr])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_min[nr])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_max[nr])); } static ssize_t show_temp_crit(struct 
device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%ld\n", temp_from_reg8(data->temp_crit[nr])); } static ssize_t show_temp_type(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%u\n", temp_type_from_reg(data->temp_type[nr])); } static ssize_t show_temp_min_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", !!(data->temp_status[nr] & TEMP_STATUS_LOWFLG)); } static ssize_t show_temp_max_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", !!(data->temp_status[nr] & TEMP_STATUS_HIGHFLG)); } static ssize_t show_temp_crit_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", !!(data->temp_status[nr] & TEMP_STATUS_CRITFLG)); } static ssize_t show_temp_fault(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = pc87427_update_device(dev); int nr = to_sensor_dev_attr(devattr)->index; return sprintf(buf, "%d\n", !!(data->temp_status[nr] & TEMP_STATUS_SENSERR)); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 
4); static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_min, S_IRUGO, show_temp_min, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_max, S_IRUGO, show_temp_max, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp_crit, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, show_temp_crit, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp_crit, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_crit, S_IRUGO, show_temp_crit, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_crit, S_IRUGO, show_temp_crit, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, show_temp_type, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_type, S_IRUGO, show_temp_type, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_type, S_IRUGO, show_temp_type, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_type, S_IRUGO, show_temp_type, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 1); static 
SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_min_alarm, S_IRUGO, show_temp_min_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO, show_temp_max_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO, show_temp_crit_alarm, NULL, 5); static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2); static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3); static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4); static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5); static struct attribute *pc87427_attributes_temp[6][10] = { { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, 
&sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_type.dev_attr.attr, &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp1_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_crit.dev_attr.attr, &sensor_dev_attr_temp2_type.dev_attr.attr, &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_crit.dev_attr.attr, &sensor_dev_attr_temp3_type.dev_attr.attr, &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_temp4_input.dev_attr.attr, &sensor_dev_attr_temp4_min.dev_attr.attr, &sensor_dev_attr_temp4_max.dev_attr.attr, &sensor_dev_attr_temp4_crit.dev_attr.attr, &sensor_dev_attr_temp4_type.dev_attr.attr, &sensor_dev_attr_temp4_min_alarm.dev_attr.attr, &sensor_dev_attr_temp4_max_alarm.dev_attr.attr, &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp4_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_temp5_input.dev_attr.attr, &sensor_dev_attr_temp5_min.dev_attr.attr, &sensor_dev_attr_temp5_max.dev_attr.attr, &sensor_dev_attr_temp5_crit.dev_attr.attr, &sensor_dev_attr_temp5_type.dev_attr.attr, &sensor_dev_attr_temp5_min_alarm.dev_attr.attr, &sensor_dev_attr_temp5_max_alarm.dev_attr.attr, &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp5_fault.dev_attr.attr, NULL }, { &sensor_dev_attr_temp6_input.dev_attr.attr, 
&sensor_dev_attr_temp6_min.dev_attr.attr, &sensor_dev_attr_temp6_max.dev_attr.attr, &sensor_dev_attr_temp6_crit.dev_attr.attr, &sensor_dev_attr_temp6_type.dev_attr.attr, &sensor_dev_attr_temp6_min_alarm.dev_attr.attr, &sensor_dev_attr_temp6_max_alarm.dev_attr.attr, &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp6_fault.dev_attr.attr, NULL } }; static const struct attribute_group pc87427_group_temp[6] = { { .attrs = pc87427_attributes_temp[0] }, { .attrs = pc87427_attributes_temp[1] }, { .attrs = pc87427_attributes_temp[2] }, { .attrs = pc87427_attributes_temp[3] }, { .attrs = pc87427_attributes_temp[4] }, { .attrs = pc87427_attributes_temp[5] }, }; static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct pc87427_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); /* * Device detection, attach and detach */ static void pc87427_release_regions(struct platform_device *pdev, int count) { struct resource *res; int i; for (i = 0; i < count; i++) { res = platform_get_resource(pdev, IORESOURCE_IO, i); release_region(res->start, resource_size(res)); } } static int __devinit pc87427_request_regions(struct platform_device *pdev, int count) { struct resource *res; int i, err = 0; for (i = 0; i < count; i++) { res = platform_get_resource(pdev, IORESOURCE_IO, i); if (!res) { err = -ENOENT; dev_err(&pdev->dev, "Missing resource #%d\n", i); break; } if (!request_region(res->start, resource_size(res), DRVNAME)) { err = -EBUSY; dev_err(&pdev->dev, "Failed to request region 0x%lx-0x%lx\n", (unsigned long)res->start, (unsigned long)res->end); break; } } if (err && i) pc87427_release_regions(pdev, i); return err; } static void __devinit pc87427_init_device(struct device *dev) { struct pc87427_sio_data *sio_data = dev->platform_data; struct pc87427_data *data = dev_get_drvdata(dev); int i; u8 reg; /* The FMC module should be ready */ reg = 
pc87427_read8(data, LD_FAN, PC87427_REG_BANK); if (!(reg & 0x80)) dev_warn(dev, "%s module not ready!\n", "FMC"); /* Check which fans are enabled */ for (i = 0; i < 8; i++) { if (!(sio_data->has_fanin & (1 << i))) /* Not wired */ continue; reg = pc87427_read8_bank(data, LD_FAN, BANK_FM(i), PC87427_REG_FAN_STATUS); if (reg & FAN_STATUS_MONEN) data->fan_enabled |= (1 << i); } if (!data->fan_enabled) { dev_dbg(dev, "Enabling monitoring of all fans\n"); for (i = 0; i < 8; i++) { if (!(sio_data->has_fanin & (1 << i))) /* Not wired */ continue; pc87427_write8_bank(data, LD_FAN, BANK_FM(i), PC87427_REG_FAN_STATUS, FAN_STATUS_MONEN); } data->fan_enabled = sio_data->has_fanin; } /* Check which PWM outputs are enabled */ for (i = 0; i < 4; i++) { if (!(sio_data->has_fanout & (1 << i))) /* Not wired */ continue; reg = pc87427_read8_bank(data, LD_FAN, BANK_FC(i), PC87427_REG_PWM_ENABLE); if (reg & PWM_ENABLE_CTLEN) data->pwm_enabled |= (1 << i); /* * We don't expose an interface to reconfigure the automatic * fan control mode, so only allow to return to this mode if * it was originally set. 
*/ if ((reg & PWM_ENABLE_MODE_MASK) == PWM_MODE_AUTO) { dev_dbg(dev, "PWM%d is in automatic control mode\n", i + 1); data->pwm_auto_ok |= (1 << i); } } /* The HMC module should be ready */ reg = pc87427_read8(data, LD_TEMP, PC87427_REG_BANK); if (!(reg & 0x80)) dev_warn(dev, "%s module not ready!\n", "HMC"); /* Check which temperature channels are enabled */ for (i = 0; i < 6; i++) { reg = pc87427_read8_bank(data, LD_TEMP, BANK_TM(i), PC87427_REG_TEMP_STATUS); if (reg & TEMP_STATUS_CHANEN) data->temp_enabled |= (1 << i); } } static void pc87427_remove_files(struct device *dev) { struct pc87427_data *data = dev_get_drvdata(dev); int i; device_remove_file(dev, &dev_attr_name); for (i = 0; i < 8; i++) { if (!(data->fan_enabled & (1 << i))) continue; sysfs_remove_group(&dev->kobj, &pc87427_group_fan[i]); } for (i = 0; i < 4; i++) { if (!(data->pwm_enabled & (1 << i))) continue; sysfs_remove_group(&dev->kobj, &pc87427_group_pwm[i]); } for (i = 0; i < 6; i++) { if (!(data->temp_enabled & (1 << i))) continue; sysfs_remove_group(&dev->kobj, &pc87427_group_temp[i]); } } static int __devinit pc87427_probe(struct platform_device *pdev) { struct pc87427_sio_data *sio_data = pdev->dev.platform_data; struct pc87427_data *data; int i, err, res_count; data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL); if (!data) { err = -ENOMEM; pr_err("Out of memory\n"); goto exit; } data->address[0] = sio_data->address[0]; data->address[1] = sio_data->address[1]; res_count = (data->address[0] != 0) + (data->address[1] != 0); err = pc87427_request_regions(pdev, res_count); if (err) goto exit_kfree; mutex_init(&data->lock); data->name = "pc87427"; platform_set_drvdata(pdev, data); pc87427_init_device(&pdev->dev); /* Register sysfs hooks */ err = device_create_file(&pdev->dev, &dev_attr_name); if (err) goto exit_release_region; for (i = 0; i < 8; i++) { if (!(data->fan_enabled & (1 << i))) continue; err = sysfs_create_group(&pdev->dev.kobj, &pc87427_group_fan[i]); if (err) goto 
exit_remove_files; } for (i = 0; i < 4; i++) { if (!(data->pwm_enabled & (1 << i))) continue; err = sysfs_create_group(&pdev->dev.kobj, &pc87427_group_pwm[i]); if (err) goto exit_remove_files; } for (i = 0; i < 6; i++) { if (!(data->temp_enabled & (1 << i))) continue; err = sysfs_create_group(&pdev->dev.kobj, &pc87427_group_temp[i]); if (err) goto exit_remove_files; } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); dev_err(&pdev->dev, "Class registration failed (%d)\n", err); goto exit_remove_files; } return 0; exit_remove_files: pc87427_remove_files(&pdev->dev); exit_release_region: pc87427_release_regions(pdev, res_count); exit_kfree: platform_set_drvdata(pdev, NULL); kfree(data); exit: return err; } static int __devexit pc87427_remove(struct platform_device *pdev) { struct pc87427_data *data = platform_get_drvdata(pdev); int res_count; res_count = (data->address[0] != 0) + (data->address[1] != 0); hwmon_device_unregister(data->hwmon_dev); pc87427_remove_files(&pdev->dev); platform_set_drvdata(pdev, NULL); kfree(data); pc87427_release_regions(pdev, res_count); return 0; } static struct platform_driver pc87427_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = pc87427_probe, .remove = __devexit_p(pc87427_remove), }; static int __init pc87427_device_add(const struct pc87427_sio_data *sio_data) { struct resource res[2] = { { .flags = IORESOURCE_IO }, { .flags = IORESOURCE_IO }, }; int err, i, res_count; res_count = 0; for (i = 0; i < 2; i++) { if (!sio_data->address[i]) continue; res[res_count].start = sio_data->address[i]; res[res_count].end = sio_data->address[i] + REGION_LENGTH - 1; res[res_count].name = logdev_str[i]; err = acpi_check_resource_conflict(&res[res_count]); if (err) goto exit; res_count++; } pdev = platform_device_alloc(DRVNAME, res[0].start); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } err = 
platform_device_add_resources(pdev, res, res_count); if (err) { pr_err("Device resource addition failed (%d)\n", err); goto exit_device_put; } err = platform_device_add_data(pdev, sio_data, sizeof(struct pc87427_sio_data)); if (err) { pr_err("Platform data allocation failed\n"); goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_put; } return 0; exit_device_put: platform_device_put(pdev); exit: return err; } static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data) { u16 val; u8 cfg, cfg_b; int i, err = 0; /* Identify device */ val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID); if (val != 0xf2) { /* PC87427 */ err = -ENODEV; goto exit; } for (i = 0; i < 2; i++) { sio_data->address[i] = 0; /* Select logical device */ superio_outb(sioaddr, SIOREG_LDSEL, logdev[i]); val = superio_inb(sioaddr, SIOREG_ACT); if (!(val & 0x01)) { pr_info("Logical device 0x%02x not activated\n", logdev[i]); continue; } val = superio_inb(sioaddr, SIOREG_MAP); if (val & 0x01) { pr_warn("Logical device 0x%02x is memory-mapped, " "can't use\n", logdev[i]); continue; } val = (superio_inb(sioaddr, SIOREG_IOBASE) << 8) | superio_inb(sioaddr, SIOREG_IOBASE + 1); if (!val) { pr_info("I/O base address not set for logical device " "0x%02x\n", logdev[i]); continue; } sio_data->address[i] = val; } /* No point in loading the driver if everything is disabled */ if (!sio_data->address[0] && !sio_data->address[1]) { err = -ENODEV; goto exit; } /* Check which fan inputs are wired */ sio_data->has_fanin = (1 << 2) | (1 << 3); /* FANIN2, FANIN3 */ cfg = superio_inb(sioaddr, SIOREG_CF2); if (!(cfg & (1 << 3))) sio_data->has_fanin |= (1 << 0); /* FANIN0 */ if (!(cfg & (1 << 2))) sio_data->has_fanin |= (1 << 4); /* FANIN4 */ cfg = superio_inb(sioaddr, SIOREG_CFD); if (!(cfg & (1 << 0))) sio_data->has_fanin |= (1 << 1); /* FANIN1 */ cfg = superio_inb(sioaddr, SIOREG_CF4); if (!(cfg & (1 << 0))) 
sio_data->has_fanin |= (1 << 7); /* FANIN7 */ cfg_b = superio_inb(sioaddr, SIOREG_CFB); if (!(cfg & (1 << 1)) && (cfg_b & (1 << 3))) sio_data->has_fanin |= (1 << 5); /* FANIN5 */ cfg = superio_inb(sioaddr, SIOREG_CF3); if ((cfg & (1 << 3)) && !(cfg_b & (1 << 5))) sio_data->has_fanin |= (1 << 6); /* FANIN6 */ /* Check which fan outputs are wired */ sio_data->has_fanout = (1 << 0); /* FANOUT0 */ if (cfg_b & (1 << 0)) sio_data->has_fanout |= (1 << 3); /* FANOUT3 */ cfg = superio_inb(sioaddr, SIOREG_CFC); if (!(cfg & (1 << 4))) { if (cfg_b & (1 << 1)) sio_data->has_fanout |= (1 << 1); /* FANOUT1 */ if (cfg_b & (1 << 2)) sio_data->has_fanout |= (1 << 2); /* FANOUT2 */ } /* FANOUT1 and FANOUT2 can each be routed to 2 different pins */ cfg = superio_inb(sioaddr, SIOREG_CF5); if (cfg & (1 << 6)) sio_data->has_fanout |= (1 << 1); /* FANOUT1 */ if (cfg & (1 << 5)) sio_data->has_fanout |= (1 << 2); /* FANOUT2 */ exit: superio_exit(sioaddr); return err; } static int __init pc87427_init(void) { int err; struct pc87427_sio_data sio_data; if (pc87427_find(0x2e, &sio_data) && pc87427_find(0x4e, &sio_data)) return -ENODEV; err = platform_driver_register(&pc87427_driver); if (err) goto exit; /* Sets global pdev as a side effect */ err = pc87427_device_add(&sio_data); if (err) goto exit_driver; return 0; exit_driver: platform_driver_unregister(&pc87427_driver); exit: return err; } static void __exit pc87427_exit(void) { platform_device_unregister(pdev); platform_driver_unregister(&pc87427_driver); } MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); MODULE_DESCRIPTION("PC87427 hardware monitoring driver"); MODULE_LICENSE("GPL"); module_init(pc87427_init); module_exit(pc87427_exit);
gpl-2.0
nocoast/android_kernel_lge_g2
drivers/hwmon/wm831x-hwmon.c
4857
6171
/*
 * drivers/hwmon/wm831x-hwmon.c - Wolfson Microelectronics WM831x PMIC
 * hardware monitoring features.
 *
 * Copyright (C) 2009 Wolfson Microelectronics plc
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License v2 as published by the
 * Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/slab.h>

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/auxadc.h>

/* Per-device state: parent MFD handle and the registered hwmon device */
struct wm831x_hwmon {
        struct wm831x *wm831x;
        struct device *classdev;
};

/* sysfs callback: fixed device name for the hwmon class */
static ssize_t show_name(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "wm831x\n");
}

/* Human-readable labels indexed by AUXADC channel number */
static const char * const input_names[] = {
        [WM831X_AUX_SYSVDD]    = "SYSVDD",
        [WM831X_AUX_USB]       = "USB",
        [WM831X_AUX_BKUP_BATT] = "Backup battery",
        [WM831X_AUX_BATT]      = "Battery",
        [WM831X_AUX_WALL]      = "WALL",
        [WM831X_AUX_CHIP_TEMP] = "PMIC",
        [WM831X_AUX_BATT_TEMP] = "Battery",
};

/* sysfs callback: read an AUXADC channel and report it in millivolts */
static ssize_t show_voltage(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct wm831x_hwmon *hwmon = dev_get_drvdata(dev);
        int channel = to_sensor_dev_attr(attr)->index;
        int ret;

        ret = wm831x_auxadc_read_uv(hwmon->wm831x, channel);
        if (ret < 0)
                return ret;

        /* AUXADC returns microvolts; the hwmon ABI wants millivolts */
        return sprintf(buf, "%d\n", DIV_ROUND_CLOSEST(ret, 1000));
}

/* sysfs callback: on-chip temperature, converted from the raw ADC code */
static ssize_t show_chip_temp(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct wm831x_hwmon *hwmon = dev_get_drvdata(dev);
        int channel = to_sensor_dev_attr(attr)->index;
        int ret;

        ret = wm831x_auxadc_read(hwmon->wm831x, channel);
        if (ret < 0)
                return ret;

        /* Degrees celsius = (512.18-ret) / 1.0983 */
        ret = 512180 - (ret * 1000);
        ret = DIV_ROUND_CLOSEST(ret * 10000, 10983);

        return sprintf(buf, "%d\n", ret);
}

/* sysfs callback: channel label from the input_names table */
static ssize_t show_label(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        int channel = to_sensor_dev_attr(attr)->index;

        return sprintf(buf, "%s\n", input_names[channel]);
}

/* Declare an inN_input voltage attribute for an AUXADC channel */
#define WM831X_VOLTAGE(id, name) \
        static SENSOR_DEVICE_ATTR(in##id##_input, S_IRUGO, show_voltage, \
                                  NULL, name)

/* Same, plus a matching inN_label attribute */
#define WM831X_NAMED_VOLTAGE(id, name) \
        WM831X_VOLTAGE(id, name); \
        static SENSOR_DEVICE_ATTR(in##id##_label, S_IRUGO, show_label, \
                                  NULL, name)

static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

WM831X_VOLTAGE(0, WM831X_AUX_AUX1);
WM831X_VOLTAGE(1, WM831X_AUX_AUX2);
WM831X_VOLTAGE(2, WM831X_AUX_AUX3);
WM831X_VOLTAGE(3, WM831X_AUX_AUX4);

WM831X_NAMED_VOLTAGE(4, WM831X_AUX_SYSVDD);
WM831X_NAMED_VOLTAGE(5, WM831X_AUX_USB);
WM831X_NAMED_VOLTAGE(6, WM831X_AUX_BATT);
WM831X_NAMED_VOLTAGE(7, WM831X_AUX_WALL);
WM831X_NAMED_VOLTAGE(8, WM831X_AUX_BKUP_BATT);

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_chip_temp, NULL,
                          WM831X_AUX_CHIP_TEMP);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL,
                          WM831X_AUX_CHIP_TEMP);

/*
 * Report as a voltage since conversion depends on external components
 * and that's what the ABI wants.
 */
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_voltage, NULL,
                          WM831X_AUX_BATT_TEMP);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL,
                          WM831X_AUX_BATT_TEMP);

static struct attribute *wm831x_attributes[] = {
        &dev_attr_name.attr,

        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_in2_input.dev_attr.attr,
        &sensor_dev_attr_in3_input.dev_attr.attr,

        &sensor_dev_attr_in4_input.dev_attr.attr,
        &sensor_dev_attr_in4_label.dev_attr.attr,
        &sensor_dev_attr_in5_input.dev_attr.attr,
        &sensor_dev_attr_in5_label.dev_attr.attr,
        &sensor_dev_attr_in6_input.dev_attr.attr,
        &sensor_dev_attr_in6_label.dev_attr.attr,
        &sensor_dev_attr_in7_input.dev_attr.attr,
        &sensor_dev_attr_in7_label.dev_attr.attr,
        &sensor_dev_attr_in8_input.dev_attr.attr,
        &sensor_dev_attr_in8_label.dev_attr.attr,

        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_label.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        &sensor_dev_attr_temp2_label.dev_attr.attr,

        NULL
};

static const struct attribute_group wm831x_attr_group = {
        .attrs  = wm831x_attributes,
};

/*
 * Platform driver probe: allocate state, create the sysfs group and
 * register with the hwmon class.  Errors unwind via goto labels.
 */
static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
{
        struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
        struct wm831x_hwmon *hwmon;
        int ret;

        hwmon = kzalloc(sizeof(struct wm831x_hwmon), GFP_KERNEL);
        if (!hwmon)
                return -ENOMEM;

        hwmon->wm831x = wm831x;

        ret = sysfs_create_group(&pdev->dev.kobj, &wm831x_attr_group);
        if (ret)
                goto err;

        hwmon->classdev = hwmon_device_register(&pdev->dev);
        if (IS_ERR(hwmon->classdev)) {
                ret = PTR_ERR(hwmon->classdev);
                goto err_sysfs;
        }

        platform_set_drvdata(pdev, hwmon);

        return 0;

err_sysfs:
        sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group);
err:
        kfree(hwmon);
        return ret;
}

/* Platform driver remove: tear down in reverse order of probe */
static int __devexit wm831x_hwmon_remove(struct platform_device *pdev)
{
        struct wm831x_hwmon *hwmon = platform_get_drvdata(pdev);

        hwmon_device_unregister(hwmon->classdev);
        sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group);
        platform_set_drvdata(pdev, NULL);
        kfree(hwmon);

        return 0;
}

static struct platform_driver wm831x_hwmon_driver = {
        .probe = wm831x_hwmon_probe,
        .remove = __devexit_p(wm831x_hwmon_remove),
        .driver = {
                .name = "wm831x-hwmon",
                .owner = THIS_MODULE,
        },
};

module_platform_driver(wm831x_hwmon_driver);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x Hardware Monitoring");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-hwmon");
gpl-2.0
alexax66/CM13_kernel_serranodsxx
arch/arm/mach-rpc/ecard.c
4857
26053
/* * linux/arch/arm/kernel/ecard.c * * Copyright 1995-2001 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Find all installed expansion cards, and handle interrupts from them. * * Created from information from Acorns RiscOS3 PRMs * * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether * podule slot. * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work. * 12-Sep-1997 RMK Created new handling of interrupt enables/disables * - cards can now register their own routine to control * interrupts (recommended). * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled * on reset from Linux. (Caused cards not to respond * under RiscOS without hard reset). * 15-Feb-1998 RMK Added DMA support * 12-Sep-1998 RMK Added EASI support * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment. * 17-Apr-1999 RMK Support for EASI Type C cycles. 
*/ #define ECARD_C #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/completion.h> #include <linux/reboot.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/irq.h> #include <linux/io.h> #include <asm/dma.h> #include <asm/ecard.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/mach/irq.h> #include <asm/tlbflush.h> #include "ecard.h" struct ecard_request { void (*fn)(struct ecard_request *); ecard_t *ec; unsigned int address; unsigned int length; unsigned int use_loader; void *buffer; struct completion *complete; }; struct expcard_blacklist { unsigned short manufacturer; unsigned short product; const char *type; }; static ecard_t *cards; static ecard_t *slot_to_expcard[MAX_ECARDS]; static unsigned int ectcr; /* List of descriptions of cards which don't have an extended * identification, or chunk directories containing a description. */ static struct expcard_blacklist __initdata blacklist[] = { { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" } }; asmlinkage extern int ecard_loader_reset(unsigned long base, loader_t loader); asmlinkage extern int ecard_loader_read(int off, unsigned long base, loader_t loader); static inline unsigned short ecard_getu16(unsigned char *v) { return v[0] | v[1] << 8; } static inline signed long ecard_gets24(unsigned char *v) { return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0); } static inline ecard_t *slot_to_ecard(unsigned int slot) { return slot < MAX_ECARDS ? 
slot_to_expcard[slot] : NULL; } /* ===================== Expansion card daemon ======================== */ /* * Since the loader programs on the expansion cards need to be run * in a specific environment, create a separate task with this * environment up, and pass requests to this task as and when we * need to. * * This should allow 99% of loaders to be called from Linux. * * From a security standpoint, we trust the card vendors. This * may be a misplaced trust. */ static void ecard_task_reset(struct ecard_request *req) { struct expansion_card *ec = req->ec; struct resource *res; res = ec->slot_no == 8 ? &ec->resource[ECARD_RES_MEMC] : ec->easi ? &ec->resource[ECARD_RES_EASI] : &ec->resource[ECARD_RES_IOCSYNC]; ecard_loader_reset(res->start, ec->loader); } static void ecard_task_readbytes(struct ecard_request *req) { struct expansion_card *ec = req->ec; unsigned char *buf = req->buffer; unsigned int len = req->length; unsigned int off = req->address; if (ec->slot_no == 8) { void __iomem *base = (void __iomem *) ec->resource[ECARD_RES_MEMC].start; /* * The card maintains an index which increments the address * into a 4096-byte page on each access. We need to keep * track of the counter. */ static unsigned int index; unsigned int page; page = (off >> 12) * 4; if (page > 256 * 4) return; off &= 4095; /* * If we are reading offset 0, or our current index is * greater than the offset, reset the hardware index counter. */ if (off == 0 || index > off) { writeb(0, base); index = 0; } /* * Increment the hardware index counter until we get to the * required offset. The read bytes are discarded. */ while (index < off) { readb(base + page); index += 1; } while (len--) { *buf++ = readb(base + page); index += 1; } } else { unsigned long base = (ec->easi ? 
&ec->resource[ECARD_RES_EASI] : &ec->resource[ECARD_RES_IOCSYNC])->start; void __iomem *pbase = (void __iomem *)base; if (!req->use_loader || !ec->loader) { off *= 4; while (len--) { *buf++ = readb(pbase + off); off += 4; } } else { while(len--) { /* * The following is required by some * expansion card loader programs. */ *(unsigned long *)0x108 = 0; *buf++ = ecard_loader_read(off++, base, ec->loader); } } } } static DECLARE_WAIT_QUEUE_HEAD(ecard_wait); static struct ecard_request *ecard_req; static DEFINE_MUTEX(ecard_mutex); /* * Set up the expansion card daemon's page tables. */ static void ecard_init_pgtables(struct mm_struct *mm) { struct vm_area_struct vma; /* We want to set up the page tables for the following mapping: * Virtual Physical * 0x03000000 0x03000000 * 0x03010000 unmapped * 0x03210000 0x03210000 * 0x03400000 unmapped * 0x08000000 0x08000000 * 0x10000000 unmapped * * FIXME: we don't follow this 100% yet. */ pgd_t *src_pgd, *dst_pgd; src_pgd = pgd_offset(mm, (unsigned long)IO_BASE); dst_pgd = pgd_offset(mm, IO_START); memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE)); src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE); dst_pgd = pgd_offset(mm, EASI_START); memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); vma.vm_flags = VM_EXEC; vma.vm_mm = mm; flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE); } static int ecard_init_mm(void) { struct mm_struct * mm = mm_alloc(); struct mm_struct *active_mm = current->active_mm; if (!mm) return -ENOMEM; current->mm = mm; current->active_mm = mm; activate_mm(active_mm, mm); mmdrop(active_mm); ecard_init_pgtables(mm); return 0; } static int ecard_task(void * unused) { /* * Allocate a mm. We're not a lazy-TLB kernel task since we need * to set page table entries where the user space would be. Note * that this also creates the page tables. Failure is not an * option here. 
*/ if (ecard_init_mm()) panic("kecardd: unable to alloc mm\n"); while (1) { struct ecard_request *req; wait_event_interruptible(ecard_wait, ecard_req != NULL); req = xchg(&ecard_req, NULL); if (req != NULL) { req->fn(req); complete(req->complete); } } } /* * Wake the expansion card daemon to action our request. * * FIXME: The test here is not sufficient to detect if the * kcardd is running. */ static void ecard_call(struct ecard_request *req) { DECLARE_COMPLETION_ONSTACK(completion); req->complete = &completion; mutex_lock(&ecard_mutex); ecard_req = req; wake_up(&ecard_wait); /* * Now wait for kecardd to run. */ wait_for_completion(&completion); mutex_unlock(&ecard_mutex); } /* ======================= Mid-level card control ===================== */ static void ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld) { struct ecard_request req; req.fn = ecard_task_readbytes; req.ec = ec; req.address = off; req.length = len; req.use_loader = useld; req.buffer = addr; ecard_call(&req); } int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num) { struct ex_chunk_dir excd; int index = 16; int useld = 0; if (!ec->cid.cd) return 0; while(1) { ecard_readbytes(&excd, ec, index, 8, useld); index += 8; if (c_id(&excd) == 0) { if (!useld && ec->loader) { useld = 1; index = 0; continue; } return 0; } if (c_id(&excd) == 0xf0) { /* link */ index = c_start(&excd); continue; } if (c_id(&excd) == 0x80) { /* loader */ if (!ec->loader) { ec->loader = kmalloc(c_len(&excd), GFP_KERNEL); if (ec->loader) ecard_readbytes(ec->loader, ec, (int)c_start(&excd), c_len(&excd), useld); else return 0; } continue; } if (c_id(&excd) == id && num-- == 0) break; } if (c_id(&excd) & 0x80) { switch (c_id(&excd) & 0x70) { case 0x70: ecard_readbytes((unsigned char *)excd.d.string, ec, (int)c_start(&excd), c_len(&excd), useld); break; case 0x00: break; } } cd->start_offset = c_start(&excd); memcpy(cd->d.string, excd.d.string, 256); return 1; } /* ======================= 
Interrupt control ============================ */ static void ecard_def_irq_enable(ecard_t *ec, int irqnr) { } static void ecard_def_irq_disable(ecard_t *ec, int irqnr) { } static int ecard_def_irq_pending(ecard_t *ec) { return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask; } static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr) { panic("ecard_def_fiq_enable called - impossible"); } static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr) { panic("ecard_def_fiq_disable called - impossible"); } static int ecard_def_fiq_pending(ecard_t *ec) { return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask; } static expansioncard_ops_t ecard_default_ops = { ecard_def_irq_enable, ecard_def_irq_disable, ecard_def_irq_pending, ecard_def_fiq_enable, ecard_def_fiq_disable, ecard_def_fiq_pending }; /* * Enable and disable interrupts from expansion cards. * (interrupts are disabled for these functions). * * They are not meant to be called directly, but via enable/disable_irq. */ static void ecard_irq_unmask(struct irq_data *d) { ecard_t *ec = irq_data_get_irq_chip_data(d); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->claimed && ec->ops->irqenable) ec->ops->irqenable(ec, d->irq); else printk(KERN_ERR "ecard: rejecting request to " "enable IRQs for %d\n", d->irq); } } static void ecard_irq_mask(struct irq_data *d) { ecard_t *ec = irq_data_get_irq_chip_data(d); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->ops && ec->ops->irqdisable) ec->ops->irqdisable(ec, d->irq); } } static struct irq_chip ecard_chip = { .name = "ECARD", .irq_ack = ecard_irq_mask, .irq_mask = ecard_irq_mask, .irq_unmask = ecard_irq_unmask, }; void ecard_enablefiq(unsigned int fiqnr) { ecard_t *ec = slot_to_ecard(fiqnr); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->claimed && ec->ops->fiqenable) ec->ops->fiqenable(ec, fiqnr); else printk(KERN_ERR "ecard: rejecting request to " "enable FIQs for %d\n", fiqnr); } } void ecard_disablefiq(unsigned int fiqnr) { 
ecard_t *ec = slot_to_ecard(fiqnr); if (ec) { if (!ec->ops) ec->ops = &ecard_default_ops; if (ec->ops->fiqdisable) ec->ops->fiqdisable(ec, fiqnr); } } static void ecard_dump_irq_state(void) { ecard_t *ec; printk("Expansion card IRQ state:\n"); for (ec = cards; ec; ec = ec->next) { if (ec->slot_no == 8) continue; printk(" %d: %sclaimed, ", ec->slot_no, ec->claimed ? "" : "not "); if (ec->ops && ec->ops->irqpending && ec->ops != &ecard_default_ops) printk("irq %spending\n", ec->ops->irqpending(ec) ? "" : "not "); else printk("irqaddr %p, mask = %02X, status = %02X\n", ec->irqaddr, ec->irqmask, readb(ec->irqaddr)); } } static void ecard_check_lockup(struct irq_desc *desc) { static unsigned long last; static int lockup; /* * If the timer interrupt has not run since the last million * unrecognised expansion card interrupts, then there is * something seriously wrong. Disable the expansion card * interrupts so at least we can continue. * * Maybe we ought to start a timer to re-enable them some time * later? */ if (last == jiffies) { lockup += 1; if (lockup > 1000000) { printk(KERN_ERR "\nInterrupt lockup detected - " "disabling all expansion card interrupts\n"); desc->irq_data.chip->irq_mask(&desc->irq_data); ecard_dump_irq_state(); } } else lockup = 0; /* * If we did not recognise the source of this interrupt, * warn the user, but don't flood the user with these messages. 
*/ if (!last || time_after(jiffies, last + 5*HZ)) { last = jiffies; printk(KERN_WARNING "Unrecognised interrupt from backplane\n"); ecard_dump_irq_state(); } } static void ecard_irq_handler(unsigned int irq, struct irq_desc *desc) { ecard_t *ec; int called = 0; desc->irq_data.chip->irq_mask(&desc->irq_data); for (ec = cards; ec; ec = ec->next) { int pending; if (!ec->claimed || !ec->irq || ec->slot_no == 8) continue; if (ec->ops && ec->ops->irqpending) pending = ec->ops->irqpending(ec); else pending = ecard_default_ops.irqpending(ec); if (pending) { generic_handle_irq(ec->irq); called ++; } } desc->irq_data.chip->irq_unmask(&desc->irq_data); if (called == 0) ecard_check_lockup(desc); } static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed) { void __iomem *address = NULL; int slot = ec->slot_no; if (ec->slot_no == 8) return ECARD_MEMC8_BASE; ectcr &= ~(1 << slot); switch (type) { case ECARD_MEMC: if (slot < 4) address = ECARD_MEMC_BASE + (slot << 14); break; case ECARD_IOC: if (slot < 4) address = ECARD_IOC_BASE + (slot << 14); else address = ECARD_IOC4_BASE + ((slot - 4) << 14); if (address) address += speed << 19; break; case ECARD_EASI: address = ECARD_EASI_BASE + (slot << 24); if (speed == ECARD_FAST) ectcr |= 1 << slot; break; default: break; } #ifdef IOMD_ECTCR iomd_writeb(ectcr, IOMD_ECTCR); #endif return address; } static int ecard_prints(struct seq_file *m, ecard_t *ec) { seq_printf(m, " %d: %s ", ec->slot_no, ec->easi ? "EASI" : " "); if (ec->cid.id == 0) { struct in_chunk_dir incd; seq_printf(m, "[%04X:%04X] ", ec->cid.manufacturer, ec->cid.product); if (!ec->card_desc && ec->cid.cd && ecard_readchunk(&incd, ec, 0xf5, 0)) { ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL); if (ec->card_desc) strcpy((char *)ec->card_desc, incd.d.string); } seq_printf(m, "%s\n", ec->card_desc ? 
ec->card_desc : "*unknown*"); } else seq_printf(m, "Simple card %d\n", ec->cid.id); return 0; } static int ecard_devices_proc_show(struct seq_file *m, void *v) { ecard_t *ec = cards; while (ec) { ecard_prints(m, ec); ec = ec->next; } return 0; } static int ecard_devices_proc_open(struct inode *inode, struct file *file) { return single_open(file, ecard_devices_proc_show, NULL); } static const struct file_operations bus_ecard_proc_fops = { .owner = THIS_MODULE, .open = ecard_devices_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static struct proc_dir_entry *proc_bus_ecard_dir = NULL; static void ecard_proc_init(void) { proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL); proc_create("devices", 0, proc_bus_ecard_dir, &bus_ecard_proc_fops); } #define ec_set_resource(ec,nr,st,sz) \ do { \ (ec)->resource[nr].name = dev_name(&ec->dev); \ (ec)->resource[nr].start = st; \ (ec)->resource[nr].end = (st) + (sz) - 1; \ (ec)->resource[nr].flags = IORESOURCE_MEM; \ } while (0) static void __init ecard_free_card(struct expansion_card *ec) { int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) if (ec->resource[i].flags) release_resource(&ec->resource[i]); kfree(ec); } static struct expansion_card *__init ecard_alloc_card(int type, int slot) { struct expansion_card *ec; unsigned long base; int i; ec = kzalloc(sizeof(ecard_t), GFP_KERNEL); if (!ec) { ec = ERR_PTR(-ENOMEM); goto nomem; } ec->slot_no = slot; ec->easi = type == ECARD_EASI; ec->irq = 0; ec->fiq = 0; ec->dma = NO_DMA; ec->ops = &ecard_default_ops; dev_set_name(&ec->dev, "ecard%d", slot); ec->dev.parent = NULL; ec->dev.bus = &ecard_bus_type; ec->dev.dma_mask = &ec->dma_mask; ec->dma_mask = (u64)0xffffffff; ec->dev.coherent_dma_mask = ec->dma_mask; if (slot < 4) { ec_set_resource(ec, ECARD_RES_MEMC, PODSLOT_MEMC_BASE + (slot << 14), PODSLOT_MEMC_SIZE); base = PODSLOT_IOC0_BASE + (slot << 14); } else base = PODSLOT_IOC4_BASE + ((slot - 4) << 14); #ifdef CONFIG_ARCH_RPC if (slot < 8) { 
ec_set_resource(ec, ECARD_RES_EASI, PODSLOT_EASI_BASE + (slot << 24), PODSLOT_EASI_SIZE); } if (slot == 8) { ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE); } else #endif for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++) ec_set_resource(ec, i + ECARD_RES_IOCSLOW, base + (i << 19), PODSLOT_IOC_SIZE); for (i = 0; i < ECARD_NUM_RESOURCES; i++) { if (ec->resource[i].flags && request_resource(&iomem_resource, &ec->resource[i])) { dev_err(&ec->dev, "resource(s) not available\n"); ec->resource[i].end -= ec->resource[i].start; ec->resource[i].start = 0; ec->resource[i].flags = 0; } } nomem: return ec; } static ssize_t ecard_show_irq(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->irq); } static ssize_t ecard_show_dma(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->dma); } static ssize_t ecard_show_resources(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); char *str = buf; int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) str += sprintf(str, "%08x %08x %08lx\n", ec->resource[i].start, ec->resource[i].end, ec->resource[i].flags); return str - buf; } static ssize_t ecard_show_vendor(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->cid.manufacturer); } static ssize_t ecard_show_device(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%u\n", ec->cid.product); } static ssize_t ecard_show_type(struct device *dev, struct device_attribute *attr, char *buf) { struct expansion_card *ec = ECARD_DEV(dev); return sprintf(buf, "%s\n", ec->easi ? 
"EASI" : "IOC"); } static struct device_attribute ecard_dev_attrs[] = { __ATTR(device, S_IRUGO, ecard_show_device, NULL), __ATTR(dma, S_IRUGO, ecard_show_dma, NULL), __ATTR(irq, S_IRUGO, ecard_show_irq, NULL), __ATTR(resource, S_IRUGO, ecard_show_resources, NULL), __ATTR(type, S_IRUGO, ecard_show_type, NULL), __ATTR(vendor, S_IRUGO, ecard_show_vendor, NULL), __ATTR_NULL, }; int ecard_request_resources(struct expansion_card *ec) { int i, err = 0; for (i = 0; i < ECARD_NUM_RESOURCES; i++) { if (ecard_resource_end(ec, i) && !request_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i), ec->dev.driver->name)) { err = -EBUSY; break; } } if (err) { while (i--) if (ecard_resource_end(ec, i)) release_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i)); } return err; } EXPORT_SYMBOL(ecard_request_resources); void ecard_release_resources(struct expansion_card *ec) { int i; for (i = 0; i < ECARD_NUM_RESOURCES; i++) if (ecard_resource_end(ec, i)) release_mem_region(ecard_resource_start(ec, i), ecard_resource_len(ec, i)); } EXPORT_SYMBOL(ecard_release_resources); void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data) { ec->irq_data = irq_data; barrier(); ec->ops = ops; } EXPORT_SYMBOL(ecard_setirq); void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res, unsigned long offset, unsigned long maxsize) { unsigned long start = ecard_resource_start(ec, res); unsigned long end = ecard_resource_end(ec, res); if (offset > (end - start)) return NULL; start += offset; if (maxsize && end - start > maxsize) end = start + maxsize; return devm_ioremap(&ec->dev, start, end - start); } EXPORT_SYMBOL(ecardm_iomap); /* * Probe for an expansion card. * * If bit 1 of the first byte of the card is set, then the * card does not exist. 
*/ static int __init ecard_probe(int slot, unsigned irq, card_type_t type) { ecard_t **ecp; ecard_t *ec; struct ex_ecid cid; void __iomem *addr; int i, rc; ec = ecard_alloc_card(type, slot); if (IS_ERR(ec)) { rc = PTR_ERR(ec); goto nomem; } rc = -ENODEV; if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL) goto nodev; cid.r_zero = 1; ecard_readbytes(&cid, ec, 0, 16, 0); if (cid.r_zero) goto nodev; ec->cid.id = cid.r_id; ec->cid.cd = cid.r_cd; ec->cid.is = cid.r_is; ec->cid.w = cid.r_w; ec->cid.manufacturer = ecard_getu16(cid.r_manu); ec->cid.product = ecard_getu16(cid.r_prod); ec->cid.country = cid.r_country; ec->cid.irqmask = cid.r_irqmask; ec->cid.irqoff = ecard_gets24(cid.r_irqoff); ec->cid.fiqmask = cid.r_fiqmask; ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff); ec->fiqaddr = ec->irqaddr = addr; if (ec->cid.is) { ec->irqmask = ec->cid.irqmask; ec->irqaddr += ec->cid.irqoff; ec->fiqmask = ec->cid.fiqmask; ec->fiqaddr += ec->cid.fiqoff; } else { ec->irqmask = 1; ec->fiqmask = 4; } for (i = 0; i < ARRAY_SIZE(blacklist); i++) if (blacklist[i].manufacturer == ec->cid.manufacturer && blacklist[i].product == ec->cid.product) { ec->card_desc = blacklist[i].type; break; } ec->irq = irq; /* * hook the interrupt handlers */ if (slot < 8) { irq_set_chip_and_handler(ec->irq, &ecard_chip, handle_level_irq); irq_set_chip_data(ec->irq, ec); set_irq_flags(ec->irq, IRQF_VALID); } #ifdef CONFIG_ARCH_RPC /* On RiscPC, only first two slots have DMA capability */ if (slot < 2) ec->dma = 2 + slot; #endif for (ecp = &cards; *ecp; ecp = &(*ecp)->next); *ecp = ec; slot_to_expcard[slot] = ec; device_register(&ec->dev); return 0; nodev: ecard_free_card(ec); nomem: return rc; } /* * Initialise the expansion card system. * Locate all hardware - interrupt management and * actual cards. 
*/ static int __init ecard_init(void) { struct task_struct *task; int slot, irqbase; irqbase = irq_alloc_descs(-1, 0, 8, -1); if (irqbase < 0) return irqbase; task = kthread_run(ecard_task, NULL, "kecardd"); if (IS_ERR(task)) { printk(KERN_ERR "Ecard: unable to create kernel thread: %ld\n", PTR_ERR(task)); irq_free_descs(irqbase, 8); return PTR_ERR(task); } printk("Probing expansion cards\n"); for (slot = 0; slot < 8; slot ++) { if (ecard_probe(slot, irqbase + slot, ECARD_EASI) == -ENODEV) ecard_probe(slot, irqbase + slot, ECARD_IOC); } ecard_probe(8, 11, ECARD_IOC); irq_set_chained_handler(IRQ_EXPANSIONCARD, ecard_irq_handler); ecard_proc_init(); return 0; } subsys_initcall(ecard_init); /* * ECARD "bus" */ static const struct ecard_id * ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec) { int i; for (i = 0; ids[i].manufacturer != 65535; i++) if (ec->cid.manufacturer == ids[i].manufacturer && ec->cid.product == ids[i].product) return ids + i; return NULL; } static int ecard_drv_probe(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); const struct ecard_id *id; int ret; id = ecard_match_device(drv->id_table, ec); ec->claimed = 1; ret = drv->probe(ec, id); if (ret) ec->claimed = 0; return ret; } static int ecard_drv_remove(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); drv->remove(ec); ec->claimed = 0; /* * Restore the default operations. We ensure that the * ops are set before we change the data. */ ec->ops = &ecard_default_ops; barrier(); ec->irq_data = NULL; return 0; } /* * Before rebooting, we must make sure that the expansion card is in a * sensible state, so it can be re-detected. This means that the first * page of the ROM must be visible. We call the expansion cards reset * handler, if any. 
*/ static void ecard_drv_shutdown(struct device *dev) { struct expansion_card *ec = ECARD_DEV(dev); struct ecard_driver *drv = ECARD_DRV(dev->driver); struct ecard_request req; if (dev->driver) { if (drv->shutdown) drv->shutdown(ec); ec->claimed = 0; } /* * If this card has a loader, call the reset handler. */ if (ec->loader) { req.fn = ecard_task_reset; req.ec = ec; ecard_call(&req); } } int ecard_register_driver(struct ecard_driver *drv) { drv->drv.bus = &ecard_bus_type; return driver_register(&drv->drv); } void ecard_remove_driver(struct ecard_driver *drv) { driver_unregister(&drv->drv); } static int ecard_match(struct device *_dev, struct device_driver *_drv) { struct expansion_card *ec = ECARD_DEV(_dev); struct ecard_driver *drv = ECARD_DRV(_drv); int ret; if (drv->id_table) { ret = ecard_match_device(drv->id_table, ec) != NULL; } else { ret = ec->cid.id == drv->id; } return ret; } struct bus_type ecard_bus_type = { .name = "ecard", .dev_attrs = ecard_dev_attrs, .match = ecard_match, .probe = ecard_drv_probe, .remove = ecard_drv_remove, .shutdown = ecard_drv_shutdown, }; static int ecard_bus_init(void) { return bus_register(&ecard_bus_type); } postcore_initcall(ecard_bus_init); EXPORT_SYMBOL(ecard_readchunk); EXPORT_SYMBOL(ecard_register_driver); EXPORT_SYMBOL(ecard_remove_driver); EXPORT_SYMBOL(ecard_bus_type);
gpl-2.0
TeamEOS/kernel_oppo_msm8974
drivers/hwmon/max1111.c
4857
5602
/* * max1111.c - +2.7V, Low-Power, Multichannel, Serial 8-bit ADCs * * Based on arch/arm/mach-pxa/corgi_ssp.c * * Copyright (C) 2004-2005 Richard Purdie * * Copyright (C) 2008 Marvell International Ltd. * Eric Miao <eric.miao@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/spi/spi.h> #include <linux/slab.h> #define MAX1111_TX_BUF_SIZE 1 #define MAX1111_RX_BUF_SIZE 2 /* MAX1111 Commands */ #define MAX1111_CTRL_PD0 (1u << 0) #define MAX1111_CTRL_PD1 (1u << 1) #define MAX1111_CTRL_SGL (1u << 2) #define MAX1111_CTRL_UNI (1u << 3) #define MAX1111_CTRL_SEL_SH (5) /* NOTE: bit 4 is ignored */ #define MAX1111_CTRL_STR (1u << 7) struct max1111_data { struct spi_device *spi; struct device *hwmon_dev; struct spi_message msg; struct spi_transfer xfer[2]; uint8_t tx_buf[MAX1111_TX_BUF_SIZE]; uint8_t rx_buf[MAX1111_RX_BUF_SIZE]; struct mutex drvdata_lock; /* protect msg, xfer and buffers from multiple access */ }; static int max1111_read(struct device *dev, int channel) { struct max1111_data *data = dev_get_drvdata(dev); uint8_t v1, v2; int err; /* writing to drvdata struct is not thread safe, wait on mutex */ mutex_lock(&data->drvdata_lock); data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) | MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 | MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR; err = spi_sync(data->spi, &data->msg); if (err < 0) { dev_err(dev, "spi_sync failed with %d\n", err); mutex_unlock(&data->drvdata_lock); return err; } v1 = data->rx_buf[0]; v2 = data->rx_buf[1]; mutex_unlock(&data->drvdata_lock); if ((v1 & 0xc0) || (v2 & 0x3f)) return -EINVAL; return (v1 << 2) | (v2 >> 6); } #ifdef CONFIG_SHARPSL_PM static struct max1111_data *the_max1111; int 
max1111_read_channel(int channel) { return max1111_read(&the_max1111->spi->dev, channel); } EXPORT_SYMBOL(max1111_read_channel); #endif /* * NOTE: SPI devices do not have a default 'name' attribute, which is * likely to be used by hwmon applications to distinguish between * different devices, explicitly add a name attribute here. */ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "max1111\n"); } static ssize_t show_adc(struct device *dev, struct device_attribute *attr, char *buf) { int channel = to_sensor_dev_attr(attr)->index; int ret; ret = max1111_read(dev, channel); if (ret < 0) return ret; /* * assume the reference voltage to be 2.048V, with an 8-bit sample, * the LSB weight is 8mV */ return sprintf(buf, "%d\n", ret * 8); } #define MAX1111_ADC_ATTR(_id) \ SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id) static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static MAX1111_ADC_ATTR(0); static MAX1111_ADC_ATTR(1); static MAX1111_ADC_ATTR(2); static MAX1111_ADC_ATTR(3); static struct attribute *max1111_attributes[] = { &dev_attr_name.attr, &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, NULL, }; static const struct attribute_group max1111_attr_group = { .attrs = max1111_attributes, }; static int __devinit setup_transfer(struct max1111_data *data) { struct spi_message *m; struct spi_transfer *x; m = &data->msg; x = &data->xfer[0]; spi_message_init(m); x->tx_buf = &data->tx_buf[0]; x->len = MAX1111_TX_BUF_SIZE; spi_message_add_tail(x, m); x++; x->rx_buf = &data->rx_buf[0]; x->len = MAX1111_RX_BUF_SIZE; spi_message_add_tail(x, m); return 0; } static int __devinit max1111_probe(struct spi_device *spi) { struct max1111_data *data; int err; spi->bits_per_word = 8; spi->mode = SPI_MODE_0; err = spi_setup(spi); if (err < 0) return err; data = kzalloc(sizeof(struct max1111_data), 
GFP_KERNEL); if (data == NULL) { dev_err(&spi->dev, "failed to allocate memory\n"); return -ENOMEM; } err = setup_transfer(data); if (err) goto err_free_data; mutex_init(&data->drvdata_lock); data->spi = spi; spi_set_drvdata(spi, data); err = sysfs_create_group(&spi->dev.kobj, &max1111_attr_group); if (err) { dev_err(&spi->dev, "failed to create attribute group\n"); goto err_free_data; } data->hwmon_dev = hwmon_device_register(&spi->dev); if (IS_ERR(data->hwmon_dev)) { dev_err(&spi->dev, "failed to create hwmon device\n"); err = PTR_ERR(data->hwmon_dev); goto err_remove; } #ifdef CONFIG_SHARPSL_PM the_max1111 = data; #endif return 0; err_remove: sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); err_free_data: kfree(data); return err; } static int __devexit max1111_remove(struct spi_device *spi) { struct max1111_data *data = spi_get_drvdata(spi); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group); mutex_destroy(&data->drvdata_lock); kfree(data); return 0; } static struct spi_driver max1111_driver = { .driver = { .name = "max1111", .owner = THIS_MODULE, }, .probe = max1111_probe, .remove = __devexit_p(max1111_remove), }; module_spi_driver(max1111_driver); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"); MODULE_DESCRIPTION("MAX1111 ADC Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:max1111");
gpl-2.0
DragonDevs/android_kernel_zte_msm8226
drivers/hwmon/max6650.c
4857
19873
/*
 * max6650.c - Part of lm_sensors, Linux kernel modules for hardware
 * monitoring.
 *
 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
 *
 * based on code written by John Morris <john.morris@spirentcom.com>
 * Copyright (c) 2003 Spirent Communications
 * and Claus Gindhart <claus.gindhart@kontron.com>
 *
 * This module has only been tested with the MAX6650 chip. It should
 * also work with the MAX6651. It does not distinguish max6650 and max6651
 * chips.
 *
 * The datasheet was last seen at:
 *
 *        http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>

/*
 * Insmod parameters
 */

/* fan_voltage: 5=5V fan, 12=12V fan, 0=don't change */
static int fan_voltage;
/* prescaler: Possible values are 1, 2, 4, 8, 16 or 0 for don't change */
static int prescaler;
/* clock: The clock frequency of the chip the driver should assume */
static int clock = 254000;

module_param(fan_voltage, int, S_IRUGO);
module_param(prescaler, int, S_IRUGO);
module_param(clock, int, S_IRUGO);

/*
 * MAX 6650/6651 registers
 */

#define MAX6650_REG_SPEED	0x00
#define MAX6650_REG_CONFIG	0x02
#define MAX6650_REG_GPIO_DEF	0x04
#define MAX6650_REG_DAC		0x06
#define MAX6650_REG_ALARM_EN	0x08
#define MAX6650_REG_ALARM	0x0A
#define MAX6650_REG_TACH0	0x0C
#define MAX6650_REG_TACH1	0x0E
#define MAX6650_REG_TACH2	0x10
#define MAX6650_REG_TACH3	0x12
#define MAX6650_REG_GPIO_STAT	0x14
#define MAX6650_REG_COUNT	0x16

/*
 * Config register bits
 */

#define MAX6650_CFG_V12			0x08
#define MAX6650_CFG_PRESCALER_MASK	0x07
#define MAX6650_CFG_PRESCALER_2		0x01
#define MAX6650_CFG_PRESCALER_4		0x02
#define MAX6650_CFG_PRESCALER_8		0x03
#define MAX6650_CFG_PRESCALER_16	0x04
#define MAX6650_CFG_MODE_MASK		0x30
#define MAX6650_CFG_MODE_ON		0x00
#define MAX6650_CFG_MODE_OFF		0x10
#define MAX6650_CFG_MODE_CLOSED_LOOP	0x20
#define MAX6650_CFG_MODE_OPEN_LOOP	0x30
#define MAX6650_COUNT_MASK		0x03

/*
 * Alarm status register bits
 */

#define MAX6650_ALRM_MAX	0x01
#define MAX6650_ALRM_MIN	0x02
#define MAX6650_ALRM_TACH	0x04
#define MAX6650_ALRM_GPIO1	0x08
#define MAX6650_ALRM_GPIO2	0x10

/* Minimum and maximum values of the FAN-RPM */
#define FAN_RPM_MIN 240
#define FAN_RPM_MAX 30000

/* Count-time register value k corresponds to a fan divider of 2^k */
#define DIV_FROM_REG(reg) (1 << (reg & 7))

static int max6650_probe(struct i2c_client *client,
			 const struct i2c_device_id *id);
static int max6650_init_client(struct i2c_client *client);
static int max6650_remove(struct i2c_client *client);
static struct max6650_data *max6650_update_device(struct device *dev);

/*
 * Driver data (common to all clients)
 */

/* driver_data encodes the number of fan tachometer inputs (1 or 4) */
static const struct i2c_device_id max6650_id[] = {
	{ "max6650", 1 },
	{ "max6651", 4 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max6650_id);

static struct i2c_driver max6650_driver = {
	.driver = {
		.name	= "max6650",
	},
	.probe		= max6650_probe,
	.remove		= max6650_remove,
	.id_table	= max6650_id,
};

/*
 * Client data (each client gets its own)
 */

struct max6650_data {
	struct device *hwmon_dev;
	struct mutex update_lock;	/* protects register cache below */
	int nr_fans;			/* 1 for MAX6650, 4 for MAX6651 */
	char valid;		/* zero until following fields are valid */
	unsigned long last_updated;	/* in jiffies */

	/* register values */
	u8 speed;
	u8 config;
	u8 tach[4];
	u8 count;
	u8 dac;
	u8 alarm;	/* latched alarm bits, cleared as they are reported */
};

/* Report the measured speed (rpm) of the fan selected by attr->index */
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct max6650_data *data = max6650_update_device(dev);
	int rpm;

	/*
	 * Calculation details:
	 *
	 * Each tachometer counts over an interval given by the "count"
	 * register (0.25, 0.5, 1 or 2 seconds). This module assumes
	 * that the fans produce two pulses per revolution (this seems
	 * to be the most common).
	 */

	rpm = ((data->tach[attr->index] * 120) / DIV_FROM_REG(data->count));
	return sprintf(buf, "%d\n", rpm);
}

/*
 * Set the fan speed to the specified RPM (or read back the RPM setting).
 * This works in closed loop mode only. Use pwm1 for open loop speed setting.
 *
 * The MAX6650/1 will automatically control fan speed when in closed loop
 * mode.
 *
 * Assumptions:
 *
 * 1) The MAX6650/1 internal 254kHz clock frequency is set correctly. Use
 *    the clock module parameter if you need to fine tune this.
 *
 * 2) The prescaler (low three bits of the config register) has already
 *    been set to an appropriate value. Use the prescaler module parameter
 *    if your BIOS doesn't initialize the chip properly.
 *
 * The relevant equations are given on pages 21 and 22 of the datasheet.
 *
 * From the datasheet, the relevant equation when in regulation is:
 *
 *    [fCLK / (128 x (KTACH + 1))] = 2 x FanSpeed / KSCALE
 *
 * where:
 *
 *    fCLK is the oscillator frequency (either the 254kHz internal
 *         oscillator or the externally applied clock)
 *
 *    KTACH is the value in the speed register
 *
 *    FanSpeed is the speed of the fan in rps
 *
 *    KSCALE is the prescaler value (1, 2, 4, 8, or 16)
 *
 * When reading, we need to solve for FanSpeed. When writing, we need to
 * solve for KTACH.
 *
 * Note: this tachometer is completely separate from the tachometers
 * used to measure the fan speeds. Only one fan's speed (fan1) is
 * controlled.
 */

static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);
	int kscale, ktach, rpm;

	/*
	 * Use the datasheet equation:
	 *
	 *    FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]
	 *
	 * then multiply by 60 to give rpm.
	 */

	kscale = DIV_FROM_REG(data->config);
	ktach = data->speed;
	rpm = 60 * kscale * clock / (256 * (ktach + 1));
	return sprintf(buf, "%d\n", rpm);
}

static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
			 const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int kscale, ktach;
	unsigned long rpm;
	int err;

	err = kstrtoul(buf, 10, &rpm);
	if (err)
		return err;

	rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);

	/*
	 * Divide the required speed by 60 to get from rpm to rps, then
	 * use the datasheet equation:
	 *
	 *     KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1
	 */

	mutex_lock(&data->update_lock);

	kscale = DIV_FROM_REG(data->config);
	ktach = ((clock * kscale) / (256 * rpm / 60)) - 1;
	if (ktach < 0)
		ktach = 0;
	if (ktach > 255)
		ktach = 255;
	data->speed = ktach;

	i2c_smbus_write_byte_data(client, MAX6650_REG_SPEED, data->speed);

	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Get/set the fan speed in open loop mode using pwm1 sysfs file.
 * Speed is given as a relative value from 0 to 255, where 255 is maximum
 * speed. Note that this is done by writing directly to the chip's DAC,
 * it won't change the closed loop speed set by fan1_target.
 * Also note that due to rounding errors it is possible that you don't read
 * back exactly the value you have set.
 */

static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	int pwm;
	struct max6650_data *data = max6650_update_device(dev);

	/*
	 * Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans.
	 * Lower DAC values mean higher speeds.
	 */
	if (data->config & MAX6650_CFG_V12)
		pwm = 255 - (255 * (int)data->dac)/180;
	else
		pwm = 255 - (255 * (int)data->dac)/76;

	if (pwm < 0)
		pwm = 0;

	return sprintf(buf, "%d\n", pwm);
}

static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	unsigned long pwm;
	int err;

	err = kstrtoul(buf, 10, &pwm);
	if (err)
		return err;

	pwm = SENSORS_LIMIT(pwm, 0, 255);

	mutex_lock(&data->update_lock);

	/* Invert the pwm scale: higher pwm means a lower DAC value */
	if (data->config & MAX6650_CFG_V12)
		data->dac = 180 - (180 * pwm)/255;
	else
		data->dac = 76 - (76 * pwm)/255;

	i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);

	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Get/Set controller mode:
 * Possible values:
 * 0 = Fan always on
 * 1 = Open loop, Voltage is set according to speed, not regulated.
 * 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
 */

static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
			  char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);
	int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
	/* map hardware mode (on/off/closed/open) to the sysfs encoding */
	int sysfs_modes[4] = {0, 1, 2, 1};

	return sprintf(buf, "%d\n", sysfs_modes[mode]);
}

static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
			  const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	/* map the sysfs encoding back to a hardware mode field */
	int max6650_modes[3] = {0, 3, 2};
	unsigned long mode;
	int err;

	err = kstrtoul(buf, 10, &mode);
	if (err)
		return err;

	if (mode > 2)
		return -EINVAL;

	mutex_lock(&data->update_lock);

	data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
	data->config = (data->config & ~MAX6650_CFG_MODE_MASK)
		       | (max6650_modes[mode] << 4);

	i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, data->config);

	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Read/write functions for fan1_div sysfs file. The MAX6650 has no such
 * divider. We handle this by converting between divider and counttime:
 *
 * (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, or 3
 *
 * Lower values of k allow to connect a faster fan without the risk of
 * counter overflow. The price is lower resolution. You can also set counttime
 * using the module parameter. Note that the module parameter "prescaler" also
 * influences the behaviour. Unfortunately, there's no sysfs attribute
 * defined for that. See the data sheet for details.
 */

static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);

	return sprintf(buf, "%d\n", DIV_FROM_REG(data->count));
}

static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	unsigned long div;
	int err;

	err = kstrtoul(buf, 10, &div);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* only powers of two up to 8 have a counttime representation */
	switch (div) {
	case 1:
		data->count = 0;
		break;
	case 2:
		data->count = 1;
		break;
	case 4:
		data->count = 2;
		break;
	case 8:
		data->count = 3;
		break;
	default:
		mutex_unlock(&data->update_lock);
		return -EINVAL;
	}

	i2c_smbus_write_byte_data(client, MAX6650_REG_COUNT, data->count);
	mutex_unlock(&data->update_lock);

	return count;
}

/*
 * Get alarm stati:
 * Possible values:
 * 0 = no alarm
 * 1 = alarm
 */

static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct max6650_data *data = max6650_update_device(dev);
	struct i2c_client *client = to_i2c_client(dev);
	int alarm = 0;

	/*
	 * Alarm bits are latched in data->alarm; reporting one clears it
	 * and re-reads the chip so other alarm files still see fresh state.
	 */
	if (data->alarm & attr->index) {
		mutex_lock(&data->update_lock);
		alarm = 1;
		data->alarm &= ~attr->index;
		data->alarm |= i2c_smbus_read_byte_data(client,
							MAX6650_REG_ALARM);
		mutex_unlock(&data->update_lock);
	}

	return sprintf(buf, "%d\n", alarm);
}

static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target);
static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_MAX);
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_MIN);
static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_TACH);
static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_GPIO1);
static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_GPIO2);

/*
 * is_visible callback for the attribute group: alarm files whose alarm
 * source is not enabled in the chip's ALARM_EN register are hidden.
 */
static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
				    int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct i2c_client *client = to_i2c_client(dev);
	u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
	struct device_attribute *devattr;

	/*
	 * Hide the alarms that have not been enabled by the firmware
	 */

	devattr = container_of(a, struct device_attribute, attr);
	if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr
	 || devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr
	 || devattr == &sensor_dev_attr_fan1_fault.dev_attr
	 || devattr == &sensor_dev_attr_gpio1_alarm.dev_attr
	 || devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
		/* attr->index doubles as the ALARM_EN bit mask here */
		if (!(alarm_en & to_sensor_dev_attr(devattr)->index))
			return 0;
	}

	return a->mode;
}

static struct attribute *max6650_attrs[] = {
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&dev_attr_fan1_target.attr,
	&dev_attr_fan1_div.attr,
	&dev_attr_pwm1_enable.attr,
	&dev_attr_pwm1.attr,
	&sensor_dev_attr_fan1_max_alarm.dev_attr.attr,
	&sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
	&sensor_dev_attr_fan1_fault.dev_attr.attr,
	&sensor_dev_attr_gpio1_alarm.dev_attr.attr,
	&sensor_dev_attr_gpio2_alarm.dev_attr.attr,
	NULL
};

static struct attribute_group max6650_attr_grp = {
	.attrs = max6650_attrs,
	.is_visible = max6650_attrs_visible,
};

/* Extra tachometer inputs that only exist on the MAX6651 */
static struct attribute *max6651_attrs[] = {
	&sensor_dev_attr_fan2_input.dev_attr.attr,
	&sensor_dev_attr_fan3_input.dev_attr.attr,
	&sensor_dev_attr_fan4_input.dev_attr.attr,
	NULL
};

static const struct attribute_group max6651_attr_grp = {
	.attrs = max6651_attrs,
};

/*
 * Real code
 */

/*
 * Bind the driver to a chip: allocate state, initialize the chip,
 * publish the sysfs attributes and register with the hwmon core.
 * All partially-created resources are rolled back on failure.
 */
static int max6650_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct max6650_data *data;
	int err;

	data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL);
	if (!data) {
		dev_err(&client->dev, "out of memory.\n");
		return -ENOMEM;
	}

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);
	/* driver_data from the id table is the number of fan inputs */
	data->nr_fans = id->driver_data;

	/*
	 * Initialize the max6650 chip
	 */
	err = max6650_init_client(client);
	if (err)
		goto err_free;

	err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
	if (err)
		goto err_free;
	/* 3 additional fan inputs for the MAX6651 */
	if (data->nr_fans == 4) {
		err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp);
		if (err)
			goto err_remove;
	}

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (!IS_ERR(data->hwmon_dev))
		return 0;

	err = PTR_ERR(data->hwmon_dev);
	dev_err(&client->dev, "error registering hwmon device.\n");
	if (data->nr_fans == 4)
		sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
err_remove:
	sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
err_free:
	kfree(data);
	return err;
}

/* Tear down everything created by max6650_probe(), in reverse order. */
static int max6650_remove(struct i2c_client *client)
{
	struct max6650_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	if (data->nr_fans == 4)
		sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
	sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
	kfree(data);
	return 0;
}

/*
 * Apply the fan_voltage/prescaler module parameters to the config
 * register and make sure the chip is not in "full off" mode. Illegal
 * parameter values are reported but otherwise ignored (the current
 * hardware setting is kept).
 */
static int max6650_init_client(struct i2c_client *client)
{
	struct max6650_data *data = i2c_get_clientdata(client);
	int config;
	int err = -EIO;

	config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);

	if (config < 0) {
		dev_err(&client->dev, "Error reading config, aborting.\n");
		return err;
	}

	switch (fan_voltage) {
	case 0:
		break;
	case 5:
		config &= ~MAX6650_CFG_V12;
		break;
	case 12:
		config |= MAX6650_CFG_V12;
		break;
	default:
		dev_err(&client->dev, "illegal value for fan_voltage (%d)\n",
			fan_voltage);
	}

	dev_info(&client->dev, "Fan voltage is set to %dV.\n",
		 (config & MAX6650_CFG_V12) ? 12 : 5);

	switch (prescaler) {
	case 0:
		break;
	case 1:
		config &= ~MAX6650_CFG_PRESCALER_MASK;
		break;
	case 2:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			 | MAX6650_CFG_PRESCALER_2;
		break;
	case 4:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			 | MAX6650_CFG_PRESCALER_4;
		break;
	case 8:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			 | MAX6650_CFG_PRESCALER_8;
		break;
	case 16:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			 | MAX6650_CFG_PRESCALER_16;
		break;
	default:
		dev_err(&client->dev, "illegal value for prescaler (%d)\n",
			prescaler);
	}

	dev_info(&client->dev, "Prescaler is set to %d.\n",
		 1 << (config & MAX6650_CFG_PRESCALER_MASK));

	/*
	 * If mode is set to "full off", we change it to "open loop" and
	 * set DAC to 255, which has the same effect. We do this because
	 * there's no "full off" mode defined in hwmon specifcations.
	 */

	if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
		dev_dbg(&client->dev, "Change mode to open loop, full off.\n");
		config = (config & ~MAX6650_CFG_MODE_MASK)
			 | MAX6650_CFG_MODE_OPEN_LOOP;
		if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) {
			dev_err(&client->dev, "DAC write error, aborting.\n");
			return err;
		}
	}

	if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
		dev_err(&client->dev, "Config write error, aborting.\n");
		return err;
	}

	data->config = config;
	data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT);

	return 0;
}

static const u8 tach_reg[] = {
	MAX6650_REG_TACH0,
	MAX6650_REG_TACH1,
	MAX6650_REG_TACH2,
	MAX6650_REG_TACH3,
};

/*
 * Refresh the cached register values, at most once per second.
 * Returns the (locked-then-unlocked) per-client data; callers read the
 * cached fields without holding the lock, as elsewhere in this driver.
 */
static struct max6650_data *max6650_update_device(struct device *dev)
{
	int i;
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		data->speed = i2c_smbus_read_byte_data(client,
						       MAX6650_REG_SPEED);
		data->config = i2c_smbus_read_byte_data(client,
							MAX6650_REG_CONFIG);
		for (i = 0; i < data->nr_fans; i++) {
			data->tach[i] = i2c_smbus_read_byte_data(client,
								 tach_reg[i]);
		}
		data->count = i2c_smbus_read_byte_data(client,
						       MAX6650_REG_COUNT);
		data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);

		/*
		 * Alarms are cleared on read in case the condition that
		 * caused the alarm is removed. Keep the value latched here
		 * for providing the register through different alarm files.
		 */
		data->alarm |= i2c_smbus_read_byte_data(client,
							MAX6650_REG_ALARM);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}

module_i2c_driver(max6650_driver);

MODULE_AUTHOR("Hans J. Koch");
MODULE_DESCRIPTION("MAX6650 sensor driver");
MODULE_LICENSE("GPL");
gpl-2.0
vetzki/kernel_msm
arch/ia64/kernel/irq_ia64.c
5113
16110
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *						Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

/* per-irq allocation states kept in irq_status[] */
#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

/* protects irq_cfg[], irq_status[], vector_table[] and per-cpu vector_irq */
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

/* per-cpu reverse map: vector -> irq, or -1 when unassigned */
DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

/* for each vector, the set of cpus on which it is currently bound */
static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS -1] = IRQ_UNUSED
};

/* Returns 1 if the irq is allocated, -1 otherwise. */
int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

/* Find a free irq number; caller must hold vector_lock. */
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

/*
 * Find a device vector that is not yet bound on any online cpu of
 * @domain; caller must hold vector_lock.
 */
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

/*
 * Bind @irq to @vector on every cpu in @domain and update all the
 * bookkeeping tables; caller must hold vector_lock.
 */
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

/* Locked wrapper around __bind_irq_vector(). */
int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

/* Undo __bind_irq_vector(); caller must hold vector_lock. */
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpumask_and(&mask, &cfg->domain, cpu_online_mask);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Allocate a free vector for @irq (or, with AUTO_ASSIGN, let the irq
 * number equal the vector). Returns the vector or -ENOSPC.
 */
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

/* With "vector=percpu", each cpu gets its own vector space. */
static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

/*
 * Start migrating @irq to @cpu: allocate a vector in the target
 * domain and remember the old one for later cleanup; caller must
 * hold vector_lock.
 */
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

/*
 * Finish a pending migration: kick the cpus of the old domain with
 * IA64_IRQ_MOVE_VECTOR so they release the old vector.
 */
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

/* Handler for IA64_IRQ_MOVE_VECTOR: drop old-vector state on this cpu. */
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.flags =	IRQF_DISABLED,
	.name =		"irq_move"
};

/* Early "vector=" command-line option; "percpu" enables per-cpu domains. */
static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


/* Free @irq and mark it reserved so it will not be reallocated. */
void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocate and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates a interrupt processing when a cpu is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Now try calling normal ia64_handle_irq as it would have got called
			 * from a real intr handler. Try passing null for pt_regs, hopefully
			 * it will work. I hope it works!.
			 * Probably could shared code.
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq]=0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

/* Should never actually run; the vector only exists to force cpu wakeup. */
static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
}

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */
static struct irqaction resched_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"resched"
};

static struct irqaction tlb_irqaction = {
	.handler =	dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"tlb_flush"
};

#endif

/* Bind a per-cpu vector to the LSAPIC chip and optionally install @action. */
void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (action)
		setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

/* Write an interrupt command to the processor's memory-mapped IPI block. */
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * cpu number is in 8bit ID and 8bit EID
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
gpl-2.0
coreentin/android_kernel_nvidia_s8515
arch/mn10300/kernel/cevt-mn10300.c
7417
3259
/* MN10300 clockevents
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by Mark Salter (msalter@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/timex.h>
#include "internal.h"

#ifdef CONFIG_SMP
/*
 * Fix: this guard previously tested the misspelled symbol
 * CONFIG_GEENERIC_CLOCKEVENTS_BROADCAST, which no Kconfig ever defines,
 * so !defined(...) was unconditionally true and the #error fired even on
 * configurations with broadcast support.  Use the correctly spelled
 * symbol, matching the check in init_clockevents() below.
 */
#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
#error "This doesn't scale well! Need per-core local timers."
#endif
#else  /* CONFIG_SMP */
/* UP builds have only the one jiffies counter; stub out the CPU1 variants. */
#define stop_jiffies_counter1()
#define reload_jiffies_counter1(x)
#define TMJC1IRQ TMJCIRQ
#endif

/*
 * Program the next clockevent expiry, @delta timer ticks from now, on the
 * calling CPU's jiffies counter.  Always returns 0 (cannot fail).
 */
static int next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0) {
		stop_jiffies_counter();
		reload_jiffies_counter(delta - 1);
	} else {
		stop_jiffies_counter1();
		reload_jiffies_counter1(delta - 1);
	}
	return 0;
}

/* The counter is reprogrammed per event; there is no mode to switch. */
static void set_clock_mode(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
static DEFINE_PER_CPU(struct irqaction, timer_irq);

/*
 * Timer tick: stop this CPU's counter (one-shot semantics) and dispatch
 * to whatever handler the clockevents core has installed.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *cd;
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		stop_jiffies_counter();
	else
		stop_jiffies_counter1();

	cd = &per_cpu(mn10300_clockevent_device, cpu);
	cd->event_handler(cd);
	return IRQ_HANDLED;
}

/* Placeholder until clockevents_register_device() installs the real one. */
static void event_handler(struct clock_event_device *dev)
{
}

/*
 * Register the calling CPU's jiffies counter as a one-shot clockevent
 * device and hook up its timer interrupt.  Returns 0.
 */
int __init init_clockevents(void)
{
	struct clock_event_device *cd;
	struct irqaction *iact;
	unsigned int cpu = smp_processor_id();

	cd = &per_cpu(mn10300_clockevent_device, cpu);

	if (cpu == 0) {
		stop_jiffies_counter();
		cd->irq = TMJCIRQ;
	} else {
		stop_jiffies_counter1();
		cd->irq = TMJC1IRQ;
	}

	cd->name = "Timestamp";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate shift/mult. We want to spawn at least 1 second */
	clockevents_calc_mult_shift(cd, MN10300_JCCLK, 1);

	/* Calculate the min / max delta */
	cd->max_delta_ns = clockevent_delta2ns(TMJCBR_MAX, cd);
	cd->min_delta_ns = clockevent_delta2ns(100, cd);

	cd->rating = 200;
	cd->cpumask = cpumask_of(smp_processor_id());
	cd->set_mode = set_clock_mode;
	cd->event_handler = event_handler;
	cd->set_next_event = next_event;

	iact = &per_cpu(timer_irq, cpu);
	iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
	iact->handler = timer_interrupt;

	clockevents_register_device(cd);

#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
	/* setup timer irq affinity so it only runs on this cpu */
	{
		struct irq_data *data;

		data = irq_get_irq_data(cd->irq);
		cpumask_copy(data->affinity, cpumask_of(cpu));
		iact->flags |= IRQF_NOBALANCING;
	}
#endif

	if (cpu == 0) {
		reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
		iact->name = "CPU0 Timer";
	} else {
		reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
		iact->name = "CPU1 Timer";
	}

	setup_jiffies_interrupt(cd->irq, iact);

	return 0;
}
gpl-2.0
ashishkrishnan/android_kernel_samsung_smdk4412
drivers/infiniband/hw/ehca/ehca_uverbs.c
9209
8308
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  userspace support verbs
 *
 *  Authors: Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "ehca_tools.h"
#include "hcp_if.h"

/* Allocate a per-process ucontext; freed in ehca_dealloc_ucontext(). */
struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
					struct ib_udata *udata)
{
	struct ehca_ucontext *my_context;

	my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
	if (!my_context) {
		ehca_err(device, "Out of memory device=%p", device);
		return ERR_PTR(-ENOMEM);
	}

	return &my_context->ib_ucontext;
}

int ehca_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
	return 0;
}

/*
 * vm open/close callbacks: vm_private_data holds a pointer to the owning
 * object's mapping use count (mm_count_*), bumped/dropped on fork/unmap.
 */
static void ehca_mm_open(struct vm_area_struct *vma)
{
	u32 *count = (u32 *)vma->vm_private_data;

	if (!count) {
		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
		return;
	}
	(*count)++;
	if (!(*count))
		ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
		     vma->vm_start, vma->vm_end, *count);
}

static void ehca_mm_close(struct vm_area_struct *vma)
{
	u32 *count = (u32 *)vma->vm_private_data;

	if (!count) {
		ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
			     vma->vm_start, vma->vm_end);
		return;
	}
	(*count)--;
	ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
		     vma->vm_start, vma->vm_end, *count);
}

static const struct vm_operations_struct vm_ops = {
	.open = ehca_mm_open,
	.close = ehca_mm_close,
};

/*
 * Map the firmware GAL page (uncached) into userspace and attach the
 * use-count tracking.  Returns 0 or a negative errno.
 */
static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
			u32 *mm_count)
{
	int ret;
	u64 vsize, physical;

	vsize = vma->vm_end - vma->vm_start;
	if (vsize < EHCA_PAGESIZE) {
		ehca_gen_err("invalid vsize=%lx",
			     vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	physical = galpas->user.fw_handle;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
			   vma->vm_page_prot);
	if (unlikely(ret)) {
		ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
		return -ENOMEM;
	}

	vma->vm_private_data = mm_count;
	(*mm_count)++;
	vma->vm_ops = &vm_ops;

	return 0;
}

/*
 * Map an ipz queue page-by-page into userspace and attach the use-count
 * tracking.  Returns 0 or the negative errno from vm_insert_page().
 */
static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
			   u32 *mm_count)
{
	int ret;
	u64 start, ofs;
	struct page *page;

	vma->vm_flags |= VM_RESERVED;
	start = vma->vm_start;
	for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
		u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
		page = virt_to_page(virt_addr);
		ret = vm_insert_page(vma, start, page);
		if (unlikely(ret)) {
			ehca_gen_err("vm_insert_page() failed rc=%i", ret);
			return ret;
		}
		start += PAGE_SIZE;
	}
	vma->vm_private_data = mm_count;
	(*mm_count)++;
	vma->vm_ops = &vm_ops;

	return 0;
}

/* Dispatch a CQ mmap request: rsrc 0 = firmware galpa, 1 = queue pages. */
static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
			u32 rsrc_type)
{
	int ret;

	switch (rsrc_type) {
	case 0: /* galpa fw handle */
		ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
		ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_fw() failed rc=%i cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	case 1: /* cq queue_addr */
		ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
		ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_queue() failed rc=%i cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	default:
		ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
			 rsrc_type, cq->cq_number);
		return -EINVAL;
	}

	return 0;
}

/* Dispatch a QP mmap request: rsrc 0 = fw galpa, 1 = rqueue, 2 = squeue. */
static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
			u32 rsrc_type)
{
	int ret;

	switch (rsrc_type) {
	case 0: /* galpa fw handle */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
		ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "remap_pfn_range() failed ret=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return -ENOMEM;
		}
		break;

	case 1: /* qp rqueue_addr */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
				      &qp->mm_count_rqueue);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	case 2: /* qp squeue_addr */
		ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
		ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
				      &qp->mm_count_squeue);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	default:
		/* fixed message typo: was "qp=num=%x" */
		ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
			 rsrc_type, qp->ib_qp.qp_num);
		return -EINVAL;
	}

	return 0;
}

/*
 * mmap entry point.  The pgoff encodes the target:
 *   bits  0..24 - idr handle of the CQ/QP
 *   bits 25..26 - resource type (fw galpa / rqueue / squeue)
 *   bit      27 - queue type (0 = CQ, 1 = QP)
 * The object looked up via the idr must belong to @context.
 */
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	u64 fileoffset = vma->vm_pgoff;
	u32 idr_handle = fileoffset & 0x1FFFFFF;
	u32 q_type = (fileoffset >> 27) & 0x1;	  /* CQ, QP,...        */
	u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
	/* was u32: the helpers return negative errnos, keep them signed */
	int ret;
	struct ehca_cq *cq;
	struct ehca_qp *qp;
	struct ib_uobject *uobject;

	switch (q_type) {
	case  0: /* CQ */
		read_lock(&ehca_cq_idr_lock);
		cq = idr_find(&ehca_cq_idr, idr_handle);
		read_unlock(&ehca_cq_idr_lock);

		/* make sure this mmap really belongs to the authorized user */
		if (!cq)
			return -EINVAL;

		if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
			return -EINVAL;

		ret = ehca_mmap_cq(vma, cq, rsrc_type);
		if (unlikely(ret)) {
			ehca_err(cq->ib_cq.device,
				 "ehca_mmap_cq() failed rc=%i cq_num=%x",
				 ret, cq->cq_number);
			return ret;
		}
		break;

	case 1: /* QP */
		read_lock(&ehca_qp_idr_lock);
		qp = idr_find(&ehca_qp_idr, idr_handle);
		read_unlock(&ehca_qp_idr_lock);

		/* make sure this mmap really belongs to the authorized user */
		if (!qp)
			return -EINVAL;

		uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
		if (!uobject || uobject->context != context)
			return -EINVAL;

		ret = ehca_mmap_qp(vma, qp, rsrc_type);
		if (unlikely(ret)) {
			ehca_err(qp->ib_qp.device,
				 "ehca_mmap_qp() failed rc=%i qp_num=%x",
				 ret, qp->ib_qp.qp_num);
			return ret;
		}
		break;

	default:
		ehca_gen_err("bad queue type %x", q_type);
		return -EINVAL;
	}

	return 0;
}
gpl-2.0
AnguisCaptor/PwnKernel_Hammerhead_L
drivers/scsi/libsas/sas_task.c
9977
1083
#include <linux/kernel.h> #include <linux/export.h> #include <scsi/sas.h> #include <scsi/libsas.h> /* fill task_status_struct based on SSP response frame */ void sas_ssp_task_response(struct device *dev, struct sas_task *task, struct ssp_response_iu *iu) { struct task_status_struct *tstat = &task->task_status; tstat->resp = SAS_TASK_COMPLETE; if (iu->datapres == 0) tstat->stat = iu->status; else if (iu->datapres == 1) tstat->stat = iu->resp_data[3]; else if (iu->datapres == 2) { tstat->stat = SAM_STAT_CHECK_CONDITION; tstat->buf_valid_size = min_t(int, SAS_STATUS_BUF_SIZE, be32_to_cpu(iu->sense_data_len)); memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); if (iu->status != SAM_STAT_CHECK_CONDITION) dev_printk(KERN_WARNING, dev, "dev %llx sent sense data, but " "stat(%x) is not CHECK CONDITION\n", SAS_ADDR(task->dev->sas_addr), iu->status); } else /* when datapres contains corrupt/unknown value... */ tstat->stat = SAM_STAT_CHECK_CONDITION; } EXPORT_SYMBOL_GPL(sas_ssp_task_response);
gpl-2.0
barnacles10/ChopSuey
arch/mips/bcm63xx/cs.c
13561
3250
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/log2.h> #include <bcm63xx_cpu.h> #include <bcm63xx_io.h> #include <bcm63xx_regs.h> #include <bcm63xx_cs.h> static DEFINE_SPINLOCK(bcm63xx_cs_lock); /* * check if given chip select exists */ static int is_valid_cs(unsigned int cs) { if (cs > 6) return 0; return 1; } /* * Configure chipselect base address and size (bytes). * Size must be a power of two between 8k and 256M. */ int bcm63xx_set_cs_base(unsigned int cs, u32 base, unsigned int size) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; /* sanity check on size */ if (size != roundup_pow_of_two(size)) return -EINVAL; if (size < 8 * 1024 || size > 256 * 1024 * 1024) return -EINVAL; val = (base & MPI_CSBASE_BASE_MASK); /* 8k => 0 - 256M => 15 */ val |= (ilog2(size) - ilog2(8 * 1024)) << MPI_CSBASE_SIZE_SHIFT; spin_lock_irqsave(&bcm63xx_cs_lock, flags); bcm_mpi_writel(val, MPI_CSBASE_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_base); /* * configure chipselect timing (ns) */ int bcm63xx_set_cs_timing(unsigned int cs, unsigned int wait, unsigned int setup, unsigned int hold) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; spin_lock_irqsave(&bcm63xx_cs_lock, flags); val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); val &= ~(MPI_CSCTL_WAIT_MASK); val &= ~(MPI_CSCTL_SETUP_MASK); val &= ~(MPI_CSCTL_HOLD_MASK); val |= wait << MPI_CSCTL_WAIT_SHIFT; val |= setup << MPI_CSCTL_SETUP_SHIFT; val |= hold << MPI_CSCTL_HOLD_SHIFT; bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_timing); /* * configure other chipselect parameter (data bus 
size, ...) */ int bcm63xx_set_cs_param(unsigned int cs, u32 params) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; /* none of this fields apply to pcmcia */ if (cs == MPI_CS_PCMCIA_COMMON || cs == MPI_CS_PCMCIA_ATTR || cs == MPI_CS_PCMCIA_IO) return -EINVAL; spin_lock_irqsave(&bcm63xx_cs_lock, flags); val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); val &= ~(MPI_CSCTL_DATA16_MASK); val &= ~(MPI_CSCTL_SYNCMODE_MASK); val &= ~(MPI_CSCTL_TSIZE_MASK); val &= ~(MPI_CSCTL_ENDIANSWAP_MASK); val |= params; bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_param); /* * set cs status (enable/disable) */ int bcm63xx_set_cs_status(unsigned int cs, int enable) { unsigned long flags; u32 val; if (!is_valid_cs(cs)) return -EINVAL; spin_lock_irqsave(&bcm63xx_cs_lock, flags); val = bcm_mpi_readl(MPI_CSCTL_REG(cs)); if (enable) val |= MPI_CSCTL_ENABLE_MASK; else val &= ~MPI_CSCTL_ENABLE_MASK; bcm_mpi_writel(val, MPI_CSCTL_REG(cs)); spin_unlock_irqrestore(&bcm63xx_cs_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_set_cs_status);
gpl-2.0
mathkid95/linux_samsung_ics
drivers/media/dvb/ttpci/av7110_ipack.c
14841
7991
#include "dvb_filter.h" #include "av7110_ipack.h" #include <linux/string.h> /* for memcpy() */ #include <linux/vmalloc.h> void av7110_ipack_reset(struct ipack *p) { p->found = 0; p->cid = 0; p->plength = 0; p->flag1 = 0; p->flag2 = 0; p->hlength = 0; p->mpeg = 0; p->check = 0; p->which = 0; p->done = 0; p->count = 0; } int av7110_ipack_init(struct ipack *p, int size, void (*func)(u8 *buf, int size, void *priv)) { if (!(p->buf = vmalloc(size*sizeof(u8)))) { printk(KERN_WARNING "Couldn't allocate memory for ipack\n"); return -ENOMEM; } p->size = size; p->func = func; p->repack_subids = 0; av7110_ipack_reset(p); return 0; } void av7110_ipack_free(struct ipack *p) { vfree(p->buf); } static void send_ipack(struct ipack *p) { int off; struct dvb_audio_info ai; int ac3_off = 0; int streamid = 0; int nframes = 0; int f = 0; switch (p->mpeg) { case 2: if (p->count < 10) return; p->buf[3] = p->cid; p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8); p->buf[5] = (u8)((p->count - 6) & 0x00ff); if (p->repack_subids && p->cid == PRIVATE_STREAM1) { off = 9 + p->buf[8]; streamid = p->buf[off]; if ((streamid & 0xf8) == 0x80) { ai.off = 0; ac3_off = ((p->buf[off + 2] << 8)| p->buf[off + 3]); if (ac3_off < p->count) f = dvb_filter_get_ac3info(p->buf + off + 3 + ac3_off, p->count - ac3_off, &ai, 0); if (!f) { nframes = (p->count - off - 3 - ac3_off) / ai.framesize + 1; p->buf[off + 2] = (ac3_off >> 8) & 0xff; p->buf[off + 3] = (ac3_off) & 0xff; p->buf[off + 1] = nframes; ac3_off += nframes * ai.framesize - p->count; } } } p->func(p->buf, p->count, p->data); p->buf[6] = 0x80; p->buf[7] = 0x00; p->buf[8] = 0x00; p->count = 9; if (p->repack_subids && p->cid == PRIVATE_STREAM1 && (streamid & 0xf8) == 0x80) { p->count += 4; p->buf[9] = streamid; p->buf[10] = (ac3_off >> 8) & 0xff; p->buf[11] = (ac3_off) & 0xff; p->buf[12] = 0; } break; case 1: if (p->count < 8) return; p->buf[3] = p->cid; p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8); p->buf[5] = (u8)((p->count - 6) & 0x00ff); 
p->func(p->buf, p->count, p->data); p->buf[6] = 0x0f; p->count = 7; break; } } void av7110_ipack_flush(struct ipack *p) { if (p->plength != MMAX_PLENGTH - 6 || p->found <= 6) return; p->plength = p->found - 6; p->found = 0; send_ipack(p); av7110_ipack_reset(p); } static void write_ipack(struct ipack *p, const u8 *data, int count) { u8 headr[3] = { 0x00, 0x00, 0x01 }; if (p->count < 6) { memcpy(p->buf, headr, 3); p->count = 6; } if (p->count + count < p->size){ memcpy(p->buf+p->count, data, count); p->count += count; } else { int rest = p->size - p->count; memcpy(p->buf+p->count, data, rest); p->count += rest; send_ipack(p); if (count - rest > 0) write_ipack(p, data + rest, count - rest); } } int av7110_ipack_instant_repack (const u8 *buf, int count, struct ipack *p) { int l; int c = 0; while (c < count && (p->mpeg == 0 || (p->mpeg == 1 && p->found < 7) || (p->mpeg == 2 && p->found < 9)) && (p->found < 5 || !p->done)) { switch (p->found) { case 0: case 1: if (buf[c] == 0x00) p->found++; else p->found = 0; c++; break; case 2: if (buf[c] == 0x01) p->found++; else if (buf[c] == 0) p->found = 2; else p->found = 0; c++; break; case 3: p->cid = 0; switch (buf[c]) { case PROG_STREAM_MAP: case PRIVATE_STREAM2: case PROG_STREAM_DIR: case ECM_STREAM : case EMM_STREAM : case PADDING_STREAM : case DSM_CC_STREAM : case ISO13522_STREAM: p->done = 1; /* fall through */ case PRIVATE_STREAM1: case VIDEO_STREAM_S ... VIDEO_STREAM_E: case AUDIO_STREAM_S ... 
AUDIO_STREAM_E: p->found++; p->cid = buf[c]; c++; break; default: p->found = 0; break; } break; case 4: if (count-c > 1) { p->plen[0] = buf[c]; c++; p->plen[1] = buf[c]; c++; p->found += 2; p->plength = (p->plen[0] << 8) | p->plen[1]; } else { p->plen[0] = buf[c]; p->found++; return count; } break; case 5: p->plen[1] = buf[c]; c++; p->found++; p->plength = (p->plen[0] << 8) | p->plen[1]; break; case 6: if (!p->done) { p->flag1 = buf[c]; c++; p->found++; if ((p->flag1 & 0xc0) == 0x80) p->mpeg = 2; else { p->hlength = 0; p->which = 0; p->mpeg = 1; p->flag2 = 0; } } break; case 7: if (!p->done && p->mpeg == 2) { p->flag2 = buf[c]; c++; p->found++; } break; case 8: if (!p->done && p->mpeg == 2) { p->hlength = buf[c]; c++; p->found++; } break; } } if (c == count) return count; if (!p->plength) p->plength = MMAX_PLENGTH - 6; if (p->done || ((p->mpeg == 2 && p->found >= 9) || (p->mpeg == 1 && p->found >= 7))) { switch (p->cid) { case AUDIO_STREAM_S ... AUDIO_STREAM_E: case VIDEO_STREAM_S ... VIDEO_STREAM_E: case PRIVATE_STREAM1: if (p->mpeg == 2 && p->found == 9) { write_ipack(p, &p->flag1, 1); write_ipack(p, &p->flag2, 1); write_ipack(p, &p->hlength, 1); } if (p->mpeg == 1 && p->found == 7) write_ipack(p, &p->flag1, 1); if (p->mpeg == 2 && (p->flag2 & PTS_ONLY) && p->found < 14) { while (c < count && p->found < 14) { p->pts[p->found - 9] = buf[c]; write_ipack(p, buf + c, 1); c++; p->found++; } if (c == count) return count; } if (p->mpeg == 1 && p->which < 2000) { if (p->found == 7) { p->check = p->flag1; p->hlength = 1; } while (!p->which && c < count && p->check == 0xff){ p->check = buf[c]; write_ipack(p, buf + c, 1); c++; p->found++; p->hlength++; } if (c == count) return count; if ((p->check & 0xc0) == 0x40 && !p->which) { p->check = buf[c]; write_ipack(p, buf + c, 1); c++; p->found++; p->hlength++; p->which = 1; if (c == count) return count; p->check = buf[c]; write_ipack(p, buf + c, 1); c++; p->found++; p->hlength++; p->which = 2; if (c == count) return count; } if 
(p->which == 1) { p->check = buf[c]; write_ipack(p, buf + c, 1); c++; p->found++; p->hlength++; p->which = 2; if (c == count) return count; } if ((p->check & 0x30) && p->check != 0xff) { p->flag2 = (p->check & 0xf0) << 2; p->pts[0] = p->check; p->which = 3; } if (c == count) return count; if (p->which > 2){ if ((p->flag2 & PTS_DTS_FLAGS) == PTS_ONLY) { while (c < count && p->which < 7) { p->pts[p->which - 2] = buf[c]; write_ipack(p, buf + c, 1); c++; p->found++; p->which++; p->hlength++; } if (c == count) return count; } else if ((p->flag2 & PTS_DTS_FLAGS) == PTS_DTS) { while (c < count && p->which < 12) { if (p->which < 7) p->pts[p->which - 2] = buf[c]; write_ipack(p, buf + c, 1); c++; p->found++; p->which++; p->hlength++; } if (c == count) return count; } p->which = 2000; } } while (c < count && p->found < p->plength + 6) { l = count - c; if (l + p->found > p->plength + 6) l = p->plength + 6 - p->found; write_ipack(p, buf + c, l); p->found += l; c += l; } break; } if (p->done) { if (p->found + count - c < p->plength + 6) { p->found += count - c; c = count; } else { c += p->plength + 6 - p->found; p->found = p->plength + 6; } } if (p->plength && p->found == p->plength + 6) { send_ipack(p); av7110_ipack_reset(p); if (c < count) av7110_ipack_instant_repack(buf + c, count - c, p); } } return count; }
gpl-2.0
ShinySide/G531M
drivers/input/misc/da9055_onkey.c
250
4106
/*
 * ON pin driver for Dialog DA9055 PMICs
 *
 * Copyright(c) 2012 Dialog Semiconductor Ltd.
 *
 * Author: David Dajun Chen <dchen@diasemi.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>

/* Per-device driver state. */
struct da9055_onkey {
	struct da9055 *da9055;		/* parent MFD chip handle */
	struct input_dev *input;	/* input device reporting KEY_POWER */
	struct delayed_work work;	/* poll used to detect key release */
};

/*
 * Poll STATUS_A for the onkey state.  The hardware only interrupts on
 * assertion, so release is detected by rescheduling this query (via the
 * delayed work) every 10 ms until the status bit clears, at which point
 * a KEY_POWER release event is reported.
 *
 * NOTE(review): a negative return from da9055_reg_read() also falls
 * through to the `if (key_stat)` reschedule below, so a read error keeps
 * the poll alive — presumably an intentional retry; confirm.
 */
static void da9055_onkey_query(struct da9055_onkey *onkey)
{
	int key_stat;

	key_stat = da9055_reg_read(onkey->da9055, DA9055_REG_STATUS_A);
	if (key_stat < 0) {
		dev_err(onkey->da9055->dev,
			"Failed to read onkey event %d\n", key_stat);
	} else {
		key_stat &= DA9055_NOKEY_STS;
		/*
		 * Onkey status bit is cleared when onkey button is released.
		 */
		if (!key_stat) {
			input_report_key(onkey->input, KEY_POWER, 0);
			input_sync(onkey->input);
		}
	}

	/*
	 * Interrupt is generated only when the ONKEY pin is asserted.
	 * Hence the deassertion of the pin is simulated through work queue.
	 */
	if (key_stat)
		schedule_delayed_work(&onkey->work, msecs_to_jiffies(10));
}

/* Delayed-work trampoline: re-run the status query. */
static void da9055_onkey_work(struct work_struct *work)
{
	struct da9055_onkey *onkey = container_of(work, struct da9055_onkey,
						  work.work);

	da9055_onkey_query(onkey);
}

/*
 * Threaded IRQ handler: the pin just asserted, so report the key press
 * immediately, then start the release-polling cycle.
 */
static irqreturn_t da9055_onkey_irq(int irq, void *data)
{
	struct da9055_onkey *onkey = data;

	input_report_key(onkey->input, KEY_POWER, 1);
	input_sync(onkey->input);

	da9055_onkey_query(onkey);

	return IRQ_HANDLED;
}

/*
 * Probe: allocate state and the input device, request the (regmap-mapped)
 * ONKEY virtual IRQ, then register the input device.  The onkey struct is
 * devm-allocated; the input device and IRQ are released manually on the
 * error paths below and in remove().
 */
static int da9055_onkey_probe(struct platform_device *pdev)
{
	struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
	struct da9055_onkey *onkey;
	struct input_dev *input_dev;
	int irq, err;

	irq = platform_get_irq_byname(pdev, "ONKEY");
	if (irq < 0) {
		/*
		 * NOTE(review): the original error code in `irq` is logged
		 * but -EINVAL is returned in its place.
		 */
		dev_err(&pdev->dev,
			"Failed to get an IRQ for input device, %d\n", irq);
		return -EINVAL;
	}

	onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL);
	if (!onkey) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		return -ENOMEM;
	}

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&pdev->dev, "Failed to allocate memory\n");
		return -ENOMEM;
	}

	onkey->input = input_dev;
	onkey->da9055 = da9055;
	input_dev->name = "da9055-onkey";
	input_dev->phys = "da9055-onkey/input0";
	input_dev->dev.parent = &pdev->dev;

	/* Only KEY_POWER events are generated. */
	input_dev->evbit[0] = BIT_MASK(EV_KEY);
	__set_bit(KEY_POWER, input_dev->keybit);

	INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);

	/* Translate the MFD's hardware IRQ to its regmap virtual IRQ. */
	irq = regmap_irq_get_virq(da9055->irq_data, irq);
	err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "ONKEY", onkey);
	if (err < 0) {
		dev_err(&pdev->dev,
			"Failed to register ONKEY IRQ %d, error = %d\n",
			irq, err);
		goto err_free_input;
	}

	err = input_register_device(input_dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to register input device, %d\n",
			err);
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, onkey);

	return 0;

err_free_irq:
	free_irq(irq, onkey);
	cancel_delayed_work_sync(&onkey->work);
err_free_input:
	input_free_device(input_dev);

	return err;
}

/* Remove: tear down in reverse order — IRQ, pending poll, input device. */
static int da9055_onkey_remove(struct platform_device *pdev)
{
	struct da9055_onkey *onkey = platform_get_drvdata(pdev);
	int irq = platform_get_irq_byname(pdev, "ONKEY");

	irq = regmap_irq_get_virq(onkey->da9055->irq_data, irq);
	free_irq(irq, onkey);
	cancel_delayed_work_sync(&onkey->work);
	input_unregister_device(onkey->input);

	return 0;
}

static struct platform_driver da9055_onkey_driver = {
	.probe	= da9055_onkey_probe,
	.remove	= da9055_onkey_remove,
	.driver = {
		.name	= "da9055-onkey",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(da9055_onkey_driver);

MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
MODULE_DESCRIPTION("Onkey driver for DA9055");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9055-onkey");
gpl-2.0
mukelarvin-price/linux_imx
arch/powerpc/sysdev/fsl_soc.c
250
6042
/* * FSL SoC setup code * * Maintained by Kumar Gala (see MAINTAINERS for contact information) * * 2006 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/export.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/spi/spi.h> #include <linux/fsl_devices.h> #include <linux/fs_enet_pd.h> #include <linux/fs_uart_pd.h> #include <asm/system.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/time.h> #include <asm/prom.h> #include <asm/machdep.h> #include <sysdev/fsl_soc.h> #include <mm/mmu_decl.h> #include <asm/cpm2.h> #include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */ extern void init_fcc_ioports(struct fs_platform_info*); extern void init_fec_ioports(struct fs_platform_info*); extern void init_smc_ioports(struct fs_uart_platform_info*); static phys_addr_t immrbase = -1; phys_addr_t get_immrbase(void) { struct device_node *soc; if (immrbase != -1) return immrbase; soc = of_find_node_by_type(NULL, "soc"); if (soc) { int size; u32 naddr; const u32 *prop = of_get_property(soc, "#address-cells", &size); if (prop && size == 4) naddr = *prop; else naddr = 2; prop = of_get_property(soc, "ranges", &size); if (prop) immrbase = of_translate_address(soc, prop + naddr); of_node_put(soc); } return immrbase; } EXPORT_SYMBOL(get_immrbase); static u32 sysfreq = -1; u32 fsl_get_sys_freq(void) { struct device_node *soc; const u32 *prop; int size; if (sysfreq != 
-1) return sysfreq; soc = of_find_node_by_type(NULL, "soc"); if (!soc) return -1; prop = of_get_property(soc, "clock-frequency", &size); if (!prop || size != sizeof(*prop) || *prop == 0) prop = of_get_property(soc, "bus-frequency", &size); if (prop && size == sizeof(*prop)) sysfreq = *prop; of_node_put(soc); return sysfreq; } EXPORT_SYMBOL(fsl_get_sys_freq); #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) static u32 brgfreq = -1; u32 get_brgfreq(void) { struct device_node *node; const unsigned int *prop; int size; if (brgfreq != -1) return brgfreq; node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg"); if (node) { prop = of_get_property(node, "clock-frequency", &size); if (prop && size == 4) brgfreq = *prop; of_node_put(node); return brgfreq; } /* Legacy device binding -- will go away when no users are left. */ node = of_find_node_by_type(NULL, "cpm"); if (!node) node = of_find_compatible_node(NULL, NULL, "fsl,qe"); if (!node) node = of_find_node_by_type(NULL, "qe"); if (node) { prop = of_get_property(node, "brg-frequency", &size); if (prop && size == 4) brgfreq = *prop; if (brgfreq == -1 || brgfreq == 0) { prop = of_get_property(node, "bus-frequency", &size); if (prop && size == 4) brgfreq = *prop / 2; } of_node_put(node); } return brgfreq; } EXPORT_SYMBOL(get_brgfreq); static u32 fs_baudrate = -1; u32 get_baudrate(void) { struct device_node *node; if (fs_baudrate != -1) return fs_baudrate; node = of_find_node_by_type(NULL, "serial"); if (node) { int size; const unsigned int *prop = of_get_property(node, "current-speed", &size); if (prop) fs_baudrate = *prop; of_node_put(node); } return fs_baudrate; } EXPORT_SYMBOL(get_baudrate); #endif /* CONFIG_CPM2 */ #ifdef CONFIG_FIXED_PHY static int __init of_add_fixed_phys(void) { int ret; struct device_node *np; u32 *fixed_link; struct fixed_phy_status status = {}; for_each_node_by_name(np, "ethernet") { fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL); if (!fixed_link) 
continue; status.link = 1; status.duplex = fixed_link[1]; status.speed = fixed_link[2]; status.pause = fixed_link[3]; status.asym_pause = fixed_link[4]; ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status); if (ret) { of_node_put(np); return ret; } } return 0; } arch_initcall(of_add_fixed_phys); #endif /* CONFIG_FIXED_PHY */ #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) static __be32 __iomem *rstcr; static int __init setup_rstcr(void) { struct device_node *np; for_each_node_by_name(np, "global-utilities") { if ((of_get_property(np, "fsl,has-rstcr", NULL))) { rstcr = of_iomap(np, 0) + 0xb0; if (!rstcr) printk (KERN_ERR "Error: reset control " "register not mapped!\n"); break; } } if (!rstcr && ppc_md.restart == fsl_rstcr_restart) printk(KERN_ERR "No RSTCR register, warm reboot won't work\n"); if (np) of_node_put(np); return 0; } arch_initcall(setup_rstcr); void fsl_rstcr_restart(char *cmd) { local_irq_disable(); if (rstcr) /* set reset control register */ out_be32(rstcr, 0x2); /* HRESET_REQ */ while (1) ; } #endif #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) struct platform_diu_data_ops diu_ops; EXPORT_SYMBOL(diu_ops); #endif /* * Restart the current partition * * This function should be assigned to the ppc_md.restart function pointer, * to initiate a partition restart when we're running under the Freescale * hypervisor. */ void fsl_hv_restart(char *cmd) { pr_info("hv restart\n"); fh_partition_restart(-1); } /* * Halt the current partition * * This function should be assigned to the ppc_md.power_off and ppc_md.halt * function pointers, to shut down the partition when we're running under * the Freescale hypervisor. */ void fsl_hv_halt(void) { pr_info("hv exit\n"); fh_partition_stop(-1); }
gpl-2.0
pitah81/android_kernel_elephone_p8000
arch/tile/kernel/pci-dma.c
2298
16165
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/swiotlb.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <asm/tlbflush.h> #include <asm/homecache.h> /* Generic DMA mapping functions: */ /* * Allocate what Linux calls "coherent" memory. On TILEPro this is * uncached memory; on TILE-Gx it is hash-for-home memory. */ #ifdef __tilepro__ #define PAGE_HOME_DMA PAGE_HOME_UNCACHED #else #define PAGE_HOME_DMA PAGE_HOME_HASH #endif static void *tile_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); int node = dev_to_node(dev); int order = get_order(size); struct page *pg; dma_addr_t addr; gfp |= __GFP_ZERO; /* * If the mask specifies that the memory be in the first 4 GB, then * we force the allocation to come from the DMA zone. We also * force the node to 0 since that's the only node where the DMA * zone isn't empty. If the mask size is smaller than 32 bits, we * may still not be able to guarantee a suitable memory address, in * which case we will return NULL. But such devices are uncommon. 
*/ if (dma_mask <= DMA_BIT_MASK(32)) { gfp |= GFP_DMA; node = 0; } pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); if (pg == NULL) return NULL; addr = page_to_phys(pg); if (addr + size > dma_mask) { __homecache_free_pages(pg, order); return NULL; } *dma_handle = addr; return page_address(pg); } /* * Free memory that was allocated with tile_dma_alloc_coherent. */ static void tile_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { homecache_free_pages((unsigned long)vaddr, get_order(size)); } /* * The map routines "map" the specified address range for DMA * accesses. The memory belongs to the device after this call is * issued, until it is unmapped with dma_unmap_single. * * We don't need to do any mapping, we just flush the address range * out of the cache and return a DMA address. * * The unmap routines do whatever is necessary before the processor * accesses the memory again, and must be called before the driver * touches the memory. We can get away with a cache invalidate if we * can count on nothing having been touched. */ /* Set up a single page for DMA access. */ static void __dma_prep_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { /* * Flush the page from cache if necessary. * On tilegx, data is delivered to hash-for-home L3; on tilepro, * data is delivered direct to memory. * * NOTE: If we were just doing DMA_TO_DEVICE we could optimize * this to be a "flush" not a "finv" and keep some of the * state in cache across the DMA operation, but it doesn't seem * worth creating the necessary flush_buffer_xxx() infrastructure. */ int home = page_home(page); switch (home) { case PAGE_HOME_HASH: #ifdef __tilegx__ return; #endif break; case PAGE_HOME_UNCACHED: #ifdef __tilepro__ return; #endif break; case PAGE_HOME_IMMUTABLE: /* Should be going to the device only. 
*/ BUG_ON(direction == DMA_FROM_DEVICE || direction == DMA_BIDIRECTIONAL); return; case PAGE_HOME_INCOHERENT: /* Incoherent anyway, so no need to work hard here. */ return; default: BUG_ON(home < 0 || home >= NR_CPUS); break; } homecache_finv_page(page); #ifdef DEBUG_ALIGNMENT /* Warn if the region isn't cacheline aligned. */ if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1))) pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n", PFN_PHYS(page_to_pfn(page)) + offset, size); #endif } /* Make the page ready to be read by the core. */ static void __dma_complete_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { #ifdef __tilegx__ switch (page_home(page)) { case PAGE_HOME_HASH: /* I/O device delivered data the way the cpu wanted it. */ break; case PAGE_HOME_INCOHERENT: /* Incoherent anyway, so no need to work hard here. */ break; case PAGE_HOME_IMMUTABLE: /* Extra read-only copies are not a problem. */ break; default: /* Flush the bogus hash-for-home I/O entries to memory. 
*/ homecache_finv_map_page(page, PAGE_HOME_HASH); break; } #endif } static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size, enum dma_data_direction direction) { struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); unsigned long offset = dma_addr & (PAGE_SIZE - 1); size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); while (size != 0) { __dma_prep_page(page, offset, bytes, direction); size -= bytes; ++page; offset = 0; bytes = min((size_t)PAGE_SIZE, size); } } static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size, enum dma_data_direction direction) { struct page *page = pfn_to_page(PFN_DOWN(dma_addr)); unsigned long offset = dma_addr & (PAGE_SIZE - 1); size_t bytes = min(size, (size_t)(PAGE_SIZE - offset)); while (size != 0) { __dma_complete_page(page, offset, bytes, direction); size -= bytes; ++page; offset = 0; bytes = min((size_t)PAGE_SIZE, size); } } static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); WARN_ON(nents == 0 || sglist->length == 0); for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); __dma_prep_pa_range(sg->dma_address, sg->length, direction); #ifdef CONFIG_NEED_SG_DMA_LENGTH sg->dma_length = sg->length; #endif } return nents; } static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); __dma_complete_pa_range(sg->dma_address, sg->length, direction); } } static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { BUG_ON(!valid_dma_direction(direction)); BUG_ON(offset + size > PAGE_SIZE); 
__dma_prep_page(page, offset, size, direction); return page_to_pa(page) + offset; } static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { BUG_ON(!valid_dma_direction(direction)); __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), dma_address & PAGE_OFFSET, size, direction); } static void tile_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { BUG_ON(!valid_dma_direction(direction)); __dma_complete_pa_range(dma_handle, size, direction); } static void tile_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { __dma_prep_pa_range(dma_handle, size, direction); } static void tile_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); WARN_ON(nelems == 0 || sglist->length == 0); for_each_sg(sglist, sg, nelems, i) { dma_sync_single_for_cpu(dev, sg->dma_address, sg_dma_len(sg), direction); } } static void tile_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); WARN_ON(nelems == 0 || sglist->length == 0); for_each_sg(sglist, sg, nelems, i) { dma_sync_single_for_device(dev, sg->dma_address, sg_dma_len(sg), direction); } } static inline int tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } static inline int tile_dma_supported(struct device *dev, u64 mask) { return 1; } static struct dma_map_ops tile_default_dma_map_ops = { .alloc = tile_dma_alloc_coherent, .free = tile_dma_free_coherent, .map_page = tile_dma_map_page, .unmap_page = tile_dma_unmap_page, .map_sg = tile_dma_map_sg, .unmap_sg = tile_dma_unmap_sg, .sync_single_for_cpu = 
tile_dma_sync_single_for_cpu, .sync_single_for_device = tile_dma_sync_single_for_device, .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu, .sync_sg_for_device = tile_dma_sync_sg_for_device, .mapping_error = tile_dma_mapping_error, .dma_supported = tile_dma_supported }; struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; EXPORT_SYMBOL(tile_dma_map_ops); /* Generic PCI DMA mapping functions */ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { int node = dev_to_node(dev); int order = get_order(size); struct page *pg; dma_addr_t addr; gfp |= __GFP_ZERO; pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); if (pg == NULL) return NULL; addr = page_to_phys(pg); *dma_handle = phys_to_dma(dev, addr); return page_address(pg); } /* * Free memory that was allocated with tile_pci_dma_alloc_coherent. */ static void tile_pci_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) { homecache_free_pages((unsigned long)vaddr, get_order(size)); } static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); WARN_ON(nents == 0 || sglist->length == 0); for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); __dma_prep_pa_range(sg->dma_address, sg->length, direction); sg->dma_address = phys_to_dma(dev, sg->dma_address); #ifdef CONFIG_NEED_SG_DMA_LENGTH sg->dma_length = sg->length; #endif } return nents; } static void tile_pci_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); for_each_sg(sglist, sg, nents, i) { sg->dma_address = sg_phys(sg); __dma_complete_pa_range(sg->dma_address, sg->length, 
direction); } } static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { BUG_ON(!valid_dma_direction(direction)); BUG_ON(offset + size > PAGE_SIZE); __dma_prep_page(page, offset, size, direction); return phys_to_dma(dev, page_to_pa(page) + offset); } static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) { BUG_ON(!valid_dma_direction(direction)); dma_address = dma_to_phys(dev, dma_address); __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), dma_address & PAGE_OFFSET, size, direction); } static void tile_pci_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { BUG_ON(!valid_dma_direction(direction)); dma_handle = dma_to_phys(dev, dma_handle); __dma_complete_pa_range(dma_handle, size, direction); } static void tile_pci_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { dma_handle = dma_to_phys(dev, dma_handle); __dma_prep_pa_range(dma_handle, size, direction); } static void tile_pci_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); WARN_ON(nelems == 0 || sglist->length == 0); for_each_sg(sglist, sg, nelems, i) { dma_sync_single_for_cpu(dev, sg->dma_address, sg_dma_len(sg), direction); } } static void tile_pci_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction) { struct scatterlist *sg; int i; BUG_ON(!valid_dma_direction(direction)); WARN_ON(nelems == 0 || sglist->length == 0); for_each_sg(sglist, sg, nelems, i) { dma_sync_single_for_device(dev, sg->dma_address, sg_dma_len(sg), direction); } } static inline int 
tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return 0; } static inline int tile_pci_dma_supported(struct device *dev, u64 mask) { return 1; } static struct dma_map_ops tile_pci_default_dma_map_ops = { .alloc = tile_pci_dma_alloc_coherent, .free = tile_pci_dma_free_coherent, .map_page = tile_pci_dma_map_page, .unmap_page = tile_pci_dma_unmap_page, .map_sg = tile_pci_dma_map_sg, .unmap_sg = tile_pci_dma_unmap_sg, .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu, .sync_single_for_device = tile_pci_dma_sync_single_for_device, .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu, .sync_sg_for_device = tile_pci_dma_sync_sg_for_device, .mapping_error = tile_pci_dma_mapping_error, .dma_supported = tile_pci_dma_supported }; struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; EXPORT_SYMBOL(gx_pci_dma_map_ops); /* PCI DMA mapping functions for legacy PCI devices */ #ifdef CONFIG_SWIOTLB static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) { gfp |= GFP_DMA; return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); } static void tile_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, struct dma_attrs *attrs) { swiotlb_free_coherent(dev, size, vaddr, dma_addr); } static struct dma_map_ops pci_swiotlb_dma_ops = { .alloc = tile_swiotlb_alloc_coherent, .free = tile_swiotlb_free_coherent, .map_page = swiotlb_map_page, .unmap_page = swiotlb_unmap_page, .map_sg = swiotlb_map_sg_attrs, .unmap_sg = swiotlb_unmap_sg_attrs, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .dma_supported = swiotlb_dma_supported, .mapping_error = swiotlb_dma_mapping_error, }; struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; #else struct dma_map_ops 
*gx_legacy_pci_dma_map_ops; #endif EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK int dma_set_coherent_mask(struct device *dev, u64 mask) { struct dma_map_ops *dma_ops = get_dma_ops(dev); /* Handle legacy PCI devices with limited memory addressability. */ if (((dma_ops == gx_pci_dma_map_ops) || (dma_ops == gx_legacy_pci_dma_map_ops)) && (mask <= DMA_BIT_MASK(32))) { if (mask > dev->archdata.max_direct_dma_addr) mask = dev->archdata.max_direct_dma_addr; } if (!dma_supported(dev, mask)) return -EIO; dev->coherent_dma_mask = mask; return 0; } EXPORT_SYMBOL(dma_set_coherent_mask); #endif
gpl-2.0
xwliu/Cubietruck_Plus-kernel-source
drivers/input/ff-core.c
4858
9133
/* * Force feedback support for Linux input subsystem * * Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com> * Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt #include <linux/input.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/slab.h> /* * Check that the effect_id is a valid effect and whether the user * is the owner */ static int check_effect_access(struct ff_device *ff, int effect_id, struct file *file) { if (effect_id < 0 || effect_id >= ff->max_effects || !ff->effect_owners[effect_id]) return -EINVAL; if (file && ff->effect_owners[effect_id] != file) return -EACCES; return 0; } /* * Checks whether 2 effects can be combined together */ static inline int check_effects_compatible(struct ff_effect *e1, struct ff_effect *e2) { return e1->type == e2->type && (e1->type != FF_PERIODIC || e1->u.periodic.waveform == e2->u.periodic.waveform); } /* * Convert an effect into compatible one */ static int compat_effect(struct ff_device *ff, struct ff_effect *effect) { int magnitude; switch (effect->type) { case FF_RUMBLE: if (!test_bit(FF_PERIODIC, ff->ffbit)) return -EINVAL; /* * calculate manginude of sine wave as average of rumble's * 2/3 of strong magnitude 
and 1/3 of weak magnitude */ magnitude = effect->u.rumble.strong_magnitude / 3 + effect->u.rumble.weak_magnitude / 6; effect->type = FF_PERIODIC; effect->u.periodic.waveform = FF_SINE; effect->u.periodic.period = 50; effect->u.periodic.magnitude = max(magnitude, 0x7fff); effect->u.periodic.offset = 0; effect->u.periodic.phase = 0; effect->u.periodic.envelope.attack_length = 0; effect->u.periodic.envelope.attack_level = 0; effect->u.periodic.envelope.fade_length = 0; effect->u.periodic.envelope.fade_level = 0; return 0; default: /* Let driver handle conversion */ return 0; } } /** * input_ff_upload() - upload effect into force-feedback device * @dev: input device * @effect: effect to be uploaded * @file: owner of the effect */ int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file) { struct ff_device *ff = dev->ff; struct ff_effect *old; int ret = 0; int id; if (!test_bit(EV_FF, dev->evbit)) return -ENOSYS; if (effect->type < FF_EFFECT_MIN || effect->type > FF_EFFECT_MAX || !test_bit(effect->type, dev->ffbit)) { pr_debug("invalid or not supported effect type in upload\n"); return -EINVAL; } if (effect->type == FF_PERIODIC && (effect->u.periodic.waveform < FF_WAVEFORM_MIN || effect->u.periodic.waveform > FF_WAVEFORM_MAX || !test_bit(effect->u.periodic.waveform, dev->ffbit))) { pr_debug("invalid or not supported wave form in upload\n"); return -EINVAL; } if (!test_bit(effect->type, ff->ffbit)) { ret = compat_effect(ff, effect); if (ret) return ret; } mutex_lock(&ff->mutex); if (effect->id == -1) { for (id = 0; id < ff->max_effects; id++) if (!ff->effect_owners[id]) break; if (id >= ff->max_effects) { ret = -ENOSPC; goto out; } effect->id = id; old = NULL; } else { id = effect->id; ret = check_effect_access(ff, id, file); if (ret) goto out; old = &ff->effects[id]; if (!check_effects_compatible(effect, old)) { ret = -EINVAL; goto out; } } ret = ff->upload(dev, effect, old); if (ret) goto out; spin_lock_irq(&dev->event_lock); 
ff->effects[id] = *effect; ff->effect_owners[id] = file; spin_unlock_irq(&dev->event_lock); out: mutex_unlock(&ff->mutex); return ret; } EXPORT_SYMBOL_GPL(input_ff_upload); /* * Erases the effect if the requester is also the effect owner. The mutex * should already be locked before calling this function. */ static int erase_effect(struct input_dev *dev, int effect_id, struct file *file) { struct ff_device *ff = dev->ff; int error; error = check_effect_access(ff, effect_id, file); if (error) return error; spin_lock_irq(&dev->event_lock); ff->playback(dev, effect_id, 0); ff->effect_owners[effect_id] = NULL; spin_unlock_irq(&dev->event_lock); if (ff->erase) { error = ff->erase(dev, effect_id); if (error) { spin_lock_irq(&dev->event_lock); ff->effect_owners[effect_id] = file; spin_unlock_irq(&dev->event_lock); return error; } } return 0; } /** * input_ff_erase - erase a force-feedback effect from device * @dev: input device to erase effect from * @effect_id: id of the ffect to be erased * @file: purported owner of the request * * This function erases a force-feedback effect from specified device. * The effect will only be erased if it was uploaded through the same * file handle that is requesting erase. 
*/ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file) { struct ff_device *ff = dev->ff; int ret; if (!test_bit(EV_FF, dev->evbit)) return -ENOSYS; mutex_lock(&ff->mutex); ret = erase_effect(dev, effect_id, file); mutex_unlock(&ff->mutex); return ret; } EXPORT_SYMBOL_GPL(input_ff_erase); /* * flush_effects - erase all effects owned by a file handle */ static int flush_effects(struct input_dev *dev, struct file *file) { struct ff_device *ff = dev->ff; int i; pr_debug("flushing now\n"); mutex_lock(&ff->mutex); for (i = 0; i < ff->max_effects; i++) erase_effect(dev, i, file); mutex_unlock(&ff->mutex); return 0; } /** * input_ff_event() - generic handler for force-feedback events * @dev: input device to send the effect to * @type: event type (anything but EV_FF is ignored) * @code: event code * @value: event value */ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct ff_device *ff = dev->ff; if (type != EV_FF) return 0; switch (code) { case FF_GAIN: if (!test_bit(FF_GAIN, dev->ffbit) || value > 0xffff) break; ff->set_gain(dev, value); break; case FF_AUTOCENTER: if (!test_bit(FF_AUTOCENTER, dev->ffbit) || value > 0xffff) break; ff->set_autocenter(dev, value); break; default: if (check_effect_access(ff, code, NULL) == 0) ff->playback(dev, code, value); break; } return 0; } EXPORT_SYMBOL_GPL(input_ff_event); /** * input_ff_create() - create force-feedback device * @dev: input device supporting force-feedback * @max_effects: maximum number of effects supported by the device * * This function allocates all necessary memory for a force feedback * portion of an input device and installs all default handlers. * @dev->ffbit should be already set up before calling this function. 
* Once ff device is created you need to setup its upload, erase, * playback and other handlers before registering input device */ int input_ff_create(struct input_dev *dev, unsigned int max_effects) { struct ff_device *ff; size_t ff_dev_size; int i; if (!max_effects) { pr_err("cannot allocate device without any effects\n"); return -EINVAL; } ff_dev_size = sizeof(struct ff_device) + max_effects * sizeof(struct file *); if (ff_dev_size < max_effects) /* overflow */ return -EINVAL; ff = kzalloc(ff_dev_size, GFP_KERNEL); if (!ff) return -ENOMEM; ff->effects = kcalloc(max_effects, sizeof(struct ff_effect), GFP_KERNEL); if (!ff->effects) { kfree(ff); return -ENOMEM; } ff->max_effects = max_effects; mutex_init(&ff->mutex); dev->ff = ff; dev->flush = flush_effects; dev->event = input_ff_event; __set_bit(EV_FF, dev->evbit); /* Copy "true" bits into ff device bitmap */ for (i = 0; i <= FF_MAX; i++) if (test_bit(i, dev->ffbit)) __set_bit(i, ff->ffbit); /* we can emulate RUMBLE with periodic effects */ if (test_bit(FF_PERIODIC, ff->ffbit)) __set_bit(FF_RUMBLE, dev->ffbit); return 0; } EXPORT_SYMBOL_GPL(input_ff_create); /** * input_ff_destroy() - frees force feedback portion of input device * @dev: input device supporting force feedback * * This function is only needed in error path as input core will * automatically free force feedback structures when device is * destroyed. */ void input_ff_destroy(struct input_dev *dev) { struct ff_device *ff = dev->ff; __clear_bit(EV_FF, dev->evbit); if (ff) { if (ff->destroy) ff->destroy(ff); kfree(ff->private); kfree(ff->effects); kfree(ff); dev->ff = NULL; } } EXPORT_SYMBOL_GPL(input_ff_destroy);
gpl-2.0
ckh469/vagabond
drivers/pci/hotplug/shpchp_ctrl.c
6650
18838
/* * Standard Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/pci.h> #include "../pci.h" #include "shpchp.h" static void interrupt_event_handler(struct work_struct *work); static int shpchp_enable_slot(struct slot *p_slot); static int shpchp_disable_slot(struct slot *p_slot); static int queue_interrupt_event(struct slot *p_slot, u32 event_type) { struct event_info *info; info = kmalloc(sizeof(*info), GFP_ATOMIC); if (!info) return -ENOMEM; info->event_type = event_type; info->p_slot = p_slot; INIT_WORK(&info->work, interrupt_event_handler); queue_work(shpchp_wq, &info->work); return 0; } u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u32 event_type; /* Attention Button Change */ ctrl_dbg(ctrl, "Attention button interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 
p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); /* * Button pressed - See if need to TAKE ACTION!!! */ ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot)); event_type = INT_BUTTON_PRESS; queue_interrupt_event(p_slot, event_type); return 0; } u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u8 getstatus; u32 event_type; /* Switch Change */ ctrl_dbg(ctrl, "Switch interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); ctrl_dbg(ctrl, "Card present %x Power status %x\n", p_slot->presence_save, p_slot->pwr_save); if (getstatus) { /* * Switch opened */ ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_OPEN; if (p_slot->pwr_save && p_slot->presence_save) { event_type = INT_POWER_FAULT; ctrl_err(ctrl, "Surprise Removal of card\n"); } } else { /* * Switch closed */ ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_CLOSE; } queue_interrupt_event(p_slot, event_type); return 1; } u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u32 event_type; /* Presence Change */ ctrl_dbg(ctrl, "Presence/Notify input change\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); /* * Save the presence state */ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); if (p_slot->presence_save) { /* * Card Present */ ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot)); event_type = INT_PRESENCE_ON; } else { /* * Not Present */ ctrl_info(ctrl, "Card not present on Slot(%s)\n", slot_name(p_slot)); event_type = INT_PRESENCE_OFF; } queue_interrupt_event(p_slot, event_type); return 1; } u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u32 event_type; /* 
Power fault */ ctrl_dbg(ctrl, "Power fault interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { /* * Power fault Cleared */ ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", slot_name(p_slot)); p_slot->status = 0x00; event_type = INT_POWER_FAULT_CLEAR; } else { /* * Power fault */ ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot)); event_type = INT_POWER_FAULT; /* set power fault status for this board */ p_slot->status = 0xFF; ctrl_info(ctrl, "Power fault bit %x set\n", hp_slot); } queue_interrupt_event(p_slot, event_type); return 1; } /* The following routines constitute the bulk of the hotplug controller logic */ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot, enum pci_bus_speed speed) { int rc = 0; ctrl_dbg(ctrl, "Change speed to %d\n", speed); if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) { ctrl_err(ctrl, "%s: Issue of set bus speed mode command " "failed\n", __func__); return WRONG_BUS_FREQUENCY; } return rc; } static int fix_bus_speed(struct controller *ctrl, struct slot *pslot, u8 flag, enum pci_bus_speed asp, enum pci_bus_speed bsp, enum pci_bus_speed msp) { int rc = 0; /* * If other slots on the same bus are occupied, we cannot * change the bus speed. */ if (flag) { if (asp < bsp) { ctrl_err(ctrl, "Speed of bus %x and adapter %x " "mismatch\n", bsp, asp); rc = WRONG_BUS_FREQUENCY; } return rc; } if (asp < msp) { if (bsp != asp) rc = change_bus_speed(ctrl, pslot, asp); } else { if (bsp != msp) rc = change_bus_speed(ctrl, pslot, msp); } return rc; } /** * board_added - Called after a board has been added to the system. * @p_slot: target &slot * * Turns power on for the board. * Configures board. 
*/ static int board_added(struct slot *p_slot) { u8 hp_slot; u8 slots_not_empty = 0; int rc = 0; enum pci_bus_speed asp, bsp, msp; struct controller *ctrl = p_slot->ctrl; struct pci_bus *parent = ctrl->pci_dev->subordinate; hp_slot = p_slot->device - ctrl->slot_device_offset; ctrl_dbg(ctrl, "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n", __func__, p_slot->device, ctrl->slot_device_offset, hp_slot); /* Power on slot without connecting to bus */ rc = p_slot->hpc_ops->power_on_slot(p_slot); if (rc) { ctrl_err(ctrl, "Failed to power on slot\n"); return -1; } if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { if (slots_not_empty) return WRONG_BUS_FREQUENCY; if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { ctrl_err(ctrl, "%s: Issue of set bus speed mode command" " failed\n", __func__); return WRONG_BUS_FREQUENCY; } /* turn on board, blink green LED, turn off Amber LED */ if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; } } rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); if (rc) { ctrl_err(ctrl, "Can't get adapter speed or " "bus mode mismatch\n"); return WRONG_BUS_FREQUENCY; } bsp = ctrl->pci_dev->bus->cur_bus_speed; msp = ctrl->pci_dev->bus->max_bus_speed; /* Check if there are other slots or devices on the same bus */ if (!list_empty(&ctrl->pci_dev->subordinate->devices)) slots_not_empty = 1; ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d," " max_bus_speed %d\n", __func__, slots_not_empty, asp, bsp, msp); rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp); if (rc) return rc; /* turn on board, blink green LED, turn off Amber LED */ if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; } /* Wait for ~1 second */ msleep(1000); ctrl_dbg(ctrl, "%s: slot status = %x\n", __func__, p_slot->status); /* Check for a power fault */ if 
(p_slot->status == 0xFF) { /* power fault occurred, but it was benign */ ctrl_dbg(ctrl, "%s: Power fault\n", __func__); rc = POWER_FAILURE; p_slot->status = 0; goto err_exit; } if (shpchp_configure_device(p_slot)) { ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n", pci_domain_nr(parent), p_slot->bus, p_slot->device); goto err_exit; } p_slot->status = 0; p_slot->is_a_board = 0x01; p_slot->pwr_save = 1; p_slot->hpc_ops->green_led_on(p_slot); return 0; err_exit: /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", __func__); return rc; } return(rc); } /** * remove_board - Turns off slot and LEDs * @p_slot: target &slot */ static int remove_board(struct slot *p_slot) { struct controller *ctrl = p_slot->ctrl; u8 hp_slot; int rc; if (shpchp_unconfigure_device(p_slot)) return(1); hp_slot = p_slot->device - ctrl->slot_device_offset; p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, hp_slot); /* Change status to shutdown */ if (p_slot->is_a_board) p_slot->status = 0x01; /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", __func__); return rc; } rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); if (rc) { ctrl_err(ctrl, "Issue of Set Attention command failed\n"); return rc; } p_slot->pwr_save = 0; p_slot->is_a_board = 0; return 0; } struct pushbutton_work_info { struct slot *p_slot; struct work_struct work; }; /** * shpchp_pushbutton_thread - handle pushbutton events * @work: &struct work_struct to be handled * * Scheduled procedure to handle blocking stuff for the pushbuttons. * Handles all pending events and exits. 
*/ static void shpchp_pushbutton_thread(struct work_struct *work) { struct pushbutton_work_info *info = container_of(work, struct pushbutton_work_info, work); struct slot *p_slot = info->p_slot; mutex_lock(&p_slot->lock); switch (p_slot->state) { case POWEROFF_STATE: mutex_unlock(&p_slot->lock); shpchp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWERON_STATE: mutex_unlock(&p_slot->lock); if (shpchp_enable_slot(p_slot)) p_slot->hpc_ops->green_led_off(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; default: break; } mutex_unlock(&p_slot->lock); kfree(info); } void shpchp_queue_pushbutton_work(struct work_struct *work) { struct slot *p_slot = container_of(work, struct slot, work.work); struct pushbutton_work_info *info; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", __func__); return; } info->p_slot = p_slot; INIT_WORK(&info->work, shpchp_pushbutton_thread); mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGOFF_STATE: p_slot->state = POWEROFF_STATE; break; case BLINKINGON_STATE: p_slot->state = POWERON_STATE; break; default: kfree(info); goto out; } queue_work(shpchp_ordered_wq, &info->work); out: mutex_unlock(&p_slot->lock); } static int update_slot_info (struct slot *slot) { struct hotplug_slot_info *info; int result; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; slot->hpc_ops->get_power_status(slot, &(info->power_status)); slot->hpc_ops->get_attention_status(slot, &(info->attention_status)); slot->hpc_ops->get_latch_status(slot, &(info->latch_status)); slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status)); result = pci_hp_change_slot_info(slot->hotplug_slot, info); kfree (info); return result; } /* * Note: This function must be called with slot->lock held */ static void handle_button_press_event(struct slot *p_slot) { u8 getstatus; struct controller *ctrl = p_slot->ctrl; switch 
(p_slot->state) { case STATIC_STATE: p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; ctrl_info(ctrl, "PCI slot #%s - powering off due to " "button press.\n", slot_name(p_slot)); } else { p_slot->state = BLINKINGON_STATE; ctrl_info(ctrl, "PCI slot #%s - powering on due to " "button press.\n", slot_name(p_slot)); } /* blink green LED and turn off amber */ p_slot->hpc_ops->green_led_blink(p_slot); p_slot->hpc_ops->set_attention_status(p_slot, 0); queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ); break; case BLINKINGOFF_STATE: case BLINKINGON_STATE: /* * Cancel if we are still blinking; this means that we * press the attention again before the 5 sec. limit * expires to cancel hot-add or hot-remove */ ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) p_slot->hpc_ops->green_led_on(p_slot); else p_slot->hpc_ops->green_led_off(p_slot); p_slot->hpc_ops->set_attention_status(p_slot, 0); ctrl_info(ctrl, "PCI slot #%s - action canceled due to " "button press\n", slot_name(p_slot)); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: case POWERON_STATE: /* * Ignore if the slot is on power-on or power-off state; * this means that the previous attention button action * to hot-add or hot-remove is undergoing */ ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); update_slot_info(p_slot); break; default: ctrl_warn(ctrl, "Not a valid state\n"); break; } } static void interrupt_event_handler(struct work_struct *work) { struct event_info *info = container_of(work, struct event_info, work); struct slot *p_slot = info->p_slot; mutex_lock(&p_slot->lock); switch (info->event_type) { case INT_BUTTON_PRESS: handle_button_press_event(p_slot); break; case INT_POWER_FAULT: ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__); p_slot->hpc_ops->set_attention_status(p_slot, 1); p_slot->hpc_ops->green_led_off(p_slot); break; 
default: update_slot_info(p_slot); break; } mutex_unlock(&p_slot->lock); kfree(info); } static int shpchp_enable_slot (struct slot *p_slot) { u8 getstatus = 0; int rc, retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; /* Check to see if (latch closed, card present, power off) */ mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Already enabled on slot(%s)\n", slot_name(p_slot)); goto out; } p_slot->is_a_board = 1; /* We have to save the presence info for these slots */ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save)); ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) || (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)) && p_slot->ctrl->num_slots == 1) { /* handle amd pogo errata; this must be done before enable */ amd_pogo_errata_save_misc_reg(p_slot); retval = board_added(p_slot); /* handle amd pogo errata; this must be done after enable */ amd_pogo_errata_restore_misc_reg(p_slot); } else retval = board_added(p_slot); if (retval) { p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); } update_slot_info(p_slot); out: mutex_unlock(&p_slot->ctrl->crit_sect); return retval; } static int shpchp_disable_slot (struct slot *p_slot) { u8 getstatus = 0; int rc, retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; if (!p_slot->ctrl) return -ENODEV; /* Check to 
see if (latch closed, card present, power on) */ mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "Already disabled on slot(%s)\n", slot_name(p_slot)); goto out; } retval = remove_board(p_slot); update_slot_info(p_slot); out: mutex_unlock(&p_slot->ctrl->crit_sect); return retval; } int shpchp_sysfs_enable_slot(struct slot *p_slot) { int retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGON_STATE: cancel_delayed_work(&p_slot->work); case STATIC_STATE: p_slot->state = POWERON_STATE; mutex_unlock(&p_slot->lock); retval = shpchp_enable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWERON_STATE: ctrl_info(ctrl, "Slot %s is already in powering on state\n", slot_name(p_slot)); break; case BLINKINGOFF_STATE: case POWEROFF_STATE: ctrl_info(ctrl, "Already enabled on slot %s\n", slot_name(p_slot)); break; default: ctrl_err(ctrl, "Not a valid state on slot %s\n", slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); return retval; } int shpchp_sysfs_disable_slot(struct slot *p_slot) { int retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGOFF_STATE: cancel_delayed_work(&p_slot->work); case STATIC_STATE: p_slot->state = POWEROFF_STATE; mutex_unlock(&p_slot->lock); retval = shpchp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: ctrl_info(ctrl, "Slot %s is already in powering off state\n", slot_name(p_slot)); break; case 
BLINKINGON_STATE: case POWERON_STATE: ctrl_info(ctrl, "Already disabled on slot %s\n", slot_name(p_slot)); break; default: ctrl_err(ctrl, "Not a valid state on slot %s\n", slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); return retval; }
gpl-2.0
dragonpt/Kernel_3.4.67_KK_Wiko_DarkMoon
net/ipv6/netfilter/ip6t_frag.c
8698
3973
/* Kernel module to match FRAG parameters. */ /* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ipv6.h> #include <linux/types.h> #include <net/checksum.h> #include <net/ipv6.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter_ipv6/ip6t_frag.h> MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xtables: IPv6 fragment match"); MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>"); /* Returns 1 if the id is matched by the range, 0 otherwise */ static inline bool id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert) { bool r; pr_debug("id_match:%c 0x%x <= 0x%x <= 0x%x\n", invert ? '!' : ' ', min, id, max); r = (id >= min && id <= max) ^ invert; pr_debug(" result %s\n", r ? 
"PASS" : "FAILED"); return r; } static bool frag_mt6(const struct sk_buff *skb, struct xt_action_param *par) { struct frag_hdr _frag; const struct frag_hdr *fh; const struct ip6t_frag *fraginfo = par->matchinfo; unsigned int ptr; int err; err = ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL); if (err < 0) { if (err != -ENOENT) par->hotdrop = true; return false; } fh = skb_header_pointer(skb, ptr, sizeof(_frag), &_frag); if (fh == NULL) { par->hotdrop = true; return false; } pr_debug("INFO %04X ", fh->frag_off); pr_debug("OFFSET %04X ", ntohs(fh->frag_off) & ~0x7); pr_debug("RES %02X %04X", fh->reserved, ntohs(fh->frag_off) & 0x6); pr_debug("MF %04X ", fh->frag_off & htons(IP6_MF)); pr_debug("ID %u %08X\n", ntohl(fh->identification), ntohl(fh->identification)); pr_debug("IPv6 FRAG id %02X ", id_match(fraginfo->ids[0], fraginfo->ids[1], ntohl(fh->identification), !!(fraginfo->invflags & IP6T_FRAG_INV_IDS))); pr_debug("res %02X %02X%04X %02X ", fraginfo->flags & IP6T_FRAG_RES, fh->reserved, ntohs(fh->frag_off) & 0x6, !((fraginfo->flags & IP6T_FRAG_RES) && (fh->reserved || (ntohs(fh->frag_off) & 0x06)))); pr_debug("first %02X %02X %02X ", fraginfo->flags & IP6T_FRAG_FST, ntohs(fh->frag_off) & ~0x7, !((fraginfo->flags & IP6T_FRAG_FST) && (ntohs(fh->frag_off) & ~0x7))); pr_debug("mf %02X %02X %02X ", fraginfo->flags & IP6T_FRAG_MF, ntohs(fh->frag_off) & IP6_MF, !((fraginfo->flags & IP6T_FRAG_MF) && !((ntohs(fh->frag_off) & IP6_MF)))); pr_debug("last %02X %02X %02X\n", fraginfo->flags & IP6T_FRAG_NMF, ntohs(fh->frag_off) & IP6_MF, !((fraginfo->flags & IP6T_FRAG_NMF) && (ntohs(fh->frag_off) & IP6_MF))); return (fh != NULL) && id_match(fraginfo->ids[0], fraginfo->ids[1], ntohl(fh->identification), !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)) && !((fraginfo->flags & IP6T_FRAG_RES) && (fh->reserved || (ntohs(fh->frag_off) & 0x6))) && !((fraginfo->flags & IP6T_FRAG_FST) && (ntohs(fh->frag_off) & ~0x7)) && !((fraginfo->flags & IP6T_FRAG_MF) && !(ntohs(fh->frag_off) & IP6_MF)) 
&& !((fraginfo->flags & IP6T_FRAG_NMF) && (ntohs(fh->frag_off) & IP6_MF)); } static int frag_mt6_check(const struct xt_mtchk_param *par) { const struct ip6t_frag *fraginfo = par->matchinfo; if (fraginfo->invflags & ~IP6T_FRAG_INV_MASK) { pr_debug("unknown flags %X\n", fraginfo->invflags); return -EINVAL; } return 0; } static struct xt_match frag_mt6_reg __read_mostly = { .name = "frag", .family = NFPROTO_IPV6, .match = frag_mt6, .matchsize = sizeof(struct ip6t_frag), .checkentry = frag_mt6_check, .me = THIS_MODULE, }; static int __init frag_mt6_init(void) { return xt_register_match(&frag_mt6_reg); } static void __exit frag_mt6_exit(void) { xt_unregister_match(&frag_mt6_reg); } module_init(frag_mt6_init); module_exit(frag_mt6_exit);
gpl-2.0
hanshuebner/linux-xlnx
fs/udf/partition.c
8698
8854
/* * partition.c * * PURPOSE * Partition handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998-2001 Ben Fennema * * HISTORY * * 12/06/98 blf Created file. * */ #include "udfdecl.h" #include "udf_sb.h" #include "udf_i.h" #include <linux/fs.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/mutex.h> uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; if (partition >= sbi->s_partitions) { udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n", block, partition, offset); return 0xFFFFFFFF; } map = &sbi->s_partmaps[partition]; if (map->s_partition_func) return map->s_partition_func(sb, block, partition, offset); else return map->s_partition_root + block + offset; } uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct buffer_head *bh = NULL; uint32_t newblock; uint32_t index; uint32_t loc; struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_virtual_data *vdata; struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode); map = &sbi->s_partmaps[partition]; vdata = &map->s_type_specific.s_virtual; if (block > vdata->s_num_entries) { udf_debug("Trying to access block beyond end of VAT (%d max %d)\n", block, vdata->s_num_entries); return 0xFFFFFFFF; } if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data + vdata->s_start_offset))[block]); goto translate; } index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t); if (block >= index) { block -= index; newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t))); index = block % 
(sb->s_blocksize / sizeof(uint32_t)); } else { newblock = 0; index = vdata->s_start_offset / sizeof(uint32_t) + block; } loc = udf_block_map(sbi->s_vat_inode, newblock); bh = sb_bread(sb, loc); if (!bh) { udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n", sb, block, partition, loc, index); return 0xFFFFFFFF; } loc = le32_to_cpu(((__le32 *)bh->b_data)[index]); brelse(bh); translate: if (iinfo->i_location.partitionReferenceNum == partition) { udf_debug("recursive call to udf_get_pblock!\n"); return 0xFFFFFFFF; } return udf_get_pblock(sb, loc, iinfo->i_location.partitionReferenceNum, offset); } inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { return udf_get_pblock_virt15(sb, block, partition, offset); } uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { int i; struct sparingTable *st = NULL; struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; uint32_t packet; struct udf_sparing_data *sdata; map = &sbi->s_partmaps[partition]; sdata = &map->s_type_specific.s_sparing; packet = (block + offset) & ~(sdata->s_packet_len - 1); for (i = 0; i < 4; i++) { if (sdata->s_spar_map[i] != NULL) { st = (struct sparingTable *) sdata->s_spar_map[i]->b_data; break; } } if (st) { for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) { struct sparingEntry *entry = &st->mapEntry[i]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc >= 0xFFFFFFF0) break; else if (origLoc == packet) return le32_to_cpu(entry->mappedLocation) + ((block + offset) & (sdata->s_packet_len - 1)); else if (origLoc > packet) break; } } return map->s_partition_root + block + offset; } int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) { struct udf_sparing_data *sdata; struct sparingTable *st = NULL; struct sparingEntry mapEntry; uint32_t packet; int i, j, k, l; struct udf_sb_info *sbi = UDF_SB(sb); u16 reallocationTableLen; 
struct buffer_head *bh; int ret = 0; mutex_lock(&sbi->s_alloc_mutex); for (i = 0; i < sbi->s_partitions; i++) { struct udf_part_map *map = &sbi->s_partmaps[i]; if (old_block > map->s_partition_root && old_block < map->s_partition_root + map->s_partition_len) { sdata = &map->s_type_specific.s_sparing; packet = (old_block - map->s_partition_root) & ~(sdata->s_packet_len - 1); for (j = 0; j < 4; j++) if (sdata->s_spar_map[j] != NULL) { st = (struct sparingTable *) sdata->s_spar_map[j]->b_data; break; } if (!st) { ret = 1; goto out; } reallocationTableLen = le16_to_cpu(st->reallocationTableLen); for (k = 0; k < reallocationTableLen; k++) { struct sparingEntry *entry = &st->mapEntry[k]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc == 0xFFFFFFFF) { for (; j < 4; j++) { int len; bh = sdata->s_spar_map[j]; if (!bh) continue; st = (struct sparingTable *) bh->b_data; entry->origLocation = cpu_to_le32(packet); len = sizeof(struct sparingTable) + reallocationTableLen * sizeof(struct sparingEntry); udf_update_tag((char *)st, len); mark_buffer_dirty(bh); } *new_block = le32_to_cpu( entry->mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } else if (origLoc == packet) { *new_block = le32_to_cpu( entry->mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } else if (origLoc > packet) break; } for (l = k; l < reallocationTableLen; l++) { struct sparingEntry *entry = &st->mapEntry[l]; u32 origLoc = le32_to_cpu(entry->origLocation); if (origLoc != 0xFFFFFFFF) continue; for (; j < 4; j++) { bh = sdata->s_spar_map[j]; if (!bh) continue; st = (struct sparingTable *)bh->b_data; mapEntry = st->mapEntry[l]; mapEntry.origLocation = cpu_to_le32(packet); memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry)); st->mapEntry[k] = mapEntry; udf_update_tag((char *)st, sizeof(struct sparingTable) + reallocationTableLen * sizeof(struct 
sparingEntry)); mark_buffer_dirty(bh); } *new_block = le32_to_cpu( st->mapEntry[k].mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); ret = 0; goto out; } ret = 1; goto out; } /* if old_block */ } if (i == sbi->s_partitions) { /* outside of partitions */ /* for now, fail =) */ ret = 1; } out: mutex_unlock(&sbi->s_alloc_mutex); return ret; } static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block, uint16_t partition, uint32_t offset) { struct super_block *sb = inode->i_sb; struct udf_part_map *map; struct kernel_lb_addr eloc; uint32_t elen; sector_t ext_offset; struct extent_position epos = {}; uint32_t phyblock; if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) != (EXT_RECORDED_ALLOCATED >> 30)) phyblock = 0xFFFFFFFF; else { map = &UDF_SB(sb)->s_partmaps[partition]; /* map to sparable/physical partition desc */ phyblock = udf_get_pblock(sb, eloc.logicalBlockNum, map->s_partition_num, ext_offset + offset); } brelse(epos.bh); return phyblock; } uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) { struct udf_sb_info *sbi = UDF_SB(sb); struct udf_part_map *map; struct udf_meta_data *mdata; uint32_t retblk; struct inode *inode; udf_debug("READING from METADATA\n"); map = &sbi->s_partmaps[partition]; mdata = &map->s_type_specific.s_metadata; inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe; /* We shouldn't mount such media... */ BUG_ON(!inode); retblk = udf_try_read_meta(inode, block, partition, offset); if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) { udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n"); if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) { mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, map->s_partition_num); mdata->s_flags |= MF_MIRROR_FE_LOADED; } inode = mdata->s_mirror_fe; if (!inode) return 0xFFFFFFFF; retblk = udf_try_read_meta(inode, block, partition, offset); } return retblk; }
gpl-2.0
ayushtyagi28/android_kernel_cyanogen_msm8994
sound/pci/emu10k1/irq.c
12794
6691
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Creative Labs, Inc. * Routines for IRQ control of EMU10K1 chips * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/emu10k1.h> irqreturn_t snd_emu10k1_interrupt(int irq, void *dev_id) { struct snd_emu10k1 *emu = dev_id; unsigned int status, status2, orig_status, orig_status2; int handled = 0; int timeout = 0; while (((status = inl(emu->port + IPR)) != 0) && (timeout < 1000)) { timeout++; orig_status = status; handled = 1; if ((status & 0xffffffff) == 0xffffffff) { snd_printk(KERN_INFO "snd-emu10k1: Suspected sound card removal\n"); break; } if (status & IPR_PCIERROR) { snd_printk(KERN_ERR "interrupt: PCI error\n"); snd_emu10k1_intr_disable(emu, INTE_PCIERRORENABLE); status &= ~IPR_PCIERROR; } if (status & (IPR_VOLINCR|IPR_VOLDECR|IPR_MUTE)) { if (emu->hwvol_interrupt) emu->hwvol_interrupt(emu, status); else snd_emu10k1_intr_disable(emu, INTE_VOLINCRENABLE|INTE_VOLDECRENABLE|INTE_MUTEENABLE); status &= ~(IPR_VOLINCR|IPR_VOLDECR|IPR_MUTE); } if (status & IPR_CHANNELLOOP) { int voice; int voice_max = status & IPR_CHANNELNUMBERMASK; u32 val; struct snd_emu10k1_voice *pvoice = emu->voices; val = snd_emu10k1_ptr_read(emu, CLIPL, 0); for (voice = 0; voice <= voice_max; voice++) { if 
(voice == 0x20) val = snd_emu10k1_ptr_read(emu, CLIPH, 0); if (val & 1) { if (pvoice->use && pvoice->interrupt != NULL) { pvoice->interrupt(emu, pvoice); snd_emu10k1_voice_intr_ack(emu, voice); } else { snd_emu10k1_voice_intr_disable(emu, voice); } } val >>= 1; pvoice++; } val = snd_emu10k1_ptr_read(emu, HLIPL, 0); for (voice = 0; voice <= voice_max; voice++) { if (voice == 0x20) val = snd_emu10k1_ptr_read(emu, HLIPH, 0); if (val & 1) { if (pvoice->use && pvoice->interrupt != NULL) { pvoice->interrupt(emu, pvoice); snd_emu10k1_voice_half_loop_intr_ack(emu, voice); } else { snd_emu10k1_voice_half_loop_intr_disable(emu, voice); } } val >>= 1; pvoice++; } status &= ~IPR_CHANNELLOOP; } status &= ~IPR_CHANNELNUMBERMASK; if (status & (IPR_ADCBUFFULL|IPR_ADCBUFHALFFULL)) { if (emu->capture_interrupt) emu->capture_interrupt(emu, status); else snd_emu10k1_intr_disable(emu, INTE_ADCBUFENABLE); status &= ~(IPR_ADCBUFFULL|IPR_ADCBUFHALFFULL); } if (status & (IPR_MICBUFFULL|IPR_MICBUFHALFFULL)) { if (emu->capture_mic_interrupt) emu->capture_mic_interrupt(emu, status); else snd_emu10k1_intr_disable(emu, INTE_MICBUFENABLE); status &= ~(IPR_MICBUFFULL|IPR_MICBUFHALFFULL); } if (status & (IPR_EFXBUFFULL|IPR_EFXBUFHALFFULL)) { if (emu->capture_efx_interrupt) emu->capture_efx_interrupt(emu, status); else snd_emu10k1_intr_disable(emu, INTE_EFXBUFENABLE); status &= ~(IPR_EFXBUFFULL|IPR_EFXBUFHALFFULL); } if (status & (IPR_MIDITRANSBUFEMPTY|IPR_MIDIRECVBUFEMPTY)) { if (emu->midi.interrupt) emu->midi.interrupt(emu, status); else snd_emu10k1_intr_disable(emu, INTE_MIDITXENABLE|INTE_MIDIRXENABLE); status &= ~(IPR_MIDITRANSBUFEMPTY|IPR_MIDIRECVBUFEMPTY); } if (status & (IPR_A_MIDITRANSBUFEMPTY2|IPR_A_MIDIRECVBUFEMPTY2)) { if (emu->midi2.interrupt) emu->midi2.interrupt(emu, status); else snd_emu10k1_intr_disable(emu, INTE_A_MIDITXENABLE2|INTE_A_MIDIRXENABLE2); status &= ~(IPR_A_MIDITRANSBUFEMPTY2|IPR_A_MIDIRECVBUFEMPTY2); } if (status & IPR_INTERVALTIMER) { if (emu->timer) 
snd_timer_interrupt(emu->timer, emu->timer->sticks); else snd_emu10k1_intr_disable(emu, INTE_INTERVALTIMERENB); status &= ~IPR_INTERVALTIMER; } if (status & (IPR_GPSPDIFSTATUSCHANGE|IPR_CDROMSTATUSCHANGE)) { if (emu->spdif_interrupt) emu->spdif_interrupt(emu, status); else snd_emu10k1_intr_disable(emu, INTE_GPSPDIFENABLE|INTE_CDSPDIFENABLE); status &= ~(IPR_GPSPDIFSTATUSCHANGE|IPR_CDROMSTATUSCHANGE); } if (status & IPR_FXDSP) { if (emu->dsp_interrupt) emu->dsp_interrupt(emu); else snd_emu10k1_intr_disable(emu, INTE_FXDSPENABLE); status &= ~IPR_FXDSP; } if (status & IPR_P16V) { while ((status2 = inl(emu->port + IPR2)) != 0) { u32 mask = INTE2_PLAYBACK_CH_0_LOOP; /* Full Loop */ struct snd_emu10k1_voice *pvoice = &(emu->p16v_voices[0]); struct snd_emu10k1_voice *cvoice = &(emu->p16v_capture_voice); //printk(KERN_INFO "status2=0x%x\n", status2); orig_status2 = status2; if(status2 & mask) { if(pvoice->use) { snd_pcm_period_elapsed(pvoice->epcm->substream); } else { snd_printk(KERN_ERR "p16v: status: 0x%08x, mask=0x%08x, pvoice=%p, use=%d\n", status2, mask, pvoice, pvoice->use); } } if(status2 & 0x110000) { //printk(KERN_INFO "capture int found\n"); if(cvoice->use) { //printk(KERN_INFO "capture period_elapsed\n"); snd_pcm_period_elapsed(cvoice->epcm->substream); } } outl(orig_status2, emu->port + IPR2); /* ack all */ } status &= ~IPR_P16V; } if (status) { unsigned int bits; snd_printk(KERN_ERR "emu10k1: unhandled interrupt: 0x%08x\n", status); //make sure any interrupts we don't handle are disabled: bits = INTE_FXDSPENABLE | INTE_PCIERRORENABLE | INTE_VOLINCRENABLE | INTE_VOLDECRENABLE | INTE_MUTEENABLE | INTE_MICBUFENABLE | INTE_ADCBUFENABLE | INTE_EFXBUFENABLE | INTE_GPSPDIFENABLE | INTE_CDSPDIFENABLE | INTE_INTERVALTIMERENB | INTE_MIDITXENABLE | INTE_MIDIRXENABLE; if (emu->audigy) bits |= INTE_A_MIDITXENABLE2 | INTE_A_MIDIRXENABLE2; snd_emu10k1_intr_disable(emu, bits); } outl(orig_status, emu->port + IPR); /* ack all */ } if (timeout == 1000) snd_printk(KERN_INFO 
"emu10k1 irq routine failure\n"); return IRQ_RETVAL(handled); }
gpl-2.0
CoRfr/linux
drivers/net/ethernet/intel/igb/igb_hwmon.c
251
7002
/******************************************************************************* Intel(R) Gigabit Ethernet Linux driver Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "igb.h" #include "e1000_82575.h" #include "e1000_hw.h" #include <linux/module.h> #include <linux/types.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/hwmon.h> #include <linux/pci.h> #ifdef CONFIG_IGB_HWMON static struct i2c_board_info i350_sensor_info = { I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), }; /* hwmon callback functions */ static ssize_t igb_hwmon_show_location(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, dev_attr); return sprintf(buf, "loc%u\n", igb_attr->sensor->location); } static ssize_t igb_hwmon_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, dev_attr); unsigned int value; /* reset the temp 
field */ igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw); value = igb_attr->sensor->temp; /* display millidegree */ value *= 1000; return sprintf(buf, "%u\n", value); } static ssize_t igb_hwmon_show_cautionthresh(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, dev_attr); unsigned int value = igb_attr->sensor->caution_thresh; /* display millidegree */ value *= 1000; return sprintf(buf, "%u\n", value); } static ssize_t igb_hwmon_show_maxopthresh(struct device *dev, struct device_attribute *attr, char *buf) { struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr, dev_attr); unsigned int value = igb_attr->sensor->max_op_thresh; /* display millidegree */ value *= 1000; return sprintf(buf, "%u\n", value); } /* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. * @ adapter: pointer to the adapter structure * @ offset: offset in the eeprom sensor data table * @ type: type of sensor data to display * * For each file we want in hwmon's sysfs interface we need a device_attribute * This is included in our hwmon_attr struct that contains the references to * the data structures we need to get the data to display. 
*/ static int igb_add_hwmon_attr(struct igb_adapter *adapter, unsigned int offset, int type) { int rc; unsigned int n_attr; struct hwmon_attr *igb_attr; n_attr = adapter->igb_hwmon_buff->n_hwmon; igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr]; switch (type) { case IGB_HWMON_TYPE_LOC: igb_attr->dev_attr.show = igb_hwmon_show_location; snprintf(igb_attr->name, sizeof(igb_attr->name), "temp%u_label", offset + 1); break; case IGB_HWMON_TYPE_TEMP: igb_attr->dev_attr.show = igb_hwmon_show_temp; snprintf(igb_attr->name, sizeof(igb_attr->name), "temp%u_input", offset + 1); break; case IGB_HWMON_TYPE_CAUTION: igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh; snprintf(igb_attr->name, sizeof(igb_attr->name), "temp%u_max", offset + 1); break; case IGB_HWMON_TYPE_MAX: igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh; snprintf(igb_attr->name, sizeof(igb_attr->name), "temp%u_crit", offset + 1); break; default: rc = -EPERM; return rc; } /* These always the same regardless of type */ igb_attr->sensor = &adapter->hw.mac.thermal_sensor_data.sensor[offset]; igb_attr->hw = &adapter->hw; igb_attr->dev_attr.store = NULL; igb_attr->dev_attr.attr.mode = S_IRUGO; igb_attr->dev_attr.attr.name = igb_attr->name; sysfs_attr_init(&igb_attr->dev_attr.attr); adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr; ++adapter->igb_hwmon_buff->n_hwmon; return 0; } static void igb_sysfs_del_adapter(struct igb_adapter *adapter) { } /* called from igb_main.c */ void igb_sysfs_exit(struct igb_adapter *adapter) { igb_sysfs_del_adapter(adapter); } /* called from igb_main.c */ int igb_sysfs_init(struct igb_adapter *adapter) { struct hwmon_buff *igb_hwmon; struct i2c_client *client; struct device *hwmon_dev; unsigned int i; int rc = 0; /* If this method isn't defined we don't support thermals */ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) goto exit; /* Don't create thermal hwmon interface if no sensors present */ rc = 
(adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)); if (rc) goto exit; igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon), GFP_KERNEL); if (!igb_hwmon) { rc = -ENOMEM; goto exit; } adapter->igb_hwmon_buff = igb_hwmon; for (i = 0; i < E1000_MAX_SENSORS; i++) { /* Only create hwmon sysfs entries for sensors that have * meaningful data. */ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0) continue; /* Bail if any hwmon attr struct fails to initialize */ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION); if (rc) goto exit; rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC); if (rc) goto exit; rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP); if (rc) goto exit; rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX); if (rc) goto exit; } /* init i2c_client */ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info); if (client == NULL) { dev_info(&adapter->pdev->dev, "Failed to create new i2c device.\n"); rc = -ENODEV; goto exit; } adapter->i2c_client = client; igb_hwmon->groups[0] = &igb_hwmon->group; igb_hwmon->group.attrs = igb_hwmon->attrs; hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, client->name, igb_hwmon, igb_hwmon->groups); if (IS_ERR(hwmon_dev)) { rc = PTR_ERR(hwmon_dev); goto err; } goto exit; err: igb_sysfs_del_adapter(adapter); exit: return rc; } #endif
gpl-2.0
xcaliburinhand/I9000-Reoriented-for-I897-Froyo
sound/soc/codecs/wm8753.c
507
55116
/* * wm8753.c -- WM8753 ALSA Soc Audio driver * * Copyright 2003 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Notes: * The WM8753 is a low power, high quality stereo codec with integrated PCM * codec designed for portable digital telephony applications. * * Dual DAI:- * * This driver support 2 DAI PCM's. This makes the default PCM available for * HiFi audio (e.g. MP3, ogg) playback/capture and the other PCM available for * voice. * * Please note that the voice PCM can be connected directly to a Bluetooth * codec or GSM modem and thus cannot be read or written to, although it is * available to be configured with snd_hw_params(), etc and kcontrols in the * normal alsa manner. * * Fast DAI switching:- * * The driver can now fast switch between the DAI configurations via a * an alsa kcontrol. This allows the PCM to remain open. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include <asm/div64.h> #include "wm8753.h" static int caps_charge = 2000; module_param(caps_charge, int, 0); MODULE_PARM_DESC(caps_charge, "WM8753 cap charge time (msecs)"); static void wm8753_set_dai_mode(struct snd_soc_codec *codec, unsigned int mode); /* * wm8753 register cache * We can't read the WM8753 register space when we * are using 2 wire for device control, so we cache them instead. 
*/ static const u16 wm8753_reg[] = { 0x0008, 0x0000, 0x000a, 0x000a, 0x0033, 0x0000, 0x0007, 0x00ff, 0x00ff, 0x000f, 0x000f, 0x007b, 0x0000, 0x0032, 0x0000, 0x00c3, 0x00c3, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0055, 0x0005, 0x0050, 0x0055, 0x0050, 0x0055, 0x0050, 0x0055, 0x0079, 0x0079, 0x0079, 0x0079, 0x0079, 0x0000, 0x0000, 0x0000, 0x0000, 0x0097, 0x0097, 0x0000, 0x0004, 0x0000, 0x0083, 0x0024, 0x01ba, 0x0000, 0x0083, 0x0024, 0x01ba, 0x0000, 0x0000, 0x0000 }; /* codec private data */ struct wm8753_priv { unsigned int sysclk; unsigned int pcmclk; struct snd_soc_codec codec; u16 reg_cache[ARRAY_SIZE(wm8753_reg)]; }; /* * read wm8753 register cache */ static inline unsigned int wm8753_read_reg_cache(struct snd_soc_codec *codec, unsigned int reg) { u16 *cache = codec->reg_cache; if (reg < 1 || reg >= (ARRAY_SIZE(wm8753_reg) + 1)) return -1; return cache[reg - 1]; } /* * write wm8753 register cache */ static inline void wm8753_write_reg_cache(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { u16 *cache = codec->reg_cache; if (reg < 1 || reg >= (ARRAY_SIZE(wm8753_reg) + 1)) return; cache[reg - 1] = value; } /* * write to the WM8753 register space */ static int wm8753_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { u8 data[2]; /* data is * D15..D9 WM8753 register offset * D8...D0 register data */ data[0] = (reg << 1) | ((value >> 8) & 0x0001); data[1] = value & 0x00ff; wm8753_write_reg_cache(codec, reg, value); if (codec->hw_write(codec->control_data, data, 2) == 2) return 0; else return -EIO; } #define wm8753_reset(c) wm8753_write(c, WM8753_RESET, 0) /* * WM8753 Controls */ static const char *wm8753_base[] = {"Linear Control", "Adaptive Boost"}; static const char *wm8753_base_filter[] = {"130Hz @ 48kHz", "200Hz @ 48kHz", "100Hz @ 16kHz", "400Hz @ 48kHz", "100Hz @ 8kHz", "200Hz @ 8kHz"}; static const char *wm8753_treble[] = {"8kHz", "4kHz"}; static 
const char *wm8753_alc_func[] = {"Off", "Right", "Left", "Stereo"}; static const char *wm8753_ng_type[] = {"Constant PGA Gain", "Mute ADC Output"}; static const char *wm8753_3d_func[] = {"Capture", "Playback"}; static const char *wm8753_3d_uc[] = {"2.2kHz", "1.5kHz"}; static const char *wm8753_3d_lc[] = {"200Hz", "500Hz"}; static const char *wm8753_deemp[] = {"None", "32kHz", "44.1kHz", "48kHz"}; static const char *wm8753_mono_mix[] = {"Stereo", "Left", "Right", "Mono"}; static const char *wm8753_dac_phase[] = {"Non Inverted", "Inverted"}; static const char *wm8753_line_mix[] = {"Line 1 + 2", "Line 1 - 2", "Line 1", "Line 2"}; static const char *wm8753_mono_mux[] = {"Line Mix", "Rx Mix"}; static const char *wm8753_right_mux[] = {"Line 2", "Rx Mix"}; static const char *wm8753_left_mux[] = {"Line 1", "Rx Mix"}; static const char *wm8753_rxmsel[] = {"RXP - RXN", "RXP + RXN", "RXP", "RXN"}; static const char *wm8753_sidetone_mux[] = {"Left PGA", "Mic 1", "Mic 2", "Right PGA"}; static const char *wm8753_mono2_src[] = {"Inverted Mono 1", "Left", "Right", "Left + Right"}; static const char *wm8753_out3[] = {"VREF", "ROUT2", "Left + Right"}; static const char *wm8753_out4[] = {"VREF", "Capture ST", "LOUT2"}; static const char *wm8753_radcsel[] = {"PGA", "Line or RXP-RXN", "Sidetone"}; static const char *wm8753_ladcsel[] = {"PGA", "Line or RXP-RXN", "Line"}; static const char *wm8753_mono_adc[] = {"Stereo", "Analogue Mix Left", "Analogue Mix Right", "Digital Mono Mix"}; static const char *wm8753_adc_hp[] = {"3.4Hz @ 48kHz", "82Hz @ 16k", "82Hz @ 8kHz", "170Hz @ 8kHz"}; static const char *wm8753_adc_filter[] = {"HiFi", "Voice"}; static const char *wm8753_mic_sel[] = {"Mic 1", "Mic 2", "Mic 3"}; static const char *wm8753_dai_mode[] = {"DAI 0", "DAI 1", "DAI 2", "DAI 3"}; static const char *wm8753_dat_sel[] = {"Stereo", "Left ADC", "Right ADC", "Channel Swap"}; static const char *wm8753_rout2_phase[] = {"Non Inverted", "Inverted"}; static const struct soc_enum wm8753_enum[] = 
{ SOC_ENUM_SINGLE(WM8753_BASS, 7, 2, wm8753_base), SOC_ENUM_SINGLE(WM8753_BASS, 4, 6, wm8753_base_filter), SOC_ENUM_SINGLE(WM8753_TREBLE, 6, 2, wm8753_treble), SOC_ENUM_SINGLE(WM8753_ALC1, 7, 4, wm8753_alc_func), SOC_ENUM_SINGLE(WM8753_NGATE, 1, 2, wm8753_ng_type), SOC_ENUM_SINGLE(WM8753_3D, 7, 2, wm8753_3d_func), SOC_ENUM_SINGLE(WM8753_3D, 6, 2, wm8753_3d_uc), SOC_ENUM_SINGLE(WM8753_3D, 5, 2, wm8753_3d_lc), SOC_ENUM_SINGLE(WM8753_DAC, 1, 4, wm8753_deemp), SOC_ENUM_SINGLE(WM8753_DAC, 4, 4, wm8753_mono_mix), SOC_ENUM_SINGLE(WM8753_DAC, 6, 2, wm8753_dac_phase), SOC_ENUM_SINGLE(WM8753_INCTL1, 3, 4, wm8753_line_mix), SOC_ENUM_SINGLE(WM8753_INCTL1, 2, 2, wm8753_mono_mux), SOC_ENUM_SINGLE(WM8753_INCTL1, 1, 2, wm8753_right_mux), SOC_ENUM_SINGLE(WM8753_INCTL1, 0, 2, wm8753_left_mux), SOC_ENUM_SINGLE(WM8753_INCTL2, 6, 4, wm8753_rxmsel), SOC_ENUM_SINGLE(WM8753_INCTL2, 4, 4, wm8753_sidetone_mux), SOC_ENUM_SINGLE(WM8753_OUTCTL, 7, 4, wm8753_mono2_src), SOC_ENUM_SINGLE(WM8753_OUTCTL, 0, 3, wm8753_out3), SOC_ENUM_SINGLE(WM8753_ADCTL2, 7, 3, wm8753_out4), SOC_ENUM_SINGLE(WM8753_ADCIN, 2, 3, wm8753_radcsel), SOC_ENUM_SINGLE(WM8753_ADCIN, 0, 3, wm8753_ladcsel), SOC_ENUM_SINGLE(WM8753_ADCIN, 4, 4, wm8753_mono_adc), SOC_ENUM_SINGLE(WM8753_ADC, 2, 4, wm8753_adc_hp), SOC_ENUM_SINGLE(WM8753_ADC, 4, 2, wm8753_adc_filter), SOC_ENUM_SINGLE(WM8753_MICBIAS, 6, 3, wm8753_mic_sel), SOC_ENUM_SINGLE(WM8753_IOCTL, 2, 4, wm8753_dai_mode), SOC_ENUM_SINGLE(WM8753_ADC, 7, 4, wm8753_dat_sel), SOC_ENUM_SINGLE(WM8753_OUTCTL, 2, 2, wm8753_rout2_phase), }; static int wm8753_get_dai(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); int mode = wm8753_read_reg_cache(codec, WM8753_IOCTL); ucontrol->value.integer.value[0] = (mode & 0xc) >> 2; return 0; } static int wm8753_set_dai(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); int mode = 
wm8753_read_reg_cache(codec, WM8753_IOCTL); if (((mode & 0xc) >> 2) == ucontrol->value.integer.value[0]) return 0; mode &= 0xfff3; mode |= (ucontrol->value.integer.value[0] << 2); wm8753_write(codec, WM8753_IOCTL, mode); wm8753_set_dai_mode(codec, ucontrol->value.integer.value[0]); return 1; } static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(mic_preamp_tlv, 1200, 600, 0); static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1); static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); static const unsigned int out_tlv[] = { TLV_DB_RANGE_HEAD(2), /* 0000000 - 0101111 = "Analogue mute" */ 0, 48, TLV_DB_SCALE_ITEM(-25500, 0, 0), 48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0), }; static const DECLARE_TLV_DB_SCALE(mix_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(voice_mix_tlv, -1200, 300, 0); static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0); static const struct snd_kcontrol_new wm8753_snd_controls[] = { SOC_DOUBLE_R_TLV("PCM Volume", WM8753_LDAC, WM8753_RDAC, 0, 255, 0, dac_tlv), SOC_DOUBLE_R_TLV("ADC Capture Volume", WM8753_LADC, WM8753_RADC, 0, 255, 0, adc_tlv), SOC_DOUBLE_R_TLV("Headphone Playback Volume", WM8753_LOUT1V, WM8753_ROUT1V, 0, 127, 0, out_tlv), SOC_DOUBLE_R_TLV("Speaker Playback Volume", WM8753_LOUT2V, WM8753_ROUT2V, 0, 127, 0, out_tlv), SOC_SINGLE_TLV("Mono Playback Volume", WM8753_MOUTV, 0, 127, 0, out_tlv), SOC_DOUBLE_R_TLV("Bypass Playback Volume", WM8753_LOUTM1, WM8753_ROUTM1, 4, 7, 1, mix_tlv), SOC_DOUBLE_R_TLV("Sidetone Playback Volume", WM8753_LOUTM2, WM8753_ROUTM2, 4, 7, 1, mix_tlv), SOC_DOUBLE_R_TLV("Voice Playback Volume", WM8753_LOUTM2, WM8753_ROUTM2, 0, 7, 1, voice_mix_tlv), SOC_DOUBLE_R("Headphone Playback ZC Switch", WM8753_LOUT1V, WM8753_ROUT1V, 7, 1, 0), SOC_DOUBLE_R("Speaker Playback ZC Switch", WM8753_LOUT2V, WM8753_ROUT2V, 7, 1, 0), SOC_SINGLE_TLV("Mono Bypass Playback Volume", WM8753_MOUTM1, 4, 7, 1, mix_tlv), SOC_SINGLE_TLV("Mono Sidetone Playback Volume", WM8753_MOUTM2, 
4, 7, 1, mix_tlv), SOC_SINGLE_TLV("Mono Voice Playback Volume", WM8753_MOUTM2, 0, 7, 1, voice_mix_tlv), SOC_SINGLE("Mono Playback ZC Switch", WM8753_MOUTV, 7, 1, 0), SOC_ENUM("Bass Boost", wm8753_enum[0]), SOC_ENUM("Bass Filter", wm8753_enum[1]), SOC_SINGLE("Bass Volume", WM8753_BASS, 0, 15, 1), SOC_SINGLE("Treble Volume", WM8753_TREBLE, 0, 15, 1), SOC_ENUM("Treble Cut-off", wm8753_enum[2]), SOC_DOUBLE_TLV("Sidetone Capture Volume", WM8753_RECMIX1, 0, 4, 7, 1, rec_mix_tlv), SOC_SINGLE_TLV("Voice Sidetone Capture Volume", WM8753_RECMIX2, 0, 7, 1, rec_mix_tlv), SOC_DOUBLE_R_TLV("Capture Volume", WM8753_LINVOL, WM8753_RINVOL, 0, 63, 0, pga_tlv), SOC_DOUBLE_R("Capture ZC Switch", WM8753_LINVOL, WM8753_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8753_LINVOL, WM8753_RINVOL, 7, 1, 1), SOC_ENUM("Capture Filter Select", wm8753_enum[23]), SOC_ENUM("Capture Filter Cut-off", wm8753_enum[24]), SOC_SINGLE("Capture Filter Switch", WM8753_ADC, 0, 1, 1), SOC_SINGLE("ALC Capture Target Volume", WM8753_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8753_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture Function", wm8753_enum[3]), SOC_SINGLE("ALC Capture ZC Switch", WM8753_ALC2, 8, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8753_ALC2, 0, 15, 1), SOC_SINGLE("ALC Capture Decay Time", WM8753_ALC3, 4, 15, 1), SOC_SINGLE("ALC Capture Attack Time", WM8753_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8753_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", wm8753_enum[4]), SOC_SINGLE("ALC Capture NG Switch", WM8753_NGATE, 0, 1, 0), SOC_ENUM("3D Function", wm8753_enum[5]), SOC_ENUM("3D Upper Cut-off", wm8753_enum[6]), SOC_ENUM("3D Lower Cut-off", wm8753_enum[7]), SOC_SINGLE("3D Volume", WM8753_3D, 1, 15, 0), SOC_SINGLE("3D Switch", WM8753_3D, 0, 1, 0), SOC_SINGLE("Capture 6dB Attenuate", WM8753_ADCTL1, 2, 1, 0), SOC_SINGLE("Playback 6dB Attenuate", WM8753_ADCTL1, 1, 1, 0), SOC_ENUM("De-emphasis", wm8753_enum[8]), SOC_ENUM("Playback Mono Mix", wm8753_enum[9]), 
SOC_ENUM("Playback Phase", wm8753_enum[10]), SOC_SINGLE_TLV("Mic2 Capture Volume", WM8753_INCTL1, 7, 3, 0, mic_preamp_tlv), SOC_SINGLE_TLV("Mic1 Capture Volume", WM8753_INCTL1, 5, 3, 0, mic_preamp_tlv), SOC_ENUM_EXT("DAI Mode", wm8753_enum[26], wm8753_get_dai, wm8753_set_dai), SOC_ENUM("ADC Data Select", wm8753_enum[27]), SOC_ENUM("ROUT2 Phase", wm8753_enum[28]), }; /* * _DAPM_ Controls */ /* Left Mixer */ static const struct snd_kcontrol_new wm8753_left_mixer_controls[] = { SOC_DAPM_SINGLE("Voice Playback Switch", WM8753_LOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Sidetone Playback Switch", WM8753_LOUTM2, 7, 1, 0), SOC_DAPM_SINGLE("Left Playback Switch", WM8753_LOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Bypass Playback Switch", WM8753_LOUTM1, 7, 1, 0), }; /* Right mixer */ static const struct snd_kcontrol_new wm8753_right_mixer_controls[] = { SOC_DAPM_SINGLE("Voice Playback Switch", WM8753_ROUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Sidetone Playback Switch", WM8753_ROUTM2, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8753_ROUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Bypass Playback Switch", WM8753_ROUTM1, 7, 1, 0), }; /* Mono mixer */ static const struct snd_kcontrol_new wm8753_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8753_MOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8753_MOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Voice Playback Switch", WM8753_MOUTM2, 3, 1, 0), SOC_DAPM_SINGLE("Sidetone Playback Switch", WM8753_MOUTM2, 7, 1, 0), SOC_DAPM_SINGLE("Bypass Playback Switch", WM8753_MOUTM1, 7, 1, 0), }; /* Mono 2 Mux */ static const struct snd_kcontrol_new wm8753_mono2_controls = SOC_DAPM_ENUM("Route", wm8753_enum[17]); /* Out 3 Mux */ static const struct snd_kcontrol_new wm8753_out3_controls = SOC_DAPM_ENUM("Route", wm8753_enum[18]); /* Out 4 Mux */ static const struct snd_kcontrol_new wm8753_out4_controls = SOC_DAPM_ENUM("Route", wm8753_enum[19]); /* ADC Mono Mix */ static const struct snd_kcontrol_new wm8753_adc_mono_controls = SOC_DAPM_ENUM("Route", 
wm8753_enum[22]); /* Record mixer */ static const struct snd_kcontrol_new wm8753_record_mixer_controls[] = { SOC_DAPM_SINGLE("Voice Capture Switch", WM8753_RECMIX2, 3, 1, 0), SOC_DAPM_SINGLE("Left Capture Switch", WM8753_RECMIX1, 3, 1, 0), SOC_DAPM_SINGLE("Right Capture Switch", WM8753_RECMIX1, 7, 1, 0), }; /* Left ADC mux */ static const struct snd_kcontrol_new wm8753_adc_left_controls = SOC_DAPM_ENUM("Route", wm8753_enum[21]); /* Right ADC mux */ static const struct snd_kcontrol_new wm8753_adc_right_controls = SOC_DAPM_ENUM("Route", wm8753_enum[20]); /* MIC mux */ static const struct snd_kcontrol_new wm8753_mic_mux_controls = SOC_DAPM_ENUM("Route", wm8753_enum[16]); /* ALC mixer */ static const struct snd_kcontrol_new wm8753_alc_mixer_controls[] = { SOC_DAPM_SINGLE("Line Capture Switch", WM8753_INCTL2, 3, 1, 0), SOC_DAPM_SINGLE("Mic2 Capture Switch", WM8753_INCTL2, 2, 1, 0), SOC_DAPM_SINGLE("Mic1 Capture Switch", WM8753_INCTL2, 1, 1, 0), SOC_DAPM_SINGLE("Rx Capture Switch", WM8753_INCTL2, 0, 1, 0), }; /* Left Line mux */ static const struct snd_kcontrol_new wm8753_line_left_controls = SOC_DAPM_ENUM("Route", wm8753_enum[14]); /* Right Line mux */ static const struct snd_kcontrol_new wm8753_line_right_controls = SOC_DAPM_ENUM("Route", wm8753_enum[13]); /* Mono Line mux */ static const struct snd_kcontrol_new wm8753_line_mono_controls = SOC_DAPM_ENUM("Route", wm8753_enum[12]); /* Line mux and mixer */ static const struct snd_kcontrol_new wm8753_line_mux_mix_controls = SOC_DAPM_ENUM("Route", wm8753_enum[11]); /* Rx mux and mixer */ static const struct snd_kcontrol_new wm8753_rx_mux_mix_controls = SOC_DAPM_ENUM("Route", wm8753_enum[15]); /* Mic Selector Mux */ static const struct snd_kcontrol_new wm8753_mic_sel_mux_controls = SOC_DAPM_ENUM("Route", wm8753_enum[25]); static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = { SND_SOC_DAPM_MICBIAS("Mic Bias", WM8753_PWR1, 5, 0), SND_SOC_DAPM_MIXER("Left Mixer", WM8753_PWR4, 0, 0, &wm8753_left_mixer_controls[0], 
ARRAY_SIZE(wm8753_left_mixer_controls)), SND_SOC_DAPM_PGA("Left Out 1", WM8753_PWR3, 8, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 2", WM8753_PWR3, 6, 0, NULL, 0), SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback", WM8753_PWR1, 3, 0), SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_MIXER("Right Mixer", WM8753_PWR4, 1, 0, &wm8753_right_mixer_controls[0], ARRAY_SIZE(wm8753_right_mixer_controls)), SND_SOC_DAPM_PGA("Right Out 1", WM8753_PWR3, 7, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Out 2", WM8753_PWR3, 5, 0, NULL, 0), SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback", WM8753_PWR1, 2, 0), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_MIXER("Mono Mixer", WM8753_PWR4, 2, 0, &wm8753_mono_mixer_controls[0], ARRAY_SIZE(wm8753_mono_mixer_controls)), SND_SOC_DAPM_PGA("Mono Out 1", WM8753_PWR3, 2, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out 2", WM8753_PWR3, 1, 0, NULL, 0), SND_SOC_DAPM_DAC("Voice DAC", "Voice Playback", WM8753_PWR1, 4, 0), SND_SOC_DAPM_OUTPUT("MONO1"), SND_SOC_DAPM_MUX("Mono 2 Mux", SND_SOC_NOPM, 0, 0, &wm8753_mono2_controls), SND_SOC_DAPM_OUTPUT("MONO2"), SND_SOC_DAPM_MIXER("Out3 Left + Right", -1, 0, 0, NULL, 0), SND_SOC_DAPM_MUX("Out3 Mux", SND_SOC_NOPM, 0, 0, &wm8753_out3_controls), SND_SOC_DAPM_PGA("Out 3", WM8753_PWR3, 4, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_MUX("Out4 Mux", SND_SOC_NOPM, 0, 0, &wm8753_out4_controls), SND_SOC_DAPM_PGA("Out 4", WM8753_PWR3, 3, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("OUT4"), SND_SOC_DAPM_MIXER("Playback Mixer", WM8753_PWR4, 3, 0, &wm8753_record_mixer_controls[0], ARRAY_SIZE(wm8753_record_mixer_controls)), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8753_PWR2, 3, 0), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8753_PWR2, 2, 0), SND_SOC_DAPM_MUX("Capture Left Mixer", SND_SOC_NOPM, 0, 0, &wm8753_adc_mono_controls), SND_SOC_DAPM_MUX("Capture Right Mixer", SND_SOC_NOPM, 0, 0, &wm8753_adc_mono_controls), SND_SOC_DAPM_MUX("Capture Left Mux", SND_SOC_NOPM, 0, 0, 
&wm8753_adc_left_controls), SND_SOC_DAPM_MUX("Capture Right Mux", SND_SOC_NOPM, 0, 0, &wm8753_adc_right_controls), SND_SOC_DAPM_MUX("Mic Sidetone Mux", SND_SOC_NOPM, 0, 0, &wm8753_mic_mux_controls), SND_SOC_DAPM_PGA("Left Capture Volume", WM8753_PWR2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Capture Volume", WM8753_PWR2, 4, 0, NULL, 0), SND_SOC_DAPM_MIXER("ALC Mixer", WM8753_PWR2, 6, 0, &wm8753_alc_mixer_controls[0], ARRAY_SIZE(wm8753_alc_mixer_controls)), SND_SOC_DAPM_MUX("Line Left Mux", SND_SOC_NOPM, 0, 0, &wm8753_line_left_controls), SND_SOC_DAPM_MUX("Line Right Mux", SND_SOC_NOPM, 0, 0, &wm8753_line_right_controls), SND_SOC_DAPM_MUX("Line Mono Mux", SND_SOC_NOPM, 0, 0, &wm8753_line_mono_controls), SND_SOC_DAPM_MUX("Line Mixer", WM8753_PWR2, 0, 0, &wm8753_line_mux_mix_controls), SND_SOC_DAPM_MUX("Rx Mixer", WM8753_PWR2, 1, 0, &wm8753_rx_mux_mix_controls), SND_SOC_DAPM_PGA("Mic 1 Volume", WM8753_PWR2, 8, 0, NULL, 0), SND_SOC_DAPM_PGA("Mic 2 Volume", WM8753_PWR2, 7, 0, NULL, 0), SND_SOC_DAPM_MUX("Mic Selection Mux", SND_SOC_NOPM, 0, 0, &wm8753_mic_sel_mux_controls), SND_SOC_DAPM_INPUT("LINE1"), SND_SOC_DAPM_INPUT("LINE2"), SND_SOC_DAPM_INPUT("RXP"), SND_SOC_DAPM_INPUT("RXN"), SND_SOC_DAPM_INPUT("ACIN"), SND_SOC_DAPM_OUTPUT("ACOP"), SND_SOC_DAPM_INPUT("MIC1N"), SND_SOC_DAPM_INPUT("MIC1"), SND_SOC_DAPM_INPUT("MIC2N"), SND_SOC_DAPM_INPUT("MIC2"), SND_SOC_DAPM_VMID("VREF"), }; static const struct snd_soc_dapm_route audio_map[] = { /* left mixer */ {"Left Mixer", "Left Playback Switch", "Left DAC"}, {"Left Mixer", "Voice Playback Switch", "Voice DAC"}, {"Left Mixer", "Sidetone Playback Switch", "Mic Sidetone Mux"}, {"Left Mixer", "Bypass Playback Switch", "Line Left Mux"}, /* right mixer */ {"Right Mixer", "Right Playback Switch", "Right DAC"}, {"Right Mixer", "Voice Playback Switch", "Voice DAC"}, {"Right Mixer", "Sidetone Playback Switch", "Mic Sidetone Mux"}, {"Right Mixer", "Bypass Playback Switch", "Line Right Mux"}, /* mono mixer */ {"Mono Mixer", "Voice Playback 
Switch", "Voice DAC"}, {"Mono Mixer", "Left Playback Switch", "Left DAC"}, {"Mono Mixer", "Right Playback Switch", "Right DAC"}, {"Mono Mixer", "Sidetone Playback Switch", "Mic Sidetone Mux"}, {"Mono Mixer", "Bypass Playback Switch", "Line Mono Mux"}, /* left out */ {"Left Out 1", NULL, "Left Mixer"}, {"Left Out 2", NULL, "Left Mixer"}, {"LOUT1", NULL, "Left Out 1"}, {"LOUT2", NULL, "Left Out 2"}, /* right out */ {"Right Out 1", NULL, "Right Mixer"}, {"Right Out 2", NULL, "Right Mixer"}, {"ROUT1", NULL, "Right Out 1"}, {"ROUT2", NULL, "Right Out 2"}, /* mono 1 out */ {"Mono Out 1", NULL, "Mono Mixer"}, {"MONO1", NULL, "Mono Out 1"}, /* mono 2 out */ {"Mono 2 Mux", "Left + Right", "Out3 Left + Right"}, {"Mono 2 Mux", "Inverted Mono 1", "MONO1"}, {"Mono 2 Mux", "Left", "Left Mixer"}, {"Mono 2 Mux", "Right", "Right Mixer"}, {"Mono Out 2", NULL, "Mono 2 Mux"}, {"MONO2", NULL, "Mono Out 2"}, /* out 3 */ {"Out3 Left + Right", NULL, "Left Mixer"}, {"Out3 Left + Right", NULL, "Right Mixer"}, {"Out3 Mux", "VREF", "VREF"}, {"Out3 Mux", "Left + Right", "Out3 Left + Right"}, {"Out3 Mux", "ROUT2", "ROUT2"}, {"Out 3", NULL, "Out3 Mux"}, {"OUT3", NULL, "Out 3"}, /* out 4 */ {"Out4 Mux", "VREF", "VREF"}, {"Out4 Mux", "Capture ST", "Playback Mixer"}, {"Out4 Mux", "LOUT2", "LOUT2"}, {"Out 4", NULL, "Out4 Mux"}, {"OUT4", NULL, "Out 4"}, /* record mixer */ {"Playback Mixer", "Left Capture Switch", "Left Mixer"}, {"Playback Mixer", "Voice Capture Switch", "Mono Mixer"}, {"Playback Mixer", "Right Capture Switch", "Right Mixer"}, /* Mic/SideTone Mux */ {"Mic Sidetone Mux", "Left PGA", "Left Capture Volume"}, {"Mic Sidetone Mux", "Right PGA", "Right Capture Volume"}, {"Mic Sidetone Mux", "Mic 1", "Mic 1 Volume"}, {"Mic Sidetone Mux", "Mic 2", "Mic 2 Volume"}, /* Capture Left Mux */ {"Capture Left Mux", "PGA", "Left Capture Volume"}, {"Capture Left Mux", "Line or RXP-RXN", "Line Left Mux"}, {"Capture Left Mux", "Line", "LINE1"}, /* Capture Right Mux */ {"Capture Right Mux", "PGA", "Right 
Capture Volume"}, {"Capture Right Mux", "Line or RXP-RXN", "Line Right Mux"}, {"Capture Right Mux", "Sidetone", "Playback Mixer"}, /* Mono Capture mixer-mux */ {"Capture Right Mixer", "Stereo", "Capture Right Mux"}, {"Capture Left Mixer", "Stereo", "Capture Left Mux"}, {"Capture Left Mixer", "Analogue Mix Left", "Capture Left Mux"}, {"Capture Left Mixer", "Analogue Mix Left", "Capture Right Mux"}, {"Capture Right Mixer", "Analogue Mix Right", "Capture Left Mux"}, {"Capture Right Mixer", "Analogue Mix Right", "Capture Right Mux"}, {"Capture Left Mixer", "Digital Mono Mix", "Capture Left Mux"}, {"Capture Left Mixer", "Digital Mono Mix", "Capture Right Mux"}, {"Capture Right Mixer", "Digital Mono Mix", "Capture Left Mux"}, {"Capture Right Mixer", "Digital Mono Mix", "Capture Right Mux"}, /* ADC */ {"Left ADC", NULL, "Capture Left Mixer"}, {"Right ADC", NULL, "Capture Right Mixer"}, /* Left Capture Volume */ {"Left Capture Volume", NULL, "ACIN"}, /* Right Capture Volume */ {"Right Capture Volume", NULL, "Mic 2 Volume"}, /* ALC Mixer */ {"ALC Mixer", "Line Capture Switch", "Line Mixer"}, {"ALC Mixer", "Mic2 Capture Switch", "Mic 2 Volume"}, {"ALC Mixer", "Mic1 Capture Switch", "Mic 1 Volume"}, {"ALC Mixer", "Rx Capture Switch", "Rx Mixer"}, /* Line Left Mux */ {"Line Left Mux", "Line 1", "LINE1"}, {"Line Left Mux", "Rx Mix", "Rx Mixer"}, /* Line Right Mux */ {"Line Right Mux", "Line 2", "LINE2"}, {"Line Right Mux", "Rx Mix", "Rx Mixer"}, /* Line Mono Mux */ {"Line Mono Mux", "Line Mix", "Line Mixer"}, {"Line Mono Mux", "Rx Mix", "Rx Mixer"}, /* Line Mixer/Mux */ {"Line Mixer", "Line 1 + 2", "LINE1"}, {"Line Mixer", "Line 1 - 2", "LINE1"}, {"Line Mixer", "Line 1 + 2", "LINE2"}, {"Line Mixer", "Line 1 - 2", "LINE2"}, {"Line Mixer", "Line 1", "LINE1"}, {"Line Mixer", "Line 2", "LINE2"}, /* Rx Mixer/Mux */ {"Rx Mixer", "RXP - RXN", "RXP"}, {"Rx Mixer", "RXP + RXN", "RXP"}, {"Rx Mixer", "RXP - RXN", "RXN"}, {"Rx Mixer", "RXP + RXN", "RXN"}, {"Rx Mixer", "RXP", "RXP"}, {"Rx 
Mixer", "RXN", "RXN"}, /* Mic 1 Volume */ {"Mic 1 Volume", NULL, "MIC1N"}, {"Mic 1 Volume", NULL, "Mic Selection Mux"}, /* Mic 2 Volume */ {"Mic 2 Volume", NULL, "MIC2N"}, {"Mic 2 Volume", NULL, "MIC2"}, /* Mic Selector Mux */ {"Mic Selection Mux", "Mic 1", "MIC1"}, {"Mic Selection Mux", "Mic 2", "MIC2N"}, {"Mic Selection Mux", "Mic 3", "MIC2"}, /* ACOP */ {"ACOP", NULL, "ALC Mixer"}, }; static int wm8753_add_widgets(struct snd_soc_codec *codec) { snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets, ARRAY_SIZE(wm8753_dapm_widgets)); snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); snd_soc_dapm_new_widgets(codec); return 0; } /* PLL divisors */ struct _pll_div { u32 div2:1; u32 n:4; u32 k:24; }; /* The size in bits of the pll divide multiplied by 10 * to allow rounding later */ #define FIXED_PLL_SIZE ((1 << 22) * 10) static void pll_factors(struct _pll_div *pll_div, unsigned int target, unsigned int source) { u64 Kpart; unsigned int K, Ndiv, Nmod; Ndiv = target / source; if (Ndiv < 6) { source >>= 1; pll_div->div2 = 1; Ndiv = target / source; } else pll_div->div2 = 0; if ((Ndiv < 6) || (Ndiv > 12)) printk(KERN_WARNING "wm8753: unsupported N = %u\n", Ndiv); pll_div->n = Ndiv; Nmod = target % source; Kpart = FIXED_PLL_SIZE * (long long)Nmod; do_div(Kpart, source); K = Kpart & 0xFFFFFFFF; /* Check if we need to round */ if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ K /= 10; pll_div->k = K; } static int wm8753_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, unsigned int freq_in, unsigned int freq_out) { u16 reg, enable; int offset; struct snd_soc_codec *codec = codec_dai->codec; if (pll_id < WM8753_PLL1 || pll_id > WM8753_PLL2) return -ENODEV; if (pll_id == WM8753_PLL1) { offset = 0; enable = 0x10; reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xffef; } else { offset = 4; enable = 0x8; reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfff7; } if (!freq_in || !freq_out) { /* disable PLL */ 
wm8753_write(codec, WM8753_PLL1CTL1 + offset, 0x0026); wm8753_write(codec, WM8753_CLOCK, reg); return 0; } else { u16 value = 0; struct _pll_div pll_div; pll_factors(&pll_div, freq_out * 8, freq_in); /* set up N and K PLL divisor ratios */ /* bits 8:5 = PLL_N, bits 3:0 = PLL_K[21:18] */ value = (pll_div.n << 5) + ((pll_div.k & 0x3c0000) >> 18); wm8753_write(codec, WM8753_PLL1CTL2 + offset, value); /* bits 8:0 = PLL_K[17:9] */ value = (pll_div.k & 0x03fe00) >> 9; wm8753_write(codec, WM8753_PLL1CTL3 + offset, value); /* bits 8:0 = PLL_K[8:0] */ value = pll_div.k & 0x0001ff; wm8753_write(codec, WM8753_PLL1CTL4 + offset, value); /* set PLL as input and enable */ wm8753_write(codec, WM8753_PLL1CTL1 + offset, 0x0027 | (pll_div.div2 << 3)); wm8753_write(codec, WM8753_CLOCK, reg | enable); } return 0; } struct _coeff_div { u32 mclk; u32 rate; u8 sr:5; u8 usb:1; }; /* codec hifi mclk (after PLL) clock divider coefficients */ static const struct _coeff_div coeff_div[] = { /* 8k */ {12288000, 8000, 0x6, 0x0}, {11289600, 8000, 0x16, 0x0}, {18432000, 8000, 0x7, 0x0}, {16934400, 8000, 0x17, 0x0}, {12000000, 8000, 0x6, 0x1}, /* 11.025k */ {11289600, 11025, 0x18, 0x0}, {16934400, 11025, 0x19, 0x0}, {12000000, 11025, 0x19, 0x1}, /* 16k */ {12288000, 16000, 0xa, 0x0}, {18432000, 16000, 0xb, 0x0}, {12000000, 16000, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 0x1a, 0x0}, {16934400, 22050, 0x1b, 0x0}, {12000000, 22050, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 0xc, 0x0}, {18432000, 32000, 0xd, 0x0}, {12000000, 32000, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 0x10, 0x0}, {16934400, 44100, 0x11, 0x0}, {12000000, 44100, 0x11, 0x1}, /* 48k */ {12288000, 48000, 0x0, 0x0}, {18432000, 48000, 0x1, 0x0}, {12000000, 48000, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 0x1e, 0x0}, {16934400, 88200, 0x1f, 0x0}, {12000000, 88200, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 0xe, 0x0}, {18432000, 96000, 0xf, 0x0}, {12000000, 96000, 0xe, 0x1}, }; static int get_coeff(int mclk, int rate) { int i; for (i = 0; i < 
ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } return -EINVAL; } /* * Clock after PLL and dividers */ static int wm8753_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8753_priv *wm8753 = codec->private_data; switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: if (clk_id == WM8753_MCLK) { wm8753->sysclk = freq; return 0; } else if (clk_id == WM8753_PCMCLK) { wm8753->pcmclk = freq; return 0; } break; } return -EINVAL; } /* * Set's ADC and Voice DAC format. */ static int wm8753_vdac_adc_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x01ec; /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: voice |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: voice |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: voice |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: voice |= 0x0013; break; default: return -EINVAL; } wm8753_write(codec, WM8753_PCM, voice); return 0; } /* * Set PCM DAI bit size and sample rate. 
*/
/* hw_params for the voice (PCM) DAI: program sample width into the PCM
 * register and flag the 384*fs clocking mode in SRATE1 when it matches
 * the configured pcmclk.
 */
static int wm8753_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_device *socdev = rtd->socdev;
	struct snd_soc_codec *codec = socdev->card->codec;
	struct wm8753_priv *wm8753 = codec->private_data;
	/* masks keep the bits this function does not own */
	u16 voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x01f3;
	u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x017f;

	/* bit size */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		voice |= 0x0004;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		voice |= 0x0008;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		voice |= 0x000c;
		break;
	}

	/* sample rate */
	if (params_rate(params) * 384 == wm8753->pcmclk)
		srate |= 0x80;
	wm8753_write(codec, WM8753_SRATE1, srate);

	wm8753_write(codec, WM8753_PCM, voice);
	return 0;
}

/*
 * Sets PCM DAI fmt and BCLK.
 */
static int wm8753_pcm_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 voice, ioctl;

	voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x011f;
	ioctl = wm8753_read_reg_cache(codec, WM8753_IOCTL) & 0x015d;

	/* set master/slave audio interface */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		ioctl |= 0x2;
		/* fall through - CBM_CFM also needs the master bit below */
	case SND_SOC_DAIFMT_CBM_CFS:
		voice |= 0x0040;
		break;
	default:
		return -EINVAL;
	}

	/* clock inversion */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		/* frame inversion not valid for DSP modes */
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_NF:
			voice |= 0x0080;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_RIGHT_J:
	case SND_SOC_DAIFMT_LEFT_J:
		voice &= ~0x0010;
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_IB_IF:
			voice |= 0x0090;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			voice |= 0x0080;
			break;
		case SND_SOC_DAIFMT_NB_IF:
			voice |= 0x0010;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	wm8753_write(codec, WM8753_PCM, voice);
	wm8753_write(codec, WM8753_IOCTL, ioctl);
	return 0;
}

/* OR a pre-shifted divider value into the register selected by @div_id.
 * NOTE(review): @div is OR'ed in after masking, so callers must pass values
 * already aligned to the register field - confirm against wm8753.h macros.
 */
static int wm8753_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
		int div_id, int div)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 reg;

	switch (div_id) {
	case WM8753_PCMDIV:
		reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0x003f;
		wm8753_write(codec, WM8753_CLOCK, reg | div);
		break;
	case WM8753_BCLKDIV:
		reg = wm8753_read_reg_cache(codec, WM8753_SRATE2) & 0x01c7;
		wm8753_write(codec, WM8753_SRATE2, reg | div);
		break;
	case WM8753_VXCLKDIV:
		reg = wm8753_read_reg_cache(codec, WM8753_SRATE2) & 0x003f;
		wm8753_write(codec, WM8753_SRATE2, reg | div);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Sets HiFi DAC format.
 */
static int wm8753_hdac_set_dai_fmt(struct snd_soc_dai *codec_dai,
		unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	u16 hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x01e0;

	/* interface format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		hifi |= 0x0002;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		hifi |= 0x0001;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		hifi |= 0x0003;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		hifi |= 0x0013;
		break;
	default:
		return -EINVAL;
	}

	wm8753_write(codec, WM8753_HIFI, hifi);
	return 0;
}

/*
 * Set's I2S DAI format.
*/ static int wm8753_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 ioctl, hifi; hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x011f; ioctl = wm8753_read_reg_cache(codec, WM8753_IOCTL) & 0x00ae; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: break; case SND_SOC_DAIFMT_CBM_CFM: ioctl |= 0x1; case SND_SOC_DAIFMT_CBM_CFS: hifi |= 0x0040; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: /* frame inversion not valid for DSP modes */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_NF: hifi |= 0x0080; break; default: return -EINVAL; } break; case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: hifi &= ~0x0010; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: hifi |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: hifi |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: hifi |= 0x0010; break; default: return -EINVAL; } break; default: return -EINVAL; } wm8753_write(codec, WM8753_HIFI, hifi); wm8753_write(codec, WM8753_IOCTL, ioctl); return 0; } /* * Set PCM DAI bit size and sample rate. */ static int wm8753_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_codec *codec = socdev->card->codec; struct wm8753_priv *wm8753 = codec->private_data; u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x01c0; u16 hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x01f3; int coeff; /* is digital filter coefficient valid ? 
*/ coeff = get_coeff(wm8753->sysclk, params_rate(params)); if (coeff < 0) { printk(KERN_ERR "wm8753 invalid MCLK or rate\n"); return coeff; } wm8753_write(codec, WM8753_SRATE1, srate | (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: hifi |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: hifi |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: hifi |= 0x000c; break; } wm8753_write(codec, WM8753_HIFI, hifi); return 0; } static int wm8753_mode1v_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 clock; /* set clk source as pcmclk */ clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; wm8753_write(codec, WM8753_CLOCK, clock); if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_pcm_set_dai_fmt(codec_dai, fmt); } static int wm8753_mode1h_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { if (wm8753_hdac_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_i2s_set_dai_fmt(codec_dai, fmt); } static int wm8753_mode2_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 clock; /* set clk source as pcmclk */ clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; wm8753_write(codec, WM8753_CLOCK, clock); if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_i2s_set_dai_fmt(codec_dai, fmt); } static int wm8753_mode3_4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 clock; /* set clk source as mclk */ clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; wm8753_write(codec, WM8753_CLOCK, clock | 0x4); if (wm8753_hdac_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_i2s_set_dai_fmt(codec_dai, 
fmt); } static int wm8753_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = wm8753_read_reg_cache(codec, WM8753_DAC) & 0xfff7; /* the digital mute covers the HiFi and Voice DAC's on the WM8753. * make sure we check if they are not both active when we mute */ if (mute && dai->id == 1) { if (!wm8753_dai[WM8753_DAI_VOICE].playback.active || !wm8753_dai[WM8753_DAI_HIFI].playback.active) wm8753_write(codec, WM8753_DAC, mute_reg | 0x8); } else { if (mute) wm8753_write(codec, WM8753_DAC, mute_reg | 0x8); else wm8753_write(codec, WM8753_DAC, mute_reg); } return 0; } static int wm8753_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 pwr_reg = wm8753_read_reg_cache(codec, WM8753_PWR1) & 0xfe3e; switch (level) { case SND_SOC_BIAS_ON: /* set vmid to 50k and unmute dac */ wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x00c0); break; case SND_SOC_BIAS_PREPARE: /* set vmid to 5k for quick power up */ wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x01c1); break; case SND_SOC_BIAS_STANDBY: /* mute dac and set vmid to 500k, enable VREF */ wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x0141); break; case SND_SOC_BIAS_OFF: wm8753_write(codec, WM8753_PWR1, 0x0001); break; } codec->bias_level = level; return 0; } #define WM8753_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define WM8753_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) /* * The WM8753 supports upto 4 different and mutually exclusive DAI * configurations. This gives 2 PCM's available for use, hifi and voice. * NOTE: The Voice PCM cannot play or capture audio to the CPU as it's DAI * is connected between the wm8753 and a BT codec or GSM modem. * * 1. Voice over PCM DAI - HIFI DAC over HIFI DAI * 2. Voice over HIFI DAI - HIFI disabled * 3. 
Voice disabled - HIFI over HIFI
 * 4. Voice disabled - HIFI over HIFI, uses voice DAI LRC for capture
 */
static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode1 = {
	.hw_params	= wm8753_i2s_hw_params,
	.digital_mute	= wm8753_mute,
	.set_fmt	= wm8753_mode1h_set_dai_fmt,
	.set_clkdiv	= wm8753_set_dai_clkdiv,
	.set_pll	= wm8753_set_dai_pll,
	.set_sysclk	= wm8753_set_dai_sysclk,
};

static struct snd_soc_dai_ops wm8753_dai_ops_voice_mode1 = {
	.hw_params	= wm8753_pcm_hw_params,
	.digital_mute	= wm8753_mute,
	.set_fmt	= wm8753_mode1v_set_dai_fmt,
	.set_clkdiv	= wm8753_set_dai_clkdiv,
	.set_pll	= wm8753_set_dai_pll,
	.set_sysclk	= wm8753_set_dai_sysclk,
};

static struct snd_soc_dai_ops wm8753_dai_ops_voice_mode2 = {
	.hw_params	= wm8753_pcm_hw_params,
	.digital_mute	= wm8753_mute,
	.set_fmt	= wm8753_mode2_set_dai_fmt,
	.set_clkdiv	= wm8753_set_dai_clkdiv,
	.set_pll	= wm8753_set_dai_pll,
	.set_sysclk	= wm8753_set_dai_sysclk,
};

static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode3 = {
	.hw_params	= wm8753_i2s_hw_params,
	.digital_mute	= wm8753_mute,
	.set_fmt	= wm8753_mode3_4_set_dai_fmt,
	.set_clkdiv	= wm8753_set_dai_clkdiv,
	.set_pll	= wm8753_set_dai_pll,
	.set_sysclk	= wm8753_set_dai_sysclk,
};

static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode4 = {
	.hw_params	= wm8753_i2s_hw_params,
	.digital_mute	= wm8753_mute,
	.set_fmt	= wm8753_mode3_4_set_dai_fmt,
	.set_clkdiv	= wm8753_set_dai_clkdiv,
	.set_pll	= wm8753_set_dai_pll,
	.set_sysclk	= wm8753_set_dai_sysclk,
};

/*
 * Template table of all DAI variants; pairs (hifi, voice) are copied into
 * wm8753_dai[] by wm8753_set_dai_mode() when the mode is switched.
 */
static const struct snd_soc_dai wm8753_all_dai[] = {
/* DAI HiFi mode 1 */
{	.name = "WM8753 HiFi",
	.id = 1,
	.playback = {
		.stream_name = "HiFi Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS},
	.capture = { /* dummy for fast DAI switching */
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS},
	.ops = &wm8753_dai_ops_hifi_mode1,
},
/* DAI Voice mode 1 */
{	.name = "WM8753 Voice",
	.id = 1,
	.playback = {
		.stream_name = "Voice Playback",
		.channels_min = 1,
		.channels_max = 1,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.ops = &wm8753_dai_ops_voice_mode1,
},
/* DAI HiFi mode 2 - dummy */
{	.name = "WM8753 HiFi",
	.id = 2,
},
/* DAI Voice mode 2 */
{	.name = "WM8753 Voice",
	.id = 2,
	.playback = {
		.stream_name = "Voice Playback",
		.channels_min = 1,
		.channels_max = 1,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.ops = &wm8753_dai_ops_voice_mode2,
},
/* DAI HiFi mode 3 */
{	.name = "WM8753 HiFi",
	.id = 3,
	.playback = {
		.stream_name = "HiFi Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.ops = &wm8753_dai_ops_hifi_mode3,
},
/* DAI Voice mode 3 - dummy */
{	.name = "WM8753 Voice",
	.id = 3,
},
/* DAI HiFi mode 4 */
{	.name = "WM8753 HiFi",
	.id = 4,
	.playback = {
		.stream_name = "HiFi Playback",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		.rates = WM8753_RATES,
		.formats = WM8753_FORMATS,},
	.ops = &wm8753_dai_ops_hifi_mode4,
},
/* DAI Voice mode 4 - dummy */
{	.name = "WM8753 Voice",
	.id = 4,
},
};

/* The two live DAIs exposed to the machine driver; filled in by set_dai_mode(). */
struct snd_soc_dai wm8753_dai[] = {
	{
		.name = "WM8753 DAI 0",
	},
	{
		.name = "WM8753 DAI 1",
	},
};
EXPORT_SYMBOL_GPL(wm8753_dai);

/*
 * Swap the two live DAIs to the templates for 'mode' (0..3), preserving the
 * runtime state (active counts, private_data, pop_wait, list linkage) across
 * the struct copy.  Copying .list by value is safe here because the embedded
 * list_head stays at the same address inside wm8753_dai[].
 */
static void wm8753_set_dai_mode(struct snd_soc_codec *codec, unsigned int mode)
{
	if (mode < 4) {
		int playback_active, capture_active, codec_active, pop_wait;
		void *private_data;
		struct list_head list;

		playback_active = wm8753_dai[0].playback.active;
		capture_active = wm8753_dai[0].capture.active;
		codec_active = wm8753_dai[0].active;
		private_data = wm8753_dai[0].private_data;
		pop_wait = wm8753_dai[0].pop_wait;
		list = wm8753_dai[0].list;
		wm8753_dai[0] = wm8753_all_dai[mode << 1];
		wm8753_dai[0].playback.active = playback_active;
		wm8753_dai[0].capture.active = capture_active;
		wm8753_dai[0].active = codec_active;
		wm8753_dai[0].private_data = private_data;
		wm8753_dai[0].pop_wait = pop_wait;
		wm8753_dai[0].list = list;

		playback_active = wm8753_dai[1].playback.active;
		capture_active = wm8753_dai[1].capture.active;
		codec_active = wm8753_dai[1].active;
		private_data = wm8753_dai[1].private_data;
		pop_wait = wm8753_dai[1].pop_wait;
		list = wm8753_dai[1].list;
		wm8753_dai[1] = wm8753_all_dai[(mode << 1) + 1];
		wm8753_dai[1].playback.active = playback_active;
		wm8753_dai[1].capture.active = capture_active;
		wm8753_dai[1].active = codec_active;
		wm8753_dai[1].private_data = private_data;
		wm8753_dai[1].pop_wait = pop_wait;
		wm8753_dai[1].list = list;
	}
	wm8753_dai[0].codec = codec;
	wm8753_dai[1].codec = codec;
}

/* Delayed-work handler: re-apply the current bias level (finishes cap charge). */
static void wm8753_work(struct work_struct *work)
{
	struct snd_soc_codec *codec =
		container_of(work, struct snd_soc_codec, delayed_work.work);
	wm8753_set_bias_level(codec, codec->bias_level);
}

static int wm8753_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_codec *codec = socdev->card->codec;

	/* we only need to suspend if we are a valid card */
	if (!codec->card)
		return 0;

	wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
	return 0;
}

static int wm8753_resume(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_codec *codec = socdev->card->codec;
	int i;
	u8 data[2];
	u16 *cache = codec->reg_cache;

	/* we only need to resume if we are a valid card */
	if (!codec->card)
		return 0;

	/* Sync reg_cache with the hardware */
	for (i = 0; i < ARRAY_SIZE(wm8753_reg); i++) {
		if (i + 1 == WM8753_RESET)
			continue;

		/* No point in writing hardware default values back */
		if (cache[i] == wm8753_reg[i])
			continue;

		/* 7-bit register address in the high byte, 9-bit value below */
		data[0] = ((i + 1) << 1) | ((cache[i] >> 8) & 0x0001);
		data[1] = cache[i] & 0x00ff;
		codec->hw_write(codec->control_data, data, 2);
	}

	wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	/* charge wm8753 caps */
	if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
		wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
		codec->bias_level = SND_SOC_BIAS_ON;
		schedule_delayed_work(&codec->delayed_work,
			msecs_to_jiffies(caps_charge));
	}

	return 0;
}

/* Set when the bus (I2C/SPI) probe has registered a codec; NULL otherwise. */
static struct snd_soc_codec *wm8753_codec;

static int wm8753_probe(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);
	struct snd_soc_codec *codec;
	int ret = 0;

	if (!wm8753_codec) {
		dev_err(&pdev->dev, "WM8753 codec not yet registered\n");
		return -EINVAL;
	}

	socdev->card->codec = wm8753_codec;
	codec = wm8753_codec;

	/* default to DAI mode 0 (voice over PCM, hifi over HiFi DAI) */
	wm8753_set_dai_mode(codec, 0);

	/* register pcms */
	ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
	if (ret < 0) {
		printk(KERN_ERR "wm8753: failed to create pcms\n");
		goto pcm_err;
	}

	snd_soc_add_controls(codec, wm8753_snd_controls,
			ARRAY_SIZE(wm8753_snd_controls));
	wm8753_add_widgets(codec);

	ret = snd_soc_init_card(socdev);
	if (ret < 0) {
		printk(KERN_ERR "wm8753: failed to register card\n");
		goto card_err;
	}

	return 0;

card_err:
	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);
pcm_err:
	return ret;
}

/*
 * This function forces any delayed work to be queued and run.
 */
static int run_delayed_work(struct delayed_work *dwork)
{
	int ret;

	/* cancel any work waiting to be queued. */
	ret = cancel_delayed_work(dwork);

	/* if there was any work waiting then we run it now and
	 * wait for its completion */
	if (ret) {
		schedule_delayed_work(dwork, 0);
		flush_scheduled_work();
	}
	return ret;
}

/* power down chip */
static int wm8753_remove(struct platform_device *pdev)
{
	struct snd_soc_device *socdev = platform_get_drvdata(pdev);

	snd_soc_free_pcms(socdev);
	snd_soc_dapm_free(socdev);

	return 0;
}

struct snd_soc_codec_device soc_codec_dev_wm8753 = {
	.probe		= wm8753_probe,
	.remove		= wm8753_remove,
	.suspend	= wm8753_suspend,
	.resume		= wm8753_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8753);

/*
 * Common (bus-independent) registration: initialise the codec struct, reset
 * the chip, start the cap-charge sequence, latch the volume update bits and
 * register codec + DAIs with the ASoC core.  On failure, frees wm8753.
 */
static int wm8753_register(struct wm8753_priv *wm8753)
{
	int ret, i;
	struct snd_soc_codec *codec = &wm8753->codec;
	u16 reg;

	if (wm8753_codec) {
		dev_err(codec->dev, "Multiple WM8753 devices not supported\n");
		ret = -EINVAL;
		goto err;
	}

	mutex_init(&codec->mutex);
	INIT_LIST_HEAD(&codec->dapm_widgets);
	INIT_LIST_HEAD(&codec->dapm_paths);

	codec->name = "WM8753";
	codec->owner = THIS_MODULE;
	codec->read = wm8753_read_reg_cache;
	codec->write = wm8753_write;
	codec->bias_level = SND_SOC_BIAS_STANDBY;
	codec->set_bias_level = wm8753_set_bias_level;
	codec->dai = wm8753_dai;
	codec->num_dai = 2;
	codec->reg_cache_size = ARRAY_SIZE(wm8753->reg_cache) + 1;
	codec->reg_cache = &wm8753->reg_cache;
	codec->private_data = wm8753;

	memcpy(codec->reg_cache, wm8753_reg, sizeof(wm8753->reg_cache));
	INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work);

	ret = wm8753_reset(codec);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset\n");
		goto err;
	}

	/* charge output caps */
	wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
	schedule_delayed_work(&codec->delayed_work,
			msecs_to_jiffies(caps_charge));

	/* set the update bits - writes to L/R pairs only take effect
	 * once the 0x0100 latch bit is set */
	reg = wm8753_read_reg_cache(codec, WM8753_LDAC);
	wm8753_write(codec, WM8753_LDAC, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_RDAC);
	wm8753_write(codec, WM8753_RDAC, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_LADC);
	wm8753_write(codec, WM8753_LADC, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_RADC);
	wm8753_write(codec, WM8753_RADC, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_LOUT1V);
	wm8753_write(codec, WM8753_LOUT1V, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_ROUT1V);
	wm8753_write(codec, WM8753_ROUT1V, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_LOUT2V);
	wm8753_write(codec, WM8753_LOUT2V, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_ROUT2V);
	wm8753_write(codec, WM8753_ROUT2V, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_LINVOL);
	wm8753_write(codec, WM8753_LINVOL, reg | 0x0100);
	reg = wm8753_read_reg_cache(codec, WM8753_RINVOL);
	wm8753_write(codec, WM8753_RINVOL, reg | 0x0100);

	wm8753_codec = codec;

	for (i = 0; i < ARRAY_SIZE(wm8753_dai); i++)
		wm8753_dai[i].dev = codec->dev;

	ret = snd_soc_register_codec(codec);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to register codec: %d\n", ret);
		goto err;
	}

	ret = snd_soc_register_dais(&wm8753_dai[0], ARRAY_SIZE(wm8753_dai));
	if (ret != 0) {
		dev_err(codec->dev, "Failed to register DAIs: %d\n", ret);
		goto err_codec;
	}

	return 0;

err_codec:
	run_delayed_work(&codec->delayed_work);
	snd_soc_unregister_codec(codec);
err:
	kfree(wm8753);
	return ret;
}

/* Tear down in reverse order of wm8753_register(); frees wm8753. */
static void wm8753_unregister(struct wm8753_priv *wm8753)
{
	wm8753_set_bias_level(&wm8753->codec, SND_SOC_BIAS_OFF);
	run_delayed_work(&wm8753->codec.delayed_work);
	snd_soc_unregister_dais(&wm8753_dai[0], ARRAY_SIZE(wm8753_dai));
	snd_soc_unregister_codec(&wm8753->codec);
	kfree(wm8753);
	wm8753_codec = NULL;
}

#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8753_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct snd_soc_codec *codec;
	struct wm8753_priv *wm8753;

	wm8753 = kzalloc(sizeof(struct wm8753_priv), GFP_KERNEL);
	if (wm8753 == NULL)
		return -ENOMEM;

	codec = &wm8753->codec;
	codec->hw_write = (hw_write_t)i2c_master_send;
	codec->control_data = i2c;
	i2c_set_clientdata(i2c, wm8753);

	codec->dev = &i2c->dev;

	return wm8753_register(wm8753);
}

static int wm8753_i2c_remove(struct i2c_client *client)
{
	struct wm8753_priv *wm8753 = i2c_get_clientdata(client);
	wm8753_unregister(wm8753);
	return 0;
}

#ifdef CONFIG_PM
static int wm8753_i2c_suspend(struct i2c_client *client, pm_message_t msg)
{
	return snd_soc_suspend_device(&client->dev);
}

static int wm8753_i2c_resume(struct i2c_client *client)
{
	return snd_soc_resume_device(&client->dev);
}
#else
#define wm8753_i2c_suspend NULL
#define wm8753_i2c_resume NULL
#endif

static const struct i2c_device_id wm8753_i2c_id[] = {
	{ "wm8753", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8753_i2c_id);

static struct i2c_driver wm8753_i2c_driver = {
	.driver = {
		.name = "wm8753",
		.owner = THIS_MODULE,
	},
	.probe = wm8753_i2c_probe,
	.remove = wm8753_i2c_remove,
	.suspend = wm8753_i2c_suspend,
	.resume = wm8753_i2c_resume,
	.id_table = wm8753_i2c_id,
};
#endif

#if defined(CONFIG_SPI_MASTER)
/*
 * hw_write-compatible SPI transmit.
 * NOTE(review): only data[0..1] are copied into msg[], but t.len is set to
 * the caller's len - callers always pass len == 2 (register writes), a
 * larger len would transmit stale stack bytes; confirm before reusing.
 */
static int wm8753_spi_write(struct spi_device *spi, const char *data, int len)
{
	struct spi_transfer t;
	struct spi_message m;
	u8 msg[2];

	if (len <= 0)
		return 0;

	msg[0] = data[0];
	msg[1] = data[1];

	spi_message_init(&m);
	memset(&t, 0, (sizeof t));

	t.tx_buf = &msg[0];
	t.len = len;

	spi_message_add_tail(&t, &m);
	spi_sync(spi, &m);

	return len;
}

static int __devinit wm8753_spi_probe(struct spi_device *spi)
{
	struct snd_soc_codec *codec;
	struct wm8753_priv *wm8753;

	wm8753 = kzalloc(sizeof(struct wm8753_priv), GFP_KERNEL);
	if (wm8753 == NULL)
		return -ENOMEM;

	codec = &wm8753->codec;
	codec->control_data = spi;
	codec->hw_write = (hw_write_t)wm8753_spi_write;
	codec->dev = &spi->dev;

	dev_set_drvdata(&spi->dev, wm8753);

	return wm8753_register(wm8753);
}

static int __devexit wm8753_spi_remove(struct spi_device *spi)
{
	struct wm8753_priv *wm8753 = dev_get_drvdata(&spi->dev);
	wm8753_unregister(wm8753);
	return 0;
}

#ifdef CONFIG_PM
static int wm8753_spi_suspend(struct spi_device *spi, pm_message_t msg)
{
	return snd_soc_suspend_device(&spi->dev);
}

static int wm8753_spi_resume(struct spi_device *spi)
{
	return snd_soc_resume_device(&spi->dev);
}
#else
#define wm8753_spi_suspend NULL
#define wm8753_spi_resume NULL
#endif

static struct spi_driver wm8753_spi_driver = {
	.driver = {
		.name	= "wm8753",
		.bus	= &spi_bus_type,
		.owner	= THIS_MODULE,
	},
	.probe		= wm8753_spi_probe,
	.remove		= __devexit_p(wm8753_spi_remove),
	.suspend	= wm8753_spi_suspend,
	.resume		= wm8753_spi_resume,
};
#endif

/* Register both bus drivers; failures are logged but deliberately not fatal
 * so that one bus failing does not block the other. */
static int __init wm8753_modinit(void)
{
	int ret;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8753_i2c_driver);
	if (ret != 0)
		pr_err("Failed to register WM8753 I2C driver: %d\n", ret);
#endif
#if defined(CONFIG_SPI_MASTER)
	ret = spi_register_driver(&wm8753_spi_driver);
	if (ret != 0)
		pr_err("Failed to register WM8753 SPI driver: %d\n", ret);
#endif
	return 0;
}
module_init(wm8753_modinit);

static void __exit wm8753_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8753_i2c_driver);
#endif
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8753_spi_driver);
#endif
}
module_exit(wm8753_exit);

MODULE_DESCRIPTION("ASoC WM8753 driver");
MODULE_AUTHOR("Liam Girdwood");
MODULE_LICENSE("GPL");
gpl-2.0
NamJa/surface3-kernel
drivers/usb/phy/phy-samsung-usb2.c
507
14094
/* linux/drivers/usb/phy/phy-samsung-usb2.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * Author: Praveen Paneri <p.paneri@samsung.com>
 *
 * Samsung USB2.0 PHY transceiver; talks to S3C HS OTG controller, EHCI-S5P and
 * OHCI-EXYNOS controllers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/usb/otg.h>
#include <linux/usb/samsung_usb_phy.h>
#include <linux/platform_data/samsung-usbphy.h>

#include "phy-samsung-usb.h"

/* Bind the host bus to the OTG transceiver (first caller wins). */
static int samsung_usbphy_set_host(struct usb_otg *otg, struct usb_bus *host)
{
	if (!otg)
		return -ENODEV;

	if (!otg->host)
		otg->host = host;

	return 0;
}

/* Host PHY is powered when the SIDDQ (power-down) bit is clear. */
static bool exynos5_phyhost_is_on(void __iomem *regs)
{
	u32 reg;

	reg = readl(regs + EXYNOS5_PHY_HOST_CTRL0);

	return !(reg & HOST_CTRL0_SIDDQ);
}

static void samsung_exynos5_usb2phy_enable(struct samsung_usbphy *sphy)
{
	void __iomem *regs = sphy->regs;
	u32 phyclk = sphy->ref_clk_freq;
	u32 phyhost;
	u32 phyotg;
	u32 phyhsic;
	u32 ehcictrl;
	u32 ohcictrl;

	/*
	 * phy_usage helps in keeping usage count for phy
	 * so that the first consumer enabling the phy is also
	 * the last consumer to disable it.
	 */
	atomic_inc(&sphy->phy_usage);

	if (exynos5_phyhost_is_on(regs)) {
		dev_info(sphy->dev, "Already power on PHY\n");
		return;
	}

	/* Host configuration */
	phyhost = readl(regs + EXYNOS5_PHY_HOST_CTRL0);

	/* phy reference clock configuration */
	phyhost &= ~HOST_CTRL0_FSEL_MASK;
	phyhost |= HOST_CTRL0_FSEL(phyclk);

	/* host phy reset */
	phyhost &= ~(HOST_CTRL0_PHYSWRST |
			HOST_CTRL0_PHYSWRSTALL |
			HOST_CTRL0_SIDDQ |
			/* Enable normal mode of operation */
			HOST_CTRL0_FORCESUSPEND |
			HOST_CTRL0_FORCESLEEP);

	/* Link reset */
	phyhost |= (HOST_CTRL0_LINKSWRST |
			HOST_CTRL0_UTMISWRST |
			/* COMMON Block configuration during suspend */
			HOST_CTRL0_COMMONON_N);
	writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);
	udelay(10);
	/* release link/UTMI reset after the mandated settle time */
	phyhost &= ~(HOST_CTRL0_LINKSWRST |
			HOST_CTRL0_UTMISWRST);
	writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);

	/* OTG configuration */
	phyotg = readl(regs + EXYNOS5_PHY_OTG_SYS);

	/* phy reference clock configuration */
	phyotg &= ~OTG_SYS_FSEL_MASK;
	phyotg |= OTG_SYS_FSEL(phyclk);

	/* Enable normal mode of operation */
	phyotg &= ~(OTG_SYS_FORCESUSPEND |
			OTG_SYS_SIDDQ_UOTG |
			OTG_SYS_FORCESLEEP |
			OTG_SYS_REFCLKSEL_MASK |
			/* COMMON Block configuration during suspend */
			OTG_SYS_COMMON_ON);

	/* OTG phy & link reset */
	phyotg |= (OTG_SYS_PHY0_SWRST |
			OTG_SYS_LINKSWRST_UOTG |
			OTG_SYS_PHYLINK_SWRESET |
			OTG_SYS_OTGDISABLE |
			/* Set phy refclk */
			OTG_SYS_REFCLKSEL_CLKCORE);

	writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
	udelay(10);
	phyotg &= ~(OTG_SYS_PHY0_SWRST |
			OTG_SYS_LINKSWRST_UOTG |
			OTG_SYS_PHYLINK_SWRESET);
	writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);

	/* HSIC phy configuration */
	phyhsic = (HSIC_CTRL_REFCLKDIV_12 |
			HSIC_CTRL_REFCLKSEL |
			HSIC_CTRL_PHYSWRST);
	writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
	writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);
	udelay(10);
	phyhsic &= ~HSIC_CTRL_PHYSWRST;
	writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
	writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);

	udelay(80);

	/* enable EHCI DMA burst */
	ehcictrl = readl(regs + EXYNOS5_PHY_HOST_EHCICTRL);
	ehcictrl |= (HOST_EHCICTRL_ENAINCRXALIGN |
			HOST_EHCICTRL_ENAINCR4 |
			HOST_EHCICTRL_ENAINCR8 |
			HOST_EHCICTRL_ENAINCR16);
	writel(ehcictrl, regs + EXYNOS5_PHY_HOST_EHCICTRL);

	/* set ohci_suspend_on_n */
	ohcictrl = readl(regs + EXYNOS5_PHY_HOST_OHCICTRL);
	ohcictrl |= HOST_OHCICTRL_SUSPLGCY;
	writel(ohcictrl, regs + EXYNOS5_PHY_HOST_OHCICTRL);
}

/* Power up the PHY on the older (non-EXYNOS5) register layout. */
static void samsung_usb2phy_enable(struct samsung_usbphy *sphy)
{
	void __iomem *regs = sphy->regs;
	u32 phypwr;
	u32 phyclk;
	u32 rstcon;

	/* set clock frequency for PLL */
	phyclk = sphy->ref_clk_freq;
	phypwr = readl(regs + SAMSUNG_PHYPWR);
	rstcon = readl(regs + SAMSUNG_RSTCON);

	switch (sphy->drv_data->cpu_type) {
	case TYPE_S3C64XX:
		phyclk &= ~PHYCLK_COMMON_ON_N;
		phypwr &= ~PHYPWR_NORMAL_MASK;
		rstcon |= RSTCON_SWRST;
		break;
	case TYPE_EXYNOS4X12:
		phypwr &= ~(PHYPWR_NORMAL_MASK_HSIC0 |
				PHYPWR_NORMAL_MASK_HSIC1 |
				PHYPWR_NORMAL_MASK_PHY1);
		rstcon |= RSTCON_HOSTPHY_SWRST;
		/* fall through - 4X12 also needs the PHY0 setup below */
	case TYPE_EXYNOS4210:
		phypwr &= ~PHYPWR_NORMAL_MASK_PHY0;
		rstcon |= RSTCON_SWRST;
		/* fall through */
	default:
		break;
	}

	writel(phyclk, regs + SAMSUNG_PHYCLK);
	/* Configure PHY0 for normal operation*/
	writel(phypwr, regs + SAMSUNG_PHYPWR);
	/* reset all ports of PHY and Link */
	writel(rstcon, regs + SAMSUNG_RSTCON);
	udelay(10);
	if (sphy->drv_data->cpu_type == TYPE_EXYNOS4X12)
		rstcon &= ~RSTCON_HOSTPHY_SWRST;
	rstcon &= ~RSTCON_SWRST;
	writel(rstcon, regs + SAMSUNG_RSTCON);
}

static void samsung_exynos5_usb2phy_disable(struct samsung_usbphy *sphy)
{
	void __iomem *regs = sphy->regs;
	u32 phyhost;
	u32 phyotg;
	u32 phyhsic;

	/* refcounted: only the last consumer actually powers the PHY down */
	if (atomic_dec_return(&sphy->phy_usage) > 0) {
		dev_info(sphy->dev, "still being used\n");
		return;
	}

	phyhsic = (HSIC_CTRL_REFCLKDIV_12 |
			HSIC_CTRL_REFCLKSEL |
			HSIC_CTRL_SIDDQ |
			HSIC_CTRL_FORCESLEEP |
			HSIC_CTRL_FORCESUSPEND);
	writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL1);
	writel(phyhsic, regs + EXYNOS5_PHY_HSIC_CTRL2);

	phyhost = readl(regs + EXYNOS5_PHY_HOST_CTRL0);
	phyhost |= (HOST_CTRL0_SIDDQ |
			HOST_CTRL0_FORCESUSPEND |
			HOST_CTRL0_FORCESLEEP |
			HOST_CTRL0_PHYSWRST |
			HOST_CTRL0_PHYSWRSTALL);
	writel(phyhost, regs + EXYNOS5_PHY_HOST_CTRL0);

	phyotg = readl(regs + EXYNOS5_PHY_OTG_SYS);
	phyotg |= (OTG_SYS_FORCESUSPEND |
			OTG_SYS_SIDDQ_UOTG |
			OTG_SYS_FORCESLEEP);
	writel(phyotg, regs + EXYNOS5_PHY_OTG_SYS);
}

/* Power down the PHY on the older (non-EXYNOS5) register layout. */
static void samsung_usb2phy_disable(struct samsung_usbphy *sphy)
{
	void __iomem *regs = sphy->regs;
	u32 phypwr;

	phypwr = readl(regs + SAMSUNG_PHYPWR);

	switch (sphy->drv_data->cpu_type) {
	case TYPE_S3C64XX:
		phypwr |= PHYPWR_NORMAL_MASK;
		break;
	case TYPE_EXYNOS4X12:
		phypwr |= (PHYPWR_NORMAL_MASK_HSIC0 |
				PHYPWR_NORMAL_MASK_HSIC1 |
				PHYPWR_NORMAL_MASK_PHY1);
		/* fall through - 4X12 also powers down PHY0 below */
	case TYPE_EXYNOS4210:
		phypwr |= PHYPWR_NORMAL_MASK_PHY0;
		/* fall through */
	default:
		break;
	}

	/* Disable analog and otg block power */
	writel(phypwr, regs + SAMSUNG_PHYPWR);
}

/*
 * The function passed to the usb driver for phy initialization
 */
static int samsung_usb2phy_init(struct usb_phy *phy)
{
	struct samsung_usbphy *sphy;
	struct usb_bus *host = NULL;
	unsigned long flags;
	int ret = 0;

	sphy = phy_to_sphy(phy);

	host = phy->otg->host;

	/* Enable the phy clock */
	ret = clk_prepare_enable(sphy->clk);
	if (ret) {
		dev_err(sphy->dev, "%s: clk_prepare_enable failed\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&sphy->lock, flags);

	if (host) {
		/* setting default phy-type for USB 2.0 */
		/*
		 * NOTE(review): this condition is always true (no name can
		 * contain both "ehci" and "ohci"), so HOST type is always
		 * selected when a host bus exists - presumably intentional;
		 * confirm before "fixing".
		 */
		if (!strstr(dev_name(host->controller), "ehci") ||
				!strstr(dev_name(host->controller), "ohci"))
			samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_HOST);
	} else {
		samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
	}

	/* Disable phy isolation */
	if (sphy->plat && sphy->plat->pmu_isolation)
		sphy->plat->pmu_isolation(false);
	else if (sphy->drv_data->set_isolation)
		sphy->drv_data->set_isolation(sphy, false);

	/* Selecting Host/OTG mode; After reset USB2.0PHY_CFG: HOST */
	samsung_usbphy_cfg_sel(sphy);

	/* Initialize usb phy registers */
	sphy->drv_data->phy_enable(sphy);

	spin_unlock_irqrestore(&sphy->lock, flags);

	/* Disable the phy clock */
	clk_disable_unprepare(sphy->clk);

	return ret;
}

/*
 * The function passed to the usb driver for phy shutdown
 */
/*
 * The function passed to the usb driver for phy shutdown
 *
 * Mirror image of samsung_usb2phy_init(): gates the phy clock on for the
 * duration of the register writes, disables the phy block, re-enables PMU
 * isolation, then drops the clock again.
 */
static void samsung_usb2phy_shutdown(struct usb_phy *phy)
{
	struct samsung_usbphy *sphy;
	struct usb_bus *host = NULL;
	unsigned long flags;

	sphy = phy_to_sphy(phy);

	host = phy->otg->host;

	/* Phy registers are only accessible while its clock is running */
	if (clk_prepare_enable(sphy->clk)) {
		dev_err(sphy->dev, "%s: clk_prepare_enable failed\n", __func__);
		return;
	}

	spin_lock_irqsave(&sphy->lock, flags);

	if (host) {
		/* setting default phy-type for USB 2.0 */
		/*
		 * NOTE(review): with ||, this condition is true for any
		 * controller name that does not contain both "ehci" and
		 * "ohci", i.e. effectively always. Presumably && (or two
		 * positive strstr() tests) was intended — confirm against
		 * upstream before changing, since the same pattern appears
		 * in samsung_usb2phy_init().
		 */
		if (!strstr(dev_name(host->controller), "ehci") ||
		    !strstr(dev_name(host->controller), "ohci"))
			samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_HOST);
	} else {
		samsung_usbphy_set_type(&sphy->phy, USB_PHY_TYPE_DEVICE);
	}

	/* De-initialize usb phy registers */
	sphy->drv_data->phy_disable(sphy);

	/* Enable phy isolation: board callback takes precedence over drvdata */
	if (sphy->plat && sphy->plat->pmu_isolation)
		sphy->plat->pmu_isolation(true);
	else if (sphy->drv_data->set_isolation)
		sphy->drv_data->set_isolation(sphy, true);

	spin_unlock_irqrestore(&sphy->lock, flags);

	clk_disable_unprepare(sphy->clk);
}

/*
 * Probe: map the phy register bank, pick the per-SoC clock, gather
 * platform data (DT or board file), fill in the struct usb_phy callbacks
 * and register the transceiver with the USB phy core.
 *
 * All allocations and the register mapping are devm-managed, so the error
 * paths can simply return.
 */
static int samsung_usb2phy_probe(struct platform_device *pdev)
{
	struct samsung_usbphy *sphy;
	struct usb_otg *otg;
	struct samsung_usbphy_data *pdata = dev_get_platdata(&pdev->dev);
	const struct samsung_usbphy_drvdata *drv_data;
	struct device *dev = &pdev->dev;
	struct resource *phy_mem;
	void __iomem *phy_base;
	struct clk *clk;
	int ret;

	phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phy_base = devm_ioremap_resource(dev, phy_mem);
	if (IS_ERR(phy_base))
		return PTR_ERR(phy_base);

	sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL);
	if (!sphy)
		return -ENOMEM;

	otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL);
	if (!otg)
		return -ENOMEM;

	drv_data = samsung_usbphy_get_driver_data(pdev);

	/* Exynos5250 clocks the phy from "usbhost"; everything else uses "otg" */
	if (drv_data->cpu_type == TYPE_EXYNOS5250)
		clk = devm_clk_get(dev, "usbhost");
	else
		clk = devm_clk_get(dev, "otg");

	if (IS_ERR(clk)) {
		dev_err(dev, "Failed to get usbhost/otg clock\n");
		return PTR_ERR(clk);
	}

	sphy->dev = dev;

	/* DT boot parses its own properties; legacy boot requires pdata */
	if (dev->of_node) {
		ret = samsung_usbphy_parse_dt(sphy);
		if (ret < 0)
			return ret;
	} else {
		if (!pdata) {
			dev_err(dev, "no platform data specified\n");
			return -EINVAL;
		}
	}

	sphy->plat = pdata;
	sphy->regs = phy_base;
	sphy->clk = clk;
	sphy->drv_data = drv_data;
	sphy->phy.dev = sphy->dev;
	sphy->phy.label = "samsung-usb2phy";
	sphy->phy.type = USB_PHY_TYPE_USB2;
	sphy->phy.init = samsung_usb2phy_init;
	sphy->phy.shutdown = samsung_usb2phy_shutdown;

	/* Cached PLL reference-clock selector, used by the phy_enable ops */
	sphy->ref_clk_freq = samsung_usbphy_get_refclk_freq(sphy);
	if (sphy->ref_clk_freq < 0)
		return -EINVAL;

	sphy->phy.otg = otg;
	sphy->phy.otg->phy = &sphy->phy;
	sphy->phy.otg->set_host = samsung_usbphy_set_host;

	spin_lock_init(&sphy->lock);

	platform_set_drvdata(pdev, sphy);

	return usb_add_phy_dev(&sphy->phy);
}

/*
 * Remove: unregister the transceiver and drop the pmu/sysreg mappings,
 * which were created with plain ioremap() (not devm) elsewhere in this
 * driver and therefore need an explicit iounmap() here.
 */
static int samsung_usb2phy_remove(struct platform_device *pdev)
{
	struct samsung_usbphy *sphy = platform_get_drvdata(pdev);

	usb_remove_phy(&sphy->phy);

	if (sphy->pmuregs)
		iounmap(sphy->pmuregs);
	if (sphy->sysreg)
		iounmap(sphy->sysreg);

	return 0;
}

/* Per-SoC driver data: S3C64xx — isolation control not implemented yet */
static const struct samsung_usbphy_drvdata usb2phy_s3c64xx = {
	.cpu_type		= TYPE_S3C64XX,
	.devphy_en_mask		= S3C64XX_USBPHY_ENABLE,
	.rate_to_clksel		= samsung_usbphy_rate_to_clksel_64xx,
	.set_isolation		= NULL, /* TODO */
	.phy_enable		= samsung_usb2phy_enable,
	.phy_disable		= samsung_usb2phy_disable,
};

/* Per-SoC driver data: Exynos4210 */
static const struct samsung_usbphy_drvdata usb2phy_exynos4 = {
	.cpu_type		= TYPE_EXYNOS4210,
	.devphy_en_mask		= EXYNOS_USBPHY_ENABLE,
	.hostphy_en_mask	= EXYNOS_USBPHY_ENABLE,
	.rate_to_clksel		= samsung_usbphy_rate_to_clksel_64xx,
	.set_isolation		= samsung_usbphy_set_isolation_4210,
	.phy_enable		= samsung_usb2phy_enable,
	.phy_disable		= samsung_usb2phy_disable,
};

/* Per-SoC driver data: Exynos4x12 — adds HSIC0/1 handling in enable/disable */
static const struct samsung_usbphy_drvdata usb2phy_exynos4x12 = {
	.cpu_type		= TYPE_EXYNOS4X12,
	.devphy_en_mask		= EXYNOS_USBPHY_ENABLE,
	.hostphy_en_mask	= EXYNOS_USBPHY_ENABLE,
	.rate_to_clksel		= samsung_usbphy_rate_to_clksel_4x12,
	.set_isolation		= samsung_usbphy_set_isolation_4210,
	.phy_enable		= samsung_usb2phy_enable,
	.phy_disable		= samsung_usb2phy_disable,
};

/*
 * Per-SoC driver data: Exynos5250 — host-only enable/disable ops.
 * NOTE(review): unlike its siblings this table is not const — confirm
 * whether that is intentional before annotating.
 */
static struct samsung_usbphy_drvdata usb2phy_exynos5 = {
	.cpu_type		= TYPE_EXYNOS5250,
	.hostphy_en_mask	= EXYNOS_USBPHY_ENABLE,
	.hostphy_reg_offset	= EXYNOS_USBHOST_PHY_CTRL_OFFSET,
	.rate_to_clksel		= samsung_usbphy_rate_to_clksel_4x12,
	.set_isolation		= samsung_usbphy_set_isolation_4210,
	.phy_enable		= samsung_exynos5_usb2phy_enable,
	.phy_disable		= samsung_exynos5_usb2phy_disable,
};

#ifdef CONFIG_OF
/* Device-tree match table; .data selects the per-SoC drvdata above */
static const struct of_device_id samsung_usbphy_dt_match[] = {
	{
		.compatible = "samsung,s3c64xx-usb2phy",
		.data = &usb2phy_s3c64xx,
	}, {
		.compatible = "samsung,exynos4210-usb2phy",
		.data = &usb2phy_exynos4,
	}, {
		.compatible = "samsung,exynos4x12-usb2phy",
		.data = &usb2phy_exynos4x12,
	}, {
		.compatible = "samsung,exynos5250-usb2phy",
		.data = &usb2phy_exynos5
	},
	{},
};
MODULE_DEVICE_TABLE(of, samsung_usbphy_dt_match);
#endif

/* Legacy (non-DT) platform-device id table */
static struct platform_device_id samsung_usbphy_driver_ids[] = {
	{
		.name		= "s3c64xx-usb2phy",
		.driver_data	= (unsigned long)&usb2phy_s3c64xx,
	}, {
		.name		= "exynos4210-usb2phy",
		.driver_data	= (unsigned long)&usb2phy_exynos4,
	}, {
		.name		= "exynos4x12-usb2phy",
		.driver_data	= (unsigned long)&usb2phy_exynos4x12,
	}, {
		.name		= "exynos5250-usb2phy",
		.driver_data	= (unsigned long)&usb2phy_exynos5,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, samsung_usbphy_driver_ids);

static struct platform_driver samsung_usb2phy_driver = {
	.probe		= samsung_usb2phy_probe,
	.remove		= samsung_usb2phy_remove,
	.id_table	= samsung_usbphy_driver_ids,
	.driver = {
		.name	= "samsung-usb2phy",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(samsung_usbphy_dt_match),
	},
};

module_platform_driver(samsung_usb2phy_driver);

MODULE_DESCRIPTION("Samsung USB 2.0 phy controller");
MODULE_AUTHOR("Praveen Paneri <p.paneri@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:samsung-usb2phy");
gpl-2.0
nxnfufunezn/linux
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
763
86100
/* * Copyright (C) 2003 - 2009 NetXen, Inc. * Copyright (C) 2009 - QLogic Corporation. * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * The full GNU General Public License is included in this distribution * in the file called "COPYING". * */ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include "netxen_nic_hw.h" #include "netxen_nic.h" #include <linux/dma-mapping.h> #include <linux/if_vlan.h> #include <net/ip.h> #include <linux/ipv6.h> #include <linux/inetdevice.h> #include <linux/sysfs.h> #include <linux/aer.h> MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); char netxen_nic_driver_name[] = "netxen_nic"; static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v" NETXEN_NIC_LINUX_VERSIONID; static int port_mode = NETXEN_PORT_MODE_AUTO_NEG; /* Default to restricted 1G auto-neg mode */ static int wol_port_mode = 5; static int use_msi = 1; static int use_msi_x = 1; static int auto_fw_reset = AUTO_FW_RESET_ENABLED; module_param(auto_fw_reset, int, 0644); MODULE_PARM_DESC(auto_fw_reset,"Auto firmware reset (0=disabled, 1=enabled"); static int netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void netxen_nic_remove(struct pci_dev *pdev); static int 
netxen_nic_open(struct net_device *netdev); static int netxen_nic_close(struct net_device *netdev); static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); static void netxen_tx_timeout(struct net_device *netdev); static void netxen_tx_timeout_task(struct work_struct *work); static void netxen_fw_poll_work(struct work_struct *work); static void netxen_schedule_work(struct netxen_adapter *adapter, work_func_t func, int delay); static void netxen_cancel_fw_work(struct netxen_adapter *adapter); static int netxen_nic_poll(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void netxen_nic_poll_controller(struct net_device *netdev); #endif static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); static void netxen_create_diag_entries(struct netxen_adapter *adapter); static void netxen_remove_diag_entries(struct netxen_adapter *adapter); static int nx_dev_request_aer(struct netxen_adapter *adapter); static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); static int netxen_can_start_firmware(struct netxen_adapter *adapter); static irqreturn_t netxen_intr(int irq, void *data); static irqreturn_t netxen_msi_intr(int irq, void *data); static irqreturn_t netxen_msix_intr(int irq, void *data); static void netxen_free_ip_list(struct netxen_adapter *, bool); static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); static int netxen_nic_set_mac(struct net_device *netdev, void *p); /* PCI Device ID Table */ #define ENTRY(device) \ {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} static const struct pci_device_id netxen_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), ENTRY(PCI_DEVICE_ID_NX2031_4GCU), 
ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), ENTRY(PCI_DEVICE_ID_NX3031), {0,} }; MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); static uint32_t crb_cmd_producer[4] = { CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1, CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3 }; void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, struct nx_host_tx_ring *tx_ring) { NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); } static uint32_t crb_cmd_consumer[4] = { CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1, CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3 }; static inline void netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter, struct nx_host_tx_ring *tx_ring) { NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer); } static uint32_t msi_tgt_status[8] = { ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1, ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3, ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5, ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 }; static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) { struct netxen_adapter *adapter = sds_ring->adapter; NXWRIO(adapter, sds_ring->crb_intr_mask, 0); } static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) { struct netxen_adapter *adapter = sds_ring->adapter; NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1); if (!NETXEN_IS_MSI_FAMILY(adapter)) NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff); } static int netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) { int size = sizeof(struct nx_host_sds_ring) * count; recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); return recv_ctx->sds_rings == NULL; } static void netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) { kfree(recv_ctx->sds_rings); recv_ctx->sds_rings = NULL; } static int 
netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings)) return -ENOMEM; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_add(netdev, &sds_ring->napi, netxen_nic_poll, NAPI_POLL_WEIGHT); } return 0; } static void netxen_napi_del(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netif_napi_del(&sds_ring->napi); } netxen_free_sds_rings(&adapter->recv_ctx); } static void netxen_napi_enable(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; napi_enable(&sds_ring->napi); netxen_nic_enable_int(sds_ring); } } static void netxen_napi_disable(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; netxen_nic_disable_int(sds_ring); napi_synchronize(&sds_ring->napi); napi_disable(&sds_ring->napi); } } static int nx_set_dma_mask(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; uint64_t mask, cmask; adapter->pci_using_dac = 0; mask = DMA_BIT_MASK(32); cmask = DMA_BIT_MASK(32); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { #ifndef CONFIG_IA64 mask = DMA_BIT_MASK(35); #endif } else { mask = DMA_BIT_MASK(39); cmask = mask; } if (pci_set_dma_mask(pdev, mask) == 0 && pci_set_consistent_dma_mask(pdev, cmask) == 0) { adapter->pci_using_dac = 1; return 0; } return -EIO; } /* Update 
addressable range if firmware supports it */ static int nx_update_dma_mask(struct netxen_adapter *adapter) { int change, shift, err; uint64_t mask, old_mask, old_cmask; struct pci_dev *pdev = adapter->pdev; change = 0; shift = NXRD32(adapter, CRB_DMA_SHIFT); if (shift > 32) return 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) change = 1; else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4)) change = 1; if (change) { old_mask = pdev->dma_mask; old_cmask = pdev->dev.coherent_dma_mask; mask = DMA_BIT_MASK(32+shift); err = pci_set_dma_mask(pdev, mask); if (err) goto err_out; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { err = pci_set_consistent_dma_mask(pdev, mask); if (err) goto err_out; } dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); } return 0; err_out: pci_set_dma_mask(pdev, old_mask); pci_set_consistent_dma_mask(pdev, old_cmask); return err; } static int netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) { u32 val, timeout; if (first_boot == 0x55555555) { /* This is the first boot after power up */ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; /* PCI bus master workaround */ first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); if (!(first_boot & 0x4)) { first_boot |= 0x4; NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); NXRD32(adapter, NETXEN_PCIE_REG(0x4)); } /* This is the first boot after power up */ first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET); if (first_boot != 0x80000f) { /* clear the register for future unloads/loads */ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0); return -EIO; } /* Start P2 boot loader */ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE); NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1); timeout = 0; do { msleep(1); val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); if (++timeout > 5000) return -EIO; } while (val == NETXEN_BDINFO_MAGIC); } return 0; } static void 
netxen_set_port_mode(struct netxen_adapter *adapter) { u32 val, data; val = adapter->ahw.board_type; if ((val == NETXEN_BRDTYPE_P3_HMEZ) || (val == NETXEN_BRDTYPE_P3_XG_LOM)) { if (port_mode == NETXEN_PORT_MODE_802_3_AP) { data = NETXEN_PORT_MODE_802_3_AP; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else if (port_mode == NETXEN_PORT_MODE_XG) { data = NETXEN_PORT_MODE_XG; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) { data = NETXEN_PORT_MODE_AUTO_NEG_1G; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) { data = NETXEN_PORT_MODE_AUTO_NEG_XG; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } else { data = NETXEN_PORT_MODE_AUTO_NEG; NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data); } if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) && (wol_port_mode != NETXEN_PORT_MODE_XG) && (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) && (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) { wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG; } NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode); } } #define PCI_CAP_ID_GEN 0x10 static void netxen_pcie_strap_init(struct netxen_adapter *adapter) { u32 pdevfuncsave; u32 c8c9value = 0; u32 chicken = 0; u32 control = 0; int i, pos; struct pci_dev *pdev; pdev = adapter->pdev; chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3)); /* clear chicken3.25:24 */ chicken &= 0xFCFFFFFF; /* * if gen1 and B0, set F1020 - if gen 2, do nothing * if gen2 set to F1000 */ pos = pci_find_capability(pdev, PCI_CAP_ID_GEN); if (pos == 0xC0) { pci_read_config_dword(pdev, pos + 0x10, &control); if ((control & 0x000F0000) != 0x00020000) { /* set chicken3.24 if gen1 */ chicken |= 0x01000000; } dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n"); c8c9value = 0xF1000; } else { /* set chicken3.24 if gen1 */ chicken |= 0x01000000; dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n"); if (adapter->ahw.revision_id == NX_P3_B0) c8c9value = 0xF1020; else 
c8c9value = 0; } NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken); if (!c8c9value) return; pdevfuncsave = pdev->devfn; if (pdevfuncsave & 0x07) return; for (i = 0; i < 8; i++) { pci_read_config_dword(pdev, pos + 8, &control); pci_read_config_dword(pdev, pos + 8, &control); pci_write_config_dword(pdev, pos + 8, c8c9value); pdev->devfn++; } pdev->devfn = pdevfuncsave; } static void netxen_set_msix_bit(struct pci_dev *pdev, int enable) { u32 control; if (pdev->msix_cap) { pci_read_config_dword(pdev, pdev->msix_cap, &control); if (enable) control |= PCI_MSIX_FLAGS_ENABLE; else control = 0; pci_write_config_dword(pdev, pdev->msix_cap, control); } } static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count) { int i; for (i = 0; i < count; i++) adapter->msix_entries[i].entry = i; } static int netxen_read_mac_addr(struct netxen_adapter *adapter) { int i; unsigned char *p; u64 mac_addr; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) return -EIO; } else { if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0) return -EIO; } p = (unsigned char *)&mac_addr; for (i = 0; i < 6; i++) netdev->dev_addr[i] = *(p + 5 - i); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); /* set station address */ if (!is_valid_ether_addr(netdev->dev_addr)) dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); return 0; } static int netxen_nic_set_mac(struct net_device *netdev, void *p) { struct netxen_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (netif_running(netdev)) { netif_device_detach(netdev); netxen_napi_disable(adapter); } memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); adapter->macaddr_set(adapter, addr->sa_data); if (netif_running(netdev)) { 
netif_device_attach(netdev); netxen_napi_enable(adapter); } return 0; } static void netxen_set_multicast_list(struct net_device *dev) { struct netxen_adapter *adapter = netdev_priv(dev); adapter->set_multi(dev); } static netdev_features_t netxen_fix_features(struct net_device *dev, netdev_features_t features) { if (!(features & NETIF_F_RXCSUM)) { netdev_info(dev, "disabling LRO as RXCSUM is off\n"); features &= ~NETIF_F_LRO; } return features; } static int netxen_set_features(struct net_device *dev, netdev_features_t features) { struct netxen_adapter *adapter = netdev_priv(dev); int hw_lro; if (!((dev->features ^ features) & NETIF_F_LRO)) return 0; hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED : NETXEN_NIC_LRO_DISABLED; if (netxen_config_hw_lro(adapter, hw_lro)) return -EIO; if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter)) return -EIO; return 0; } static const struct net_device_ops netxen_netdev_ops = { .ndo_open = netxen_nic_open, .ndo_stop = netxen_nic_close, .ndo_start_xmit = netxen_nic_xmit_frame, .ndo_get_stats64 = netxen_nic_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = netxen_set_multicast_list, .ndo_set_mac_address = netxen_nic_set_mac, .ndo_change_mtu = netxen_nic_change_mtu, .ndo_tx_timeout = netxen_tx_timeout, .ndo_fix_features = netxen_fix_features, .ndo_set_features = netxen_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = netxen_nic_poll_controller, #endif }; static inline bool netxen_function_zero(struct pci_dev *pdev) { return (PCI_FUNC(pdev->devfn) == 0) ? 
true : false; } static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter, u32 mode) { NXWR32(adapter, NETXEN_INTR_MODE_REG, mode); } static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter) { return NXRD32(adapter, NETXEN_INTR_MODE_REG); } static void netxen_initialize_interrupt_registers(struct netxen_adapter *adapter) { struct netxen_legacy_intr_set *legacy_intrp; u32 tgt_status_reg, int_state_reg; if (adapter->ahw.revision_id >= NX_P3_B0) legacy_intrp = &legacy_intr[adapter->ahw.pci_func]; else legacy_intrp = &legacy_intr[0]; tgt_status_reg = legacy_intrp->tgt_status_reg; int_state_reg = ISR_INT_STATE_REG; adapter->int_vec_bit = legacy_intrp->int_vec_bit; adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg); adapter->tgt_mask_reg = netxen_get_ioaddr(adapter, legacy_intrp->tgt_mask_reg); adapter->pci_int_reg = netxen_get_ioaddr(adapter, legacy_intrp->pci_int_reg); adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR); if (adapter->ahw.revision_id >= NX_P3_B1) adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, int_state_reg); else adapter->crb_int_state_reg = netxen_get_ioaddr(adapter, CRB_INT_VECTOR); } static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter, int num_msix) { struct pci_dev *pdev = adapter->pdev; u32 value; int err; if (adapter->msix_supported) { netxen_init_msix_entries(adapter, num_msix); err = pci_enable_msix_range(pdev, adapter->msix_entries, num_msix, num_msix); if (err > 0) { adapter->flags |= NETXEN_NIC_MSIX_ENABLED; netxen_set_msix_bit(pdev, 1); if (adapter->rss_supported) adapter->max_sds_rings = num_msix; dev_info(&pdev->dev, "using msi-x interrupts\n"); return 0; } /* fall through for msi */ } if (use_msi && !pci_enable_msi(pdev)) { value = msi_tgt_status[adapter->ahw.pci_func]; adapter->flags |= NETXEN_NIC_MSI_ENABLED; adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value); adapter->msix_entries[0].vector = pdev->irq; dev_info(&pdev->dev, "using 
msi interrupts\n"); return 0; } dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n"); return -EIO; } static int netxen_setup_intr(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int num_msix; if (adapter->rss_supported) num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ? MSIX_ENTRIES_PER_ADAPTER : 2; else num_msix = 1; adapter->max_sds_rings = 1; adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED); netxen_initialize_interrupt_registers(adapter); netxen_set_msix_bit(pdev, 0); if (netxen_function_zero(pdev)) { if (!netxen_setup_msi_interrupts(adapter, num_msix)) netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE); else netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE); } else { if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE && netxen_setup_msi_interrupts(adapter, num_msix)) { dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n"); return -EIO; } } if (!NETXEN_IS_MSI_FAMILY(adapter)) { adapter->msix_entries[0].vector = pdev->irq; dev_info(&pdev->dev, "using legacy interrupts\n"); } return 0; } static void netxen_teardown_intr(struct netxen_adapter *adapter) { if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) pci_disable_msix(adapter->pdev); if (adapter->flags & NETXEN_NIC_MSI_ENABLED) pci_disable_msi(adapter->pdev); } static void netxen_cleanup_pci_map(struct netxen_adapter *adapter) { if (adapter->ahw.db_base != NULL) iounmap(adapter->ahw.db_base); if (adapter->ahw.pci_base0 != NULL) iounmap(adapter->ahw.pci_base0); if (adapter->ahw.pci_base1 != NULL) iounmap(adapter->ahw.pci_base1); if (adapter->ahw.pci_base2 != NULL) iounmap(adapter->ahw.pci_base2); } static int netxen_setup_pci_map(struct netxen_adapter *adapter) { void __iomem *db_ptr = NULL; resource_size_t mem_base, db_base; unsigned long mem_len, db_len = 0; struct pci_dev *pdev = adapter->pdev; int pci_func = adapter->ahw.pci_func; struct netxen_hardware_context *ahw = &adapter->ahw; int err = 0; /* 
* Set the CRB window to invalid. If any register in window 0 is * accessed it should set the window to 0 and then reset it to 1. */ adapter->ahw.crb_win = -1; adapter->ahw.ocm_win = -1; /* remap phys address */ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ mem_len = pci_resource_len(pdev, 0); /* 128 Meg of memory */ if (mem_len == NETXEN_PCI_128MB_SIZE) { ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE); ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); err = -EIO; goto err_out; } ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; } else if (mem_len == NETXEN_PCI_32MB_SIZE) { ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); err = -EIO; goto err_out; } } else if (mem_len == NETXEN_PCI_2MB_SIZE) { ahw->pci_base0 = pci_ioremap_bar(pdev, 0); if (ahw->pci_base0 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); return -EIO; } ahw->pci_len0 = mem_len; } else { return -EIO; } netxen_setup_hwops(adapter); dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func))); } if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) goto skip_doorbell; db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ db_len = pci_resource_len(pdev, 4); if (db_len == 0) { 
printk(KERN_ERR "%s: doorbell is disabled\n", netxen_nic_driver_name); err = -EIO; goto err_out; } db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); if (!db_ptr) { printk(KERN_ERR "%s: Failed to allocate doorbell map.", netxen_nic_driver_name); err = -EIO; goto err_out; } skip_doorbell: adapter->ahw.db_base = db_ptr; adapter->ahw.db_len = db_len; return 0; err_out: netxen_cleanup_pci_map(adapter); return err; } static void netxen_check_options(struct netxen_adapter *adapter) { u32 fw_major, fw_minor, fw_build, prev_fw_version; char brd_name[NETXEN_MAX_SHORT_NAME]; char serial_num[32]; int i, offset, val, err; __le32 *ptr32; struct pci_dev *pdev = adapter->pdev; adapter->driver_mismatch = 0; ptr32 = (__le32 *)&serial_num; offset = NX_FW_SERIAL_NUM_OFFSET; for (i = 0; i < 8; i++) { if (netxen_rom_fast_read(adapter, offset, &val) == -1) { dev_err(&pdev->dev, "error reading board info\n"); adapter->driver_mismatch = 1; return; } ptr32[i] = cpu_to_le32(val); offset += sizeof(u32); } fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); prev_fw_version = adapter->fw_version; adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); /* Get FW Mini Coredump template and store it */ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (adapter->mdump.md_template == NULL || adapter->fw_version > prev_fw_version) { kfree(adapter->mdump.md_template); adapter->mdump.md_template = NULL; err = netxen_setup_minidump(adapter); if (err) dev_err(&adapter->pdev->dev, "Failed to setup minidump rcode = %d\n", err); } } if (adapter->portnum == 0) { if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type, brd_name)) strcpy(serial_num, "Unknown"); pr_info("%s: %s Board S/N %s Chip rev 0x%x\n", module_name(THIS_MODULE), brd_name, serial_num, adapter->ahw.revision_id); } if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) { adapter->driver_mismatch = 1; 
dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n", fw_major, fw_minor, fw_build); return; } if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { i = NXRD32(adapter, NETXEN_SRE_MISC); adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0; } dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n", NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build, adapter->ahw.cut_through ? "cut-through" : "legacy"); if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) { adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G; adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; } adapter->msix_supported = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { adapter->msix_supported = !!use_msi_x; adapter->rss_supported = !!use_msi_x; } else { u32 flashed_ver = 0; netxen_rom_fast_read(adapter, NX_FW_VERSION_OFFSET, (int *)&flashed_ver); flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) { switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P2_SB31_10G_CX4: adapter->msix_supported = !!use_msi_x; adapter->rss_supported = !!use_msi_x; break; default: break; } } } adapter->num_txd = MAX_CMD_DESCRIPTORS; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; adapter->max_rds_rings = 3; } else { adapter->num_lro_rxd = 0; adapter->max_rds_rings = 2; } } static int netxen_start_firmware(struct netxen_adapter *adapter) { int val, err, first_boot; struct pci_dev *pdev = adapter->pdev; /* required for NX2031 dummy dma */ err = nx_set_dma_mask(adapter); if (err) return err; err = netxen_can_start_firmware(adapter); if (err < 0) return err; if (!err) goto wait_init; first_boot = 
NXRD32(adapter, NETXEN_CAM_RAM(0x1fc)); err = netxen_check_hw_init(adapter, first_boot); if (err) { dev_err(&pdev->dev, "error in init HW init sequence\n"); return err; } netxen_request_firmware(adapter); err = netxen_need_fw_reset(adapter); if (err < 0) goto err_out; if (err == 0) goto pcie_strap_init; if (first_boot != 0x55555555) { NXWR32(adapter, CRB_CMDPEG_STATE, 0); netxen_pinit_from_rom(adapter); msleep(1); } NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555); NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0); NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_set_port_mode(adapter); err = netxen_load_firmware(adapter); if (err) goto err_out; netxen_release_firmware(adapter); if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { /* Initialize multicast addr pool owners */ val = 0x7654; if (adapter->ahw.port_type == NETXEN_NIC_XGBE) val |= 0x0f000000; NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val); } err = netxen_init_dummy_dma(adapter); if (err) goto err_out; /* * Tell the hardware our version number. */ val = (_NETXEN_NIC_LINUX_MAJOR << 16) | ((_NETXEN_NIC_LINUX_MINOR << 8)) | (_NETXEN_NIC_LINUX_SUBVERSION); NXWR32(adapter, CRB_DRIVER_VERSION, val); pcie_strap_init: if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_pcie_strap_init(adapter); wait_init: /* Handshake with the card before we register the devices. 
*/ err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); if (err) { netxen_free_dummy_dma(adapter); goto err_out; } NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY); nx_update_dma_mask(adapter); netxen_check_options(adapter); adapter->need_fw_reset = 0; /* fall through and release firmware */ err_out: netxen_release_firmware(adapter); return err; } static int netxen_nic_request_irq(struct netxen_adapter *adapter) { irq_handler_t handler; struct nx_host_sds_ring *sds_ring; int err, ring; unsigned long flags = 0; struct net_device *netdev = adapter->netdev; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) handler = netxen_msix_intr; else if (adapter->flags & NETXEN_NIC_MSI_ENABLED) handler = netxen_msi_intr; else { flags |= IRQF_SHARED; handler = netxen_intr; } adapter->irq = netdev->irq; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; sprintf(sds_ring->name, "%s[%d]", netdev->name, ring); err = request_irq(sds_ring->irq, handler, flags, sds_ring->name, sds_ring); if (err) return err; } return 0; } static void netxen_nic_free_irq(struct netxen_adapter *adapter) { int ring; struct nx_host_sds_ring *sds_ring; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; for (ring = 0; ring < adapter->max_sds_rings; ring++) { sds_ring = &recv_ctx->sds_rings[ring]; free_irq(sds_ring->irq, sds_ring); } } static void netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter) { adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT; adapter->coal.normal.data.rx_time_us = NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US; adapter->coal.normal.data.rx_packets = NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS; adapter->coal.normal.data.tx_time_us = NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US; adapter->coal.normal.data.tx_packets = NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS; } /* with rtnl_lock */ static int __netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) { int err; if 
(adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return -EIO; err = adapter->init_port(adapter, adapter->physical_port); if (err) { printk(KERN_ERR "%s: Failed to initialize port %d\n", netxen_nic_driver_name, adapter->portnum); return err; } if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) adapter->macaddr_set(adapter, adapter->mac_addr); adapter->set_multi(netdev); adapter->set_mtu(adapter, netdev->mtu); adapter->ahw.linkup = 0; if (adapter->max_sds_rings > 1) netxen_config_rss(adapter, 1); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_config_intr_coalesce(adapter); if (netdev->features & NETIF_F_LRO) netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); netxen_napi_enable(adapter); if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) netxen_linkevent_request(adapter, 1); else netxen_nic_set_link_parameters(adapter); set_bit(__NX_DEV_UP, &adapter->state); return 0; } /* Usage: During resume and firmware recovery module.*/ static inline int netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) { int err = 0; rtnl_lock(); if (netif_running(netdev)) err = __netxen_nic_up(adapter, netdev); rtnl_unlock(); return err; } /* with rtnl_lock */ static void __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) { if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return; if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state)) return; smp_mb(); netif_carrier_off(netdev); netif_tx_disable(netdev); if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) netxen_linkevent_request(adapter, 0); if (adapter->stop_port) adapter->stop_port(adapter); if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_p3_free_mac_list(adapter); adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE); netxen_napi_disable(adapter); netxen_release_tx_buffers(adapter); } /* Usage: During suspend and firmware recovery module */ static inline void netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) { rtnl_lock(); if 
(netif_running(netdev)) __netxen_nic_down(adapter, netdev); rtnl_unlock(); } static int netxen_nic_attach(struct netxen_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; int err, ring; struct nx_host_rds_ring *rds_ring; struct nx_host_tx_ring *tx_ring; u32 capab2; if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) return 0; err = netxen_init_firmware(adapter); if (err) return err; adapter->flags &= ~NETXEN_FW_MSS_CAP; if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) { capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2); if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) adapter->flags |= NETXEN_FW_MSS_CAP; } err = netxen_napi_add(adapter, netdev); if (err) return err; err = netxen_alloc_sw_resources(adapter); if (err) { printk(KERN_ERR "%s: Error in setting sw resources\n", netdev->name); return err; } err = netxen_alloc_hw_resources(adapter); if (err) { printk(KERN_ERR "%s: Error in setting hw resources\n", netdev->name); goto err_out_free_sw; } if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { tx_ring = adapter->tx_ring; tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter, crb_cmd_producer[adapter->portnum]); tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter, crb_cmd_consumer[adapter->portnum]); tx_ring->producer = 0; tx_ring->sw_consumer = 0; netxen_nic_update_cmd_producer(adapter, tx_ring); netxen_nic_update_cmd_consumer(adapter, tx_ring); } for (ring = 0; ring < adapter->max_rds_rings; ring++) { rds_ring = &adapter->recv_ctx.rds_rings[ring]; netxen_post_rx_buffers(adapter, ring, rds_ring); } err = netxen_nic_request_irq(adapter); if (err) { dev_err(&pdev->dev, "%s: failed to setup interrupt\n", netdev->name); goto err_out_free_rxbuf; } if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netxen_nic_init_coalesce_defaults(adapter); netxen_create_sysfs_entries(adapter); adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; return 0; err_out_free_rxbuf: netxen_release_rx_buffers(adapter); netxen_free_hw_resources(adapter); 
err_out_free_sw: netxen_free_sw_resources(adapter); return err; } static void netxen_nic_detach(struct netxen_adapter *adapter) { if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return; netxen_remove_sysfs_entries(adapter); netxen_free_hw_resources(adapter); netxen_release_rx_buffers(adapter); netxen_nic_free_irq(adapter); netxen_napi_del(adapter); netxen_free_sw_resources(adapter); adapter->is_up = 0; } int netxen_nic_reset_context(struct netxen_adapter *adapter) { int err = 0; struct net_device *netdev = adapter->netdev; if (test_and_set_bit(__NX_RESETTING, &adapter->state)) return -EBUSY; if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { netif_device_detach(netdev); if (netif_running(netdev)) __netxen_nic_down(adapter, netdev); netxen_nic_detach(adapter); if (netif_running(netdev)) { err = netxen_nic_attach(adapter); if (!err) err = __netxen_nic_up(adapter, netdev); if (err) goto done; } netif_device_attach(netdev); } done: clear_bit(__NX_RESETTING, &adapter->state); return err; } static int netxen_setup_netdev(struct netxen_adapter *adapter, struct net_device *netdev) { int err = 0; struct pci_dev *pdev = adapter->pdev; adapter->mc_enabled = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) adapter->max_mc_count = 38; else adapter->max_mc_count = 16; netdev->netdev_ops = &netxen_netdev_ops; netdev->watchdog_timeo = 5*HZ; netxen_nic_change_mtu(netdev, netdev->mtu); netdev->ethtool_ops = &netxen_nic_ethtool_ops; netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_RXCSUM; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; netdev->vlan_features |= netdev->hw_features; if (adapter->pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; } if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) netdev->hw_features |= NETIF_F_LRO; netdev->features |= 
netdev->hw_features; netdev->irq = adapter->msix_entries[0].vector; INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); if (netxen_read_mac_addr(adapter)) dev_warn(&pdev->dev, "failed to read mac addr\n"); netif_carrier_off(netdev); err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "failed to register net device\n"); return err; } return 0; } #define NETXEN_ULA_ADAPTER_KEY (0xdaddad01) #define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00) static void netxen_read_ula_info(struct netxen_adapter *adapter) { u32 temp; /* Print ULA info only once for an adapter */ if (adapter->portnum != 0) return; temp = NXRD32(adapter, NETXEN_ULA_KEY); switch (temp) { case NETXEN_ULA_ADAPTER_KEY: dev_info(&adapter->pdev->dev, "ULA adapter"); break; case NETXEN_NON_ULA_ADAPTER_KEY: dev_info(&adapter->pdev->dev, "non ULA adapter"); break; default: break; } return; } #ifdef CONFIG_PCIEAER static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct pci_dev *root = pdev->bus->self; u32 aer_pos; /* root bus? 
*/ if (!root) return; if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) return; if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT) return; aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR); if (!aer_pos) return; pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff); } #endif static int netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev = NULL; struct netxen_adapter *adapter = NULL; int i = 0, err; int pci_func_id = PCI_FUNC(pdev->devfn); uint8_t revision_id; u32 val; if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n", module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); return -ENODEV; } if ((err = pci_enable_device(pdev))) return err; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { err = -ENODEV; goto err_out_disable_pdev; } if ((err = pci_request_regions(pdev, netxen_nic_driver_name))) goto err_out_disable_pdev; if (NX_IS_REVISION_P3(pdev->revision)) pci_enable_pcie_error_reporting(pdev); pci_set_master(pdev); netdev = alloc_etherdev(sizeof(struct netxen_adapter)); if(!netdev) { err = -ENOMEM; goto err_out_free_res; } SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; adapter->ahw.pci_func = pci_func_id; revision_id = pdev->revision; adapter->ahw.revision_id = revision_id; rwlock_init(&adapter->ahw.crb_lock); spin_lock_init(&adapter->ahw.mem_lock); spin_lock_init(&adapter->tx_clean_lock); INIT_LIST_HEAD(&adapter->mac_list); INIT_LIST_HEAD(&adapter->ip_list); err = netxen_setup_pci_map(adapter); if (err) goto err_out_free_netdev; /* This will be reset for mezz cards */ adapter->portnum = pci_func_id; err = netxen_nic_get_board_info(adapter); if (err) { dev_err(&pdev->dev, "Error getting board config info.\n"); goto err_out_iounmap; } #ifdef CONFIG_PCIEAER 
netxen_mask_aer_correctable(adapter); #endif /* Mezz cards have PCI function 0,2,3 enabled */ switch (adapter->ahw.board_type) { case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ: if (pci_func_id >= 2) adapter->portnum = pci_func_id - 2; break; default: break; } err = netxen_check_flash_fw_compatibility(adapter); if (err) goto err_out_iounmap; if (adapter->portnum == 0) { val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); if (val != 0xffffffff && val != 0) { NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); adapter->need_fw_reset = 1; } } err = netxen_start_firmware(adapter); if (err) goto err_out_decr_ref; /* * See if the firmware gave us a virtual-physical port mapping. */ adapter->physical_port = adapter->portnum; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { i = NXRD32(adapter, CRB_V2P(adapter->portnum)); if (i != 0x55555555) adapter->physical_port = i; } netxen_nic_clear_stats(adapter); err = netxen_setup_intr(adapter); if (err) { dev_err(&adapter->pdev->dev, "Failed to setup interrupts, error = %d\n", err); goto err_out_disable_msi; } netxen_read_ula_info(adapter); err = netxen_setup_netdev(adapter, netdev); if (err) goto err_out_disable_msi; pci_set_drvdata(pdev, adapter); netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); switch (adapter->ahw.port_type) { case NETXEN_NIC_GBE: dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n", adapter->netdev->name); break; case NETXEN_NIC_XGBE: dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", adapter->netdev->name); break; } netxen_create_diag_entries(adapter); return 0; err_out_disable_msi: netxen_teardown_intr(adapter); netxen_free_dummy_dma(adapter); err_out_decr_ref: nx_decr_dev_ref_cnt(adapter); err_out_iounmap: netxen_cleanup_pci_map(adapter); err_out_free_netdev: free_netdev(netdev); err_out_free_res: pci_release_regions(pdev); err_out_disable_pdev: pci_disable_device(pdev); return err; } static void netxen_cleanup_minidump(struct netxen_adapter *adapter) { 
	/* Tail of netxen_cleanup_minidump() — opened on the previous line.
	 * Release the firmware minidump template and any capture buffer,
	 * NULLing the pointers so a repeated cleanup is harmless. */
	kfree(adapter->mdump.md_template);
	adapter->mdump.md_template = NULL;

	if (adapter->mdump.md_capture_buff) {
		vfree(adapter->mdump.md_capture_buff);
		adapter->mdump.md_capture_buff = NULL;
	}
}

/*
 * netxen_nic_remove - PCI remove / driver-unload teardown.
 *
 * Reverses netxen_nic_probe(): stop the firmware poll work, unregister
 * the netdev, tear down rings and interrupts, then release PCI
 * resources.  Ordering matters — the netdev must be unregistered
 * before per-adapter state is freed, and free_netdev() comes last
 * because @adapter lives inside the netdev's private area.
 */
static void netxen_nic_remove(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	netxen_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	netxen_free_ip_list(adapter, false);
	netxen_nic_detach(adapter);

	nx_decr_dev_ref_cnt(adapter);

	/* the dummy-DMA area is only allocated by PCI function 0 */
	if (adapter->portnum == 0)
		netxen_free_dummy_dma(adapter);

	clear_bit(__NX_RESETTING, &adapter->state);

	netxen_teardown_intr(adapter);
	netxen_set_interrupt_mode(adapter, 0);
	netxen_remove_diag_entries(adapter);

	netxen_cleanup_pci_map(adapter);

	netxen_release_firmware(adapter);

	if (NX_IS_REVISION_P3(pdev->revision)) {
		netxen_cleanup_minidump(adapter);
		pci_disable_pcie_error_reporting(pdev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(netdev);
}

/*
 * Quiesce the adapter for suspend / shutdown / AER recovery: detach
 * the netdev from the stack, stop firmware polling, bring the
 * interface down, free its resources and drop the device reference
 * count.  Leaves __NX_RESETTING clear so later work can proceed.
 */
static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	netxen_cancel_fw_work(adapter);

	if (netif_running(netdev))
		netxen_nic_down(adapter, netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	netxen_nic_detach(adapter);

	if (adapter->portnum == 0)
		netxen_free_dummy_dma(adapter);

	nx_decr_dev_ref_cnt(adapter);

	clear_bit(__NX_RESETTING, &adapter->state);
}

/*
 * Counterpart of netxen_nic_detach_func(): re-enable the PCI device,
 * restore config space and restart the firmware.  Used on PM resume
 * and after an AER slot reset.  The function body continues on the
 * next source line.
 */
static int netxen_nic_attach_func(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* invalidate cached CRB/OCM windows so they are re-mapped
	 * after the power transition */
	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;

	err = netxen_start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if
	/* Tail of netxen_nic_attach_func() — the "if" opens on the
	 * previous line: when the interface was running before the
	 * suspend/reset, re-attach rings/IRQs and bring it back up. */
	   (netif_running(netdev)) {
		err = netxen_nic_attach(adapter);
		if (err)
			goto err_out;

		err = netxen_nic_up(adapter, netdev);
		if (err)
			goto err_out_detach;

		netxen_restore_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);

	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
	return 0;

err_out_detach:
	netxen_nic_detach(adapter);
err_out:
	nx_decr_dev_ref_cnt(adapter);
	return err;
}

/*
 * PCI AER callback: a channel error was detected.  If the firmware
 * already granted (or another function owns) the AER window we report
 * RECOVERED; otherwise quiesce this function and ask for a slot reset.
 */
static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
						 pci_channel_state_t state)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (nx_dev_request_aer(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	netxen_nic_detach_func(adapter);

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/* PCI AER callback: slot was reset — re-initialize the function. */
static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
{
	int err = 0;

	err = netxen_nic_attach_func(pdev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/* PCI AER callback: traffic may resume — clear logged AER status. */
static void netxen_io_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

/*
 * Shutdown hook: quiesce the NIC and, when wake-on-LAN is supported,
 * arm wake from D3 before the system powers off or kexecs.
 */
static void netxen_nic_shutdown(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);

	netxen_nic_detach_func(adapter);

	if (pci_save_state(pdev))
		return;

	if (netxen_nic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
/*
 * Legacy PCI PM suspend: same quiesce path as shutdown, then place
 * the device in the target low-power state.
 */
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	int retval;

	netxen_nic_detach_func(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (netxen_nic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

/* Legacy PCI PM resume: full re-attach of the function. */
static int
netxen_nic_resume(struct pci_dev *pdev)
{
	return netxen_nic_attach_func(pdev);
}
#endif

static int
netxen_nic_open(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); int err = 0; if (adapter->driver_mismatch) return -EIO; err = netxen_nic_attach(adapter); if (err) return err; err = __netxen_nic_up(adapter, netdev); if (err) goto err_out; netif_start_queue(netdev); return 0; err_out: netxen_nic_detach(adapter); return err; } /* * netxen_nic_close - Disables a network interface entry point */ static int netxen_nic_close(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); __netxen_nic_down(adapter, netdev); return 0; } static void netxen_tso_check(struct net_device *netdev, struct nx_host_tx_ring *tx_ring, struct cmd_desc_type0 *first_desc, struct sk_buff *skb) { u8 opcode = TX_ETHER_PKT; __be16 protocol = skb->protocol; u16 flags = 0, vid = 0; u32 producer; int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; struct cmd_desc_type0 *hwdesc; struct vlan_ethhdr *vh; if (protocol == cpu_to_be16(ETH_P_8021Q)) { vh = (struct vlan_ethhdr *)skb->data; protocol = vh->h_vlan_encapsulated_proto; flags = FLAGS_VLAN_TAGGED; } else if (skb_vlan_tag_present(skb)) { flags = FLAGS_VLAN_OOB; vid = skb_vlan_tag_get(skb); netxen_set_tx_vlan_tci(first_desc, vid); vlan_oob = 1; } if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && skb_shinfo(skb)->gso_size > 0) { hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); first_desc->total_hdr_length = hdr_len; if (vlan_oob) { first_desc->total_hdr_length += VLAN_HLEN; first_desc->tcp_hdr_offset = VLAN_HLEN; first_desc->ip_hdr_offset = VLAN_HLEN; /* Only in case of TSO on vlan device */ flags |= FLAGS_VLAN_TAGGED; } opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ? 
TX_TCP_LSO6 : TX_TCP_LSO; tso = 1; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4proto; if (protocol == cpu_to_be16(ETH_P_IP)) { l4proto = ip_hdr(skb)->protocol; if (l4proto == IPPROTO_TCP) opcode = TX_TCP_PKT; else if(l4proto == IPPROTO_UDP) opcode = TX_UDP_PKT; } else if (protocol == cpu_to_be16(ETH_P_IPV6)) { l4proto = ipv6_hdr(skb)->nexthdr; if (l4proto == IPPROTO_TCP) opcode = TX_TCPV6_PKT; else if(l4proto == IPPROTO_UDP) opcode = TX_UDPV6_PKT; } } first_desc->tcp_hdr_offset += skb_transport_offset(skb); first_desc->ip_hdr_offset += skb_network_offset(skb); netxen_set_tx_flags_opcode(first_desc, flags, opcode); if (!tso) return; /* For LSO, we need to copy the MAC/IP/TCP headers into * the descriptor ring */ producer = tx_ring->producer; copied = 0; offset = 2; if (vlan_oob) { /* Create a TSO vlan header template for firmware */ hwdesc = &tx_ring->desc_head[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL; copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, hdr_len + VLAN_HLEN); vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); skb_copy_from_linear_data(skb, vh, 12); vh->h_vlan_proto = htons(ETH_P_8021Q); vh->h_vlan_TCI = htons(vid); skb_copy_from_linear_data_offset(skb, 12, (char *)vh + 16, copy_len - 16); copied = copy_len - VLAN_HLEN; offset = 0; producer = get_next_index(producer, tx_ring->num_desc); } while (copied < hdr_len) { copy_len = min((int)sizeof(struct cmd_desc_type0) - offset, (hdr_len - copied)); hwdesc = &tx_ring->desc_head[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL; skb_copy_from_linear_data_offset(skb, copied, (char *)hwdesc + offset, copy_len); copied += copy_len; offset = 0; producer = get_next_index(producer, tx_ring->num_desc); } tx_ring->producer = producer; barrier(); } static int netxen_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct netxen_cmd_buffer *pbuf) { struct netxen_skb_frag *nf; struct skb_frag_struct *frag; int i, nr_frags; dma_addr_t map; nr_frags = skb_shinfo(skb)->nr_frags; nf = 
&pbuf->frag_array[0]; map = pci_map_single(pdev, skb->data, skb_headlen(skb), PCI_DMA_TODEVICE); if (pci_dma_mapping_error(pdev, map)) goto out_err; nf->dma = map; nf->length = skb_headlen(skb); for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; nf = &pbuf->frag_array[i+1]; map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, map)) goto unwind; nf->dma = map; nf->length = skb_frag_size(frag); } return 0; unwind: while (--i >= 0) { nf = &pbuf->frag_array[i+1]; pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); nf->dma = 0ULL; } nf = &pbuf->frag_array[0]; pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); nf->dma = 0ULL; out_err: return -ENOMEM; } static inline void netxen_clear_cmddesc(u64 *desc) { desc[0] = 0ULL; desc[2] = 0ULL; } static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); struct nx_host_tx_ring *tx_ring = adapter->tx_ring; struct netxen_cmd_buffer *pbuf; struct netxen_skb_frag *buffrag; struct cmd_desc_type0 *hwdesc, *first_desc; struct pci_dev *pdev; int i, k; int delta = 0; struct skb_frag_struct *frag; u32 producer; int frag_count, no_of_desc; u32 num_txd = tx_ring->num_desc; frag_count = skb_shinfo(skb)->nr_frags + 1; /* 14 frags supported for normal packet and * 32 frags supported for TSO packet */ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) { for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { frag = &skb_shinfo(skb)->frags[i]; delta += skb_frag_size(frag); } if (!__pskb_pull_tail(skb, delta)) goto drop_packet; frag_count = 1 + skb_shinfo(skb)->nr_frags; } /* 4 fragments per cmd des */ no_of_desc = (frag_count + 3) >> 2; if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) { netif_stop_queue(netdev); smp_mb(); if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) netif_start_queue(netdev); else return NETDEV_TX_BUSY; } 
producer = tx_ring->producer; pbuf = &tx_ring->cmd_buf_arr[producer]; pdev = adapter->pdev; if (netxen_map_tx_skb(pdev, skb, pbuf)) goto drop_packet; pbuf->skb = skb; pbuf->frag_count = frag_count; first_desc = hwdesc = &tx_ring->desc_head[producer]; netxen_clear_cmddesc((u64 *)hwdesc); netxen_set_tx_frags_len(first_desc, frag_count, skb->len); netxen_set_tx_port(first_desc, adapter->portnum); for (i = 0; i < frag_count; i++) { k = i % 4; if ((k == 0) && (i > 0)) { /* move to next desc.*/ producer = get_next_index(producer, num_txd); hwdesc = &tx_ring->desc_head[producer]; netxen_clear_cmddesc((u64 *)hwdesc); tx_ring->cmd_buf_arr[producer].skb = NULL; } buffrag = &pbuf->frag_array[i]; hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length); switch (k) { case 0: hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); break; case 1: hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma); break; case 2: hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma); break; case 3: hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma); break; } } tx_ring->producer = get_next_index(producer, num_txd); netxen_tso_check(netdev, tx_ring, first_desc, skb); adapter->stats.txbytes += skb->len; adapter->stats.xmitcalled++; netxen_nic_update_cmd_producer(adapter, tx_ring); return NETDEV_TX_OK; drop_packet: adapter->stats.txdropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } static int netxen_nic_check_temp(struct netxen_adapter *adapter) { struct net_device *netdev = adapter->netdev; uint32_t temp, temp_state, temp_val; int rv = 0; temp = NXRD32(adapter, CRB_TEMP_STATE); temp_state = nx_get_temp_state(temp); temp_val = nx_get_temp_val(temp); if (temp_state == NX_TEMP_PANIC) { printk(KERN_ALERT "%s: Device temperature %d degrees C exceeds" " maximum allowed. Hardware has been shut down.\n", netdev->name, temp_val); rv = 1; } else if (temp_state == NX_TEMP_WARN) { if (adapter->temp == NX_TEMP_NORMAL) { printk(KERN_ALERT "%s: Device temperature %d degrees C " "exceeds operating range." 
" Immediate action needed.\n", netdev->name, temp_val); } } else { if (adapter->temp == NX_TEMP_WARN) { printk(KERN_INFO "%s: Device temperature is now %d degrees C" " in normal range.\n", netdev->name, temp_val); } } adapter->temp = temp_state; return rv; } void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup) { struct net_device *netdev = adapter->netdev; if (adapter->ahw.linkup && !linkup) { printk(KERN_INFO "%s: %s NIC Link is down\n", netxen_nic_driver_name, netdev->name); adapter->ahw.linkup = 0; if (netif_running(netdev)) { netif_carrier_off(netdev); netif_stop_queue(netdev); } adapter->link_changed = !adapter->has_link_events; } else if (!adapter->ahw.linkup && linkup) { printk(KERN_INFO "%s: %s NIC Link is up\n", netxen_nic_driver_name, netdev->name); adapter->ahw.linkup = 1; if (netif_running(netdev)) { netif_carrier_on(netdev); netif_wake_queue(netdev); } adapter->link_changed = !adapter->has_link_events; } } static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) { u32 val, port, linkup; port = adapter->physical_port; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { val = NXRD32(adapter, CRB_XG_STATE_P3); val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); linkup = (val == XG_LINK_UP_P3); } else { val = NXRD32(adapter, CRB_XG_STATE); val = (val >> port*8) & 0xff; linkup = (val == XG_LINK_UP); } netxen_advert_link_change(adapter, linkup); } static void netxen_tx_timeout(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); if (test_bit(__NX_RESETTING, &adapter->state)) return; dev_err(&netdev->dev, "transmit timeout, resetting.\n"); schedule_work(&adapter->tx_timeout_task); } static void netxen_tx_timeout_task(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, tx_timeout_task); if (!netif_running(adapter->netdev)) return; if (test_and_set_bit(__NX_RESETTING, &adapter->state)) return; if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS) 
		/* Tail of netxen_tx_timeout_task() — the preceding "if"
		 * decided tx_timeo_cnt exceeded NX_MAX_TX_TIMEOUTS, so
		 * escalate straight to a firmware reset request. */
		goto request_reset;

	rtnl_lock();
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		/* try to scrub interrupt */
		netxen_napi_disable(adapter);
		netxen_napi_enable(adapter);

		netif_wake_queue(adapter->netdev);

		clear_bit(__NX_RESETTING, &adapter->state);
	} else {
		/* P3: rebuild the whole context; reset_context takes
		 * __NX_RESETTING itself, so release it first */
		clear_bit(__NX_RESETTING, &adapter->state);
		if (netxen_nic_reset_context(adapter)) {
			rtnl_unlock();
			goto request_reset;
		}
	}
	adapter->netdev->trans_start = jiffies;
	rtnl_unlock();
	return;

request_reset:
	adapter->need_fw_reset = 1;
	clear_bit(__NX_RESETTING, &adapter->state);
}

/*
 * ndo_get_stats64: fill @stats from the adapter's software counters.
 * LRO-aggregated packets are folded into rx_packets.
 */
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
						      struct rtnl_link_stats64 *stats)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);

	stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
	stats->tx_packets = adapter->stats.xmitfinished;
	stats->rx_bytes = adapter->stats.rxbytes;
	stats->tx_bytes = adapter->stats.txbytes;
	stats->rx_dropped = adapter->stats.rxdropped;
	stats->tx_dropped = adapter->stats.txdropped;

	return stats;
}

/*
 * Legacy (INTx) interrupt handler.  The line may be shared with other
 * devices, so first confirm the interrupt is really ours via the ISR
 * vector and state registers, then ack it and schedule NAPI.
 */
static irqreturn_t netxen_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;
	struct netxen_adapter *adapter = sds_ring->adapter;
	u32 status = 0;

	status = readl(adapter->isr_int_vec);

	if (!(status & adapter->int_vec_bit))
		return IRQ_NONE;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		/* check interrupt state machine, to be sure */
		status = readl(adapter->crb_int_state_reg);
		if (!ISR_LEGACY_INT_TRIGGERED(status))
			return IRQ_NONE;
	} else {
		unsigned long our_int = 0;

		our_int = readl(adapter->crb_int_state_reg);
		/* not our interrupt */
		if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
			return IRQ_NONE;

		/* claim interrupt */
		writel((our_int & 0xffffffff), adapter->crb_int_state_reg);

		/* clear interrupt */
		netxen_nic_disable_int(sds_ring);
	}

	writel(0xffffffff, adapter->tgt_status_reg);
	/* read twice to ensure write is flushed */
	readl(adapter->isr_int_vec);
	readl(adapter->isr_int_vec);

	napi_schedule(&sds_ring->napi);

	return IRQ_HANDLED;
}
/* MSI interrupt handler: no share check needed — ack and kick NAPI. */
static irqreturn_t netxen_msi_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;
	struct netxen_adapter *adapter = sds_ring->adapter;

	/* clear interrupt */
	writel(0xffffffff, adapter->tgt_status_reg);

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}

/* MSI-X interrupt handler: per-ring vector, nothing to ack here. */
static irqreturn_t netxen_msix_intr(int irq, void *data)
{
	struct nx_host_sds_ring *sds_ring = data;

	napi_schedule(&sds_ring->napi);
	return IRQ_HANDLED;
}

/*
 * NAPI poll: reap TX completions and process up to @budget RX
 * descriptors.  If TX completion work remains we report the full
 * budget so NAPI keeps polling; otherwise complete and re-enable the
 * ring interrupt (only while the device is marked up).
 */
static int netxen_nic_poll(struct napi_struct *napi, int budget)
{
	struct nx_host_sds_ring *sds_ring =
		container_of(napi, struct nx_host_sds_ring, napi);

	struct netxen_adapter *adapter = sds_ring->adapter;

	int tx_complete;
	int work_done;

	tx_complete = netxen_process_cmd_ring(adapter);

	work_done = netxen_process_rcv_ring(sds_ring, budget);

	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__NX_DEV_UP, &adapter->state))
			netxen_nic_enable_int(sds_ring);
	}

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the legacy ISR for every ring with IRQs masked. */
static void netxen_nic_poll_controller(struct net_device *netdev)
{
	int ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	disable_irq(adapter->irq);
	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netxen_intr(adapter->irq, sds_ring);
	}
	enable_irq(adapter->irq);
}
#endif

/*
 * Bump the cross-function device reference count kept in CRB scratch
 * space, under the firmware API lock.  Returns the new count, or -EIO
 * if the lock could not be taken.
 */
static int
nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
{
	int count;
	if (netxen_api_lock(adapter))
		return -EIO;

	count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);

	NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);

	netxen_api_unlock(adapter);
	return count;
}

/*
 * Drop the shared device reference count; the last function out moves
 * the device to COLD unless it is already marked FAILED.  Returns the
 * new count, or -EIO if the API lock could not be taken.
 */
static int
nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
{
	int count, state;
	if (netxen_api_lock(adapter))
		return -EIO;

	count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
	WARN_ON(count == 0);

	NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
	state = NXRD32(adapter, NX_CRB_DEV_STATE);

	if (count == 0 && state != NX_DEV_FAILED)
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);

	netxen_api_unlock(adapter);
	return count;
}

/*
 * Ask the firmware for an AER recovery window.  Succeeds when the
 * device is READY (we claim it) or already in NEED_AER; fails with
 * -EINVAL in any other state.
 */
static int
nx_dev_request_aer(struct netxen_adapter *adapter)
{
	u32 state;
	int ret = -EINVAL;

	if (netxen_api_lock(adapter))
		return ret;

	state = NXRD32(adapter, NX_CRB_DEV_STATE);

	if (state == NX_DEV_NEED_AER)
		ret = 0;
	else if (state == NX_DEV_READY) {
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
		ret = 0;
	}

	netxen_api_unlock(adapter);
	return ret;
}

/*
 * Request a firmware reset.  A no-op success when a reset is already
 * pending (NEED_RESET/FAILED); otherwise, unless initialization or
 * AER is in flight, mark NEED_RESET and claim reset ownership for
 * this function via NETXEN_FW_RESET_OWNER.
 */
int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
	u32 state;
	int ret = -EINVAL;

	if (netxen_api_lock(adapter))
		return ret;

	state = NXRD32(adapter, NX_CRB_DEV_STATE);

	if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
		ret = 0;
	else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
		adapter->flags |= NETXEN_FW_RESET_OWNER;
		ret = 0;
	}

	netxen_api_unlock(adapter);
	return ret;
}

/*
 * Decide whether this function should boot the firmware: only the
 * first function to register (ref count transitions 0 -> 1) starts
 * it and moves the device to INITALIZING.  Returns 1 when we must
 * start the firmware, 0 when another function already did, -1 when
 * the API lock could not be taken (ref count is still bumped).
 */
static int
netxen_can_start_firmware(struct netxen_adapter *adapter)
{
	int count;
	int can_start = 0;

	if (netxen_api_lock(adapter)) {
		nx_incr_dev_ref_cnt(adapter);
		return -1;
	}

	count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);

	/* sanitize a corrupt/uninitialized scratch value */
	if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
		count = 0;

	if (count == 0) {
		can_start = 1;
		NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
	}

	NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);

	netxen_api_unlock(adapter);

	return can_start;
}

/* (Re)arm the single fw_work delayed item with a new handler @func. */
static void
netxen_schedule_work(struct netxen_adapter *adapter,
		work_func_t func, int delay)
{
	INIT_DELAYED_WORK(&adapter->fw_work, func);
	schedule_delayed_work(&adapter->fw_work, delay);
}

/*
 * Stop firmware work: spin until we own __NX_RESETTING (the bit is
 * left set for the caller to clear) and cancel pending fw_work.
 */
static void
netxen_cancel_fw_work(struct netxen_adapter *adapter)
{
	while (test_and_set_bit(__NX_RESETTING, &adapter->state))
		msleep(10);

	cancel_delayed_work_sync(&adapter->fw_work);
}

/*
 * Delayed work: re-attach the interface after firmware (re)start.
 * The body continues on the next source line.
 */
static void
netxen_attach_work(struct work_struct *work)
{
	struct netxen_adapter *adapter = container_of(work,
				struct netxen_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev)) {
err = netxen_nic_attach(adapter); if (err) goto done; err = netxen_nic_up(adapter, netdev); if (err) { netxen_nic_detach(adapter); goto done; } netxen_restore_indev_addr(netdev, NETDEV_UP); } netif_device_attach(netdev); done: adapter->fw_fail_cnt = 0; clear_bit(__NX_RESETTING, &adapter->state); netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); } static void netxen_fwinit_work(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, fw_work.work); int dev_state; int count; dev_state = NXRD32(adapter, NX_CRB_DEV_STATE); if (adapter->flags & NETXEN_FW_RESET_OWNER) { count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); WARN_ON(count == 0); if (count == 1) { if (adapter->mdump.md_enabled) { rtnl_lock(); netxen_dump_fw(adapter); rtnl_unlock(); } adapter->flags &= ~NETXEN_FW_RESET_OWNER; if (netxen_api_lock(adapter)) { clear_bit(__NX_RESETTING, &adapter->state); NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED); return; } count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count); NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD); dev_state = NX_DEV_COLD; netxen_api_unlock(adapter); } } switch (dev_state) { case NX_DEV_COLD: case NX_DEV_READY: if (!netxen_start_firmware(adapter)) { netxen_schedule_work(adapter, netxen_attach_work, 0); return; } break; case NX_DEV_NEED_RESET: case NX_DEV_INITALIZING: netxen_schedule_work(adapter, netxen_fwinit_work, 2 * FW_POLL_DELAY); return; case NX_DEV_FAILED: default: nx_incr_dev_ref_cnt(adapter); break; } if (netxen_api_lock(adapter)) { clear_bit(__NX_RESETTING, &adapter->state); return; } NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED); netxen_api_unlock(adapter); dev_err(&adapter->pdev->dev, "%s: Device initialization Failed\n", adapter->netdev->name); clear_bit(__NX_RESETTING, &adapter->state); } static void netxen_detach_work(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, fw_work.work); 
struct net_device *netdev = adapter->netdev; int ref_cnt = 0, delay; u32 status; netif_device_detach(netdev); netxen_nic_down(adapter, netdev); rtnl_lock(); netxen_nic_detach(adapter); rtnl_unlock(); status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); if (status & NX_RCODE_FATAL_ERROR) goto err_ret; if (adapter->temp == NX_TEMP_PANIC) goto err_ret; if (!(adapter->flags & NETXEN_FW_RESET_OWNER)) ref_cnt = nx_decr_dev_ref_cnt(adapter); if (ref_cnt == -EIO) goto err_ret; delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY); adapter->fw_wait_cnt = 0; netxen_schedule_work(adapter, netxen_fwinit_work, delay); return; err_ret: clear_bit(__NX_RESETTING, &adapter->state); } static int netxen_check_health(struct netxen_adapter *adapter) { u32 state, heartbit; u32 peg_status; struct net_device *netdev = adapter->netdev; state = NXRD32(adapter, NX_CRB_DEV_STATE); if (state == NX_DEV_NEED_AER) return 0; if (netxen_nic_check_temp(adapter)) goto detach; if (adapter->need_fw_reset) { if (nx_dev_request_reset(adapter)) return 0; goto detach; } /* NX_DEV_NEED_RESET, this state can be marked in two cases * 1. Tx timeout 2. 
Fw hang * Send request to destroy context in case of tx timeout only * and doesn't required in case of Fw hang */ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) { adapter->need_fw_reset = 1; if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) goto detach; } if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); if (heartbit != adapter->heartbit) { adapter->heartbit = heartbit; adapter->fw_fail_cnt = 0; if (adapter->need_fw_reset) goto detach; return 0; } if (++adapter->fw_fail_cnt < FW_FAIL_THRESH) return 0; if (nx_dev_request_reset(adapter)) return 0; clear_bit(__NX_FW_ATTACHED, &adapter->state); dev_err(&netdev->dev, "firmware hang detected\n"); peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n" "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" "PEG_NET_4_PC: 0x%x\n", peg_status, NXRD32(adapter, NETXEN_PEG_HALT_STATUS2), NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c), NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c)); if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67) dev_err(&adapter->pdev->dev, "Firmware aborted with error code 0x00006700. 
" "Device is being reset.\n"); detach: if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && !test_and_set_bit(__NX_RESETTING, &adapter->state)) netxen_schedule_work(adapter, netxen_detach_work, 0); return 1; } static void netxen_fw_poll_work(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, fw_work.work); if (test_bit(__NX_RESETTING, &adapter->state)) goto reschedule; if (test_bit(__NX_DEV_UP, &adapter->state) && !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) { if (!adapter->has_link_events) { netxen_nic_handle_phy_intr(adapter); if (adapter->link_changed) netxen_nic_set_link_parameters(adapter); } } if (netxen_check_health(adapter)) return; reschedule: netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY); } static ssize_t netxen_store_bridged_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct net_device *net = to_net_dev(dev); struct netxen_adapter *adapter = netdev_priv(net); unsigned long new; int ret = -EINVAL; if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG)) goto err_out; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) goto err_out; if (kstrtoul(buf, 2, &new)) goto err_out; if (!netxen_config_bridged_mode(adapter, !!new)) ret = len; err_out: return ret; } static ssize_t netxen_show_bridged_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *net = to_net_dev(dev); struct netxen_adapter *adapter; int bridged_mode = 0; adapter = netdev_priv(net); if (adapter->capabilities & NX_FW_CAPABILITY_BDG) bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED); return sprintf(buf, "%d\n", bridged_mode); } static struct device_attribute dev_attr_bridged_mode = { .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = netxen_show_bridged_mode, .store = netxen_store_bridged_mode, }; static ssize_t netxen_store_diag_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) 
{ struct netxen_adapter *adapter = dev_get_drvdata(dev); unsigned long new; if (kstrtoul(buf, 2, &new)) return -EINVAL; if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) adapter->flags ^= NETXEN_NIC_DIAG_ENABLED; return len; } static ssize_t netxen_show_diag_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct netxen_adapter *adapter = dev_get_drvdata(dev); return sprintf(buf, "%d\n", !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); } static struct device_attribute dev_attr_diag_mode = { .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = netxen_show_diag_mode, .store = netxen_store_diag_mode, }; static int netxen_sysfs_validate_crb(struct netxen_adapter *adapter, loff_t offset, size_t size) { size_t crb_size = 4; if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) return -EIO; if (offset < NETXEN_PCI_CRBSPACE) { if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return -EINVAL; if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, NETXEN_PCI_CAMQM_2M_END)) crb_size = 8; else return -EINVAL; } if ((size != crb_size) || (offset & (crb_size-1))) return -EINVAL; return 0; } static ssize_t netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; int ret; ret = netxen_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, NETXEN_PCI_CAMQM_2M_END)) { netxen_pci_camqm_read_2M(adapter, offset, &qmdata); memcpy(buf, &qmdata, size); } else { data = NXRD32(adapter, offset); memcpy(buf, &data, size); } return size; } static ssize_t netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct 
netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; int ret; ret = netxen_sysfs_validate_crb(adapter, offset, size); if (ret != 0) return ret; if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM, NETXEN_PCI_CAMQM_2M_END)) { memcpy(&qmdata, buf, size); netxen_pci_camqm_write_2M(adapter, offset, qmdata); } else { memcpy(&data, buf, size); NXWR32(adapter, offset, data); } return size; } static int netxen_sysfs_validate_mem(struct netxen_adapter *adapter, loff_t offset, size_t size) { if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) return -EIO; if ((size != 8) || (offset & 0x7)) return -EIO; return 0; } static ssize_t netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; ret = netxen_sysfs_validate_mem(adapter, offset, size); if (ret != 0) return ret; if (adapter->pci_mem_read(adapter, offset, &data)) return -EIO; memcpy(buf, &data, size); return size; } static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; ret = netxen_sysfs_validate_mem(adapter, offset, size); if (ret != 0) return ret; memcpy(&data, buf, size); if (adapter->pci_mem_write(adapter, offset, data)) return -EIO; return size; } static struct bin_attribute bin_attr_crb = { .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = netxen_sysfs_read_crb, .write = netxen_sysfs_write_crb, }; static struct bin_attribute bin_attr_mem = { .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = netxen_sysfs_read_mem, .write = netxen_sysfs_write_mem, }; static ssize_t 
netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); struct net_device *netdev = adapter->netdev; struct netxen_dimm_cfg dimm; u8 dw, rows, cols, banks, ranks; u32 val; if (size < attr->size) { netdev_err(netdev, "Invalid size\n"); return -EINVAL; } memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY); /* Checks if DIMM info is valid. */ if (val & NETXEN_DIMM_VALID_FLAG) { netdev_err(netdev, "Invalid DIMM flag\n"); dimm.presence = 0xff; goto out; } rows = NETXEN_DIMM_NUMROWS(val); cols = NETXEN_DIMM_NUMCOLS(val); ranks = NETXEN_DIMM_NUMRANKS(val); banks = NETXEN_DIMM_NUMBANKS(val); dw = NETXEN_DIMM_DATAWIDTH(val); dimm.presence = (val & NETXEN_DIMM_PRESENT); /* Checks if DIMM info is present. */ if (!dimm.presence) { netdev_err(netdev, "DIMM not present\n"); goto out; } dimm.dimm_type = NETXEN_DIMM_TYPE(val); switch (dimm.dimm_type) { case NETXEN_DIMM_TYPE_RDIMM: case NETXEN_DIMM_TYPE_UDIMM: case NETXEN_DIMM_TYPE_SO_DIMM: case NETXEN_DIMM_TYPE_Micro_DIMM: case NETXEN_DIMM_TYPE_Mini_RDIMM: case NETXEN_DIMM_TYPE_Mini_UDIMM: break; default: netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type); goto out; } if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM) dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM; else dimm.mem_type = NETXEN_DIMM_MEMTYPE(val); if (val & NETXEN_DIMM_SIZE) { dimm.size = NETXEN_DIMM_STD_MEM_SIZE; goto out; } if (!rows) { netdev_err(netdev, "Invalid no of rows %x\n", rows); goto out; } if (!cols) { netdev_err(netdev, "Invalid no of columns %x\n", cols); goto out; } if (!banks) { netdev_err(netdev, "Invalid no of banks %x\n", banks); goto out; } ranks += 1; switch (dw) { case 0x0: dw = 32; break; case 0x1: dw = 33; break; case 0x2: dw = 36; break; case 0x3: dw = 64; break; case 0x4: dw = 72; break; case 0x5: dw = 80; 
break; case 0x6: dw = 128; break; case 0x7: dw = 144; break; default: netdev_err(netdev, "Invalid data-width %x\n", dw); goto out; } dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8; /* Size returned in MB. */ dimm.size = (dimm.size) / 0x100000; out: memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg)); return sizeof(struct netxen_dimm_cfg); } static struct bin_attribute bin_attr_dimm = { .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, .size = sizeof(struct netxen_dimm_cfg), .read = netxen_sysfs_read_dimm, }; static void netxen_create_sysfs_entries(struct netxen_adapter *adapter) { struct device *dev = &adapter->pdev->dev; if (adapter->capabilities & NX_FW_CAPABILITY_BDG) { /* bridged_mode control */ if (device_create_file(dev, &dev_attr_bridged_mode)) { dev_warn(dev, "failed to create bridged_mode sysfs entry\n"); } } } static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter) { struct device *dev = &adapter->pdev->dev; if (adapter->capabilities & NX_FW_CAPABILITY_BDG) device_remove_file(dev, &dev_attr_bridged_mode); } static void netxen_create_diag_entries(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct device *dev; dev = &pdev->dev; if (device_create_file(dev, &dev_attr_diag_mode)) dev_info(dev, "failed to create diag_mode sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_crb)) dev_info(dev, "failed to create crb sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_mem)) dev_info(dev, "failed to create mem sysfs entry\n"); if (device_create_bin_file(dev, &bin_attr_dimm)) dev_info(dev, "failed to create dimm sysfs entry\n"); } static void netxen_remove_diag_entries(struct netxen_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct device *dev = &pdev->dev; device_remove_file(dev, &dev_attr_diag_mode); device_remove_bin_file(dev, &bin_attr_crb); device_remove_bin_file(dev, &bin_attr_mem); device_remove_bin_file(dev, &bin_attr_dimm); } #ifdef CONFIG_INET #define 
is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) static int netxen_destip_supported(struct netxen_adapter *adapter) { if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) return 0; if (adapter->ahw.cut_through) return 0; return 1; } static void netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { struct nx_ip_list *cur, *tmp_cur; list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { if (master) { if (cur->master) { netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); list_del(&cur->list); kfree(cur); } } else { netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); list_del(&cur->list); kfree(cur); } } } static bool netxen_list_config_ip(struct netxen_adapter *adapter, struct in_ifaddr *ifa, unsigned long event) { struct net_device *dev; struct nx_ip_list *cur, *tmp_cur; struct list_head *head; bool ret = false; dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; if (dev == NULL) goto out; switch (event) { case NX_IP_UP: list_for_each(head, &adapter->ip_list) { cur = list_entry(head, struct nx_ip_list, list); if (cur->ip_addr == ifa->ifa_address) goto out; } cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC); if (cur == NULL) goto out; if (dev->priv_flags & IFF_802_1Q_VLAN) dev = vlan_dev_real_dev(dev); cur->master = !!netif_is_bond_master(dev); cur->ip_addr = ifa->ifa_address; list_add_tail(&cur->list, &adapter->ip_list); netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); ret = true; break; case NX_IP_DOWN: list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) { if (cur->ip_addr == ifa->ifa_address) { list_del(&cur->list); kfree(cur); netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN); ret = true; break; } } } out: return ret; } static void netxen_config_indev_addr(struct netxen_adapter *adapter, struct net_device *dev, unsigned long event) { struct in_device *indev; if (!netxen_destip_supported(adapter)) return; indev = in_dev_get(dev); if (!indev) return; for_ifa(indev) { switch (event) { case 
NETDEV_UP: netxen_list_config_ip(adapter, ifa, NX_IP_UP); break; case NETDEV_DOWN: netxen_list_config_ip(adapter, ifa, NX_IP_DOWN); break; default: break; } } endfor_ifa(indev); in_dev_put(indev); } static void netxen_restore_indev_addr(struct net_device *netdev, unsigned long event) { struct netxen_adapter *adapter = netdev_priv(netdev); struct nx_ip_list *pos, *tmp_pos; unsigned long ip_event; ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; netxen_config_indev_addr(adapter, netdev, event); list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) { netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); } } static inline bool netxen_config_checkdev(struct net_device *dev) { struct netxen_adapter *adapter; if (!is_netxen_netdev(dev)) return false; adapter = netdev_priv(dev); if (!adapter) return false; if (!netxen_destip_supported(adapter)) return false; if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) return false; return true; } /** * netxen_config_master - configure addresses based on master * @dev: netxen device * @event: netdev event */ static void netxen_config_master(struct net_device *dev, unsigned long event) { struct net_device *master, *slave; struct netxen_adapter *adapter = netdev_priv(dev); rcu_read_lock(); master = netdev_master_upper_dev_get_rcu(dev); /* * This is the case where the netxen nic is being * enslaved and is dev_open()ed in bond_enslave() * Now we should program the bond's (and its vlans') * addresses in the netxen NIC. */ if (master && netif_is_bond_master(master) && !netif_is_bond_slave(dev)) { netxen_config_indev_addr(adapter, master, event); for_each_netdev_rcu(&init_net, slave) if (slave->priv_flags & IFF_802_1Q_VLAN && vlan_dev_real_dev(slave) == master) netxen_config_indev_addr(adapter, slave, event); } rcu_read_unlock(); /* * This is the case where the netxen nic is being * released and is dev_close()ed in bond_release() * just before IFF_BONDING is stripped. 
*/ if (!master && dev->priv_flags & IFF_BONDING) netxen_free_ip_list(adapter, true); } static int netxen_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_device *orig_dev = dev; struct net_device *slave; recheck: if (dev == NULL) goto done; if (dev->priv_flags & IFF_802_1Q_VLAN) { dev = vlan_dev_real_dev(dev); goto recheck; } if (event == NETDEV_UP || event == NETDEV_DOWN) { /* If this is a bonding device, look for netxen-based slaves*/ if (netif_is_bond_master(dev)) { rcu_read_lock(); for_each_netdev_in_bond_rcu(dev, slave) { if (!netxen_config_checkdev(slave)) continue; adapter = netdev_priv(slave); netxen_config_indev_addr(adapter, orig_dev, event); } rcu_read_unlock(); } else { if (!netxen_config_checkdev(dev)) goto done; adapter = netdev_priv(dev); /* Act only if the actual netxen is the target */ if (orig_dev == dev) netxen_config_master(dev, event); netxen_config_indev_addr(adapter, orig_dev, event); } } done: return NOTIFY_DONE; } static int netxen_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netxen_adapter *adapter; struct net_device *dev, *slave; struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; unsigned long ip_event; dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; ip_event = (event == NETDEV_UP) ? 
NX_IP_UP : NX_IP_DOWN; recheck: if (dev == NULL) goto done; if (dev->priv_flags & IFF_802_1Q_VLAN) { dev = vlan_dev_real_dev(dev); goto recheck; } if (event == NETDEV_UP || event == NETDEV_DOWN) { /* If this is a bonding device, look for netxen-based slaves*/ if (netif_is_bond_master(dev)) { rcu_read_lock(); for_each_netdev_in_bond_rcu(dev, slave) { if (!netxen_config_checkdev(slave)) continue; adapter = netdev_priv(slave); netxen_list_config_ip(adapter, ifa, ip_event); } rcu_read_unlock(); } else { if (!netxen_config_checkdev(dev)) goto done; adapter = netdev_priv(dev); netxen_list_config_ip(adapter, ifa, ip_event); } } done: return NOTIFY_DONE; } static struct notifier_block netxen_netdev_cb = { .notifier_call = netxen_netdev_event, }; static struct notifier_block netxen_inetaddr_cb = { .notifier_call = netxen_inetaddr_event, }; #else static void netxen_restore_indev_addr(struct net_device *dev, unsigned long event) { } static void netxen_free_ip_list(struct netxen_adapter *adapter, bool master) { } #endif static const struct pci_error_handlers netxen_err_handler = { .error_detected = netxen_io_error_detected, .slot_reset = netxen_io_slot_reset, .resume = netxen_io_resume, }; static struct pci_driver netxen_driver = { .name = netxen_nic_driver_name, .id_table = netxen_pci_tbl, .probe = netxen_nic_probe, .remove = netxen_nic_remove, #ifdef CONFIG_PM .suspend = netxen_nic_suspend, .resume = netxen_nic_resume, #endif .shutdown = netxen_nic_shutdown, .err_handler = &netxen_err_handler }; static int __init netxen_init_module(void) { printk(KERN_INFO "%s\n", netxen_nic_driver_string); #ifdef CONFIG_INET register_netdevice_notifier(&netxen_netdev_cb); register_inetaddr_notifier(&netxen_inetaddr_cb); #endif return pci_register_driver(&netxen_driver); } module_init(netxen_init_module); static void __exit netxen_exit_module(void) { pci_unregister_driver(&netxen_driver); #ifdef CONFIG_INET unregister_inetaddr_notifier(&netxen_inetaddr_cb); 
unregister_netdevice_notifier(&netxen_netdev_cb); #endif } module_exit(netxen_exit_module);
gpl-2.0
pronobis/linux_kernel_arm_N8000
drivers/video/samsung/nt35560.c
763
12983
/* * LD9040 AMOLED LCD panel driver. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/wait.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/lcd.h> #include <linux/backlight.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #include "nt35560.h" #define BOOT_GAMMA_LEVEL 10 #define MAX_GAMMA_LEVEL 25 #define SLEEPMSEC 0x1000 #define ENDDEF 0x2000 #define DEFMASK 0xFF00 #define COMMAND_ONLY 0xFE #define DATA_ONLY 0xFF #define BOOT_BRIGHTNESS 122 #define MIN_BRIGHTNESS 0 #define MAX_BRIGHTNESS 255 #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) struct lcd_info { struct device *dev; struct spi_device *spi; unsigned int power; unsigned int current_bl; unsigned int bl; unsigned int ldi_enable; struct mutex lock; struct mutex bl_lock; struct lcd_device *ld; struct backlight_device *bd; struct lcd_platform_data *lcd_pd; struct early_suspend early_suspend; }; static int nt35560_spi_write_byte(struct lcd_info *lcd, int addr, int data) { u16 buf[1]; struct spi_message msg; struct spi_transfer xfer = { .len = 2, .tx_buf = buf, }; buf[0] = (addr << 8) | data; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); return spi_sync(lcd->spi, &msg); } 
static int nt35560_spi_write(struct lcd_info *lcd, unsigned char address, unsigned char command) { int ret = 0; if (address != DATA_ONLY) ret = nt35560_spi_write_byte(lcd, 0x0, address); if (command != COMMAND_ONLY) ret = nt35560_spi_write_byte(lcd, 0x1, command); return ret; } static int nt35560_panel_send_sequence(struct lcd_info *lcd, const unsigned short *seq) { int ret = 0, i = 0; const unsigned short *wbuf; mutex_lock(&lcd->lock); wbuf = seq; while ((wbuf[i] & DEFMASK) != ENDDEF) { if ((wbuf[i] & DEFMASK) != SLEEPMSEC) { ret = nt35560_spi_write(lcd, wbuf[i], wbuf[i+1]); if (ret) break; } else if ((wbuf[i] & DEFMASK) == SLEEPMSEC) msleep(wbuf[i+1]); i += 2; } mutex_unlock(&lcd->lock); return ret; } static int get_backlight_level_from_brightness(unsigned int brightness) { int backlightlevel; /* brightness setting from platform is from 0 to 255 * But in this driver, brightness is only supported from 0 to 24 */ switch (brightness) { case 0: backlightlevel = 0; break; case 1 ... 29: backlightlevel = 0; break; case 30 ... 34: backlightlevel = 1; break; case 35 ... 44: backlightlevel = 2; break; case 45 ... 54: backlightlevel = 3; break; case 55 ... 64: backlightlevel = 4; break; case 65 ... 74: backlightlevel = 5; break; case 75 ... 83: backlightlevel = 6; break; case 84 ... 93: backlightlevel = 7; break; case 94 ... 103: backlightlevel = 8; break; case 104 ... 113: backlightlevel = 9; break; case 114 ... 122: backlightlevel = 10; break; case 123 ... 132: backlightlevel = 11; break; case 133 ... 142: backlightlevel = 12; break; case 143 ... 152: backlightlevel = 13; break; case 153 ... 162: backlightlevel = 14; break; case 163 ... 171: backlightlevel = 15; break; case 172 ... 181: backlightlevel = 16; break; case 182 ... 191: backlightlevel = 17; break; case 192 ... 201: backlightlevel = 18; break; case 202 ... 210: backlightlevel = 19; break; case 211 ... 220: backlightlevel = 20; break; case 221 ... 230: backlightlevel = 21; break; case 231 ... 
240: backlightlevel = 22; break; case 241 ... 250: backlightlevel = 23; break; case 251 ... 255: backlightlevel = 24; break; default: backlightlevel = 24; break; } return backlightlevel; } static int nt35560_ldi_init(struct lcd_info *lcd) { int ret, i; const unsigned short *init_seq[] = { SEQ_SET_PIXEL_FORMAT, SEQ_RGBCTRL, SEQ_SET_HORIZONTAL_ADDRESS, SEQ_SET_VERTICAL_ADDRESS, SEQ_SET_ADDRESS_MODE, SEQ_SLPOUT, SEQ_WRDISBV, SEQ_WRCTRLD_1, SEQ_WRCABCMB, SEQ_WRCTRLD_2, }; for (i = 0; i < ARRAY_SIZE(init_seq); i++) { ret = nt35560_panel_send_sequence(lcd, init_seq[i]); if (ret) break; } return ret; } static int nt35560_ldi_enable(struct lcd_info *lcd) { int ret = 0; ret = nt35560_panel_send_sequence(lcd, SEQ_DISPLAY_ON); return ret; } static int nt35560_ldi_disable(struct lcd_info *lcd) { int ret, i; const unsigned short *off_seq[] = { SEQ_SET_DISPLAY_OFF, SEQ_SLPIN }; lcd->ldi_enable = 0; for (i = 0; i < ARRAY_SIZE(off_seq); i++) { ret = nt35560_panel_send_sequence(lcd, off_seq[i]); if (ret) break; } return ret; } static int update_brightness(struct lcd_info *lcd, u8 force) { int ret = 0, brightness; mutex_lock(&lcd->bl_lock); brightness = lcd->bd->props.brightness; lcd->bl = get_backlight_level_from_brightness(brightness); if ((lcd->current_bl == lcd->bl) && (!force)) { mutex_unlock(&lcd->bl_lock); return ret; } dev_info(&lcd->ld->dev, "brightness=%d, bl=%d\n", brightness, lcd->bl); mutex_unlock(&lcd->bl_lock); return ret; } static int nt35560_power_on(struct lcd_info *lcd) { int ret = 0; struct lcd_platform_data *pd = NULL; pd = lcd->lcd_pd; dev_info(&lcd->ld->dev, "%s\n", __func__); if (!pd) { dev_err(&lcd->ld->dev, "platform data is NULL.\n"); return -EFAULT; } if (!pd->power_on) { dev_err(&lcd->ld->dev, "power_on is NULL.\n"); return -EFAULT; } else { pd->power_on(lcd->ld, 1); msleep(pd->power_on_delay); } if (!pd->reset) { dev_err(&lcd->ld->dev, "reset is NULL.\n"); return -EFAULT; } else { pd->reset(lcd->ld); msleep(pd->reset_delay); } ret = 
nt35560_ldi_init(lcd); if (ret) { dev_err(&lcd->ld->dev, "failed to initialize ldi.\n"); goto err; } ret = nt35560_ldi_enable(lcd); if (ret) { dev_err(&lcd->ld->dev, "failed to enable ldi.\n"); goto err; } lcd->ldi_enable = 1; update_brightness(lcd, 1); err: return ret; } static int nt35560_power_off(struct lcd_info *lcd) { int ret = 0; struct lcd_platform_data *pd = NULL; dev_info(&lcd->ld->dev, "%s\n", __func__); pd = lcd->lcd_pd; if (!pd) { dev_err(&lcd->ld->dev, "platform data is NULL.\n"); return -EFAULT; } ret = nt35560_ldi_disable(lcd); if (ret) { dev_err(&lcd->ld->dev, "lcd setting failed.\n"); ret = -EIO; goto err; } if (!pd->gpio_cfg_earlysuspend) { dev_err(&lcd->ld->dev, "gpio_cfg_earlysuspend is NULL.\n"); ret = -EFAULT; goto err; } else pd->gpio_cfg_earlysuspend(lcd->ld); if (!pd->power_on) { dev_err(&lcd->ld->dev, "power_on is NULL.\n"); ret = -EFAULT; goto err; } else { msleep(pd->power_off_delay); pd->power_on(lcd->ld, 0); } err: return ret; } static int nt35560_power(struct lcd_info *lcd, int power) { int ret = 0; if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) ret = nt35560_power_on(lcd); else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) ret = nt35560_power_off(lcd); if (!ret) lcd->power = power; return ret; } static int nt35560_set_power(struct lcd_device *ld, int power) { struct lcd_info *lcd = lcd_get_data(ld); if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && power != FB_BLANK_NORMAL) { dev_err(&lcd->ld->dev, "power value should be 0, 1 or 4.\n"); return -EINVAL; } return nt35560_power(lcd, power); } static int nt35560_get_power(struct lcd_device *ld) { struct lcd_info *lcd = lcd_get_data(ld); return lcd->power; } static int nt35560_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static int nt35560_set_brightness(struct backlight_device *bd) { int ret = 0, brightness = bd->props.brightness; struct lcd_info *lcd = bl_get_data(bd); if (brightness < MIN_BRIGHTNESS || brightness > 
bd->props.max_brightness) { dev_err(&bd->dev, "lcd brightness should be %d to %d. now %d\n", MIN_BRIGHTNESS, MAX_BRIGHTNESS, brightness); return -EINVAL; } if (lcd->ldi_enable) { ret = update_brightness(lcd, 0); if (ret < 0) return -EINVAL; } return ret; } static struct lcd_ops nt35560_lcd_ops = { .set_power = nt35560_set_power, .get_power = nt35560_get_power, }; static const struct backlight_ops nt35560_backlight_ops = { .get_brightness = nt35560_get_brightness, .update_status = nt35560_set_brightness, }; static ssize_t lcdtype_show(struct device *dev, struct device_attribute *attr, char *buf) { char temp[15]; sprintf(temp, "SMD_AMS427G03\n"); strcat(buf, temp); return strlen(buf); } static DEVICE_ATTR(lcd_type, 0664, lcdtype_show, NULL); #if defined(CONFIG_PM) #ifdef CONFIG_HAS_EARLYSUSPEND void nt35560_early_suspend(struct early_suspend *h) { struct lcd_info *lcd = container_of(h, struct lcd_info , early_suspend); dev_info(&lcd->ld->dev, "+%s\n", __func__); nt35560_power(lcd, FB_BLANK_POWERDOWN); dev_info(&lcd->ld->dev, "-%s\n", __func__); return ; } void nt35560_late_resume(struct early_suspend *h) { struct lcd_info *lcd = container_of(h, struct lcd_info , early_suspend); dev_info(&lcd->ld->dev, "+%s\n", __func__); nt35560_power(lcd, FB_BLANK_UNBLANK); dev_info(&lcd->ld->dev, "-%s\n", __func__); return ; } #endif #endif static int nt35560_probe(struct spi_device *spi) { int ret = 0; struct lcd_info *lcd; lcd = kzalloc(sizeof(struct lcd_info), GFP_KERNEL); if (!lcd) { pr_err("failed to allocate for lcd\n"); ret = -ENOMEM; goto err_alloc; } /* nt35560 lcd panel uses 3-wire 9bits SPI Mode. 
*/ spi->bits_per_word = 9; ret = spi_setup(spi); if (ret < 0) { dev_err(&spi->dev, "spi setup failed.\n"); goto out_free_lcd; } lcd->spi = spi; lcd->dev = &spi->dev; lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data; if (!lcd->lcd_pd) { dev_err(&spi->dev, "platform data is NULL.\n"); goto out_free_lcd; } lcd->ld = lcd_device_register("panel", &spi->dev, lcd, &nt35560_lcd_ops); if (IS_ERR(lcd->ld)) { ret = PTR_ERR(lcd->ld); goto out_free_lcd; } lcd->bd = backlight_device_register("panel", &spi->dev, lcd, &nt35560_backlight_ops, NULL); if (IS_ERR(lcd->bd)) { ret = PTR_ERR(lcd->bd); goto out_free_backlight; } lcd->bd->props.max_brightness = MAX_BRIGHTNESS; lcd->bd->props.brightness = BOOT_BRIGHTNESS; lcd->bl = BOOT_GAMMA_LEVEL; lcd->current_bl = lcd->bl; ret = device_create_file(&lcd->ld->dev, &dev_attr_lcd_type); if (ret < 0) dev_err(&lcd->ld->dev, "failed to add sysfs entries\n"); mutex_init(&lcd->lock); mutex_init(&lcd->bl_lock); /* * if lcd panel was on from bootloader like u-boot then * do not lcd on. */ if (!lcd->lcd_pd->lcd_enabled) { /* * if lcd panel was off from bootloader then * current lcd status is powerdown and then * it enables lcd panel. 
*/ lcd->power = FB_BLANK_POWERDOWN; nt35560_power(lcd, FB_BLANK_UNBLANK); } else { lcd->power = FB_BLANK_UNBLANK; lcd->ldi_enable = 1; if (!lcd->lcd_pd->power_on) { dev_err(lcd->dev, "power_on is NULL.\n"); goto out_free_backlight; } else lcd->lcd_pd->power_on(lcd->ld, 1); } dev_set_drvdata(&spi->dev, lcd); #ifdef CONFIG_HAS_EARLYSUSPEND lcd->early_suspend.suspend = nt35560_early_suspend; lcd->early_suspend.resume = nt35560_late_resume; lcd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1; register_early_suspend(&lcd->early_suspend); #endif dev_info(&lcd->ld->dev, "nt35560 panel driver has been probed.\n"); return 0; out_free_backlight: lcd_device_unregister(lcd->ld); kfree(lcd); return ret; out_free_lcd: kfree(lcd); return ret; err_alloc: return ret; } static int __devexit nt35560_remove(struct spi_device *spi) { struct lcd_info *lcd = dev_get_drvdata(&spi->dev); nt35560_power(lcd, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->ld); backlight_device_unregister(lcd->bd); kfree(lcd); return 0; } static struct spi_driver nt35560_driver = { .driver = { .name = "nt35560", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = nt35560_probe, .remove = __devexit_p(nt35560_remove), }; static int __init nt35560_init(void) { return spi_register_driver(&nt35560_driver); } static void __exit nt35560_exit(void) { spi_unregister_driver(&nt35560_driver); } module_init(nt35560_init); module_exit(nt35560_exit); MODULE_DESCRIPTION("NT35560 TFT Panel Driver"); MODULE_LICENSE("GPL");
gpl-2.0
sembre/kernel_totoro_update3
common/drivers/input/misc/wistron_btns.c
763
34800
/* * Wistron laptop button driver * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> * * You can redistribute and/or modify this program under the terms of the * GNU General Public License version 2 as published by the Free Software * Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General * Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/io.h> #include <linux/dmi.h> #include <linux/init.h> #include <linux/input-polldev.h> #include <linux/input/sparse-keymap.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/preempt.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/leds.h> /* How often we poll keys - msecs */ #define POLL_INTERVAL_DEFAULT 500 /* when idle */ #define POLL_INTERVAL_BURST 100 /* when a key was recently pressed */ /* BIOS subsystem IDs */ #define WIFI 0x35 #define BLUETOOTH 0x34 #define MAIL_LED 0x31 MODULE_AUTHOR("Miloslav Trmac <mitr@volny.cz>"); MODULE_DESCRIPTION("Wistron laptop button driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.3"); static int force; /* = 0; */ module_param(force, bool, 0); MODULE_PARM_DESC(force, "Load even if computer is not in database"); static char *keymap_name; /* = NULL; */ module_param_named(keymap, keymap_name, charp, 0); MODULE_PARM_DESC(keymap, "Keymap name, if it can't be autodetected [generic, 1557/MS2141]"); static struct platform_device 
*wistron_device; /* BIOS interface implementation */ static void __iomem *bios_entry_point; /* BIOS routine entry point */ static void __iomem *bios_code_map_base; static void __iomem *bios_data_map_base; static u8 cmos_address; struct regs { u32 eax, ebx, ecx; }; static void call_bios(struct regs *regs) { unsigned long flags; preempt_disable(); local_irq_save(flags); asm volatile ("pushl %%ebp;" "movl %7, %%ebp;" "call *%6;" "popl %%ebp" : "=a" (regs->eax), "=b" (regs->ebx), "=c" (regs->ecx) : "0" (regs->eax), "1" (regs->ebx), "2" (regs->ecx), "m" (bios_entry_point), "m" (bios_data_map_base) : "edx", "edi", "esi", "memory"); local_irq_restore(flags); preempt_enable(); } static ssize_t __init locate_wistron_bios(void __iomem *base) { static unsigned char __initdata signature[] = { 0x42, 0x21, 0x55, 0x30 }; ssize_t offset; for (offset = 0; offset < 0x10000; offset += 0x10) { if (check_signature(base + offset, signature, sizeof(signature)) != 0) return offset; } return -1; } static int __init map_bios(void) { void __iomem *base; ssize_t offset; u32 entry_point; base = ioremap(0xF0000, 0x10000); /* Can't fail */ offset = locate_wistron_bios(base); if (offset < 0) { printk(KERN_ERR "wistron_btns: BIOS entry point not found\n"); iounmap(base); return -ENODEV; } entry_point = readl(base + offset + 5); printk(KERN_DEBUG "wistron_btns: BIOS signature found at %p, entry point %08X\n", base + offset, entry_point); if (entry_point >= 0xF0000) { bios_code_map_base = base; bios_entry_point = bios_code_map_base + (entry_point & 0xFFFF); } else { iounmap(base); bios_code_map_base = ioremap(entry_point & ~0x3FFF, 0x4000); if (bios_code_map_base == NULL) { printk(KERN_ERR "wistron_btns: Can't map BIOS code at %08X\n", entry_point & ~0x3FFF); goto err; } bios_entry_point = bios_code_map_base + (entry_point & 0x3FFF); } /* The Windows driver maps 0x10000 bytes, we keep only one page... 
*/ bios_data_map_base = ioremap(0x400, 0xc00); if (bios_data_map_base == NULL) { printk(KERN_ERR "wistron_btns: Can't map BIOS data\n"); goto err_code; } return 0; err_code: iounmap(bios_code_map_base); err: return -ENOMEM; } static inline void unmap_bios(void) { iounmap(bios_code_map_base); iounmap(bios_data_map_base); } /* BIOS calls */ static u16 bios_pop_queue(void) { struct regs regs; memset(&regs, 0, sizeof (regs)); regs.eax = 0x9610; regs.ebx = 0x061C; regs.ecx = 0x0000; call_bios(&regs); return regs.eax; } static void __devinit bios_attach(void) { struct regs regs; memset(&regs, 0, sizeof (regs)); regs.eax = 0x9610; regs.ebx = 0x012E; call_bios(&regs); } static void bios_detach(void) { struct regs regs; memset(&regs, 0, sizeof (regs)); regs.eax = 0x9610; regs.ebx = 0x002E; call_bios(&regs); } static u8 __devinit bios_get_cmos_address(void) { struct regs regs; memset(&regs, 0, sizeof (regs)); regs.eax = 0x9610; regs.ebx = 0x051C; call_bios(&regs); return regs.ecx; } static u16 __devinit bios_get_default_setting(u8 subsys) { struct regs regs; memset(&regs, 0, sizeof (regs)); regs.eax = 0x9610; regs.ebx = 0x0200 | subsys; call_bios(&regs); return regs.eax; } static void bios_set_state(u8 subsys, int enable) { struct regs regs; memset(&regs, 0, sizeof (regs)); regs.eax = 0x9610; regs.ebx = (enable ? 
0x0100 : 0x0000) | subsys; call_bios(&regs); } /* Hardware database */ #define KE_WIFI (KE_LAST + 1) #define KE_BLUETOOTH (KE_LAST + 2) #define FE_MAIL_LED 0x01 #define FE_WIFI_LED 0x02 #define FE_UNTESTED 0x80 static struct key_entry *keymap; /* = NULL; Current key map */ static bool have_wifi; static bool have_bluetooth; static int leds_present; /* bitmask of leds present */ static int __init dmi_matched(const struct dmi_system_id *dmi) { const struct key_entry *key; keymap = dmi->driver_data; for (key = keymap; key->type != KE_END; key++) { if (key->type == KE_WIFI) have_wifi = true; else if (key->type == KE_BLUETOOTH) have_bluetooth = true; } leds_present = key->code & (FE_MAIL_LED | FE_WIFI_LED); return 1; } static struct key_entry keymap_empty[] __initdata = { { KE_END, 0 } }; static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_WIFI, 0x30 }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 } }; static struct key_entry keymap_fs_amilo_pro_v3505[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, /* Fn+F1 */ { KE_KEY, 0x06, {KEY_DISPLAYTOGGLE} }, /* Fn+F4 */ { KE_BLUETOOTH, 0x30 }, /* Fn+F10 */ { KE_KEY, 0x31, {KEY_MAIL} }, /* mail button */ { KE_KEY, 0x36, {KEY_WWW} }, /* www button */ { KE_WIFI, 0x78 }, /* satelite dish button */ { KE_END, 0 } }; static struct key_entry keymap_fujitsu_n3510[] __initdata = { { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x71, {KEY_STOPCD} }, { KE_KEY, 0x72, {KEY_PLAYPAUSE} }, { KE_KEY, 0x74, {KEY_REWIND} }, { KE_KEY, 0x78, {KEY_FORWARD} }, { KE_END, 0 } }; static struct key_entry keymap_wistron_ms2111[] __initdata = { { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_PROG3} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, FE_MAIL_LED } }; static struct key_entry 
keymap_wistron_md40100[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x37, {KEY_DISPLAYTOGGLE} }, /* Display on/off */ { KE_END, FE_MAIL_LED | FE_WIFI_LED | FE_UNTESTED } }; static struct key_entry keymap_wistron_ms2141[] __initdata = { { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_WIFI, 0x30 }, { KE_KEY, 0x22, {KEY_REWIND} }, { KE_KEY, 0x23, {KEY_FORWARD} }, { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, { KE_KEY, 0x25, {KEY_STOPCD} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 } }; static struct key_entry keymap_acer_aspire_1500[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_WIFI, 0x30 }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x49, {KEY_CONFIG} }, { KE_BLUETOOTH, 0x44 }, { KE_END, FE_UNTESTED } }; static struct key_entry keymap_acer_aspire_1600[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x08, {KEY_MUTE} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_PROG3} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x49, {KEY_CONFIG} }, { KE_WIFI, 0x30 }, { KE_BLUETOOTH, 0x44 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; /* 3020 has been tested */ static struct key_entry keymap_acer_aspire_5020[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x05, {KEY_SWITCHVIDEOMODE} }, /* Display selection */ { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x6a, {KEY_CONFIG} }, { KE_WIFI, 0x30 }, { KE_BLUETOOTH, 0x44 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_2410[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x6d, {KEY_POWER} }, { KE_KEY, 
0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x6a, {KEY_CONFIG} }, { KE_WIFI, 0x30 }, { KE_BLUETOOTH, 0x44 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_110[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x08, {KEY_MUTE} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x20, {KEY_VOLUMEUP} }, { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_SW, 0x4a, {.sw = {SW_LID, 1}} }, /* lid close */ { KE_SW, 0x4b, {.sw = {SW_LID, 0}} }, /* lid open */ { KE_WIFI, 0x30 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_300[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x08, {KEY_MUTE} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x20, {KEY_VOLUMEUP} }, { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_WIFI, 0x30 }, { KE_BLUETOOTH, 0x44 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_380[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x03, {KEY_POWER} }, /* not 370 */ { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_PROG3} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_WIFI, 0x30 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; /* unusual map */ static struct key_entry keymap_acer_travelmate_220[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x11, {KEY_MAIL} }, { KE_KEY, 0x12, {KEY_WWW} }, { KE_KEY, 0x13, {KEY_PROG2} }, { KE_KEY, 0x31, {KEY_PROG1} }, { KE_END, FE_WIFI_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_230[] 
__initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, FE_WIFI_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_240[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x08, {KEY_MUTE} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_BLUETOOTH, 0x44 }, { KE_WIFI, 0x30 }, { KE_END, FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_350[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_MAIL} }, { KE_KEY, 0x14, {KEY_PROG3} }, { KE_KEY, 0x15, {KEY_WWW} }, { KE_END, FE_MAIL_LED | FE_WIFI_LED | FE_UNTESTED } }; static struct key_entry keymap_acer_travelmate_360[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_MAIL} }, { KE_KEY, 0x14, {KEY_PROG3} }, { KE_KEY, 0x15, {KEY_WWW} }, { KE_KEY, 0x40, {KEY_WLAN} }, { KE_END, FE_WIFI_LED | FE_UNTESTED } /* no mail led */ }; /* Wifi subsystem only activates the led. Therefore we need to pass * wifi event as a normal key, then userspace can really change the wifi state. 
* TODO we need to export led state to userspace (wifi and mail) */ static struct key_entry keymap_acer_travelmate_610[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_PROG3} }, { KE_KEY, 0x14, {KEY_MAIL} }, { KE_KEY, 0x15, {KEY_WWW} }, { KE_KEY, 0x40, {KEY_WLAN} }, { KE_END, FE_MAIL_LED | FE_WIFI_LED } }; static struct key_entry keymap_acer_travelmate_630[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x08, {KEY_MUTE} }, /* not 620 */ { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_PROG3} }, { KE_KEY, 0x20, {KEY_VOLUMEUP} }, { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_WIFI, 0x30 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; static struct key_entry keymap_aopen_1559as[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x06, {KEY_PROG3} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_WIFI, 0x30 }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 }, }; static struct key_entry keymap_fs_amilo_d88x0[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x08, {KEY_MUTE} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_PROG3} }, { KE_END, FE_MAIL_LED | FE_WIFI_LED | FE_UNTESTED } }; static struct key_entry keymap_wistron_md2900[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_WIFI, 0x30 }, { KE_END, FE_MAIL_LED | FE_UNTESTED } }; static struct key_entry keymap_wistron_md96500[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x05, {KEY_SWITCHVIDEOMODE} }, /* 
Display selection */ { KE_KEY, 0x06, {KEY_DISPLAYTOGGLE} }, /* Display on/off */ { KE_KEY, 0x08, {KEY_MUTE} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x20, {KEY_VOLUMEUP} }, { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, { KE_KEY, 0x22, {KEY_REWIND} }, { KE_KEY, 0x23, {KEY_FORWARD} }, { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, { KE_KEY, 0x25, {KEY_STOPCD} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_WIFI, 0x30 }, { KE_BLUETOOTH, 0x44 }, { KE_END, FE_UNTESTED } }; static struct key_entry keymap_wistron_generic[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x02, {KEY_CONFIG} }, { KE_KEY, 0x03, {KEY_POWER} }, { KE_KEY, 0x05, {KEY_SWITCHVIDEOMODE} }, /* Display selection */ { KE_KEY, 0x06, {KEY_DISPLAYTOGGLE} }, /* Display on/off */ { KE_KEY, 0x08, {KEY_MUTE} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_KEY, 0x13, {KEY_PROG3} }, { KE_KEY, 0x14, {KEY_MAIL} }, { KE_KEY, 0x15, {KEY_WWW} }, { KE_KEY, 0x20, {KEY_VOLUMEUP} }, { KE_KEY, 0x21, {KEY_VOLUMEDOWN} }, { KE_KEY, 0x22, {KEY_REWIND} }, { KE_KEY, 0x23, {KEY_FORWARD} }, { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, { KE_KEY, 0x25, {KEY_STOPCD} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_KEY, 0x37, {KEY_DISPLAYTOGGLE} }, /* Display on/off */ { KE_KEY, 0x40, {KEY_WLAN} }, { KE_KEY, 0x49, {KEY_CONFIG} }, { KE_SW, 0x4a, {.sw = {SW_LID, 1}} }, /* lid close */ { KE_SW, 0x4b, {.sw = {SW_LID, 0}} }, /* lid open */ { KE_KEY, 0x6a, {KEY_CONFIG} }, { KE_KEY, 0x6d, {KEY_POWER} }, { KE_KEY, 0x71, {KEY_STOPCD} }, { KE_KEY, 0x72, {KEY_PLAYPAUSE} }, { KE_KEY, 0x74, {KEY_REWIND} }, { KE_KEY, 0x78, {KEY_FORWARD} }, { KE_WIFI, 0x30 }, { KE_BLUETOOTH, 0x44 }, { KE_END, 0 } }; static struct key_entry keymap_aopen_1557[] __initdata = { { KE_KEY, 0x01, {KEY_HELP} }, { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_WIFI, 0x30 }, { KE_KEY, 0x22, {KEY_REWIND} }, { KE_KEY, 0x23, {KEY_FORWARD} }, { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, { KE_KEY, 0x25, 
{KEY_STOPCD} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 } }; static struct key_entry keymap_prestigio[] __initdata = { { KE_KEY, 0x11, {KEY_PROG1} }, { KE_KEY, 0x12, {KEY_PROG2} }, { KE_WIFI, 0x30 }, { KE_KEY, 0x22, {KEY_REWIND} }, { KE_KEY, 0x23, {KEY_FORWARD} }, { KE_KEY, 0x24, {KEY_PLAYPAUSE} }, { KE_KEY, 0x25, {KEY_STOPCD} }, { KE_KEY, 0x31, {KEY_MAIL} }, { KE_KEY, 0x36, {KEY_WWW} }, { KE_END, 0 } }; /* * If your machine is not here (which is currently rather likely), please send * a list of buttons and their key codes (reported when loading this module * with force=1) and the output of dmidecode to $MODULE_AUTHOR. */ static const struct dmi_system_id __initconst dmi_ids[] = { { /* Fujitsu-Siemens Amilo Pro V2000 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2000"), }, .driver_data = keymap_fs_amilo_pro_v2000 }, { /* Fujitsu-Siemens Amilo Pro Edition V3505 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro Edition V3505"), }, .driver_data = keymap_fs_amilo_pro_v3505 }, { /* Fujitsu-Siemens Amilo M7400 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO M "), }, .driver_data = keymap_fs_amilo_pro_v2000 }, { /* Maxdata Pro 7000 DX */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MAXDATA"), DMI_MATCH(DMI_PRODUCT_NAME, "Pro 7000"), }, .driver_data = keymap_fs_amilo_pro_v2000 }, { /* Fujitsu N3510 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), DMI_MATCH(DMI_PRODUCT_NAME, "N3510"), }, .driver_data = keymap_fujitsu_n3510 }, { /* Acer Aspire 1500 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1500"), }, .driver_data = keymap_acer_aspire_1500 }, { /* Acer Aspire 1600 */ .callback = dmi_matched, .matches 
= { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1600"), }, .driver_data = keymap_acer_aspire_1600 }, { /* Acer Aspire 3020 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3020"), }, .driver_data = keymap_acer_aspire_5020 }, { /* Acer Aspire 5020 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5020"), }, .driver_data = keymap_acer_aspire_5020 }, { /* Acer TravelMate 2100 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2100"), }, .driver_data = keymap_acer_aspire_5020 }, { /* Acer TravelMate 2410 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2410"), }, .driver_data = keymap_acer_travelmate_2410 }, { /* Acer TravelMate C300 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate C300"), }, .driver_data = keymap_acer_travelmate_300 }, { /* Acer TravelMate C100 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate C100"), }, .driver_data = keymap_acer_travelmate_300 }, { /* Acer TravelMate C110 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate C110"), }, .driver_data = keymap_acer_travelmate_110 }, { /* Acer TravelMate 380 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 380"), }, .driver_data = keymap_acer_travelmate_380 }, { /* Acer TravelMate 370 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 370"), }, .driver_data = keymap_acer_travelmate_380 /* keyboard minus 1 key */ }, { /* Acer TravelMate 220 */ .callback = dmi_matched, .matches = { 
DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 220"), }, .driver_data = keymap_acer_travelmate_220 }, { /* Acer TravelMate 260 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 260"), }, .driver_data = keymap_acer_travelmate_220 }, { /* Acer TravelMate 230 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 230"), /* acerhk looks for "TravelMate F4..." ?! */ }, .driver_data = keymap_acer_travelmate_230 }, { /* Acer TravelMate 280 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 280"), }, .driver_data = keymap_acer_travelmate_230 }, { /* Acer TravelMate 240 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 240"), }, .driver_data = keymap_acer_travelmate_240 }, { /* Acer TravelMate 250 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 250"), }, .driver_data = keymap_acer_travelmate_240 }, { /* Acer TravelMate 2424NWXCi */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2420"), }, .driver_data = keymap_acer_travelmate_240 }, { /* Acer TravelMate 350 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 350"), }, .driver_data = keymap_acer_travelmate_350 }, { /* Acer TravelMate 360 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, .driver_data = keymap_acer_travelmate_360 }, { /* Acer TravelMate 610 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ACER"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 610"), }, .driver_data = keymap_acer_travelmate_610 }, { /* Acer TravelMate 620 */ .callback = dmi_matched, 
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 620"), }, .driver_data = keymap_acer_travelmate_630 }, { /* Acer TravelMate 630 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 630"), }, .driver_data = keymap_acer_travelmate_630 }, { /* AOpen 1559AS */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "E2U"), DMI_MATCH(DMI_BOARD_NAME, "E2U"), }, .driver_data = keymap_aopen_1559as }, { /* Medion MD 9783 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"), DMI_MATCH(DMI_PRODUCT_NAME, "MD 9783"), }, .driver_data = keymap_wistron_ms2111 }, { /* Medion MD 40100 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"), DMI_MATCH(DMI_PRODUCT_NAME, "WID2000"), }, .driver_data = keymap_wistron_md40100 }, { /* Medion MD 2900 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"), DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2000"), }, .driver_data = keymap_wistron_md2900 }, { /* Medion MD 42200 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Medion"), DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2030"), }, .driver_data = keymap_fs_amilo_pro_v2000 }, { /* Medion MD 96500 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDIONPC"), DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2040"), }, .driver_data = keymap_wistron_md96500 }, { /* Medion MD 95400 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MEDIONPC"), DMI_MATCH(DMI_PRODUCT_NAME, "WIM 2050"), }, .driver_data = keymap_wistron_md96500 }, { /* Fujitsu Siemens Amilo D7820 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), /* not sure */ DMI_MATCH(DMI_PRODUCT_NAME, "Amilo D"), }, .driver_data = keymap_fs_amilo_d88x0 }, { /* Fujitsu Siemens Amilo D88x0 */ .callback = dmi_matched, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO D"), 
}, .driver_data = keymap_fs_amilo_d88x0 }, { NULL, } }; /* Copy the good keymap, as the original ones are free'd */ static int __init copy_keymap(void) { const struct key_entry *key; struct key_entry *new_keymap; unsigned int length = 1; for (key = keymap; key->type != KE_END; key++) length++; new_keymap = kmemdup(keymap, length * sizeof(struct key_entry), GFP_KERNEL); if (!new_keymap) return -ENOMEM; keymap = new_keymap; return 0; } static int __init select_keymap(void) { dmi_check_system(dmi_ids); if (keymap_name != NULL) { if (strcmp (keymap_name, "1557/MS2141") == 0) keymap = keymap_wistron_ms2141; else if (strcmp (keymap_name, "aopen1557") == 0) keymap = keymap_aopen_1557; else if (strcmp (keymap_name, "prestigio") == 0) keymap = keymap_prestigio; else if (strcmp (keymap_name, "generic") == 0) keymap = keymap_wistron_generic; else { printk(KERN_ERR "wistron_btns: Keymap unknown\n"); return -EINVAL; } } if (keymap == NULL) { if (!force) { printk(KERN_ERR "wistron_btns: System unknown\n"); return -ENODEV; } keymap = keymap_empty; } return copy_keymap(); } /* Input layer interface */ static struct input_polled_dev *wistron_idev; static unsigned long jiffies_last_press; static bool wifi_enabled; static bool bluetooth_enabled; /* led management */ static void wistron_mail_led_set(struct led_classdev *led_cdev, enum led_brightness value) { bios_set_state(MAIL_LED, (value != LED_OFF) ? 1 : 0); } /* same as setting up wifi card, but for laptops on which the led is managed */ static void wistron_wifi_led_set(struct led_classdev *led_cdev, enum led_brightness value) { bios_set_state(WIFI, (value != LED_OFF) ? 
1 : 0); } static struct led_classdev wistron_mail_led = { .name = "wistron:green:mail", .brightness_set = wistron_mail_led_set, }; static struct led_classdev wistron_wifi_led = { .name = "wistron:red:wifi", .brightness_set = wistron_wifi_led_set, }; static void __devinit wistron_led_init(struct device *parent) { if (leds_present & FE_WIFI_LED) { u16 wifi = bios_get_default_setting(WIFI); if (wifi & 1) { wistron_wifi_led.brightness = (wifi & 2) ? LED_FULL : LED_OFF; if (led_classdev_register(parent, &wistron_wifi_led)) leds_present &= ~FE_WIFI_LED; else bios_set_state(WIFI, wistron_wifi_led.brightness); } else leds_present &= ~FE_WIFI_LED; } if (leds_present & FE_MAIL_LED) { /* bios_get_default_setting(MAIL) always retuns 0, so just turn the led off */ wistron_mail_led.brightness = LED_OFF; if (led_classdev_register(parent, &wistron_mail_led)) leds_present &= ~FE_MAIL_LED; else bios_set_state(MAIL_LED, wistron_mail_led.brightness); } } static void __devexit wistron_led_remove(void) { if (leds_present & FE_MAIL_LED) led_classdev_unregister(&wistron_mail_led); if (leds_present & FE_WIFI_LED) led_classdev_unregister(&wistron_wifi_led); } static inline void wistron_led_suspend(void) { if (leds_present & FE_MAIL_LED) led_classdev_suspend(&wistron_mail_led); if (leds_present & FE_WIFI_LED) led_classdev_suspend(&wistron_wifi_led); } static inline void wistron_led_resume(void) { if (leds_present & FE_MAIL_LED) led_classdev_resume(&wistron_mail_led); if (leds_present & FE_WIFI_LED) led_classdev_resume(&wistron_wifi_led); } static void handle_key(u8 code) { const struct key_entry *key = sparse_keymap_entry_from_scancode(wistron_idev->input, code); if (key) { switch (key->type) { case KE_WIFI: if (have_wifi) { wifi_enabled = !wifi_enabled; bios_set_state(WIFI, wifi_enabled); } break; case KE_BLUETOOTH: if (have_bluetooth) { bluetooth_enabled = !bluetooth_enabled; bios_set_state(BLUETOOTH, bluetooth_enabled); } break; default: sparse_keymap_report_entry(wistron_idev->input, 
/* (tail of handle_key -- the function begins before this chunk) */
							key, 1, true);
			break;
	}
	jiffies_last_press = jiffies;
	} else
		printk(KERN_NOTICE "wistron_btns: Unknown key code %02X\n", code);
}

/*
 * Drain the BIOS event queue.  Each CMOS read reports the remaining queue
 * length; events are popped until it reaches zero.  When @discard is true
 * the events are thrown away (used to flush stale input on open/resume).
 */
static void poll_bios(bool discard)
{
	u8 qlen;
	u16 val;

	for (;;) {
		qlen = CMOS_READ(cmos_address);
		if (qlen == 0)
			break;
		val = bios_pop_queue();
		if (val != 0 && !discard)
			handle_key((u8)val);
	}
}

/* ->open handler: drop anything queued while nobody was listening. */
static void wistron_flush(struct input_polled_dev *dev)
{
	/* Flush stale event queue */
	poll_bios(true);
}

/*
 * Periodic poll handler.  Processes pending BIOS events, then adapts the
 * poll interval: faster while the user pressed a key within the last 2s.
 */
static void wistron_poll(struct input_polled_dev *dev)
{
	poll_bios(false);

	/* Increase poll frequency if user is currently pressing keys (< 2s ago) */
	if (time_before(jiffies, jiffies_last_press + 2 * HZ))
		dev->poll_interval = POLL_INTERVAL_BURST;
	else
		dev->poll_interval = POLL_INTERVAL_DEFAULT;
}

/*
 * sparse_keymap_setup() callback: rewrites keymap entries before they are
 * registered.  Always returns 0 (entries are kept, possibly retyped).
 */
static int __devinit wistron_setup_keymap(struct input_dev *dev,
					  struct key_entry *entry)
{
	switch (entry->type) {

	/* if wifi or bluetooth are not available, create normal keys */
	case KE_WIFI:
		if (!have_wifi) {
			entry->type = KE_KEY;
			entry->keycode = KEY_WLAN;
		}
		break;

	case KE_BLUETOOTH:
		if (!have_bluetooth) {
			entry->type = KE_KEY;
			entry->keycode = KEY_BLUETOOTH;
		}
		break;

	case KE_END:
		/* Keymap sentinel may carry a flag asking testers for feedback. */
		if (entry->code & FE_UNTESTED)
			printk(KERN_WARNING "Untested laptop multimedia keys, "
				"please report success or failure to "
				"eric.piel@tremplin-utc.net\n");
		break;
	}

	return 0;
}

/*
 * Allocate and register the polled input device backing the laptop buttons.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released (goto-cleanup ladder below).
 */
static int __devinit setup_input_dev(void)
{
	struct input_dev *input_dev;
	int error;

	wistron_idev = input_allocate_polled_device();
	if (!wistron_idev)
		return -ENOMEM;

	wistron_idev->open = wistron_flush;
	wistron_idev->poll = wistron_poll;
	wistron_idev->poll_interval = POLL_INTERVAL_DEFAULT;

	input_dev = wistron_idev->input;
	input_dev->name = "Wistron laptop buttons";
	input_dev->phys = "wistron/input0";
	input_dev->id.bustype = BUS_HOST;
	input_dev->dev.parent = &wistron_device->dev;

	error = sparse_keymap_setup(input_dev, keymap, wistron_setup_keymap);
	if (error)
		goto err_free_dev;

	error = input_register_polled_device(wistron_idev);
	if (error)
		goto err_free_keymap;

	return 0;

 err_free_keymap:
	sparse_keymap_free(input_dev);
 err_free_dev:
	input_free_polled_device(wistron_idev);
	return error;
}

/* Driver core */

/*
 * Platform probe: attach to the BIOS interface, read the default radio
 * states (bit 0 = "radio present/usable", bit 1 = "enabled"), program
 * them back, set up LEDs and the input device.
 */
static int __devinit wistron_probe(struct platform_device *dev)
{
	int err;

	bios_attach();
	cmos_address = bios_get_cmos_address();

	if (have_wifi) {
		u16 wifi = bios_get_default_setting(WIFI);

		if (wifi & 1)
			wifi_enabled = wifi & 2;
		else
			have_wifi = 0;

		if (have_wifi)
			bios_set_state(WIFI, wifi_enabled);
	}

	if (have_bluetooth) {
		u16 bt = bios_get_default_setting(BLUETOOTH);

		if (bt & 1)
			bluetooth_enabled = bt & 2;
		else
			have_bluetooth = false;

		if (have_bluetooth)
			bios_set_state(BLUETOOTH, bluetooth_enabled);
	}

	wistron_led_init(&dev->dev);

	err = setup_input_dev();
	if (err) {
		/* input setup failed: undo the BIOS attach done above */
		bios_detach();
		return err;
	}

	return 0;
}

/* Platform remove: tear down in reverse order of probe. */
static int __devexit wistron_remove(struct platform_device *dev)
{
	wistron_led_remove();
	input_unregister_polled_device(wistron_idev);
	sparse_keymap_free(wistron_idev->input);
	input_free_polled_device(wistron_idev);
	bios_detach();

	return 0;
}

#ifdef CONFIG_PM
/* Power down both radios across suspend (states are restored on resume). */
static int wistron_suspend(struct device *dev)
{
	if (have_wifi)
		bios_set_state(WIFI, 0);

	if (have_bluetooth)
		bios_set_state(BLUETOOTH, 0);

	wistron_led_suspend();
	return 0;
}

/* Restore radio states and discard any events queued while suspended. */
static int wistron_resume(struct device *dev)
{
	if (have_wifi)
		bios_set_state(WIFI, wifi_enabled);

	if (have_bluetooth)
		bios_set_state(BLUETOOTH, bluetooth_enabled);

	wistron_led_resume();

	poll_bios(true);

	return 0;
}

static const struct dev_pm_ops wistron_pm_ops = {
	.suspend	= wistron_suspend,
	.resume		= wistron_resume,
	.poweroff	= wistron_suspend,
	.restore	= wistron_resume,
};
#endif

static struct platform_driver wistron_driver = {
	.driver		= {
		.name	= "wistron-bios",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &wistron_pm_ops,
#endif
	},
	.probe		= wistron_probe,
	.remove		= __devexit_p(wistron_remove),
};

/*
 * Module init: detect the laptop model (select_keymap), map the BIOS call
 * interface, then register driver and device.  Unwinds via the goto ladder
 * on failure.  NOTE(review): keymap selected by select_keymap() appears to
 * be freed only in wb_module_exit(), not on the early-error paths here --
 * verify against the full file whether that is intentional.
 */
static int __init wb_module_init(void)
{
	int err;

	err = select_keymap();
	if (err)
		return err;

	err = map_bios();
	if (err)
		return err;

	err = platform_driver_register(&wistron_driver);
	if (err)
		goto err_unmap_bios;

	wistron_device = platform_device_alloc("wistron-bios", -1);
	if (!wistron_device) {
		err = -ENOMEM;
		goto err_unregister_driver;
	}

	err = platform_device_add(wistron_device);
	if (err)
		goto err_free_device;

	return 0;

 err_free_device:
	platform_device_put(wistron_device);
 err_unregister_driver:
	platform_driver_unregister(&wistron_driver);
 err_unmap_bios:
	unmap_bios();

	return err;
}

/* Module exit: reverse of wb_module_init, plus frees the selected keymap. */
static void __exit wb_module_exit(void)
{
	platform_device_unregister(wistron_device);
	platform_driver_unregister(&wistron_driver);
	unmap_bios();
	kfree(keymap);
}

module_init(wb_module_init);
module_exit(wb_module_exit);
gpl-2.0
jigpu/input
sound/isa/als100.c
1275
10557
/*
    card-als100.c - driver for Avance Logic ALS100 based soundcards.
    Copyright (C) 1999-2000 by Massimo Piccioni <dafastidio@libero.it>
    Copyright (C) 1999-2002 by Massimo Piccioni <dafastidio@libero.it>

    Thanks to Pierfrancesco 'qM2' Passerini.

    Generalised for soundcards based on DT-0196 and ALS-007 chips
    by Jonathan Woithe <jwoithe@just42.net>: June 2002.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
*/

#include <linux/init.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/pnp.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#include <sound/sb.h>

#define PFX "als100: "

MODULE_DESCRIPTION("Avance Logic ALS007/ALS1X0");
MODULE_SUPPORTED_DEVICE("{{Diamond Technologies DT-019X},"
		"{Avance Logic ALS-007}}"
		"{{Avance Logic,ALS100 - PRO16PNP},"
		"{Avance Logic,ALS110},"
		"{Avance Logic,ALS120},"
		"{Avance Logic,ALS200},"
		"{3D Melody,MF1000},"
		"{Digimate,3D Sound},"
		"{Avance Logic,ALS120},"
		"{RTL,RTL3000}}");
MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_LICENSE("GPL");

/* Per-card module parameters; the *_port/irq/dma slots are filled in from
 * PnP resources by snd_card_als100_pnp() rather than set by the user. */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;	/* Enable this card */
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* PnP setup */
static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* PnP setup */
static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */
static int dma16[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Avance Logic based soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Avance Logic based soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Avance Logic based soundcard.");

MODULE_ALIAS("snd-dt019x");

/* Per-card private data: the three PnP logical devices plus the SB chip. */
struct snd_card_als100 {
	struct pnp_dev *dev;	/* audio function */
	struct pnp_dev *devmpu;	/* MPU-401 function (optional) */
	struct pnp_dev *devopl;	/* OPL3/FM function (optional) */
	struct snd_sb *chip;
};

/* PnP ID table; driver_data selects the SB hardware flavour. */
static struct pnp_card_device_id snd_als100_pnpids[] = {
	/* DT197A30 */
	{ .id = "RWB1688",
	  .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } },
	  .driver_data = SB_HW_DT019X },
	/* DT0196 / ALS-007 */
	{ .id = "ALS0007",
	  .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } },
	  .driver_data = SB_HW_DT019X },
	/* ALS100 - PRO16PNP */
	{ .id = "ALS0001",
	  .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } },
	  .driver_data = SB_HW_ALS100 },
	/* ALS110 - MF1000 - Digimate 3D Sound */
	{ .id = "ALS0110",
	  .devs = { { "@@@1001" }, { "@X@1001" }, { "@H@1001" } },
	  .driver_data = SB_HW_ALS100 },
	/* ALS120 */
	{ .id = "ALS0120",
	  .devs = { { "@@@2001" }, { "@X@2001" }, { "@H@2001" } },
	  .driver_data = SB_HW_ALS100 },
	/* ALS200 */
	{ .id = "ALS0200",
	  .devs = { { "@@@0020" }, { "@X@0020" }, { "@H@0001" } },
	  .driver_data = SB_HW_ALS100 },
	/* ALS200 OEM */
	{ .id = "ALS0200",
	  .devs = { { "@@@0020" }, { "@X@0020" }, { "@H@0020" } },
	  .driver_data = SB_HW_ALS100 },
	/* RTL3000 */
	{ .id = "RTL3000",
	  .devs = { { "@@@2001" }, { "@X@2001" }, { "@H@2001" } },
	  .driver_data = SB_HW_ALS100 },
	{ .id = "" } /* end */
};

MODULE_DEVICE_TABLE(pnp_card, snd_als100_pnpids);

/*
 * Request and activate the card's PnP logical devices and record the
 * assigned resources in the module-parameter arrays for slot @dev.
 * The audio device is mandatory; MPU-401 and OPL3 are optional -- on
 * failure they are released and the corresponding port set to -1 so
 * probe simply skips them.  Note the backward gotos: __mpu_error /
 * __fm_error jump INTO the else branch, where the `if (pdev)` test
 * distinguishes "activation failed" (pdev set) from "device absent".
 */
static int snd_card_als100_pnp(int dev, struct snd_card_als100 *acard,
			       struct pnp_card_link *card,
			       const struct pnp_card_device_id *id)
{
	struct pnp_dev *pdev;
	int err;

	acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
	if (acard->dev == NULL)
		return -ENODEV;

	acard->devmpu = pnp_request_card_device(card, id->devs[1].id, acard->dev);
	acard->devopl = pnp_request_card_device(card, id->devs[2].id, acard->dev);

	pdev = acard->dev;

	err = pnp_activate_dev(pdev);
	if (err < 0) {
		snd_printk(KERN_ERR PFX "AUDIO pnp configure failure\n");
		return err;
	}
	port[dev] = pnp_port_start(pdev, 0);
	/* DT-019X uses a single 8-bit DMA; ALS100 has swapped 8/16-bit DMAs */
	if (id->driver_data == SB_HW_DT019X)
		dma8[dev] = pnp_dma(pdev, 0);
	else {
		dma8[dev] = pnp_dma(pdev, 1);
		dma16[dev] = pnp_dma(pdev, 0);
	}
	irq[dev] = pnp_irq(pdev, 0);

	pdev = acard->devmpu;
	if (pdev != NULL) {
		err = pnp_activate_dev(pdev);
		if (err < 0)
			goto __mpu_error;
		mpu_port[dev] = pnp_port_start(pdev, 0);
		mpu_irq[dev] = pnp_irq(pdev, 0);
	} else {
	__mpu_error:
		if (pdev) {
			pnp_release_card_device(pdev);
			snd_printk(KERN_ERR PFX "MPU401 pnp configure failure, skipping\n");
		}
		acard->devmpu = NULL;
		mpu_port[dev] = -1;
	}

	pdev = acard->devopl;
	if (pdev != NULL) {
		err = pnp_activate_dev(pdev);
		if (err < 0)
			goto __fm_error;
		fm_port[dev] = pnp_port_start(pdev, 0);
	} else {
	__fm_error:
		if (pdev) {
			pnp_release_card_device(pdev);
			snd_printk(KERN_ERR PFX "OPL3 pnp configure failure, skipping\n");
		}
		acard->devopl = NULL;
		fm_port[dev] = -1;
	}

	return 0;
}

/*
 * Create and register one ALSA card for slot @dev: configure PnP, create
 * the SB DSP/PCM/mixer, and optionally attach MPU-401 and OPL3 devices.
 * MPU/OPL failures are non-fatal (logged and skipped); everything else
 * frees the card and returns the error.
 */
static int snd_card_als100_probe(int dev,
				 struct pnp_card_link *pcard,
				 const struct pnp_card_device_id *pid)
{
	int error;
	struct snd_sb *chip;
	struct snd_card *card;
	struct snd_card_als100 *acard;
	struct snd_opl3 *opl3;

	error = snd_card_new(&pcard->card->dev,
			     index[dev], id[dev], THIS_MODULE,
			     sizeof(struct snd_card_als100), &card);
	if (error < 0)
		return error;
	acard = card->private_data;

	if ((error = snd_card_als100_pnp(dev, acard, pcard, pid))) {
		snd_card_free(card);
		return error;
	}

	/* DT-019X has no 16-bit DMA channel */
	if (pid->driver_data == SB_HW_DT019X)
		dma16[dev] = -1;

	error = snd_sbdsp_create(card, port[dev], irq[dev],
				 snd_sb16dsp_interrupt, dma8[dev],
				 dma16[dev], pid->driver_data, &chip);
	if (error < 0) {
		snd_card_free(card);
		return error;
	}
	acard->chip = chip;

	if (pid->driver_data == SB_HW_DT019X) {
		strcpy(card->driver, "DT-019X");
		strcpy(card->shortname, "Diamond Tech. DT-019X");
		sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d",
			card->shortname, chip->name, chip->port,
			irq[dev], dma8[dev]);
	} else {
		strcpy(card->driver, "ALS100");
		strcpy(card->shortname, "Avance Logic ALS100");
		sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d&%d",
			card->shortname, chip->name, chip->port,
			irq[dev], dma8[dev], dma16[dev]);
	}

	if ((error = snd_sb16dsp_pcm(chip, 0)) < 0) {
		snd_card_free(card);
		return error;
	}

	if ((error = snd_sbmixer_new(chip)) < 0) {
		snd_card_free(card);
		return error;
	}

	if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) {
		int mpu_type = MPU401_HW_ALS100;

		if (mpu_irq[dev] == SNDRV_AUTO_IRQ)
			mpu_irq[dev] = -1;

		if (pid->driver_data == SB_HW_DT019X)
			mpu_type = MPU401_HW_MPU401;

		if (snd_mpu401_uart_new(card, 0,
					mpu_type,
					mpu_port[dev], 0,
					mpu_irq[dev],
					NULL) < 0)
			snd_printk(KERN_ERR PFX "no MPU-401 device at 0x%lx\n",
				   mpu_port[dev]);
	}

	if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) {
		if (snd_opl3_create(card,
				    fm_port[dev], fm_port[dev] + 2,
				    OPL3_HW_AUTO, 0, &opl3) < 0) {
			snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx\n",
				   fm_port[dev], fm_port[dev] + 2);
		} else {
			if ((error = snd_opl3_timer_new(opl3, 0, 1)) < 0) {
				snd_card_free(card);
				return error;
			}
			if ((error = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
				snd_card_free(card);
				return error;
			}
		}
	}

	if ((error = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return error;
	}
	pnp_set_card_drvdata(pcard, card);
	return 0;
}

/* Number of cards successfully probed; checked at module init. */
static unsigned int als100_devices;

/*
 * PnP probe callback.  `dev` is static so successive matching cards claim
 * successive module-parameter slots; it is advanced past the slot just
 * used (the extra dev++ before `return 0` is deliberate, since the for
 * loop's own increment is skipped by the return).
 */
static int snd_als100_pnp_detect(struct pnp_card_link *card,
				 const struct pnp_card_device_id *id)
{
	static int dev;
	int res;

	for ( ; dev < SNDRV_CARDS; dev++) {
		if (!enable[dev])
			continue;
		res = snd_card_als100_probe(dev, card, id);
		if (res < 0)
			return res;
		dev++;
		als100_devices++;
		return 0;
	}
	return -ENODEV;
}

/* PnP remove callback: free the ALSA card stashed in drvdata. */
static void snd_als100_pnp_remove(struct pnp_card_link *pcard)
{
	snd_card_free(pnp_get_card_drvdata(pcard));
	pnp_set_card_drvdata(pcard, NULL);
}

#ifdef CONFIG_PM
/* Suspend: announce D3hot, stop PCM streams, save mixer registers. */
static int snd_als100_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);
	struct snd_card_als100 *acard = card->private_data;
	struct snd_sb *chip = acard->chip;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	snd_pcm_suspend_all(chip->pcm);
	snd_sbmixer_suspend(chip);
	return 0;
}

/* Resume: reset the DSP, restore mixer registers, announce D0. */
static int snd_als100_pnp_resume(struct pnp_card_link *pcard)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);
	struct snd_card_als100 *acard = card->private_data;
	struct snd_sb *chip = acard->chip;

	snd_sbdsp_reset(chip);
	snd_sbmixer_resume(chip);
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif

static struct pnp_card_driver als100_pnpc_driver = {
	.flags          = PNP_DRIVER_RES_DISABLE,
	.name           = "als100",
	.id_table       = snd_als100_pnpids,
	.probe          = snd_als100_pnp_detect,
	.remove         = snd_als100_pnp_remove,
#ifdef CONFIG_PM
	.suspend	= snd_als100_pnp_suspend,
	.resume		= snd_als100_pnp_resume,
#endif
};

/*
 * Module init: register the PnP card driver; if no card was detected,
 * unregister again and fail with -ENODEV (message only when modular).
 */
static int __init alsa_card_als100_init(void)
{
	int err;

	err = pnp_register_card_driver(&als100_pnpc_driver);
	if (err)
		return err;

	if (!als100_devices) {
		pnp_unregister_card_driver(&als100_pnpc_driver);
#ifdef MODULE
		snd_printk(KERN_ERR "no Avance Logic based soundcards found\n");
#endif
		return -ENODEV;
	}
	return 0;
}

static void __exit alsa_card_als100_exit(void)
{
	pnp_unregister_card_driver(&als100_pnpc_driver);
}

module_init(alsa_card_als100_init)
module_exit(alsa_card_als100_exit)
gpl-2.0
davidmueller13/g3_kernel
drivers/staging/vt6656/key.c
1531
29341
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *
 * File: key.c
 *
 * Purpose: Implement functions for 802.11i Key management
 *
 * Author: Jerry Chen
 *
 * Date: May 29, 2003
 *
 * Functions:
 *      KeyvInitTable - Init Key management table
 *      KeybGetKey - Get Key from table
 *      KeybSetKey - Set Key to table
 *      KeybRemoveKey - Remove Key from table
 *      KeybGetTransmitKey - Get Transmit Key from table
 *
 * Revision History:
 *
 */

#include "tmacro.h"
#include "key.h"
#include "mac.h"
#include "rndis.h"
#include "control.h"

/*---------------------  Static Definitions -------------------------*/

/*---------------------  Static Classes  ----------------------------*/

/*---------------------  Static Variables  --------------------------*/
static int          msglevel                =MSG_LEVEL_INFO;
//static int          msglevel                =MSG_LEVEL_DEBUG;
/*---------------------  Static Functions  --------------------------*/

/*---------------------  Export Variables  --------------------------*/

/*---------------------  Static Definitions -------------------------*/

/*---------------------  Static Classes  ----------------------------*/

/*---------------------  Static Variables  --------------------------*/

/*---------------------  Static Functions  --------------------------*/

/*
 * Scan the key table for entries that are marked in-use but whose
 * pairwise key and all four group keys are invalid; reclaim those
 * slots and tell the device (one CLRKEYENTRY message carrying the
 * list of freed slot indices) to clear the matching HW entries.
 */
static void s_vCheckKeyTableValid(void *pDeviceHandler, PSKeyManagement pTable)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	int i;
	WORD wLength = 0;
	BYTE pbyData[MAX_KEY_TABLE];

	for (i=0;i<MAX_KEY_TABLE;i++) {
		if ((pTable->KeyTable[i].bInUse == TRUE) &&
		    (pTable->KeyTable[i].PairwiseKey.bKeyValid == FALSE) &&
		    (pTable->KeyTable[i].GroupKey[0].bKeyValid == FALSE) &&
		    (pTable->KeyTable[i].GroupKey[1].bKeyValid == FALSE) &&
		    (pTable->KeyTable[i].GroupKey[2].bKeyValid == FALSE) &&
		    (pTable->KeyTable[i].GroupKey[3].bKeyValid == FALSE) ) {
			pTable->KeyTable[i].bInUse = FALSE;
			pTable->KeyTable[i].wKeyCtl = 0;
			pTable->KeyTable[i].bSoftWEP = FALSE;
			pbyData[wLength++] = (BYTE) i;
			//MACvDisableKeyEntry(pDevice, i);
		}
	}
	if ( wLength != 0 ) {
		CONTROLnsRequestOut(pDevice,
				    MESSAGE_TYPE_CLRKEYENTRY,
				    0,
				    0,
				    wLength,
				    pbyData
				    );
	}
}

/*---------------------  Export Functions  --------------------------*/

/*
 * Description: Init Key management table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void KeyvInitTable(void *pDeviceHandler, PSKeyManagement pTable)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	int i;
	int jj;
	BYTE pbyData[MAX_KEY_TABLE+1];

	/* Held across the full table reset plus the device clear message. */
	spin_lock_irq(&pDevice->lock);
	for (i=0;i<MAX_KEY_TABLE;i++) {
		pTable->KeyTable[i].bInUse = FALSE;
		pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
		pTable->KeyTable[i].PairwiseKey.pvKeyTable =
			(void *)&pTable->KeyTable[i];
		for (jj=0; jj < MAX_GROUP_KEY; jj++) {
			pTable->KeyTable[i].GroupKey[jj].bKeyValid = FALSE;
			pTable->KeyTable[i].GroupKey[jj].pvKeyTable =
				(void *) &(pTable->KeyTable[i]);
		}
		pTable->KeyTable[i].wKeyCtl = 0;
		pTable->KeyTable[i].dwGTKeyIndex = 0;
		pTable->KeyTable[i].bSoftWEP = FALSE;
		pbyData[i] = (BYTE) i;
	}
	/* i == MAX_KEY_TABLE here; one extra trailing index byte. */
	pbyData[i] = (BYTE) i;
	/* NOTE(review): length is hardcoded to 11 rather than derived from
	 * MAX_KEY_TABLE / sizeof(pbyData) -- presumably MAX_KEY_TABLE == 11;
	 * verify against key.h before changing. */
	CONTROLnsRequestOut(pDevice,
			    MESSAGE_TYPE_CLRKEYENTRY,
			    0,
			    0,
			    11,
			    pbyData
			    );
	spin_unlock_irq(&pDevice->lock);

	return;
}

/*
 * Description: Get Key from table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *      pbyBSSID        - BSSID of Key
 *      dwKeyIndex      - Key Index (0xFFFFFFFF means pairwise key)
 *  Out:
 *      pKey            - Key return
 *
 * Return Value: TRUE if found otherwise FALSE
 *
 */
BOOL KeybGetKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyIndex,
		PSKeyItem *pKey)
{
	int i;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybGetKey() \n");

	*pKey = NULL;
	for (i=0;i<MAX_KEY_TABLE;i++) {
		if ((pTable->KeyTable[i].bInUse == TRUE) &&
		    !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
			if (dwKeyIndex == 0xFFFFFFFF) {
				/* 0xFFFFFFFF selects the pairwise key */
				if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) {
					*pKey = &(pTable->KeyTable[i].PairwiseKey);
					return (TRUE);
				} else {
					return (FALSE);
				}
			} else if (dwKeyIndex < MAX_GROUP_KEY) {
				if (pTable->KeyTable[i].GroupKey[dwKeyIndex].bKeyValid == TRUE) {
					*pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex]);
					return (TRUE);
				} else {
					return (FALSE);
				}
			} else {
				return (FALSE);
			}
		}
	}
	return (FALSE);
}

/*
 * Description: Set Key to table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *      pbyBSSID        - BSSID of Key
 *      dwKeyIndex      - Key index (reference to NDIS DDK)
 *      uKeyLength      - Key length
 *      KeyRSC          - Key RSC
 *      pbyKey          - Pointer to key
 *  Out:
 *      none
 *
 * Return Value: TRUE if success otherwise FALSE
 *
 * Two nearly identical halves: the first updates an EXISTING entry whose
 * BSSID matches; the second (j-branch) claims the first empty slot found
 * during the same scan.  In both cases the key material is copied, the
 * control word programmed, and the HW entry written via MACvSetKeyEntry.
 */
BOOL KeybSetKey(
	void *pDeviceHandler,
	PSKeyManagement pTable,
	PBYTE pbyBSSID,
	DWORD dwKeyIndex,
	u32 uKeyLength,
	PQWORD pKeyRSC,
	PBYTE pbyKey,
	BYTE byKeyDecMode
	)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	int i,j;
	unsigned int ii;
	PSKeyItem pKey;
	unsigned int uKeyIdx;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"Enter KeybSetKey: %X\n", dwKeyIndex);

	j = (MAX_KEY_TABLE-1);
	for (i=0;i<(MAX_KEY_TABLE-1);i++) {
		if ((pTable->KeyTable[i].bInUse == FALSE) &&
		    (j == (MAX_KEY_TABLE-1))) {
			// found empty table
			j = i;
		}
		if ((pTable->KeyTable[i].bInUse == TRUE) &&
		    !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
			// found table already exist
			if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
				// Pairwise key
				pKey = &(pTable->KeyTable[i].PairwiseKey);
				pTable->KeyTable[i].wKeyCtl &= 0xFFF0;          // clear pairwise key control filed
				pTable->KeyTable[i].wKeyCtl |= byKeyDecMode;
				uKeyIdx = 4;                                    // use HW key entry 4 for pairwise key
			} else {
				// Group key
				if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
					return (FALSE);
				pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
				if ((dwKeyIndex & TRANSMIT_KEY) != 0)  {
					// Group transmit key
					pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"Group transmit key(R)[%X]: %d\n",
						pTable->KeyTable[i].dwGTKeyIndex, i);
				}
				pTable->KeyTable[i].wKeyCtl &= 0xFF0F;          // clear group key control filed
				pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
				pTable->KeyTable[i].wKeyCtl |= 0x0040;          // use group key for group address
				uKeyIdx = (dwKeyIndex & 0x000000FF);
			}
			pTable->KeyTable[i].wKeyCtl |= 0x8000;              // enable on-fly

			pKey->bKeyValid = TRUE;
			pKey->uKeyLength = uKeyLength;
			pKey->dwKeyIndex = dwKeyIndex;
			pKey->byCipherSuite = byKeyDecMode;
			memcpy(pKey->abyKey, pbyKey, uKeyLength);
			if (byKeyDecMode == KEY_CTL_WEP) {
				/* byte 15 top bit distinguishes WEP40/WEP104 */
				if (uKeyLength == WLAN_WEP40_KEYLEN)
					pKey->abyKey[15] &= 0x7F;
				if (uKeyLength == WLAN_WEP104_KEYLEN)
					pKey->abyKey[15] |= 0x80;
			}
			MACvSetKeyEntry(pDevice, pTable->KeyTable[i].wKeyCtl,
					i, uKeyIdx, pbyBSSID,
					(PDWORD)pKey->abyKey);

			if ((dwKeyIndex & USE_KEYRSC) == 0) {
				// RSC set by NIC
				memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
			} else {
				memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
			}
			pKey->dwTSC47_16 = 0;
			pKey->wTSC15_0 = 0;

			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(R): \n");
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"pKey->bKeyValid: %d\n ", pKey->bKeyValid);
			//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->uKeyLength: %d\n ", pKey->uKeyLength);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: ");
			for (ii = 0; ii < pKey->uKeyLength; ii++) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
					"%02x ", pKey->abyKey[ii]);
			}
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");

			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"pKey->dwTSC47_16: %x\n ", pKey->dwTSC47_16);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"pKey->dwKeyIndex: %x\n ", pKey->dwKeyIndex);

			return (TRUE);
		}
	}
	if (j < (MAX_KEY_TABLE-1)) {
		/* No existing entry matched; claim empty slot j. */
		memcpy(pTable->KeyTable[j].abyBSSID, pbyBSSID, ETH_ALEN);
		pTable->KeyTable[j].bInUse = TRUE;
		if ((dwKeyIndex & PAIRWISE_KEY) != 0)  {
			// Pairwise key
			pKey = &(pTable->KeyTable[j].PairwiseKey);
			pTable->KeyTable[j].wKeyCtl &= 0xFFF0;          // clear pairwise key control filed
			pTable->KeyTable[j].wKeyCtl |= byKeyDecMode;
			uKeyIdx = 4;                                    // use HW key entry 4 for pairwise key
		} else {
			// Group key
			if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY)
				return (FALSE);
			pKey = &(pTable->KeyTable[j].GroupKey[dwKeyIndex & 0x000000FF]);
			if ((dwKeyIndex & TRANSMIT_KEY) != 0)  {
				// Group transmit key
				pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
					"Group transmit key(N)[%X]: %d\n",
					pTable->KeyTable[j].dwGTKeyIndex, j);
			}
			pTable->KeyTable[j].wKeyCtl &= 0xFF0F;          // clear group key control filed
			pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
			pTable->KeyTable[j].wKeyCtl |= 0x0040;          // use group key for group address
			uKeyIdx = (dwKeyIndex & 0x000000FF);
		}
		pTable->KeyTable[j].wKeyCtl |= 0x8000;              // enable on-fly

		pKey->bKeyValid = TRUE;
		pKey->uKeyLength = uKeyLength;
		pKey->dwKeyIndex = dwKeyIndex;
		pKey->byCipherSuite = byKeyDecMode;
		memcpy(pKey->abyKey, pbyKey, uKeyLength);
		if (byKeyDecMode == KEY_CTL_WEP) {
			if (uKeyLength == WLAN_WEP40_KEYLEN)
				pKey->abyKey[15] &= 0x7F;
			if (uKeyLength == WLAN_WEP104_KEYLEN)
				pKey->abyKey[15] |= 0x80;
		}
		MACvSetKeyEntry(pDevice, pTable->KeyTable[j].wKeyCtl, j,
				uKeyIdx, pbyBSSID, (PDWORD)pKey->abyKey);

		if ((dwKeyIndex & USE_KEYRSC) == 0) {
			// RSC set by NIC
			memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
		} else {
			memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
		}
		pKey->dwTSC47_16 = 0;
		pKey->wTSC15_0 = 0;

		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(N): \n");
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"pKey->bKeyValid: %d\n ", pKey->bKeyValid);
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"pKey->uKeyLength: %d\n ", (int)pKey->uKeyLength);
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: ");
		for (ii = 0; ii < pKey->uKeyLength; ii++) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"%02x ", pKey->abyKey[ii]);
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");

		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"pKey->dwTSC47_16: %x\n ", pKey->dwTSC47_16);
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"pKey->dwKeyIndex: %x\n ", pKey->dwKeyIndex);

		return (TRUE);
	}
	return (FALSE);
}

/*
 * Description: Remove Key from table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *      pbyBSSID        - BSSID of Key
 *      dwKeyIndex      - Key Index (reference to NDIS DDK)
 *  Out:
 *      none
 *
 * Return Value: TRUE if success otherwise FALSE
 *
 * A broadcast BSSID removes the key from every entry; otherwise only
 * the matching entry is touched.  Reclaims emptied slots via
 * s_vCheckKeyTableValid() before returning.
 */
BOOL KeybRemoveKey(
	void *pDeviceHandler,
	PSKeyManagement pTable,
	PBYTE pbyBSSID,
	DWORD dwKeyIndex
	)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	int i;
	BOOL bReturnValue = FALSE;

	if (is_broadcast_ether_addr(pbyBSSID)) {
		// dealte all key
		if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
			for (i=0;i<MAX_KEY_TABLE;i++) {
				pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
			}
			bReturnValue =  TRUE;
		}
		else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
			for (i=0;i<MAX_KEY_TABLE;i++) {
				pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
				if ((dwKeyIndex & 0x7FFFFFFF) ==
				    (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
					// remove Group transmit key
					pTable->KeyTable[i].dwGTKeyIndex = 0;
				}
			}
			bReturnValue = TRUE;
		}
		else {
			bReturnValue = FALSE;
		}
	} else {
		for (i=0;i<MAX_KEY_TABLE;i++) {
			if ( (pTable->KeyTable[i].bInUse == TRUE) &&
			     !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
				if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
					pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
					bReturnValue = TRUE;
					break;
				}
				else if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
					pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
					if ((dwKeyIndex & 0x7FFFFFFF) ==
					    (pTable->KeyTable[i].dwGTKeyIndex & 0x7FFFFFFF)) {
						// remove Group transmit key
						pTable->KeyTable[i].dwGTKeyIndex = 0;
					}
					bReturnValue = TRUE;
					break;
				}
				else {
					bReturnValue = FALSE;
					break;
				}
			} //pTable->KeyTable[i].bInUse == TRUE
		} //for
		/* NOTE(review): this overwrites whatever the loop decided,
		 * so a no-match (or even the FALSE break above) still reports
		 * TRUE -- looks unintentional; confirm before relying on the
		 * return value of this path. */
		bReturnValue = TRUE;
	}

	s_vCheckKeyTableValid(pDevice,pTable);
	return bReturnValue;
}

/*
 * Description: Remove Key from table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *      pbyBSSID        - BSSID of Key
 *  Out:
 *      none
 *
 * Return Value: TRUE if success otherwise FALSE
 *
 * Invalidates the pairwise key and every group key of the (single)
 * entry matching @pbyBSSID, then reclaims the slot.
 */
BOOL KeybRemoveAllKey(
	void *pDeviceHandler,
	PSKeyManagement pTable,
	PBYTE pbyBSSID
	)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	int i,u;

	for (i=0;i<MAX_KEY_TABLE;i++) {
		if ((pTable->KeyTable[i].bInUse == TRUE) &&
		    !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {
			pTable->KeyTable[i].PairwiseKey.bKeyValid = FALSE;
			for (u = 0; u < MAX_GROUP_KEY; u++)
				pTable->KeyTable[i].GroupKey[u].bKeyValid = FALSE;
			pTable->KeyTable[i].dwGTKeyIndex = 0;
			s_vCheckKeyTableValid(pDevice, pTable);
			return (TRUE);
		}
	}
	return (FALSE);
}

/*
 * Description: Remove WEP Key from table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *  Out:
 *      none
 *
 * Return Value: TRUE if success otherwise FALSE
 *
 * WEP default keys live in the last table entry (MAX_KEY_TABLE-1);
 * only a group key whose cipher suite is KEY_CTL_WEP is invalidated.
 */
void KeyvRemoveWEPKey(
	void *pDeviceHandler,
	PSKeyManagement pTable,
	DWORD dwKeyIndex
	)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;

	if ((dwKeyIndex & 0x000000FF) < MAX_GROUP_KEY) {
		if (pTable->KeyTable[MAX_KEY_TABLE-1].bInUse == TRUE) {
			if (pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].byCipherSuite == KEY_CTL_WEP) {
				pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF].bKeyValid = FALSE;
				if ((dwKeyIndex & 0x7FFFFFFF) ==
				    (pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex & 0x7FFFFFFF)) {
					// remove Group transmit key
					pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = 0;
				}
			}
		}
		s_vCheckKeyTableValid(pDevice, pTable);
	}
	return;
}

/* Remove every WEP default key (indices 0..MAX_GROUP_KEY-1). */
void KeyvRemoveAllWEPKey(void *pDeviceHandler, PSKeyManagement pTable)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	int i;

	for (i = 0; i < MAX_GROUP_KEY; i++)
		KeyvRemoveWEPKey(pDevice, pTable, i);
}

/*
 * Description: Get Transmit Key from table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *      pbyBSSID        - BSSID of Key
 *  Out:
 *      pKey            - Key return
 *
 * Return Value: TRUE if found otherwise FALSE
 *
 * For PAIRWISE_KEY returns the entry's pairwise key; otherwise returns
 * the group transmit key selected by the entry's stored dwGTKeyIndex.
 */
BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
			PSKeyItem *pKey)
{
	int i, ii;

	*pKey = NULL;
	for (i = 0; i < MAX_KEY_TABLE; i++) {
		if ((pTable->KeyTable[i].bInUse == TRUE) &&
		    !compare_ether_addr(pTable->KeyTable[i].abyBSSID, pbyBSSID)) {

			if (dwKeyType == PAIRWISE_KEY) {

				if (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE) {
					*pKey = &(pTable->KeyTable[i].PairwiseKey);

					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"KeybGetTransmitKey:");
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"PAIRWISE_KEY: KeyTable.abyBSSID: ");
					for (ii = 0; ii < 6; ii++) {
						DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
							"%x ",
							pTable->KeyTable[i].abyBSSID[ii]);
					}
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");

					return (TRUE);
				}
				else {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"PairwiseKey.bKeyValid == FALSE\n");
					return (FALSE);
				}
			} // End of Type == PAIRWISE
			else {
				if (pTable->KeyTable[i].dwGTKeyIndex == 0) {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"ERROR: dwGTKeyIndex == 0 !!!\n");
					return FALSE;
				}
				if (pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)].bKeyValid == TRUE) {
					*pKey = &(pTable->KeyTable[i].GroupKey[(pTable->KeyTable[i].dwGTKeyIndex&0x000000FF)]);

					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"KeybGetTransmitKey:");
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"GROUP_KEY: KeyTable.abyBSSID\n");
					for (ii = 0; ii < 6; ii++) {
						DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
							"%x ",
							pTable->KeyTable[i].abyBSSID[ii]);
					}
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"dwGTKeyIndex: %X\n",
						pTable->KeyTable[i].dwGTKeyIndex);

					return (TRUE);
				}
				else {
					DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
						"GroupKey.bKeyValid == FALSE\n");
					return (FALSE);
				}
			} // End of Type = GROUP
		} // BSSID match
	}
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ERROR: NO Match BSSID !!! ");
	for (ii = 0; ii < 6; ii++) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%02x ", *(pbyBSSID+ii));
	}
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
	return (FALSE);
}

/*
 * Description: Check Pairewise Key
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *  Out:
 *      none
 *
 * Return Value: TRUE if found otherwise FALSE
 *
 * Returns the first valid pairwise key anywhere in the table.
 */
BOOL KeybCheckPairewiseKey(PSKeyManagement pTable, PSKeyItem *pKey)
{
	int i;

	*pKey = NULL;
	for (i=0;i<MAX_KEY_TABLE;i++) {
		if ((pTable->KeyTable[i].bInUse == TRUE) &&
		    (pTable->KeyTable[i].PairwiseKey.bKeyValid == TRUE)) {
			*pKey = &(pTable->KeyTable[i].PairwiseKey);
			return (TRUE);
		}
	}
	return (FALSE);
}

/*
 * Description: Set Key to table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *      dwKeyIndex      - Key index (reference to NDIS DDK)
 *      uKeyLength      - Key length
 *      KeyRSC          - Key RSC
 *      pbyKey          - Pointer to key
 *  Out:
 *      none
 *
 * Return Value: TRUE if success otherwise FALSE
 *
 * Installs a default (group) key into the reserved last table entry
 * with a broadcast BSSID.  Pairwise key indices are rejected.
 */
BOOL KeybSetDefaultKey(
	void *pDeviceHandler,
	PSKeyManagement pTable,
	DWORD dwKeyIndex,
	u32 uKeyLength,
	PQWORD pKeyRSC,
	PBYTE pbyKey,
	BYTE byKeyDecMode
	)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	unsigned int ii;
	PSKeyItem pKey;
	unsigned int uKeyIdx;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"Enter KeybSetDefaultKey: %1x, %d\n",
		(int) dwKeyIndex, (int) uKeyLength);

	if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
		// Pairwise key
		return (FALSE);
	} else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) {
		return (FALSE);
	}

	/* Reject oversized keys before the memcpy into abyKey below. */
	if (uKeyLength > MAX_KEY_LEN)
		return false;

	pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
	for (ii = 0; ii < ETH_ALEN; ii++)
		pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;

	// Group key
	pKey = &(pTable->KeyTable[MAX_KEY_TABLE-1].GroupKey[dwKeyIndex & 0x000000FF]);
	if ((dwKeyIndex & TRANSMIT_KEY) != 0)  {
		// Group transmit key
		pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
			"Group transmit key(R)[%X]: %d\n",
			pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
			MAX_KEY_TABLE-1);
	}
	pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00;          // clear all key control filed
	pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode << 4);
	pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= (byKeyDecMode);
	pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x0044;          // use group key for all address
	uKeyIdx = (dwKeyIndex & 0x000000FF);

	/* 232-bit WEP cannot be handled by the on-fly engine: fall back to
	 * software WEP and disable hardware address matching. */
	if ((uKeyLength == WLAN_WEP232_KEYLEN) &&
	    (byKeyDecMode == KEY_CTL_WEP)) {
		pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0x4000;              // disable on-fly disable address match
		pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP = TRUE;
	} else {
		if (pTable->KeyTable[MAX_KEY_TABLE-1].bSoftWEP == FALSE)
			pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl |= 0xC000;      // enable on-fly disable address match
	}

	pKey->bKeyValid = TRUE;
	pKey->uKeyLength = uKeyLength;
	pKey->dwKeyIndex = dwKeyIndex;
	pKey->byCipherSuite = byKeyDecMode;
	memcpy(pKey->abyKey, pbyKey, uKeyLength);
	if (byKeyDecMode == KEY_CTL_WEP) {
		if (uKeyLength == WLAN_WEP40_KEYLEN)
			pKey->abyKey[15] &= 0x7F;
		if (uKeyLength == WLAN_WEP104_KEYLEN)
			pKey->abyKey[15] |= 0x80;
	}
	MACvSetKeyEntry(pDevice, pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl,
			MAX_KEY_TABLE-1, uKeyIdx,
			pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID,
			(PDWORD) pKey->abyKey);

	if ((dwKeyIndex & USE_KEYRSC) == 0) {
		// RSC set by NIC
		memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
	} else {
		memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
	}
	pKey->dwTSC47_16 = 0;
	pKey->wTSC15_0 = 0;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(R): \n");
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"pKey->bKeyValid: %d\n", pKey->bKeyValid);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"pKey->uKeyLength: %d\n", (int)pKey->uKeyLength);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: \n");
	for (ii = 0; ii < pKey->uKeyLength; ii++) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x", pKey->abyKey[ii]);
	}
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"pKey->dwTSC47_16: %x\n", pKey->dwTSC47_16);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"pKey->dwKeyIndex: %x\n", pKey->dwKeyIndex);

	return (TRUE);
}

/*
 * Description: Set Key to table
 *
 * Parameters:
 *  In:
 *      pTable          - Pointer to Key table
 *      dwKeyIndex      - Key index (reference to NDIS DDK)
 *      uKeyLength      - Key length
 *      KeyRSC          - Key RSC
 *      pbyKey          - Pointer to key
 *  Out:
 *      none
 *
 * Return Value: TRUE if success otherwise FALSE
 *
 * Installs the same group key into EVERY in-use entry (all associated
 * stations), excluding the reserved default-key slot.
 */
BOOL KeybSetAllGroupKey(
	void *pDeviceHandler,
	PSKeyManagement pTable,
	DWORD dwKeyIndex,
	u32 uKeyLength,
	PQWORD pKeyRSC,
	PBYTE pbyKey,
	BYTE byKeyDecMode
	)
{
	PSDevice pDevice = (PSDevice) pDeviceHandler;
	int i;
	unsigned int ii;
	PSKeyItem pKey;
	unsigned int uKeyIdx;

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
		"Enter KeybSetAllGroupKey: %X\n", dwKeyIndex);

	if ((dwKeyIndex & PAIRWISE_KEY) != 0) {
		// Pairwise key
		return (FALSE);
	} else if ((dwKeyIndex & 0x000000FF) >= MAX_GROUP_KEY) {
		return (FALSE);
	}

	for (i=0; i < MAX_KEY_TABLE-1; i++) {
		if (pTable->KeyTable[i].bInUse == TRUE) {
			// found table already exist
			// Group key
			pKey = &(pTable->KeyTable[i].GroupKey[dwKeyIndex & 0x000000FF]);
			if ((dwKeyIndex & TRANSMIT_KEY) != 0)  {
				// Group transmit key
				pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
					"Group transmit key(R)[%X]: %d\n",
					pTable->KeyTable[i].dwGTKeyIndex, i);
			}
			pTable->KeyTable[i].wKeyCtl &= 0xFF0F;          // clear group key control filed
			pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
			pTable->KeyTable[i].wKeyCtl |= 0x0040;          // use group key for group address
			uKeyIdx = (dwKeyIndex & 0x000000FF);

			pTable->KeyTable[i].wKeyCtl |= 0x8000;          // enable on-fly

			pKey->bKeyValid = TRUE;
			pKey->uKeyLength = uKeyLength;
			pKey->dwKeyIndex = dwKeyIndex;
			pKey->byCipherSuite = byKeyDecMode;
			memcpy(pKey->abyKey, pbyKey, uKeyLength);
			if (byKeyDecMode == KEY_CTL_WEP) {
				if (uKeyLength == WLAN_WEP40_KEYLEN)
					pKey->abyKey[15] &= 0x7F;
				if (uKeyLength == WLAN_WEP104_KEYLEN)
					pKey->abyKey[15] |= 0x80;
			}
			MACvSetKeyEntry(pDevice, pTable->KeyTable[i].wKeyCtl,
					i, uKeyIdx,
					pTable->KeyTable[i].abyBSSID,
					(PDWORD) pKey->abyKey);

			if ((dwKeyIndex & USE_KEYRSC) == 0) {
				// RSC set by NIC
				memset(&(pKey->KeyRSC), 0, sizeof(QWORD));
			} else {
				memcpy(&(pKey->KeyRSC), pKeyRSC, sizeof(QWORD));
			}
			pKey->dwTSC47_16 = 0;
			pKey->wTSC15_0 = 0;

			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"KeybSetKey(R): \n");
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"pKey->bKeyValid: %d\n ", pKey->bKeyValid);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
				"pKey->uKeyLength: %d\n ", (int)pKey->uKeyLength);
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->abyKey: ");
			for (ii = 0; ii < pKey->uKeyLength; ii++) {
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
					"%02x ", pKey->abyKey[ii]);
			}
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");

			//DBG_PRN_GRP12(("pKey->dwTSC47_16: %lX\n ", pKey->dwTSC47_16));
			//DBG_PRN_GRP12(("pKey->wTSC15_0: %X\n ", pKey->wTSC15_0));
			//DBG_PRN_GRP12(("pKey->dwKeyIndex: %lX\n ", pKey->dwKeyIndex));

		} // (pTable->KeyTable[i].bInUse == TRUE)
	}
	return (TRUE);
}
gpl-2.0
djvoleur/V_920P_BOF7
drivers/acpi/acpica/utresrc.c
2299
21565
/******************************************************************************* * * Module Name: utresrc - Resource management utilities * ******************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utresrc") #if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER) /* * Strings used to decode resource descriptors. * Used by both the disassembler and the debugger resource dump routines */ const char *acpi_gbl_bm_decode[] = { "NotBusMaster", "BusMaster" }; const char *acpi_gbl_config_decode[] = { "0 - Good Configuration", "1 - Acceptable Configuration", "2 - Suboptimal Configuration", "3 - ***Invalid Configuration***", }; const char *acpi_gbl_consume_decode[] = { "ResourceProducer", "ResourceConsumer" }; const char *acpi_gbl_dec_decode[] = { "PosDecode", "SubDecode" }; const char *acpi_gbl_he_decode[] = { "Level", "Edge" }; const char *acpi_gbl_io_decode[] = { "Decode10", "Decode16" }; const char *acpi_gbl_ll_decode[] = { "ActiveHigh", "ActiveLow" }; const char *acpi_gbl_max_decode[] = { "MaxNotFixed", "MaxFixed" }; const char *acpi_gbl_mem_decode[] = { "NonCacheable", "Cacheable", "WriteCombining", "Prefetchable" }; const char *acpi_gbl_min_decode[] = { "MinNotFixed", "MinFixed" }; const char *acpi_gbl_mtp_decode[] = { "AddressRangeMemory", "AddressRangeReserved", "AddressRangeACPI", "AddressRangeNVS" }; const char *acpi_gbl_rng_decode[] = { "InvalidRanges", "NonISAOnlyRanges", "ISAOnlyRanges", "EntireRange" }; const char *acpi_gbl_rw_decode[] = { "ReadOnly", "ReadWrite" }; const char *acpi_gbl_shr_decode[] = { "Exclusive", "Shared", "ExclusiveAndWake", 
/* ACPI 5.0 */ "SharedAndWake" /* ACPI 5.0 */ }; const char *acpi_gbl_siz_decode[] = { "Transfer8", "Transfer8_16", "Transfer16", "InvalidSize" }; const char *acpi_gbl_trs_decode[] = { "DenseTranslation", "SparseTranslation" }; const char *acpi_gbl_ttp_decode[] = { "TypeStatic", "TypeTranslation" }; const char *acpi_gbl_typ_decode[] = { "Compatibility", "TypeA", "TypeB", "TypeF" }; const char *acpi_gbl_ppc_decode[] = { "PullDefault", "PullUp", "PullDown", "PullNone" }; const char *acpi_gbl_ior_decode[] = { "IoRestrictionNone", "IoRestrictionInputOnly", "IoRestrictionOutputOnly", "IoRestrictionNoneAndPreserve" }; const char *acpi_gbl_dts_decode[] = { "Width8bit", "Width16bit", "Width32bit", "Width64bit", "Width128bit", "Width256bit", }; /* GPIO connection type */ const char *acpi_gbl_ct_decode[] = { "Interrupt", "I/O" }; /* Serial bus type */ const char *acpi_gbl_sbt_decode[] = { "/* UNKNOWN serial bus type */", "I2C", "SPI", "UART" }; /* I2C serial bus access mode */ const char *acpi_gbl_am_decode[] = { "AddressingMode7Bit", "AddressingMode10Bit" }; /* I2C serial bus slave mode */ const char *acpi_gbl_sm_decode[] = { "ControllerInitiated", "DeviceInitiated" }; /* SPI serial bus wire mode */ const char *acpi_gbl_wm_decode[] = { "FourWireMode", "ThreeWireMode" }; /* SPI serial clock phase */ const char *acpi_gbl_cph_decode[] = { "ClockPhaseFirst", "ClockPhaseSecond" }; /* SPI serial bus clock polarity */ const char *acpi_gbl_cpo_decode[] = { "ClockPolarityLow", "ClockPolarityHigh" }; /* SPI serial bus device polarity */ const char *acpi_gbl_dp_decode[] = { "PolarityLow", "PolarityHigh" }; /* UART serial bus endian */ const char *acpi_gbl_ed_decode[] = { "LittleEndian", "BigEndian" }; /* UART serial bus bits per byte */ const char *acpi_gbl_bpb_decode[] = { "DataBitsFive", "DataBitsSix", "DataBitsSeven", "DataBitsEight", "DataBitsNine", "/* UNKNOWN Bits per byte */", "/* UNKNOWN Bits per byte */", "/* UNKNOWN Bits per byte */" }; /* UART serial bus stop bits */ const 
char *acpi_gbl_sb_decode[] = { "StopBitsNone", "StopBitsOne", "StopBitsOnePlusHalf", "StopBitsTwo" }; /* UART serial bus flow control */ const char *acpi_gbl_fc_decode[] = { "FlowControlNone", "FlowControlHardware", "FlowControlXON", "/* UNKNOWN flow control keyword */" }; /* UART serial bus parity type */ const char *acpi_gbl_pt_decode[] = { "ParityTypeNone", "ParityTypeEven", "ParityTypeOdd", "ParityTypeMark", "ParityTypeSpace", "/* UNKNOWN parity keyword */", "/* UNKNOWN parity keyword */", "/* UNKNOWN parity keyword */" }; #endif /* * Base sizes of the raw AML resource descriptors, indexed by resource type. * Zero indicates a reserved (and therefore invalid) resource type. */ const u8 acpi_gbl_resource_aml_sizes[] = { /* Small descriptors */ 0, 0, 0, 0, ACPI_AML_SIZE_SMALL(struct aml_resource_irq), ACPI_AML_SIZE_SMALL(struct aml_resource_dma), ACPI_AML_SIZE_SMALL(struct aml_resource_start_dependent), ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent), ACPI_AML_SIZE_SMALL(struct aml_resource_io), ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io), ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma), 0, 0, 0, ACPI_AML_SIZE_SMALL(struct aml_resource_vendor_small), ACPI_AML_SIZE_SMALL(struct aml_resource_end_tag), /* Large descriptors */ 0, ACPI_AML_SIZE_LARGE(struct aml_resource_memory24), ACPI_AML_SIZE_LARGE(struct aml_resource_generic_register), 0, ACPI_AML_SIZE_LARGE(struct aml_resource_vendor_large), ACPI_AML_SIZE_LARGE(struct aml_resource_memory32), ACPI_AML_SIZE_LARGE(struct aml_resource_fixed_memory32), ACPI_AML_SIZE_LARGE(struct aml_resource_address32), ACPI_AML_SIZE_LARGE(struct aml_resource_address16), ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq), ACPI_AML_SIZE_LARGE(struct aml_resource_address64), ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64), ACPI_AML_SIZE_LARGE(struct aml_resource_gpio), 0, ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus), }; const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = { 0, 
ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus), ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus), ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus), }; /* * Resource types, used to validate the resource length field. * The length of fixed-length types must match exactly, variable * lengths must meet the minimum required length, etc. * Zero indicates a reserved (and therefore invalid) resource type. */ static const u8 acpi_gbl_resource_types[] = { /* Small descriptors */ 0, 0, 0, 0, ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */ ACPI_FIXED_LENGTH, /* 05 DMA */ ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */ ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */ ACPI_FIXED_LENGTH, /* 08 IO */ ACPI_FIXED_LENGTH, /* 09 fixed_IO */ ACPI_FIXED_LENGTH, /* 0A fixed_DMA */ 0, 0, 0, ACPI_VARIABLE_LENGTH, /* 0E vendor_short */ ACPI_FIXED_LENGTH, /* 0F end_tag */ /* Large descriptors */ 0, ACPI_FIXED_LENGTH, /* 01 Memory24 */ ACPI_FIXED_LENGTH, /* 02 generic_register */ 0, ACPI_VARIABLE_LENGTH, /* 04 vendor_long */ ACPI_FIXED_LENGTH, /* 05 Memory32 */ ACPI_FIXED_LENGTH, /* 06 memory32_fixed */ ACPI_VARIABLE_LENGTH, /* 07 Dword* address */ ACPI_VARIABLE_LENGTH, /* 08 Word* address */ ACPI_VARIABLE_LENGTH, /* 09 extended_IRQ */ ACPI_VARIABLE_LENGTH, /* 0A Qword* address */ ACPI_FIXED_LENGTH, /* 0B Extended* address */ ACPI_VARIABLE_LENGTH, /* 0C Gpio* */ 0, ACPI_VARIABLE_LENGTH /* 0E *serial_bus */ }; /******************************************************************************* * * FUNCTION: acpi_ut_walk_aml_resources * * PARAMETERS: walk_state - Current walk info * PARAMETERS: aml - Pointer to the raw AML resource template * aml_length - Length of the entire template * user_function - Called once for each descriptor found. If * NULL, a pointer to the end_tag is returned * context - Passed to user_function * * RETURN: Status * * DESCRIPTION: Walk a raw AML resource list(buffer). User function called * once for each resource found. 
* ******************************************************************************/

acpi_status
acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
			   u8 *aml,
			   acpi_size aml_length,
			   acpi_walk_aml_callback user_function, void **context)
{
	acpi_status status;
	u8 *end_aml;
	u8 resource_index;
	u32 length;
	u32 offset = 0;
	u8 end_tag[2] = { 0x79, 0x00 };	/* Raw AML for an end_tag descriptor */

	ACPI_FUNCTION_TRACE(ut_walk_aml_resources);

	/* The absolute minimum resource template is one end_tag descriptor */

	if (aml_length < sizeof(struct aml_resource_end_tag)) {
		return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
	}

	/* Point to the end of the resource template buffer */

	end_aml = aml + aml_length;

	/* Walk the byte list, abort on any invalid descriptor type or length */

	while (aml < end_aml) {

		/* Validate the Resource Type and Resource Length */

		status =
		    acpi_ut_validate_resource(walk_state, aml, &resource_index);
		if (ACPI_FAILURE(status)) {
			/*
			 * Exit on failure. Cannot continue because the descriptor length
			 * may be bogus also.
			 */
			return_ACPI_STATUS(status);
		}

		/* Get the length of this descriptor (header + resource length) */

		length = acpi_ut_get_descriptor_length(aml);

		/* Invoke the user function */

		if (user_function) {
			status =
			    user_function(aml, length, offset, resource_index,
					  context);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/* An end_tag descriptor terminates this resource template */

		if (acpi_ut_get_resource_type(aml) ==
		    ACPI_RESOURCE_NAME_END_TAG) {
			/*
			 * There must be at least one more byte in the buffer for
			 * the 2nd byte of the end_tag
			 */
			if ((aml + 1) >= end_aml) {
				return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
			}

			/* Return the pointer to the end_tag if requested */

			if (!user_function) {
				*context = aml;
			}

			/* Normal exit */

			return_ACPI_STATUS(AE_OK);
		}

		aml += length;
		offset += length;
	}

	/* Did not find an end_tag descriptor */

	if (user_function) {

		/* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */

		(void)acpi_ut_validate_resource(walk_state, end_tag,
						&resource_index);
		status =
		    user_function(end_tag, 2, offset, resource_index, context);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_validate_resource
 *
 * PARAMETERS:  walk_state          - Current walk info
 *              aml                 - Pointer to the raw AML resource descriptor
 *              return_index        - Where the resource index is returned. NULL
 *                                    if the index is not required.
 *
 * RETURN:      Status, and optionally the Index into the global resource tables
 *
 * DESCRIPTION: Validate an AML resource descriptor by checking the Resource
 *              Type and Resource Length. Returns an index into the global
 *              resource information/dispatch tables for later use.
 *
 ******************************************************************************/

acpi_status
acpi_ut_validate_resource(struct acpi_walk_state *walk_state,
			  void *aml, u8 *return_index)
{
	union aml_resource *aml_resource;
	u8 resource_type;
	u8 resource_index;
	acpi_rs_length resource_length;
	acpi_rs_length minimum_resource_length;

	ACPI_FUNCTION_ENTRY();

	/*
	 * 1) Validate the resource_type field (Byte 0)
	 */
	resource_type = ACPI_GET8(aml);

	/*
	 * Byte 0 contains the descriptor name (Resource Type)
	 * Examine the large/small bit in the resource header
	 */
	if (resource_type & ACPI_RESOURCE_NAME_LARGE) {

		/* Verify the large resource type (name) against the max */

		if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
			goto invalid_resource;
		}

		/*
		 * Large Resource Type -- bits 6:0 contain the name
		 * Translate range 0x80-0x8B to index range 0x10-0x1B
		 */
		resource_index = (u8) (resource_type - 0x70);
	} else {
		/*
		 * Small Resource Type -- bits 6:3 contain the name
		 * Shift range to index range 0x00-0x0F
		 */
		resource_index = (u8)
		    ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
	}

	/*
	 * Check validity of the resource type, via acpi_gbl_resource_types.
	 * Zero indicates an invalid resource.
	 */
	if (!acpi_gbl_resource_types[resource_index]) {
		goto invalid_resource;
	}

	/*
	 * Validate the resource_length field. This ensures that the length
	 * is at least reasonable, and guarantees that it is non-zero.
	 */
	resource_length = acpi_ut_get_resource_length(aml);
	minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];

	/* Validate based upon the type of resource - fixed length or variable */

	switch (acpi_gbl_resource_types[resource_index]) {
	case ACPI_FIXED_LENGTH:

		/* Fixed length resource, length must match exactly */

		if (resource_length != minimum_resource_length) {
			goto bad_resource_length;
		}
		break;

	case ACPI_VARIABLE_LENGTH:

		/* Variable length resource, length must be at least the minimum */

		if (resource_length < minimum_resource_length) {
			goto bad_resource_length;
		}
		break;

	case ACPI_SMALL_VARIABLE_LENGTH:

		/* Small variable length resource, length can be (Min) or (Min-1) */

		if ((resource_length > minimum_resource_length) ||
		    (resource_length < (minimum_resource_length - 1))) {
			goto bad_resource_length;
		}
		break;

	default:

		/* Shouldn't happen (because of validation earlier), but be sure */

		goto invalid_resource;
	}

	/* SerialBus descriptors need an extra check on their bus_type field */

	aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
	if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {

		/* Validate the bus_type field */

		if ((aml_resource->common_serial_bus.type == 0) ||
		    (aml_resource->common_serial_bus.type >
		     AML_RESOURCE_MAX_SERIALBUSTYPE)) {
			if (walk_state) {
				ACPI_ERROR((AE_INFO,
					    "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
					    aml_resource->common_serial_bus.
					    type));
			}
			return (AE_AML_INVALID_RESOURCE_TYPE);
		}
	}

	/* Optionally return the resource table index */

	if (return_index) {
		*return_index = resource_index;
	}

	return (AE_OK);

invalid_resource:

	if (walk_state) {
		ACPI_ERROR((AE_INFO,
			    "Invalid/unsupported resource descriptor: Type 0x%2.2X",
			    resource_type));
	}
	return (AE_AML_INVALID_RESOURCE_TYPE);

bad_resource_length:

	if (walk_state) {
		ACPI_ERROR((AE_INFO,
			    "Invalid resource descriptor length: Type "
			    "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
			    resource_type, resource_length,
			    minimum_resource_length));
	}
	return (AE_AML_BAD_RESOURCE_LENGTH);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_resource_type
 *
 * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
 *
 * RETURN:      The Resource Type with no extraneous bits (except the
 *              Large/Small descriptor bit -- this is left alone)
 *
 * DESCRIPTION: Extract the Resource Type/Name from the first byte of
 *              a resource descriptor.
 *
 ******************************************************************************/

u8 acpi_ut_get_resource_type(void *aml)
{
	ACPI_FUNCTION_ENTRY();

	/*
	 * Byte 0 contains the descriptor name (Resource Type)
	 * Examine the large/small bit in the resource header
	 */
	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {

		/* Large Resource Type -- bits 6:0 contain the name */

		return (ACPI_GET8(aml));
	} else {
		/* Small Resource Type -- bits 6:3 contain the name */

		return ((u8) (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_SMALL_MASK));
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_resource_length
 *
 * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
 *
 * RETURN:      Byte Length
 *
 * DESCRIPTION: Get the "Resource Length" of a raw AML descriptor. By
 *              definition, this does not include the size of the descriptor
 *              header or the length field itself.
 *
 ******************************************************************************/

u16 acpi_ut_get_resource_length(void *aml)
{
	acpi_rs_length resource_length;

	ACPI_FUNCTION_ENTRY();

	/*
	 * Byte 0 contains the descriptor name (Resource Type)
	 * Examine the large/small bit in the resource header
	 */
	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {

		/* Large Resource type -- bytes 1-2 contain the 16-bit length */

		ACPI_MOVE_16_TO_16(&resource_length, ACPI_ADD_PTR(u8, aml, 1));

	} else {
		/* Small Resource type -- bits 2:0 of byte 0 contain the length */

		resource_length = (u16) (ACPI_GET8(aml) &
					 ACPI_RESOURCE_NAME_SMALL_LENGTH_MASK);
	}

	return (resource_length);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_resource_header_length
 *
 * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
 *
 * RETURN:      Length of the AML header (depends on large/small descriptor)
 *
 * DESCRIPTION: Get the length of the header for this resource.
 *
 ******************************************************************************/

u8 acpi_ut_get_resource_header_length(void *aml)
{
	ACPI_FUNCTION_ENTRY();

	/* Examine the large/small bit in the resource header */

	if (ACPI_GET8(aml) & ACPI_RESOURCE_NAME_LARGE) {
		return (sizeof(struct aml_resource_large_header));
	} else {
		return (sizeof(struct aml_resource_small_header));
	}
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_descriptor_length
 *
 * PARAMETERS:  aml             - Pointer to the raw AML resource descriptor
 *
 * RETURN:      Byte length
 *
 * DESCRIPTION: Get the total byte length of a raw AML descriptor, including the
 *              length of the descriptor header and the length field itself.
 *              Used to walk descriptor lists.
 *
 ******************************************************************************/

u32 acpi_ut_get_descriptor_length(void *aml)
{
	ACPI_FUNCTION_ENTRY();

	/*
	 * Get the Resource Length (does not include header length) and add
	 * the header length (depends on if this is a small or large resource)
	 */
	return (acpi_ut_get_resource_length(aml) +
		acpi_ut_get_resource_header_length(aml));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_resource_end_tag
 *
 * PARAMETERS:  obj_desc        - The resource template buffer object
 *              end_tag         - Where the pointer to the end_tag is returned
 *
 * RETURN:      Status, pointer to the end tag
 *
 * DESCRIPTION: Find the end_tag resource descriptor in an AML resource template
 *              Note: allows a buffer length of zero.
 *
 ******************************************************************************/

acpi_status
acpi_ut_get_resource_end_tag(union acpi_operand_object *obj_desc, u8 **end_tag)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_get_resource_end_tag);

	/* Allow a buffer length of zero */

	if (!obj_desc->buffer.length) {
		*end_tag = obj_desc->buffer.pointer;
		return_ACPI_STATUS(AE_OK);
	}

	/* Validate the template and get a pointer to the end_tag */

	status = acpi_ut_walk_aml_resources(NULL, obj_desc->buffer.pointer,
					    obj_desc->buffer.length, NULL,
					    (void **)end_tag);

	return_ACPI_STATUS(status);
}
gpl-2.0
MoKee/android_kernel_samsung_lentislte
drivers/acpi/acpica/utdecode.c
2299
14885
/****************************************************************************** * * Module Name: utdecode - Utility decoding routines (value-to-string) * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <linux/export.h> #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utdecode") /* * Properties of the ACPI Object Types, both internal and external. * The table is indexed by values of acpi_object_type */ const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = { ACPI_NS_NORMAL, /* 00 Any */ ACPI_NS_NORMAL, /* 01 Number */ ACPI_NS_NORMAL, /* 02 String */ ACPI_NS_NORMAL, /* 03 Buffer */ ACPI_NS_NORMAL, /* 04 Package */ ACPI_NS_NORMAL, /* 05 field_unit */ ACPI_NS_NEWSCOPE, /* 06 Device */ ACPI_NS_NORMAL, /* 07 Event */ ACPI_NS_NEWSCOPE, /* 08 Method */ ACPI_NS_NORMAL, /* 09 Mutex */ ACPI_NS_NORMAL, /* 10 Region */ ACPI_NS_NEWSCOPE, /* 11 Power */ ACPI_NS_NEWSCOPE, /* 12 Processor */ ACPI_NS_NEWSCOPE, /* 13 Thermal */ ACPI_NS_NORMAL, /* 14 buffer_field */ ACPI_NS_NORMAL, /* 15 ddb_handle */ ACPI_NS_NORMAL, /* 16 Debug Object */ ACPI_NS_NORMAL, /* 17 def_field */ ACPI_NS_NORMAL, /* 18 bank_field */ ACPI_NS_NORMAL, /* 19 index_field */ ACPI_NS_NORMAL, /* 20 Reference */ ACPI_NS_NORMAL, /* 21 Alias */ ACPI_NS_NORMAL, /* 22 method_alias */ ACPI_NS_NORMAL, /* 23 Notify */ ACPI_NS_NORMAL, /* 24 Address Handler */ ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */ ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */ ACPI_NS_NEWSCOPE, /* 27 Scope */ ACPI_NS_NORMAL, /* 28 Extra */ ACPI_NS_NORMAL, /* 29 Data */ ACPI_NS_NORMAL /* 30 Invalid */ }; 
/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_hex_to_ascii_char
 *
 * PARAMETERS:  integer             - Contains the hex digit
 *              position            - bit position of the digit within the
 *                                    integer (multiple of 4)
 *
 * RETURN:      The converted Ascii character
 *
 * DESCRIPTION: Convert a hex digit to an Ascii character
 *
 ******************************************************************************/

/* Hex to ASCII conversion table */

static const char acpi_gbl_hex_to_ascii[] = {
	'0', '1', '2', '3', '4', '5', '6', '7',
	'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
};

char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
{

	return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_region_name
 *
 * PARAMETERS:  Space ID            - ID for the region
 *
 * RETURN:      Decoded region space_id name
 *
 * DESCRIPTION: Translate a Space ID into a name string (Debug only)
 *
 ******************************************************************************/

/* Region type decoding */

const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
	"SystemMemory",		/* 0x00 */
	"SystemIO",		/* 0x01 */
	"PCI_Config",		/* 0x02 */
	"EmbeddedControl",	/* 0x03 */
	"SMBus",		/* 0x04 */
	"SystemCMOS",		/* 0x05 */
	"PCIBARTarget",		/* 0x06 */
	"IPMI",			/* 0x07 */
	"GeneralPurposeIo",	/* 0x08 */
	"GenericSerialBus",	/* 0x09 */
	"PCC"			/* 0x0A */
};

char *acpi_ut_get_region_name(u8 space_id)
{

	/* Special-case the IDs that live outside the predefined table */

	if (space_id >= ACPI_USER_REGION_BEGIN) {
		return ("UserDefinedRegion");
	} else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
		return ("DataTable");
	} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		return ("FunctionalFixedHW");
	} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
		return ("InvalidSpaceId");
	}

	return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_event_name
 *
 * PARAMETERS:  event_id            - Fixed event ID
 *
 * RETURN:      Decoded event ID name
 *
 * DESCRIPTION: Translate a Event ID into a name string (Debug only)
 *
 ******************************************************************************/

/* Event type decoding */

static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
	"PM_Timer",
	"GlobalLock",
	"PowerButton",
	"SleepButton",
	"RealTimeClock",
};

char *acpi_ut_get_event_name(u32 event_id)
{

	if (event_id > ACPI_EVENT_MAX) {
		return ("InvalidEventID");
	}

	return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_type_name
 *
 * PARAMETERS:  type                - An ACPI object type
 *
 * RETURN:      Decoded ACPI object type name
 *
 * DESCRIPTION: Translate a Type ID into a name string (Debug only)
 *
 ******************************************************************************/

/*
 * Elements of acpi_gbl_ns_type_names below must match
 * one-to-one with values of acpi_object_type
 *
 * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
 * when stored in a table it really means that we have thus far seen no
 * evidence to indicate what type is actually going to be stored for this entry.
 */
static const char acpi_gbl_bad_type[] = "UNDEFINED";

/* Printable names of the ACPI object types */

static const char *acpi_gbl_ns_type_names[] = {
	/* 00 */ "Untyped",
	/* 01 */ "Integer",
	/* 02 */ "String",
	/* 03 */ "Buffer",
	/* 04 */ "Package",
	/* 05 */ "FieldUnit",
	/* 06 */ "Device",
	/* 07 */ "Event",
	/* 08 */ "Method",
	/* 09 */ "Mutex",
	/* 10 */ "Region",
	/* 11 */ "Power",
	/* 12 */ "Processor",
	/* 13 */ "Thermal",
	/* 14 */ "BufferField",
	/* 15 */ "DdbHandle",
	/* 16 */ "DebugObject",
	/* 17 */ "RegionField",
	/* 18 */ "BankField",
	/* 19 */ "IndexField",
	/* 20 */ "Reference",
	/* 21 */ "Alias",
	/* 22 */ "MethodAlias",
	/* 23 */ "Notify",
	/* 24 */ "AddrHandler",
	/* 25 */ "ResourceDesc",
	/* 26 */ "ResourceFld",
	/* 27 */ "Scope",
	/* 28 */ "Extra",
	/* 29 */ "Data",
	/* 30 */ "Invalid"
};

char *acpi_ut_get_type_name(acpi_object_type type)
{

	if (type > ACPI_TYPE_INVALID) {
		return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
	}

	return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
}

/* Convenience wrapper: decode the type of an operand object (NULL-safe) */

char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
{

	if (!obj_desc) {
		return ("[NULL Object Descriptor]");
	}

	return (acpi_ut_get_type_name(obj_desc->common.type));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_node_name
 *
 * PARAMETERS:  object              - A namespace node
 *
 * RETURN:      ASCII name of the node
 *
 * DESCRIPTION: Validate the node and return the node's ACPI name.
 *
 ******************************************************************************/

char *acpi_ut_get_node_name(void *object)
{
	struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;

	/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */

	if (!object) {
		return ("NULL");
	}

	/* Check for Root node */

	if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
		return ("\"\\\" ");
	}

	/* Descriptor must be a namespace node */

	if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
		return ("####");
	}

	/*
	 * Ensure name is valid. The name was validated/repaired when the node
	 * was created, but make sure it has not been corrupted.
	 */
	acpi_ut_repair_name(node->name.ascii);

	/* Return the name */

	return (node->name.ascii);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_descriptor_name
 *
 * PARAMETERS:  object              - An ACPI object
 *
 * RETURN:      Decoded name of the descriptor type
 *
 * DESCRIPTION: Validate object and return the descriptor type
 *
 ******************************************************************************/

/* Printable names of object descriptor types */

static const char *acpi_gbl_desc_type_names[] = {
	/* 00 */ "Not a Descriptor",
	/* 01 */ "Cached",
	/* 02 */ "State-Generic",
	/* 03 */ "State-Update",
	/* 04 */ "State-Package",
	/* 05 */ "State-Control",
	/* 06 */ "State-RootParseScope",
	/* 07 */ "State-ParseScope",
	/* 08 */ "State-WalkScope",
	/* 09 */ "State-Result",
	/* 10 */ "State-Notify",
	/* 11 */ "State-Thread",
	/* 12 */ "Walk",
	/* 13 */ "Parser",
	/* 14 */ "Operand",
	/* 15 */ "Node"
};

char *acpi_ut_get_descriptor_name(void *object)
{

	if (!object) {
		return ("NULL OBJECT");
	}

	if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
		return ("Not a Descriptor");
	}

	return (ACPI_CAST_PTR(char,
			      acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
						       (object)]));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_reference_name
 *
 * PARAMETERS:  object              - An ACPI reference object
 *
 * RETURN:      Decoded name of the type of reference
 *
 * DESCRIPTION: Decode a reference object sub-type to a string.
 *
 ******************************************************************************/

/* Printable names of reference object sub-types */

static const char *acpi_gbl_ref_class_names[] = {
	/* 00 */ "Local",
	/* 01 */ "Argument",
	/* 02 */ "RefOf",
	/* 03 */ "Index",
	/* 04 */ "DdbHandle",
	/* 05 */ "Named Object",
	/* 06 */ "Debug"
};

const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
{

	/* Validate in order: non-NULL, operand, reference, in-range class */

	if (!object) {
		return ("NULL Object");
	}

	if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
		return ("Not an Operand object");
	}

	if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
		return ("Not a Reference object");
	}

	if (object->reference.class > ACPI_REFCLASS_MAX) {
		return ("Unknown Reference class");
	}

	return (acpi_gbl_ref_class_names[object->reference.class]);
}

#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/*
 * Strings and procedures used for debug only
 */

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_mutex_name
 *
 * PARAMETERS:  mutex_id            - The predefined ID for this mutex.
 *
 * RETURN:      Decoded name of the internal mutex
 *
 * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
 *
 ******************************************************************************/

/* Names for internal mutex objects, used for debug output */

static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
	"ACPI_MTX_Interpreter",
	"ACPI_MTX_Namespace",
	"ACPI_MTX_Tables",
	"ACPI_MTX_Events",
	"ACPI_MTX_Caches",
	"ACPI_MTX_Memory",
	"ACPI_MTX_CommandComplete",
	"ACPI_MTX_CommandReady"
};

char *acpi_ut_get_mutex_name(u32 mutex_id)
{

	if (mutex_id > ACPI_MAX_MUTEX) {
		return ("Invalid Mutex ID");
	}

	return (acpi_gbl_mutex_names[mutex_id]);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_notify_name
 *
 * PARAMETERS:  notify_value        - Value from the Notify() request
 *
 * RETURN:      Decoded name for the notify value
 *
 * DESCRIPTION: Translate a Notify Value to a notify namestring.
 *
 ******************************************************************************/

/* Names for Notify() values, used for debug output */

static const char *acpi_gbl_notify_value_names[ACPI_NOTIFY_MAX + 1] = {
	/* 00 */ "Bus Check",
	/* 01 */ "Device Check",
	/* 02 */ "Device Wake",
	/* 03 */ "Eject Request",
	/* 04 */ "Device Check Light",
	/* 05 */ "Frequency Mismatch",
	/* 06 */ "Bus Mode Mismatch",
	/* 07 */ "Power Fault",
	/* 08 */ "Capabilities Check",
	/* 09 */ "Device PLD Check",
	/* 10 */ "Reserved",
	/* 11 */ "System Locality Update",
	/* 12 */ "Shutdown Request"
};

const char *acpi_ut_get_notify_name(u32 notify_value)
{

	/* Values above the named table fall into reserved/device/HW ranges */

	if (notify_value <= ACPI_NOTIFY_MAX) {
		return (acpi_gbl_notify_value_names[notify_value]);
	} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
		return ("Reserved");
	} else if (notify_value <= ACPI_MAX_DEVICE_SPECIFIC_NOTIFY) {
		return ("Device Specific");
	} else {
		return ("Hardware Specific");
	}
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_valid_object_type
 *
 * PARAMETERS:  type                - Object type to be validated
 *
 * RETURN:      TRUE if valid object type, FALSE otherwise
 *
 * DESCRIPTION: Validate an object type
 *
 ******************************************************************************/

u8 acpi_ut_valid_object_type(acpi_object_type type)
{

	if (type > ACPI_TYPE_LOCAL_MAX) {

		/* Note: Assumes all TYPEs are contiguous (external/local) */

		return (FALSE);
	}

	return (TRUE);
}
gpl-2.0
tilaksidduram/android_kernel_samsung_smdk4412
fs/ubifs/file.c
2299
46027
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* * This file implements VFS file and inode operations for regular files, device * nodes and symlinks as well as address space operations. * * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if * the page is dirty and is used for optimization purposes - dirty pages are * not budgeted so the flag shows that 'ubifs_write_end()' should not release * the budget for this page. The @PG_checked flag is set if full budgeting is * required for the page e.g., when it corresponds to a file hole or it is * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because * it is OK to fail in this function, and the budget is released in * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry * information about how the page was budgeted, to make it possible to release * the budget properly. * * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we * implement. However, this is not true for 'ubifs_writepage()', which may be * called with @i_mutex unlocked. For example, when pdflush is doing background * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. 
At "normal" * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()' * we are only guaranteed that the page is locked. * * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not * set as well. However, UBIFS disables readahead. */ #include "ubifs.h" #include <linux/mount.h> #include <linux/namei.h> #include <linux/slab.h> static int read_block(struct inode *inode, void *addr, unsigned int block, struct ubifs_data_node *dn) { struct ubifs_info *c = inode->i_sb->s_fs_info; int err, len, out_len; union ubifs_key key; unsigned int dlen; data_key_init(c, &key, inode->i_ino, block); err = ubifs_tnc_lookup(c, &key, dn); if (err) { if (err == -ENOENT) /* Not found, so it must be a hole */ memset(addr, 0, UBIFS_BLOCK_SIZE); return err; } ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); len = le32_to_cpu(dn->size); if (len <= 0 || len > UBIFS_BLOCK_SIZE) goto dump; dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; out_len = UBIFS_BLOCK_SIZE; err = ubifs_decompress(&dn->data, dlen, addr, &out_len, le16_to_cpu(dn->compr_type)); if (err || len != out_len) goto dump; /* * Data length can be less than a full block, even for blocks that are * not the last in the file (e.g., as a result of making a hole and * appending data). Ensure that the remainder is zeroed out. 
*/ if (len < UBIFS_BLOCK_SIZE) memset(addr + len, 0, UBIFS_BLOCK_SIZE - len); return 0; dump: ubifs_err("bad data node (block %u, inode %lu)", block, inode->i_ino); dbg_dump_node(c, dn); return -EINVAL; } static int do_readpage(struct page *page) { void *addr; int err = 0, i; unsigned int block, beyond; struct ubifs_data_node *dn; struct inode *inode = page->mapping->host; loff_t i_size = i_size_read(inode); dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", inode->i_ino, page->index, i_size, page->flags); ubifs_assert(!PageChecked(page)); ubifs_assert(!PagePrivate(page)); addr = kmap(page); block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT; if (block >= beyond) { /* Reading beyond inode */ SetPageChecked(page); memset(addr, 0, PAGE_CACHE_SIZE); goto out; } dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS); if (!dn) { err = -ENOMEM; goto error; } i = 0; while (1) { int ret; if (block >= beyond) { /* Reading beyond inode */ err = -ENOENT; memset(addr, 0, UBIFS_BLOCK_SIZE); } else { ret = read_block(inode, addr, block, dn); if (ret) { err = ret; if (err != -ENOENT) break; } else if (block + 1 == beyond) { int dlen = le32_to_cpu(dn->size); int ilen = i_size & (UBIFS_BLOCK_SIZE - 1); if (ilen && ilen < dlen) memset(addr + ilen, 0, dlen - ilen); } } if (++i >= UBIFS_BLOCKS_PER_PAGE) break; block += 1; addr += UBIFS_BLOCK_SIZE; } if (err) { if (err == -ENOENT) { /* Not found, so it must be a hole */ SetPageChecked(page); dbg_gen("hole"); goto out_free; } ubifs_err("cannot read page %lu of inode %lu, error %d", page->index, inode->i_ino, err); goto error; } out_free: kfree(dn); out: SetPageUptodate(page); ClearPageError(page); flush_dcache_page(page); kunmap(page); return 0; error: kfree(dn); ClearPageUptodate(page); SetPageError(page); flush_dcache_page(page); kunmap(page); return err; } /** * release_new_page_budget - release budget of a new page. 
* @c: UBIFS file-system description object * * This is a helper function which releases budget corresponding to the budget * of one new page of data. */ static void release_new_page_budget(struct ubifs_info *c) { struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 }; ubifs_release_budget(c, &req); } /** * release_existing_page_budget - release budget of an existing page. * @c: UBIFS file-system description object * * This is a helper function which releases budget corresponding to the budget * of changing one one page of data which already exists on the flash media. */ static void release_existing_page_budget(struct ubifs_info *c) { struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget}; ubifs_release_budget(c, &req); } static int write_begin_slow(struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, unsigned flags) { struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; pgoff_t index = pos >> PAGE_CACHE_SHIFT; struct ubifs_budget_req req = { .new_page = 1 }; int uninitialized_var(err), appending = !!(pos + len > inode->i_size); struct page *page; dbg_gen("ino %lu, pos %llu, len %u, i_size %lld", inode->i_ino, pos, len, inode->i_size); /* * At the slow path we have to budget before locking the page, because * budgeting may force write-back, which would wait on locked pages and * deadlock if we had the page locked. At this point we do not know * anything about the page, so assume that this is a new page which is * written to a hole. This corresponds to largest budget. Later the * budget will be amended if this is not true. 
*/ if (appending) /* We are appending data, budget for inode change */ req.dirtied_ino = 1; err = ubifs_budget_space(c, &req); if (unlikely(err)) return err; page = grab_cache_page_write_begin(mapping, index, flags); if (unlikely(!page)) { ubifs_release_budget(c, &req); return -ENOMEM; } if (!PageUptodate(page)) { if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) SetPageChecked(page); else { err = do_readpage(page); if (err) { unlock_page(page); page_cache_release(page); return err; } } SetPageUptodate(page); ClearPageError(page); } if (PagePrivate(page)) /* * The page is dirty, which means it was budgeted twice: * o first time the budget was allocated by the task which * made the page dirty and set the PG_private flag; * o and then we budgeted for it for the second time at the * very beginning of this function. * * So what we have to do is to release the page budget we * allocated. */ release_new_page_budget(c); else if (!PageChecked(page)) /* * We are changing a page which already exists on the media. * This means that changing the page does not make the amount * of indexing information larger, and this part of the budget * which we have already acquired may be released. */ ubifs_convert_page_budget(c); if (appending) { struct ubifs_inode *ui = ubifs_inode(inode); /* * 'ubifs_write_end()' is optimized from the fast-path part of * 'ubifs_write_begin()' and expects the @ui_mutex to be locked * if data is appended. */ mutex_lock(&ui->ui_mutex); if (ui->dirty) /* * The inode is dirty already, so we may free the * budget we allocated. */ ubifs_release_dirty_inode_budget(c, ui); } *pagep = page; return 0; } /** * allocate_budget - allocate budget for 'ubifs_write_begin()'. * @c: UBIFS file-system description object * @page: page to allocate budget for * @ui: UBIFS inode object the page belongs to * @appending: non-zero if the page is appended * * This is a helper function for 'ubifs_write_begin()' which allocates budget * for the operation. 
The budget is allocated differently depending on whether * this is appending, whether the page is dirty or not, and so on. This * function leaves the @ui->ui_mutex locked in case of appending. Returns zero * in case of success and %-ENOSPC in case of failure. */ static int allocate_budget(struct ubifs_info *c, struct page *page, struct ubifs_inode *ui, int appending) { struct ubifs_budget_req req = { .fast = 1 }; if (PagePrivate(page)) { if (!appending) /* * The page is dirty and we are not appending, which * means no budget is needed at all. */ return 0; mutex_lock(&ui->ui_mutex); if (ui->dirty) /* * The page is dirty and we are appending, so the inode * has to be marked as dirty. However, it is already * dirty, so we do not need any budget. We may return, * but @ui->ui_mutex hast to be left locked because we * should prevent write-back from flushing the inode * and freeing the budget. The lock will be released in * 'ubifs_write_end()'. */ return 0; /* * The page is dirty, we are appending, the inode is clean, so * we need to budget the inode change. */ req.dirtied_ino = 1; } else { if (PageChecked(page)) /* * The page corresponds to a hole and does not * exist on the media. So changing it makes * make the amount of indexing information * larger, and we have to budget for a new * page. */ req.new_page = 1; else /* * Not a hole, the change will not add any new * indexing information, budget for page * change. */ req.dirtied_page = 1; if (appending) { mutex_lock(&ui->ui_mutex); if (!ui->dirty) /* * The inode is clean but we will have to mark * it as dirty because we are appending. This * needs a budget. */ req.dirtied_ino = 1; } } return ubifs_budget_space(c, &req); } /* * This function is called when a page of data is going to be written. Since * the page of data will not necessarily go to the flash straight away, UBIFS * has to reserve space on the media for it, which is done by means of * budgeting. 
* * This is the hot-path of the file-system and we are trying to optimize it as * much as possible. For this reasons it is split on 2 parts - slow and fast. * * There many budgeting cases: * o a new page is appended - we have to budget for a new page and for * changing the inode; however, if the inode is already dirty, there is * no need to budget for it; * o an existing clean page is changed - we have budget for it; if the page * does not exist on the media (a hole), we have to budget for a new * page; otherwise, we may budget for changing an existing page; the * difference between these cases is that changing an existing page does * not introduce anything new to the FS indexing information, so it does * not grow, and smaller budget is acquired in this case; * o an existing dirty page is changed - no need to budget at all, because * the page budget has been acquired by earlier, when the page has been * marked dirty. * * UBIFS budgeting sub-system may force write-back if it thinks there is no * space to reserve. This imposes some locking restrictions and makes it * impossible to take into account the above cases, and makes it impossible to * optimize budgeting. * * The solution for this is that the fast path of 'ubifs_write_begin()' assumes * there is a plenty of flash space and the budget will be acquired quickly, * without forcing write-back. The slow path does not make this assumption. 
*/ static int ubifs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); pgoff_t index = pos >> PAGE_CACHE_SHIFT; int uninitialized_var(err), appending = !!(pos + len > inode->i_size); int skipped_read = 0; struct page *page; ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size); ubifs_assert(!c->ro_media && !c->ro_mount); if (unlikely(c->ro_error)) return -EROFS; /* Try out the fast-path part first */ page = grab_cache_page_write_begin(mapping, index, flags); if (unlikely(!page)) return -ENOMEM; if (!PageUptodate(page)) { /* The page is not loaded from the flash */ if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) { /* * We change whole page so no need to load it. But we * do not know whether this page exists on the media or * not, so we assume the latter because it requires * larger budget. The assumption is that it is better * to budget a bit more than to read the page from the * media. Thus, we are setting the @PG_checked flag * here. */ SetPageChecked(page); skipped_read = 1; } else { err = do_readpage(page); if (err) { unlock_page(page); page_cache_release(page); return err; } } SetPageUptodate(page); ClearPageError(page); } err = allocate_budget(c, page, ui, appending); if (unlikely(err)) { ubifs_assert(err == -ENOSPC); /* * If we skipped reading the page because we were going to * write all of it, then it is not up to date. */ if (skipped_read) { ClearPageChecked(page); ClearPageUptodate(page); } /* * Budgeting failed which means it would have to force * write-back but didn't, because we set the @fast flag in the * request. Write-back cannot be done now, while we have the * page locked, because it would deadlock. Unlock and free * everything and fall-back to slow-path. 
*/ if (appending) { ubifs_assert(mutex_is_locked(&ui->ui_mutex)); mutex_unlock(&ui->ui_mutex); } unlock_page(page); page_cache_release(page); return write_begin_slow(mapping, pos, len, pagep, flags); } /* * Whee, we acquired budgeting quickly - without involving * garbage-collection, committing or forcing write-back. We return * with @ui->ui_mutex locked if we are appending pages, and unlocked * otherwise. This is an optimization (slightly hacky though). */ *pagep = page; return 0; } /** * cancel_budget - cancel budget. * @c: UBIFS file-system description object * @page: page to cancel budget for * @ui: UBIFS inode object the page belongs to * @appending: non-zero if the page is appended * * This is a helper function for a page write operation. It unlocks the * @ui->ui_mutex in case of appending. */ static void cancel_budget(struct ubifs_info *c, struct page *page, struct ubifs_inode *ui, int appending) { if (appending) { if (!ui->dirty) ubifs_release_dirty_inode_budget(c, ui); mutex_unlock(&ui->ui_mutex); } if (!PagePrivate(page)) { if (PageChecked(page)) release_new_page_budget(c); else release_existing_page_budget(c); } } static int ubifs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; struct ubifs_inode *ui = ubifs_inode(inode); struct ubifs_info *c = inode->i_sb->s_fs_info; loff_t end_pos = pos + len; int appending = !!(end_pos > inode->i_size); dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld", inode->i_ino, pos, page->index, len, copied, inode->i_size); if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) { /* * VFS copied less data to the page that it intended and * declared in its '->write_begin()' call via the @len * argument. If the page was not up-to-date, and @len was * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did * not load it from the media (for optimization reasons). 
This * means that part of the page contains garbage. So read the * page now. */ dbg_gen("copied %d instead of %d, read page and repeat", copied, len); cancel_budget(c, page, ui, appending); ClearPageChecked(page); /* * Return 0 to force VFS to repeat the whole operation, or the * error code if 'do_readpage()' fails. */ copied = do_readpage(page); goto out; } if (!PagePrivate(page)) { SetPagePrivate(page); atomic_long_inc(&c->dirty_pg_cnt); __set_page_dirty_nobuffers(page); } if (appending) { i_size_write(inode, end_pos); ui->ui_size = end_pos; /* * Note, we do not set @I_DIRTY_PAGES (which means that the * inode has dirty pages), this has been done in * '__set_page_dirty_nobuffers()'. */ __mark_inode_dirty(inode, I_DIRTY_DATASYNC); ubifs_assert(mutex_is_locked(&ui->ui_mutex)); mutex_unlock(&ui->ui_mutex); } out: unlock_page(page); page_cache_release(page); return copied; } /** * populate_page - copy data nodes into a page for bulk-read. * @c: UBIFS file-system description object * @page: page * @bu: bulk-read information * @n: next zbranch slot * * This function returns %0 on success and a negative error code on failure. 
*/ static int populate_page(struct ubifs_info *c, struct page *page, struct bu_info *bu, int *n) { int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0; struct inode *inode = page->mapping->host; loff_t i_size = i_size_read(inode); unsigned int page_block; void *addr, *zaddr; pgoff_t end_index; dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", inode->i_ino, page->index, i_size, page->flags); addr = zaddr = kmap(page); end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; if (!i_size || page->index > end_index) { hole = 1; memset(addr, 0, PAGE_CACHE_SIZE); goto out_hole; } page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; while (1) { int err, len, out_len, dlen; if (nn >= bu->cnt) { hole = 1; memset(addr, 0, UBIFS_BLOCK_SIZE); } else if (key_block(c, &bu->zbranch[nn].key) == page_block) { struct ubifs_data_node *dn; dn = bu->buf + (bu->zbranch[nn].offs - offs); ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); len = le32_to_cpu(dn->size); if (len <= 0 || len > UBIFS_BLOCK_SIZE) goto out_err; dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; out_len = UBIFS_BLOCK_SIZE; err = ubifs_decompress(&dn->data, dlen, addr, &out_len, le16_to_cpu(dn->compr_type)); if (err || len != out_len) goto out_err; if (len < UBIFS_BLOCK_SIZE) memset(addr + len, 0, UBIFS_BLOCK_SIZE - len); nn += 1; read = (i << UBIFS_BLOCK_SHIFT) + len; } else if (key_block(c, &bu->zbranch[nn].key) < page_block) { nn += 1; continue; } else { hole = 1; memset(addr, 0, UBIFS_BLOCK_SIZE); } if (++i >= UBIFS_BLOCKS_PER_PAGE) break; addr += UBIFS_BLOCK_SIZE; page_block += 1; } if (end_index == page->index) { int len = i_size & (PAGE_CACHE_SIZE - 1); if (len && len < read) memset(zaddr + len, 0, read - len); } out_hole: if (hole) { SetPageChecked(page); dbg_gen("hole"); } SetPageUptodate(page); ClearPageError(page); flush_dcache_page(page); kunmap(page); *n = nn; return 0; out_err: ClearPageUptodate(page); SetPageError(page); flush_dcache_page(page); kunmap(page); 
ubifs_err("bad data node (block %u, inode %lu)", page_block, inode->i_ino); return -EINVAL; } /** * ubifs_do_bulk_read - do bulk-read. * @c: UBIFS file-system description object * @bu: bulk-read information * @page1: first page to read * * This function returns %1 if the bulk-read is done, otherwise %0 is returned. */ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, struct page *page1) { pgoff_t offset = page1->index, end_index; struct address_space *mapping = page1->mapping; struct inode *inode = mapping->host; struct ubifs_inode *ui = ubifs_inode(inode); int err, page_idx, page_cnt, ret = 0, n = 0; int allocate = bu->buf ? 0 : 1; loff_t isize; err = ubifs_tnc_get_bu_keys(c, bu); if (err) goto out_warn; if (bu->eof) { /* Turn off bulk-read at the end of the file */ ui->read_in_a_row = 1; ui->bulk_read = 0; } page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT; if (!page_cnt) { /* * This happens when there are multiple blocks per page and the * blocks for the first page we are looking for, are not * together. If all the pages were like this, bulk-read would * reduce performance, so we turn it off for a while. */ goto out_bu_off; } if (bu->cnt) { if (allocate) { /* * Allocate bulk-read buffer depending on how many data * nodes we are going to read. 
*/ bu->buf_len = bu->zbranch[bu->cnt - 1].offs + bu->zbranch[bu->cnt - 1].len - bu->zbranch[0].offs; ubifs_assert(bu->buf_len > 0); ubifs_assert(bu->buf_len <= c->leb_size); bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN); if (!bu->buf) goto out_bu_off; } err = ubifs_tnc_bulk_read(c, bu); if (err) goto out_warn; } err = populate_page(c, page1, bu, &n); if (err) goto out_warn; unlock_page(page1); ret = 1; isize = i_size_read(inode); if (isize == 0) goto out_free; end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); for (page_idx = 1; page_idx < page_cnt; page_idx++) { pgoff_t page_offset = offset + page_idx; struct page *page; if (page_offset > end_index) break; page = find_or_create_page(mapping, page_offset, GFP_NOFS | __GFP_COLD); if (!page) break; if (!PageUptodate(page)) err = populate_page(c, page, bu, &n); unlock_page(page); page_cache_release(page); if (err) break; } ui->last_page_read = offset + page_idx - 1; out_free: if (allocate) kfree(bu->buf); return ret; out_warn: ubifs_warn("ignoring error %d and skipping bulk-read", err); goto out_free; out_bu_off: ui->read_in_a_row = ui->bulk_read = 0; goto out_free; } /** * ubifs_bulk_read - determine whether to bulk-read and, if so, do it. * @page: page from which to start bulk-read. * * Some flash media are capable of reading sequentially at faster rates. UBIFS * bulk-read facility is designed to take advantage of that, by reading in one * go consecutive data nodes that are also located consecutively in the same * LEB. This function returns %1 if a bulk-read is done and %0 otherwise. 
*/ static int ubifs_bulk_read(struct page *page) { struct inode *inode = page->mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); pgoff_t index = page->index, last_page_read = ui->last_page_read; struct bu_info *bu; int err = 0, allocated = 0; ui->last_page_read = index; if (!c->bulk_read) return 0; /* * Bulk-read is protected by @ui->ui_mutex, but it is an optimization, * so don't bother if we cannot lock the mutex. */ if (!mutex_trylock(&ui->ui_mutex)) return 0; if (index != last_page_read + 1) { /* Turn off bulk-read if we stop reading sequentially */ ui->read_in_a_row = 1; if (ui->bulk_read) ui->bulk_read = 0; goto out_unlock; } if (!ui->bulk_read) { ui->read_in_a_row += 1; if (ui->read_in_a_row < 3) goto out_unlock; /* Three reads in a row, so switch on bulk-read */ ui->bulk_read = 1; } /* * If possible, try to use pre-allocated bulk-read information, which * is protected by @c->bu_mutex. */ if (mutex_trylock(&c->bu_mutex)) bu = &c->bu; else { bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN); if (!bu) goto out_unlock; bu->buf = NULL; allocated = 1; } bu->buf_len = c->max_bu_buf_len; data_key_init(c, &bu->key, inode->i_ino, page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT); err = ubifs_do_bulk_read(c, bu, page); if (!allocated) mutex_unlock(&c->bu_mutex); else kfree(bu); out_unlock: mutex_unlock(&ui->ui_mutex); return err; } static int ubifs_readpage(struct file *file, struct page *page) { if (ubifs_bulk_read(page)) return 0; do_readpage(page); unlock_page(page); return 0; } static int do_writepage(struct page *page, int len) { int err = 0, i, blen; unsigned int block; void *addr; union ubifs_key key; struct inode *inode = page->mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; #ifdef UBIFS_DEBUG spin_lock(&ui->ui_lock); ubifs_assert(page->index <= ui->synced_i_size << PAGE_CACHE_SIZE); spin_unlock(&ui->ui_lock); #endif /* Update radix tree tags */ set_page_writeback(page); addr = 
kmap(page); block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; i = 0; while (len) { blen = min_t(int, len, UBIFS_BLOCK_SIZE); data_key_init(c, &key, inode->i_ino, block); err = ubifs_jnl_write_data(c, inode, &key, addr, blen); if (err) break; if (++i >= UBIFS_BLOCKS_PER_PAGE) break; block += 1; addr += blen; len -= blen; } if (err) { SetPageError(page); ubifs_err("cannot write page %lu of inode %lu, error %d", page->index, inode->i_ino, err); ubifs_ro_mode(c, err); } ubifs_assert(PagePrivate(page)); if (PageChecked(page)) release_new_page_budget(c); else release_existing_page_budget(c); atomic_long_dec(&c->dirty_pg_cnt); ClearPagePrivate(page); ClearPageChecked(page); kunmap(page); unlock_page(page); end_page_writeback(page); return err; } /* * When writing-back dirty inodes, VFS first writes-back pages belonging to the * inode, then the inode itself. For UBIFS this may cause a problem. Consider a * situation when a we have an inode with size 0, then a megabyte of data is * appended to the inode, then write-back starts and flushes some amount of the * dirty pages, the journal becomes full, commit happens and finishes, and then * an unclean reboot happens. When the file system is mounted next time, the * inode size would still be 0, but there would be many pages which are beyond * the inode size, they would be indexed and consume flash space. Because the * journal has been committed, the replay would not be able to detect this * situation and correct the inode size. This means UBIFS would have to scan * whole index and correct all inode sizes, which is long an unacceptable. * * To prevent situations like this, UBIFS writes pages back only if they are * within the last synchronized inode size, i.e. the size which has been * written to the flash media last time. Otherwise, UBIFS forces inode * write-back, thus making sure the on-flash inode contains current inode size, * and then keeps writing pages back. * * Some locking issues explanation. 
'ubifs_writepage()' first is called with * the page locked, and it locks @ui_mutex. However, write-back does take inode * @i_mutex, which means other VFS operations may be run on this inode at the * same time. And the problematic one is truncation to smaller size, from where * we have to call 'truncate_setsize()', which first changes @inode->i_size, * then drops the truncated pages. And while dropping the pages, it takes the * page lock. This means that 'do_truncation()' cannot call 'truncate_setsize()' * with @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. * This means that @inode->i_size is changed while @ui_mutex is unlocked. * * XXX(truncate): with the new truncate sequence this is not true anymore, * and the calls to truncate_setsize can be move around freely. They should * be moved to the very end of the truncate sequence. * * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond * inode size. How do we do this if @inode->i_size may became smaller while we * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the * @ui->ui_isize "shadow" field which UBIFS uses instead of @inode->i_size * internally and updates it under @ui_mutex. * * Q: why we do not worry that if we race with truncation, we may end up with a * situation when the inode is truncated while we are in the middle of * 'do_writepage()', so we do write beyond inode size? * A: If we are in the middle of 'do_writepage()', truncation would be locked * on the page lock and it would not write the truncated inode node to the * journal before we have finished. 
*/ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct ubifs_inode *ui = ubifs_inode(inode); loff_t i_size = i_size_read(inode), synced_i_size; pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; int err, len = i_size & (PAGE_CACHE_SIZE - 1); void *kaddr; dbg_gen("ino %lu, pg %lu, pg flags %#lx", inode->i_ino, page->index, page->flags); ubifs_assert(PagePrivate(page)); /* Is the page fully outside @i_size? (truncate in progress) */ if (page->index > end_index || (page->index == end_index && !len)) { err = 0; goto out_unlock; } spin_lock(&ui->ui_lock); synced_i_size = ui->synced_i_size; spin_unlock(&ui->ui_lock); /* Is the page fully inside @i_size? */ if (page->index < end_index) { if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) goto out_unlock; /* * The inode has been written, but the write-buffer has * not been synchronized, so in case of an unclean * reboot we may end up with some pages beyond inode * size, but they would be in the journal (because * commit flushes write buffers) and recovery would deal * with this. */ } return do_writepage(page, PAGE_CACHE_SIZE); } /* * The page straddles @i_size. It must be zeroed out on each and every * writepage invocation because it may be mmapped. "A file is mapped * in multiples of the page size. For a file that is not a multiple of * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ kaddr = kmap_atomic(page, KM_USER0); memset(kaddr + len, 0, PAGE_CACHE_SIZE - len); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); if (i_size > synced_i_size) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) goto out_unlock; } return do_writepage(page, len); out_unlock: unlock_page(page); return err; } /** * do_attr_changes - change inode attributes. 
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		/* Drop SGID when the caller may not keep it */
		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;
	req.dirtied_ino = 1;
	/* A funny way to budget for truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_CACHE_SIZE - 1);
				err = do_writepage(page, offset);
				page_cache_release(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				unlock_page(page);
				page_cache_release(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		/* Budgeting was skipped (deletion path): clear no-space flags */
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}

/*
 * VFS '->setattr()' entry point; dispatches to 'do_truncation()' for
 * shrinking truncations and to 'do_setattr()' for everything else.
 */
int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;
	struct inode *inode = dentry->d_inode;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(inode);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}

static void ubifs_invalidatepage(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(PagePrivate(page));
	if (offset)
		/* Partial page remains dirty */
		return;

	/* @PG_checked distinguishes a "new page" budget from an existing one */
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);
}

static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);

	/* The symlink target is kept in the in-memory inode's data area */
	nd_set_link(nd, ui->data);
	return NULL;
}

int ubifs_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	/*
	 * VFS has already synchronized dirty pages for this inode. Synchronize
	 * the inode unless this is a 'datasync()' call.
	 */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			return err;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer. Flush
	 * them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
	if (err)
		return err;

	return 0;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec *now)
{
	if (!timespec_equal(&inode->i_mtime, now) ||
	    !timespec_equal(&inode->i_ctime, now))
		return 1;
	return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if it is not equivalent to
 * current time. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int update_mctime(struct ubifs_info *c, struct inode *inode)
{
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		/* Budget before dirtying the inode */
		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}

static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	int err;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	/* Apply (and budget for) any needed [mc]time update before writing */
	err = update_mctime(c, inode);
	if (err)
		return err;

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

static int ubifs_set_page_dirty(struct page *page)
{
	int ret;

	ret = __set_page_dirty_nobuffers(page);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(ret == 0);
	return ret;
}

static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	/*
	 * An attempt to release a dirty page without budgeting for it - should
	 * not happen.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(PagePrivate(page));
	ubifs_assert(0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}

/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec now = ubifs_current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld",	inode->i_ino, page->index,
		i_size_read(inode));
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far so we may budget for changing the
	 * page. Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back which would cause deadlock.
	 *
	 * At the moment we do not know whether the page is dirty or not, so we
	 * assume that it is not and budget for a new page. We could look at
	 * the @PG_private flag and figure this out, but we may race with write
	 * back and the page state may change by the time we lock it, so this
	 * would need additional care. We do not bother with this at the
	 * moment, although it might be good idea to do. Instead, we allocate
	 * budget for a new page and amend it later on if the page was in fact
	 * dirty.
	 *
	 * The budgeting-related logic of this function is similar to what we
	 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
	 * for more comments.
	 */
	update_time = mctime_update_needed(inode, &now);
	if (update_time)
		/*
		 * We have to change inode time stamp which requires extra
		 * budgeting.
		 */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err)) {
		if (err == -ENOSPC)
			ubifs_warn("out of space for mmapped file "
				   "(inode number %lu)", inode->i_ino);
		return VM_FAULT_SIGBUS;
	}

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
		     page_offset(page) > i_size_read(inode))) {
		/* Page got truncated out from underneath us */
		err = -EINVAL;
		goto out_unlock;
	}

	/* Amend the budget now that the page's true state is known */
	if (PagePrivate(page))
		release_new_page_budget(c);
	else {
		if (!PageChecked(page))
			ubifs_convert_page_budget(c);
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (update_time) {
		int release;
		struct ubifs_inode *ui = ubifs_inode(inode);

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_dirty_inode_budget(c, ui);
	}

	unlock_page(page);
	return 0;

out_unlock:
	unlock_page(page);
	ubifs_release_budget(c, &req);
	if (err)
		err = VM_FAULT_SIGBUS;
	return err;
}

static const struct vm_operations_struct ubifs_file_vm_ops = {
	.fault        = filemap_fault,
	.page_mkwrite = ubifs_vm_page_mkwrite,
};

static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err;

	err = generic_file_mmap(file, vma);
	if (err)
		return err;
	/* Install UBIFS-specific vm ops so mkwrite faults get budgeted */
	vma->vm_ops = &ubifs_file_vm_ops;
	return 0;
}

const struct address_space_operations ubifs_file_address_operations = {
	.readpage       = ubifs_readpage,
	.writepage      = ubifs_writepage,
	.write_begin    = ubifs_write_begin,
	.write_end      = ubifs_write_end,
	.invalidatepage = ubifs_invalidatepage,
	.set_page_dirty = ubifs_set_page_dirty,
	.releasepage    = ubifs_releasepage,
};

const struct inode_operations ubifs_file_inode_operations = {
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
	.setxattr    = ubifs_setxattr,
	.getxattr    = ubifs_getxattr,
	.listxattr   = ubifs_listxattr,
	.removexattr = ubifs_removexattr,
#endif
};

const struct inode_operations ubifs_symlink_inode_operations = {
	.readlink    = generic_readlink,
	.follow_link = ubifs_follow_link,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
};

const struct file_operations ubifs_file_operations = {
	.llseek         = generic_file_llseek,
	.read           = do_sync_read,
	.write          = do_sync_write,
	.aio_read       = generic_file_aio_read,
	.aio_write      = ubifs_aio_write,
	.mmap           = ubifs_file_mmap,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read    = generic_file_splice_read,
	.splice_write   = generic_file_splice_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
};
gpl-2.0
Borkata/adam-nv-3.1
drivers/media/video/uvc/uvc_isight.c
3323
3953
/*
 * uvc_isight.c --  USB Video Class driver - iSight support
 *
 * Copyright (C) 2006-2007
 *		Ivan N. Zlatev <contact@i-nz.net>
 * Copyright (C) 2008-2009
 *		Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include "uvcvideo.h"

/* Built-in iSight webcams implement most of UVC 1.0 except a
 * different packet format. Instead of sending a header at the
 * beginning of each isochronous transfer payload, the webcam sends a
 * single header per image (on its own in a packet), followed by
 * packets containing data only.
 *
 * Offset	Size (bytes)	Description
 * ------------------------------------------------------------------
 * 0x00		1		Header length
 * 0x01		1		Flags (UVC-compliant)
 * 0x02		4		Always equal to '11223344'
 * 0x06		8		Always equal to 'deadbeefdeadface'
 * 0x0e		16		Unknown
 *
 * The header can be prefixed by an optional, unknown-purpose byte.
 */

/*
 * Decode one isochronous packet into @buf. Returns 0 normally, or -EAGAIN
 * when an image header is found while the current buffer already holds
 * data: the buffer is then complete and the caller must re-run the decode
 * on the same packet with the next buffer.
 */
static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
		const __u8 *data, unsigned int len)
{
	static const __u8 hdr[] = {
		0x11, 0x22, 0x33, 0x44,
		0xde, 0xad, 0xbe, 0xef,
		0xde, 0xad, 0xfa, 0xce
	};

	unsigned int maxlen, nbytes;
	__u8 *mem;
	int is_header = 0;

	if (buf == NULL)
		return 0;

	/* The magic may start at offset 2 or 3 (optional prefix byte). */
	if ((len >= 14 && memcmp(&data[2], hdr, 12) == 0) ||
	    (len >= 15 && memcmp(&data[3], hdr, 12) == 0)) {
		uvc_trace(UVC_TRACE_FRAME, "iSight header found\n");
		is_header = 1;
	}

	/* Synchronize to the input stream by waiting for a header packet. */
	if (buf->state != UVC_BUF_STATE_ACTIVE) {
		if (!is_header) {
			uvc_trace(UVC_TRACE_FRAME, "Dropping packet (out of "
				  "sync).\n");
			return 0;
		}

		buf->state = UVC_BUF_STATE_ACTIVE;
	}

	/* Mark the buffer as done if we're at the beginning of a new frame.
	 *
	 * Empty buffers (bytesused == 0) don't trigger end of frame detection
	 * as it doesn't make sense to return an empty buffer.
	 */
	if (is_header && buf->buf.bytesused != 0) {
		buf->state = UVC_BUF_STATE_DONE;
		return -EAGAIN;
	}

	/* Copy the video data to the buffer. Skip header packets, as they
	 * contain no data.
	 */
	if (!is_header) {
		maxlen = buf->buf.length - buf->buf.bytesused;
		mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
		nbytes = min(len, maxlen);
		memcpy(mem, data, nbytes);
		buf->buf.bytesused += nbytes;

		if (len > maxlen || buf->buf.bytesused == buf->buf.length) {
			uvc_trace(UVC_TRACE_FRAME, "Frame complete "
				  "(overflow).\n");
			buf->state = UVC_BUF_STATE_DONE;
		}
	}

	return 0;
}

void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
		struct uvc_buffer *buf)
{
	int ret, i;

	for (i = 0; i < urb->number_of_packets; ++i) {
		if (urb->iso_frame_desc[i].status < 0) {
			uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
				  "lost (%d).\n",
				  urb->iso_frame_desc[i].status);
		}

		/* Decode the payload packet.
		 * uvc_video_decode is entered twice when a frame transition
		 * has been detected because the end of frame can only be
		 * reliably detected when the first packet of the new frame
		 * is processed. The first pass detects the transition and
		 * closes the previous frame's buffer, the second pass
		 * processes the data of the first payload of the new frame.
		 */
		do {
			ret = isight_decode(&stream->queue, buf,
					urb->transfer_buffer +
					urb->iso_frame_desc[i].offset,
					urb->iso_frame_desc[i].actual_length);

			if (buf == NULL)
				break;

			if (buf->state == UVC_BUF_STATE_DONE ||
			    buf->state == UVC_BUF_STATE_ERROR)
				buf = uvc_queue_next_buffer(&stream->queue,
							    buf);
		} while (ret == -EAGAIN);
	}
}
gpl-2.0
qiubing/vanet
drivers/media/video/uvc/uvc_isight.c
3323
3953
/*
 * uvc_isight.c --  USB Video Class driver - iSight support
 *
 * Copyright (C) 2006-2007
 *		Ivan N. Zlatev <contact@i-nz.net>
 * Copyright (C) 2008-2009
 *		Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include "uvcvideo.h"

/* Built-in iSight webcams implement most of UVC 1.0 except a
 * different packet format. Instead of sending a header at the
 * beginning of each isochronous transfer payload, the webcam sends a
 * single header per image (on its own in a packet), followed by
 * packets containing data only.
 *
 * Offset	Size (bytes)	Description
 * ------------------------------------------------------------------
 * 0x00		1		Header length
 * 0x01		1		Flags (UVC-compliant)
 * 0x02		4		Always equal to '11223344'
 * 0x06		8		Always equal to 'deadbeefdeadface'
 * 0x0e		16		Unknown
 *
 * The header can be prefixed by an optional, unknown-purpose byte.
 */

/*
 * Decode one isochronous packet into @buf. Returns 0 normally, or -EAGAIN
 * when an image header is found while the current buffer already holds
 * data: the buffer is then complete and the caller must re-run the decode
 * on the same packet with the next buffer.
 */
static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
		const __u8 *data, unsigned int len)
{
	static const __u8 hdr[] = {
		0x11, 0x22, 0x33, 0x44,
		0xde, 0xad, 0xbe, 0xef,
		0xde, 0xad, 0xfa, 0xce
	};

	unsigned int maxlen, nbytes;
	__u8 *mem;
	int is_header = 0;

	if (buf == NULL)
		return 0;

	/* The magic may start at offset 2 or 3 (optional prefix byte). */
	if ((len >= 14 && memcmp(&data[2], hdr, 12) == 0) ||
	    (len >= 15 && memcmp(&data[3], hdr, 12) == 0)) {
		uvc_trace(UVC_TRACE_FRAME, "iSight header found\n");
		is_header = 1;
	}

	/* Synchronize to the input stream by waiting for a header packet. */
	if (buf->state != UVC_BUF_STATE_ACTIVE) {
		if (!is_header) {
			uvc_trace(UVC_TRACE_FRAME, "Dropping packet (out of "
				  "sync).\n");
			return 0;
		}

		buf->state = UVC_BUF_STATE_ACTIVE;
	}

	/* Mark the buffer as done if we're at the beginning of a new frame.
	 *
	 * Empty buffers (bytesused == 0) don't trigger end of frame detection
	 * as it doesn't make sense to return an empty buffer.
	 */
	if (is_header && buf->buf.bytesused != 0) {
		buf->state = UVC_BUF_STATE_DONE;
		return -EAGAIN;
	}

	/* Copy the video data to the buffer. Skip header packets, as they
	 * contain no data.
	 */
	if (!is_header) {
		maxlen = buf->buf.length - buf->buf.bytesused;
		mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
		nbytes = min(len, maxlen);
		memcpy(mem, data, nbytes);
		buf->buf.bytesused += nbytes;

		if (len > maxlen || buf->buf.bytesused == buf->buf.length) {
			uvc_trace(UVC_TRACE_FRAME, "Frame complete "
				  "(overflow).\n");
			buf->state = UVC_BUF_STATE_DONE;
		}
	}

	return 0;
}

void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
		struct uvc_buffer *buf)
{
	int ret, i;

	for (i = 0; i < urb->number_of_packets; ++i) {
		if (urb->iso_frame_desc[i].status < 0) {
			uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
				  "lost (%d).\n",
				  urb->iso_frame_desc[i].status);
		}

		/* Decode the payload packet.
		 * uvc_video_decode is entered twice when a frame transition
		 * has been detected because the end of frame can only be
		 * reliably detected when the first packet of the new frame
		 * is processed. The first pass detects the transition and
		 * closes the previous frame's buffer, the second pass
		 * processes the data of the first payload of the new frame.
		 */
		do {
			ret = isight_decode(&stream->queue, buf,
					urb->transfer_buffer +
					urb->iso_frame_desc[i].offset,
					urb->iso_frame_desc[i].actual_length);

			if (buf == NULL)
				break;

			if (buf->state == UVC_BUF_STATE_DONE ||
			    buf->state == UVC_BUF_STATE_ERROR)
				buf = uvc_queue_next_buffer(&stream->queue,
							    buf);
		} while (ret == -EAGAIN);
	}
}
gpl-2.0
corcor67/SMPL_M8_GPE
net/tipc/socket.c
3579
47128
/* * net/tipc/socket.c: TIPC socket API * * Copyright (c) 2001-2007, Ericsson AB * Copyright (c) 2004-2008, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <linux/export.h>
#include <net/sock.h>

#include "core.h"
#include "port.h"

#define SS_LISTENING	-1	/* socket is listening */
#define SS_READY	-2	/* socket is connectionless */

#define OVERLOAD_LIMIT_BASE	5000
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */

struct tipc_sock {
	struct sock sk;			/* must stay first: tipc_sk() casts */
	struct tipc_port *p;		/* associated TIPC port */
	struct tipc_portid peer_name;	/* port ID of connected peer */
	unsigned int conn_timeout;	/* connect timeout (ms) */
};

#define tipc_sk(sk) ((struct tipc_sock *)(sk))
#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))

#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
			(sock->state == SS_DISCONNECTING))

static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
static void wakeupdispatch(struct tipc_port *tport);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;

static struct proto tipc_proto;

static int sockets_enabled;

static atomic_t tipc_queue_size = ATOMIC_INIT(0);

/*
 * Revised TIPC socket locking policy:
 *
 * Most socket operations take the standard socket lock when they start
 * and hold it until they finish (or until they need to sleep). Acquiring
 * this lock grants the owner exclusive access to the fields of the socket
 * data structures, with the exception of the backlog queue. A few socket
 * operations can be done without taking the socket lock because they only
 * read socket information that never changes during the life of the socket.
 *
 * Socket operations may acquire the lock for the associated TIPC port if they
 * need to perform an operation on the port. If any routine needs to acquire
 * both the socket lock and the port lock it must take the socket lock first
 * to avoid the risk of deadlock.
 *
 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since it is invoked at the BH level and cannot block.
 * Instead, it checks to see if the socket lock is currently owned by someone,
 * and either handles the message itself or adds it to the socket's backlog
 * queue; in the latter case the queued message is processed once the process
 * owning the socket lock releases it.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
 * the problem of a blocked socket operation preventing any other operations
 * from occurring. However, applications must be careful if they have
 * multiple threads trying to send (or receive) on the same socket, as these
 * operations might interfere with each other. For example, doing a connect
 * and a receive at the same time might allow the receive to consume the
 * ACK message meant for the connect. While additional work could be done
 * to try and overcome this, it doesn't seem to be worthwhile at the present.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
 * that another operation that must be performed in a non-blocking manner is
 * not delayed for very long because the lock has already been taken.
 *
 * NOTE: This code assumes that certain fields of a port/socket pair are
 * constant over its lifetime; such fields can be examined without taking
 * the socket lock and/or port lock, and do not need to be re-read even
 * after resuming processing after waiting. These fields include:
 *   - socket type
 *   - pointer to socket sk structure (aka tipc_sock structure)
 *   - pointer to port structure
 *   - port reference
 */

/**
 * advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
	atomic_dec(&tipc_queue_size);
}

/**
 * discard_rx_queue - discard all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void discard_rx_queue(struct sock *sk)
{
	struct sk_buff *buf;

	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
		atomic_dec(&tipc_queue_size);
		kfree_skb(buf);
	}
}

/**
 * reject_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void reject_rx_queue(struct sock *sk)
{
	struct sk_buff *buf;

	/* Each rejected message is bounced back with TIPC_ERR_NO_PORT */
	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		atomic_dec(&tipc_queue_size);
	}
}

/**
 * tipc_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	const struct proto_ops *ops;
	socket_state state;
	struct sock *sk;
	struct tipc_port *tp_ptr;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	/* Pick ops and initial state based on the requested socket type */
	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		state = SS_READY;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
	if (sk == NULL)
		return -ENOMEM;

	/* Allocate TIPC port for socket to use */
	tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
				     TIPC_LOW_IMPORTANCE);
	if (unlikely(!tp_ptr)) {
		sk_free(sk);
		return -ENOMEM;
	}

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;

	sock_init_data(sock, sk);
	sk->sk_backlog_rcv = backlog_rcv;
	tipc_sk(sk)->p = tp_ptr;
	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;

	spin_unlock_bh(tp_ptr->lock);

	if (sock->state == SS_READY) {
		tipc_set_portunreturnable(tp_ptr->ref, 1);
		if (sock->type == SOCK_DGRAM)
			tipc_set_portunreliable(tp_ptr->ref, 1);
	}

	return 0;
}

/**
 * release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport;
	struct sk_buff *buf;
	int res;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tport = tipc_sk_port(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	while (sock->state != SS_DISCONNECTING) {
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf == NULL)
			break;
		atomic_dec(&tipc_queue_size);
		if (TIPC_SKB_CB(buf)->handle != 0)
			kfree_skb(buf);
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tipc_disconnect(tport->ref);
			}
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		}
	}

	/*
	 * Delete TIPC port; this ensures no more messages are queued
	 * (also disconnects an active connection & sends a 'FIN-' to peer)
	 */
	res = tipc_deleteport(tport->ref);

	/* Discard any remaining (connection-based) messages in receive queue */
	discard_rx_queue(sk);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	sock_put(sk);
	sock->sk = NULL;

	return res;
}

/**
 * bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	u32 portref = tipc_sk_port(sock->sk)->ref;

	/* A zero-length address means "withdraw all names" */
	if (unlikely(!uaddr_len))
		return tipc_withdraw(portref, 0, NULL);

	if (uaddr_len < sizeof(struct sockaddr_tipc))
		return -EINVAL;
	if (addr->family != AF_TIPC)
		return -EAFNOSUPPORT;

	/* A single name is treated as a one-element name sequence */
	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
		return -EAFNOSUPPORT;

	/* Reserved name types may not be bound by ordinary sockets */
	if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES)
		return -EACCES;

	return (addr->scope > 0) ?
		tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
		tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
}

/**
 * get_name - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
*/ static int get_name(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; struct tipc_sock *tsock = tipc_sk(sock->sk); memset(addr, 0, sizeof(*addr)); if (peer) { if ((sock->state != SS_CONNECTED) && ((peer != 2) || (sock->state != SS_DISCONNECTING))) return -ENOTCONN; addr->addr.id.ref = tsock->peer_name.ref; addr->addr.id.node = tsock->peer_name.node; } else { addr->addr.id.ref = tsock->p->ref; addr->addr.id.node = tipc_own_addr; } *uaddr_len = sizeof(*addr); addr->addrtype = TIPC_ADDR_ID; addr->family = AF_TIPC; addr->scope = 0; addr->addr.name.domain = 0; return 0; } /** * poll - read and possibly block on pollmask * @file: file structure associated with the socket * @sock: socket for which to calculate the poll bits * @wait: ??? * * Returns pollmask value * * COMMENTARY: * It appears that the usual socket locking mechanisms are not useful here * since the pollmask info is potentially out-of-date the moment this routine * exits. TCP and other protocols seem to rely on higher level poll routines * to handle any preventable race conditions, so TIPC will do the same ... * * TIPC sets the returned events as follows: * * socket state flags set * ------------ --------- * unconnected no read flags * no write flags * * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue * no write flags * * connected POLLIN/POLLRDNORM if data in rx queue * POLLOUT if port is not congested * * disconnecting POLLIN/POLLRDNORM/POLLHUP * no write flags * * listening POLLIN if SYN in rx queue * no write flags * * ready POLLIN/POLLRDNORM if data in rx queue * [connectionless] POLLOUT (since port cannot be congested) * * IMPORTANT: The fact that a read or write operation is indicated does NOT * imply that the operation will succeed, merely that it should be performed * and will not block. 
*/ static unsigned int poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; u32 mask = 0; poll_wait(file, sk_sleep(sk), wait); switch ((int)sock->state) { case SS_READY: case SS_CONNECTED: if (!tipc_sk_port(sk)->congested) mask |= POLLOUT; /* fall thru' */ case SS_CONNECTING: case SS_LISTENING: if (!skb_queue_empty(&sk->sk_receive_queue)) mask |= (POLLIN | POLLRDNORM); break; case SS_DISCONNECTING: mask = (POLLIN | POLLRDNORM | POLLHUP); break; } return mask; } /** * dest_name_check - verify user is permitted to send to specified port name * @dest: destination address * @m: descriptor for message to be sent * * Prevents restricted configuration commands from being issued by * unauthorized users. * * Returns 0 if permission is granted, otherwise errno */ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) { struct tipc_cfg_msg_hdr hdr; if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES)) return 0; if (likely(dest->addr.name.name.type == TIPC_TOP_SRV)) return 0; if (likely(dest->addr.name.name.type != TIPC_CFG_SRV)) return -EACCES; if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr))) return -EMSGSIZE; if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr))) return -EFAULT; if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN))) return -EACCES; return 0; } /** * send_msg - send message in connectionless manner * @iocb: if NULL, indicates that socket lock is already held * @sock: socket structure * @m: message to send * @total_len: length of message * * Message must have an destination specified explicitly. * Used for SOCK_RDM and SOCK_DGRAM messages, * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 
* * Returns the number of bytes sent on success, or errno otherwise */ static int send_msg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; int needs_conn; long timeout_val; int res = -EINVAL; if (unlikely(!dest)) return -EDESTADDRREQ; if (unlikely((m->msg_namelen < sizeof(*dest)) || (dest->family != AF_TIPC))) return -EINVAL; if ((total_len > TIPC_MAX_USER_MSG_SIZE) || (m->msg_iovlen > (unsigned)INT_MAX)) return -EMSGSIZE; if (iocb) lock_sock(sk); needs_conn = (sock->state != SS_READY); if (unlikely(needs_conn)) { if (sock->state == SS_LISTENING) { res = -EPIPE; goto exit; } if (sock->state != SS_UNCONNECTED) { res = -EISCONN; goto exit; } if ((tport->published) || ((sock->type == SOCK_STREAM) && (total_len != 0))) { res = -EOPNOTSUPP; goto exit; } if (dest->addrtype == TIPC_ADDR_NAME) { tport->conn_type = dest->addr.name.name.type; tport->conn_instance = dest->addr.name.name.instance; } /* Abort any pending connection attempts (very unlikely) */ reject_rx_queue(sk); } timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); do { if (dest->addrtype == TIPC_ADDR_NAME) { res = dest_name_check(dest, m); if (res) break; res = tipc_send2name(tport->ref, &dest->addr.name.name, dest->addr.name.domain, m->msg_iovlen, m->msg_iov, total_len); } else if (dest->addrtype == TIPC_ADDR_ID) { res = tipc_send2port(tport->ref, &dest->addr.id, m->msg_iovlen, m->msg_iov, total_len); } else if (dest->addrtype == TIPC_ADDR_MCAST) { if (needs_conn) { res = -EOPNOTSUPP; break; } res = dest_name_check(dest, m); if (res) break; res = tipc_multicast(tport->ref, &dest->addr.nameseq, m->msg_iovlen, m->msg_iov, total_len); } if (likely(res != -ELINKCONG)) { if (needs_conn && (res >= 0)) sock->state = SS_CONNECTING; break; } if (timeout_val <= 0L) { res = timeout_val ? 
timeout_val : -EWOULDBLOCK; break; } release_sock(sk); timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk), !tport->congested, timeout_val); lock_sock(sk); } while (1); exit: if (iocb) release_sock(sk); return res; } /** * send_packet - send a connection-oriented message * @iocb: if NULL, indicates that socket lock is already held * @sock: socket structure * @m: message to send * @total_len: length of message * * Used for SOCK_SEQPACKET messages and SOCK_STREAM data. * * Returns the number of bytes sent on success, or errno otherwise */ static int send_packet(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; long timeout_val; int res; /* Handle implied connection establishment */ if (unlikely(dest)) return send_msg(iocb, sock, m, total_len); if ((total_len > TIPC_MAX_USER_MSG_SIZE) || (m->msg_iovlen > (unsigned)INT_MAX)) return -EMSGSIZE; if (iocb) lock_sock(sk); timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); do { if (unlikely(sock->state != SS_CONNECTED)) { if (sock->state == SS_DISCONNECTING) res = -EPIPE; else res = -ENOTCONN; break; } res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov, total_len); if (likely(res != -ELINKCONG)) break; if (timeout_val <= 0L) { res = timeout_val ? timeout_val : -EWOULDBLOCK; break; } release_sock(sk); timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk), (!tport->congested || !tport->connected), timeout_val); lock_sock(sk); } while (1); if (iocb) release_sock(sk); return res; } /** * send_stream - send stream-oriented data * @iocb: (unused) * @sock: socket structure * @m: data to send * @total_len: total length of data to be sent * * Used for SOCK_STREAM data. 
* * Returns the number of bytes sent on success (or partial success), * or errno if no data sent */ static int send_stream(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); struct msghdr my_msg; struct iovec my_iov; struct iovec *curr_iov; int curr_iovlen; char __user *curr_start; u32 hdr_size; int curr_left; int bytes_to_send; int bytes_sent; int res; lock_sock(sk); /* Handle special cases where there is no connection */ if (unlikely(sock->state != SS_CONNECTED)) { if (sock->state == SS_UNCONNECTED) { res = send_packet(NULL, sock, m, total_len); goto exit; } else if (sock->state == SS_DISCONNECTING) { res = -EPIPE; goto exit; } else { res = -ENOTCONN; goto exit; } } if (unlikely(m->msg_name)) { res = -EISCONN; goto exit; } if ((total_len > (unsigned)INT_MAX) || (m->msg_iovlen > (unsigned)INT_MAX)) { res = -EMSGSIZE; goto exit; } /* * Send each iovec entry using one or more messages * * Note: This algorithm is good for the most likely case * (i.e. one large iovec entry), but could be improved to pass sets * of small iovec entries into send_packet(). 
*/ curr_iov = m->msg_iov; curr_iovlen = m->msg_iovlen; my_msg.msg_iov = &my_iov; my_msg.msg_iovlen = 1; my_msg.msg_flags = m->msg_flags; my_msg.msg_name = NULL; bytes_sent = 0; hdr_size = msg_hdr_sz(&tport->phdr); while (curr_iovlen--) { curr_start = curr_iov->iov_base; curr_left = curr_iov->iov_len; while (curr_left) { bytes_to_send = tport->max_pkt - hdr_size; if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE) bytes_to_send = TIPC_MAX_USER_MSG_SIZE; if (curr_left < bytes_to_send) bytes_to_send = curr_left; my_iov.iov_base = curr_start; my_iov.iov_len = bytes_to_send; res = send_packet(NULL, sock, &my_msg, bytes_to_send); if (res < 0) { if (bytes_sent) res = bytes_sent; goto exit; } curr_left -= bytes_to_send; curr_start += bytes_to_send; bytes_sent += bytes_to_send; } curr_iov++; } res = bytes_sent; exit: release_sock(sk); return res; } /** * auto_connect - complete connection setup to a remote port * @sock: socket structure * @msg: peer's response message * * Returns 0 on success, errno otherwise */ static int auto_connect(struct socket *sock, struct tipc_msg *msg) { struct tipc_sock *tsock = tipc_sk(sock->sk); if (msg_errcode(msg)) { sock->state = SS_DISCONNECTING; return -ECONNREFUSED; } tsock->peer_name.ref = msg_origport(msg); tsock->peer_name.node = msg_orignode(msg); tipc_connect2port(tsock->p->ref, &tsock->peer_name); tipc_set_portimportance(tsock->p->ref, msg_importance(msg)); sock->state = SS_CONNECTED; return 0; } /** * set_orig_addr - capture sender's address for received message * @m: descriptor for message info * @msg: received message header * * Note: Address is not captured if not requested by receiver. 
*/ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) { struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name; if (addr) { addr->family = AF_TIPC; addr->addrtype = TIPC_ADDR_ID; addr->addr.id.ref = msg_origport(msg); addr->addr.id.node = msg_orignode(msg); addr->addr.name.domain = 0; /* could leave uninitialized */ addr->scope = 0; /* could leave uninitialized */ m->msg_namelen = sizeof(struct sockaddr_tipc); } } /** * anc_data_recv - optionally capture ancillary data for received message * @m: descriptor for message info * @msg: received message header * @tport: TIPC port associated with message * * Note: Ancillary data is not captured if not requested by receiver. * * Returns 0 if successful, otherwise errno */ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, struct tipc_port *tport) { u32 anc_data[3]; u32 err; u32 dest_type; int has_name; int res; if (likely(m->msg_controllen == 0)) return 0; /* Optionally capture errored message object(s) */ err = msg ? msg_errcode(msg) : 0; if (unlikely(err)) { anc_data[0] = err; anc_data[1] = msg_data_sz(msg); res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data); if (res) return res; if (anc_data[1]) { res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], msg_data(msg)); if (res) return res; } } /* Optionally capture message destination object */ dest_type = msg ? 
msg_type(msg) : TIPC_DIRECT_MSG; switch (dest_type) { case TIPC_NAMED_MSG: has_name = 1; anc_data[0] = msg_nametype(msg); anc_data[1] = msg_namelower(msg); anc_data[2] = msg_namelower(msg); break; case TIPC_MCAST_MSG: has_name = 1; anc_data[0] = msg_nametype(msg); anc_data[1] = msg_namelower(msg); anc_data[2] = msg_nameupper(msg); break; case TIPC_CONN_MSG: has_name = (tport->conn_type != 0); anc_data[0] = tport->conn_type; anc_data[1] = tport->conn_instance; anc_data[2] = tport->conn_instance; break; default: has_name = 0; } if (has_name) { res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data); if (res) return res; } return 0; } /** * recv_msg - receive packet-oriented message * @iocb: (unused) * @m: descriptor for message info * @buf_len: total size of user buffer area * @flags: receive flags * * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. * If the complete message doesn't fit in user area, truncate it. * * Returns size of returned message data, errno otherwise */ static int recv_msg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t buf_len, int flags) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); struct sk_buff *buf; struct tipc_msg *msg; long timeout; unsigned int sz; u32 err; int res; /* Catch invalid receive requests */ if (unlikely(!buf_len)) return -EINVAL; lock_sock(sk); if (unlikely(sock->state == SS_UNCONNECTED)) { res = -ENOTCONN; goto exit; } timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); restart: /* Look for a message in receive queue; wait if necessary */ while (skb_queue_empty(&sk->sk_receive_queue)) { if (sock->state == SS_DISCONNECTING) { res = -ENOTCONN; goto exit; } if (timeout <= 0L) { res = timeout ? 
timeout : -EWOULDBLOCK; goto exit; } release_sock(sk); timeout = wait_event_interruptible_timeout(*sk_sleep(sk), tipc_rx_ready(sock), timeout); lock_sock(sk); } /* Look at first message in receive queue */ buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); err = msg_errcode(msg); /* Complete connection setup for an implied connect */ if (unlikely(sock->state == SS_CONNECTING)) { res = auto_connect(sock, msg); if (res) goto exit; } /* Discard an empty non-errored message & try again */ if ((!sz) && (!err)) { advance_rx_queue(sk); goto restart; } /* Capture sender's address (optional) */ set_orig_addr(m, msg); /* Capture ancillary data (optional) */ res = anc_data_recv(m, msg, tport); if (res) goto exit; /* Capture message data (if valid) & compute return value (always) */ if (!err) { if (unlikely(buf_len < sz)) { sz = buf_len; m->msg_flags |= MSG_TRUNC; } res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg), m->msg_iov, sz); if (res) goto exit; res = sz; } else { if ((sock->state == SS_READY) || ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)) res = 0; else res = -ECONNRESET; } /* Consume received message (optional) */ if (likely(!(flags & MSG_PEEK))) { if ((sock->state != SS_READY) && (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) tipc_acknowledge(tport->ref, tport->conn_unacked); advance_rx_queue(sk); } exit: release_sock(sk); return res; } /** * recv_stream - receive stream-oriented data * @iocb: (unused) * @m: descriptor for message info * @buf_len: total size of user buffer area * @flags: receive flags * * Used for SOCK_STREAM messages only. If not enough data is available * will optionally wait for more; never truncates data. 
* * Returns size of returned message data, errno otherwise */ static int recv_stream(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t buf_len, int flags) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); struct sk_buff *buf; struct tipc_msg *msg; long timeout; unsigned int sz; int sz_to_copy, target, needed; int sz_copied = 0; u32 err; int res = 0; /* Catch invalid receive attempts */ if (unlikely(!buf_len)) return -EINVAL; lock_sock(sk); if (unlikely((sock->state == SS_UNCONNECTED) || (sock->state == SS_CONNECTING))) { res = -ENOTCONN; goto exit; } target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); restart: /* Look for a message in receive queue; wait if necessary */ while (skb_queue_empty(&sk->sk_receive_queue)) { if (sock->state == SS_DISCONNECTING) { res = -ENOTCONN; goto exit; } if (timeout <= 0L) { res = timeout ? timeout : -EWOULDBLOCK; goto exit; } release_sock(sk); timeout = wait_event_interruptible_timeout(*sk_sleep(sk), tipc_rx_ready(sock), timeout); lock_sock(sk); } /* Look at first message in receive queue */ buf = skb_peek(&sk->sk_receive_queue); msg = buf_msg(buf); sz = msg_data_sz(msg); err = msg_errcode(msg); /* Discard an empty non-errored message & try again */ if ((!sz) && (!err)) { advance_rx_queue(sk); goto restart; } /* Optionally capture sender's address & ancillary data of first msg */ if (sz_copied == 0) { set_orig_addr(m, msg); res = anc_data_recv(m, msg, tport); if (res) goto exit; } /* Capture message data (if valid) & compute return value (always) */ if (!err) { u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle); sz -= offset; needed = (buf_len - sz_copied); sz_to_copy = (sz <= needed) ? 
sz : needed; res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset, m->msg_iov, sz_to_copy); if (res) goto exit; sz_copied += sz_to_copy; if (sz_to_copy < sz) { if (!(flags & MSG_PEEK)) TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)(offset + sz_to_copy); goto exit; } } else { if (sz_copied != 0) goto exit; /* can't add error msg to valid data */ if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control) res = 0; else res = -ECONNRESET; } /* Consume received message (optional) */ if (likely(!(flags & MSG_PEEK))) { if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) tipc_acknowledge(tport->ref, tport->conn_unacked); advance_rx_queue(sk); } /* Loop around if more data is required */ if ((sz_copied < buf_len) && /* didn't get all requested data */ (!skb_queue_empty(&sk->sk_receive_queue) || (sz_copied < target)) && /* and more is ready or required */ (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */ (!err)) /* and haven't reached a FIN */ goto restart; exit: release_sock(sk); return sz_copied ? sz_copied : res; } /** * rx_queue_full - determine if receive queue can accept another message * @msg: message to be added to queue * @queue_size: current size of queue * @base: nominal maximum size of queue * * Returns 1 if queue is unable to accept message, 0 otherwise */ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) { u32 threshold; u32 imp = msg_importance(msg); if (imp == TIPC_LOW_IMPORTANCE) threshold = base; else if (imp == TIPC_MEDIUM_IMPORTANCE) threshold = base * 2; else if (imp == TIPC_HIGH_IMPORTANCE) threshold = base * 100; else return 0; if (msg_connected(msg)) threshold *= 4; return queue_size >= threshold; } /** * filter_rcv - validate incoming message * @sk: socket * @buf: message * * Enqueues message on receive queue if acceptable; optionally handles * disconnect indication for a connected socket. * * Called with socket lock already taken; port lock may also be taken. 
* * Returns TIPC error status code (TIPC_OK if message is not to be rejected) */ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) { struct socket *sock = sk->sk_socket; struct tipc_msg *msg = buf_msg(buf); u32 recv_q_len; /* Reject message if it is wrong sort of message for socket */ /* * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD? * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC */ if (sock->state == SS_READY) { if (msg_connected(msg)) return TIPC_ERR_NO_PORT; } else { if (msg_mcast(msg)) return TIPC_ERR_NO_PORT; if (sock->state == SS_CONNECTED) { if (!msg_connected(msg)) return TIPC_ERR_NO_PORT; } else if (sock->state == SS_CONNECTING) { if (!msg_connected(msg) && (msg_errcode(msg) == 0)) return TIPC_ERR_NO_PORT; } else if (sock->state == SS_LISTENING) { if (msg_connected(msg) || msg_errcode(msg)) return TIPC_ERR_NO_PORT; } else if (sock->state == SS_DISCONNECTING) { return TIPC_ERR_NO_PORT; } else /* (sock->state == SS_UNCONNECTED) */ { if (msg_connected(msg) || msg_errcode(msg)) return TIPC_ERR_NO_PORT; } } /* Reject message if there isn't room to queue it */ recv_q_len = (u32)atomic_read(&tipc_queue_size); if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) { if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE)) return TIPC_ERR_OVERLOAD; } recv_q_len = skb_queue_len(&sk->sk_receive_queue); if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) { if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2)) return TIPC_ERR_OVERLOAD; } /* Enqueue message (finally!) 
*/ TIPC_SKB_CB(buf)->handle = 0; atomic_inc(&tipc_queue_size); __skb_queue_tail(&sk->sk_receive_queue, buf); /* Initiate connection termination for an incoming 'FIN' */ if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { sock->state = SS_DISCONNECTING; tipc_disconnect_port(tipc_sk_port(sk)); } if (waitqueue_active(sk_sleep(sk))) wake_up_interruptible(sk_sleep(sk)); return TIPC_OK; } /** * backlog_rcv - handle incoming message from backlog queue * @sk: socket * @buf: message * * Caller must hold socket lock, but not port lock. * * Returns 0 */ static int backlog_rcv(struct sock *sk, struct sk_buff *buf) { u32 res; res = filter_rcv(sk, buf); if (res) tipc_reject_msg(buf, res); return 0; } /** * dispatch - handle incoming message * @tport: TIPC port that received message * @buf: message * * Called with port lock already taken. * * Returns TIPC error status code (TIPC_OK if message is not to be rejected) */ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) { struct sock *sk = (struct sock *)tport->usr_handle; u32 res; /* * Process message if socket is unlocked; otherwise add to backlog queue * * This code is based on sk_receive_skb(), but must be distinct from it * since a TIPC-specific filter/reject mechanism is utilized */ bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { res = filter_rcv(sk, buf); } else { if (sk_add_backlog(sk, buf)) res = TIPC_ERR_OVERLOAD; else res = TIPC_OK; } bh_unlock_sock(sk); return res; } /** * wakeupdispatch - wake up port after congestion * @tport: port to wakeup * * Called with port lock already taken. 
*/ static void wakeupdispatch(struct tipc_port *tport) { struct sock *sk = (struct sock *)tport->usr_handle; if (waitqueue_active(sk_sleep(sk))) wake_up_interruptible(sk_sleep(sk)); } /** * connect - establish a connection to another TIPC port * @sock: socket structure * @dest: socket address for destination port * @destlen: size of socket address data structure * @flags: file-related flags associated with socket * * Returns 0 on success, errno otherwise */ static int connect(struct socket *sock, struct sockaddr *dest, int destlen, int flags) { struct sock *sk = sock->sk; struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; struct msghdr m = {NULL,}; struct sk_buff *buf; struct tipc_msg *msg; unsigned int timeout; int res; lock_sock(sk); /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ if (sock->state == SS_READY) { res = -EOPNOTSUPP; goto exit; } /* For now, TIPC does not support the non-blocking form of connect() */ if (flags & O_NONBLOCK) { res = -EOPNOTSUPP; goto exit; } /* Issue Posix-compliant error code if socket is in the wrong state */ if (sock->state == SS_LISTENING) { res = -EOPNOTSUPP; goto exit; } if (sock->state == SS_CONNECTING) { res = -EALREADY; goto exit; } if (sock->state != SS_UNCONNECTED) { res = -EISCONN; goto exit; } /* * Reject connection attempt using multicast address * * Note: send_msg() validates the rest of the address fields, * so there's no need to do it here */ if (dst->addrtype == TIPC_ADDR_MCAST) { res = -EINVAL; goto exit; } /* Reject any messages already in receive queue (very unlikely) */ reject_rx_queue(sk); /* Send a 'SYN-' to destination */ m.msg_name = dest; m.msg_namelen = destlen; res = send_msg(NULL, sock, &m, 0); if (res < 0) goto exit; /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ timeout = tipc_sk(sk)->conn_timeout; release_sock(sk); res = wait_event_interruptible_timeout(*sk_sleep(sk), (!skb_queue_empty(&sk->sk_receive_queue) || (sock->state != SS_CONNECTING)), timeout ? 
(long)msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT); lock_sock(sk); if (res > 0) { buf = skb_peek(&sk->sk_receive_queue); if (buf != NULL) { msg = buf_msg(buf); res = auto_connect(sock, msg); if (!res) { if (!msg_data_sz(msg)) advance_rx_queue(sk); } } else { if (sock->state == SS_CONNECTED) res = -EISCONN; else res = -ECONNREFUSED; } } else { if (res == 0) res = -ETIMEDOUT; else ; /* leave "res" unchanged */ sock->state = SS_DISCONNECTING; } exit: release_sock(sk); return res; } /** * listen - allow socket to listen for incoming connections * @sock: socket structure * @len: (unused) * * Returns 0 on success, errno otherwise */ static int listen(struct socket *sock, int len) { struct sock *sk = sock->sk; int res; lock_sock(sk); if (sock->state != SS_UNCONNECTED) res = -EINVAL; else { sock->state = SS_LISTENING; res = 0; } release_sock(sk); return res; } /** * accept - wait for connection request * @sock: listening socket * @newsock: new socket that is to be connected * @flags: file-related flags associated with socket * * Returns 0 on success, errno otherwise */ static int accept(struct socket *sock, struct socket *new_sock, int flags) { struct sock *sk = sock->sk; struct sk_buff *buf; int res; lock_sock(sk); if (sock->state != SS_LISTENING) { res = -EINVAL; goto exit; } while (skb_queue_empty(&sk->sk_receive_queue)) { if (flags & O_NONBLOCK) { res = -EWOULDBLOCK; goto exit; } release_sock(sk); res = wait_event_interruptible(*sk_sleep(sk), (!skb_queue_empty(&sk->sk_receive_queue))); lock_sock(sk); if (res) goto exit; } buf = skb_peek(&sk->sk_receive_queue); res = tipc_create(sock_net(sock->sk), new_sock, 0, 0); if (!res) { struct sock *new_sk = new_sock->sk; struct tipc_sock *new_tsock = tipc_sk(new_sk); struct tipc_port *new_tport = new_tsock->p; u32 new_ref = new_tport->ref; struct tipc_msg *msg = buf_msg(buf); lock_sock(new_sk); /* * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ 
reject_rx_queue(new_sk); /* Connect new socket to it's peer */ new_tsock->peer_name.ref = msg_origport(msg); new_tsock->peer_name.node = msg_orignode(msg); tipc_connect2port(new_ref, &new_tsock->peer_name); new_sock->state = SS_CONNECTED; tipc_set_portimportance(new_ref, msg_importance(msg)); if (msg_named(msg)) { new_tport->conn_type = msg_nametype(msg); new_tport->conn_instance = msg_nameinst(msg); } /* * Respond to 'SYN-' by discarding it & returning 'ACK'-. * Respond to 'SYN+' by queuing it on new socket. */ if (!msg_data_sz(msg)) { struct msghdr m = {NULL,}; advance_rx_queue(sk); send_packet(NULL, new_sock, &m, 0); } else { __skb_dequeue(&sk->sk_receive_queue); __skb_queue_head(&new_sk->sk_receive_queue, buf); } release_sock(new_sk); } exit: release_sock(sk); return res; } /** * shutdown - shutdown socket connection * @sock: socket structure * @how: direction to close (must be SHUT_RDWR) * * Terminates connection (if necessary), then purges socket's receive queue. * * Returns 0 on success, errno otherwise */ static int shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); struct sk_buff *buf; int res; if (how != SHUT_RDWR) return -EINVAL; lock_sock(sk); switch (sock->state) { case SS_CONNECTING: case SS_CONNECTED: /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ restart: buf = __skb_dequeue(&sk->sk_receive_queue); if (buf) { atomic_dec(&tipc_queue_size); if (TIPC_SKB_CB(buf)->handle != 0) { kfree_skb(buf); goto restart; } tipc_disconnect(tport->ref); tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN); } else { tipc_shutdown(tport->ref); } sock->state = SS_DISCONNECTING; /* fall through */ case SS_DISCONNECTING: /* Discard any unreceived messages; wake up sleeping tasks */ discard_rx_queue(sk); if (waitqueue_active(sk_sleep(sk))) wake_up_interruptible(sk_sleep(sk)); res = 0; break; default: res = -ENOTCONN; } release_sock(sk); return res; } /** * setsockopt - set socket option * @sock: socket 
structure * @lvl: option level * @opt: option identifier * @ov: pointer to new option value * @ol: length of option value * * For stream sockets only, accepts and ignores all IPPROTO_TCP options * (to ease compatibility). * * Returns 0 on success, errno otherwise */ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov, unsigned int ol) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); u32 value; int res; if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) return 0; if (lvl != SOL_TIPC) return -ENOPROTOOPT; if (ol < sizeof(value)) return -EINVAL; res = get_user(value, (u32 __user *)ov); if (res) return res; lock_sock(sk); switch (opt) { case TIPC_IMPORTANCE: res = tipc_set_portimportance(tport->ref, value); break; case TIPC_SRC_DROPPABLE: if (sock->type != SOCK_STREAM) res = tipc_set_portunreliable(tport->ref, value); else res = -ENOPROTOOPT; break; case TIPC_DEST_DROPPABLE: res = tipc_set_portunreturnable(tport->ref, value); break; case TIPC_CONN_TIMEOUT: tipc_sk(sk)->conn_timeout = value; /* no need to set "res", since already 0 at this point */ break; default: res = -EINVAL; } release_sock(sk); return res; } /** * getsockopt - get socket option * @sock: socket structure * @lvl: option level * @opt: option identifier * @ov: receptacle for option value * @ol: receptacle for length of option value * * For stream sockets only, returns 0 length result for all IPPROTO_TCP options * (to ease compatibility). 
* * Returns 0 on success, errno otherwise */ static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov, int __user *ol) { struct sock *sk = sock->sk; struct tipc_port *tport = tipc_sk_port(sk); int len; u32 value; int res; if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) return put_user(0, ol); if (lvl != SOL_TIPC) return -ENOPROTOOPT; res = get_user(len, ol); if (res) return res; lock_sock(sk); switch (opt) { case TIPC_IMPORTANCE: res = tipc_portimportance(tport->ref, &value); break; case TIPC_SRC_DROPPABLE: res = tipc_portunreliable(tport->ref, &value); break; case TIPC_DEST_DROPPABLE: res = tipc_portunreturnable(tport->ref, &value); break; case TIPC_CONN_TIMEOUT: value = tipc_sk(sk)->conn_timeout; /* no need to set "res", since already 0 at this point */ break; case TIPC_NODE_RECVQ_DEPTH: value = (u32)atomic_read(&tipc_queue_size); break; case TIPC_SOCK_RECVQ_DEPTH: value = skb_queue_len(&sk->sk_receive_queue); break; default: res = -EINVAL; } release_sock(sk); if (res) return res; /* "get" failed */ if (len < sizeof(value)) return -EINVAL; if (copy_to_user(ov, &value, sizeof(value))) return -EFAULT; return put_user(sizeof(value), ol); } /** * Protocol switches for the various types of TIPC sockets */ static const struct proto_ops msg_ops = { .owner = THIS_MODULE, .family = AF_TIPC, .release = release, .bind = bind, .connect = connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = get_name, .poll = poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = shutdown, .setsockopt = setsockopt, .getsockopt = getsockopt, .sendmsg = send_msg, .recvmsg = recv_msg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage }; static const struct proto_ops packet_ops = { .owner = THIS_MODULE, .family = AF_TIPC, .release = release, .bind = bind, .connect = connect, .socketpair = sock_no_socketpair, .accept = accept, .getname = get_name, .poll = poll, .ioctl = sock_no_ioctl, .listen = listen, .shutdown = shutdown, 
.setsockopt = setsockopt, .getsockopt = getsockopt, .sendmsg = send_packet, .recvmsg = recv_msg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage }; static const struct proto_ops stream_ops = { .owner = THIS_MODULE, .family = AF_TIPC, .release = release, .bind = bind, .connect = connect, .socketpair = sock_no_socketpair, .accept = accept, .getname = get_name, .poll = poll, .ioctl = sock_no_ioctl, .listen = listen, .shutdown = shutdown, .setsockopt = setsockopt, .getsockopt = getsockopt, .sendmsg = send_stream, .recvmsg = recv_stream, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage }; static const struct net_proto_family tipc_family_ops = { .owner = THIS_MODULE, .family = AF_TIPC, .create = tipc_create }; static struct proto tipc_proto = { .name = "TIPC", .owner = THIS_MODULE, .obj_size = sizeof(struct tipc_sock) }; /** * tipc_socket_init - initialize TIPC socket interface * * Returns 0 on success, errno otherwise */ int tipc_socket_init(void) { int res; res = proto_register(&tipc_proto, 1); if (res) { err("Failed to register TIPC protocol type\n"); goto out; } res = sock_register(&tipc_family_ops); if (res) { err("Failed to register TIPC socket type\n"); proto_unregister(&tipc_proto); goto out; } sockets_enabled = 1; out: return res; } /** * tipc_socket_stop - stop TIPC socket interface */ void tipc_socket_stop(void) { if (!sockets_enabled) return; sockets_enabled = 0; sock_unregister(tipc_family_ops.family); proto_unregister(&tipc_proto); }
gpl-2.0
invisiblek/android_kernel_lge_msm8610
drivers/s390/char/sclp_cmd.c
3579
17533
/* * Copyright IBM Corp. 2007, 2009 * * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> */ #define KMSG_COMPONENT "sclp_cmd" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/completion.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/memory.h> #include <linux/platform_device.h> #include <asm/chpid.h> #include <asm/sclp.h> #include <asm/setup.h> #include <asm/ctl_reg.h> #include "sclp.h" #define SCLP_CMDW_READ_SCP_INFO 0x00020001 #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 struct read_info_sccb { struct sccb_header header; /* 0-7 */ u16 rnmax; /* 8-9 */ u8 rnsize; /* 10 */ u8 _reserved0[24 - 11]; /* 11-15 */ u8 loadparm[8]; /* 24-31 */ u8 _reserved1[48 - 32]; /* 32-47 */ u64 facilities; /* 48-55 */ u8 _reserved2[84 - 56]; /* 56-83 */ u8 fac84; /* 84 */ u8 _reserved3[91 - 85]; /* 85-90 */ u8 flags; /* 91 */ u8 _reserved4[100 - 92]; /* 92-99 */ u32 rnsize2; /* 100-103 */ u64 rnmax2; /* 104-111 */ u8 _reserved5[4096 - 112]; /* 112-4095 */ } __attribute__((packed, aligned(PAGE_SIZE))); static struct read_info_sccb __initdata early_read_info_sccb; static int __initdata early_read_info_sccb_valid; u64 sclp_facilities; static u8 sclp_fac84; static unsigned long long rzm; static unsigned long long rnmax; static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb) { int rc; __ctl_set_bit(0, 9); rc = sclp_service_call(cmd, sccb); if (rc) goto out; __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); local_irq_disable(); out: /* Contents of the sccb might have changed. 
*/ barrier(); __ctl_clear_bit(0, 9); return rc; } static void __init sclp_read_info_early(void) { int rc; int i; struct read_info_sccb *sccb; sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, SCLP_CMDW_READ_SCP_INFO}; sccb = &early_read_info_sccb; for (i = 0; i < ARRAY_SIZE(commands); i++) { do { memset(sccb, 0, sizeof(*sccb)); sccb->header.length = sizeof(*sccb); sccb->header.function_code = 0x80; sccb->header.control_mask[2] = 0x80; rc = sclp_cmd_sync_early(commands[i], sccb); } while (rc == -EBUSY); if (rc) break; if (sccb->header.response_code == 0x10) { early_read_info_sccb_valid = 1; break; } if (sccb->header.response_code != 0x1f0) break; } } void __init sclp_facilities_detect(void) { struct read_info_sccb *sccb; sclp_read_info_early(); if (!early_read_info_sccb_valid) return; sccb = &early_read_info_sccb; sclp_facilities = sccb->facilities; sclp_fac84 = sccb->fac84; rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; rzm <<= 20; } unsigned long long sclp_get_rnmax(void) { return rnmax; } unsigned long long sclp_get_rzm(void) { return rzm; } /* * This function will be called after sclp_facilities_detect(), which gets * called from early.c code. Therefore the sccb should have valid contents. 
*/ void __init sclp_get_ipl_info(struct sclp_ipl_info *info) { struct read_info_sccb *sccb; if (!early_read_info_sccb_valid) return; sccb = &early_read_info_sccb; info->is_valid = 1; if (sccb->flags & 0x2) info->has_dump = 1; memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN); } static void sclp_sync_callback(struct sclp_req *req, void *data) { struct completion *completion = data; complete(completion); } static int do_sync_request(sclp_cmdw_t cmd, void *sccb) { struct completion completion; struct sclp_req *request; int rc; request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return -ENOMEM; request->command = cmd; request->sccb = sccb; request->status = SCLP_REQ_FILLED; request->callback = sclp_sync_callback; request->callback_data = &completion; init_completion(&completion); /* Perform sclp request. */ rc = sclp_add_request(request); if (rc) goto out; wait_for_completion(&completion); /* Check response. */ if (request->status != SCLP_REQ_DONE) { pr_warning("sync request failed (cmd=0x%08x, " "status=0x%02x)\n", cmd, request->status); rc = -EIO; } out: kfree(request); return rc; } /* * CPU configuration related functions. 
*/ #define SCLP_CMDW_READ_CPU_INFO 0x00010001 #define SCLP_CMDW_CONFIGURE_CPU 0x00110001 #define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 struct read_cpu_info_sccb { struct sccb_header header; u16 nr_configured; u16 offset_configured; u16 nr_standby; u16 offset_standby; u8 reserved[4096 - 16]; } __attribute__((packed, aligned(PAGE_SIZE))); static void sclp_fill_cpu_info(struct sclp_cpu_info *info, struct read_cpu_info_sccb *sccb) { char *page = (char *) sccb; memset(info, 0, sizeof(*info)); info->configured = sccb->nr_configured; info->standby = sccb->nr_standby; info->combined = sccb->nr_configured + sccb->nr_standby; info->has_cpu_type = sclp_fac84 & 0x1; memcpy(&info->cpu, page + sccb->offset_configured, info->combined * sizeof(struct sclp_cpu_entry)); } int sclp_get_cpu_info(struct sclp_cpu_info *info) { int rc; struct read_cpu_info_sccb *sccb; if (!SCLP_HAS_CPU_INFO) return -EOPNOTSUPP; sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb); if (rc) goto out; if (sccb->header.response_code != 0x0010) { pr_warning("readcpuinfo failed (response=0x%04x)\n", sccb->header.response_code); rc = -EIO; goto out; } sclp_fill_cpu_info(info, sccb); out: free_page((unsigned long) sccb); return rc; } struct cpu_configure_sccb { struct sccb_header header; } __attribute__((packed, aligned(8))); static int do_cpu_configure(sclp_cmdw_t cmd) { struct cpu_configure_sccb *sccb; int rc; if (!SCLP_HAS_CPU_RECONFIG) return -EOPNOTSUPP; /* * This is not going to cross a page boundary since we force * kmalloc to have a minimum alignment of 8 bytes on s390. 
*/ sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: case 0x0120: break; default: pr_warning("configure cpu failed (cmd=0x%08x, " "response=0x%04x)\n", cmd, sccb->header.response_code); rc = -EIO; break; } out: kfree(sccb); return rc; } int sclp_cpu_configure(u8 cpu) { return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8); } int sclp_cpu_deconfigure(u8 cpu) { return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8); } #ifdef CONFIG_MEMORY_HOTPLUG static DEFINE_MUTEX(sclp_mem_mutex); static LIST_HEAD(sclp_mem_list); static u8 sclp_max_storage_id; static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; static int sclp_mem_state_changed; struct memory_increment { struct list_head list; u16 rn; int standby; int usecount; }; struct assign_storage_sccb { struct sccb_header header; u16 rn; } __packed; int arch_get_memory_phys_device(unsigned long start_pfn) { if (!rzm) return 0; return PFN_PHYS(start_pfn) >> ilog2(rzm); } static unsigned long long rn2addr(u16 rn) { return (unsigned long long) (rn - 1) * rzm; } static int do_assign_storage(sclp_cmdw_t cmd, u16 rn) { struct assign_storage_sccb *sccb; int rc; sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = PAGE_SIZE; sccb->rn = rn; rc = do_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: case 0x0120: break; default: pr_warning("assign storage failed (cmd=0x%08x, " "response=0x%04x, rn=0x%04x)\n", cmd, sccb->header.response_code, rn); rc = -EIO; break; } out: free_page((unsigned long) sccb); return rc; } static int sclp_assign_storage(u16 rn) { return do_assign_storage(0x000d0001, rn); } static int sclp_unassign_storage(u16 rn) { return do_assign_storage(0x000c0001, rn); } struct attach_storage_sccb { struct sccb_header header; u16 :16; u16 
assigned; u32 :32; u32 entries[0]; } __packed; static int sclp_attach_storage(u8 id) { struct attach_storage_sccb *sccb; int rc; int i; sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = PAGE_SIZE; rc = do_sync_request(0x00080001 | id << 8, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: set_bit(id, sclp_storage_ids); for (i = 0; i < sccb->assigned; i++) { if (sccb->entries[i]) sclp_unassign_storage(sccb->entries[i] >> 16); } break; default: rc = -EIO; break; } out: free_page((unsigned long) sccb); return rc; } static int sclp_mem_change_state(unsigned long start, unsigned long size, int online) { struct memory_increment *incr; unsigned long long istart; int rc = 0; list_for_each_entry(incr, &sclp_mem_list, list) { istart = rn2addr(incr->rn); if (start + size - 1 < istart) break; if (start > istart + rzm - 1) continue; if (online) { if (incr->usecount++) continue; /* * Don't break the loop if one assign fails. Loop may * be walked again on CANCEL and we can't save * information if state changed before or not. * So continue and increase usecount for all increments. */ rc |= sclp_assign_storage(incr->rn); } else { if (--incr->usecount) continue; sclp_unassign_storage(incr->rn); } } return rc ? 
-EIO : 0; } static int sclp_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long start, size; struct memory_notify *arg; unsigned char id; int rc = 0; arg = data; start = arg->start_pfn << PAGE_SHIFT; size = arg->nr_pages << PAGE_SHIFT; mutex_lock(&sclp_mem_mutex); for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1) sclp_attach_storage(id); switch (action) { case MEM_ONLINE: case MEM_GOING_OFFLINE: case MEM_CANCEL_OFFLINE: break; case MEM_GOING_ONLINE: rc = sclp_mem_change_state(start, size, 1); break; case MEM_CANCEL_ONLINE: sclp_mem_change_state(start, size, 0); break; case MEM_OFFLINE: sclp_mem_change_state(start, size, 0); break; default: rc = -EINVAL; break; } if (!rc) sclp_mem_state_changed = 1; mutex_unlock(&sclp_mem_mutex); return rc ? NOTIFY_BAD : NOTIFY_OK; } static struct notifier_block sclp_mem_nb = { .notifier_call = sclp_mem_notifier, }; static void __init add_memory_merged(u16 rn) { static u16 first_rn, num; unsigned long long start, size; if (rn && first_rn && (first_rn + num == rn)) { num++; return; } if (!first_rn) goto skip_add; start = rn2addr(first_rn); size = (unsigned long long ) num * rzm; if (start >= VMEM_MAX_PHYS) goto skip_add; if (start + size > VMEM_MAX_PHYS) size = VMEM_MAX_PHYS - start; if (memory_end_set && (start >= memory_end)) goto skip_add; if (memory_end_set && (start + size > memory_end)) size = memory_end - start; add_memory(0, start, size); skip_add: first_rn = rn; num = 1; } static void __init sclp_add_standby_memory(void) { struct memory_increment *incr; list_for_each_entry(incr, &sclp_mem_list, list) if (incr->standby) add_memory_merged(incr->rn); add_memory_merged(0); } static void __init insert_increment(u16 rn, int standby, int assigned) { struct memory_increment *incr, *new_incr; struct list_head *prev; u16 last_rn; new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL); if (!new_incr) return; new_incr->rn = rn; new_incr->standby = standby; if (!standby) new_incr->usecount 
= 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { if (assigned && incr->rn > rn) break; if (!assigned && incr->rn - last_rn > 1) break; last_rn = incr->rn; prev = &incr->list; } if (!assigned) new_incr->rn = last_rn + 1; if (new_incr->rn > rnmax) { kfree(new_incr); return; } list_add(&new_incr->list, prev); } static int sclp_mem_freeze(struct device *dev) { if (!sclp_mem_state_changed) return 0; pr_err("Memory hotplug state changed, suspend refused.\n"); return -EPERM; } struct read_storage_sccb { struct sccb_header header; u16 max_id; u16 assigned; u16 standby; u16 :16; u32 entries[0]; } __packed; static const struct dev_pm_ops sclp_mem_pm_ops = { .freeze = sclp_mem_freeze, }; static struct platform_driver sclp_mem_pdrv = { .driver = { .name = "sclp_mem", .pm = &sclp_mem_pm_ops, }, }; static int __init sclp_detect_standby_memory(void) { struct platform_device *sclp_pdev; struct read_storage_sccb *sccb; int i, id, assigned, rc; if (!early_read_info_sccb_valid) return 0; if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) return 0; rc = -ENOMEM; sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); if (!sccb) goto out; assigned = 0; for (id = 0; id <= sclp_max_storage_id; id++) { memset(sccb, 0, PAGE_SIZE); sccb->header.length = PAGE_SIZE; rc = do_sync_request(0x00040001 | id << 8, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0010: set_bit(id, sclp_storage_ids); for (i = 0; i < sccb->assigned; i++) { if (!sccb->entries[i]) continue; assigned++; insert_increment(sccb->entries[i] >> 16, 0, 1); } break; case 0x0310: break; case 0x0410: for (i = 0; i < sccb->assigned; i++) { if (!sccb->entries[i]) continue; assigned++; insert_increment(sccb->entries[i] >> 16, 1, 1); } break; default: rc = -EIO; break; } if (!rc) sclp_max_storage_id = sccb->max_id; } if (rc || list_empty(&sclp_mem_list)) goto out; for (i = 1; i <= rnmax - assigned; i++) insert_increment(0, 1, 0); rc = 
register_memory_notifier(&sclp_mem_nb); if (rc) goto out; rc = platform_driver_register(&sclp_mem_pdrv); if (rc) goto out; sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; if (rc) goto out_driver; sclp_add_standby_memory(); goto out; out_driver: platform_driver_unregister(&sclp_mem_pdrv); out: free_page((unsigned long) sccb); return rc; } __initcall(sclp_detect_standby_memory); #endif /* CONFIG_MEMORY_HOTPLUG */ /* * Channel path configuration related functions. */ #define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001 #define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001 #define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001 struct chp_cfg_sccb { struct sccb_header header; u8 ccm; u8 reserved[6]; u8 cssid; } __attribute__((packed)); static int do_chp_configure(sclp_cmdw_t cmd) { struct chp_cfg_sccb *sccb; int rc; if (!SCLP_HAS_CHP_RECONFIG) return -EOPNOTSUPP; /* Prepare sccb. */ sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(cmd, sccb); if (rc) goto out; switch (sccb->header.response_code) { case 0x0020: case 0x0120: case 0x0440: case 0x0450: break; default: pr_warning("configure channel-path failed " "(cmd=0x%08x, response=0x%04x)\n", cmd, sccb->header.response_code); rc = -EIO; break; } out: free_page((unsigned long) sccb); return rc; } /** * sclp_chp_configure - perform configure channel-path sclp command * @chpid: channel-path ID * * Perform configure channel-path command sclp command for specified chpid. * Return 0 after command successfully finished, non-zero otherwise. */ int sclp_chp_configure(struct chp_id chpid) { return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8); } /** * sclp_chp_deconfigure - perform deconfigure channel-path sclp command * @chpid: channel-path ID * * Perform deconfigure channel-path command sclp command for specified chpid * and wait for completion. 
On success return 0. Return non-zero otherwise. */ int sclp_chp_deconfigure(struct chp_id chpid) { return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); } struct chp_info_sccb { struct sccb_header header; u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; u8 standby[SCLP_CHP_INFO_MASK_SIZE]; u8 configured[SCLP_CHP_INFO_MASK_SIZE]; u8 ccm; u8 reserved[6]; u8 cssid; } __attribute__((packed)); /** * sclp_chp_read_info - perform read channel-path information sclp command * @info: resulting channel-path information data * * Perform read channel-path information sclp command and wait for completion. * On success, store channel-path information in @info and return 0. Return * non-zero otherwise. */ int sclp_chp_read_info(struct sclp_chp_info *info) { struct chp_info_sccb *sccb; int rc; if (!SCLP_HAS_CHP_INFO) return -EOPNOTSUPP; /* Prepare sccb. */ sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!sccb) return -ENOMEM; sccb->header.length = sizeof(*sccb); rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); if (rc) goto out; if (sccb->header.response_code != 0x0010) { pr_warning("read channel-path info failed " "(response=0x%04x)\n", sccb->header.response_code); rc = -EIO; goto out; } memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE); memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE); memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE); out: free_page((unsigned long) sccb); return rc; }
gpl-2.0
ashyx/kernel_gts28ve-gts210ve
drivers/pcmcia/rsrc_nonstatic.c
3579
30398
/* * rsrc_nonstatic.c -- Resource management routines for !SS_CAP_STATIC_MAP sockets * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/io.h> #include <asm/irq.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" /* moved to rsrc_mgr.c MODULE_AUTHOR("David A. Hinds, Dominik Brodowski"); MODULE_LICENSE("GPL"); */ /* Parameters that can be set with 'insmod' */ #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) INT_MODULE_PARM(probe_mem, 1); /* memory probe? */ #ifdef CONFIG_PCMCIA_PROBE INT_MODULE_PARM(probe_io, 1); /* IO port probe? 
*/ INT_MODULE_PARM(mem_limit, 0x10000); #endif /* for io_db and mem_db */ struct resource_map { u_long base, num; struct resource_map *next; }; struct socket_data { struct resource_map mem_db; struct resource_map mem_db_valid; struct resource_map io_db; }; #define MEM_PROBE_LOW (1 << 0) #define MEM_PROBE_HIGH (1 << 1) /* Action field */ #define REMOVE_MANAGED_RESOURCE 1 #define ADD_MANAGED_RESOURCE 2 /*====================================================================== Linux resource management extensions ======================================================================*/ static struct resource * claim_region(struct pcmcia_socket *s, resource_size_t base, resource_size_t size, int type, char *name) { struct resource *res, *parent; parent = type & IORESOURCE_MEM ? &iomem_resource : &ioport_resource; res = pcmcia_make_resource(base, size, type | IORESOURCE_BUSY, name); if (res) { #ifdef CONFIG_PCI if (s && s->cb_dev) parent = pci_find_parent_resource(s->cb_dev, res); #endif if (!parent || request_resource(parent, res)) { kfree(res); res = NULL; } } return res; } static void free_region(struct resource *res) { if (res) { release_resource(res); kfree(res); } } /*====================================================================== These manage the internal databases of available resources. 
======================================================================*/ static int add_interval(struct resource_map *map, u_long base, u_long num) { struct resource_map *p, *q; for (p = map; ; p = p->next) { if ((p != map) && (p->base+p->num >= base)) { p->num = max(num + base - p->base, p->num); return 0; } if ((p->next == map) || (p->next->base > base+num-1)) break; } q = kmalloc(sizeof(struct resource_map), GFP_KERNEL); if (!q) { printk(KERN_WARNING "out of memory to update resources\n"); return -ENOMEM; } q->base = base; q->num = num; q->next = p->next; p->next = q; return 0; } /*====================================================================*/ static int sub_interval(struct resource_map *map, u_long base, u_long num) { struct resource_map *p, *q; for (p = map; ; p = q) { q = p->next; if (q == map) break; if ((q->base+q->num > base) && (base+num > q->base)) { if (q->base >= base) { if (q->base+q->num <= base+num) { /* Delete whole block */ p->next = q->next; kfree(q); /* don't advance the pointer yet */ q = p; } else { /* Cut off bit from the front */ q->num = q->base + q->num - base - num; q->base = base + num; } } else if (q->base+q->num <= base+num) { /* Cut off bit from the end */ q->num = base - q->base; } else { /* Split the block into two pieces */ p = kmalloc(sizeof(struct resource_map), GFP_KERNEL); if (!p) { printk(KERN_WARNING "out of memory to update resources\n"); return -ENOMEM; } p->base = base+num; p->num = q->base+q->num - p->base; q->num = base - q->base; p->next = q->next ; q->next = p; } } } return 0; } /*====================================================================== These routines examine a region of IO or memory addresses to determine what ranges might be genuinely available. 
======================================================================*/ #ifdef CONFIG_PCMCIA_PROBE static void do_io_probe(struct pcmcia_socket *s, unsigned int base, unsigned int num) { struct resource *res; struct socket_data *s_data = s->resource_data; unsigned int i, j, bad; int any; u_char *b, hole, most; dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:", base, base+num-1); /* First, what does a floating port look like? */ b = kzalloc(256, GFP_KERNEL); if (!b) { printk("\n"); dev_printk(KERN_ERR, &s->dev, "do_io_probe: unable to kmalloc 256 bytes"); return; } for (i = base, most = 0; i < base+num; i += 8) { res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) continue; hole = inb(i); for (j = 1; j < 8; j++) if (inb(i+j) != hole) break; free_region(res); if ((j == 8) && (++b[hole] > b[most])) most = hole; if (b[most] == 127) break; } kfree(b); bad = any = 0; for (i = base; i < base+num; i += 8) { res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) { if (!any) printk(" excluding"); if (!bad) bad = any = i; continue; } for (j = 0; j < 8; j++) if (inb(i+j) != most) break; free_region(res); if (j < 8) { if (!any) printk(" excluding"); if (!bad) bad = any = i; } else { if (bad) { sub_interval(&s_data->io_db, bad, i-bad); printk(" %#x-%#x", bad, i-1); bad = 0; } } } if (bad) { if ((num > 16) && (bad == base) && (i == base+num)) { sub_interval(&s_data->io_db, bad, i-bad); printk(" nothing: probe failed.\n"); return; } else { sub_interval(&s_data->io_db, bad, i-bad); printk(" %#x-%#x", bad, i-1); } } printk(any ? 
"\n" : " clean.\n"); } #endif /*======================================================================*/ /** * readable() - iomem validation function for cards with a valid CIS */ static int readable(struct pcmcia_socket *s, struct resource *res, unsigned int *count) { int ret = -EINVAL; if (s->fake_cis) { dev_dbg(&s->dev, "fake CIS is being used: can't validate mem\n"); return 0; } s->cis_mem.res = res; s->cis_virt = ioremap(res->start, s->map_size); if (s->cis_virt) { mutex_unlock(&s->ops_mutex); /* as we're only called from pcmcia.c, we're safe */ if (s->callback->validate) ret = s->callback->validate(s, count); /* invalidate mapping */ mutex_lock(&s->ops_mutex); iounmap(s->cis_virt); s->cis_virt = NULL; } s->cis_mem.res = NULL; if ((ret) || (*count == 0)) return -EINVAL; return 0; } /** * checksum() - iomem validation function for simple memory cards */ static int checksum(struct pcmcia_socket *s, struct resource *res, unsigned int *value) { pccard_mem_map map; int i, a = 0, b = -1, d; void __iomem *virt; virt = ioremap(res->start, s->map_size); if (virt) { map.map = 0; map.flags = MAP_ACTIVE; map.speed = 0; map.res = res; map.card_start = 0; s->ops->set_mem_map(s, &map); /* Don't bother checking every word... */ for (i = 0; i < s->map_size; i += 44) { d = readl(virt+i); a += d; b &= d; } map.flags = 0; s->ops->set_mem_map(s, &map); iounmap(virt); } if (b == -1) return -EINVAL; *value = a; return 0; } /** * do_validate_mem() - low level validate a memory region for PCMCIA use * @s: PCMCIA socket to validate * @base: start address of resource to check * @size: size of resource to check * @validate: validation function to use * * do_validate_mem() splits up the memory region which is to be checked * into two parts. Both are passed to the @validate() function. If * @validate() returns non-zero, or the value parameter to @validate() * is zero, or the value parameter is different between both calls, * the check fails, and -EINVAL is returned. Else, 0 is returned. 
*/ static int do_validate_mem(struct pcmcia_socket *s, unsigned long base, unsigned long size, int validate (struct pcmcia_socket *s, struct resource *res, unsigned int *value)) { struct socket_data *s_data = s->resource_data; struct resource *res1, *res2; unsigned int info1 = 1, info2 = 1; int ret = -EINVAL; res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); if (res1 && res2) { ret = 0; if (validate) { ret = validate(s, res1, &info1); ret += validate(s, res2, &info2); } } dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u", base, base+size-1, res1, res2, ret, info1, info2); free_region(res2); free_region(res1); if ((ret) || (info1 != info2) || (info1 == 0)) return -EINVAL; if (validate && !s->fake_cis) { /* move it to the validated data set */ add_interval(&s_data->mem_db_valid, base, size); sub_interval(&s_data->mem_db, base, size); } return 0; } /** * do_mem_probe() - validate a memory region for PCMCIA use * @s: PCMCIA socket to validate * @base: start address of resource to check * @num: size of resource to check * @validate: validation function to use * @fallback: validation function to use if validate fails * * do_mem_probe() checks a memory region for use by the PCMCIA subsystem. * To do so, the area is split up into sensible parts, and then passed * into the @validate() function. Only if @validate() and @fallback() fail, * the area is marked as unavaibale for use by the PCMCIA subsystem. The * function returns the size of the usable memory area. 
*/ static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num, int validate (struct pcmcia_socket *s, struct resource *res, unsigned int *value), int fallback (struct pcmcia_socket *s, struct resource *res, unsigned int *value)) { struct socket_data *s_data = s->resource_data; u_long i, j, bad, fail, step; dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:", base, base+num-1); bad = fail = 0; step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff); /* don't allow too large steps */ if (step > 0x800000) step = 0x800000; /* cis_readable wants to map 2x map_size */ if (step < 2 * s->map_size) step = 2 * s->map_size; for (i = j = base; i < base+num; i = j + step) { if (!fail) { for (j = i; j < base+num; j += step) { if (!do_validate_mem(s, j, step, validate)) break; } fail = ((i == base) && (j == base+num)); } if ((fail) && (fallback)) { for (j = i; j < base+num; j += step) if (!do_validate_mem(s, j, step, fallback)) break; } if (i != j) { if (!bad) printk(" excluding"); printk(" %#05lx-%#05lx", i, j-1); sub_interval(&s_data->mem_db, i, j-i); bad += j-i; } } printk(bad ? "\n" : " clean.\n"); return num - bad; } #ifdef CONFIG_PCMCIA_PROBE /** * inv_probe() - top-to-bottom search for one usuable high memory area * @s: PCMCIA socket to validate * @m: resource_map to check */ static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s) { struct socket_data *s_data = s->resource_data; u_long ok; if (m == &s_data->mem_db) return 0; ok = inv_probe(m->next, s); if (ok) { if (m->base >= 0x100000) sub_interval(&s_data->mem_db, m->base, m->num); return ok; } if (m->base < 0x100000) return 0; return do_mem_probe(s, m->base, m->num, readable, checksum); } /** * validate_mem() - memory probe function * @s: PCMCIA socket to validate * @probe_mask: MEM_PROBE_LOW | MEM_PROBE_HIGH * * The memory probe. 
If the memory list includes a 64K-aligned block * below 1MB, we probe in 64K chunks, and as soon as we accumulate at * least mem_limit free space, we quit. Returns 0 on usuable ports. */ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) { struct resource_map *m, mm; static unsigned char order[] = { 0xd0, 0xe0, 0xc0, 0xf0 }; unsigned long b, i, ok = 0; struct socket_data *s_data = s->resource_data; /* We do up to four passes through the list */ if (probe_mask & MEM_PROBE_HIGH) { if (inv_probe(s_data->mem_db.next, s) > 0) return 0; if (s_data->mem_db_valid.next != &s_data->mem_db_valid) return 0; dev_printk(KERN_NOTICE, &s->dev, "cs: warning: no high memory space available!\n"); return -ENODEV; } for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { mm = *m; /* Only probe < 1 MB */ if (mm.base >= 0x100000) continue; if ((mm.base | mm.num) & 0xffff) { ok += do_mem_probe(s, mm.base, mm.num, readable, checksum); continue; } /* Special probe for 64K-aligned block */ for (i = 0; i < 4; i++) { b = order[i] << 12; if ((b >= mm.base) && (b+0x10000 <= mm.base+mm.num)) { if (ok >= mem_limit) sub_interval(&s_data->mem_db, b, 0x10000); else ok += do_mem_probe(s, b, 0x10000, readable, checksum); } } } if (ok > 0) return 0; return -ENODEV; } #else /* CONFIG_PCMCIA_PROBE */ /** * validate_mem() - memory probe function * @s: PCMCIA socket to validate * @probe_mask: ignored * * Returns 0 on usuable ports. */ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) { struct resource_map *m, mm; struct socket_data *s_data = s->resource_data; unsigned long ok = 0; for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { mm = *m; ok += do_mem_probe(s, mm.base, mm.num, readable, checksum); } if (ok > 0) return 0; return -ENODEV; } #endif /* CONFIG_PCMCIA_PROBE */ /** * pcmcia_nonstatic_validate_mem() - try to validate iomem for PCMCIA use * @s: PCMCIA socket to validate * * This is tricky... 
when we set up CIS memory, we try to validate * the memory window space allocations. * * Locking note: Must be called with skt_mutex held! */ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s) { struct socket_data *s_data = s->resource_data; unsigned int probe_mask = MEM_PROBE_LOW; int ret; if (!probe_mem || !(s->state & SOCKET_PRESENT)) return 0; if (s->features & SS_CAP_PAGE_REGS) probe_mask = MEM_PROBE_HIGH; ret = validate_mem(s, probe_mask); if (s_data->mem_db_valid.next != &s_data->mem_db_valid) return 0; return ret; } struct pcmcia_align_data { unsigned long mask; unsigned long offset; struct resource_map *map; }; static resource_size_t pcmcia_common_align(struct pcmcia_align_data *align_data, resource_size_t start) { resource_size_t ret; /* * Ensure that we have the correct start address */ ret = (start & ~align_data->mask) + align_data->offset; if (ret < start) ret += align_data->mask + 1; return ret; } static resource_size_t pcmcia_align(void *align_data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pcmcia_align_data *data = align_data; struct resource_map *m; resource_size_t start; start = pcmcia_common_align(data, res->start); for (m = data->map->next; m != data->map; m = m->next) { unsigned long map_start = m->base; unsigned long map_end = m->base + m->num - 1; /* * If the lower resources are not available, try aligning * to this entry of the resource database to see if it'll * fit here. */ if (start < map_start) start = pcmcia_common_align(data, map_start); /* * If we're above the area which was passed in, there's * no point proceeding. */ if (start >= res->end) break; if ((start + size - 1) <= map_end) break; } /* * If we failed to find something suitable, ensure we fail. */ if (m == data->map) start = res->end; return start; } /* * Adjust an existing IO region allocation, but making sure that we don't * encroach outside the resources which the user supplied. 
*/ static int __nonstatic_adjust_io_region(struct pcmcia_socket *s, unsigned long r_start, unsigned long r_end) { struct resource_map *m; struct socket_data *s_data = s->resource_data; int ret = -ENOMEM; for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) { unsigned long start = m->base; unsigned long end = m->base + m->num - 1; if (start > r_start || r_end > end) continue; ret = 0; } return ret; } /*====================================================================== These find ranges of I/O ports or memory addresses that are not currently allocated by other devices. The 'align' field should reflect the number of bits of address that need to be preserved from the initial value of *base. It should be a power of two, greater than or equal to 'num'. A value of 0 means that all bits of *base are significant. *base should also be strictly less than 'align'. ======================================================================*/ static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s, unsigned long base, int num, unsigned long align) { struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev)); struct socket_data *s_data = s->resource_data; struct pcmcia_align_data data; unsigned long min = base; int ret; data.mask = align - 1; data.offset = base & data.mask; data.map = &s_data->io_db; #ifdef CONFIG_PCI if (s->cb_dev) { ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, min, 0, pcmcia_align, &data); } else #endif ret = allocate_resource(&ioport_resource, res, num, min, ~0UL, 1, pcmcia_align, &data); if (ret != 0) { kfree(res); res = NULL; } return res; } static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr, unsigned int *base, unsigned int num, unsigned int align, struct resource **parent) { int i, ret = 0; /* Check for an already-allocated window that must conflict with * what was asked for. It is a hack because it does not catch all * potential conflicts, just the most obvious ones. 
*/ for (i = 0; i < MAX_IO_WIN; i++) { if (!s->io[i].res) continue; if (!*base) continue; if ((s->io[i].res->start & (align-1)) == *base) return -EBUSY; } for (i = 0; i < MAX_IO_WIN; i++) { struct resource *res = s->io[i].res; unsigned int try; if (res && (res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS)) continue; if (!res) { if (align == 0) align = 0x10000; res = s->io[i].res = __nonstatic_find_io_region(s, *base, num, align); if (!res) return -EINVAL; *base = res->start; s->io[i].res->flags = ((res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS)); s->io[i].InUse = num; *parent = res; return 0; } /* Try to extend top of window */ try = res->end + 1; if ((*base == 0) || (*base == try)) { ret = __nonstatic_adjust_io_region(s, res->start, res->end + num); if (!ret) { ret = adjust_resource(s->io[i].res, res->start, resource_size(res) + num); if (ret) continue; *base = try; s->io[i].InUse += num; *parent = res; return 0; } } /* Try to extend bottom of window */ try = res->start - num; if ((*base == 0) || (*base == try)) { ret = __nonstatic_adjust_io_region(s, res->start - num, res->end); if (!ret) { ret = adjust_resource(s->io[i].res, res->start - num, resource_size(res) + num); if (ret) continue; *base = try; s->io[i].InUse += num; *parent = res; return 0; } } } return -EINVAL; } static struct resource *nonstatic_find_mem_region(u_long base, u_long num, u_long align, int low, struct pcmcia_socket *s) { struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev)); struct socket_data *s_data = s->resource_data; struct pcmcia_align_data data; unsigned long min, max; int ret, i, j; low = low || !(s->features & SS_CAP_PAGE_REGS); data.mask = align - 1; data.offset = base & data.mask; for (i = 0; i < 2; i++) { data.map = &s_data->mem_db_valid; if (low) { max = 0x100000UL; min = base < max ? 
base : 0; } else { max = ~0UL; min = 0x100000UL + base; } for (j = 0; j < 2; j++) { #ifdef CONFIG_PCI if (s->cb_dev) { ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, min, 0, pcmcia_align, &data); } else #endif { ret = allocate_resource(&iomem_resource, res, num, min, max, 1, pcmcia_align, &data); } if (ret == 0) break; data.map = &s_data->mem_db; } if (ret == 0 || low) break; low = 1; } if (ret != 0) { kfree(res); res = NULL; } return res; } static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) { struct socket_data *data = s->resource_data; unsigned long size = end - start + 1; int ret = 0; if (end < start) return -EINVAL; switch (action) { case ADD_MANAGED_RESOURCE: ret = add_interval(&data->mem_db, start, size); if (!ret) do_mem_probe(s, start, size, NULL, NULL); break; case REMOVE_MANAGED_RESOURCE: ret = sub_interval(&data->mem_db, start, size); break; default: ret = -EINVAL; } return ret; } static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) { struct socket_data *data = s->resource_data; unsigned long size; int ret = 0; #if defined(CONFIG_X86) /* on x86, avoid anything < 0x100 for it is often used for * legacy platform devices */ if (start < 0x100) start = 0x100; #endif size = end - start + 1; if (end < start) return -EINVAL; if (end > IO_SPACE_LIMIT) return -EINVAL; switch (action) { case ADD_MANAGED_RESOURCE: if (add_interval(&data->io_db, start, size) != 0) { ret = -EBUSY; break; } #ifdef CONFIG_PCMCIA_PROBE if (probe_io) do_io_probe(s, start, size); #endif break; case REMOVE_MANAGED_RESOURCE: sub_interval(&data->io_db, start, size); break; default: ret = -EINVAL; break; } return ret; } #ifdef CONFIG_PCI static int nonstatic_autoadd_resources(struct pcmcia_socket *s) { struct resource *res; int i, done = 0; if (!s->cb_dev || !s->cb_dev->bus) return -ENODEV; #if defined(CONFIG_X86) /* If this is the root bus, the risk of hitting some 
strange * system devices is too high: If a driver isn't loaded, the * resources are not claimed; even if a driver is loaded, it * may not request all resources or even the wrong one. We * can neither trust the rest of the kernel nor ACPI/PNP and * CRS parsing to get it right. Therefore, use several * safeguards: * * - Do not auto-add resources if the CardBus bridge is on * the PCI root bus * * - Avoid any I/O ports < 0x100. * * - On PCI-PCI bridges, only use resources which are set up * exclusively for the secondary PCI bus: the risk of hitting * system devices is quite low, as they usually aren't * connected to the secondary PCI bus. */ if (s->cb_dev->bus->number == 0) return -EINVAL; for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { res = s->cb_dev->bus->resource[i]; #else pci_bus_for_each_resource(s->cb_dev->bus, res, i) { #endif if (!res) continue; if (res->flags & IORESOURCE_IO) { /* safeguard against the root resource, where the * risk of hitting any other device would be too * high */ if (res == &ioport_resource) continue; dev_printk(KERN_INFO, &s->cb_dev->dev, "pcmcia: parent PCI bridge window: %pR\n", res); if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_IO; } if (res->flags & IORESOURCE_MEM) { /* safeguard against the root resource, where the * risk of hitting any other device would be too * high */ if (res == &iomem_resource) continue; dev_printk(KERN_INFO, &s->cb_dev->dev, "pcmcia: parent PCI bridge window: %pR\n", res); if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_MEM; } } /* if we got at least one of IO, and one of MEM, we can be glad and * activate the PCMCIA subsystem */ if (done == (IORESOURCE_MEM | IORESOURCE_IO)) s->resource_setup_done = 1; return 0; } #else static inline int nonstatic_autoadd_resources(struct pcmcia_socket *s) { return -ENODEV; } #endif static int nonstatic_init(struct pcmcia_socket *s) { struct socket_data *data; data = kzalloc(sizeof(struct socket_data), 
GFP_KERNEL); if (!data) return -ENOMEM; data->mem_db.next = &data->mem_db; data->mem_db_valid.next = &data->mem_db_valid; data->io_db.next = &data->io_db; s->resource_data = (void *) data; nonstatic_autoadd_resources(s); return 0; } static void nonstatic_release_resource_db(struct pcmcia_socket *s) { struct socket_data *data = s->resource_data; struct resource_map *p, *q; for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = q) { q = p->next; kfree(p); } for (p = data->mem_db.next; p != &data->mem_db; p = q) { q = p->next; kfree(p); } for (p = data->io_db.next; p != &data->io_db; p = q) { q = p->next; kfree(p); } } struct pccard_resource_ops pccard_nonstatic_ops = { .validate_mem = pcmcia_nonstatic_validate_mem, .find_io = nonstatic_find_io, .find_mem = nonstatic_find_mem_region, .init = nonstatic_init, .exit = nonstatic_release_resource_db, }; EXPORT_SYMBOL(pccard_nonstatic_ops); /* sysfs interface to the resource database */ static ssize_t show_io_db(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_socket *s = dev_get_drvdata(dev); struct socket_data *data; struct resource_map *p; ssize_t ret = 0; mutex_lock(&s->ops_mutex); data = s->resource_data; for (p = data->io_db.next; p != &data->io_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); } mutex_unlock(&s->ops_mutex); return ret; } static ssize_t store_io_db(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_socket *s = dev_get_drvdata(dev); unsigned long start_addr, end_addr; unsigned int add = ADD_MANAGED_RESOURCE; ssize_t ret = 0; ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); if (ret != 2) { ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); add = REMOVE_MANAGED_RESOURCE; if (ret != 2) { ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr, &end_addr); add = 
ADD_MANAGED_RESOURCE; if (ret != 2) return -EINVAL; } } if (end_addr < start_addr) return -EINVAL; mutex_lock(&s->ops_mutex); ret = adjust_io(s, add, start_addr, end_addr); mutex_unlock(&s->ops_mutex); return ret ? ret : count; } static DEVICE_ATTR(available_resources_io, 0600, show_io_db, store_io_db); static ssize_t show_mem_db(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_socket *s = dev_get_drvdata(dev); struct socket_data *data; struct resource_map *p; ssize_t ret = 0; mutex_lock(&s->ops_mutex); data = s->resource_data; for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); } for (p = data->mem_db.next; p != &data->mem_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); } mutex_unlock(&s->ops_mutex); return ret; } static ssize_t store_mem_db(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_socket *s = dev_get_drvdata(dev); unsigned long start_addr, end_addr; unsigned int add = ADD_MANAGED_RESOURCE; ssize_t ret = 0; ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); if (ret != 2) { ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); add = REMOVE_MANAGED_RESOURCE; if (ret != 2) { ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr, &end_addr); add = ADD_MANAGED_RESOURCE; if (ret != 2) return -EINVAL; } } if (end_addr < start_addr) return -EINVAL; mutex_lock(&s->ops_mutex); ret = adjust_memory(s, add, start_addr, end_addr); mutex_unlock(&s->ops_mutex); return ret ? 
ret : count; } static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db); static struct attribute *pccard_rsrc_attributes[] = { &dev_attr_available_resources_io.attr, &dev_attr_available_resources_mem.attr, NULL, }; static const struct attribute_group rsrc_attributes = { .attrs = pccard_rsrc_attributes, }; static int pccard_sysfs_add_rsrc(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *s = dev_get_drvdata(dev); if (s->resource_ops != &pccard_nonstatic_ops) return 0; return sysfs_create_group(&dev->kobj, &rsrc_attributes); } static void pccard_sysfs_remove_rsrc(struct device *dev, struct class_interface *class_intf) { struct pcmcia_socket *s = dev_get_drvdata(dev); if (s->resource_ops != &pccard_nonstatic_ops) return; sysfs_remove_group(&dev->kobj, &rsrc_attributes); } static struct class_interface pccard_rsrc_interface __refdata = { .class = &pcmcia_socket_class, .add_dev = &pccard_sysfs_add_rsrc, .remove_dev = &pccard_sysfs_remove_rsrc, }; static int __init nonstatic_sysfs_init(void) { return class_interface_register(&pccard_rsrc_interface); } static void __exit nonstatic_sysfs_exit(void) { class_interface_unregister(&pccard_rsrc_interface); } module_init(nonstatic_sysfs_init); module_exit(nonstatic_sysfs_exit);
gpl-2.0
geeknik/StupidFast
kernel/debug/kdb/kdb_bt.c
3579
5344
/* * Kernel Debugger Architecture Independent Stack Traceback * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. */ #include <linux/ctype.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/kdb.h> #include <linux/nmi.h> #include <asm/system.h> #include "kdb_private.h" static void kdb_show_stack(struct task_struct *p, void *addr) { int old_lvl = console_loglevel; console_loglevel = 15; kdb_trap_printk++; kdb_set_current_task(p); if (addr) { show_stack((struct task_struct *)p, addr); } else if (kdb_current_regs) { #ifdef CONFIG_X86 show_stack(p, &kdb_current_regs->sp); #else show_stack(p, NULL); #endif } else { show_stack(p, NULL); } console_loglevel = old_lvl; kdb_trap_printk--; } /* * kdb_bt * * This function implements the 'bt' command. Print a stack * traceback. * * bt [<address-expression>] (addr-exp is for alternate stacks) * btp <pid> Kernel stack for <pid> * btt <address-expression> Kernel stack for task structure at * <address-expression> * bta [DRSTCZEUIMA] All useful processes, optionally * filtered by state * btc [<cpu>] The current process on one cpu, * default is all cpus * * bt <address-expression> refers to a address on the stack, that location * is assumed to contain a return address. * * btt <address-expression> refers to the address of a struct task. * * Inputs: * argc argument count * argv argument vector * Outputs: * None. * Returns: * zero for success, a kdb diagnostic if error * Locking: * none. * Remarks: * Backtrack works best when the code uses frame pointers. But even * without frame pointers we should get a reasonable trace. 
* * mds comes in handy when examining the stack to do a manual traceback or * to get a starting point for bt <address-expression>. */ static int kdb_bt1(struct task_struct *p, unsigned long mask, int argcount, int btaprompt) { char buffer[2]; if (kdb_getarea(buffer[0], (unsigned long)p) || kdb_getarea(buffer[0], (unsigned long)(p+1)-1)) return KDB_BADADDR; if (!kdb_task_state(p, mask)) return 0; kdb_printf("Stack traceback for pid %d\n", p->pid); kdb_ps1(p); kdb_show_stack(p, NULL); if (btaprompt) { kdb_getstr(buffer, sizeof(buffer), "Enter <q> to end, <cr> to continue:"); if (buffer[0] == 'q') { kdb_printf("\n"); return 1; } } touch_nmi_watchdog(); return 0; } int kdb_bt(int argc, const char **argv) { int diag; int argcount = 5; int btaprompt = 1; int nextarg; unsigned long addr; long offset; kdbgetintenv("BTARGS", &argcount); /* Arguments to print */ kdbgetintenv("BTAPROMPT", &btaprompt); /* Prompt after each * proc in bta */ if (strcmp(argv[0], "bta") == 0) { struct task_struct *g, *p; unsigned long cpu; unsigned long mask = kdb_task_state_string(argc ? 
argv[1] : NULL); if (argc == 0) kdb_ps_suppressed(); /* Run the active tasks first */ for_each_online_cpu(cpu) { p = kdb_curr_task(cpu); if (kdb_bt1(p, mask, argcount, btaprompt)) return 0; } /* Now the inactive tasks */ kdb_do_each_thread(g, p) { if (task_curr(p)) continue; if (kdb_bt1(p, mask, argcount, btaprompt)) return 0; } kdb_while_each_thread(g, p); } else if (strcmp(argv[0], "btp") == 0) { struct task_struct *p; unsigned long pid; if (argc != 1) return KDB_ARGCOUNT; diag = kdbgetularg((char *)argv[1], &pid); if (diag) return diag; p = find_task_by_pid_ns(pid, &init_pid_ns); if (p) { kdb_set_current_task(p); return kdb_bt1(p, ~0UL, argcount, 0); } kdb_printf("No process with pid == %ld found\n", pid); return 0; } else if (strcmp(argv[0], "btt") == 0) { if (argc != 1) return KDB_ARGCOUNT; diag = kdbgetularg((char *)argv[1], &addr); if (diag) return diag; kdb_set_current_task((struct task_struct *)addr); return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0); } else if (strcmp(argv[0], "btc") == 0) { unsigned long cpu = ~0; struct task_struct *save_current_task = kdb_current_task; char buf[80]; if (argc > 1) return KDB_ARGCOUNT; if (argc == 1) { diag = kdbgetularg((char *)argv[1], &cpu); if (diag) return diag; } /* Recursive use of kdb_parse, do not use argv after * this point */ argv = NULL; if (cpu != ~0) { if (cpu >= num_possible_cpus() || !cpu_online(cpu)) { kdb_printf("no process for cpu %ld\n", cpu); return 0; } sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu)); kdb_parse(buf); return 0; } kdb_printf("btc: cpu status: "); kdb_parse("cpu\n"); for_each_online_cpu(cpu) { sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu)); kdb_parse(buf); touch_nmi_watchdog(); } kdb_set_current_task(save_current_task); return 0; } else { if (argc) { nextarg = 1; diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL); if (diag) return diag; kdb_show_stack(kdb_current_task, (void *)addr); return 0; } else { return kdb_bt1(kdb_current_task, ~0UL, argcount, 0); } } /* NOTREACHED 
*/ return 0; }
gpl-2.0
Capful/android_kernel_htc_msm8660
sound/isa/gus/gusclassic.c
4603
7388
/* * Driver for Gravis UltraSound Classic soundcard * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/delay.h> #include <linux/time.h> #include <linux/moduleparam.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/gus.h> #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> #define CRD_NAME "Gravis UltraSound Classic" #define DEV_NAME "gusclassic" MODULE_DESCRIPTION(CRD_NAME); MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Classic}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x230,0x240,0x250,0x260 */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 3,5,9,11,12,15 */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 1,3,5,6,7 */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 1,3,5,6,7 */ static int joystick_dac[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 29}; /* 0 to 31, (0.59V-4.52V or 0.389V-2.98V) */ static int channels[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 24}; static int pcm_channels[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for " CRD_NAME " driver."); module_param_array(dma1, int, NULL, 0444); MODULE_PARM_DESC(dma1, "DMA1 # for " CRD_NAME " driver."); module_param_array(dma2, int, NULL, 0444); MODULE_PARM_DESC(dma2, "DMA2 # for " CRD_NAME " driver."); module_param_array(joystick_dac, int, NULL, 0444); MODULE_PARM_DESC(joystick_dac, "Joystick DAC level 0.59V-4.52V or 0.389V-2.98V for " CRD_NAME " driver."); module_param_array(channels, int, NULL, 0444); MODULE_PARM_DESC(channels, "GF1 channels for " CRD_NAME " driver."); module_param_array(pcm_channels, int, NULL, 0444); MODULE_PARM_DESC(pcm_channels, "Reserved PCM channels for " CRD_NAME " driver."); static int __devinit snd_gusclassic_match(struct device *dev, unsigned int n) { return enable[n]; } static int __devinit snd_gusclassic_create(struct snd_card *card, struct device *dev, unsigned int n, struct snd_gus_card **rgus) { static long possible_ports[] = {0x220, 0x230, 0x240, 0x250, 0x260}; static int possible_irqs[] = {5, 11, 12, 9, 7, 15, 3, 4, -1}; static int possible_dmas[] = {5, 6, 7, 1, 3, -1}; int i, error; if (irq[n] == SNDRV_AUTO_IRQ) { irq[n] = snd_legacy_find_free_irq(possible_irqs); if (irq[n] < 0) { dev_err(dev, "unable to find a free IRQ\n"); return -EBUSY; } } if (dma1[n] == SNDRV_AUTO_DMA) { dma1[n] = 
snd_legacy_find_free_dma(possible_dmas); if (dma1[n] < 0) { dev_err(dev, "unable to find a free DMA1\n"); return -EBUSY; } } if (dma2[n] == SNDRV_AUTO_DMA) { dma2[n] = snd_legacy_find_free_dma(possible_dmas); if (dma2[n] < 0) { dev_err(dev, "unable to find a free DMA2\n"); return -EBUSY; } } if (port[n] != SNDRV_AUTO_PORT) return snd_gus_create(card, port[n], irq[n], dma1[n], dma2[n], 0, channels[n], pcm_channels[n], 0, rgus); i = 0; do { port[n] = possible_ports[i]; error = snd_gus_create(card, port[n], irq[n], dma1[n], dma2[n], 0, channels[n], pcm_channels[n], 0, rgus); } while (error < 0 && ++i < ARRAY_SIZE(possible_ports)); return error; } static int __devinit snd_gusclassic_detect(struct snd_gus_card *gus) { unsigned char d; snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0); /* reset GF1 */ if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 0) { snd_printdd("[0x%lx] check 1 failed - 0x%x\n", gus->gf1.port, d); return -ENODEV; } udelay(160); snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1); /* release reset */ udelay(160); if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 1) { snd_printdd("[0x%lx] check 2 failed - 0x%x\n", gus->gf1.port, d); return -ENODEV; } return 0; } static int __devinit snd_gusclassic_probe(struct device *dev, unsigned int n) { struct snd_card *card; struct snd_gus_card *gus; int error; error = snd_card_create(index[n], id[n], THIS_MODULE, 0, &card); if (error < 0) return error; if (pcm_channels[n] < 2) pcm_channels[n] = 2; error = snd_gusclassic_create(card, dev, n, &gus); if (error < 0) goto out; error = snd_gusclassic_detect(gus); if (error < 0) goto out; gus->joystick_dac = joystick_dac[n]; error = snd_gus_initialize(gus); if (error < 0) goto out; error = -ENODEV; if (gus->max_flag || gus->ess_flag) { dev_err(dev, "GUS Classic or ACE soundcard was " "not detected at 0x%lx\n", gus->gf1.port); goto out; } error = snd_gf1_new_mixer(gus); if (error < 0) goto out; error = snd_gf1_pcm_new(gus, 0, 0, NULL); if (error < 0) goto 
out; if (!gus->ace_flag) { error = snd_gf1_rawmidi_new(gus, 0, NULL); if (error < 0) goto out; } sprintf(card->longname + strlen(card->longname), " at 0x%lx, irq %d, dma %d", gus->gf1.port, gus->gf1.irq, gus->gf1.dma1); if (gus->gf1.dma2 >= 0) sprintf(card->longname + strlen(card->longname), "&%d", gus->gf1.dma2); snd_card_set_dev(card, dev); error = snd_card_register(card); if (error < 0) goto out; dev_set_drvdata(dev, card); return 0; out: snd_card_free(card); return error; } static int __devexit snd_gusclassic_remove(struct device *dev, unsigned int n) { snd_card_free(dev_get_drvdata(dev)); dev_set_drvdata(dev, NULL); return 0; } static struct isa_driver snd_gusclassic_driver = { .match = snd_gusclassic_match, .probe = snd_gusclassic_probe, .remove = __devexit_p(snd_gusclassic_remove), #if 0 /* FIXME */ .suspend = snd_gusclassic_suspend, .remove = snd_gusclassic_remove, #endif .driver = { .name = DEV_NAME } }; static int __init alsa_card_gusclassic_init(void) { return isa_register_driver(&snd_gusclassic_driver, SNDRV_CARDS); } static void __exit alsa_card_gusclassic_exit(void) { isa_unregister_driver(&snd_gusclassic_driver); } module_init(alsa_card_gusclassic_init); module_exit(alsa_card_gusclassic_exit);
gpl-2.0
syhost/android_kernel_pantech_ef52l
arch/x86/platform/uv/uv_irq.c
4859
7095
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SGI UV IRQ functions * * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <linux/irq.h> #include <asm/apic.h> #include <asm/uv/uv_irq.h> #include <asm/uv/uv_hub.h> /* MMR offset and pnode of hub sourcing interrupts for a given irq */ struct uv_irq_2_mmr_pnode{ struct rb_node list; unsigned long offset; int pnode; int irq; }; static DEFINE_SPINLOCK(uv_irq_lock); static struct rb_root uv_irq_root; static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool); static void uv_noop(struct irq_data *data) { } static void uv_ack_apic(struct irq_data *data) { ack_APIC_irq(); } static struct irq_chip uv_irq_chip = { .name = "UV-CORE", .irq_mask = uv_noop, .irq_unmask = uv_noop, .irq_eoi = uv_ack_apic, .irq_set_affinity = uv_set_irq_affinity, }; /* * Add offset and pnode information of the hub sourcing interrupts to the * rb tree for a specific irq. 
*/ static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade) { struct rb_node **link = &uv_irq_root.rb_node; struct rb_node *parent = NULL; struct uv_irq_2_mmr_pnode *n; struct uv_irq_2_mmr_pnode *e; unsigned long irqflags; n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL, uv_blade_to_memory_nid(blade)); if (!n) return -ENOMEM; n->irq = irq; n->offset = offset; n->pnode = uv_blade_to_pnode(blade); spin_lock_irqsave(&uv_irq_lock, irqflags); /* Find the right place in the rbtree: */ while (*link) { parent = *link; e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list); if (unlikely(irq == e->irq)) { /* irq entry exists */ e->pnode = uv_blade_to_pnode(blade); e->offset = offset; spin_unlock_irqrestore(&uv_irq_lock, irqflags); kfree(n); return 0; } if (irq < e->irq) link = &(*link)->rb_left; else link = &(*link)->rb_right; } /* Insert the node into the rbtree. */ rb_link_node(&n->list, parent, link); rb_insert_color(&n->list, &uv_irq_root); spin_unlock_irqrestore(&uv_irq_lock, irqflags); return 0; } /* Retrieve offset and pnode information from the rb tree for a specific irq */ int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) { struct uv_irq_2_mmr_pnode *e; struct rb_node *n; unsigned long irqflags; spin_lock_irqsave(&uv_irq_lock, irqflags); n = uv_irq_root.rb_node; while (n) { e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); if (e->irq == irq) { *offset = e->offset; *pnode = e->pnode; spin_unlock_irqrestore(&uv_irq_lock, irqflags); return 0; } if (irq < e->irq) n = n->rb_left; else n = n->rb_right; } spin_unlock_irqrestore(&uv_irq_lock, irqflags); return -1; } /* * Re-target the irq to the specified CPU and enable the specified MMR located * on the specified blade to allow the sending of MSIs to the specified CPU. 
*/ static int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { const struct cpumask *eligible_cpu = cpumask_of(cpu); struct irq_cfg *cfg = irq_get_chip_data(irq); unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; int mmr_pnode, err; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); err = assign_irq_vector(irq, cfg, eligible_cpu); if (err != 0) return err; if (limit == UV_AFFINITY_CPU) irq_set_status_flags(irq, IRQ_NO_BALANCING); else irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, irq_name); mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; entry->vector = cfg->vector; entry->delivery_mode = apic->irq_delivery_mode; entry->dest_mode = apic->irq_dest_mode; entry->polarity = 0; entry->trigger = 0; entry->mask = 0; entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); mmr_pnode = uv_blade_to_pnode(mmr_blade); uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); if (cfg->move_in_progress) send_cleanup_vector(cfg); return irq; } /* * Disable the specified MMR located on the specified blade so that MSIs are * longer allowed to be sent. 
*/ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) { unsigned long mmr_value; struct uv_IO_APIC_route_entry *entry; BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; entry->mask = 1; uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); } static int uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_cfg *cfg = data->chip_data; unsigned int dest; unsigned long mmr_value, mmr_offset; struct uv_IO_APIC_route_entry *entry; int mmr_pnode; if (__ioapic_set_affinity(data, mask, &dest)) return -1; mmr_value = 0; entry = (struct uv_IO_APIC_route_entry *)&mmr_value; entry->vector = cfg->vector; entry->delivery_mode = apic->irq_delivery_mode; entry->dest_mode = apic->irq_dest_mode; entry->polarity = 0; entry->trigger = 0; entry->mask = 0; entry->dest = dest; /* Get previously stored MMR and pnode of hub sourcing interrupts */ if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode)) return -1; uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); if (cfg->move_in_progress) send_cleanup_vector(cfg); return 0; } /* * Set up a mapping of an available irq and vector, and enable the specified * MMR that defines the MSI that is to be sent to the specified CPU when an * interrupt is raised. */ int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, unsigned long mmr_offset, int limit) { int irq, ret; irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade)); if (irq <= 0) return -EBUSY; ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, limit); if (ret == irq) uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); else destroy_irq(irq); return ret; } EXPORT_SYMBOL_GPL(uv_setup_irq); /* * Tear down a mapping of an irq and vector, and disable the specified MMR that * defined the MSI that was to be sent to the specified CPU when an interrupt * was raised. 
* * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). */ void uv_teardown_irq(unsigned int irq) { struct uv_irq_2_mmr_pnode *e; struct rb_node *n; unsigned long irqflags; spin_lock_irqsave(&uv_irq_lock, irqflags); n = uv_irq_root.rb_node; while (n) { e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); if (e->irq == irq) { arch_disable_uv_irq(e->pnode, e->offset); rb_erase(n, &uv_irq_root); kfree(e); break; } if (irq < e->irq) n = n->rb_left; else n = n->rb_right; } spin_unlock_irqrestore(&uv_irq_lock, irqflags); destroy_irq(irq); } EXPORT_SYMBOL_GPL(uv_teardown_irq);
gpl-2.0
binkybear/android_kernel_google_msm
arch/x86/platform/uv/uv_irq.c
4859
7095
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV IRQ functions
 *
 * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>

/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode{
	struct rb_node		list;	/* node in uv_irq_root, keyed by irq */
	unsigned long		offset;	/* MMR offset passed to uv_setup_irq() */
	int			pnode;	/* pnode of the hub owning the MMR */
	int			irq;	/* lookup key */
};

/* Protects uv_irq_root against concurrent setup/teardown/lookup. */
static DEFINE_SPINLOCK(uv_irq_lock);
/* rb tree mapping irq number -> struct uv_irq_2_mmr_pnode */
static struct rb_root		uv_irq_root;

static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);

/* UV interrupts need no mask/unmask work at the irq_chip level. */
static void uv_noop(struct irq_data *data) { }

static void uv_ack_apic(struct irq_data *data)
{
	ack_APIC_irq();
}

static struct irq_chip uv_irq_chip = {
	.name			= "UV-CORE",
	.irq_mask		= uv_noop,
	.irq_unmask		= uv_noop,
	.irq_eoi		= uv_ack_apic,
	.irq_set_affinity	= uv_set_irq_affinity,
};

/*
 * Add offset and pnode information of the hub sourcing interrupts to the
 * rb tree for a specific irq.
*/
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
	struct rb_node **link = &uv_irq_root.rb_node;
	struct rb_node *parent = NULL;
	struct uv_irq_2_mmr_pnode *n;
	struct uv_irq_2_mmr_pnode *e;
	unsigned long irqflags;

	/* Allocate on the node local to the blade; done before taking the lock. */
	n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
				uv_blade_to_memory_nid(blade));
	if (!n)
		return -ENOMEM;

	n->irq = irq;
	n->offset = offset;
	n->pnode = uv_blade_to_pnode(blade);
	spin_lock_irqsave(&uv_irq_lock, irqflags);
	/* Find the right place in the rbtree: */
	while (*link) {
		parent = *link;
		e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);

		if (unlikely(irq == e->irq)) {
			/* irq entry exists: update it in place, free the new node */
			e->pnode = uv_blade_to_pnode(blade);
			e->offset = offset;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			kfree(n);
			return 0;
		}

		if (irq < e->irq)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	/* Insert the node into the rbtree. */
	rb_link_node(&n->list, parent, link);
	rb_insert_color(&n->list, &uv_irq_root);
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return 0;
}

/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);

		if (e->irq == irq) {
			*offset = e->offset;
			*pnode = e->pnode;
			spin_unlock_irqrestore(&uv_irq_lock, irqflags);
			return 0;
		}

		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	return -1;	/* no entry recorded for this irq */
}

/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 *
 * Returns the irq number on success (the caller compares against it), or a
 * negative error from assign_irq_vector().
 */
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		   unsigned long mmr_offset, int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode, err;

	/* The route entry must overlay exactly one 64-bit MMR word. */
	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	/*
	 * UV_AFFINITY_CPU pins the irq to the one CPU; otherwise allow
	 * migration in process context.
	 */
	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * longer allowed to be sent.
*/
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;

	/* The route entry must overlay exactly one 64-bit MMR word. */
	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
			sizeof(unsigned long));

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->mask = 1;	/* masked => hub sends no more MSIs */

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}

/*
 * Re-target an already-enabled UV interrupt to the CPUs in @mask by
 * rewriting the stored MMR with the new vector/destination.
 *
 * Returns 0 on success, -1 if no vector/destination could be assigned
 * or if no MMR info was recorded for this irq.
 */
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest;
	unsigned long mmr_value, mmr_offset;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	if (__ioapic_set_affinity(data, mask, &dest))
		return -1;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;

	entry->vector		= cfg->vector;
	entry->delivery_mode	= apic->irq_delivery_mode;
	entry->dest_mode	= apic->irq_dest_mode;
	entry->polarity		= 0;
	entry->trigger		= 0;
	entry->mask		= 0;
	entry->dest		= dest;

	/* Get previously stored MMR and pnode of hub sourcing interrupts */
	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
		return -1;

	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return 0;
}

/*
 * Set up a mapping of an available irq and vector, and enable the specified
 * MMR that defines the MSI that is to be sent to the specified CPU when an
 * interrupt is raised.
 *
 * Returns the irq number on success, -EBUSY if no irq could be created,
 * or the failure value from arch_enable_uv_irq().
 */
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
		 unsigned long mmr_offset, int limit)
{
	int irq, ret;

	/* Allocate the irq descriptor on the node owning the MMR hub. */
	irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));

	if (irq <= 0)
		return -EBUSY;

	ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
				 limit);
	if (ret == irq)
		uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
	else
		destroy_irq(irq);

	return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);

/*
 * Tear down a mapping of an irq and vector, and disable the specified MMR that
 * defined the MSI that was to be sent to the specified CPU when an interrupt
 * was raised.
 *
 * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
 */
void uv_teardown_irq(unsigned int irq)
{
	struct uv_irq_2_mmr_pnode *e;
	struct rb_node *n;
	unsigned long irqflags;

	spin_lock_irqsave(&uv_irq_lock, irqflags);
	n = uv_irq_root.rb_node;
	while (n) {
		e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
		if (e->irq == irq) {
			/* Mask the MSI source, then drop its lookup entry. */
			arch_disable_uv_irq(e->pnode, e->offset);
			rb_erase(n, &uv_irq_root);
			kfree(e);
			break;
		}
		if (irq < e->irq)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	spin_unlock_irqrestore(&uv_irq_lock, irqflags);
	destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
gpl-2.0
PoonKang/Kernel_SCH-I545
arch/arm/mach-pnx4008/time.c
4859
3578
/* * arch/arm/mach-pnx4008/time.c * * PNX4008 Timers * * Authors: Vitaly Wool, Dmitry Chigirev, Grigory Tolstolytkin <source@mvista.com> * * 2005 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/hardware.h> #include <asm/leds.h> #include <asm/mach/time.h> #include <asm/errno.h> #include "time.h" /*! Note: all timers are UPCOUNTING */ /*! * Returns number of us since last clock interrupt. Note that interrupts * will have been disabled by do_gettimeoffset() */ static unsigned long pnx4008_gettimeoffset(void) { u32 ticks_to_match = __raw_readl(HSTIM_MATCH0) - __raw_readl(HSTIM_COUNTER); u32 elapsed = LATCH - ticks_to_match; return (elapsed * (tick_nsec / 1000)) / LATCH; } /*! * IRQ handler for the timer */ static irqreturn_t pnx4008_timer_interrupt(int irq, void *dev_id) { if (__raw_readl(HSTIM_INT) & MATCH0_INT) { do { timer_tick(); /* * this algorithm takes care of possible delay * for this interrupt handling longer than a normal * timer period */ __raw_writel(__raw_readl(HSTIM_MATCH0) + LATCH, HSTIM_MATCH0); __raw_writel(MATCH0_INT, HSTIM_INT); /* clear interrupt */ /* * The goal is to keep incrementing HSTIM_MATCH0 * register until HSTIM_MATCH0 indicates time after * what HSTIM_COUNTER indicates. */ } while ((signed) (__raw_readl(HSTIM_MATCH0) - __raw_readl(HSTIM_COUNTER)) < 0); } return IRQ_HANDLED; } static struct irqaction pnx4008_timer_irq = { .name = "PNX4008 Tick Timer", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = pnx4008_timer_interrupt }; /*! 
* Set up timer and timer interrupt. */ static __init void pnx4008_setup_timer(void) { __raw_writel(RESET_COUNT, MSTIM_CTRL); while (__raw_readl(MSTIM_COUNTER)) ; /* wait for reset to complete. 100% guarantee event */ __raw_writel(0, MSTIM_CTRL); /* stop the timer */ __raw_writel(0, MSTIM_MCTRL); __raw_writel(RESET_COUNT, HSTIM_CTRL); while (__raw_readl(HSTIM_COUNTER)) ; /* wait for reset to complete. 100% guarantee event */ __raw_writel(0, HSTIM_CTRL); __raw_writel(0, HSTIM_MCTRL); __raw_writel(0, HSTIM_CCR); __raw_writel(12, HSTIM_PMATCH); /* scale down to 1 MHZ */ __raw_writel(LATCH, HSTIM_MATCH0); __raw_writel(MR0_INT, HSTIM_MCTRL); setup_irq(HSTIMER_INT, &pnx4008_timer_irq); __raw_writel(COUNT_ENAB | DEBUG_EN, HSTIM_CTRL); /*start timer, stop when JTAG active */ } /* Timer Clock Control in PM register */ #define TIMCLK_CTRL_REG IO_ADDRESS((PNX4008_PWRMAN_BASE + 0xBC)) #define WATCHDOG_CLK_EN 1 #define TIMER_CLK_EN 2 /* HS and MS timers? */ static u32 timclk_ctrl_reg_save; void pnx4008_timer_suspend(void) { timclk_ctrl_reg_save = __raw_readl(TIMCLK_CTRL_REG); __raw_writel(0, TIMCLK_CTRL_REG); /* disable timers */ } void pnx4008_timer_resume(void) { __raw_writel(timclk_ctrl_reg_save, TIMCLK_CTRL_REG); /* enable timers */ } struct sys_timer pnx4008_timer = { .init = pnx4008_setup_timer, .offset = pnx4008_gettimeoffset, .suspend = pnx4008_timer_suspend, .resume = pnx4008_timer_resume, };
gpl-2.0
uberlaggydarwin/useless
arch/m68k/kernel/module.c
5115
3413
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif

#ifdef CONFIG_MODULES

/*
 * Apply REL-style relocations: no explicit addend, the addend is the
 * value already stored at the target location.
 */
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_68K_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_68K_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}

/*
 * Apply RELA-style relocations: the addend is carried explicitly in the
 * relocation record, and the target location is overwritten.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate_add section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_68K_32:
			/* We add the value into the location given */
			*location = rel[i].r_addend + sym->st_value;
			break;
		case R_68K_PC32:
			/* Add the value, subtract its position */
			*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}

/* Run the m68k-specific fixup records collected for this module. */
int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *mod)
{
	module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
	return 0;
}

#endif /* CONFIG_MODULES */

/*
 * Patch memory-offset / node-shift constants into module code.
 * Only does work on MMU kernels; a no-op otherwise.
 */
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
		  struct m68k_fixup_info *end)
{
#ifdef CONFIG_MMU
	struct m68k_fixup_info *fixup;

	for (fixup = start; fixup < end; fixup++) {
		switch (fixup->type) {
		case m68k_fixup_memoffset:
			*(u32 *)fixup->addr = m68k_memoffset;
			break;
		case m68k_fixup_vnode_shift:
			*(u16 *)fixup->addr += m68k_virt_to_node_shift;
			break;
		}
	}
#endif
}
gpl-2.0
mlachwani/Android-4.4.2-Manta-Kernel
drivers/mtd/maps/dmv182.c
8187
3837
/* * drivers/mtd/maps/dmv182.c * * Flash map driver for the Dy4 SVME182 board * * Copyright 2003-2004, TimeSys Corporation * * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/errno.h> /* * This driver currently handles only the 16MiB user flash bank 1 on the * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2 * (VxWorks boot), or the optional 48MiB expansion flash. * * scott.wood@timesys.com: On the newer boards with 128MiB flash, it * now supports the first 96MiB (the boot flash bank containing FFW * is excluded). The VxWorks loader is in partition 1. 
*/ #define FLASH_BASE_ADDR 0xf0000000 #define FLASH_BANK_SIZE (128*1024*1024) MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>"); MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board"); MODULE_LICENSE("GPL"); static struct map_info svme182_map = { .name = "Dy4 SVME182", .bankwidth = 32, .size = 128 * 1024 * 1024 }; #define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE) // Allow 6MiB for the kernel #define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024) // Allow 1MiB for the bootloader #define NEW_BOOTLOADER_PART_SIZE (1024 * 1024) // Use the remaining 9MiB at the end of flash for the RFS #define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \ NEW_BOOTIMAGE_PART_SIZE) static struct mtd_partition svme182_partitions[] = { // The Lower PABS is only 128KiB, but the partition code doesn't // like partitions that don't end on the largest erase block // size of the device, even if all of the erase blocks in the // partition are small ones. The hardware should prevent // writes to the actual PABS areas. 
{ name: "Lower PABS and CPU 0 bootloader or kernel", size: 6*1024*1024, offset: 0, }, { name: "Root Filesystem", size: 10*1024*1024, offset: MTDPART_OFS_NXTBLK }, { name: "CPU1 Bootloader", size: 1024*1024, offset: MTDPART_OFS_NXTBLK, }, { name: "Extra", size: 110*1024*1024, offset: MTDPART_OFS_NXTBLK }, { name: "Foundation Firmware and Upper PABS", size: 1024*1024, offset: MTDPART_OFS_NXTBLK, mask_flags: MTD_WRITEABLE // read-only } }; static struct mtd_info *this_mtd; static int __init init_svme182(void) { struct mtd_partition *partitions; int num_parts = ARRAY_SIZE(svme182_partitions); partitions = svme182_partitions; svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size); if (svme182_map.virt == 0) { printk("Failed to ioremap FLASH memory area.\n"); return -EIO; } simple_map_init(&svme182_map); this_mtd = do_map_probe("cfi_probe", &svme182_map); if (!this_mtd) { iounmap((void *)svme182_map.virt); return -ENXIO; } printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n", this_mtd->size >> 20, FLASH_BASE_ADDR); this_mtd->owner = THIS_MODULE; mtd_device_register(this_mtd, partitions, num_parts); return 0; } static void __exit cleanup_svme182(void) { if (this_mtd) { mtd_device_unregister(this_mtd); map_destroy(this_mtd); } if (svme182_map.virt) { iounmap((void *)svme182_map.virt); svme182_map.virt = 0; } return; } module_init(init_svme182); module_exit(cleanup_svme182);
gpl-2.0
gearslam/JB_LS970ZVC_Viper
mm/failslab.c
8955
1316
#include <linux/fault-inject.h>
#include <linux/slab.h>

/*
 * Tunable state for slab-allocation fault injection.  Configured via the
 * "failslab=" boot parameter and, when CONFIG_FAULT_INJECTION_DEBUG_FS is
 * enabled, via debugfs.
 */
static struct {
	struct fault_attr attr;
	u32 ignore_gfp_wait;	/* skip faulting __GFP_WAIT allocations */
	int cache_filter;	/* fault only SLAB_FAILSLAB-marked caches */
} failslab = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.cache_filter = 0,
};

/*
 * Decide whether this slab allocation should be forced to fail.
 * Returns true when the configured fault attributes fire and none of
 * the exemptions below apply.
 */
bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
{
	/* Callers that cannot tolerate failure are never faulted. */
	if (gfpflags & __GFP_NOFAIL)
		return false;

	/* Optionally exempt allocations that are allowed to sleep. */
	if ((gfpflags & __GFP_WAIT) && failslab.ignore_gfp_wait)
		return false;

	/* Optionally restrict faulting to caches that opted in. */
	if (!(cache_flags & SLAB_FAILSLAB) && failslab.cache_filter)
		return false;

	return should_fail(&failslab.attr, size);
}

/* Parse the "failslab=" kernel command-line option. */
static int __init setup_failslab(char *str)
{
	return setup_fault_attr(&failslab.attr, str);
}

__setup("failslab=", setup_failslab);

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

/* Expose the failslab knobs under a "failslab" debugfs directory. */
static int __init failslab_debugfs_init(void)
{
	struct dentry *dir;
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;

	dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	/* Short-circuit: the second file is not attempted if the first fails. */
	if (debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&failslab.ignore_gfp_wait) &&
	    debugfs_create_bool("cache-filter", mode, dir,
				&failslab.cache_filter))
		return 0;

	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(failslab_debugfs_init);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
gpl-2.0
vanyasvl/android_kernel_samsung_picassoeur
drivers/ide/atiixp.c
9211
5707
/*
 * Copyright (C) 2003 ATI Inc. <hyu@ati.com>
 * Copyright (C) 2004,2007 Bartlomiej Zolnierkiewicz
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>

#define DRV_NAME "atiixp"

/* PCI config-space registers of the IXP IDE function. */
#define ATIIXP_IDE_PIO_TIMING		0x40
#define ATIIXP_IDE_MDMA_TIMING		0x44
#define ATIIXP_IDE_PIO_CONTROL		0x48
#define ATIIXP_IDE_PIO_MODE		0x4a
#define ATIIXP_IDE_UDMA_CONTROL		0x54
#define ATIIXP_IDE_UDMA_MODE		0x56

/* Active (command) and recovery widths, in controller clock units. */
typedef struct {
	u8 command_width;
	u8 recover_width;
} atiixp_ide_timing;

/* Indexed by PIO mode 0..4. */
static atiixp_ide_timing pio_timing[] = {
	{ 0x05, 0x0d },
	{ 0x04, 0x07 },
	{ 0x03, 0x04 },
	{ 0x02, 0x02 },
	{ 0x02, 0x00 },
};

/* Indexed by MWDMA mode 0..2. */
static atiixp_ide_timing mdma_timing[] = {
	{ 0x07, 0x07 },
	{ 0x02, 0x01 },
	{ 0x02, 0x00 },
};

/* Serializes read-modify-write cycles on the shared timing registers. */
static DEFINE_SPINLOCK(atiixp_lock);

/**
 *	atiixp_set_pio_mode	-	set host controller for PIO mode
 *	@hwif: port
 *	@drive: drive
 *
 *	Set the interface PIO mode.
 */

static void atiixp_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long flags;
	/* Each drive owns one byte of the timing dword; bytes are swapped. */
	int timing_shift = (drive->dn ^ 1) * 8;
	u32 pio_timing_data;
	u16 pio_mode_data;
	const u8 pio = drive->pio_mode - XFER_PIO_0;

	spin_lock_irqsave(&atiixp_lock, flags);

	/* Per-drive PIO mode nibble. */
	pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
	pio_mode_data &= ~(0x07 << (drive->dn * 4));
	pio_mode_data |= (pio << (drive->dn * 4));
	pci_write_config_word(dev, ATIIXP_IDE_PIO_MODE, pio_mode_data);

	/* Command/recovery widths for the selected PIO mode. */
	pci_read_config_dword(dev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
	pio_timing_data &= ~(0xff << timing_shift);
	pio_timing_data |= (pio_timing[pio].recover_width << timing_shift) |
		(pio_timing[pio].command_width << (timing_shift + 4));
	pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);

	spin_unlock_irqrestore(&atiixp_lock, flags);
}

/**
 *	atiixp_set_dma_mode	-	set host controller for DMA mode
 *	@hwif: port
 *	@drive: drive
 *
 *	Set a ATIIXP host controller to the desired DMA mode.  This involves
 *	programming the right timing data into the PCI configuration space.
 */

static void atiixp_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long flags;
	int timing_shift = (drive->dn ^ 1) * 8;
	u32 tmp32;
	u16 tmp16;
	u16 udma_ctl = 0;
	const u8 speed = drive->dma_mode;

	spin_lock_irqsave(&atiixp_lock, flags);

	pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &udma_ctl);

	if (speed >= XFER_UDMA_0) {
		/* Program the UDMA mode nibble and enable UDMA for this drive. */
		pci_read_config_word(dev, ATIIXP_IDE_UDMA_MODE, &tmp16);
		tmp16 &= ~(0x07 << (drive->dn * 4));
		tmp16 |= ((speed & 0x07) << (drive->dn * 4));
		pci_write_config_word(dev, ATIIXP_IDE_UDMA_MODE, tmp16);

		udma_ctl |= (1 << drive->dn);
	} else if (speed >= XFER_MW_DMA_0) {
		/* MWDMA: program timing widths and disable UDMA for this drive. */
		u8 i = speed & 0x03;

		pci_read_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, &tmp32);
		tmp32 &= ~(0xff << timing_shift);
		tmp32 |= (mdma_timing[i].recover_width << timing_shift) |
			(mdma_timing[i].command_width << (timing_shift + 4));
		pci_write_config_dword(dev, ATIIXP_IDE_MDMA_TIMING, tmp32);

		udma_ctl &= ~(1 << drive->dn);
	}

	pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, udma_ctl);

	spin_unlock_irqrestore(&atiixp_lock, flags);
}

/*
 * Report an 80-wire cable when either drive on the channel has a UDMA
 * mode >= 4 programmed; otherwise assume a 40-wire cable.
 */
static u8 atiixp_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	u8 udma_mode = 0, ch = hwif->channel;

	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ch, &udma_mode);

	if ((udma_mode & 0x07) >= 0x04 || (udma_mode & 0x70) >= 0x40)
		return ATA_CBL_PATA80;
	else
		return ATA_CBL_PATA40;
}

static const struct ide_port_ops atiixp_port_ops = {
	.set_pio_mode		= atiixp_set_pio_mode,
	.set_dma_mode		= atiixp_set_dma_mode,
	.cable_detect		= atiixp_cable_detect,
};

static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
	{	/* 0: IXP200/300/400/700 */
		.name		= DRV_NAME,
		.enablebits	= {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
		.port_ops	= &atiixp_port_ops,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	},
	{	/* 1: IXP600 */
		.name		= DRV_NAME,
		.enablebits	= {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
		.port_ops	= &atiixp_port_ops,
		.host_flags	= IDE_HFLAG_SINGLE,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
	},
};

/**
 *	atiixp_init_one	-	called when a ATIIXP is found
 *	@dev: the atiixp device
 *	@id: the matching pci id
 *
 *	Called when the PCI registration layer (or the IDE initialization)
 *	finds a device matching our IDE device tables.
 */

static int __devinit atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return ide_pci_init_one(dev, &atiixp_pci_info[id->driver_data], NULL);
}

/* driver_data is an index into atiixp_pci_info above. */
static const struct pci_device_id atiixp_pci_tbl[] = {
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), 0 },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), 0 },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);

static struct pci_driver atiixp_pci_driver = {
	.name		= "ATIIXP_IDE",
	.id_table	= atiixp_pci_tbl,
	.probe		= atiixp_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init atiixp_ide_init(void)
{
	return ide_pci_register_driver(&atiixp_pci_driver);
}

static void __exit atiixp_ide_exit(void)
{
	pci_unregister_driver(&atiixp_pci_driver);
}

module_init(atiixp_ide_init);
module_exit(atiixp_ide_exit);

MODULE_AUTHOR("HUI YU");
MODULE_DESCRIPTION("PCI driver module for ATI IXP IDE");
MODULE_LICENSE("GPL");
gpl-2.0
elkay/LK_DNA_2
net/ax25/ax25_ds_in.c
9467
7237
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> /* * State machine for state 1, Awaiting Connection State. * The handling of the timer(s) is in file ax25_ds_timer.c. * Handling of state 0 and connection release is in ax25.c. 
*/
/*
 * DAMA state 1 (Awaiting Connection) frame handler.
 * Always returns 0: no frame is ever queued from this state.
 */
static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb,
				  int frametype, int pf, int type)
{
	switch (frametype) {
	case AX25_SABM:
		/* Peer (re)connects in modulo-8 mode: accept with UA. */
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
		ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
		break;

	case AX25_SABME:
		/* Same, but extended (modulo-128) mode. */
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
		ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
		break;

	case AX25_DISC:
		/* Not connected yet: answer a disconnect with DM. */
		ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
		break;

	case AX25_UA:
		/* Our SABM was accepted: connection established, go to state 3. */
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		ax25_start_idletimer(ax25);
		ax25->vs      = 0;
		ax25->va      = 0;
		ax25->vr      = 0;
		ax25->state   = AX25_STATE_3;
		ax25->n2count = 0;
		if (ax25->sk != NULL) {
			bh_lock_sock(ax25->sk);
			ax25->sk->sk_state = TCP_ESTABLISHED;
			/*
			 * For WAIT_SABM connections we will produce an accept
			 * ready socket here
			 */
			if (!sock_flag(ax25->sk, SOCK_DEAD))
				ax25->sk->sk_state_change(ax25->sk);
			bh_unlock_sock(ax25->sk);
		}
		ax25_dama_on(ax25);

		/* according to DK4EG's spec we are required to
		 * send a RR RESPONSE FINAL NR=0.
		 */

		ax25_std_enquiry_response(ax25);
		break;

	case AX25_DM:
		/* Connection refused by the peer (final bit set). */
		if (pf)
			ax25_disconnect(ax25, ECONNREFUSED);
		break;

	default:
		/* Anything else with P set: retry the connect request. */
		if (pf)
			ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
		break;
	}

	return 0;
}

/*
 * State machine for state 2, Awaiting Release State.
 * The handling of the timer(s) is in file ax25_ds_timer.c
 * Handling of state 0 and connection release is in ax25.c.
*/
static int ax25_ds_state2_machine(ax25_cb *ax25, struct sk_buff *skb,
				  int frametype, int pf, int type)
{
	switch (frametype) {
	case AX25_SABM:
	case AX25_SABME:
		/* Still waiting for our DISC to be answered: refuse. */
		ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
		ax25_dama_off(ax25);
		break;

	case AX25_DISC:
		/* Crossed DISCs: acknowledge and complete the release. */
		ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
		ax25_dama_off(ax25);
		ax25_disconnect(ax25, 0);
		break;

	case AX25_DM:
	case AX25_UA:
		/* Release acknowledged (final bit set). */
		if (pf) {
			ax25_dama_off(ax25);
			ax25_disconnect(ax25, 0);
		}
		break;

	case AX25_I:
	case AX25_REJ:
	case AX25_RNR:
	case AX25_RR:
		/* Data/supervisory frames while releasing: re-send DISC. */
		if (pf) {
			ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
			ax25_dama_off(ax25);
		}
		break;

	default:
		break;
	}

	return 0;
}

/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file ax25_timer.c
 * Handling of state 0 and connection release is in ax25.c.
 *
 * Returns non-zero when the skb was queued by ax25_rx_iframe() (the
 * caller must then not free it).
 */
static int ax25_ds_state3_machine(ax25_cb *ax25, struct sk_buff *skb,
				  int frametype, int ns, int nr, int pf,
				  int type)
{
	int queued = 0;

	switch (frametype) {
	case AX25_SABM:
	case AX25_SABME:
		/* Peer link reset: re-init window/state and requeue pending data. */
		if (frametype == AX25_SABM) {
			ax25->modulus = AX25_MODULUS;
			ax25->window  = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
		} else {
			ax25->modulus = AX25_EMODULUS;
			ax25->window  = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
		}
		ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		ax25_start_idletimer(ax25);
		ax25->condition = 0x00;
		ax25->vs        = 0;
		ax25->va        = 0;
		ax25->vr        = 0;
		ax25_requeue_frames(ax25);
		ax25_dama_on(ax25);
		break;

	case AX25_DISC:
		ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
		ax25_dama_off(ax25);
		ax25_disconnect(ax25, 0);
		break;

	case AX25_DM:
		ax25_dama_off(ax25);
		ax25_disconnect(ax25, ECONNRESET);
		break;

	case AX25_RR:
	case AX25_RNR:
		/* Track the peer's receiver-busy condition. */
		if (frametype == AX25_RR)
			ax25->condition &= ~AX25_COND_PEER_RX_BUSY;
		else
			ax25->condition |= AX25_COND_PEER_RX_BUSY;

		if (ax25_validate_nr(ax25, nr)) {
			if (ax25_check_iframes_acked(ax25, nr))
				ax25->n2count=0;
			if (type == AX25_COMMAND && pf)
				ax25_ds_enquiry_response(ax25);
		} else {
			/* Invalid N(R): recover by re-establishing the link. */
			ax25_ds_nr_error_recovery(ax25);
			ax25->state = AX25_STATE_1;
		}
		break;

	case AX25_REJ:
		ax25->condition &= ~AX25_COND_PEER_RX_BUSY;

		if (ax25_validate_nr(ax25, nr)) {
			if (ax25->va != nr)
				ax25->n2count=0;

			ax25_frames_acked(ax25, nr);
			ax25_calculate_rtt(ax25);
			ax25_stop_t1timer(ax25);
			ax25_start_t3timer(ax25);
			/* Peer rejected: retransmit the outstanding I frames. */
			ax25_requeue_frames(ax25);
			if (type == AX25_COMMAND && pf)
				ax25_ds_enquiry_response(ax25);
		} else {
			ax25_ds_nr_error_recovery(ax25);
			ax25->state = AX25_STATE_1;
		}
		break;

	case AX25_I:
		if (!ax25_validate_nr(ax25, nr)) {
			ax25_ds_nr_error_recovery(ax25);
			ax25->state = AX25_STATE_1;
			break;
		}
		if (ax25->condition & AX25_COND_PEER_RX_BUSY) {
			ax25_frames_acked(ax25, nr);
			ax25->n2count = 0;
		} else {
			if (ax25_check_iframes_acked(ax25, nr))
				ax25->n2count = 0;
		}
		if (ax25->condition & AX25_COND_OWN_RX_BUSY) {
			/* Our receiver is busy: drop the I frame, only poll-answer. */
			if (pf)
				ax25_ds_enquiry_response(ax25);
			break;
		}
		if (ns == ax25->vr) {
			/* In-sequence frame: deliver it and advance V(R). */
			ax25->vr = (ax25->vr + 1) % ax25->modulus;
			queued = ax25_rx_iframe(ax25, skb);
			if (ax25->condition & AX25_COND_OWN_RX_BUSY)
				ax25->vr = ns;	/* ax25->vr - 1 */
			ax25->condition &= ~AX25_COND_REJECT;
			if (pf) {
				ax25_ds_enquiry_response(ax25);
			} else {
				if (!(ax25->condition & AX25_COND_ACK_PENDING)) {
					ax25->condition |= AX25_COND_ACK_PENDING;
					ax25_start_t2timer(ax25);
				}
			}
		} else {
			/* Out of sequence: send at most one REJ per gap. */
			if (ax25->condition & AX25_COND_REJECT) {
				if (pf)
					ax25_ds_enquiry_response(ax25);
			} else {
				ax25->condition |= AX25_COND_REJECT;
				ax25_ds_enquiry_response(ax25);
				ax25->condition &= ~AX25_COND_ACK_PENDING;
			}
		}
		break;

	case AX25_FRMR:
	case AX25_ILLEGAL:
		/* Unrecoverable protocol error: re-establish the data link. */
		ax25_ds_establish_data_link(ax25);
		ax25->state = AX25_STATE_1;
		break;

	default:
		break;
	}

	return queued;
}

/*
 * Higher level upcall for a LAPB frame
 */
int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	int queued = 0, frametype, ns, nr, pf;

	/* Decode frame type and sequence/poll fields, then dispatch by state. */
	frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);

	switch (ax25->state) {
	case AX25_STATE_1:
		queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);
		break;
	case AX25_STATE_2:
		queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);
		break;
	case AX25_STATE_3:
		queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
		break;
	}

	return queued;
}
gpl-2.0
Infusion-OS/android_kernel_lge_gee
net/ax25/ax25_ds_in.c
9467
7237
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> /* * State machine for state 1, Awaiting Connection State. * The handling of the timer(s) is in file ax25_ds_timer.c. * Handling of state 0 and connection release is in ax25.c. 
*/
/*
 * DAMA state 1 (Awaiting Connection) frame handler.
 * Always returns 0: no frame is ever queued from this state.
 */
static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb,
				  int frametype, int pf, int type)
{
	switch (frametype) {
	case AX25_SABM:
		/* Peer (re)connects in modulo-8 mode: accept with UA. */
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
		ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
		break;

	case AX25_SABME:
		/* Same, but extended (modulo-128) mode. */
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25->ax25_dev->values[AX25_VALUES_EWINDOW];
		ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE);
		break;

	case AX25_DISC:
		/* Not connected yet: answer a disconnect with DM. */
		ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE);
		break;

	case AX25_UA:
		/* Our SABM was accepted: connection established, go to state 3. */
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		ax25_start_idletimer(ax25);
		ax25->vs      = 0;
		ax25->va      = 0;
		ax25->vr      = 0;
		ax25->state   = AX25_STATE_3;
		ax25->n2count = 0;
		if (ax25->sk != NULL) {
			bh_lock_sock(ax25->sk);
			ax25->sk->sk_state = TCP_ESTABLISHED;
			/*
			 * For WAIT_SABM connections we will produce an accept
			 * ready socket here
			 */
			if (!sock_flag(ax25->sk, SOCK_DEAD))
				ax25->sk->sk_state_change(ax25->sk);
			bh_unlock_sock(ax25->sk);
		}
		ax25_dama_on(ax25);

		/* according to DK4EG's spec we are required to
		 * send a RR RESPONSE FINAL NR=0.
		 */

		ax25_std_enquiry_response(ax25);
		break;

	case AX25_DM:
		/* Connection refused by the peer (final bit set). */
		if (pf)
			ax25_disconnect(ax25, ECONNREFUSED);
		break;

	default:
		/* Anything else with P set: retry the connect request. */
		if (pf)
			ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND);
		break;
	}

	return 0;
}

/*
 * State machine for state 2, Awaiting Release State.
 * The handling of the timer(s) is in file ax25_ds_timer.c
 * Handling of state 0 and connection release is in ax25.c.
*/ static int ax25_ds_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) { switch (frametype) { case AX25_SABM: case AX25_SABME: ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_dama_off(ax25); break; case AX25_DISC: ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_dama_off(ax25); ax25_disconnect(ax25, 0); break; case AX25_DM: case AX25_UA: if (pf) { ax25_dama_off(ax25); ax25_disconnect(ax25, 0); } break; case AX25_I: case AX25_REJ: case AX25_RNR: case AX25_RR: if (pf) { ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_dama_off(ax25); } break; default: break; } return 0; } /* * State machine for state 3, Connected State. * The handling of the timer(s) is in file ax25_timer.c * Handling of state 0 and connection release is in ax25.c. */ static int ax25_ds_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) { int queued = 0; switch (frametype) { case AX25_SABM: case AX25_SABME: if (frametype == AX25_SABM) { ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; } else { ax25->modulus = AX25_EMODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW]; } ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_stop_t1timer(ax25); ax25_start_t3timer(ax25); ax25_start_idletimer(ax25); ax25->condition = 0x00; ax25->vs = 0; ax25->va = 0; ax25->vr = 0; ax25_requeue_frames(ax25); ax25_dama_on(ax25); break; case AX25_DISC: ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_dama_off(ax25); ax25_disconnect(ax25, 0); break; case AX25_DM: ax25_dama_off(ax25); ax25_disconnect(ax25, ECONNRESET); break; case AX25_RR: case AX25_RNR: if (frametype == AX25_RR) ax25->condition &= ~AX25_COND_PEER_RX_BUSY; else ax25->condition |= AX25_COND_PEER_RX_BUSY; if (ax25_validate_nr(ax25, nr)) { if (ax25_check_iframes_acked(ax25, nr)) ax25->n2count=0; if (type == AX25_COMMAND && pf) ax25_ds_enquiry_response(ax25); } else 
{ ax25_ds_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_REJ: ax25->condition &= ~AX25_COND_PEER_RX_BUSY; if (ax25_validate_nr(ax25, nr)) { if (ax25->va != nr) ax25->n2count=0; ax25_frames_acked(ax25, nr); ax25_calculate_rtt(ax25); ax25_stop_t1timer(ax25); ax25_start_t3timer(ax25); ax25_requeue_frames(ax25); if (type == AX25_COMMAND && pf) ax25_ds_enquiry_response(ax25); } else { ax25_ds_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_I: if (!ax25_validate_nr(ax25, nr)) { ax25_ds_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; break; } if (ax25->condition & AX25_COND_PEER_RX_BUSY) { ax25_frames_acked(ax25, nr); ax25->n2count = 0; } else { if (ax25_check_iframes_acked(ax25, nr)) ax25->n2count = 0; } if (ax25->condition & AX25_COND_OWN_RX_BUSY) { if (pf) ax25_ds_enquiry_response(ax25); break; } if (ns == ax25->vr) { ax25->vr = (ax25->vr + 1) % ax25->modulus; queued = ax25_rx_iframe(ax25, skb); if (ax25->condition & AX25_COND_OWN_RX_BUSY) ax25->vr = ns; /* ax25->vr - 1 */ ax25->condition &= ~AX25_COND_REJECT; if (pf) { ax25_ds_enquiry_response(ax25); } else { if (!(ax25->condition & AX25_COND_ACK_PENDING)) { ax25->condition |= AX25_COND_ACK_PENDING; ax25_start_t2timer(ax25); } } } else { if (ax25->condition & AX25_COND_REJECT) { if (pf) ax25_ds_enquiry_response(ax25); } else { ax25->condition |= AX25_COND_REJECT; ax25_ds_enquiry_response(ax25); ax25->condition &= ~AX25_COND_ACK_PENDING; } } break; case AX25_FRMR: case AX25_ILLEGAL: ax25_ds_establish_data_link(ax25); ax25->state = AX25_STATE_1; break; default: break; } return queued; } /* * Higher level upcall for a LAPB frame */ int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type) { int queued = 0, frametype, ns, nr, pf; frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); switch (ax25->state) { case AX25_STATE_1: queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type); break; case AX25_STATE_2: queued = ax25_ds_state2_machine(ax25, skb, 
frametype, pf, type); break; case AX25_STATE_3: queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type); break; } return queued; }
gpl-2.0
UnknownzD/I9103_ICS_Kernel
fs/proc/proc_tty.c
10747
4805
/* * proc_tty.c -- handles /proc/tty * * Copyright 1997, Theodore Ts'o */ #include <asm/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/tty.h> #include <linux/seq_file.h> #include <linux/bitops.h> /* * The /proc/tty directory inodes... */ static struct proc_dir_entry *proc_tty_ldisc, *proc_tty_driver; /* * This is the handler for /proc/tty/drivers */ static void show_tty_range(struct seq_file *m, struct tty_driver *p, dev_t from, int num) { seq_printf(m, "%-20s ", p->driver_name ? p->driver_name : "unknown"); seq_printf(m, "/dev/%-8s ", p->name); if (p->num > 1) { seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from), MINOR(from) + num - 1); } else { seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from)); } switch (p->type) { case TTY_DRIVER_TYPE_SYSTEM: seq_puts(m, "system"); if (p->subtype == SYSTEM_TYPE_TTY) seq_puts(m, ":/dev/tty"); else if (p->subtype == SYSTEM_TYPE_SYSCONS) seq_puts(m, ":console"); else if (p->subtype == SYSTEM_TYPE_CONSOLE) seq_puts(m, ":vtmaster"); break; case TTY_DRIVER_TYPE_CONSOLE: seq_puts(m, "console"); break; case TTY_DRIVER_TYPE_SERIAL: seq_puts(m, "serial"); break; case TTY_DRIVER_TYPE_PTY: if (p->subtype == PTY_TYPE_MASTER) seq_puts(m, "pty:master"); else if (p->subtype == PTY_TYPE_SLAVE) seq_puts(m, "pty:slave"); else seq_puts(m, "pty"); break; default: seq_printf(m, "type:%d.%d", p->type, p->subtype); } seq_putc(m, '\n'); } static int show_tty_driver(struct seq_file *m, void *v) { struct tty_driver *p = list_entry(v, struct tty_driver, tty_drivers); dev_t from = MKDEV(p->major, p->minor_start); dev_t to = from + p->num; if (&p->tty_drivers == tty_drivers.next) { /* pseudo-drivers first */ seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0); seq_puts(m, "system:/dev/tty\n"); seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console"); seq_printf(m, "%3d %7d ", 
TTYAUX_MAJOR, 1); seq_puts(m, "system:console\n"); #ifdef CONFIG_UNIX98_PTYS seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2); seq_puts(m, "system\n"); #endif #ifdef CONFIG_VT seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0"); seq_printf(m, "%3d %7d ", TTY_MAJOR, 0); seq_puts(m, "system:vtmaster\n"); #endif } while (MAJOR(from) < MAJOR(to)) { dev_t next = MKDEV(MAJOR(from)+1, 0); show_tty_range(m, p, from, next - from); from = next; } if (from != to) show_tty_range(m, p, from, to - from); return 0; } /* iterator */ static void *t_start(struct seq_file *m, loff_t *pos) { mutex_lock(&tty_mutex); return seq_list_start(&tty_drivers, *pos); } static void *t_next(struct seq_file *m, void *v, loff_t *pos) { return seq_list_next(v, &tty_drivers, pos); } static void t_stop(struct seq_file *m, void *v) { mutex_unlock(&tty_mutex); } static const struct seq_operations tty_drivers_op = { .start = t_start, .next = t_next, .stop = t_stop, .show = show_tty_driver }; static int tty_drivers_open(struct inode *inode, struct file *file) { return seq_open(file, &tty_drivers_op); } static const struct file_operations proc_tty_drivers_operations = { .open = tty_drivers_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /* * This function is called by tty_register_driver() to handle * registering the driver's /proc handler into /proc/tty/driver/<foo> */ void proc_tty_register_driver(struct tty_driver *driver) { struct proc_dir_entry *ent; if (!driver->driver_name || driver->proc_entry || !driver->ops->proc_fops) return; ent = proc_create_data(driver->driver_name, 0, proc_tty_driver, driver->ops->proc_fops, driver); driver->proc_entry = ent; } /* * This function is called by tty_unregister_driver() */ void proc_tty_unregister_driver(struct tty_driver *driver) { struct proc_dir_entry *ent; ent = driver->proc_entry; if (!ent) return; remove_proc_entry(driver->driver_name, proc_tty_driver); driver->proc_entry = NULL; 
} /* * Called by proc_root_init() to initialize the /proc/tty subtree */ void __init proc_tty_init(void) { if (!proc_mkdir("tty", NULL)) return; proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL); /* * /proc/tty/driver/serial reveals the exact character counts for * serial links which is just too easy to abuse for inferring * password lengths and inter-keystroke timings during password * entry. */ proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL); proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops); proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations); }
gpl-2.0
friedrich420/Sprint-Note-4-Android-5.1.1-Kernel
KernelN910P-5_1_1GIT/drivers/ide/ide-pio-blacklist.c
12539
2347
/* * PIO blacklist. Some drives incorrectly report their maximal PIO mode, * at least in respect to CMD640. Here we keep info on some known drives. * * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION * to avoid breaking the fragile cmd640.c support. */ #include <linux/string.h> static struct ide_pio_info { const char *name; int pio; } ide_pio_blacklist [] = { { "Conner Peripherals 540MB - CFS540A", 3 }, { "WDC AC2700", 3 }, { "WDC AC2540", 3 }, { "WDC AC2420", 3 }, { "WDC AC2340", 3 }, { "WDC AC2250", 0 }, { "WDC AC2200", 0 }, { "WDC AC21200", 4 }, { "WDC AC2120", 0 }, { "WDC AC2850", 3 }, { "WDC AC1270", 3 }, { "WDC AC1170", 1 }, { "WDC AC1210", 1 }, { "WDC AC280", 0 }, { "WDC AC31000", 3 }, { "WDC AC31200", 3 }, { "Maxtor 7131 AT", 1 }, { "Maxtor 7171 AT", 1 }, { "Maxtor 7213 AT", 1 }, { "Maxtor 7245 AT", 1 }, { "Maxtor 7345 AT", 1 }, { "Maxtor 7546 AT", 3 }, { "Maxtor 7540 AV", 3 }, { "SAMSUNG SHD-3121A", 1 }, { "SAMSUNG SHD-3122A", 1 }, { "SAMSUNG SHD-3172A", 1 }, { "ST5660A", 3 }, { "ST3660A", 3 }, { "ST3630A", 3 }, { "ST3655A", 3 }, { "ST3391A", 3 }, { "ST3390A", 1 }, { "ST3600A", 1 }, { "ST3290A", 0 }, { "ST3144A", 0 }, { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive) according to Seagate's FIND-ATA program */ { "QUANTUM ELS127A", 0 }, { "QUANTUM ELS170A", 0 }, { "QUANTUM LPS240A", 0 }, { "QUANTUM LPS210A", 3 }, { "QUANTUM LPS270A", 3 }, { "QUANTUM LPS365A", 3 }, { "QUANTUM LPS540A", 3 }, { "QUANTUM LIGHTNING 540A", 3 }, { "QUANTUM LIGHTNING 730A", 3 }, { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */ { "QUANTUM FIREBALL_640", 3 }, { "QUANTUM FIREBALL_1080", 3 }, { "QUANTUM FIREBALL_1280", 3 }, { NULL, 0 } }; /** * ide_scan_pio_blacklist - check for a blacklisted drive * @model: Drive model string * * This routine searches the ide_pio_blacklist for an entry * matching the start/whole of the supplied model name. * * Returns -1 if no match found. 
* Otherwise returns the recommended PIO mode from ide_pio_blacklist[]. */ int ide_scan_pio_blacklist(char *model) { struct ide_pio_info *p; for (p = ide_pio_blacklist; p->name != NULL; p++) { if (strncmp(p->name, model, strlen(p->name)) == 0) return p->pio; } return -1; }
gpl-2.0
Snuzzo/funky_redux
lib/extable.c
13819
2486
/* * Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c. * * Copyright (C) 2004 Paul Mackerras, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sort.h> #include <asm/uaccess.h> #ifndef ARCH_HAS_SORT_EXTABLE /* * The exception table needs to be sorted so that the binary * search that we use to find entries in it works properly. * This is used both for the kernel exception table and for * the exception tables of modules that get loaded. */ static int cmp_ex(const void *a, const void *b) { const struct exception_table_entry *x = a, *y = b; /* avoid overflow */ if (x->insn > y->insn) return 1; if (x->insn < y->insn) return -1; return 0; } void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish) { sort(start, finish - start, sizeof(struct exception_table_entry), cmp_ex, NULL); } #ifdef CONFIG_MODULES /* * If the exception table is sorted, any referring to the module init * will be at the beginning or the end. */ void trim_init_extable(struct module *m) { /*trim the beginning*/ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) { m->extable++; m->num_exentries--; } /*trim the end*/ while (m->num_exentries && within_module_init(m->extable[m->num_exentries-1].insn, m)) m->num_exentries--; } #endif /* CONFIG_MODULES */ #endif /* !ARCH_HAS_SORT_EXTABLE */ #ifndef ARCH_HAS_SEARCH_EXTABLE /* * Search one exception table for an entry corresponding to the * given instruction address, and return the address of the entry, * or NULL if none is found. * We use a binary search, and thus we assume that the table is * already sorted. 
*/ const struct exception_table_entry * search_extable(const struct exception_table_entry *first, const struct exception_table_entry *last, unsigned long value) { while (first <= last) { const struct exception_table_entry *mid; mid = ((last - first) >> 1) + first; /* * careful, the distance between value and insn * can be larger than MAX_LONG: */ if (mid->insn < value) first = mid + 1; else if (mid->insn > value) last = mid - 1; else return mid; } return NULL; } #endif
gpl-2.0
fastbot3d/linux-3.2.0
drivers/misc/fsa9480.c
252
13269
/* * fsa9480.c - FSA9480 micro USB switch device driver * * Copyright (C) 2010 Samsung Electronics * Minkyu Kang <mk7.kang@samsung.com> * Wonguk Jeong <wonguk.jeong@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/platform_data/fsa9480.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/pm_runtime.h> /* FSA9480 I2C registers */ #define FSA9480_REG_DEVID 0x01 #define FSA9480_REG_CTRL 0x02 #define FSA9480_REG_INT1 0x03 #define FSA9480_REG_INT2 0x04 #define FSA9480_REG_INT1_MASK 0x05 #define FSA9480_REG_INT2_MASK 0x06 #define FSA9480_REG_ADC 0x07 #define FSA9480_REG_TIMING1 0x08 #define FSA9480_REG_TIMING2 0x09 #define FSA9480_REG_DEV_T1 0x0a #define FSA9480_REG_DEV_T2 0x0b #define FSA9480_REG_BTN1 0x0c #define FSA9480_REG_BTN2 0x0d #define FSA9480_REG_CK 0x0e #define FSA9480_REG_CK_INT1 0x0f #define FSA9480_REG_CK_INT2 0x10 #define FSA9480_REG_CK_INTMASK1 0x11 #define FSA9480_REG_CK_INTMASK2 0x12 #define FSA9480_REG_MANSW1 0x13 #define FSA9480_REG_MANSW2 0x14 /* Control */ #define CON_SWITCH_OPEN (1 << 4) #define CON_RAW_DATA (1 << 3) #define CON_MANUAL_SW (1 << 2) #define CON_WAIT (1 << 1) #define CON_INT_MASK (1 << 0) #define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \ CON_MANUAL_SW | CON_WAIT) /* Device Type 1 */ #define DEV_USB_OTG (1 << 7) #define DEV_DEDICATED_CHG (1 << 6) #define DEV_USB_CHG (1 << 5) #define DEV_CAR_KIT (1 << 4) #define DEV_UART (1 << 3) #define DEV_USB (1 << 2) #define DEV_AUDIO_2 (1 << 1) #define DEV_AUDIO_1 (1 << 0) #define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB) #define DEV_T1_UART_MASK (DEV_UART) #define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG) /* Device 
Type 2 */ #define DEV_AV (1 << 6) #define DEV_TTY (1 << 5) #define DEV_PPD (1 << 4) #define DEV_JIG_UART_OFF (1 << 3) #define DEV_JIG_UART_ON (1 << 2) #define DEV_JIG_USB_OFF (1 << 1) #define DEV_JIG_USB_ON (1 << 0) #define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON) #define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON) #define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \ DEV_JIG_UART_OFF | DEV_JIG_UART_ON) /* * Manual Switch * D- [7:5] / D+ [4:2] * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO */ #define SW_VAUDIO ((4 << 5) | (4 << 2)) #define SW_UART ((3 << 5) | (3 << 2)) #define SW_AUDIO ((2 << 5) | (2 << 2)) #define SW_DHOST ((1 << 5) | (1 << 2)) #define SW_AUTO ((0 << 5) | (0 << 2)) /* Interrupt 1 */ #define INT_DETACH (1 << 1) #define INT_ATTACH (1 << 0) struct fsa9480_usbsw { struct i2c_client *client; struct fsa9480_platform_data *pdata; int dev1; int dev2; int mansw; }; static struct fsa9480_usbsw *chip; static int fsa9480_write_reg(struct i2c_client *client, int reg, int value) { int ret; ret = i2c_smbus_write_byte_data(client, reg, value); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static int fsa9480_read_reg(struct i2c_client *client, int reg) { int ret; ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static int fsa9480_read_irq(struct i2c_client *client, int *value) { int ret; ret = i2c_smbus_read_i2c_block_data(client, FSA9480_REG_INT1, 2, (u8 *)value); *value &= 0xffff; if (ret < 0) dev_err(&client->dev, "%s: err %d\n", __func__, ret); return ret; } static void fsa9480_set_switch(const char *buf) { struct fsa9480_usbsw *usbsw = chip; struct i2c_client *client = usbsw->client; unsigned int value; unsigned int path = 0; value = fsa9480_read_reg(client, FSA9480_REG_CTRL); if (!strncmp(buf, "VAUDIO", 6)) { path = SW_VAUDIO; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "UART", 4)) { path = 
SW_UART; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "AUDIO", 5)) { path = SW_AUDIO; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "DHOST", 5)) { path = SW_DHOST; value &= ~CON_MANUAL_SW; } else if (!strncmp(buf, "AUTO", 4)) { path = SW_AUTO; value |= CON_MANUAL_SW; } else { printk(KERN_ERR "Wrong command\n"); return; } usbsw->mansw = path; fsa9480_write_reg(client, FSA9480_REG_MANSW1, path); fsa9480_write_reg(client, FSA9480_REG_CTRL, value); } static ssize_t fsa9480_get_switch(char *buf) { struct fsa9480_usbsw *usbsw = chip; struct i2c_client *client = usbsw->client; unsigned int value; value = fsa9480_read_reg(client, FSA9480_REG_MANSW1); if (value == SW_VAUDIO) return sprintf(buf, "VAUDIO\n"); else if (value == SW_UART) return sprintf(buf, "UART\n"); else if (value == SW_AUDIO) return sprintf(buf, "AUDIO\n"); else if (value == SW_DHOST) return sprintf(buf, "DHOST\n"); else if (value == SW_AUTO) return sprintf(buf, "AUTO\n"); else return sprintf(buf, "%x", value); } static ssize_t fsa9480_show_device(struct device *dev, struct device_attribute *attr, char *buf) { struct fsa9480_usbsw *usbsw = dev_get_drvdata(dev); struct i2c_client *client = usbsw->client; int dev1, dev2; dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); if (!dev1 && !dev2) return sprintf(buf, "NONE\n"); /* USB */ if (dev1 & DEV_T1_USB_MASK || dev2 & DEV_T2_USB_MASK) return sprintf(buf, "USB\n"); /* UART */ if (dev1 & DEV_T1_UART_MASK || dev2 & DEV_T2_UART_MASK) return sprintf(buf, "UART\n"); /* CHARGER */ if (dev1 & DEV_T1_CHARGER_MASK) return sprintf(buf, "CHARGER\n"); /* JIG */ if (dev2 & DEV_T2_JIG_MASK) return sprintf(buf, "JIG\n"); return sprintf(buf, "UNKNOWN\n"); } static ssize_t fsa9480_show_manualsw(struct device *dev, struct device_attribute *attr, char *buf) { return fsa9480_get_switch(buf); } static ssize_t fsa9480_set_manualsw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { 
fsa9480_set_switch(buf); return count; } static DEVICE_ATTR(device, S_IRUGO, fsa9480_show_device, NULL); static DEVICE_ATTR(switch, S_IRUGO | S_IWUSR, fsa9480_show_manualsw, fsa9480_set_manualsw); static struct attribute *fsa9480_attributes[] = { &dev_attr_device.attr, &dev_attr_switch.attr, NULL }; static const struct attribute_group fsa9480_group = { .attrs = fsa9480_attributes, }; static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw, int intr) { int val1, val2, ctrl; struct fsa9480_platform_data *pdata = usbsw->pdata; struct i2c_client *client = usbsw->client; val1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); val2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); ctrl = fsa9480_read_reg(client, FSA9480_REG_CTRL); dev_info(&client->dev, "intr: 0x%x, dev1: 0x%x, dev2: 0x%x\n", intr, val1, val2); if (!intr) goto out; if (intr & INT_ATTACH) { /* Attached */ /* USB */ if (val1 & DEV_T1_USB_MASK || val2 & DEV_T2_USB_MASK) { if (pdata->usb_cb) pdata->usb_cb(FSA9480_ATTACHED); if (usbsw->mansw) { fsa9480_write_reg(client, FSA9480_REG_MANSW1, usbsw->mansw); } } /* UART */ if (val1 & DEV_T1_UART_MASK || val2 & DEV_T2_UART_MASK) { if (pdata->uart_cb) pdata->uart_cb(FSA9480_ATTACHED); if (!(ctrl & CON_MANUAL_SW)) { fsa9480_write_reg(client, FSA9480_REG_MANSW1, SW_UART); } } /* CHARGER */ if (val1 & DEV_T1_CHARGER_MASK) { if (pdata->charger_cb) pdata->charger_cb(FSA9480_ATTACHED); } /* JIG */ if (val2 & DEV_T2_JIG_MASK) { if (pdata->jig_cb) pdata->jig_cb(FSA9480_ATTACHED); } } else if (intr & INT_DETACH) { /* Detached */ /* USB */ if (usbsw->dev1 & DEV_T1_USB_MASK || usbsw->dev2 & DEV_T2_USB_MASK) { if (pdata->usb_cb) pdata->usb_cb(FSA9480_DETACHED); } /* UART */ if (usbsw->dev1 & DEV_T1_UART_MASK || usbsw->dev2 & DEV_T2_UART_MASK) { if (pdata->uart_cb) pdata->uart_cb(FSA9480_DETACHED); } /* CHARGER */ if (usbsw->dev1 & DEV_T1_CHARGER_MASK) { if (pdata->charger_cb) pdata->charger_cb(FSA9480_DETACHED); } /* JIG */ if (usbsw->dev2 & DEV_T2_JIG_MASK) { if (pdata->jig_cb) 
pdata->jig_cb(FSA9480_DETACHED); } } usbsw->dev1 = val1; usbsw->dev2 = val2; out: ctrl &= ~CON_INT_MASK; fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); } static irqreturn_t fsa9480_irq_handler(int irq, void *data) { struct fsa9480_usbsw *usbsw = data; struct i2c_client *client = usbsw->client; int intr; /* clear interrupt */ fsa9480_read_irq(client, &intr); /* device detection */ fsa9480_detect_dev(usbsw, intr); return IRQ_HANDLED; } static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw) { struct fsa9480_platform_data *pdata = usbsw->pdata; struct i2c_client *client = usbsw->client; int ret; int intr; unsigned int ctrl = CON_MASK; /* clear interrupt */ fsa9480_read_irq(client, &intr); /* unmask interrupt (attach/detach only) */ fsa9480_write_reg(client, FSA9480_REG_INT1_MASK, 0xfc); fsa9480_write_reg(client, FSA9480_REG_INT2_MASK, 0x1f); usbsw->mansw = fsa9480_read_reg(client, FSA9480_REG_MANSW1); if (usbsw->mansw) ctrl &= ~CON_MANUAL_SW; /* Manual Switching Mode */ fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl); if (pdata && pdata->cfg_gpio) pdata->cfg_gpio(); if (client->irq) { ret = request_threaded_irq(client->irq, NULL, fsa9480_irq_handler, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "fsa9480 micro USB", usbsw); if (ret) { dev_err(&client->dev, "failed to reqeust IRQ\n"); return ret; } if (pdata) device_init_wakeup(&client->dev, pdata->wakeup); } return 0; } static int __devinit fsa9480_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); struct fsa9480_usbsw *usbsw; int ret = 0; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; usbsw = kzalloc(sizeof(struct fsa9480_usbsw), GFP_KERNEL); if (!usbsw) { dev_err(&client->dev, "failed to allocate driver data\n"); return -ENOMEM; } usbsw->client = client; usbsw->pdata = client->dev.platform_data; chip = usbsw; i2c_set_clientdata(client, usbsw); ret = fsa9480_irq_init(usbsw); if (ret) goto fail1; ret = 
sysfs_create_group(&client->dev.kobj, &fsa9480_group); if (ret) { dev_err(&client->dev, "failed to create fsa9480 attribute group\n"); goto fail2; } /* ADC Detect Time: 500ms */ fsa9480_write_reg(client, FSA9480_REG_TIMING1, 0x6); if (chip->pdata->reset_cb) chip->pdata->reset_cb(); /* device detection */ fsa9480_detect_dev(usbsw, INT_ATTACH); pm_runtime_set_active(&client->dev); return 0; fail2: if (client->irq) free_irq(client->irq, usbsw); fail1: i2c_set_clientdata(client, NULL); kfree(usbsw); return ret; } static int __devexit fsa9480_remove(struct i2c_client *client) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); if (client->irq) free_irq(client->irq, usbsw); i2c_set_clientdata(client, NULL); sysfs_remove_group(&client->dev.kobj, &fsa9480_group); device_init_wakeup(&client->dev, 0); kfree(usbsw); return 0; } #ifdef CONFIG_PM static int fsa9480_suspend(struct i2c_client *client, pm_message_t state) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); struct fsa9480_platform_data *pdata = usbsw->pdata; if (device_may_wakeup(&client->dev) && client->irq) enable_irq_wake(client->irq); if (pdata->usb_power) pdata->usb_power(0); return 0; } static int fsa9480_resume(struct i2c_client *client) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); int dev1, dev2; if (device_may_wakeup(&client->dev) && client->irq) disable_irq_wake(client->irq); /* * Clear Pending interrupt. Note that detect_dev does what * the interrupt handler does. So, we don't miss pending and * we reenable interrupt if there is one. */ fsa9480_read_reg(client, FSA9480_REG_INT1); fsa9480_read_reg(client, FSA9480_REG_INT2); dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1); dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2); /* device detection */ fsa9480_detect_dev(usbsw, (dev1 || dev2) ? 
INT_ATTACH : INT_DETACH); return 0; } #else #define fsa9480_suspend NULL #define fsa9480_resume NULL #endif /* CONFIG_PM */ static const struct i2c_device_id fsa9480_id[] = { {"fsa9480", 0}, {} }; MODULE_DEVICE_TABLE(i2c, fsa9480_id); static struct i2c_driver fsa9480_i2c_driver = { .driver = { .name = "fsa9480", }, .probe = fsa9480_probe, .remove = __devexit_p(fsa9480_remove), .resume = fsa9480_resume, .suspend = fsa9480_suspend, .id_table = fsa9480_id, }; static int __init fsa9480_init(void) { return i2c_add_driver(&fsa9480_i2c_driver); } module_init(fsa9480_init); static void __exit fsa9480_exit(void) { i2c_del_driver(&fsa9480_i2c_driver); } module_exit(fsa9480_exit); MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>"); MODULE_DESCRIPTION("FSA9480 USB Switch driver"); MODULE_LICENSE("GPL");
gpl-2.0
android-on-pre/kernel_common
drivers/edac/i5000_edac.c
252
42753
/*
 * Intel 5000(P/V/X) class Memory Controllers kernel module
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Douglas Thompson Linux Networx (http://lnxi.com)
 *	norsk5@xmission.com
 *
 * This module is based on the following document:
 *
 * Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet
 *	http://developer.intel.com/design/chipsets/datashts/313070.htm
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <asm/mmzone.h>

#include "edac_core.h"

/*
 * Alter this version for the I5000 module when modifications are made
 *
 * NOTE(review): embedding __DATE__ makes builds non-reproducible; newer
 * kernels dropped it from the revision string.  Left unchanged here.
 */
#define I5000_REVISION    " Ver: 2.0.12 " __DATE__
#define EDAC_MOD_STR      "i5000_edac"

/* Limited to console output for this controller's messages */
#define i5000_printk(level, fmt, arg...) \
	edac_printk(level, "i5000", fmt, ##arg)

#define i5000_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg)

/* Branch device IDs, in case the pci_ids.h of this tree lacks them */
#ifndef PCI_DEVICE_ID_INTEL_FBD_0
#define PCI_DEVICE_ID_INTEL_FBD_0	0x25F5
#endif
#ifndef PCI_DEVICE_ID_INTEL_FBD_1
#define PCI_DEVICE_ID_INTEL_FBD_1	0x25F6
#endif

/* Device 16,
 * Function 0: System Address
 * Function 1: Memory Branch Map, Control, Errors Register
 * Function 2: FSB Error Registers
 *
 * All 3 functions of Device 16 (0,1,2) share the SAME DID
 */
#define	PCI_DEVICE_ID_INTEL_I5000_DEV16	0x25F0

/* OFFSETS for Function 0 */

/* OFFSETS for Function 1 (PCI config-space offsets on dev 16 func 1) */
#define		AMBASE			0x48	/* 64-bit AMB window base */
#define		MAXCH			0x56	/* max channel count */
#define		MAXDIMMPERCH		0x57	/* max DIMMs per channel */
#define		TOLM			0x6C	/* top of low memory */
#define		REDMEMB			0x7C	/* recoverable ECC data log */
#define			RED_ECC_LOCATOR(x)	((x) & 0x3FFFF)
#define			REC_ECC_LOCATOR_EVEN(x)	((x) & 0x001FF)
#define			REC_ECC_LOCATOR_ODD(x)	((x) & 0x3FE00)
#define		MIR0			0x80	/* memory interleave ranges */
#define		MIR1			0x84
#define		MIR2			0x88
#define		AMIR0			0x8C
#define		AMIR1			0x90
#define		AMIR2			0x94

/* Fatal error reporting registers */
#define		FERR_FAT_FBD		0x98	/* first fatal FBD error */
#define		NERR_FAT_FBD		0x9C	/* next fatal FBD error */
#define			EXTRACT_FBDCHAN_INDX(x)	(((x)>>28) & 0x3)
#define			FERR_FAT_FBDCHAN	0x30000000
#define			FERR_FAT_M3ERR	0x00000004
#define			FERR_FAT_M2ERR	0x00000002
#define			FERR_FAT_M1ERR	0x00000001
#define			FERR_FAT_MASK	(FERR_FAT_M1ERR | \
					FERR_FAT_M2ERR | \
					FERR_FAT_M3ERR)

/* Non-fatal error reporting registers */
#define		FERR_NF_FBD		0xA0	/* first non-fatal FBD error */

/* Thermal and SPD or BFD errors */
#define			FERR_NF_M28ERR	0x01000000
#define			FERR_NF_M27ERR	0x00800000
#define			FERR_NF_M26ERR	0x00400000
#define			FERR_NF_M25ERR	0x00200000
#define			FERR_NF_M24ERR	0x00100000
#define			FERR_NF_M23ERR	0x00080000
#define			FERR_NF_M22ERR	0x00040000
#define			FERR_NF_M21ERR	0x00020000

/* Correctable errors */
#define			FERR_NF_M20ERR	0x00010000
#define			FERR_NF_M19ERR	0x00008000
#define			FERR_NF_M18ERR	0x00004000
#define			FERR_NF_M17ERR	0x00002000

/* Non-Retry or redundant Retry errors */
#define			FERR_NF_M16ERR	0x00001000
#define			FERR_NF_M15ERR	0x00000800
#define			FERR_NF_M14ERR	0x00000400
#define			FERR_NF_M13ERR	0x00000200

/* Uncorrectable errors */
#define			FERR_NF_M12ERR	0x00000100
#define			FERR_NF_M11ERR	0x00000080
#define			FERR_NF_M10ERR	0x00000040
#define			FERR_NF_M9ERR	0x00000020
#define			FERR_NF_M8ERR	0x00000010
#define			FERR_NF_M7ERR	0x00000008
#define			FERR_NF_M6ERR	0x00000004
#define			FERR_NF_M5ERR	0x00000002
#define			FERR_NF_M4ERR	0x00000001

/* Groupings of the non-fatal error bits, used by the handlers below */
#define			FERR_NF_UNCORRECTABLE	(FERR_NF_M12ERR | \
							FERR_NF_M11ERR | \
							FERR_NF_M10ERR | \
							FERR_NF_M9ERR | \
							FERR_NF_M8ERR | \
							FERR_NF_M7ERR | \
							FERR_NF_M6ERR | \
							FERR_NF_M5ERR | \
							FERR_NF_M4ERR)
#define			FERR_NF_CORRECTABLE	(FERR_NF_M20ERR | \
							FERR_NF_M19ERR | \
							FERR_NF_M18ERR | \
							FERR_NF_M17ERR)
#define			FERR_NF_DIMM_SPARE	(FERR_NF_M27ERR | \
							FERR_NF_M28ERR)
#define			FERR_NF_THERMAL		(FERR_NF_M26ERR | \
							FERR_NF_M25ERR | \
							FERR_NF_M24ERR | \
							FERR_NF_M23ERR)
#define			FERR_NF_SPD_PROTOCOL	(FERR_NF_M22ERR)
#define			FERR_NF_NORTH_CRC	(FERR_NF_M21ERR)
/* NOTE(review): M16 (0x1000) is defined above but not part of NON_RETRY
 * or any other group, so it is never reported -- confirm against the
 * datasheet whether that is intentional. */
#define			FERR_NF_NON_RETRY	(FERR_NF_M13ERR | \
							FERR_NF_M14ERR | \
							FERR_NF_M15ERR)

#define		NERR_NF_FBD		0xA4	/* next non-fatal FBD error */
#define			FERR_NF_MASK		(FERR_NF_UNCORRECTABLE | \
							FERR_NF_CORRECTABLE | \
							FERR_NF_DIMM_SPARE | \
							FERR_NF_THERMAL | \
							FERR_NF_SPD_PROTOCOL | \
							FERR_NF_NORTH_CRC | \
							FERR_NF_NON_RETRY)

/* Error-mask register: a '1' bit masks (disables) the matching error */
#define		EMASK_FBD	0xA8
#define			EMASK_FBD_M28ERR	0x08000000
#define			EMASK_FBD_M27ERR	0x04000000
#define			EMASK_FBD_M26ERR	0x02000000
#define			EMASK_FBD_M25ERR	0x01000000
#define			EMASK_FBD_M24ERR	0x00800000
#define			EMASK_FBD_M23ERR	0x00400000
#define			EMASK_FBD_M22ERR	0x00200000
#define			EMASK_FBD_M21ERR	0x00100000
#define			EMASK_FBD_M20ERR	0x00080000
#define			EMASK_FBD_M19ERR	0x00040000
#define			EMASK_FBD_M18ERR	0x00020000
#define			EMASK_FBD_M17ERR	0x00010000

#define			EMASK_FBD_M15ERR	0x00004000
#define			EMASK_FBD_M14ERR	0x00002000
#define			EMASK_FBD_M13ERR	0x00001000
#define			EMASK_FBD_M12ERR	0x00000800
#define			EMASK_FBD_M11ERR	0x00000400
#define			EMASK_FBD_M10ERR	0x00000200
#define			EMASK_FBD_M9ERR		0x00000100
#define			EMASK_FBD_M8ERR		0x00000080
#define			EMASK_FBD_M7ERR		0x00000040
#define			EMASK_FBD_M6ERR		0x00000020
#define			EMASK_FBD_M5ERR		0x00000010
#define			EMASK_FBD_M4ERR		0x00000008
#define			EMASK_FBD_M3ERR		0x00000004
#define			EMASK_FBD_M2ERR		0x00000002
#define			EMASK_FBD_M1ERR		0x00000001

/* Groups of mask bits cleared by i5000_enable_error_reporting() */
#define			ENABLE_EMASK_FBD_FATAL_ERRORS	(EMASK_FBD_M1ERR | \
							EMASK_FBD_M2ERR | \
							EMASK_FBD_M3ERR)
#define			ENABLE_EMASK_FBD_UNCORRECTABLE	(EMASK_FBD_M4ERR | \
							EMASK_FBD_M5ERR | \
							EMASK_FBD_M6ERR | \
							EMASK_FBD_M7ERR | \
							EMASK_FBD_M8ERR | \
							EMASK_FBD_M9ERR | \
							EMASK_FBD_M10ERR | \
							EMASK_FBD_M11ERR | \
							EMASK_FBD_M12ERR)
#define			ENABLE_EMASK_FBD_CORRECTABLE	(EMASK_FBD_M17ERR | \
							EMASK_FBD_M18ERR | \
							EMASK_FBD_M19ERR | \
							EMASK_FBD_M20ERR)
#define			ENABLE_EMASK_FBD_DIMM_SPARE	(EMASK_FBD_M27ERR | \
							EMASK_FBD_M28ERR)
#define			ENABLE_EMASK_FBD_THERMALS	(EMASK_FBD_M26ERR | \
							EMASK_FBD_M25ERR | \
							EMASK_FBD_M24ERR | \
							EMASK_FBD_M23ERR)
#define			ENABLE_EMASK_FBD_SPD_PROTOCOL	(EMASK_FBD_M22ERR)
#define			ENABLE_EMASK_FBD_NORTH_CRC	(EMASK_FBD_M21ERR)
#define			ENABLE_EMASK_FBD_NON_RETRY	(EMASK_FBD_M15ERR | \
							EMASK_FBD_M14ERR | \
							EMASK_FBD_M13ERR)

#define		ENABLE_EMASK_ALL	(ENABLE_EMASK_FBD_NON_RETRY | \
					ENABLE_EMASK_FBD_NORTH_CRC | \
					ENABLE_EMASK_FBD_SPD_PROTOCOL | \
					ENABLE_EMASK_FBD_THERMALS | \
					ENABLE_EMASK_FBD_DIMM_SPARE | \
					ENABLE_EMASK_FBD_FATAL_ERRORS | \
					ENABLE_EMASK_FBD_CORRECTABLE | \
					ENABLE_EMASK_FBD_UNCORRECTABLE)
/* Error-log registers, dev 16 func 1 (continued) */
#define		ERR0_FBD		0xAC
#define		ERR1_FBD		0xB0
#define		ERR2_FBD		0xB4
#define		MCERR_FBD		0xB8

/* Non-recoverable memory error log A: bank/rank/read-write fields */
#define		NRECMEMA		0xBE
#define			NREC_BANK(x)	(((x)>>12) & 0x7)
#define			NREC_RDWR(x)	(((x)>>11) & 1)
#define			NREC_RANK(x)	(((x)>>8) & 0x7)
/* Non-recoverable memory error log B: CAS/RAS fields.
 * NOTE(review): NREC_CAS keeps 24 bits after a >>16, but only 16 bits
 * remain of a 32-bit value -- and the driver currently reads NRECMEMB
 * into a u16 (see i5000_error_info), which makes the CAS field always 0.
 * Later upstream kernels made NRECMEMB a 32-bit read and narrowed the
 * mask; fixing it here requires coordinated struct + read changes. */
#define		NRECMEMB		0xC0
#define			NREC_CAS(x)	(((x)>>16) & 0xFFFFFF)
#define			NREC_RAS(x)	((x) & 0x7FFF)
#define		NRECFGLOG		0xC4
#define		NREEECFBDA		0xC8
#define		NREEECFBDB		0xCC
#define		NREEECFBDC		0xD0
#define		NREEECFBDD		0xD4
#define		NREEECFBDE		0xD8

/* Recoverable memory error logs (same field layout as NREC*) */
#define		REDMEMA			0xDC
#define		RECMEMA			0xE2
#define			REC_BANK(x)	(((x)>>12) & 0x7)
#define			REC_RDWR(x)	(((x)>>11) & 1)
#define			REC_RANK(x)	(((x)>>8) & 0x7)
#define		RECMEMB			0xE4
#define			REC_CAS(x)	(((x)>>16) & 0xFFFFFF)
#define			REC_RAS(x)	((x) & 0x7FFF)
#define		RECFGLOG		0xE8
#define		RECFBDA			0xEC
#define		RECFBDB			0xF0
#define		RECFBDC			0xF4
#define		RECFBDD			0xF8
#define		RECFBDE			0xFC

/* OFFSETS for Function 2 */

/*
 * Device 21,
 * Function 0: Memory Map Branch 0
 *
 * Device 22,
 * Function 0: Memory Map Branch 1
 */
#define PCI_DEVICE_ID_I5000_BRANCH_0	0x25F5
#define PCI_DEVICE_ID_I5000_BRANCH_1	0x25F6

/* AMB (Advanced Memory Buffer) presence bitmaps, one per channel */
#define AMB_PRESENT_0	0x64
#define AMB_PRESENT_1	0x66
/* Memory Technology Registers, one per DIMM pair */
#define MTR0		0x80
#define MTR1		0x84
#define MTR2		0x88
#define MTR3		0x8C

#define NUM_MTRS		4
#define CHANNELS_PER_BRANCH	(2)

/* Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (0x1 << 8))
#define MTR_DRAM_WIDTH(mtr)		((((mtr) >> 6) & 0x1) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		((((mtr) >> 5) & 0x1) ? 8 : 4)
#define MTR_DRAM_BANKS_ADDR_BITS(mtr)	((MTR_DRAM_BANKS(mtr) == 8) ? 3 : 2)
#define MTR_DIMM_RANK(mtr)		(((mtr) >> 4) & 0x1)
#define MTR_DIMM_RANK_ADDR_BITS(mtr)	(MTR_DIMM_RANK(mtr) ? 2 : 1)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)

#ifdef CONFIG_EDAC_DEBUG
/* Human-readable decodings of the 2-bit MTR row/column encodings */
static char *numrow_toString[] = {
	"8,192 - 13 rows",
	"16,384 - 14 rows",
	"32,768 - 15 rows",
	"reserved"
};

static char *numcol_toString[] = {
	"1,024 - 10 columns",
	"2,048 - 11 columns",
	"4,096 - 12 columns",
	"reserved"
};
#endif

/* enables the report of miscellaneous messages as CE errors - default off */
static int misc_messages;

/* Enumeration of supported devices */
enum i5000_chips {
	I5000P = 0,
	I5000V = 1,		/* future */
	I5000X = 2		/* future */
};

/* Device name and register DID (Device ID) */
struct i5000_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};

/* Table of devices attributes supported by this driver */
static const struct i5000_dev_info i5000_devs[] = {
	[I5000P] = {
		.ctl_name = "I5000",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
	},
};

/* Per-DIMM-slot size/rank info filled in by handle_channel() */
struct i5000_dimm_info {
	int megabytes;		/* size, 0 means not present */
	int dual_rank;		/* 1 if the DIMM is dual-rank */
};

#define	MAX_CHANNELS	6	/* max possible channels */
#define MAX_CSROWS	(8*2)	/* max possible csrows per channel */

/* driver private data structure */
struct i5000_pvt {
	struct pci_dev *system_address;		/* 16.0 */
	struct pci_dev *branchmap_werrors;	/* 16.1 */
	struct pci_dev *fsb_error_regs;		/* 16.2 */
	struct pci_dev *branch_0;		/* 21.0 */
	struct pci_dev *branch_1;		/* 22.0 */

	u16 tolm;		/* top of low memory */
	u64 ambase;		/* AMB BAR */

	u16 mir0, mir1, mir2;	/* memory interleave range registers */

	u16 b0_mtr[NUM_MTRS];	/* Memory Technology Reg */
	u16 b0_ambpresent0;	/* Branch 0, Channel 0 */
	u16 b0_ambpresent1;	/* Branch 0, Channel 1 */

	u16 b1_mtr[NUM_MTRS];	/* Memory Technology Reg */
	u16 b1_ambpresent0;	/* Branch 1, Channel 2 */
	u16 b1_ambpresent1;	/* Branch 1, Channel 3 */

	/* DIMM information matrix, allocating architecture maximums */
	struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];

	/* Actual values for this controller */
	int maxch;		/* Max channels */
	int maxdimmperch;	/* Max DIMMs per channel */
};
/* I5000 MCH error information retrieved from Hardware */
struct i5000_error_info {

	/* These registers are always read from the MC */
	u32 ferr_fat_fbd;	/* First Errors Fatal */
	u32 nerr_fat_fbd;	/* Next Errors Fatal */
	u32 ferr_nf_fbd;	/* First Errors Non-Fatal */
	u32 nerr_nf_fbd;	/* Next Errors Non-Fatal */

	/* These registers are input ONLY if there was a Recoverable Error */
	u32 redmemb;		/* Recoverable Mem Data Error log B */
	u16 recmema;		/* Recoverable Mem Error log A */
	u32 recmemb;		/* Recoverable Mem Error log B */

	/* These registers are input ONLY if there was a
	 * Non-Recoverable Error */
	u16 nrecmema;		/* Non-Recoverable Mem log A */
	/* NOTE(review): nrecmemb is only 16 bits wide here, yet NREC_CAS()
	 * shifts it right by 16 -- the CAS value reported on fatal errors
	 * is therefore always 0.  Upstream later widened this to u32 and
	 * switched the read below to pci_read_config_dword; the fix needs
	 * both changes together, so it is only flagged here. */
	u16 nrecmemb;		/* Non-Recoverable Mem log B */

};

/* Generic PCI-parity control created in i5000_probe1() */
static struct edac_pci_ctl_info *i5000_pci;

/*
 *	i5000_get_error_info	Retrieve the hardware error information from
 *				the hardware and cache it in the 'info'
 *				structure
 *
 *	Reads FERR_FAT_FBD / FERR_NF_FBD; when an error is latched, harvests
 *	the associated log registers and writes the error bits back to clear
 *	them (write-1-to-clear semantics on this MCH).  Fields for an error
 *	class that did not fire are zeroed so callers can test them safely.
 */
static void i5000_get_error_info(struct mem_ctl_info *mci,
				 struct i5000_error_info *info)
{
	struct i5000_pvt *pvt;
	u32 value;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value);

	/* Mask only the bits that the doc says are valid
	 */
	value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK);

	/* If there is an error, then read in the */
	/* NEXT FATAL error register and the Memory Error Log Register A */
	if (value & FERR_FAT_MASK) {
		info->ferr_fat_fbd = value;

		/* harvest the various error data we need */
		pci_read_config_dword(pvt->branchmap_werrors,
				NERR_FAT_FBD, &info->nerr_fat_fbd);
		pci_read_config_word(pvt->branchmap_werrors,
				NRECMEMA, &info->nrecmema);
		pci_read_config_word(pvt->branchmap_werrors,
				NRECMEMB, &info->nrecmemb);

		/* Clear the error bits, by writing them back */
		pci_write_config_dword(pvt->branchmap_werrors,
				FERR_FAT_FBD, value);
	} else {
		info->ferr_fat_fbd = 0;
		info->nerr_fat_fbd = 0;
		info->nrecmema = 0;
		info->nrecmemb = 0;
	}

	/* read in the 1st NON-FATAL error register */
	pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value);

	/* If there is an error, then read in the 1st NON-FATAL error
	 * register as well */
	if (value & FERR_NF_MASK) {
		info->ferr_nf_fbd = value;

		/* harvest the various error data we need */
		pci_read_config_dword(pvt->branchmap_werrors,
				NERR_NF_FBD, &info->nerr_nf_fbd);
		pci_read_config_word(pvt->branchmap_werrors,
				RECMEMA, &info->recmema);
		pci_read_config_dword(pvt->branchmap_werrors,
				RECMEMB, &info->recmemb);
		pci_read_config_dword(pvt->branchmap_werrors,
				REDMEMB, &info->redmemb);

		/* Clear the error bits, by writing them back */
		pci_write_config_dword(pvt->branchmap_werrors,
				FERR_NF_FBD, value);
	} else {
		info->ferr_nf_fbd = 0;
		info->nerr_nf_fbd = 0;
		info->recmema = 0;
		info->recmemb = 0;
		info->redmemb = 0;
	}
}
"Write" : "Read", ras, cas); /* Only 1 bit will be on */ switch (allErrors) { case FERR_FAT_M1ERR: specific = "Alert on non-redundant retry or fast " "reset timeout"; break; case FERR_FAT_M2ERR: specific = "Northbound CRC error on non-redundant " "retry"; break; case FERR_FAT_M3ERR: { static int done; /* * This error is generated to inform that the intelligent * throttling is disabled and the temperature passed the * specified middle point. Since this is something the BIOS * should take care of, we'll warn only once to avoid * worthlessly flooding the log. */ if (done) return; done++; specific = ">Tmid Thermal event with intelligent " "throttling disabled"; } break; } /* Form out message */ snprintf(msg, sizeof(msg), "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d " "FATAL Err=0x%x (%s))", branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas, allErrors, specific); /* Call the helper to output message */ edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); } /* * i5000_process_fatal_error_info(struct mem_ctl_info *mci, * struct i5000_error_info *info, * int handle_errors); * * handle the Intel NON-FATAL errors, if any */ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, struct i5000_error_info *info, int handle_errors) { char msg[EDAC_MC_LABEL_LEN + 1 + 170]; char *specific = NULL; u32 allErrors; u32 ue_errors; u32 ce_errors; u32 misc_errors; int branch; int channel; int bank; int rank; int rdwr; int ras, cas; /* mask off the Error bits that are possible */ allErrors = (info->ferr_nf_fbd & FERR_NF_MASK); if (!allErrors) return; /* if no error, return now */ /* ONLY ONE of the possible error bits will be set, as per the docs */ ue_errors = allErrors & FERR_NF_UNCORRECTABLE; if (ue_errors) { debugf0("\tUncorrected bits= 0x%x\n", ue_errors); branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); /* * According with i5000 datasheet, bit 28 has no significance * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD */ channel = branch & 2; 
bank = NREC_BANK(info->nrecmema); rank = NREC_RANK(info->nrecmema); rdwr = NREC_RDWR(info->nrecmema); ras = NREC_RAS(info->nrecmemb); cas = NREC_CAS(info->nrecmemb); debugf0 ("\t\tCSROW= %d Channels= %d,%d (Branch= %d " "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", rank, channel, channel + 1, branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas); switch (ue_errors) { case FERR_NF_M12ERR: specific = "Non-Aliased Uncorrectable Patrol Data ECC"; break; case FERR_NF_M11ERR: specific = "Non-Aliased Uncorrectable Spare-Copy " "Data ECC"; break; case FERR_NF_M10ERR: specific = "Non-Aliased Uncorrectable Mirrored Demand " "Data ECC"; break; case FERR_NF_M9ERR: specific = "Non-Aliased Uncorrectable Non-Mirrored " "Demand Data ECC"; break; case FERR_NF_M8ERR: specific = "Aliased Uncorrectable Patrol Data ECC"; break; case FERR_NF_M7ERR: specific = "Aliased Uncorrectable Spare-Copy Data ECC"; break; case FERR_NF_M6ERR: specific = "Aliased Uncorrectable Mirrored Demand " "Data ECC"; break; case FERR_NF_M5ERR: specific = "Aliased Uncorrectable Non-Mirrored Demand " "Data ECC"; break; case FERR_NF_M4ERR: specific = "Uncorrectable Data ECC on Replay"; break; } /* Form out message */ snprintf(msg, sizeof(msg), "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " "CAS=%d, UE Err=0x%x (%s))", branch >> 1, bank, rdwr ? 
"Write" : "Read", ras, cas, ue_errors, specific); /* Call the helper to output message */ edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); } /* Check correctable errors */ ce_errors = allErrors & FERR_NF_CORRECTABLE; if (ce_errors) { debugf0("\tCorrected bits= 0x%x\n", ce_errors); branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); channel = 0; if (REC_ECC_LOCATOR_ODD(info->redmemb)) channel = 1; /* Convert channel to be based from zero, instead of * from branch base of 0 */ channel += branch; bank = REC_BANK(info->recmema); rank = REC_RANK(info->recmema); rdwr = REC_RDWR(info->recmema); ras = REC_RAS(info->recmemb); cas = REC_CAS(info->recmemb); debugf0("\t\tCSROW= %d Channel= %d (Branch %d " "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", rank, channel, branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas); switch (ce_errors) { case FERR_NF_M17ERR: specific = "Correctable Non-Mirrored Demand Data ECC"; break; case FERR_NF_M18ERR: specific = "Correctable Mirrored Demand Data ECC"; break; case FERR_NF_M19ERR: specific = "Correctable Spare-Copy Data ECC"; break; case FERR_NF_M20ERR: specific = "Correctable Patrol Data ECC"; break; } /* Form out message */ snprintf(msg, sizeof(msg), "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank, rdwr ? 
"Write" : "Read", ras, cas, ce_errors, specific); /* Call the helper to output message */ edac_mc_handle_fbd_ce(mci, rank, channel, msg); } if (!misc_messages) return; misc_errors = allErrors & (FERR_NF_NON_RETRY | FERR_NF_NORTH_CRC | FERR_NF_SPD_PROTOCOL | FERR_NF_DIMM_SPARE); if (misc_errors) { switch (misc_errors) { case FERR_NF_M13ERR: specific = "Non-Retry or Redundant Retry FBD Memory " "Alert or Redundant Fast Reset Timeout"; break; case FERR_NF_M14ERR: specific = "Non-Retry or Redundant Retry FBD " "Configuration Alert"; break; case FERR_NF_M15ERR: specific = "Non-Retry or Redundant Retry FBD " "Northbound CRC error on read data"; break; case FERR_NF_M21ERR: specific = "FBD Northbound CRC error on " "FBD Sync Status"; break; case FERR_NF_M22ERR: specific = "SPD protocol error"; break; case FERR_NF_M27ERR: specific = "DIMM-spare copy started"; break; case FERR_NF_M28ERR: specific = "DIMM-spare copy completed"; break; } branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); /* Form out message */ snprintf(msg, sizeof(msg), "(Branch=%d Err=%#x (%s))", branch >> 1, misc_errors, specific); /* Call the helper to output message */ edac_mc_handle_fbd_ce(mci, 0, 0, msg); } } /* * i5000_process_error_info Process the error info that is * in the 'info' structure, previously retrieved from hardware */ static void i5000_process_error_info(struct mem_ctl_info *mci, struct i5000_error_info *info, int handle_errors) { /* First handle any fatal errors that occurred */ i5000_process_fatal_error_info(mci, info, handle_errors); /* now handle any non-fatal errors that occurred */ i5000_process_nonfatal_error_info(mci, info, handle_errors); } /* * i5000_clear_error Retrieve any error from the hardware * but do NOT process that error. * Used for 'clearing' out of previous errors * Called by the Core module. 
/*
 *	i5000_clear_error	Retrieve any error from the hardware
 *				but do NOT process that error.
 *				Used for 'clearing' out of previous errors
 *				Called by the Core module.
 */
static void i5000_clear_error(struct mem_ctl_info *mci)
{
	struct i5000_error_info info;

	/* reading the latched registers also clears them (see
	 * i5000_get_error_info); the harvested data is discarded */
	i5000_get_error_info(mci, &info);
}

/*
 *	i5000_check_error	Retrieve and process errors reported by the
 *				hardware. Called by the Core module.
 */
static void i5000_check_error(struct mem_ctl_info *mci)
{
	struct i5000_error_info info;
	debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
	i5000_get_error_info(mci, &info);
	i5000_process_error_info(mci, &info, 1);
}

/*
 *	i5000_get_devices	Find and perform 'get' operation on the MCH's
 *			device/functions we want to reference for this driver
 *
 *	Need to 'get' device 16 func 1 and func 2
 *
 *	Returns 0 on success, 1 on failure; on failure every reference taken
 *	so far is dropped again, so the caller does not need to unwind.
 */
static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
{
	//const struct i5000_dev_info *i5000_dev = &i5000_devs[dev_idx];
	struct i5000_pvt *pvt;
	struct pci_dev *pdev;

	pvt = mci->pvt_info;

	/* Attempt to 'get' the MCH register we want */
	pdev = NULL;
	while (1) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);

		/* End of list, leave */
		if (pdev == NULL) {
			i5000_printk(KERN_ERR,
				"'system address,Process Bus' "
				"device not found:"
				"vendor 0x%x device 0x%x FUNC 1 "
				"(broken BIOS?)\n",
				PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_I5000_DEV16);

			return 1;
		}

		/* Scan for device 16 func 1 */
		if (PCI_FUNC(pdev->devfn) == 1)
			break;
	}

	pvt->branchmap_werrors = pdev;

	/* Attempt to 'get' the MCH register we want */
	pdev = NULL;
	while (1) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);

		if (pdev == NULL) {
			i5000_printk(KERN_ERR,
				"MC: 'branchmap,control,errors' "
				"device not found:"
				"vendor 0x%x device 0x%x Func 2 "
				"(broken BIOS?)\n",
				PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_INTEL_I5000_DEV16);

			pci_dev_put(pvt->branchmap_werrors);
			return 1;
		}

		/* Scan for device 16 func 2 (comment previously said func 1) */
		if (PCI_FUNC(pdev->devfn) == 2)
			break;
	}
	pvt->fsb_error_regs = pdev;

	debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
		pci_name(pvt->system_address),
		pvt->system_address->vendor, pvt->system_address->device);
	debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
		pci_name(pvt->branchmap_werrors),
		pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
	debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
		pci_name(pvt->fsb_error_regs),
		pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);

	pdev = NULL;
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_I5000_BRANCH_0, pdev);

	if (pdev == NULL) {
		i5000_printk(KERN_ERR,
			"MC: 'BRANCH 0' device not found:"
			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);

		pci_dev_put(pvt->branchmap_werrors);
		pci_dev_put(pvt->fsb_error_regs);
		return 1;
	}
	pvt->branch_0 = pdev;

	/* If this device claims to have more than 2 channels then
	 * fetch Branch 1's information
	 */
	if (pvt->maxch >= CHANNELS_PER_BRANCH) {
		pdev = NULL;
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_I5000_BRANCH_1, pdev);

		if (pdev == NULL) {
			i5000_printk(KERN_ERR,
				"MC: 'BRANCH 1' device not found:"
				"vendor 0x%x device 0x%x Func 0 "
				"(broken BIOS?)\n",
				PCI_VENDOR_ID_INTEL,
				PCI_DEVICE_ID_I5000_BRANCH_1);

			pci_dev_put(pvt->branchmap_werrors);
			pci_dev_put(pvt->fsb_error_regs);
			pci_dev_put(pvt->branch_0);
			return 1;
		}
		pvt->branch_1 = pdev;
	}

	return 0;
}

/*
 *	i5000_put_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void i5000_put_devices(struct mem_ctl_info *mci)
{
	struct i5000_pvt *pvt;

	pvt = mci->pvt_info;

	pci_dev_put(pvt->branchmap_werrors);	/* FUNC 1 */
	pci_dev_put(pvt->fsb_error_regs);	/* FUNC 2 */
	pci_dev_put(pvt->branch_0);	/* DEV 21 */

	/* Only if more than 2 channels do we release the second branch */
	if (pvt->maxch >= CHANNELS_PER_BRANCH)
		pci_dev_put(pvt->branch_1);	/* DEV 22 */
}
/*
 *	determine_amb_present_reg
 *
 *		the information is contained in NUM_MTRS different registers
 *		determining which of the NUM_MTRS requires knowing
 *		which channel is in question
 *
 *	2 branches, each with 2 channels
 *		b0_ambpresent0 for channel '0'
 *		b0_ambpresent1 for channel '1'
 *		b1_ambpresent0 for channel '2'
 *		b1_ambpresent1 for channel '3'
 */
static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
{
	int amb_present;

	/* channels 0/1 live on branch 0, 2/3 on branch 1; the low bit
	 * selects the channel within the branch */
	if (channel < CHANNELS_PER_BRANCH) {
		if (channel & 0x1)
			amb_present = pvt->b0_ambpresent1;
		else
			amb_present = pvt->b0_ambpresent0;
	} else {
		if (channel & 0x1)
			amb_present = pvt->b1_ambpresent1;
		else
			amb_present = pvt->b1_ambpresent0;
	}

	return amb_present;
}

/*
 * determine_mtr(pvt, csrow, channel)
 *
 * return the proper MTR register as determined by the csrow and channel
 * desired (one MTR covers a pair of csrows, hence csrow >> 1)
 */
static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
{
	int mtr;

	if (channel < CHANNELS_PER_BRANCH)
		mtr = pvt->b0_mtr[csrow >> 1];
	else
		mtr = pvt->b1_mtr[csrow >> 1];

	return mtr;
}

/*
 * decode_mtr - dump a human-readable decoding of one MTR register
 * (debug output only; compiled-out strings live under CONFIG_EDAC_DEBUG)
 */
static void decode_mtr(int slot_row, u16 mtr)
{
	int ans;

	ans = MTR_DIMMS_PRESENT(mtr);

	debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
		ans ? "Present" : "NOT Present");
	if (!ans)
		return;

	debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
	debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
	debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
	debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}

/*
 * handle_channel - compute the size in megabytes of the DIMM (if any)
 * occupying (csrow, channel) and record it in *dinfo.
 *
 * The size is derived by summing the address bits contributed by banks,
 * rows, columns and rank, then converting bits -> bytes -> megabytes.
 */
static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
			struct i5000_dimm_info *dinfo)
{
	int mtr;
	int amb_present_reg;
	int addrBits;

	mtr = determine_mtr(pvt, csrow, channel);
	if (MTR_DIMMS_PRESENT(mtr)) {
		amb_present_reg = determine_amb_present_reg(pvt, channel);

		/* Determine if there is a DIMM present in this DIMM slot */
		if (amb_present_reg & (1 << (csrow >> 1))) {
			dinfo->dual_rank = MTR_DIMM_RANK(mtr);

			/* a single-rank DIMM contributes nothing to the odd
			 * csrow of its pair */
			if (!((dinfo->dual_rank == 0) &&
				((csrow & 0x1) == 0x1))) {
				/* Start with the number of bits for a Bank
				 * on the DRAM */
				addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
				/* Add the number of ROW bits */
				addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
				/* add the number of COLUMN bits */
				addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);

				addrBits += 6;	/* add 64 bits per DIMM */
				addrBits -= 20;	/* divide by 2^^20 */
				addrBits -= 3;	/* 8 bits per bytes */

				dinfo->megabytes = 1 << addrBits;
			}
		}
	}
}
buffer */ if (csrow & 0x1) { n = snprintf(p, space, "---------------------------" "--------------------------------"); p += n; space -= n; debugf2("%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; } n = snprintf(p, space, "csrow %2d ", csrow); p += n; space -= n; for (channel = 0; channel < pvt->maxch; channel++) { dinfo = &pvt->dimm_info[csrow][channel]; handle_channel(pvt, csrow, channel, dinfo); n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); p += n; space -= n; } n = snprintf(p, space, "\n"); p += n; space -= n; } /* Output the last bottom 'boundary' marker */ n = snprintf(p, space, "---------------------------" "--------------------------------\n"); p += n; space -= n; /* now output the 'channel' labels */ n = snprintf(p, space, " "); p += n; space -= n; for (channel = 0; channel < pvt->maxch; channel++) { n = snprintf(p, space, "channel %d | ", channel); p += n; space -= n; } n = snprintf(p, space, "\n"); p += n; space -= n; /* output the last message and free buffer */ debugf2("%s\n", mem_buffer); kfree(mem_buffer); } /* * i5000_get_mc_regs read in the necessary registers and * cache locally * * Fills in the private data members */ static void i5000_get_mc_regs(struct mem_ctl_info *mci) { struct i5000_pvt *pvt; u32 actual_tolm; u16 limit; int slot_row; int maxch; int maxdimmperch; int way0, way1; pvt = mci->pvt_info; pci_read_config_dword(pvt->system_address, AMBASE, (u32 *) & pvt->ambase); pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), ((u32 *) & pvt->ambase) + sizeof(u32)); maxdimmperch = pvt->maxdimmperch; maxch = pvt->maxch; debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); /* Get the Branch Map regs */ pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); pvt->tolm >>= 12; debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, pvt->tolm); actual_tolm = pvt->tolm << 28; debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, 
actual_tolm); pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2); /* Get the MIR[0-2] regs */ limit = (pvt->mir0 >> 4) & 0x0FFF; way0 = pvt->mir0 & 0x1; way1 = pvt->mir0 & 0x2; debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); limit = (pvt->mir1 >> 4) & 0x0FFF; way0 = pvt->mir1 & 0x1; way1 = pvt->mir1 & 0x2; debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); limit = (pvt->mir2 >> 4) & 0x0FFF; way0 = pvt->mir2 & 0x1; way1 = pvt->mir2 & 0x2; debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); /* Get the MTR[0-3] regs */ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { int where = MTR0 + (slot_row * sizeof(u32)); pci_read_config_word(pvt->branch_0, where, &pvt->b0_mtr[slot_row]); debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, pvt->b0_mtr[slot_row]); if (pvt->maxch >= CHANNELS_PER_BRANCH) { pci_read_config_word(pvt->branch_1, where, &pvt->b1_mtr[slot_row]); debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, pvt->b1_mtr[slot_row]); } else { pvt->b1_mtr[slot_row] = 0; } } /* Read and dump branch 0's MTRs */ debugf2("\nMemory Technology Registers:\n"); debugf2(" Branch 0:\n"); for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { decode_mtr(slot_row, pvt->b0_mtr[slot_row]); } pci_read_config_word(pvt->branch_0, AMB_PRESENT_0, &pvt->b0_ambpresent0); debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); pci_read_config_word(pvt->branch_0, AMB_PRESENT_1, &pvt->b0_ambpresent1); debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); /* Only if we have 2 branchs (4 channels) */ if (pvt->maxch < CHANNELS_PER_BRANCH) { pvt->b1_ambpresent0 = 0; pvt->b1_ambpresent1 = 0; } else { /* Read and dump branch 1's MTRs */ debugf2(" Branch 1:\n"); for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { decode_mtr(slot_row, pvt->b1_mtr[slot_row]); } 
pci_read_config_word(pvt->branch_1, AMB_PRESENT_0, &pvt->b1_ambpresent0); debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", pvt->b1_ambpresent0); pci_read_config_word(pvt->branch_1, AMB_PRESENT_1, &pvt->b1_ambpresent1); debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", pvt->b1_ambpresent1); } /* Go and determine the size of each DIMM and place in an * orderly matrix */ calculate_dimm_size(pvt); } /* * i5000_init_csrows Initialize the 'csrows' table within * the mci control structure with the * addressing of memory. * * return: * 0 success * 1 no actual memory found on this MC */ static int i5000_init_csrows(struct mem_ctl_info *mci) { struct i5000_pvt *pvt; struct csrow_info *p_csrow; int empty, channel_count; int max_csrows; int mtr, mtr1; int csrow_megs; int channel; int csrow; pvt = mci->pvt_info; channel_count = pvt->maxch; max_csrows = pvt->maxdimmperch * 2; empty = 1; /* Assume NO memory */ for (csrow = 0; csrow < max_csrows; csrow++) { p_csrow = &mci->csrows[csrow]; p_csrow->csrow_idx = csrow; /* use branch 0 for the basis */ mtr = pvt->b0_mtr[csrow >> 1]; mtr1 = pvt->b1_mtr[csrow >> 1]; /* if no DIMMS on this row, continue */ if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1)) continue; /* FAKE OUT VALUES, FIXME */ p_csrow->first_page = 0 + csrow * 20; p_csrow->last_page = 9 + csrow * 20; p_csrow->page_mask = 0xFFF; p_csrow->grain = 8; csrow_megs = 0; for (channel = 0; channel < pvt->maxch; channel++) { csrow_megs += pvt->dimm_info[csrow][channel].megabytes; } p_csrow->nr_pages = csrow_megs << 8; /* Assume DDR2 for now */ p_csrow->mtype = MEM_FB_DDR2; /* ask what device type on this row */ if (MTR_DRAM_WIDTH(mtr)) p_csrow->dtype = DEV_X8; else p_csrow->dtype = DEV_X4; p_csrow->edac_mode = EDAC_S8ECD8ED; empty = 0; } return empty; } /* * i5000_enable_error_reporting * Turn on the memory reporting features of the hardware */ static void i5000_enable_error_reporting(struct mem_ctl_info *mci) { struct i5000_pvt *pvt; u32 fbd_error_mask; pvt = mci->pvt_info; /* Read 
/*
 *	i5000_enable_error_reporting
 *			Turn on the memory reporting features of the hardware
 */
static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
{
	struct i5000_pvt *pvt;
	u32 fbd_error_mask;

	pvt = mci->pvt_info;

	/* Read the FBD Error Mask Register */
	pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
			&fbd_error_mask);

	/* Enable with a '0' */
	fbd_error_mask &= ~(ENABLE_EMASK_ALL);

	pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
			fbd_error_mask);
}

/*
 *	i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
 *
 *	ask the device how many channels are present and how many CSROWS
 *	as well
 */
static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
				int *num_dimms_per_channel,
				int *num_channels)
{
	u8 value;

	/* Need to retrieve just how many channels and dimms per channel are
	 * supported on this memory controller
	 */
	pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
	/* NOTE(review): the chipset reports DIMM *pairs* here, hence the
	 * doubling -- presumed from usage; confirm against the datasheet */
	*num_dimms_per_channel = (int)value * 2;

	pci_read_config_byte(pdev, MAXCH, &value);
	*num_channels = (int)value;
}

/*
 *	i5000_probe1	Probe for ONE instance of device to see if it is
 *			present.
 *	return:
 *		0 for FOUND a device
 *		< 0 for error code
 *
 *	Allocates the mci, reserves the sibling PCI functions, snapshots the
 *	hardware registers, builds the csrow table, registers with the EDAC
 *	core and clears any stale errors.  The fail1/fail0 labels unwind in
 *	reverse order of acquisition.
 */
static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
{
	struct mem_ctl_info *mci;
	struct i5000_pvt *pvt;
	int num_channels;
	int num_dimms_per_channel;
	int num_csrows;

	debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
		__FILE__, __func__,
		pdev->bus->number,
		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We only are looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/* Ask the devices for the number of CSROWS and CHANNELS so
	 * that we can calculate the memory resources, etc
	 *
	 * The Chipset will report what it can handle which will be greater
	 * or equal to what the motherboard manufacturer will implement.
	 *
	 * As we don't have a motherboard identification routine to determine
	 * actual number of slots/dimms per channel, we thus utilize the
	 * resource as specified by the chipset. Thus, we might have
	 * have more DIMMs per channel than actually on the mobo, but this
	 * allows the driver to support upto the chipset max, without
	 * some fancy mobo determination.
	 */
	i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
					&num_channels);
	num_csrows = num_dimms_per_channel * 2;

	debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
		__func__, num_channels, num_dimms_per_channel, num_csrows);

	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);

	if (mci == NULL)
		return -ENOMEM;

	/* hold an extra ref on the sysfs object for the lifetime of probe */
	kobject_get(&mci->edac_mci_kobj);
	debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);

	mci->dev = &pdev->dev;	/* record ptr to the generic device */

	pvt = mci->pvt_info;
	pvt->system_address = pdev;	/* Record this device in our private */
	pvt->maxch = num_channels;
	pvt->maxdimmperch = num_dimms_per_channel;

	/* 'get' the pci devices we want to reserve for our use */
	if (i5000_get_devices(mci, dev_idx))
		goto fail0;

	/* Time to get serious */
	i5000_get_mc_regs(mci);	/* retrieve the hardware registers */

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i5000_edac.c";
	mci->mod_ver = I5000_REVISION;
	mci->ctl_name = i5000_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = i5000_check_error;

	/* initialize the MC control structure 'csrows' table
	 * with the mapping and control information */
	if (i5000_init_csrows(mci)) {
		debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
			" because i5000_init_csrows() returned nonzero "
			"value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
	} else {
		debugf1("MC: Enable error reporting now\n");
		i5000_enable_error_reporting(mci);
	}

	/* add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
			__FILE__, __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
		goto fail1;
	}

	/* discard any errors latched before we took over */
	i5000_clear_error(mci);

	/* allocating generic PCI control info */
	i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i5000_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	return 0;

	/* Error exit unwinding stack */
fail1:

	i5000_put_devices(mci);

fail0:
	kobject_put(&mci->edac_mci_kobj);
	edac_mc_free(mci);
	return -ENODEV;
}

/*
 *	i5000_init_one	constructor for one instance of device
 *
 *	returns:
 *		negative on error
 *		count (>= 0)
 */
static int __devinit i5000_init_one(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int rc;

	debugf0("MC: %s: %s()\n", __FILE__, __func__);

	/* wake up device */
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* now probe and enable the device */
	return i5000_probe1(pdev, id->driver_data);
}

/*
 *	i5000_remove_one	destructor for one instance of device
 */
static void __devexit i5000_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	debugf0("%s: %s()\n", __FILE__, __func__);

	if (i5000_pci)
		edac_pci_release_generic_ctl(i5000_pci);

	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
		return;

	/* retrieve references to resources, and free those resources */
	i5000_put_devices(mci);

	/* drop the extra ref taken in i5000_probe1() */
	kobject_put(&mci->edac_mci_kobj);
	edac_mc_free(mci);
}
 */ };	/* the {0,} entry above terminates the table */

MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);

/*
 *	i5000_driver	pci_driver structure for this module
 *	Binds the probe/remove callbacks to the device IDs listed above.
 */
static struct pci_driver i5000_driver = {
	.name = KBUILD_BASENAME,
	.probe = i5000_init_one,
	.remove = __devexit_p(i5000_remove_one),
	.id_table = i5000_pci_tbl,
};

/*
 *	i5000_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init i5000_init(void)
{
	int pci_rc;

	debugf2("MC: %s: %s()\n", __FILE__, __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i5000_driver);

	/* normalize: report 0 on success, pass negative errno through */
	return (pci_rc < 0) ? pci_rc : 0;
}

/*
 *	i5000_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit i5000_exit(void)
{
	debugf2("MC: %s: %s()\n", __FILE__, __func__);
	pci_unregister_driver(&i5000_driver);
}

module_init(i5000_init);
module_exit(i5000_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR
    ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
		I5000_REVISION);

/* read-only (0444) module parameters, set at load time */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
module_param(misc_messages, int, 0444);
MODULE_PARM_DESC(misc_messages, "Log miscellaneous non fatal messages");
gpl-2.0
nadavg54/kernel3.14
drivers/regulator/da9063-regulator.c
252
26193
/* * Regulator driver for DA9063 PMIC series * * Copyright 2012 Dialog Semiconductors Ltd. * Copyright 2013 Philipp Zabel, Pengutronix * * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/mfd/da9063/core.h> #include <linux/mfd/da9063/pdata.h> #include <linux/mfd/da9063/registers.h> /* Definition for registering regmap bit fields using a mask */ #define BFIELD(_reg, _mask) \ REG_FIELD(_reg, __builtin_ffs((int)_mask) - 1, \ sizeof(unsigned int) * 8 - __builtin_clz((_mask)) - 1) /* Regulator capabilities and registers description */ struct da9063_regulator_info { struct regulator_desc desc; /* Current limiting */ unsigned n_current_limits; const int *current_limits; /* DA9063 main register fields */ struct reg_field mode; /* buck mode of operation */ struct reg_field suspend; struct reg_field sleep; struct reg_field suspend_sleep; unsigned int suspend_vsel_reg; struct reg_field ilimit; /* DA9063 event detection bit */ struct reg_field oc_event; }; /* Macros for LDO */ #define DA9063_LDO(chip, regl_name, min_mV, step_mV, max_mV) \ .desc.id = chip##_ID_##regl_name, \ .desc.name = __stringify(chip##_##regl_name), \ .desc.ops = &da9063_ldo_ops, \ .desc.min_uV = (min_mV) * 1000, \ .desc.uV_step = (step_mV) * 1000, \ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \ + (DA9063_V##regl_name##_BIAS)), \ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ .desc.enable_mask = 
DA9063_LDO_EN, \ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \ .desc.vsel_mask = DA9063_V##regl_name##_MASK, \ .desc.linear_min_sel = DA9063_V##regl_name##_BIAS, \ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_LDO_SL), \ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_LDO_SL), \ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B /* Macros for voltage DC/DC converters (BUCKs) */ #define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array) \ .desc.id = chip##_ID_##regl_name, \ .desc.name = __stringify(chip##_##regl_name), \ .desc.ops = &da9063_buck_ops, \ .desc.min_uV = (min_mV) * 1000, \ .desc.uV_step = (step_mV) * 1000, \ .desc.n_voltages = ((max_mV) - (min_mV))/(step_mV) + 1, \ .current_limits = limits_array, \ .n_current_limits = ARRAY_SIZE(limits_array) #define DA9063_BUCK_COMMON_FIELDS(regl_name) \ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ .desc.enable_mask = DA9063_BUCK_EN, \ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \ .desc.vsel_mask = DA9063_VBUCK_MASK, \ .desc.linear_min_sel = DA9063_VBUCK_BIAS, \ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_BUCK_SL), \ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_BUCK_SL), \ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \ .mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK) /* Defines asignment of regulators info table to chip model */ struct da9063_dev_model { const struct da9063_regulator_info *regulator_info; unsigned n_regulators; unsigned dev_model; }; /* Single regulator settings */ struct da9063_regulator { struct regulator_desc desc; struct regulator_dev *rdev; struct da9063 *hw; const struct da9063_regulator_info *info; struct regmap_field *mode; struct regmap_field *suspend; struct regmap_field *sleep; struct regmap_field *suspend_sleep; struct regmap_field *ilimit; }; /* Encapsulates all information for the regulators driver */ struct da9063_regulators { int irq_ldo_lim; int irq_uvov; unsigned n_regulators; /* Array size to be 
   defined during init.  Keep at end. */
	struct da9063_regulator		regulator[0];
};

/* BUCK modes for DA9063 */
enum {
	BUCK_MODE_MANUAL,	/* 0 */
	BUCK_MODE_SLEEP,	/* 1 */
	BUCK_MODE_SYNC,		/* 2 */
	BUCK_MODE_AUTO		/* 3 */
};

/*
 * Regulator operations
 */

/* Current limits array (in uA) for BCORE1, BCORE2, BPRO.
   Entry indexes correspond to register values. */
static const int da9063_buck_a_limits[] = {
	 500000,  600000,  700000,  800000,  900000, 1000000, 1100000, 1200000,
	1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
};

/* Current limits array (in uA) for BMEM, BIO, BPERI.
   Entry indexes correspond to register values. */
static const int da9063_buck_b_limits[] = {
	1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
	2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
};

/* Current limits array (in uA) for merged BCORE1 and BCORE2.
   Entry indexes correspond to register values. */
static const int da9063_bcores_merged_limits[] = {
	1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000,
	2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000
};

/* Current limits array (in uA) for merged BMEM and BIO.
   Entry indexes correspond to register values.
 */
static const int da9063_bmem_bio_merged_limits[] = {
	3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
	4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
};

/*
 * Pick the lowest limit-table entry that falls inside the requested
 * [min_uA, max_uA] window and program its index into the ILIM field.
 * Returns -EINVAL when no table entry fits the window.
 */
static int da9063_set_current_limit(struct regulator_dev *rdev,
							int min_uA, int max_uA)
{
	struct da9063_regulator *regl = rdev_get_drvdata(rdev);
	const struct da9063_regulator_info *rinfo = regl->info;
	int n, tval;

	for (n = 0; n < rinfo->n_current_limits; n++) {
		tval = rinfo->current_limits[n];
		if (tval >= min_uA && tval <= max_uA)
			return regmap_field_write(regl->ilimit, n);
	}

	return -EINVAL;
}

/*
 * Read back the programmed current limit (in uA).
 * A selector beyond the table is clamped to the last entry.
 */
static int da9063_get_current_limit(struct regulator_dev *rdev)
{
	struct da9063_regulator *regl = rdev_get_drvdata(rdev);
	const struct da9063_regulator_info *rinfo = regl->info;
	unsigned int sel;
	int ret;

	ret = regmap_field_read(regl->ilimit, &sel);
	if (ret < 0)
		return ret;

	if (sel >= rinfo->n_current_limits)
		sel = rinfo->n_current_limits - 1;

	return rinfo->current_limits[sel];
}

/* Map regulator-framework modes onto the BUCK mode register field. */
static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
{
	struct da9063_regulator *regl = rdev_get_drvdata(rdev);
	unsigned val;

	switch (mode) {
	case REGULATOR_MODE_FAST:
		val = BUCK_MODE_SYNC;
		break;
	case REGULATOR_MODE_NORMAL:
		val = BUCK_MODE_AUTO;
		break;
	case REGULATOR_MODE_STANDBY:
		val = BUCK_MODE_SLEEP;
		break;
	default:
		return -EINVAL;
	}

	return regmap_field_write(regl->mode, val);
}

/*
 * Bucks use single mode register field for normal operation
 * and suspend state.
 * There are 3 modes to map to: FAST, NORMAL, and STANDBY.
 */
static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
{
	struct da9063_regulator *regl = rdev_get_drvdata(rdev);
	struct regmap_field *field;
	unsigned int val, mode = 0;
	int ret;

	ret = regmap_field_read(regl->mode, &val);
	if (ret < 0)
		return ret;

	switch (val) {
	default:
	case BUCK_MODE_MANUAL:
		mode = REGULATOR_MODE_FAST | REGULATOR_MODE_STANDBY;
		/* Sleep flag bit decides the mode */
		break;
	case BUCK_MODE_SLEEP:
		return REGULATOR_MODE_STANDBY;
	case BUCK_MODE_SYNC:
		return REGULATOR_MODE_FAST;
	case BUCK_MODE_AUTO:
		return REGULATOR_MODE_NORMAL;
	}

	/* Detect current regulator state */
	ret = regmap_field_read(regl->suspend, &val);
	if (ret < 0)
		return 0;

	/* Read regulator mode from proper register, depending on state */
	if (val)
		field = regl->suspend_sleep;
	else
		field = regl->sleep;

	ret = regmap_field_read(field, &val);
	if (ret < 0)
		return 0;

	/* MANUAL mode: the sleep flag selects between the two candidate
	 * modes set above — sleep set yields STANDBY, clear yields FAST. */
	if (val)
		mode &= REGULATOR_MODE_STANDBY;
	else
		mode &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;

	return mode;
}

/*
 * LDOs use sleep flags - one for normal and one for suspend state.
 * There are 2 modes to map to: NORMAL and STANDBY (sleep) for each state.
*/ static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); unsigned val; switch (mode) { case REGULATOR_MODE_NORMAL: val = 0; break; case REGULATOR_MODE_STANDBY: val = 1; break; default: return -EINVAL; } return regmap_field_write(regl->sleep, val); } static unsigned da9063_ldo_get_mode(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); struct regmap_field *field; int ret, val; /* Detect current regulator state */ ret = regmap_field_read(regl->suspend, &val); if (ret < 0) return 0; /* Read regulator mode from proper register, depending on state */ if (val) field = regl->suspend_sleep; else field = regl->sleep; ret = regmap_field_read(field, &val); if (ret < 0) return 0; if (val) return REGULATOR_MODE_STANDBY; else return REGULATOR_MODE_NORMAL; } static int da9063_buck_get_status(struct regulator_dev *rdev) { int ret = regulator_is_enabled_regmap(rdev); if (ret == 0) { ret = REGULATOR_STATUS_OFF; } else if (ret > 0) { ret = da9063_buck_get_mode(rdev); if (ret > 0) ret = regulator_mode_to_status(ret); else if (ret == 0) ret = -EIO; } return ret; } static int da9063_ldo_get_status(struct regulator_dev *rdev) { int ret = regulator_is_enabled_regmap(rdev); if (ret == 0) { ret = REGULATOR_STATUS_OFF; } else if (ret > 0) { ret = da9063_ldo_get_mode(rdev); if (ret > 0) ret = regulator_mode_to_status(ret); else if (ret == 0) ret = -EIO; } return ret; } static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); const struct da9063_regulator_info *rinfo = regl->info; int ret, sel; sel = regulator_map_voltage_linear(rdev, uV, uV); if (sel < 0) return -EINVAL; sel <<= ffs(rdev->desc->vsel_mask) - 1; ret = regmap_update_bits(regl->hw->regmap, rinfo->suspend_vsel_reg, rdev->desc->vsel_mask, sel); return ret; } static int da9063_suspend_enable(struct regulator_dev *rdev) { struct da9063_regulator 
*regl = rdev_get_drvdata(rdev); return regmap_field_write(regl->suspend, 1); } static int da9063_suspend_disable(struct regulator_dev *rdev) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); return regmap_field_write(regl->suspend, 0); } static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); int val; switch (mode) { case REGULATOR_MODE_FAST: val = BUCK_MODE_SYNC; break; case REGULATOR_MODE_NORMAL: val = BUCK_MODE_AUTO; break; case REGULATOR_MODE_STANDBY: val = BUCK_MODE_SLEEP; break; default: return -EINVAL; } return regmap_field_write(regl->mode, val); } static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) { struct da9063_regulator *regl = rdev_get_drvdata(rdev); unsigned val; switch (mode) { case REGULATOR_MODE_NORMAL: val = 0; break; case REGULATOR_MODE_STANDBY: val = 1; break; default: return -EINVAL; } return regmap_field_write(regl->suspend_sleep, val); } static struct regulator_ops da9063_buck_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, .set_current_limit = da9063_set_current_limit, .get_current_limit = da9063_get_current_limit, .set_mode = da9063_buck_set_mode, .get_mode = da9063_buck_get_mode, .get_status = da9063_buck_get_status, .set_suspend_voltage = da9063_set_suspend_voltage, .set_suspend_enable = da9063_suspend_enable, .set_suspend_disable = da9063_suspend_disable, .set_suspend_mode = da9063_buck_set_suspend_mode, }; static struct regulator_ops da9063_ldo_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .list_voltage = 
regulator_list_voltage_linear, .set_mode = da9063_ldo_set_mode, .get_mode = da9063_ldo_get_mode, .get_status = da9063_ldo_get_status, .set_suspend_voltage = da9063_set_suspend_voltage, .set_suspend_enable = da9063_suspend_enable, .set_suspend_disable = da9063_suspend_disable, .set_suspend_mode = da9063_ldo_set_suspend_mode, }; /* Info of regulators for DA9063 */ static const struct da9063_regulator_info da9063_regulator_info[] = { { DA9063_BUCK(DA9063, BCORE1, 300, 10, 1570, da9063_buck_a_limits), DA9063_BUCK_COMMON_FIELDS(BCORE1), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK), }, { DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570, da9063_buck_a_limits), DA9063_BUCK_COMMON_FIELDS(BCORE2), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE2_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C, DA9063_BCORE2_ILIM_MASK), }, { DA9063_BUCK(DA9063, BPRO, 530, 10, 1800, da9063_buck_a_limits), DA9063_BUCK_COMMON_FIELDS(BPRO), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPRO_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B, DA9063_BPRO_ILIM_MASK), }, { DA9063_BUCK(DA9063, BMEM, 800, 20, 3340, da9063_buck_b_limits), DA9063_BUCK_COMMON_FIELDS(BMEM), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK), }, { DA9063_BUCK(DA9063, BIO, 800, 20, 3340, da9063_buck_b_limits), DA9063_BUCK_COMMON_FIELDS(BIO), .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VBIO_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A, DA9063_BIO_ILIM_MASK), }, { DA9063_BUCK(DA9063, BPERI, 800, 20, 3340, da9063_buck_b_limits), DA9063_BUCK_COMMON_FIELDS(BPERI), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPERI_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B, DA9063_BPERI_ILIM_MASK), }, { DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570, da9063_bcores_merged_limits), /* BCORES_MERGED uses the same register fields as BCORE1 */ DA9063_BUCK_COMMON_FIELDS(BCORE1), .suspend = BFIELD(DA9063_REG_DVC_1, 
DA9063_VBCORE1_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C, DA9063_BCORE1_ILIM_MASK), }, { DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340, da9063_bmem_bio_merged_limits), /* BMEM_BIO_MERGED uses the same register fields as BMEM */ DA9063_BUCK_COMMON_FIELDS(BMEM), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL), .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A, DA9063_BMEM_ILIM_MASK), }, { DA9063_LDO(DA9063, LDO1, 600, 20, 1860), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO1_SEL), }, { DA9063_LDO(DA9063, LDO2, 600, 20, 1860), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO2_SEL), }, { DA9063_LDO(DA9063, LDO3, 900, 20, 3440), .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO3_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM), }, { DA9063_LDO(DA9063, LDO4, 900, 20, 3440), .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VLDO4_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO4_LIM), }, { DA9063_LDO(DA9063, LDO5, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO5_CONT, DA9063_VLDO5_SEL), }, { DA9063_LDO(DA9063, LDO6, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO6_CONT, DA9063_VLDO6_SEL), }, { DA9063_LDO(DA9063, LDO7, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO7_CONT, DA9063_VLDO7_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM), }, { DA9063_LDO(DA9063, LDO8, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO8_CONT, DA9063_VLDO8_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM), }, { DA9063_LDO(DA9063, LDO9, 950, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL), }, { DA9063_LDO(DA9063, LDO10, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO10_CONT, DA9063_VLDO10_SEL), }, { DA9063_LDO(DA9063, LDO11, 900, 50, 3600), .suspend = BFIELD(DA9063_REG_LDO11_CONT, DA9063_VLDO11_SEL), .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM), }, }; /* Link chip model with regulators info table */ static struct da9063_dev_model regulators_models[] = { { .regulator_info = da9063_regulator_info, .n_regulators = 
ARRAY_SIZE(da9063_regulator_info), .dev_model = PMIC_DA9063, }, { } }; /* Regulator interrupt handlers */ static irqreturn_t da9063_ldo_lim_event(int irq, void *data) { struct da9063_regulators *regulators = data; struct da9063 *hw = regulators->regulator[0].hw; struct da9063_regulator *regl; int bits, i , ret; ret = regmap_read(hw->regmap, DA9063_REG_STATUS_D, &bits); if (ret < 0) return IRQ_NONE; for (i = regulators->n_regulators - 1; i >= 0; i--) { regl = &regulators->regulator[i]; if (regl->info->oc_event.reg != DA9063_REG_STATUS_D) continue; if (BIT(regl->info->oc_event.lsb) & bits) regulator_notifier_call_chain(regl->rdev, REGULATOR_EVENT_OVER_CURRENT, NULL); } return IRQ_HANDLED; } /* * Probing and Initialisation functions */ static const struct regulator_init_data *da9063_get_regulator_initdata( const struct da9063_regulators_pdata *regl_pdata, int id) { int i; for (i = 0; i < regl_pdata->n_regulators; i++) { if (id == regl_pdata->regulator_data[i].id) return regl_pdata->regulator_data[i].initdata; } return NULL; } #ifdef CONFIG_OF static struct of_regulator_match da9063_matches[] = { [DA9063_ID_BCORE1] = { .name = "bcore1" }, [DA9063_ID_BCORE2] = { .name = "bcore2" }, [DA9063_ID_BPRO] = { .name = "bpro", }, [DA9063_ID_BMEM] = { .name = "bmem", }, [DA9063_ID_BIO] = { .name = "bio", }, [DA9063_ID_BPERI] = { .name = "bperi", }, [DA9063_ID_BCORES_MERGED] = { .name = "bcores-merged" }, [DA9063_ID_BMEM_BIO_MERGED] = { .name = "bmem-bio-merged", }, [DA9063_ID_LDO1] = { .name = "ldo1", }, [DA9063_ID_LDO2] = { .name = "ldo2", }, [DA9063_ID_LDO3] = { .name = "ldo3", }, [DA9063_ID_LDO4] = { .name = "ldo4", }, [DA9063_ID_LDO5] = { .name = "ldo5", }, [DA9063_ID_LDO6] = { .name = "ldo6", }, [DA9063_ID_LDO7] = { .name = "ldo7", }, [DA9063_ID_LDO8] = { .name = "ldo8", }, [DA9063_ID_LDO9] = { .name = "ldo9", }, [DA9063_ID_LDO10] = { .name = "ldo10", }, [DA9063_ID_LDO11] = { .name = "ldo11", }, }; static struct da9063_regulators_pdata *da9063_parse_regulators_dt( struct 
platform_device *pdev, struct of_regulator_match **da9063_reg_matches) { struct da9063_regulators_pdata *pdata; struct da9063_regulator_data *rdata; struct device_node *node; int i, n, num; node = of_find_node_by_name(pdev->dev.parent->of_node, "regulators"); if (!node) { dev_err(&pdev->dev, "Regulators device node not found\n"); return ERR_PTR(-ENODEV); } num = of_regulator_match(&pdev->dev, node, da9063_matches, ARRAY_SIZE(da9063_matches)); if (num < 0) { dev_err(&pdev->dev, "Failed to match regulators\n"); return ERR_PTR(-EINVAL); } pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return ERR_PTR(-ENOMEM); pdata->regulator_data = devm_kzalloc(&pdev->dev, num * sizeof(*pdata->regulator_data), GFP_KERNEL); if (!pdata->regulator_data) return ERR_PTR(-ENOMEM); pdata->n_regulators = num; n = 0; for (i = 0; i < ARRAY_SIZE(da9063_matches); i++) { if (!da9063_matches[i].init_data) continue; rdata = &pdata->regulator_data[n]; rdata->id = i; rdata->initdata = da9063_matches[i].init_data; n++; }; *da9063_reg_matches = da9063_matches; return pdata; } #else static struct da9063_regulators_pdata *da9063_parse_regulators_dt( struct platform_device *pdev, struct of_regulator_match **da9063_reg_matches) { da9063_reg_matches = NULL; return ERR_PTR(-ENODEV); } #endif static int da9063_regulator_probe(struct platform_device *pdev) { struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent); struct da9063_pdata *da9063_pdata = dev_get_platdata(da9063->dev); struct of_regulator_match *da9063_reg_matches = NULL; struct da9063_regulators_pdata *regl_pdata; const struct da9063_dev_model *model; struct da9063_regulators *regulators; struct da9063_regulator *regl; struct regulator_config config; bool bcores_merged, bmem_bio_merged; int id, irq, n, n_regulators, ret, val; size_t size; regl_pdata = da9063_pdata ? 
da9063_pdata->regulators_pdata : NULL; if (!regl_pdata) regl_pdata = da9063_parse_regulators_dt(pdev, &da9063_reg_matches); if (IS_ERR(regl_pdata) || regl_pdata->n_regulators == 0) { dev_err(&pdev->dev, "No regulators defined for the platform\n"); return PTR_ERR(regl_pdata); } /* Find regulators set for particular device model */ for (model = regulators_models; model->regulator_info; model++) { if (model->dev_model == da9063->model) break; } if (!model->regulator_info) { dev_err(&pdev->dev, "Chip model not recognised (%u)\n", da9063->model); return -ENODEV; } ret = regmap_read(da9063->regmap, DA9063_REG_CONFIG_H, &val); if (ret < 0) { dev_err(&pdev->dev, "Error while reading BUCKs configuration\n"); return -EIO; } bcores_merged = val & DA9063_BCORE_MERGE; bmem_bio_merged = val & DA9063_BUCK_MERGE; n_regulators = model->n_regulators; if (bcores_merged) n_regulators -= 2; /* remove BCORE1, BCORE2 */ else n_regulators--; /* remove BCORES_MERGED */ if (bmem_bio_merged) n_regulators -= 2; /* remove BMEM, BIO */ else n_regulators--; /* remove BMEM_BIO_MERGED */ /* Allocate memory required by usable regulators */ size = sizeof(struct da9063_regulators) + n_regulators * sizeof(struct da9063_regulator); regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); if (!regulators) { dev_err(&pdev->dev, "No memory for regulators\n"); return -ENOMEM; } regulators->n_regulators = n_regulators; platform_set_drvdata(pdev, regulators); /* Register all regulators declared in platform information */ n = 0; id = 0; while (n < regulators->n_regulators) { /* Skip regulator IDs depending on merge mode configuration */ switch (id) { case DA9063_ID_BCORE1: case DA9063_ID_BCORE2: if (bcores_merged) { id++; continue; } break; case DA9063_ID_BMEM: case DA9063_ID_BIO: if (bmem_bio_merged) { id++; continue; } break; case DA9063_ID_BCORES_MERGED: if (!bcores_merged) { id++; continue; } break; case DA9063_ID_BMEM_BIO_MERGED: if (!bmem_bio_merged) { id++; continue; } break; } /* Initialise regulator 
structure */ regl = &regulators->regulator[n]; regl->hw = da9063; regl->info = &model->regulator_info[id]; regl->desc = regl->info->desc; regl->desc.type = REGULATOR_VOLTAGE; regl->desc.owner = THIS_MODULE; if (regl->info->mode.reg) regl->mode = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->mode); if (regl->info->suspend.reg) regl->suspend = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend); if (regl->info->sleep.reg) regl->sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->sleep); if (regl->info->suspend_sleep.reg) regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend_sleep); if (regl->info->ilimit.reg) regl->ilimit = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->ilimit); /* Register regulator */ memset(&config, 0, sizeof(config)); config.dev = &pdev->dev; config.init_data = da9063_get_regulator_initdata(regl_pdata, id); config.driver_data = regl; if (da9063_reg_matches) config.of_node = da9063_reg_matches[id].of_node; config.regmap = da9063->regmap; regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc, &config); if (IS_ERR(regl->rdev)) { dev_err(&pdev->dev, "Failed to register %s regulator\n", regl->desc.name); return PTR_ERR(regl->rdev); } id++; n++; } /* LDOs overcurrent event support */ irq = platform_get_irq_byname(pdev, "LDO_LIM"); if (irq < 0) { dev_err(&pdev->dev, "Failed to get IRQ.\n"); return irq; } regulators->irq_ldo_lim = regmap_irq_get_virq(da9063->regmap_irq, irq); if (regulators->irq_ldo_lim >= 0) { ret = request_threaded_irq(regulators->irq_ldo_lim, NULL, da9063_ldo_lim_event, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "LDO_LIM", regulators); if (ret) { dev_err(&pdev->dev, "Failed to request LDO_LIM IRQ.\n"); regulators->irq_ldo_lim = -ENXIO; } } return 0; } static int da9063_regulator_remove(struct platform_device *pdev) { struct da9063_regulators *regulators = platform_get_drvdata(pdev); free_irq(regulators->irq_ldo_lim, 
regulators); free_irq(regulators->irq_uvov, regulators); return 0; } static struct platform_driver da9063_regulator_driver = { .driver = { .name = DA9063_DRVNAME_REGULATORS, .owner = THIS_MODULE, }, .probe = da9063_regulator_probe, .remove = da9063_regulator_remove, }; static int __init da9063_regulator_init(void) { return platform_driver_register(&da9063_regulator_driver); } subsys_initcall(da9063_regulator_init); static void __exit da9063_regulator_cleanup(void) { platform_driver_unregister(&da9063_regulator_driver); } module_exit(da9063_regulator_cleanup); /* Module information */ MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>"); MODULE_DESCRIPTION("DA9063 regulators driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("paltform:" DA9063_DRVNAME_REGULATORS);
gpl-2.0
invisiblek/android_kernel_motorola_msm8992
drivers/usb/phy/phy-msm-qusb.c
252
17115
/* * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/clk/msm-clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/usb/phy.h> #include <linux/usb/msm_hsusb.h> #define QUSB2PHY_PORT_POWERDOWN 0xB4 #define CLAMP_N_EN BIT(5) #define FREEZIO_N BIT(1) #define POWER_DOWN BIT(0) #define QUSB2PHY_PORT_UTMI_CTRL2 0xC4 #define QUSB2PHY_PORT_TUNE1 0x80 #define QUSB2PHY_PORT_TUNE2 0x84 #define QUSB2PHY_PORT_TUNE3 0x88 #define QUSB2PHY_PORT_TUNE4 0x8C #define QUSB2PHY_PORT_INTR_CTRL 0xBC #define CHG_DET_INTR_EN BIT(4) #define DMSE_INTR_HIGH_SEL BIT(3) #define DMSE_INTR_EN BIT(2) #define DPSE_INTR_HIGH_SEL BIT(1) #define DPSE_INTR_EN BIT(0) #define QUSB2PHY_PORT_UTMI_STATUS 0xF4 #define LINESTATE_DP BIT(0) #define LINESTATE_DM BIT(1) #define UTMI_OTG_VBUS_VALID BIT(20) #define SW_SESSVLD_SEL BIT(28) #define QRBTC_USB2_PLL 0x404 #define QRBTC_USB2_PLLCTRL2 0x414 #define QRBTC_USB2_PLLCTRL1 0x410 #define QRBTC_USB2_PLLCTRL3 0x418 #define QRBTC_USB2_PLLTEST1 0x408 #define RUMI_RESET_ADDRESS 0x6500 #define RUMI_RESET_VALUE_1 0x80000000 #define RUMI_RESET_VALUE_2 0x000201e0 #define PORT_OFFSET(i) ((i == 0) ? 0x0 : ((i == 1) ? 
0x6c : 0x88)) #define HS_PHY_CTRL_REG(i) (0x10 + PORT_OFFSET(i)) #define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */ #define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */ #define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */ #define QUSB2PHY_3P3_VOL_MIN 3075000 /* uV */ #define QUSB2PHY_3P3_VOL_MAX 3200000 /* uV */ #define QUSB2PHY_3P3_HPM_LOAD 30000 /* uA */ struct qusb_phy { struct usb_phy phy; void __iomem *base; void __iomem *qscratch_base; struct clk *ref_clk; struct clk *cfg_ahb_clk; struct clk *phy_reset; struct regulator *vdd; struct regulator *vdda33; struct regulator *vdda18; int vdd_levels[3]; /* none, low, high */ u32 qusb_tune; bool power_enabled; bool clocks_enabled; bool cable_connected; bool suspended; bool emulation; bool ulpi_mode; }; static int qusb_phy_reset(struct usb_phy *phy) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); dev_dbg(phy->dev, "%s\n", __func__); clk_reset(qphy->phy_reset, CLK_RESET_ASSERT); usleep(100); clk_reset(qphy->phy_reset, CLK_RESET_DEASSERT); return 0; } static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high) { int min, ret; min = high ? 1 : 0; /* low or none? */ ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min], qphy->vdd_levels[2]); if (ret) { dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n"); return ret; } dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n", qphy->vdd_levels[min], qphy->vdd_levels[2]); return ret; } static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on) { int ret = 0; dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n", __func__, on ? 
"on" : "off", qphy->power_enabled); if (qphy->power_enabled == on) { dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n"); return 0; } if (!on) goto disable_vdda33; ret = qusb_phy_config_vdd(qphy, true); if (ret) { dev_err(qphy->phy.dev, "Unable to config VDD:%d\n", ret); goto err_vdd; } ret = regulator_enable(qphy->vdd); if (ret) { dev_err(qphy->phy.dev, "Unable to enable VDD\n"); goto unconfig_vdd; } ret = regulator_set_optimum_mode(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD); if (ret < 0) { dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret); goto disable_vdd; } ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN, QUSB2PHY_1P8_VOL_MAX); if (ret) { dev_err(qphy->phy.dev, "Unable to set voltage for vdda18:%d\n", ret); goto put_vdda18_lpm; } ret = regulator_enable(qphy->vdda18); if (ret) { dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret); goto unset_vdda18; } ret = regulator_set_optimum_mode(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD); if (ret < 0) { dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret); goto disable_vdda18; } ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN, QUSB2PHY_3P3_VOL_MAX); if (ret) { dev_err(qphy->phy.dev, "Unable to set voltage for vdda33:%d\n", ret); goto put_vdda33_lpm; } ret = regulator_enable(qphy->vdda33); if (ret) { dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret); goto unset_vdd33; } qphy->power_enabled = true; pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__); return 0; disable_vdda33: ret = regulator_disable(qphy->vdda33); if (ret) dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret); unset_vdd33: ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX); if (ret) dev_err(qphy->phy.dev, "Unable to set (0) voltage for vdda33:%d\n", ret); put_vdda33_lpm: ret = regulator_set_optimum_mode(qphy->vdda33, 0); if (ret < 0) dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n"); disable_vdda18: ret = regulator_disable(qphy->vdda18); if 
(ret) dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret); unset_vdda18: ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX); if (ret) dev_err(qphy->phy.dev, "Unable to set (0) voltage for vdda18:%d\n", ret); put_vdda18_lpm: ret = regulator_set_optimum_mode(qphy->vdda18, 0); if (ret < 0) dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n"); disable_vdd: ret = regulator_disable(qphy->vdd); if (ret) dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n", ret); unconfig_vdd: ret = qusb_phy_config_vdd(qphy, false); if (ret) dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n", ret); err_vdd: qphy->power_enabled = false; dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n"); return ret; } static int qusb_phy_init(struct usb_phy *phy) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); int ret; u32 t1, t2, t3, t4; dev_dbg(phy->dev, "%s\n", __func__); ret = qusb_phy_enable_power(qphy, true); if (ret) return ret; if (!qphy->clocks_enabled) { clk_prepare_enable(qphy->ref_clk); clk_prepare_enable(qphy->cfg_ahb_clk); qphy->clocks_enabled = true; } if (qphy->emulation) { /* Configure QUSB2 PLLs for RUMI */ writel_relaxed(0x19, qphy->base + QRBTC_USB2_PLL); writel_relaxed(0x20, qphy->base + QRBTC_USB2_PLLCTRL2); writel_relaxed(0x79, qphy->base + QRBTC_USB2_PLLCTRL1); writel_relaxed(0x00, qphy->base + QRBTC_USB2_PLLCTRL3); writel_relaxed(0x99, qphy->base + QRBTC_USB2_PLL); writel_relaxed(0x04, qphy->base + QRBTC_USB2_PLLTEST1); writel_relaxed(0xD9, qphy->base + QRBTC_USB2_PLL); /* Wait for 5ms as per QUSB2 RUMI sequence from VI */ usleep(5000); /* Perform the RUMI PLL Reset */ writel_relaxed((int)RUMI_RESET_VALUE_1, qphy->base + RUMI_RESET_ADDRESS); /* Wait for 10ms as per QUSB2 RUMI sequence from VI */ usleep(10000); writel_relaxed(0x0, qphy->base + RUMI_RESET_ADDRESS); /* Wait for 10ms as per QUSB2 RUMI sequence from VI */ usleep(10000); writel_relaxed((int)RUMI_RESET_VALUE_2, qphy->base + RUMI_RESET_ADDRESS); /* Wait for 10ms as 
per QUSB2 RUMI sequence from VI */ usleep(10000); writel_relaxed(0x0, qphy->base + RUMI_RESET_ADDRESS); } else { /* Disable the PHY */ writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN, qphy->base + QUSB2PHY_PORT_POWERDOWN); /* configure for ULPI mode if requested */ if (qphy->ulpi_mode) writel_relaxed(0x0, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2); if (qphy->qusb_tune) { t1 = qphy->qusb_tune >> 24; t2 = (qphy->qusb_tune) >> 16 & 0xFF; t3 = (qphy->qusb_tune) >> 8 & 0xFF; t4 = (qphy->qusb_tune) & 0xFF; /* Program tuning parameters for PHY */ writel_relaxed(t1, qphy->base + QUSB2PHY_PORT_TUNE1); writel_relaxed(t2, qphy->base + QUSB2PHY_PORT_TUNE2); writel_relaxed(t3, qphy->base + QUSB2PHY_PORT_TUNE3); writel_relaxed(t4, qphy->base + QUSB2PHY_PORT_TUNE4); } /* ensure above writes are completed before re-enabling PHY */ wmb(); /* Enable the PHY */ writel_relaxed(CLAMP_N_EN | FREEZIO_N, qphy->base + QUSB2PHY_PORT_POWERDOWN); } return 0; } static void qusb_phy_shutdown(struct usb_phy *phy) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); dev_dbg(phy->dev, "%s\n", __func__); /* clocks need to be on to access register */ if (!qphy->clocks_enabled) { clk_prepare_enable(qphy->ref_clk); clk_prepare_enable(qphy->cfg_ahb_clk); qphy->clocks_enabled = true; } /* Disable the PHY */ writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN, qphy->base + QUSB2PHY_PORT_POWERDOWN); wmb(); clk_disable_unprepare(qphy->cfg_ahb_clk); clk_disable_unprepare(qphy->ref_clk); qphy->clocks_enabled = false; } static void qusb_write_readback(void *base, u32 offset, const u32 mask, u32 val) { u32 write_val, tmp = readl_relaxed(base + offset); tmp &= ~mask; /* retain other bits */ write_val = tmp | val; writel_relaxed(write_val, base + offset); /* Read back to see if val was written */ tmp = readl_relaxed(base + offset); tmp &= mask; /* clear other bits */ if (tmp != val) pr_err("%s: write: %x to QSCRATCH: %x FAILED\n", __func__, val, offset); } /** * Performs QUSB2 PHY suspend/resume 
functionality. * * @uphy - usb phy pointer. * @suspend - to enable suspend or not. 1 - suspend, 0 - resume * */ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); u32 linestate = 0, intr_mask = 0; if (!qphy->clocks_enabled) { dev_dbg(phy->dev, "clocks not enabled yet\n"); return -EAGAIN; } if (qphy->suspended && suspend) { dev_dbg(phy->dev, "%s: USB PHY is already suspended\n", __func__); return 0; } if (suspend) { /* Bus suspend case */ if (qphy->cable_connected || (qphy->phy.flags & PHY_HOST_MODE)) { /* Clear all interrupts */ writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_INTR_CTRL); linestate = readl_relaxed(qphy->base + QUSB2PHY_PORT_UTMI_STATUS); /* * D+/D- interrupts are level-triggered, but we are * only interested if the line state changes, so enable * the high/low trigger based on current state. In * other words, enable the triggers _opposite_ of what * the current D+/D- levels are. * e.g. if currently D+ high, D- low (HS 'J'/Suspend), * configure the mask to trigger on D+ low OR D- high */ intr_mask = DPSE_INTR_EN | DMSE_INTR_EN; if (!(linestate & LINESTATE_DP)) /* D+ low */ intr_mask |= DPSE_INTR_HIGH_SEL; if (!(linestate & LINESTATE_DM)) /* D- low */ intr_mask |= DMSE_INTR_HIGH_SEL; writel_relaxed(intr_mask, qphy->base + QUSB2PHY_PORT_INTR_CTRL); clk_disable_unprepare(qphy->cfg_ahb_clk); clk_disable_unprepare(qphy->ref_clk); } else { /* Disconnect case */ clk_disable_unprepare(qphy->cfg_ahb_clk); clk_disable_unprepare(qphy->ref_clk); qusb_phy_enable_power(qphy, false); } qphy->suspended = true; } else { /* Bus suspend case */ if (qphy->cable_connected || (qphy->phy.flags & PHY_HOST_MODE)) { clk_prepare_enable(qphy->ref_clk); clk_prepare_enable(qphy->cfg_ahb_clk); /* Clear all interrupts on resume */ writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_INTR_CTRL); } else { qusb_phy_enable_power(qphy, true); clk_prepare_enable(qphy->ref_clk); 
clk_prepare_enable(qphy->cfg_ahb_clk); } qphy->suspended = false; } return 0; } static int qusb_phy_notify_connect(struct usb_phy *phy, enum usb_device_speed speed) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); qphy->cable_connected = true; dev_dbg(phy->dev, " cable_connected=%d\n", qphy->cable_connected); /* Set OTG VBUS Valid from HSPHY to controller */ qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG(0), UTMI_OTG_VBUS_VALID, UTMI_OTG_VBUS_VALID); /* Indicate value is driven by UTMI_OTG_VBUS_VALID bit */ qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG(0), SW_SESSVLD_SEL, SW_SESSVLD_SEL); dev_dbg(phy->dev, "QUSB2 phy connect notification\n"); return 0; } static int qusb_phy_notify_disconnect(struct usb_phy *phy, enum usb_device_speed speed) { struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy); qphy->cable_connected = false; dev_dbg(phy->dev, " cable_connected=%d\n", qphy->cable_connected); /* Set OTG VBUS Valid from HSPHY to controller */ qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG(0), UTMI_OTG_VBUS_VALID, 0); /* Indicate value is driven by UTMI_OTG_VBUS_VALID bit */ qusb_write_readback(qphy->qscratch_base, HS_PHY_CTRL_REG(0), SW_SESSVLD_SEL, 0); dev_dbg(phy->dev, "QUSB2 phy disconnect notification\n"); return 0; } static int qusb_phy_probe(struct platform_device *pdev) { struct qusb_phy *qphy; struct device *dev = &pdev->dev; struct resource *res; int ret = 0; const char *phy_type; qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL); if (!qphy) return -ENOMEM; qphy->phy.dev = dev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qusb_phy_base"); qphy->base = devm_ioremap_resource(dev, res); if (IS_ERR(qphy->base)) return PTR_ERR(qphy->base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qscratch_base"); qphy->qscratch_base = devm_ioremap_resource(dev, res); if (IS_ERR(qphy->qscratch_base)) qphy->qscratch_base = NULL; qphy->ref_clk = devm_clk_get(dev, "ref_clk"); if 
(IS_ERR(qphy->ref_clk)) return PTR_ERR(qphy->ref_clk); clk_set_rate(qphy->ref_clk, 19200000); qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk"); if (IS_ERR(qphy->cfg_ahb_clk)) return PTR_ERR(qphy->cfg_ahb_clk); qphy->phy_reset = devm_clk_get(dev, "phy_reset"); if (IS_ERR(qphy->phy_reset)) return PTR_ERR(qphy->phy_reset); qphy->emulation = of_property_read_bool(dev->of_node, "qcom,emulation"); of_property_read_u32(dev->of_node, "qcom,qusb-tune", &qphy->qusb_tune); qphy->ulpi_mode = false; ret = of_property_read_string(dev->of_node, "phy_type", &phy_type); if (!ret) { if (!strcasecmp(phy_type, "ulpi")) qphy->ulpi_mode = true; } else { dev_err(dev, "error reading phy_type property\n"); return ret; } ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level", (u32 *) qphy->vdd_levels, ARRAY_SIZE(qphy->vdd_levels)); if (ret) { dev_err(dev, "error reading qcom,vdd-voltage-level property\n"); return ret; } qphy->vdd = devm_regulator_get(dev, "vdd"); if (IS_ERR(qphy->vdd)) { dev_err(dev, "unable to get vdd supply\n"); return PTR_ERR(qphy->vdd); } qphy->vdda33 = devm_regulator_get(dev, "vdda33"); if (IS_ERR(qphy->vdda33)) { dev_err(dev, "unable to get vdda33 supply\n"); return PTR_ERR(qphy->vdda33); } qphy->vdda18 = devm_regulator_get(dev, "vdda18"); if (IS_ERR(qphy->vdda18)) { dev_err(dev, "unable to get vdda18 supply\n"); return PTR_ERR(qphy->vdda18); } platform_set_drvdata(pdev, qphy); qphy->phy.label = "msm-qusb-phy"; qphy->phy.init = qusb_phy_init; qphy->phy.set_suspend = qusb_phy_set_suspend; qphy->phy.shutdown = qusb_phy_shutdown; qphy->phy.reset = qusb_phy_reset; qphy->phy.type = USB_PHY_TYPE_USB2; if (qphy->qscratch_base) { qphy->phy.notify_connect = qusb_phy_notify_connect; qphy->phy.notify_disconnect = qusb_phy_notify_disconnect; } qusb_phy_reset(&qphy->phy); ret = usb_add_phy_dev(&qphy->phy); return ret; } static int qusb_phy_remove(struct platform_device *pdev) { struct qusb_phy *qphy = platform_get_drvdata(pdev); usb_remove_phy(&qphy->phy); if 
(qphy->clocks_enabled) { clk_disable_unprepare(qphy->cfg_ahb_clk); clk_disable_unprepare(qphy->ref_clk); qphy->clocks_enabled = false; } qusb_phy_enable_power(qphy, false); return 0; } static const struct of_device_id qusb_phy_id_table[] = { { .compatible = "qcom,qusb2phy", }, { }, }; MODULE_DEVICE_TABLE(of, qusb_phy_id_table); static struct platform_driver qusb_phy_driver = { .probe = qusb_phy_probe, .remove = qusb_phy_remove, .driver = { .name = "msm-qusb-phy", .of_match_table = of_match_ptr(qusb_phy_id_table), }, }; module_platform_driver(qusb_phy_driver); MODULE_DESCRIPTION("MSM QUSB2 PHY driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
sencis/kernel_huawei_u8500
drivers/power/wm97xx_battery.c
508
7736
/* * linux/drivers/power/wm97xx_battery.c * * Battery measurement code for WM97xx * * based on tosa_battery.c * * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/wm97xx.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/irq.h> static DEFINE_MUTEX(bat_lock); static struct work_struct bat_work; struct mutex work_lock; static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN; static struct wm97xx_batt_info *gpdata; static enum power_supply_property *prop; static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) { struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent), pdata->batt_aux) * pdata->batt_mult / pdata->batt_div; } static unsigned long wm97xx_read_temp(struct power_supply *bat_ps) { struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev->parent), pdata->temp_aux) * pdata->temp_mult / pdata->temp_div; } static int wm97xx_bat_get_property(struct power_supply *bat_ps, enum power_supply_property psp, union power_supply_propval *val) { struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = bat_status; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = pdata->batt_tech; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (pdata->batt_aux >= 0) val->intval = wm97xx_read_bat(bat_ps); else return 
-EINVAL; break; case POWER_SUPPLY_PROP_TEMP: if (pdata->temp_aux >= 0) val->intval = wm97xx_read_temp(bat_ps); else return -EINVAL; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX: if (pdata->max_voltage >= 0) val->intval = pdata->max_voltage; else return -EINVAL; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN: if (pdata->min_voltage >= 0) val->intval = pdata->min_voltage; else return -EINVAL; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; default: return -EINVAL; } return 0; } static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps) { schedule_work(&bat_work); } static void wm97xx_bat_update(struct power_supply *bat_ps) { int old_status = bat_status; struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; mutex_lock(&work_lock); bat_status = (pdata->charge_gpio >= 0) ? (gpio_get_value(pdata->charge_gpio) ? POWER_SUPPLY_STATUS_DISCHARGING : POWER_SUPPLY_STATUS_CHARGING) : POWER_SUPPLY_STATUS_UNKNOWN; if (old_status != bat_status) { pr_debug("%s: %i -> %i\n", bat_ps->name, old_status, bat_status); power_supply_changed(bat_ps); } mutex_unlock(&work_lock); } static struct power_supply bat_ps = { .type = POWER_SUPPLY_TYPE_BATTERY, .get_property = wm97xx_bat_get_property, .external_power_changed = wm97xx_bat_external_power_changed, .use_for_apm = 1, }; static void wm97xx_bat_work(struct work_struct *work) { wm97xx_bat_update(&bat_ps); } static irqreturn_t wm97xx_chrg_irq(int irq, void *data) { schedule_work(&bat_work); return IRQ_HANDLED; } #ifdef CONFIG_PM static int wm97xx_bat_suspend(struct device *dev) { flush_scheduled_work(); return 0; } static int wm97xx_bat_resume(struct device *dev) { schedule_work(&bat_work); return 0; } static struct dev_pm_ops wm97xx_bat_pm_ops = { .suspend = wm97xx_bat_suspend, .resume = wm97xx_bat_resume, }; #endif static int __devinit wm97xx_bat_probe(struct platform_device *dev) { int ret = 0; int props = 1; /* POWER_SUPPLY_PROP_PRESENT */ int i = 0; 
struct wm97xx_pdata *wmdata = dev->dev.platform_data; struct wm97xx_batt_pdata *pdata; if (gpdata) { dev_err(&dev->dev, "Do not pass platform_data through " "wm97xx_bat_set_pdata!\n"); return -EINVAL; } else pdata = wmdata->batt_pdata; if (dev->id != -1) return -EINVAL; mutex_init(&work_lock); if (!pdata) { dev_err(&dev->dev, "No platform_data supplied\n"); return -EINVAL; } if (gpio_is_valid(pdata->charge_gpio)) { ret = gpio_request(pdata->charge_gpio, "BATT CHRG"); if (ret) goto err; ret = gpio_direction_input(pdata->charge_gpio); if (ret) goto err2; ret = request_irq(gpio_to_irq(pdata->charge_gpio), wm97xx_chrg_irq, IRQF_DISABLED, "AC Detect", 0); if (ret) goto err2; props++; /* POWER_SUPPLY_PROP_STATUS */ } if (pdata->batt_tech >= 0) props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */ if (pdata->temp_aux >= 0) props++; /* POWER_SUPPLY_PROP_TEMP */ if (pdata->batt_aux >= 0) props++; /* POWER_SUPPLY_PROP_VOLTAGE_NOW */ if (pdata->max_voltage >= 0) props++; /* POWER_SUPPLY_PROP_VOLTAGE_MAX */ if (pdata->min_voltage >= 0) props++; /* POWER_SUPPLY_PROP_VOLTAGE_MIN */ prop = kzalloc(props * sizeof(*prop), GFP_KERNEL); if (!prop) goto err3; prop[i++] = POWER_SUPPLY_PROP_PRESENT; if (pdata->charge_gpio >= 0) prop[i++] = POWER_SUPPLY_PROP_STATUS; if (pdata->batt_tech >= 0) prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY; if (pdata->temp_aux >= 0) prop[i++] = POWER_SUPPLY_PROP_TEMP; if (pdata->batt_aux >= 0) prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_NOW; if (pdata->max_voltage >= 0) prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MAX; if (pdata->min_voltage >= 0) prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MIN; INIT_WORK(&bat_work, wm97xx_bat_work); if (!pdata->batt_name) { dev_info(&dev->dev, "Please consider setting proper battery " "name in platform definition file, falling " "back to name \"wm97xx-batt\"\n"); bat_ps.name = "wm97xx-batt"; } else bat_ps.name = pdata->batt_name; bat_ps.properties = prop; bat_ps.num_properties = props; ret = power_supply_register(&dev->dev, &bat_ps); if (!ret) 
schedule_work(&bat_work); else goto err4; return 0; err4: kfree(prop); err3: if (gpio_is_valid(pdata->charge_gpio)) free_irq(gpio_to_irq(pdata->charge_gpio), dev); err2: if (gpio_is_valid(pdata->charge_gpio)) gpio_free(pdata->charge_gpio); err: return ret; } static int __devexit wm97xx_bat_remove(struct platform_device *dev) { struct wm97xx_pdata *wmdata = dev->dev.platform_data; struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; if (pdata && gpio_is_valid(pdata->charge_gpio)) { free_irq(gpio_to_irq(pdata->charge_gpio), dev); gpio_free(pdata->charge_gpio); } flush_scheduled_work(); power_supply_unregister(&bat_ps); kfree(prop); return 0; } static struct platform_driver wm97xx_bat_driver = { .driver = { .name = "wm97xx-battery", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &wm97xx_bat_pm_ops, #endif }, .probe = wm97xx_bat_probe, .remove = __devexit_p(wm97xx_bat_remove), }; static int __init wm97xx_bat_init(void) { return platform_driver_register(&wm97xx_bat_driver); } static void __exit wm97xx_bat_exit(void) { platform_driver_unregister(&wm97xx_bat_driver); } void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) { gpdata = data; } EXPORT_SYMBOL_GPL(wm97xx_bat_set_pdata); module_init(wm97xx_bat_init); module_exit(wm97xx_bat_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); MODULE_DESCRIPTION("WM97xx battery driver");
gpl-2.0
scjen/rts-pj2
drivers/net/3c527.c
508
43119
/* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6. * * (c) Copyright 1998 Red Hat Software Inc * Written by Alan Cox. * Further debugging by Carl Drougge. * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br> * Heavily modified by Richard Procter <rnp@paradise.net.nz> * * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c * (for the MCA stuff) written by Wim Dumon. * * Thanks to 3Com for making this possible by providing me with the * documentation. * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #define DRV_NAME "3c527" #define DRV_VERSION "0.7-SMP" #define DRV_RELDATE "2003/09/21" static const char *version = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n"; /** * DOC: Traps for the unwary * * The diagram (Figure 1-1) and the POS summary disagree with the * "Interrupt Level" section in the manual. * * The manual contradicts itself when describing the minimum number * buffers in the 'configure lists' command. * My card accepts a buffer config of 4/4. * * Setting the SAV BP bit does not save bad packets, but * only enables RX on-card stats collection. * * The documentation in places seems to miss things. In actual fact * I've always eventually found everything is documented, it just * requires careful study. * * DOC: Theory Of Operation * * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large * amount of on board intelligence that housekeeps a somewhat dumber * Intel NIC. For performance we want to keep the transmit queue deep * as the card can transmit packets while fetching others from main * memory by bus master DMA. Transmission and reception are driven by * circular buffer queues. * * The mailboxes can be used for controlling how the card traverses * its buffer rings, but are used only for inital setup in this * implementation. 
The exec mailbox allows a variety of commands to * be executed. Each command must complete before the next is * executed. Primarily we use the exec mailbox for controlling the * multicast lists. We have to do a certain amount of interesting * hoop jumping as the multicast list changes can occur in interrupt * state when the card has an exec command pending. We defer such * events until the command completion interrupt. * * A copy break scheme (taken from 3c59x.c) is employed whereby * received frames exceeding a configurable length are passed * directly to the higher networking layers without incuring a copy, * in what amounts to a time/space trade-off. * * The card also keeps a large amount of statistical information * on-board. In a perfect world, these could be used safely at no * cost. However, lacking information to the contrary, processing * them without races would involve so much extra complexity as to * make it unworthwhile to do so. In the end, a hybrid SW/HW * implementation was made necessary --- see mc32_update_stats(). * * DOC: Notes * * It should be possible to use two or more cards, but at this stage * only by loading two copies of the same module. * * The on-board 82586 NIC has trouble receiving multiple * back-to-back frames and so is likely to drop packets from fast * senders. 
**/ #include <linux/module.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/mca-legacy.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/wait.h> #include <linux/ethtool.h> #include <linux/completion.h> #include <linux/bitops.h> #include <linux/semaphore.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/io.h> #include <asm/dma.h> #include "3c527.h" MODULE_LICENSE("GPL"); /* * The name of the card. Is used for messages and in the requests for * io regions, irqs and dma channels */ static const char* cardname = DRV_NAME; /* use 0 for production, 1 for verification, >2 for debug */ #ifndef NET_DEBUG #define NET_DEBUG 2 #endif static unsigned int mc32_debug = NET_DEBUG; /* The number of low I/O ports used by the ethercard. */ #define MC32_IO_EXTENT 8 /* As implemented, values must be a power-of-2 -- 4/8/16/32 */ #define TX_RING_LEN 32 /* Typically the card supports 37 */ #define RX_RING_LEN 8 /* " " " */ /* Copy break point, see above for details. * Setting to > 1512 effectively disables this feature. */ #define RX_COPYBREAK 200 /* Value from 3c59x.c */ /* Issue the 82586 workaround command - this is for "busy lans", but * basically means for all lans now days - has a performance (latency) * cost, but best set. */ static const int WORKAROUND_82586=1; /* Pointers to buffers and their on-card records */ struct mc32_ring_desc { volatile struct skb_header *p; struct sk_buff *skb; }; /* Information that needs to be kept for each board. 
*/ struct mc32_local { int slot; u32 base; volatile struct mc32_mailbox *rx_box; volatile struct mc32_mailbox *tx_box; volatile struct mc32_mailbox *exec_box; volatile struct mc32_stats *stats; /* Start of on-card statistics */ u16 tx_chain; /* Transmit list start offset */ u16 rx_chain; /* Receive list start offset */ u16 tx_len; /* Transmit list count */ u16 rx_len; /* Receive list count */ u16 xceiver_desired_state; /* HALTED or RUNNING */ u16 cmd_nonblocking; /* Thread is uninterested in command result */ u16 mc_reload_wait; /* A multicast load request is pending */ u32 mc_list_valid; /* True when the mclist is set */ struct mc32_ring_desc tx_ring[TX_RING_LEN]; /* Host Transmit ring */ struct mc32_ring_desc rx_ring[RX_RING_LEN]; /* Host Receive ring */ atomic_t tx_count; /* buffers left */ atomic_t tx_ring_head; /* index to tx en-queue end */ u16 tx_ring_tail; /* index to tx de-queue end */ u16 rx_ring_tail; /* index to rx de-queue end */ struct semaphore cmd_mutex; /* Serialises issuing of execute commands */ struct completion execution_cmd; /* Card has completed an execute command */ struct completion xceiver_cmd; /* Card has completed a tx or rx command */ }; /* The station (ethernet) address prefix, used for a sanity check. */ #define SA_ADDR0 0x02 #define SA_ADDR1 0x60 #define SA_ADDR2 0xAC struct mca_adapters_t { unsigned int id; char *name; }; static const struct mca_adapters_t mc32_adapters[] = { { 0x0041, "3COM EtherLink MC/32" }, { 0x8EF5, "IBM High Performance Lan Adapter" }, { 0x0000, NULL } }; /* Macros for ring index manipulations */ static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); }; static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); }; static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); }; /* Index to functions, as function prototypes. 
*/ static int mc32_probe1(struct net_device *dev, int ioaddr); static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len); static int mc32_open(struct net_device *dev); static void mc32_timeout(struct net_device *dev); static netdev_tx_t mc32_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t mc32_interrupt(int irq, void *dev_id); static int mc32_close(struct net_device *dev); static struct net_device_stats *mc32_get_stats(struct net_device *dev); static void mc32_set_multicast_list(struct net_device *dev); static void mc32_reset_multicast_list(struct net_device *dev); static const struct ethtool_ops netdev_ethtool_ops; static void cleanup_card(struct net_device *dev) { struct mc32_local *lp = netdev_priv(dev); unsigned slot = lp->slot; mca_mark_as_unused(slot); mca_set_adapter_name(slot, NULL); free_irq(dev->irq, dev); release_region(dev->base_addr, MC32_IO_EXTENT); } /** * mc32_probe - Search for supported boards * @unit: interface number to use * * Because MCA bus is a real bus and we can scan for cards we could do a * single scan for all boards here. Right now we use the passed in device * structure and scan for only one board. This needs fixing for modules * in particular. */ struct net_device *__init mc32_probe(int unit) { struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local)); static int current_mca_slot = -1; int i; int err; if (!dev) return ERR_PTR(-ENOMEM); if (unit >= 0) sprintf(dev->name, "eth%d", unit); /* Do not check any supplied i/o locations. POS registers usually don't fail :) */ /* MCA cards have POS registers. Autodetecting MCA cards is extremely simple. Just search for the card. 
*/ for(i = 0; (mc32_adapters[i].name != NULL); i++) { current_mca_slot = mca_find_unused_adapter(mc32_adapters[i].id, 0); if(current_mca_slot != MCA_NOTFOUND) { if(!mc32_probe1(dev, current_mca_slot)) { mca_set_adapter_name(current_mca_slot, mc32_adapters[i].name); mca_mark_as_used(current_mca_slot); err = register_netdev(dev); if (err) { cleanup_card(dev); free_netdev(dev); dev = ERR_PTR(err); } return dev; } } } free_netdev(dev); return ERR_PTR(-ENODEV); } static const struct net_device_ops netdev_ops = { .ndo_open = mc32_open, .ndo_stop = mc32_close, .ndo_start_xmit = mc32_send_packet, .ndo_get_stats = mc32_get_stats, .ndo_set_multicast_list = mc32_set_multicast_list, .ndo_tx_timeout = mc32_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /** * mc32_probe1 - Check a given slot for a board and test the card * @dev: Device structure to fill in * @slot: The MCA bus slot being used by this card * * Decode the slot data and configure the card structures. Having done this we * can reset the card and configure it. The card does a full self test cycle * in firmware so we have to wait for it to return and post us either a * failure case or some addresses we use to find the board internals. 
*/ static int __init mc32_probe1(struct net_device *dev, int slot) { static unsigned version_printed; int i, err; u8 POS; u32 base; struct mc32_local *lp = netdev_priv(dev); static u16 mca_io_bases[]={ 0x7280,0x7290, 0x7680,0x7690, 0x7A80,0x7A90, 0x7E80,0x7E90 }; static u32 mca_mem_bases[]={ 0x00C0000, 0x00C4000, 0x00C8000, 0x00CC000, 0x00D0000, 0x00D4000, 0x00D8000, 0x00DC000 }; static char *failures[]={ "Processor instruction", "Processor data bus", "Processor data bus", "Processor data bus", "Adapter bus", "ROM checksum", "Base RAM", "Extended RAM", "82586 internal loopback", "82586 initialisation failure", "Adapter list configuration error" }; /* Time to play MCA games */ if (mc32_debug && version_printed++ == 0) pr_debug("%s", version); pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot); POS = mca_read_stored_pos(slot, 2); if(!(POS&1)) { pr_cont("disabled.\n"); return -ENODEV; } /* Fill in the 'dev' fields. */ dev->base_addr = mca_io_bases[(POS>>1)&7]; dev->mem_start = mca_mem_bases[(POS>>4)&7]; POS = mca_read_stored_pos(slot, 4); if(!(POS&1)) { pr_cont("memory window disabled.\n"); return -ENODEV; } POS = mca_read_stored_pos(slot, 5); i=(POS>>4)&3; if(i==3) { pr_cont("invalid memory window.\n"); return -ENODEV; } i*=16384; i+=16384; dev->mem_end=dev->mem_start + i; dev->irq = ((POS>>2)&3)+9; if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname)) { pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr); return -EBUSY; } pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n", dev->base_addr, dev->irq, dev->mem_start, i/1024); /* We ought to set the cache line size here.. */ /* * Go PROM browsing */ /* Retrieve and print the ethernet address. 
*/ for (i = 0; i < 6; i++) { mca_write_pos(slot, 6, i+12); mca_write_pos(slot, 7, 0); dev->dev_addr[i] = mca_read_pos(slot,3); } pr_info("%s: Address %pM ", dev->name, dev->dev_addr); mca_write_pos(slot, 6, 0); mca_write_pos(slot, 7, 0); POS = mca_read_stored_pos(slot, 4); if(POS&2) pr_cont(": BNC port selected.\n"); else pr_cont(": AUI port selected.\n"); POS=inb(dev->base_addr+HOST_CTRL); POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET; POS&=~HOST_CTRL_INTE; outb(POS, dev->base_addr+HOST_CTRL); /* Reset adapter */ udelay(100); /* Reset off */ POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET); outb(POS, dev->base_addr+HOST_CTRL); udelay(300); /* * Grab the IRQ */ err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev); if (err) { release_region(dev->base_addr, MC32_IO_EXTENT); pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq); goto err_exit_ports; } memset(lp, 0, sizeof(struct mc32_local)); lp->slot = slot; i=0; base = inb(dev->base_addr); while(base == 0xFF) { i++; if(i == 1000) { pr_err("%s: failed to boot adapter.\n", dev->name); err = -ENODEV; goto err_exit_irq; } udelay(1000); if(inb(dev->base_addr+2)&(1<<5)) base = inb(dev->base_addr); } if(base>0) { if(base < 0x0C) pr_err("%s: %s%s.\n", dev->name, failures[base-1], base<0x0A?" 
test failure":""); else pr_err("%s: unknown failure %d.\n", dev->name, base); err = -ENODEV; goto err_exit_irq; } base=0; for(i=0;i<4;i++) { int n=0; while(!(inb(dev->base_addr+2)&(1<<5))) { n++; udelay(50); if(n>100) { pr_err("%s: mailbox read fail (%d).\n", dev->name, i); err = -ENODEV; goto err_exit_irq; } } base|=(inb(dev->base_addr)<<(8*i)); } lp->exec_box=isa_bus_to_virt(dev->mem_start+base); base=lp->exec_box->data[1]<<16|lp->exec_box->data[0]; lp->base = dev->mem_start+base; lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]); lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]); lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]); /* * Descriptor chains (card relative) */ lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */ lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */ lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ init_MUTEX_LOCKED(&lp->cmd_mutex); init_completion(&lp->execution_cmd); init_completion(&lp->xceiver_cmd); pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n", dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base); dev->netdev_ops = &netdev_ops; dev->watchdog_timeo = HZ*5; /* Board does all the work */ dev->ethtool_ops = &netdev_ethtool_ops; return 0; err_exit_irq: free_irq(dev->irq, dev); err_exit_ports: release_region(dev->base_addr, MC32_IO_EXTENT); return err; } /** * mc32_ready_poll - wait until we can feed it a command * @dev: The device to wait for * * Wait until the card becomes ready to accept a command via the * command register. This tells us nothing about the completion * status of any pending commands and takes very little time at all. 
 */

static inline void mc32_ready_poll(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	/* Busy-wait for HOST_STATUS_CRR (command register ready) */
	while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
}


/**
 *	mc32_command_nowait	-	send a command non blocking
 *	@dev: The 3c527 to issue the command to
 *	@cmd: The command word to write to the mailbox
 *	@data: A data block if the command expects one
 *	@len: Length of the data block
 *
 *	Send a command from interrupt state. If there is a command
 *	currently being executed then we return an error of -1. It
 *	simply isn't viable to wait around as commands may be
 *	slow. This can theoretically be starved on SMP, but it's hard
 *	to see a realistic situation. We do not wait for the command
 *	to complete --- we rely on the interrupt handler to tidy up
 *	after us.
 */

static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = -1;

	/* Non-blocking: if the exec mailbox is already in use, report -1 */
	if (down_trylock(&lp->cmd_mutex) == 0)
	{
		lp->cmd_nonblocking=1;
		lp->exec_box->mbox=0;
		lp->exec_box->mbox=cmd;
		memcpy((void *)lp->exec_box->data, data, len);
		barrier();	/* the memcpy forgot the volatile so be sure */

		/* Send the command */
		mc32_ready_poll(dev);
		outb(1<<6, ioaddr+HOST_CMD);

		ret = 0;

		/* Interrupt handler will signal mutex on completion */
	}

	return ret;
}


/**
 *	mc32_command	-	send a command and sleep until completion
 *	@dev: The 3c527 card to issue the command to
 *	@cmd: The command word to write to the mailbox
 *	@data: A data block if the command expects one
 *	@len: Length of the data block
 *
 *	Sends exec commands in a user context. This permits us to wait around
 *	for the replies and also to wait for the command buffer to complete
 *	from a previous command before we execute our command. After our
 *	command completes we will attempt any pending multicast reload
 *	we blocked off by hogging the exec buffer.
 *
 *	You feed the card a command, you wait, it interrupts you get a
 *	reply. All well and good.
 *	The complication arises because you use
 *	commands for filter list changes which come in at bh level from things
 *	like IPV6 group stuff.
 */

static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = 0;

	down(&lp->cmd_mutex);

	/*
	 *	My Turn
	 */

	lp->cmd_nonblocking=0;
	lp->exec_box->mbox=0;
	lp->exec_box->mbox=cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	mc32_ready_poll(dev);
	outb(1<<6, ioaddr+HOST_CMD);

	/* Sleep until the interrupt handler signals execution complete */
	wait_for_completion(&lp->execution_cmd);

	/* Bit 13 of the returned mailbox word flags command failure */
	if(lp->exec_box->mbox&(1<<13))
		ret = -1;

	up(&lp->cmd_mutex);

	/*
	 *	A multicast set got blocked - try it now
	 */

	if(lp->mc_reload_wait)
	{
		mc32_reset_multicast_list(dev);
	}

	return ret;
}


/**
 *	mc32_start_transceiver	-	tell board to restart tx/rx
 *	@dev: The 3c527 card to issue the command to
 *
 *	This may be called from the interrupt state, where it is used
 *	to restart the rx ring if the card runs out of rx buffers.
 *
 *	We must first check if it's ok to (re)start the transceiver. See
 *	mc32_close for details.
 */

static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->xceiver_desired_state==HALTED)
		return;

	/* Give the card the offset to the post-EOL-bit RX descriptor */
	mc32_ready_poll(dev);

	lp->rx_box->mbox=0;
	lp->rx_box->data[0]=lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
	outb(HOST_CMD_START_RX, ioaddr+HOST_CMD);

	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_RESTRT_TX, ioaddr+HOST_CMD);	/* card ignores this on RX restart */

	/* We are not interrupted on start completion */
}


/**
 *	mc32_halt_transceiver	-	tell board to stop tx/rx
 *	@dev: The 3c527 card to issue the command to
 *
 *	We issue the commands to halt the card's transceiver. In fact,
 *	after some experimenting we now simply tell the card to
 *	suspend.
 *	When issuing aborts occasionally odd things happened.
 *
 *	We then sleep until the card has notified us that both rx and
 *	tx have been suspended.
 */

static void mc32_halt_transceiver(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Suspend the receiver and wait for the card's ack interrupt */
	mc32_ready_poll(dev);
	lp->rx_box->mbox=0;
	outb(HOST_CMD_SUSPND_RX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);

	/* Then the same for the transmitter */
	mc32_ready_poll(dev);
	lp->tx_box->mbox=0;
	outb(HOST_CMD_SUSPND_TX, ioaddr+HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);
}


/**
 *	mc32_load_rx_ring	-	load the ring of receive buffers
 *	@dev: 3c527 to build the ring for
 *
 *	This initialises the on-card and driver datastructures to
 *	the point where mc32_start_transceiver() can be called.
 *
 *	The card sets up the receive ring for us. We are required to use the
 *	ring it provides, although the size of the ring is configurable.
 *
 *	We allocate an sk_buff for each ring entry in turn and
 *	initialise its house-keeping info. At the same time, we read
 *	each 'next' pointer in our rx_ring array. This reduces slow
 *	shared-memory reads and makes it easy to access predecessor
 *	descriptors.
 *
 *	We then set the end-of-list bit for the last entry so that the
 *	card will know when it has run out of buffers.
*/ static int mc32_load_rx_ring(struct net_device *dev) { struct mc32_local *lp = netdev_priv(dev); int i; u16 rx_base; volatile struct skb_header *p; rx_base=lp->rx_chain; for(i=0; i<RX_RING_LEN; i++) { lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL); if (lp->rx_ring[i].skb==NULL) { for (;i>=0;i--) kfree_skb(lp->rx_ring[i].skb); return -ENOBUFS; } skb_reserve(lp->rx_ring[i].skb, 18); p=isa_bus_to_virt(lp->base+rx_base); p->control=0; p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data); p->status=0; p->length=1532; lp->rx_ring[i].p=p; rx_base=p->next; } lp->rx_ring[i-1].p->control |= CONTROL_EOL; lp->rx_ring_tail=0; return 0; } /** * mc32_flush_rx_ring - free the ring of receive buffers * @lp: Local data of 3c527 to flush the rx ring of * * Free the buffer for each ring slot. This may be called * before mc32_load_rx_ring(), eg. on error in mc32_open(). * Requires rx skb pointers to point to a valid skb, or NULL. */ static void mc32_flush_rx_ring(struct net_device *dev) { struct mc32_local *lp = netdev_priv(dev); int i; for(i=0; i < RX_RING_LEN; i++) { if (lp->rx_ring[i].skb) { dev_kfree_skb(lp->rx_ring[i].skb); lp->rx_ring[i].skb = NULL; } lp->rx_ring[i].p=NULL; } } /** * mc32_load_tx_ring - load transmit ring * @dev: The 3c527 card to issue the command to * * This sets up the host transmit data-structures. * * First, we obtain from the card it's current postion in the tx * ring, so that we will know where to begin transmitting * packets. * * Then, we read the 'next' pointers from the on-card tx ring into * our tx_ring array to reduce slow shared-mem reads. Finally, we * intitalise the tx house keeping variables. 
 *
 */

static void mc32_load_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	int i;
	u16 tx_base;

	/* The card reports its current position in the tx chain */
	tx_base=lp->tx_box->data[0];

	/* Cache each on-card descriptor pointer to avoid slow shared-mem reads */
	for(i=0 ; i<TX_RING_LEN ; i++)
	{
		p=isa_bus_to_virt(lp->base+tx_base);
		lp->tx_ring[i].p=p;
		lp->tx_ring[i].skb=NULL;

		tx_base=p->next;
	}

	/* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
	/* see mc32_tx_ring */

	atomic_set(&lp->tx_count, TX_RING_LEN-1);
	atomic_set(&lp->tx_ring_head, 0);
	lp->tx_ring_tail=0;
}


/**
 *	mc32_flush_tx_ring	-	free transmit ring
 *	@lp: Local data of 3c527 to flush the tx ring of
 *
 *	If the ring is non-empty, zip over it, freeing any
 *	allocated skb_buffs. The tx ring house-keeping variables are
 *	then reset. Requires tx skb pointers to point to a valid skb,
 *	or NULL.
 */

static void mc32_flush_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int i;

	for (i=0; i < TX_RING_LEN; i++)
	{
		if (lp->tx_ring[i].skb)
		{
			dev_kfree_skb(lp->tx_ring[i].skb);
			lp->tx_ring[i].skb = NULL;
		}
	}

	atomic_set(&lp->tx_count, 0);
	atomic_set(&lp->tx_ring_head, 0);
	lp->tx_ring_tail=0;
}


/**
 *	mc32_open	-	handle 'up' of card
 *	@dev: device to open
 *
 *	The user is trying to bring the card into ready state. This requires
 *	a brief dialogue with the card. Firstly we enable interrupts and then
 *	'indications'. Without these enabled the card doesn't bother telling
 *	us what it has done. This had me puzzled for a week.
 *
 *	We configure the number of card descriptors, then load the network
 *	address and multicast filters. Turn on the workaround mode. This
 *	works around a bug in the 82586 - it asks the firmware to do
 *	so. It has a performance (latency) hit but is needed on busy
 *	[read most] lans. We load the ring with buffers then we kick it
 *	all off.
 */

static int mc32_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct mc32_local *lp = netdev_priv(dev);
	u8 one=1;
	u8 regs;
	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};

	/*
	 *	Interrupts enabled
	 */

	regs=inb(ioaddr+HOST_CTRL);
	regs|=HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/*
	 *	Allow ourselves to issue commands
	 */

	up(&lp->cmd_mutex);


	/*
	 *	Send the indications on command
	 */

	mc32_command(dev, 4, &one, 2);

	/*
	 *	Poke it to make sure it's really dead.
	 */

	mc32_halt_transceiver(dev);
	mc32_flush_tx_ring(dev);

	/*
	 *	Ask card to set up on-card descriptors to our spec
	 */

	if(mc32_command(dev, 8, descnumbuffs, 4)) {
		pr_info("%s: %s rejected our buffer configuration!\n",
			dev->name, cardname);
		mc32_close(dev);
		return -ENOBUFS;
	}

	/* Report new configuration */
	mc32_command(dev, 6, NULL, 0);

	/* Re-read the chain offsets/lengths the card chose for us */
	lp->tx_chain	= lp->exec_box->data[8];	/* Transmit list start offset */
	lp->rx_chain	= lp->exec_box->data[10];	/* Receive list start offset */
	lp->tx_len	= lp->exec_box->data[9];	/* Transmit list count */
	lp->rx_len	= lp->exec_box->data[11];	/* Receive list count */

	/* Set Network Address */
	mc32_command(dev, 1, dev->dev_addr, 6);

	/* Set the filters */
	mc32_set_multicast_list(dev);

	if (WORKAROUND_82586) {
		u16 zero_word=0;
		mc32_command(dev, 0x0D, &zero_word, 2);	/* 82586 bug workaround on */
	}

	mc32_load_tx_ring(dev);

	if(mc32_load_rx_ring(dev))
	{
		mc32_close(dev);
		return -ENOBUFS;
	}

	lp->xceiver_desired_state = RUNNING;

	/* And finally, set the ball rolling... */
	mc32_start_transceiver(dev);

	netif_start_queue(dev);

	return 0;
}


/**
 *	mc32_timeout	-	handle a timeout from the network layer
 *	@dev: 3c527 that timed out
 *
 *	Handle a timeout on transmit from the 3c527. This normally means
 *	bad things as the hardware handles cable timeouts and mess for
 *	us.
 *
 */

static void mc32_timeout(struct net_device *dev)
{
	pr_warning("%s: transmit timed out?\n", dev->name);
	/* Try to restart the adaptor. */
	netif_wake_queue(dev);
}


/**
 *	mc32_send_packet	-	queue a frame for transmit
 *	@skb: buffer to transmit
 *	@dev: 3c527 to send it out of
 *
 *	Transmit a buffer. This normally means throwing the buffer onto
 *	the transmit queue as the queue is quite large. If the queue is
 *	full then we set tx_busy and return. Once the interrupt handler
 *	gets messages telling it to reclaim transmit queue entries, we will
 *	clear tx_busy and the kernel will start calling this again.
 *
 *	We do not disable interrupts or acquire any locks; this can
 *	run concurrently with mc32_tx_ring(), and the function itself
 *	is serialised at a higher layer. However, similarly for the
 *	card itself, we must ensure that we update tx_ring_head only
 *	after we've established a valid packet on the tx ring (and
 *	before we let the card "see" it, to prevent it racing with the
 *	irq handler).
 *
 */

static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	/* Ring full: ask the stack to requeue */
	if(atomic_read(&lp->tx_count)==0) {
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return NETDEV_TX_OK;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length	= unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	np->control	= CONTROL_EOP | CONTROL_EOL;
	wmb();	/* descriptor must be complete before the card can see it */

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);
	p->control	&= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}


/**
 *	mc32_update_stats	-	pull off the on board statistics
 *	@dev: 3c527 to service
 *
 *
 *	Query and reset the on-card stats. There's the small possibility
 *	of a race here, which would result in an underestimation of
 *	actual errors. As such, we'd prefer to keep all our stats
 *	collection in software. As a rule, we do. However it can't be
 *	used for rx errors and collisions as, by default, the card discards
 *	bad rx packets.
 *
 *	Setting the SAV BP in the rx filter command supposedly
 *	stops this behaviour. However, testing shows that it only seems to
 *	enable the collation of on-card rx statistics --- the driver
 *	never sees an RX descriptor with an error status set.
 *
 */

static void mc32_update_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct mc32_stats *st = lp->stats;

	u32 rx_errors=0;

	/* Fold each on-card counter into the kernel stats, then zero the
	   card's counter so the next pass reads a fresh delta */
	rx_errors+=dev->stats.rx_crc_errors   +=st->rx_crc_errors;
	st->rx_crc_errors=0;
	rx_errors+=dev->stats.rx_fifo_errors  +=st->rx_overrun_errors;
	st->rx_overrun_errors=0;
	rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
	st->rx_alignment_errors=0;
	rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
	st->rx_tooshort_errors=0;
	rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
	st->rx_outofresource_errors=0;
	dev->stats.rx_errors=rx_errors;

	/* Number of packets which saw one collision */
	dev->stats.collisions+=st->dataC[10];
	st->dataC[10]=0;

	/* Number of packets which saw 2--15 collisions */
	dev->stats.collisions+=st->dataC[11];
	st->dataC[11]=0;
}


/**
 *	mc32_rx_ring	-	process the receive ring
 *	@dev: 3c527 that needs its receive ring processing
 *
 *
 *	We have received one or more indications from the card that a
 *	receive has completed. The buffer ring thus contains dirty
 *	entries. We walk the ring by iterating over the circular rx_ring
 *	array, starting at the next dirty buffer (which happens to be the
 *	one we finished up at last time around).
 *
 *	For each completed packet, we will either copy it and pass it up
 *	the stack or, if the packet is near MTU sized, we allocate
 *	another buffer and flip the old one up the stack.
 *
 *	We must succeed in keeping a buffer on the ring. If necessary we
 *	will toss a received packet rather than lose a ring entry. Once
 *	the first uncompleted descriptor is found, we move the
 *	End-Of-List bit to include the buffers just processed.
 *
 */

static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) { /* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{

			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK) &&
			    ((newskb=dev_alloc_skb(1532)) != NULL))
			{
				/* Flip the received skb up the stack and put a
				   fresh one on the ring in its place */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame (or no fresh buffer): copy out
				   and keep the ring skb where it is */
				skb=dev_alloc_skb(length+2);

				if(skb==NULL) {
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Hand the descriptor back to the card for re-use */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}


/**
 *	mc32_tx_ring	-	process completed transmits
 *	@dev: 3c527 that needs its transmit ring processing
 *
 *
 *	This operates in a similar fashion to mc32_rx_ring. We iterate
 *	over the transmit ring. For each descriptor which has been
 *	processed by the card, we free its associated buffer and note
 *	any errors. This continues until the transmit ring is emptied
 *	or we reach a descriptor that hasn't yet been processed by the
 *	card.
 *
 */

static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		dev->stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			/* Low nibble of the status word encodes the failure */
			switch(np->status&0x0F)
			{
				case 1:
					dev->stats.tx_aborted_errors++;
					break; /* Max collisions */
				case 2:
					dev->stats.tx_fifo_errors++;
					break;
				case 3:
					dev->stats.tx_carrier_errors++;
					break;
				case 4:
					dev->stats.tx_window_errors++;
					break; /* CTS Lost */
				case 5:
					dev->stats.tx_aborted_errors++;
					break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}

}


/**
 *	mc32_interrupt	-	handle an interrupt from a 3c527
 *	@irq: Interrupt number
 *	@dev_id: 3c527 that requires servicing
 *	@regs: Registers (unused)
 *
 *
 *	An interrupt is raised whenever the 3c527 writes to the command
 *	register. This register contains the message it wishes to send us
 *	packed into a single byte field. We keep reading status entries
 *	until we have processed all the control items, but simply count
 *	transmit and receive reports. When all reports are in we empty the
 *	transceiver rings as appropriate. This saves the overhead of
 *	multiple command requests.
 *
 *	Because MCA is level-triggered, we shouldn't miss indications.
 *	Therefore, we needn't ask the card to suspend interrupts within
 *	this handler.
 *	The card receives an implicit acknowledgment of the
 *	current interrupt when we read the command register.
 *
 */

static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See whats cooking */

	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		status=inb(ioaddr+HOST_CMD);

		pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);

		/* Low three bits: transmit-side report */
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2: /* TX ok */
				tx_event = 1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		/* Next three bits: receive-side report */
		switch(status&7)
		{
			case 0:
				break;
			case 2: /* RX */
				rx_event=1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				dev->stats.rx_dropped++;
				mc32_rx_ring(dev);
				mc32_start_transceiver(dev);
				break;
			default:
				pr_notice("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */

			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		if(status&2)
		{
			/*
			 *	We get interrupted once per
			 *	counter that is about to overflow.
			 */

			mc32_update_stats(dev);
		}
	}


	/*
	 *	Process the transmit and receive rings
	 */

	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}


/**
 *	mc32_close	-	user configuring the 3c527 down
 *	@dev: 3c527 card to shut down
 *
 *	The 3c527 is a bus mastering device. We must be careful how we
 *	shut it down. It may also be running shared interrupt so we have
 *	to be sure to silence it properly
 *
 *	We indicate that the card is closing to the rest of the
 *	driver.
 *	Otherwise, it is possible that the card may run out
 *	of receive buffers and restart the transceiver while we're
 *	trying to close it.
 *
 *	We abort any receive and transmits going on and then wait until
 *	any pending exec commands have completed in other code threads.
 *	In theory we can't get here while that is true, in practice I am
 *	paranoid
 *
 *	We turn off the interrupt enable for the board to be sure it can't
 *	interfere with other devices.
 */

static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 *	Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	mc32_update_stats(dev);

	return 0;
}


/**
 *	mc32_get_stats	-	hand back stats to network layer
 *	@dev: The 3c527 card to handle
 *
 *	We've collected all the stats we can in software already. Now
 *	it's time to update those kept on-card and return the lot.
 *
 */

static struct net_device_stats *mc32_get_stats(struct net_device *dev)
{
	mc32_update_stats(dev);
	return &dev->stats;
}


/**
 *	do_mc32_set_multicast_list	-	attempt to update multicasts
 *	@dev: 3c527 device to load the list on
 *	@retry: indicates this is not the first call.
 *
 *
 *	Actually set or clear the multicast filter for this adaptor. The
 *	locking issues are handled by this routine. We have to track
 *	state as it may take multiple calls to get the command sequence
 *	completed. We just keep trying to schedule the loads until we
 *	manage to process them all.
 *
 *	num_addrs == -1	Promiscuous mode, receive all packets
 *
 *	num_addrs == 0	Normal mode, clear multicast list
 *
 *	num_addrs > 0	Multicast mode, receive normal and MC packets,
 *			and do best-effort filtering.
 *
 *	See mc32_update_stats() regards setting the SAV BP bit.
 *
 */

static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */

	/* Card filter only holds 10 addresses: fall back to promiscuous
	   beyond that, or when the stack asks for it */
	if ((dev->flags&IFF_PROMISC) ||
	    (dev->flags&IFF_ALLMULTI) ||
	    dev->mc_count > 10)
		/* Enable promiscuous mode */
		filt |= 1;
	else if(dev->mc_count)
	{
		unsigned char block[62];
		unsigned char *bp;
		struct dev_mc_list *dmc=dev->mc_list;

		int i;

		/* A fresh request (retry==0) invalidates any list we
		   previously pushed to the card */
		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
			block[1]=0;
			block[0]=dev->mc_count;
			bp=block+2;

			for(i=0;i<dev->mc_count;i++)
			{
				memcpy(bp, dmc->dmi_addr, 6);
				bp+=6;
				dmc=dmc->next;
			}
			/* Exec mailbox busy: remember to reload later */
			if(mc32_command_nowait(dev, 2, block,
					       2+6*dev->mc_count)==-1)
			{
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}

	if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
	{
		lp->mc_reload_wait = 1;
	}
	else {
		lp->mc_reload_wait = 0;
	}
}


/**
 *	mc32_set_multicast_list	-	queue multicast list update
 *	@dev: The 3c527 to use
 *
 *	Commence loading the multicast list. This is called when the kernel
 *	changes the lists. It will override any pending list we are trying
 *	to load.
 */

static void mc32_set_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev,0);
}


/**
 *	mc32_reset_multicast_list	-	reset multicast list
 *	@dev: The 3c527 to use
 *
 *	Attempt the next step in loading the multicast lists. If this attempt
 *	fails to complete then it will be scheduled and this function called
 *	again later from elsewhere.
 */

static void mc32_reset_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev,1);
}

/* Fill in driver identification for 'ethtool -i' */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	/* NOTE(review): fixed-size destination buffers; DRV_NAME and
	 * DRV_VERSION are short constants here, but strlcpy/snprintf
	 * would be more defensive — confirm sizes before changing. */
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
}

/* ethtool msglevel hooks just expose the module-wide debug flag */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return mc32_debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	mc32_debug = level;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

#ifdef MODULE

static struct net_device *this_device;

/**
 *	init_module	-	entry point
 *
 *	Probe and locate a 3c527 card. This really should probe and locate
 *	all the 3c527 cards in the machine not just one of them. Yes you can
 *	insmod multiple modules for now but it's a hack.
 */

int __init init_module(void)
{
	this_device = mc32_probe(-1);
	if (IS_ERR(this_device))
		return PTR_ERR(this_device);
	return 0;
}

/**
 *	cleanup_module	-	free resources for an unload
 *
 *	Unloading time. We release the MCA bus resources and the interrupt
 *	at which point everything is ready to unload. The card must be stopped
 *	at this point or we would not have been called. When we unload we
 *	leave the card stopped but not totally shut down. When the card is
 *	initialized it must be rebooted or the rings reloaded before any
 *	transmit operations are allowed to start scribbling into memory.
 */

void __exit cleanup_module(void)
{
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}

#endif /* MODULE */
gpl-2.0
danguria/linux-kernel-study
arch/arm/mach-omap1/board-nokia770.c
764
9781
/*
 * linux/arch/arm/mach-omap1/board-nokia770.c
 *
 * Modified from board-generic.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/clk.h>
#include <linux/omapfb.h>

#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <mach/gpio.h>
#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/board.h>
#include <plat/keypad.h>
#include <plat/common.h>
#include <plat/dsp_common.h>
#include <plat/hwa742.h>
#include <plat/lcd_mipid.h>
#include <plat/mmc.h>
#include <plat/clock.h>

#define ADS7846_PENDOWN_GPIO	15

static void __init omap_nokia770_init_irq(void)
{
	/* On Nokia 770, the SleepX signal is masked with an
	 * MPUIO line by default. It has to be unmasked for it
	 * to become functional */

	/* SleepX mask direction */
	omap_writew((omap_readw(0xfffb5008) & ~2), 0xfffb5008);
	/* Unmask SleepX signal */
	omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);

	omap1_init_common_hw();
	omap_init_irq();
}

/* Matrix keypad map: KEY(row, col, group | keycode), zero-terminated */
static int nokia770_keymap[] = {
	KEY(0, 1, GROUP_0 | KEY_UP),
	KEY(0, 2, GROUP_1 | KEY_F5),
	KEY(1, 0, GROUP_0 | KEY_LEFT),
	KEY(1, 1, GROUP_0 | KEY_ENTER),
	KEY(1, 2, GROUP_0 | KEY_RIGHT),
	KEY(2, 0, GROUP_1 | KEY_ESC),
	KEY(2, 1, GROUP_0 | KEY_DOWN),
	KEY(2, 2, GROUP_1 | KEY_F4),
	KEY(3, 0, GROUP_2 | KEY_F7),
	KEY(3, 1, GROUP_2 | KEY_F8),
	KEY(3, 2, GROUP_2 | KEY_F6),
	0
};

static struct resource nokia770_kp_resources[] = {
	[0] = {
		.start	= INT_KEYBOARD,
		.end	= INT_KEYBOARD,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct omap_kp_platform_data nokia770_kp_data = {
	.rows		= 8,
	.cols		= 8,
	.keymap		= nokia770_keymap,
	.keymapsize	= ARRAY_SIZE(nokia770_keymap),
	.delay		= 4,
};

static struct platform_device nokia770_kp_device = {
	.name		= "omap-keypad",
	.id		= -1,
	.dev		= {
		.platform_data = &nokia770_kp_data,
	},
	.num_resources	= ARRAY_SIZE(nokia770_kp_resources),
	.resource	= nokia770_kp_resources,
};

static struct platform_device *nokia770_devices[] __initdata = {
	&nokia770_kp_device,
};

/* Hold the LCD in reset on shutdown (when a reset GPIO was configured) */
static void mipid_shutdown(struct mipid_platform_data *pdata)
{
	if (pdata->nreset_gpio != -1) {
		printk(KERN_INFO "shutdown LCD\n");
		gpio_set_value(pdata->nreset_gpio, 0);
		msleep(120);
	}
}

static struct mipid_platform_data nokia770_mipid_platform_data = {
	.shutdown = mipid_shutdown,
};

/* Pull the LCD reset GPIO and data-line count from the board config tags */
static void mipid_dev_init(void)
{
	const struct omap_lcd_config *conf;

	conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
	if (conf != NULL) {
		nokia770_mipid_platform_data.nreset_gpio = conf->nreset_gpio;
		nokia770_mipid_platform_data.data_lines = conf->data_lines;
	}
}

static void ads7846_dev_init(void)
{
	if (gpio_request(ADS7846_PENDOWN_GPIO, "ADS7846 pendown") < 0)
		printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
}

/* Pen-down line is active low */
static int ads7846_get_pendown_state(void)
{
	return !gpio_get_value(ADS7846_PENDOWN_GPIO);
}

static struct ads7846_platform_data nokia770_ads7846_platform_data __initdata = {
	.x_max		= 0x0fff,
	.y_max		= 0x0fff,
	.x_plate_ohms	= 180,
	.pressure_max	= 255,
	.debounce_max	= 10,
	.debounce_tol	= 3,
	.debounce_rep	= 1,
	.get_pendown_state	= ads7846_get_pendown_state,
};

static struct spi_board_info nokia770_spi_board_info[] __initdata = {
	[0] = {
		.modalias	= "lcd_mipid",
		.bus_num	= 2,
		.chip_select	= 3,
		.max_speed_hz	= 12000000,
		.platform_data	= &nokia770_mipid_platform_data,
	},
	[1] = {
		.modalias	= "ads7846",
		.bus_num	= 2,
		.chip_select	= 0,
		.max_speed_hz	= 2500000,
		.irq		= OMAP_GPIO_IRQ(15),
		.platform_data	= &nokia770_ads7846_platform_data,
	},
};

static struct hwa742_platform_data nokia770_hwa742_platform_data = {
	.te_connected		= 1,
};

static void hwa742_dev_init(void)
{
	clk_add_alias("hwa_sys_ck", NULL, "bclk", NULL);
	omapfb_set_ctrl_platform_data(&nokia770_hwa742_platform_data);
}

/* assume no Mini-AB port */

static struct omap_usb_config nokia770_usb_config __initdata = {
	.otg		= 1,
	.register_host	= 1,
	.register_dev	= 1,
	.hmc_mode	= 16,
	.pins[0]	= 6,
};

#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)

#define NOKIA770_GPIO_MMC_POWER		41
#define NOKIA770_GPIO_MMC_SWITCH	23

static int nokia770_mmc_set_power(struct device *dev, int slot, int power_on,
				  int vdd)
{
	gpio_set_value(NOKIA770_GPIO_MMC_POWER, power_on);
	return 0;
}

static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
{
	return gpio_get_value(NOKIA770_GPIO_MMC_SWITCH);
}

static struct omap_mmc_platform_data nokia770_mmc2_data = {
	.nr_slots			= 1,
	.dma_mask			= 0xffffffff,
	.max_freq			= 12000000,
	.slots[0]	= {
		.set_power		= nokia770_mmc_set_power,
		.get_cover_state	= nokia770_mmc_get_cover_state,
		.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
		.name			= "mmcblk",
	},
};

static struct omap_mmc_platform_data *nokia770_mmc_data[OMAP16XX_NR_MMC];

static void __init nokia770_mmc_init(void)
{
	int ret;

	ret = gpio_request(NOKIA770_GPIO_MMC_POWER, "MMC power");
	if (ret < 0)
		return;
	gpio_direction_output(NOKIA770_GPIO_MMC_POWER, 0);

	ret = gpio_request(NOKIA770_GPIO_MMC_SWITCH, "MMC cover");
	if (ret < 0) {
		gpio_free(NOKIA770_GPIO_MMC_POWER);
		return;
	}
	gpio_direction_input(NOKIA770_GPIO_MMC_SWITCH);

	/* Only the second MMC controller is used */
	nokia770_mmc_data[1] = &nokia770_mmc2_data;
	omap1_init_mmc(nokia770_mmc_data, OMAP16XX_NR_MMC);
}

#else
static inline void nokia770_mmc_init(void)
{
}
#endif

#if defined(CONFIG_OMAP_DSP)
/*
 * audio power control
 */
#define	HEADPHONE_GPIO		14
#define	AMPLIFIER_CTRL_GPIO	58

static struct clk *dspxor_ck;
static DEFINE_MUTEX(audio_pwr_lock);
/*
 * audio_pwr_state
 * +--+-------------------------+---------------------------------------+
 * |-1|down			|power-up request -> 0			|
 * +--+-------------------------+---------------------------------------+
 * | 0|up			|power-down(1) request -> 1		|
 * |  |				|power-down(2) request -> (ignore)	|
 * +--+-------------------------+---------------------------------------+
 * | 1|up,			|power-up request -> 0			|
 * |  |received down(1) request	|power-down(2) request -> -1		|
 * +--+-------------------------+---------------------------------------+
 */
static int audio_pwr_state = -1;

static inline void aic23_power_up(void)
{
}
static inline void aic23_power_down(void)
{
}

/*
 * audio_pwr_up / down should be called under audio_pwr_lock
 */
static void nokia770_audio_pwr_up(void)
{
	clk_enable(dspxor_ck);

	/* Turn on codec */
	aic23_power_up();
	if (gpio_get_value(HEADPHONE_GPIO))
		/* HP not connected, turn on amplifier */
		gpio_set_value(AMPLIFIER_CTRL_GPIO, 1);
	else
		/* HP connected, do not turn on amplifier */
		printk("HP connected\n");
}

static void codec_delayed_power_down(struct work_struct *work)
{
	mutex_lock(&audio_pwr_lock);
	/* Only power the codec down if no power-up arrived meanwhile */
	if (audio_pwr_state == -1)
		aic23_power_down();
	clk_disable(dspxor_ck);
	mutex_unlock(&audio_pwr_lock);
}

static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);

static void nokia770_audio_pwr_down(void)
{
	/* Turn off amplifier */
	gpio_set_value(AMPLIFIER_CTRL_GPIO, 0);

	/* Turn off codec: schedule delayed work */
	schedule_delayed_work(&codec_power_down_work, HZ / 20);	/* 50ms */
}

static int
nokia770_audio_pwr_up_request(struct dsp_kfunc_device *kdev, int stage)
{
	mutex_lock(&audio_pwr_lock);
	if (audio_pwr_state == -1)
		nokia770_audio_pwr_up();
	/* force audio_pwr_state = 0, even if it was 1. */
	audio_pwr_state = 0;
	mutex_unlock(&audio_pwr_lock);
	return 0;
}

static int
nokia770_audio_pwr_down_request(struct dsp_kfunc_device *kdev, int stage)
{
	mutex_lock(&audio_pwr_lock);
	/* Two-stage shutdown: see the audio_pwr_state table above */
	switch (stage) {
	case 1:
		if (audio_pwr_state == 0)
			audio_pwr_state = 1;
		break;
	case 2:
		if (audio_pwr_state == 1) {
			nokia770_audio_pwr_down();
			audio_pwr_state = -1;
		}
		break;
	}
	mutex_unlock(&audio_pwr_lock);
	return 0;
}

static struct dsp_kfunc_device nokia770_audio_device = {
	.name	 = "audio",
	.type	 = DSP_KFUNC_DEV_TYPE_AUDIO,
	.enable  = nokia770_audio_pwr_up_request,
	.disable = nokia770_audio_pwr_down_request,
};

static __init int omap_dsp_init(void)
{
	int ret;

	dspxor_ck = clk_get(0, "dspxor_ck");
	if (IS_ERR(dspxor_ck)) {
		printk(KERN_ERR "couldn't acquire dspxor_ck\n");
		return PTR_ERR(dspxor_ck);
	}

	ret = dsp_kfunc_device_register(&nokia770_audio_device);
	if (ret) {
		printk(KERN_ERR
		       "KFUNC device registration faild: %s\n",
		       nokia770_audio_device.name);
		goto out;
	}
	return 0;
 out:
	return ret;
}
#else
#define omap_dsp_init()		do {} while (0)
#endif	/* CONFIG_OMAP_DSP */

static void __init omap_nokia770_init(void)
{
	platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices));
	spi_register_board_info(nokia770_spi_board_info,
				ARRAY_SIZE(nokia770_spi_board_info));
	omap_gpio_init();
	omap_serial_init();
	omap_register_i2c_bus(1, 100, NULL, 0);
	omap_dsp_init();
	hwa742_dev_init();
	ads7846_dev_init();
	mipid_dev_init();
	omap_usb_init(&nokia770_usb_config);
	nokia770_mmc_init();
}

static void __init omap_nokia770_map_io(void)
{
	omap1_map_common_io();
}

MACHINE_START(NOKIA770, "Nokia
770") .phys_io = 0xfff00000, .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc, .boot_params = 0x10000100, .map_io = omap_nokia770_map_io, .init_irq = omap_nokia770_init_irq, .init_machine = omap_nokia770_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
mdalexca/OP3
arch/tile/kernel/vdso/vgettimeofday.c
764
5102
/* * Copyright 2012 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */ #include <linux/time.h> #include <asm/timex.h> #include <asm/unistd.h> #include <asm/vdso.h> #if CHIP_HAS_SPLIT_CYCLE() static inline cycles_t get_cycles_inline(void) { unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH); unsigned int low = __insn_mfspr(SPR_CYCLE_LOW); unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH); while (unlikely(high != high2)) { low = __insn_mfspr(SPR_CYCLE_LOW); high = high2; high2 = __insn_mfspr(SPR_CYCLE_HIGH); } return (((cycles_t)high) << 32) | low; } #define get_cycles get_cycles_inline #endif struct syscall_return_value { long value; long error; }; /* * Find out the vDSO data page address in the process address space. */ inline unsigned long get_datapage(void) { unsigned long ret; /* vdso data page located in the 2nd vDSO page. 
*/ asm volatile ("lnk %0" : "=r"(ret)); ret &= ~(PAGE_SIZE - 1); ret += PAGE_SIZE; return ret; } static inline u64 vgetsns(struct vdso_data *vdso) { return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult; } static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts) { unsigned count; u64 ns; do { count = read_seqcount_begin(&vdso->tb_seq); ts->tv_sec = vdso->wall_time_sec; ns = vdso->wall_time_snsec; ns += vgetsns(vdso); ns >>= vdso->shift; } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); ts->tv_nsec = ns; return 0; } static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts) { unsigned count; u64 ns; do { count = read_seqcount_begin(&vdso->tb_seq); ts->tv_sec = vdso->monotonic_time_sec; ns = vdso->monotonic_time_snsec; ns += vgetsns(vdso); ns >>= vdso->shift; } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); ts->tv_nsec = ns; return 0; } static inline int do_realtime_coarse(struct vdso_data *vdso, struct timespec *ts) { unsigned count; do { count = read_seqcount_begin(&vdso->tb_seq); ts->tv_sec = vdso->wall_time_coarse_sec; ts->tv_nsec = vdso->wall_time_coarse_nsec; } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count))); return 0; } static inline int do_monotonic_coarse(struct vdso_data *vdso, struct timespec *ts) { unsigned count; do { count = read_seqcount_begin(&vdso->tb_seq); ts->tv_sec = vdso->monotonic_time_coarse_sec; ts->tv_nsec = vdso->monotonic_time_coarse_nsec; } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count))); return 0; } struct syscall_return_value __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) { struct syscall_return_value ret = { 0, 0 }; unsigned count; struct vdso_data *vdso = (struct vdso_data *)get_datapage(); /* The use of the timezone is obsolete, normally tz is NULL. 
*/ if (unlikely(tz != NULL)) { do { count = read_seqcount_begin(&vdso->tz_seq); tz->tz_minuteswest = vdso->tz_minuteswest; tz->tz_dsttime = vdso->tz_dsttime; } while (unlikely(read_seqcount_retry(&vdso->tz_seq, count))); } if (unlikely(tv == NULL)) return ret; do_realtime(vdso, (struct timespec *)tv); tv->tv_usec /= 1000; return ret; } int gettimeofday(struct timeval *tv, struct timezone *tz) __attribute__((weak, alias("__vdso_gettimeofday"))); static struct syscall_return_value vdso_fallback_gettime(long clock, struct timespec *ts) { struct syscall_return_value ret; __asm__ __volatile__ ( "swint1" : "=R00" (ret.value), "=R01" (ret.error) : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts) : "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "memory"); return ret; } struct syscall_return_value __vdso_clock_gettime(clockid_t clock, struct timespec *ts) { struct vdso_data *vdso = (struct vdso_data *)get_datapage(); struct syscall_return_value ret = { 0, 0 }; switch (clock) { case CLOCK_REALTIME: do_realtime(vdso, ts); return ret; case CLOCK_MONOTONIC: do_monotonic(vdso, ts); return ret; case CLOCK_REALTIME_COARSE: do_realtime_coarse(vdso, ts); return ret; case CLOCK_MONOTONIC_COARSE: do_monotonic_coarse(vdso, ts); return ret; default: return vdso_fallback_gettime(clock, ts); } } int clock_gettime(clockid_t clock, struct timespec *ts) __attribute__((weak, alias("__vdso_clock_gettime")));
gpl-2.0
STS-Dev-Team/kernel_kexec_modules
drivers/media/video/usbvideo/quickcam_messenger.c
764
27489
/* * Driver for Logitech Quickcam Messenger usb video camera * Copyright (C) Jaya Kumar * * This work was sponsored by CIS(M) Sdn Bhd. * History: * 05/08/2006 - Jaya Kumar * I wrote this based on the konicawc by Simon Evans. * - * Full credit for reverse engineering and creating an initial * working linux driver for the VV6422 goes to the qce-ga project by * Tuukka Toivonen, Jochen Hoenicke, Peter McConnell, * Cristiano De Michele, Georg Acher, Jean-Frederic Clere as well as * others. * --- * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/usb/input.h> #include <linux/slab.h> #include "usbvideo.h" #include "quickcam_messenger.h" /* * Version Information */ #ifdef CONFIG_USB_DEBUG static int debug; #define DEBUG(n, format, arg...) \ if (n <= debug) { \ printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \ } #else #define DEBUG(n, arg...) 
static const int debug; #endif #define DRIVER_VERSION "v0.01" #define DRIVER_DESC "Logitech Quickcam Messenger USB" #define USB_LOGITECH_VENDOR_ID 0x046D #define USB_QCM_PRODUCT_ID 0x08F0 #define MAX_CAMERAS 1 #define MAX_COLOUR 32768 #define MAX_HUE 32768 #define MAX_BRIGHTNESS 32768 #define MAX_CONTRAST 32768 #define MAX_WHITENESS 32768 static int size = SIZE_320X240; static int colour = MAX_COLOUR; static int hue = MAX_HUE; static int brightness = MAX_BRIGHTNESS; static int contrast = MAX_CONTRAST; static int whiteness = MAX_WHITENESS; static struct usbvideo *cams; static struct usb_device_id qcm_table [] = { { USB_DEVICE(USB_LOGITECH_VENDOR_ID, USB_QCM_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE(usb, qcm_table); #ifdef CONFIG_INPUT static void qcm_register_input(struct qcm *cam, struct usb_device *dev) { struct input_dev *input_dev; int error; usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname)); strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname)); cam->input = input_dev = input_allocate_device(); if (!input_dev) { dev_warn(&dev->dev, "insufficient mem for cam input device\n"); return; } input_dev->name = "QCM button"; input_dev->phys = cam->input_physname; usb_to_input_id(dev, &input_dev->id); input_dev->dev.parent = &dev->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY); input_dev->keybit[BIT_WORD(KEY_CAMERA)] = BIT_MASK(KEY_CAMERA); error = input_register_device(cam->input); if (error) { dev_warn(&dev->dev, "Failed to register camera's input device, err: %d\n", error); input_free_device(cam->input); cam->input = NULL; } } static void qcm_unregister_input(struct qcm *cam) { if (cam->input) { input_unregister_device(cam->input); cam->input = NULL; } } static void qcm_report_buttonstat(struct qcm *cam) { if (cam->input) { input_report_key(cam->input, KEY_CAMERA, cam->button_sts); input_sync(cam->input); } } static void qcm_int_irq(struct urb *urb) { int ret; struct uvd *uvd = urb->context; struct qcm *cam; if 
(!CAMERA_IS_OPERATIONAL(uvd)) return; if (!uvd->streaming) return; uvd->stats.urb_count++; if (urb->status < 0) uvd->stats.iso_err_count++; else { if (urb->actual_length > 0 ) { cam = (struct qcm *) uvd->user_data; if (cam->button_sts_buf == 0x88) cam->button_sts = 0x0; else if (cam->button_sts_buf == 0x80) cam->button_sts = 0x1; qcm_report_buttonstat(cam); } } ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) err("usb_submit_urb error (%d)", ret); } static int qcm_setup_input_int(struct qcm *cam, struct uvd *uvd) { int errflag; usb_fill_int_urb(cam->button_urb, uvd->dev, usb_rcvintpipe(uvd->dev, uvd->video_endp + 1), &cam->button_sts_buf, 1, qcm_int_irq, uvd, 16); errflag = usb_submit_urb(cam->button_urb, GFP_KERNEL); if (errflag) err ("usb_submit_int ret %d", errflag); return errflag; } static void qcm_stop_int_data(struct qcm *cam) { usb_kill_urb(cam->button_urb); } static int qcm_alloc_int_urb(struct qcm *cam) { cam->button_urb = usb_alloc_urb(0, GFP_KERNEL); if (!cam->button_urb) return -ENOMEM; return 0; } static void qcm_free_int(struct qcm *cam) { usb_free_urb(cam->button_urb); } #endif /* CONFIG_INPUT */ static int qcm_stv_setb(struct usb_device *dev, u16 reg, u8 val) { int ret; /* we'll wait up to 3 slices but no more */ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x04, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, reg, 0, &val, 1, 3*HZ); return ret; } static int qcm_stv_setw(struct usb_device *dev, u16 reg, __le16 val) { int ret; /* we'll wait up to 3 slices but no more */ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x04, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, reg, 0, &val, 2, 3*HZ); return ret; } static int qcm_stv_getw(struct usb_device *dev, unsigned short reg, __le16 *val) { int ret; /* we'll wait up to 3 slices but no more */ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x04, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, reg, 0, val, 2, 3*HZ); return ret; } static int qcm_camera_on(struct uvd *uvd) { int 
ret; CHECK_RET(ret, qcm_stv_setb(uvd->dev, STV_ISO_ENABLE, 0x01)); return 0; } static int qcm_camera_off(struct uvd *uvd) { int ret; CHECK_RET(ret, qcm_stv_setb(uvd->dev, STV_ISO_ENABLE, 0x00)); return 0; } static void qcm_hsv2rgb(u16 hue, u16 sat, u16 val, u16 *r, u16 *g, u16 *b) { unsigned int segment, valsat; signed int h = (signed int) hue; unsigned int s = (sat - 32768) * 2; /* rescale */ unsigned int v = val; unsigned int p; /* the registers controlling gain are 8 bit of which we affect only the last 4 bits with our gain. we know that if saturation is 0, (unsaturated) then we're grayscale (center axis of the colour cone) so we set rgb=value. we use a formula obtained from wikipedia to map the cone to the RGB plane. it's as follows for the human value case of h=0..360, s=0..1, v=0..1 h_i = h/60 % 6 , f = h/60 - h_i , p = v(1-s) q = v(1 - f*s) , t = v(1 - (1-f)s) h_i==0 => r=v , g=t, b=p h_i==1 => r=q , g=v, b=p h_i==2 => r=p , g=v, b=t h_i==3 => r=p , g=q, b=v h_i==4 => r=t , g=p, b=v h_i==5 => r=v , g=p, b=q the bottom side (the point) and the stuff just up of that is black so we simplify those two cases. */ if (sat < 32768) { /* anything less than this is unsaturated */ *r = val; *g = val; *b = val; return; } if (val <= (0xFFFF/8)) { /* anything less than this is black */ *r = 0; *g = 0; *b = 0; return; } /* the rest of this code is copying tukkat's implementation of the hsv2rgb conversion as taken from qc-usb-messenger code. the 10923 is 0xFFFF/6 to divide the cone into 6 sectors. 
*/ segment = (h + 10923) & 0xFFFF; segment = segment*3 >> 16; /* 0..2: 0=R, 1=G, 2=B */ hue -= segment * 21845; /* -10923..10923 */ h = hue; h *= 3; valsat = v*s >> 16; /* 0..65534 */ p = v - valsat; if (h >= 0) { unsigned int t = v - (valsat * (32769 - h) >> 15); switch (segment) { case 0: /* R-> */ *r = v; *g = t; *b = p; break; case 1: /* G-> */ *r = p; *g = v; *b = t; break; case 2: /* B-> */ *r = t; *g = p; *b = v; break; } } else { unsigned int q = v - (valsat * (32769 + h) >> 15); switch (segment) { case 0: /* ->R */ *r = v; *g = p; *b = q; break; case 1: /* ->G */ *r = q; *g = v; *b = p; break; case 2: /* ->B */ *r = p; *g = q; *b = v; break; } } } static int qcm_sensor_set_gains(struct uvd *uvd, u16 hue, u16 saturation, u16 value) { int ret; u16 r=0,g=0,b=0; /* this code is based on qc-usb-messenger */ qcm_hsv2rgb(hue, saturation, value, &r, &g, &b); r >>= 12; g >>= 12; b >>= 12; /* min val is 8 */ r = max((u16) 8, r); g = max((u16) 8, g); b = max((u16) 8, b); r |= 0x30; g |= 0x30; b |= 0x30; /* set the r,g,b gain registers */ CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x0509, r)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x050A, g)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x050B, b)); /* doing as qc-usb did */ CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x050C, 0x2A)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x050D, 0x01)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143F, 0x01)); return 0; } static int qcm_sensor_set_exposure(struct uvd *uvd, int exposure) { int ret; int formedval; /* calculation was from qc-usb-messenger driver */ formedval = ( exposure >> 12 ); /* max value for formedval is 14 */ formedval = min(formedval, 14); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143A, 0xF0 | formedval)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143F, 0x01)); return 0; } static int qcm_sensor_setlevels(struct uvd *uvd, int brightness, int contrast, int hue, int colour) { int ret; /* brightness is exposure, contrast is gain, colour is saturation */ CHECK_RET(ret, 
qcm_sensor_set_exposure(uvd, brightness)); CHECK_RET(ret, qcm_sensor_set_gains(uvd, hue, colour, contrast)); return 0; } static int qcm_sensor_setsize(struct uvd *uvd, u8 size) { int ret; CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x1505, size)); return 0; } static int qcm_sensor_set_shutter(struct uvd *uvd, int whiteness) { int ret; /* some rescaling as done by the qc-usb-messenger code */ if (whiteness > 0xC000) whiteness = 0xC000 + (whiteness & 0x3FFF)*8; CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143D, (whiteness >> 8) & 0xFF)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143E, (whiteness >> 16) & 0x03)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143F, 0x01)); return 0; } static int qcm_sensor_init(struct uvd *uvd) { struct qcm *cam = (struct qcm *) uvd->user_data; int ret; int i; for (i=0; i < ARRAY_SIZE(regval_table) ; i++) { CHECK_RET(ret, qcm_stv_setb(uvd->dev, regval_table[i].reg, regval_table[i].val)); } CHECK_RET(ret, qcm_stv_setw(uvd->dev, 0x15c1, cpu_to_le16(ISOC_PACKET_SIZE))); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x15c3, 0x08)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, 0x143f, 0x01)); CHECK_RET(ret, qcm_stv_setb(uvd->dev, STV_ISO_ENABLE, 0x00)); CHECK_RET(ret, qcm_sensor_setsize(uvd, camera_sizes[cam->size].cmd)); CHECK_RET(ret, qcm_sensor_setlevels(uvd, uvd->vpic.brightness, uvd->vpic.contrast, uvd->vpic.hue, uvd->vpic.colour)); CHECK_RET(ret, qcm_sensor_set_shutter(uvd, uvd->vpic.whiteness)); CHECK_RET(ret, qcm_sensor_setsize(uvd, camera_sizes[cam->size].cmd)); return 0; } static int qcm_set_camera_size(struct uvd *uvd) { int ret; struct qcm *cam = (struct qcm *) uvd->user_data; CHECK_RET(ret, qcm_sensor_setsize(uvd, camera_sizes[cam->size].cmd)); cam->width = camera_sizes[cam->size].width; cam->height = camera_sizes[cam->size].height; uvd->videosize = VIDEOSIZE(cam->width, cam->height); return 0; } static int qcm_setup_on_open(struct uvd *uvd) { int ret; CHECK_RET(ret, qcm_sensor_set_gains(uvd, uvd->vpic.hue, uvd->vpic.colour, uvd->vpic.contrast)); CHECK_RET(ret, 
qcm_sensor_set_exposure(uvd, uvd->vpic.brightness)); CHECK_RET(ret, qcm_sensor_set_shutter(uvd, uvd->vpic.whiteness)); CHECK_RET(ret, qcm_set_camera_size(uvd)); CHECK_RET(ret, qcm_camera_on(uvd)); return 0; } static void qcm_adjust_picture(struct uvd *uvd) { int ret; struct qcm *cam = (struct qcm *) uvd->user_data; ret = qcm_camera_off(uvd); if (ret) { err("can't turn camera off. abandoning pic adjustment"); return; } /* if there's been a change in contrast, hue, or colour then we need to recalculate hsv in order to update gains */ if ((cam->contrast != uvd->vpic.contrast) || (cam->hue != uvd->vpic.hue) || (cam->colour != uvd->vpic.colour)) { cam->contrast = uvd->vpic.contrast; cam->hue = uvd->vpic.hue; cam->colour = uvd->vpic.colour; ret = qcm_sensor_set_gains(uvd, cam->hue, cam->colour, cam->contrast); if (ret) { err("can't set gains. abandoning pic adjustment"); return; } } if (cam->brightness != uvd->vpic.brightness) { cam->brightness = uvd->vpic.brightness; ret = qcm_sensor_set_exposure(uvd, cam->brightness); if (ret) { err("can't set exposure. abandoning pic adjustment"); return; } } if (cam->whiteness != uvd->vpic.whiteness) { cam->whiteness = uvd->vpic.whiteness; qcm_sensor_set_shutter(uvd, cam->whiteness); if (ret) { err("can't set shutter. abandoning pic adjustment"); return; } } ret = qcm_camera_on(uvd); if (ret) { err("can't reenable camera. 
pic adjustment failed"); return; } } static int qcm_process_frame(struct uvd *uvd, u8 *cdata, int framelen) { int datalen; int totaldata; struct framehdr { __be16 id; __be16 len; }; struct framehdr *fhdr; totaldata = 0; while (framelen) { fhdr = (struct framehdr *) cdata; datalen = be16_to_cpu(fhdr->len); framelen -= 4; cdata += 4; if ((fhdr->id) == cpu_to_be16(0x8001)) { RingQueue_Enqueue(&uvd->dp, marker, 4); totaldata += 4; continue; } if ((fhdr->id & cpu_to_be16(0xFF00)) == cpu_to_be16(0x0200)) { RingQueue_Enqueue(&uvd->dp, cdata, datalen); totaldata += datalen; } framelen -= datalen; cdata += datalen; } return totaldata; } static int qcm_compress_iso(struct uvd *uvd, struct urb *dataurb) { int totlen; int i; unsigned char *cdata; totlen=0; for (i = 0; i < dataurb->number_of_packets; i++) { int n = dataurb->iso_frame_desc[i].actual_length; int st = dataurb->iso_frame_desc[i].status; cdata = dataurb->transfer_buffer + dataurb->iso_frame_desc[i].offset; if (st < 0) { dev_warn(&uvd->dev->dev, "Data error: packet=%d. len=%d. 
status=%d.\n", i, n, st); uvd->stats.iso_err_count++; continue; } if (!n) continue; totlen += qcm_process_frame(uvd, cdata, n); } return totlen; } static void resubmit_urb(struct uvd *uvd, struct urb *urb) { int ret; urb->dev = uvd->dev; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) err("usb_submit_urb error (%d)", ret); } static void qcm_isoc_irq(struct urb *urb) { int len; struct uvd *uvd = urb->context; if (!CAMERA_IS_OPERATIONAL(uvd)) return; if (!uvd->streaming) return; uvd->stats.urb_count++; if (!urb->actual_length) { resubmit_urb(uvd, urb); return; } len = qcm_compress_iso(uvd, urb); resubmit_urb(uvd, urb); uvd->stats.urb_length = len; uvd->stats.data_count += len; if (len) RingQueue_WakeUpInterruptible(&uvd->dp); } static int qcm_start_data(struct uvd *uvd) { struct qcm *cam = (struct qcm *) uvd->user_data; int i; int errflag; int pktsz; int err; pktsz = uvd->iso_packet_len; if (!CAMERA_IS_OPERATIONAL(uvd)) { err("Camera is not operational"); return -EFAULT; } err = usb_set_interface(uvd->dev, uvd->iface, uvd->ifaceAltActive); if (err < 0) { err("usb_set_interface error"); uvd->last_error = err; return -EBUSY; } for (i=0; i < USBVIDEO_NUMSBUF; i++) { int j, k; struct urb *urb = uvd->sbuf[i].urb; urb->dev = uvd->dev; urb->context = uvd; urb->pipe = usb_rcvisocpipe(uvd->dev, uvd->video_endp); urb->interval = 1; urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = uvd->sbuf[i].data; urb->complete = qcm_isoc_irq; urb->number_of_packets = FRAMES_PER_DESC; urb->transfer_buffer_length = pktsz * FRAMES_PER_DESC; for (j=k=0; j < FRAMES_PER_DESC; j++, k += pktsz) { urb->iso_frame_desc[j].offset = k; urb->iso_frame_desc[j].length = pktsz; } } uvd->streaming = 1; uvd->curframe = -1; for (i=0; i < USBVIDEO_NUMSBUF; i++) { errflag = usb_submit_urb(uvd->sbuf[i].urb, GFP_KERNEL); if (errflag) err ("usb_submit_isoc(%d) ret %d", i, errflag); } CHECK_RET(err, qcm_setup_input_int(cam, uvd)); CHECK_RET(err, qcm_camera_on(uvd)); return 0; } static void 
qcm_stop_data(struct uvd *uvd) { struct qcm *cam; int i, j; int ret; if ((uvd == NULL) || (!uvd->streaming) || (uvd->dev == NULL)) return; cam = (struct qcm *) uvd->user_data; ret = qcm_camera_off(uvd); if (ret) dev_warn(&uvd->dev->dev, "couldn't turn the cam off.\n"); uvd->streaming = 0; /* Unschedule all of the iso td's */ for (i=0; i < USBVIDEO_NUMSBUF; i++) usb_kill_urb(uvd->sbuf[i].urb); qcm_stop_int_data(cam); if (!uvd->remove_pending) { /* Set packet size to 0 */ j = usb_set_interface(uvd->dev, uvd->iface, uvd->ifaceAltInactive); if (j < 0) { err("usb_set_interface() error %d.", j); uvd->last_error = j; } } } static void qcm_process_isoc(struct uvd *uvd, struct usbvideo_frame *frame) { struct qcm *cam = (struct qcm *) uvd->user_data; int x; struct rgb *rgbL0; struct rgb *rgbL1; struct bayL0 *bayL0; struct bayL1 *bayL1; int hor,ver,hordel,verdel; assert(frame != NULL); switch (cam->size) { case SIZE_160X120: hor = 162; ver = 124; hordel = 1; verdel = 2; break; case SIZE_320X240: default: hor = 324; ver = 248; hordel = 2; verdel = 4; break; } if (frame->scanstate == ScanState_Scanning) { while (RingQueue_GetLength(&uvd->dp) >= 4 + (hor*verdel + hordel)) { if ((RING_QUEUE_PEEK(&uvd->dp, 0) == 0x00) && (RING_QUEUE_PEEK(&uvd->dp, 1) == 0xff) && (RING_QUEUE_PEEK(&uvd->dp, 2) == 0x00) && (RING_QUEUE_PEEK(&uvd->dp, 3) == 0xff)) { frame->curline = 0; frame->scanstate = ScanState_Lines; frame->frameState = FrameState_Grabbing; RING_QUEUE_DEQUEUE_BYTES(&uvd->dp, 4); /* * if we're starting, we need to discard the first * 4 lines of y bayer data * and the first 2 gr elements of x bayer data */ RING_QUEUE_DEQUEUE_BYTES(&uvd->dp, (hor*verdel + hordel)); break; } RING_QUEUE_DEQUEUE_BYTES(&uvd->dp, 1); } } if (frame->scanstate == ScanState_Scanning) return; /* now we can start processing bayer data so long as we have at least * 2 lines worth of data. this is the simplest demosaicing method that * I could think of. 
I use each 2x2 bayer element without interpolation * to generate 4 rgb pixels. */ while ( frame->curline < cam->height && (RingQueue_GetLength(&uvd->dp) >= hor*2)) { /* get 2 lines of bayer for demosaicing * into 2 lines of RGB */ RingQueue_Dequeue(&uvd->dp, cam->scratch, hor*2); bayL0 = (struct bayL0 *) cam->scratch; bayL1 = (struct bayL1 *) (cam->scratch + hor); /* frame->curline is the rgb y line */ rgbL0 = (struct rgb *) ( frame->data + (cam->width*3*frame->curline)); /* w/2 because we're already doing 2 pixels */ rgbL1 = rgbL0 + (cam->width/2); for (x=0; x < cam->width; x+=2) { rgbL0->r = bayL0->r; rgbL0->g = bayL0->g; rgbL0->b = bayL1->b; rgbL0->r2 = bayL0->r; rgbL0->g2 = bayL1->g; rgbL0->b2 = bayL1->b; rgbL1->r = bayL0->r; rgbL1->g = bayL1->g; rgbL1->b = bayL1->b; rgbL1->r2 = bayL0->r; rgbL1->g2 = bayL1->g; rgbL1->b2 = bayL1->b; rgbL0++; rgbL1++; bayL0++; bayL1++; } frame->seqRead_Length += cam->width*3*2; frame->curline += 2; } /* See if we filled the frame */ if (frame->curline == cam->height) { frame->frameState = FrameState_Done_Hold; frame->curline = 0; uvd->curframe = -1; uvd->stats.frame_num++; } } /* taken from konicawc */ static int qcm_set_video_mode(struct uvd *uvd, struct video_window *vw) { int ret; int newsize; int oldsize; int x = vw->width; int y = vw->height; struct qcm *cam = (struct qcm *) uvd->user_data; if (x > 0 && y > 0) { DEBUG(2, "trying to find size %d,%d", x, y); for (newsize = 0; newsize <= MAX_FRAME_SIZE; newsize++) { if ((camera_sizes[newsize].width == x) && (camera_sizes[newsize].height == y)) break; } } else newsize = cam->size; if (newsize > MAX_FRAME_SIZE) { DEBUG(1, "couldn't find size %d,%d", x, y); return -EINVAL; } if (newsize == cam->size) { DEBUG(1, "Nothing to do"); return 0; } qcm_stop_data(uvd); if (cam->size != newsize) { oldsize = cam->size; cam->size = newsize; ret = qcm_set_camera_size(uvd); if (ret) { err("Couldn't set camera size, err=%d",ret); /* restore the original size */ cam->size = oldsize; return ret; } 
} /* Flush the input queue and clear any current frame in progress */ RingQueue_Flush(&uvd->dp); if (uvd->curframe != -1) { uvd->frame[uvd->curframe].curline = 0; uvd->frame[uvd->curframe].seqRead_Length = 0; uvd->frame[uvd->curframe].seqRead_Index = 0; } CHECK_RET(ret, qcm_start_data(uvd)); return 0; } static int qcm_configure_video(struct uvd *uvd) { int ret; memset(&uvd->vpic, 0, sizeof(uvd->vpic)); memset(&uvd->vpic_old, 0x55, sizeof(uvd->vpic_old)); uvd->vpic.colour = colour; uvd->vpic.hue = hue; uvd->vpic.brightness = brightness; uvd->vpic.contrast = contrast; uvd->vpic.whiteness = whiteness; uvd->vpic.depth = 24; uvd->vpic.palette = VIDEO_PALETTE_RGB24; memset(&uvd->vcap, 0, sizeof(uvd->vcap)); strcpy(uvd->vcap.name, "QCM USB Camera"); uvd->vcap.type = VID_TYPE_CAPTURE; uvd->vcap.channels = 1; uvd->vcap.audios = 0; uvd->vcap.minwidth = camera_sizes[SIZE_160X120].width; uvd->vcap.minheight = camera_sizes[SIZE_160X120].height; uvd->vcap.maxwidth = camera_sizes[SIZE_320X240].width; uvd->vcap.maxheight = camera_sizes[SIZE_320X240].height; memset(&uvd->vchan, 0, sizeof(uvd->vchan)); uvd->vchan.flags = 0 ; uvd->vchan.tuners = 0; uvd->vchan.channel = 0; uvd->vchan.type = VIDEO_TYPE_CAMERA; strcpy(uvd->vchan.name, "Camera"); CHECK_RET(ret, qcm_sensor_init(uvd)); return 0; } static int qcm_probe(struct usb_interface *intf, const struct usb_device_id *devid) { int err; struct uvd *uvd; struct usb_device *dev = interface_to_usbdev(intf); struct qcm *cam; size_t buffer_size; unsigned char video_ep; struct usb_host_interface *interface; struct usb_endpoint_descriptor *endpoint; int i,j; unsigned int ifacenum, ifacenum_inact=0; __le16 sensor_id; /* we don't support multiconfig cams */ if (dev->descriptor.bNumConfigurations != 1) return -ENODEV; /* first check for the video interface and not * the audio interface */ interface = &intf->cur_altsetting[0]; if ((interface->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) || (interface->desc.bInterfaceSubClass != 
USB_CLASS_VENDOR_SPEC)) return -ENODEV; /* walk through each endpoint in each setting in the interface stop when we find the one that's an isochronous IN endpoint. */ for (i=0; i < intf->num_altsetting; i++) { interface = &intf->cur_altsetting[i]; ifacenum = interface->desc.bAlternateSetting; /* walk the end points */ for (j=0; j < interface->desc.bNumEndpoints; j++) { endpoint = &interface->endpoint[j].desc; if (usb_endpoint_dir_out(endpoint)) continue; /* not input then not good */ buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); if (!buffer_size) { ifacenum_inact = ifacenum; continue; /* 0 pkt size is not what we want */ } if (usb_endpoint_xfer_isoc(endpoint)) { video_ep = endpoint->bEndpointAddress; /* break out of the search */ goto good_videoep; } } } /* failed out since nothing useful was found */ err("No suitable endpoint was found\n"); return -ENODEV; good_videoep: /* disable isochronous stream before doing anything else */ err = qcm_stv_setb(dev, STV_ISO_ENABLE, 0); if (err < 0) { err("Failed to disable sensor stream"); return -EIO; } /* Check that this is the same unknown sensor that is known to work. This sensor is suspected to be the ST VV6422C001. I'll check the same value that the qc-usb driver checks. This value is probably not even the sensor ID since it matches the USB dev ID. Oh well. If it doesn't match, it's probably a diff sensor so exit and apologize. */ err = qcm_stv_getw(dev, CMOS_SENSOR_IDREV, &sensor_id); if (err < 0) { err("Couldn't read sensor values. Err %d\n",err); return err; } if (sensor_id != cpu_to_le16(0x08F0)) { err("Sensor ID %x != %x. Unsupported. 
Sorry\n", le16_to_cpu(sensor_id), (0x08F0)); return -ENODEV; } uvd = usbvideo_AllocateDevice(cams); if (!uvd) return -ENOMEM; cam = (struct qcm *) uvd->user_data; /* buf for doing demosaicing */ cam->scratch = kmalloc(324*2, GFP_KERNEL); if (!cam->scratch) /* uvd freed in dereg */ return -ENOMEM; /* yes, if we fail after here, cam->scratch gets freed by qcm_free_uvd */ err = qcm_alloc_int_urb(cam); if (err < 0) return err; /* yes, if we fail after here, int urb gets freed by qcm_free_uvd */ RESTRICT_TO_RANGE(size, SIZE_160X120, SIZE_320X240); cam->width = camera_sizes[size].width; cam->height = camera_sizes[size].height; cam->size = size; uvd->debug = debug; uvd->flags = 0; uvd->dev = dev; uvd->iface = intf->altsetting->desc.bInterfaceNumber; uvd->ifaceAltActive = ifacenum; uvd->ifaceAltInactive = ifacenum_inact; uvd->video_endp = video_ep; uvd->iso_packet_len = buffer_size; uvd->paletteBits = 1L << VIDEO_PALETTE_RGB24; uvd->defaultPalette = VIDEO_PALETTE_RGB24; uvd->canvas = VIDEOSIZE(320, 240); uvd->videosize = VIDEOSIZE(cam->width, cam->height); err = qcm_configure_video(uvd); if (err) { err("failed to configure video settings"); return err; } err = usbvideo_RegisterVideoDevice(uvd); if (err) { /* the uvd gets freed in Deregister */ err("usbvideo_RegisterVideoDevice() failed."); return err; } uvd->max_frame_size = (320 * 240 * 3); qcm_register_input(cam, dev); usb_set_intfdata(intf, uvd); return 0; } static void qcm_free_uvd(struct uvd *uvd) { struct qcm *cam = (struct qcm *) uvd->user_data; kfree(cam->scratch); qcm_unregister_input(cam); qcm_free_int(cam); } static struct usbvideo_cb qcm_driver = { .probe = qcm_probe, .setupOnOpen = qcm_setup_on_open, .processData = qcm_process_isoc, .setVideoMode = qcm_set_video_mode, .startDataPump = qcm_start_data, .stopDataPump = qcm_stop_data, .adjustPicture = qcm_adjust_picture, .userFree = qcm_free_uvd }; static int __init qcm_init(void) { printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 
usbvideo_register( &cams, MAX_CAMERAS, sizeof(struct qcm), "QCM", &qcm_driver, THIS_MODULE, qcm_table); } static void __exit qcm_exit(void) { usbvideo_Deregister(&cams); } module_param(size, int, 0); MODULE_PARM_DESC(size, "Initial Size 0: 160x120 1: 320x240"); module_param(colour, int, 0); MODULE_PARM_DESC(colour, "Initial colour"); module_param(hue, int, 0); MODULE_PARM_DESC(hue, "Initial hue"); module_param(brightness, int, 0); MODULE_PARM_DESC(brightness, "Initial brightness"); module_param(contrast, int, 0); MODULE_PARM_DESC(contrast, "Initial contrast"); module_param(whiteness, int, 0); MODULE_PARM_DESC(whiteness, "Initial whiteness"); #ifdef CONFIG_USB_DEBUG module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level: 0-9 (default=0)"); #endif module_init(qcm_init); module_exit(qcm_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jaya Kumar"); MODULE_DESCRIPTION("QCM USB Camera"); MODULE_SUPPORTED_DEVICE("QCM USB Camera");
gpl-2.0
tomdean1/linux
drivers/input/touchscreen/cyttsp_core.c
1020
14351
/*
 * Core Source for:
 * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
 * For use with Cypress Txx3xx parts.
 * Supported parts include:
 * CY8CTST341
 * CY8CTMA340
 *
 * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
 * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2, and only version 2, as published by the
 * Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contact Cypress Semiconductor at www.cypress.com <kev@cypress.com>
 *
 */

#include <linux/delay.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "cyttsp_core.h"

/* Bootloader number of command keys */
#define CY_NUM_BL_KEYS		8

/* helpers for decoding the fields of the device's status registers */
#define GET_NUM_TOUCHES(x)		((x) & 0x0F)
#define IS_LARGE_AREA(x)		(((x) & 0x10) >> 4)
#define IS_BAD_PKT(x)			((x) & 0x20)
#define IS_VALID_APP(x)			((x) & 0x01)
#define IS_OPERATIONAL_ERR(x)		((x) & 0x3F)
#define GET_HSTMODE(reg)		(((reg) & 0x70) >> 4)
#define GET_BOOTLOADERMODE(reg)		(((reg) & 0x10) >> 4)

/* register offsets within the device's register map */
#define CY_REG_BASE		0x00
#define CY_REG_ACT_DIST		0x1E
#define CY_REG_ACT_INTRVL	0x1D
#define CY_REG_TCH_TMOUT	(CY_REG_ACT_INTRVL + 1)
#define CY_REG_LP_INTRVL	(CY_REG_TCH_TMOUT + 1)
#define CY_MAXZ			255
#define CY_DELAY_DFLT		20 /* ms */
#define CY_DELAY_MAX		500
#define CY_ACT_DIST_DFLT	0xF8
#define CY_HNDSHK_BIT		0x80
/* device mode bits */
#define CY_OPERATE_MODE		0x00
#define CY_SYSINFO_MODE		0x10
/* power mode select bits */
#define CY_SOFT_RESET_MODE	0x01 /* return to Bootloader mode */
#define CY_DEEP_SLEEP_MODE	0x02
#define CY_LOW_POWER_MODE	0x04

/* Slots management */
#define CY_MAX_FINGER		4
#define CY_MAX_ID		16

/* Default "exit bootloader" command block; trailing keys may be
 * overridden by platform data (see cyttsp_exit_bl_mode()). */
static const u8 bl_command[] = {
	0x00,			/* file offset */
	0xFF,			/* command */
	0xA5,			/* exit bootloader command */
	0, 1, 2, 3, 4, 5, 6, 7	/* default keys */
};

/*
 * Read @length bytes from register @command into @buf, retrying up to
 * CY_NUM_RETRY times with a short delay between attempts.
 * Returns 0 on success, -EIO when all retries fail.
 */
static int ttsp_read_block_data(struct cyttsp *ts, u8 command,
				u8 length, void *buf)
{
	int error;
	int tries;

	for (tries = 0; tries < CY_NUM_RETRY; tries++) {
		error = ts->bus_ops->read(ts->dev, ts->xfer_buf, command,
				length, buf);
		if (!error)
			return 0;

		msleep(CY_DELAY_DFLT);
	}

	return -EIO;
}

/*
 * Write @length bytes from @buf to register @command, retrying up to
 * CY_NUM_RETRY times with a short delay between attempts.
 * Returns 0 on success, -EIO when all retries fail.
 */
static int ttsp_write_block_data(struct cyttsp *ts, u8 command,
				 u8 length, void *buf)
{
	int error;
	int tries;

	for (tries = 0; tries < CY_NUM_RETRY; tries++) {
		error = ts->bus_ops->write(ts->dev, ts->xfer_buf, command,
				length, buf);
		if (!error)
			return 0;

		msleep(CY_DELAY_DFLT);
	}

	return -EIO;
}

/* Write a single command byte to the base (host mode) register. */
static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
{
	return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
}

/*
 * Provide the flow-control handshake, if the platform requires it:
 * toggle CY_HNDSHK_BIT in the last-read host mode byte and write it back.
 * No-op (returns 0) when pdata->use_hndshk is not set.
 */
static int cyttsp_handshake(struct cyttsp *ts)
{
	if (ts->pdata->use_hndshk)
		return ttsp_send_command(ts,
				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);

	return 0;
}

/*
 * Refresh the cached bootloader registers (ts->bl_data) from the device.
 * bl_status is preset to 0x10 (the bootloader-mode bit) before the read;
 * presumably so stale data still reads as "in bootloader" — the read
 * error path makes this mostly belt-and-braces (NOTE(review): confirm).
 */
static int cyttsp_load_bl_regs(struct cyttsp *ts)
{
	memset(&ts->bl_data, 0, sizeof(ts->bl_data));
	ts->bl_data.bl_status = 0x10;

	return ttsp_read_block_data(ts, CY_REG_BASE,
				    sizeof(ts->bl_data), &ts->bl_data);
}

/*
 * Ask the TTSP device to leave bootloader mode and enter the application.
 * The command block is the default bl_command[] with the 8 trailing keys
 * optionally replaced by platform-supplied keys. After the write, waits
 * and re-reads the bootloader registers; returns -EIO if the device still
 * reports bootloader mode.
 */
static int cyttsp_exit_bl_mode(struct cyttsp *ts)
{
	int error;
	u8 bl_cmd[sizeof(bl_command)];

	memcpy(bl_cmd, bl_command, sizeof(bl_command));
	if (ts->pdata->bl_keys)
		memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
			ts->pdata->bl_keys, CY_NUM_BL_KEYS);

	error = ttsp_write_block_data(ts, CY_REG_BASE,
				      sizeof(bl_cmd), bl_cmd);
	if (error)
		return error;

	/* wait for TTSP Device to complete the operation */
	msleep(CY_DELAY_DFLT);

	error = cyttsp_load_bl_regs(ts);
	if (error)
		return error;

	if (GET_BOOTLOADERMODE(ts->bl_data.bl_status))
		return -EIO;

	return 0;
}

/*
 * Switch the device to Operational mode and read back the full touch
 * data block to confirm the switch. An act_dist still equal to
 * CY_ACT_DIST_DFLT is treated as failure (-EIO).
 */
static int cyttsp_set_operational_mode(struct cyttsp *ts)
{
	int error;

	error = ttsp_send_command(ts, CY_OPERATE_MODE);
	if (error)
		return error;

	/* wait for TTSP Device to complete switch to Operational mode */
	error = ttsp_read_block_data(ts, CY_REG_BASE,
				     sizeof(ts->xy_data), &ts->xy_data);
	if (error)
		return error;

	error = cyttsp_handshake(ts);
	if (error)
		return error;

	return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
}

/*
 * Switch the device to System Information mode and read the sysinfo
 * register block into ts->sysinfo_data. Both TTS version bytes reading
 * zero is treated as failure (-EIO).
 */
static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
{
	int error;

	memset(&ts->sysinfo_data, 0, sizeof(ts->sysinfo_data));

	/* switch to sysinfo mode */
	error = ttsp_send_command(ts, CY_SYSINFO_MODE);
	if (error)
		return error;

	/* read sysinfo registers */
	msleep(CY_DELAY_DFLT);
	error = ttsp_read_block_data(ts, CY_REG_BASE,
				     sizeof(ts->sysinfo_data),
				     &ts->sysinfo_data);
	if (error)
		return error;

	error = cyttsp_handshake(ts);
	if (error)
		return error;

	if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
		return -EIO;

	return 0;
}

/*
 * Program the interval/timeout registers (active interval, touch timeout,
 * low-power interval), but only when platform data differs from the
 * firmware defaults; otherwise leaves the device untouched.
 */
static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
{
	int retval = 0;

	if (ts->pdata->act_intrvl != CY_ACT_INTRVL_DFLT ||
	    ts->pdata->tch_tmout != CY_TCH_TMOUT_DFLT ||
	    ts->pdata->lp_intrvl != CY_LP_INTRVL_DFLT) {

		u8 intrvl_ray[] = {
			ts->pdata->act_intrvl,
			ts->pdata->tch_tmout,
			ts->pdata->lp_intrvl
		};

		/* set intrvl registers */
		retval = ttsp_write_block_data(ts, CY_REG_ACT_INTRVL,
					sizeof(intrvl_ray), intrvl_ray);
		msleep(CY_DELAY_DFLT);
	}

	return retval;
}

/*
 * Issue a soft reset and wait for the resulting "bootloader ready"
 * interrupt. The IRQ is temporarily enabled (it is kept disabled outside
 * active use) and ts->state is set to CY_BL_STATE so the IRQ handler
 * completes ts->bl_ready instead of reading touch data.
 * Returns 0 on completion, -EIO on timeout.
 */
static int cyttsp_soft_reset(struct cyttsp *ts)
{
	unsigned long timeout;
	int retval;

	/* wait for interrupt to set ready completion */
	reinit_completion(&ts->bl_ready);
	ts->state = CY_BL_STATE;

	enable_irq(ts->irq);

	retval = ttsp_send_command(ts, CY_SOFT_RESET_MODE);
	if (retval)
		goto out;

	timeout = wait_for_completion_timeout(&ts->bl_ready,
			msecs_to_jiffies(CY_DELAY_DFLT * CY_DELAY_MAX));
	retval = timeout ? 0 : -EIO;

out:
	ts->state = CY_IDLE_STATE;
	disable_irq(ts->irq);
	return retval;
}

/* Program the active-distance (gesture) register from platform data. */
static int cyttsp_act_dist_setup(struct cyttsp *ts)
{
	u8 act_dist_setup = ts->pdata->act_dist;

	/* Init gesture; active distance setup */
	return ttsp_write_block_data(ts, CY_REG_ACT_DIST,
				sizeof(act_dist_setup), &act_dist_setup);
}

/*
 * Unpack the four 4-bit track IDs (two per byte, high nibble first)
 * from the touch data block into ids[0..3].
 */
static void cyttsp_extract_track_ids(struct cyttsp_xydata *xy_data, int *ids)
{
	ids[0] = xy_data->touch12_id >> 4;
	ids[1] = xy_data->touch12_id & 0xF;
	ids[2] = xy_data->touch34_id >> 4;
	ids[3] = xy_data->touch34_id & 0xF;
}

/* Return the idx'th touch record (0-3) from the data block, NULL if out
 * of range. */
static const struct cyttsp_tch *cyttsp_get_tch(struct cyttsp_xydata *xy_data,
					       int idx)
{
	switch (idx) {
	case 0:
		return &xy_data->tch1;
	case 1:
		return &xy_data->tch2;
	case 2:
		return &xy_data->tch3;
	case 3:
		return &xy_data->tch4;
	default:
		return NULL;
	}
}

/*
 * Translate the freshly-read touch data block into input MT events.
 * A large-area contact, an out-of-range touch count, or a bad packet
 * forces num_tch to 0, which releases every active slot below.
 * Coordinates are big-endian on the wire, hence be16_to_cpu().
 */
static void cyttsp_report_tchdata(struct cyttsp *ts)
{
	struct cyttsp_xydata *xy_data = &ts->xy_data;
	struct input_dev *input = ts->input;
	int num_tch = GET_NUM_TOUCHES(xy_data->tt_stat);
	const struct cyttsp_tch *tch;
	int ids[CY_MAX_ID];
	int i;
	DECLARE_BITMAP(used, CY_MAX_ID);

	if (IS_LARGE_AREA(xy_data->tt_stat) == 1) {
		/* terminate all active tracks */
		num_tch = 0;
		dev_dbg(ts->dev, "%s: Large area detected\n", __func__);
	} else if (num_tch > CY_MAX_FINGER) {
		/* terminate all active tracks */
		num_tch = 0;
		dev_dbg(ts->dev, "%s: Num touch error detected\n", __func__);
	} else if (IS_BAD_PKT(xy_data->tt_mode)) {
		/* terminate all active tracks */
		num_tch = 0;
		dev_dbg(ts->dev, "%s: Invalid buffer detected\n", __func__);
	}

	cyttsp_extract_track_ids(xy_data, ids);

	bitmap_zero(used, CY_MAX_ID);

	for (i = 0; i < num_tch; i++) {
		tch = cyttsp_get_tch(xy_data, i);

		input_mt_slot(input, ids[i]);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X, be16_to_cpu(tch->x));
		input_report_abs(input, ABS_MT_POSITION_Y, be16_to_cpu(tch->y));
		input_report_abs(input, ABS_MT_TOUCH_MAJOR, tch->z);

		__set_bit(ids[i], used);
	}

	/* release every slot that was not reported in this packet */
	for (i = 0; i < CY_MAX_ID; i++) {
		if (test_bit(i, used))
			continue;

		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
	}

	input_sync(input);
}

/*
 * Threaded IRQ handler. Behavior depends on the driver state:
 * - CY_BL_STATE: interrupt signals bootloader readiness (soft reset path).
 * - CY_IDLE_STATE: data is read and handshaken but not reported.
 * - otherwise: read the touch block and report it; if the device has
 *   unexpectedly fallen back to bootloader mode, try to recover.
 */
static irqreturn_t cyttsp_irq(int irq, void *handle)
{
	struct cyttsp *ts = handle;
	int error;

	if (unlikely(ts->state == CY_BL_STATE)) {
		complete(&ts->bl_ready);
		goto out;
	}

	/* Get touch data from CYTTSP device */
	error = ttsp_read_block_data(ts, CY_REG_BASE,
				 sizeof(struct cyttsp_xydata), &ts->xy_data);
	if (error)
		goto out;

	/* provide flow control handshake */
	error = cyttsp_handshake(ts);
	if (error)
		goto out;

	if (unlikely(ts->state == CY_IDLE_STATE))
		goto out;

	if (GET_BOOTLOADERMODE(ts->xy_data.tt_mode)) {
		/*
		 * TTSP device has reset back to bootloader mode.
		 * Restore to operational mode.
		 */
		error = cyttsp_exit_bl_mode(ts);
		if (error) {
			dev_err(ts->dev,
				"Could not return to operational mode, err: %d\n",
				error);
			ts->state = CY_IDLE_STATE;
		}
	} else {
		cyttsp_report_tchdata(ts);
	}

out:
	return IRQ_HANDLED;
}

/*
 * Full power-on sequence: soft reset, leave bootloader (when a valid
 * application image is present), verify operational state, program
 * sysinfo and active-distance registers, then mark the driver active.
 * Returns 0 on success or a negative error code.
 */
static int cyttsp_power_on(struct cyttsp *ts)
{
	int error;

	error = cyttsp_soft_reset(ts);
	if (error)
		return error;

	error = cyttsp_load_bl_regs(ts);
	if (error)
		return error;

	if (GET_BOOTLOADERMODE(ts->bl_data.bl_status) &&
	    IS_VALID_APP(ts->bl_data.bl_status)) {
		error = cyttsp_exit_bl_mode(ts);
		if (error)
			return error;
	}

	if (GET_HSTMODE(ts->bl_data.bl_file) != CY_OPERATE_MODE ||
	    IS_OPERATIONAL_ERR(ts->bl_data.bl_status)) {
		return -ENODEV;
	}

	error = cyttsp_set_sysinfo_mode(ts);
	if (error)
		return error;

	error = cyttsp_set_sysinfo_regs(ts);
	if (error)
		return error;

	error = cyttsp_set_operational_mode(ts);
	if (error)
		return error;

	/* init active distance */
	error = cyttsp_act_dist_setup(ts);
	if (error)
		return error;

	ts->state = CY_ACTIVE_STATE;

	return 0;
}

/*
 * Wake the device and enable its interrupt.
 * Returns 0 on success, -EIO if the device is not in operational mode
 * after the wake-up read.
 */
static int cyttsp_enable(struct cyttsp *ts)
{
	int error;

	/*
	 * The device firmware can wake on an I2C or SPI memory slave
	 * address match. So just reading a register is sufficient to
	 * wake up the device. The first read attempt will fail but it
	 * will wake it up making the second read attempt successful.
	 */
	error = ttsp_read_block_data(ts, CY_REG_BASE,
				     sizeof(ts->xy_data), &ts->xy_data);
	if (error)
		return error;

	if (GET_HSTMODE(ts->xy_data.hst_mode))
		return -EIO;

	enable_irq(ts->irq);

	return 0;
}

/* Put the device into low-power mode and disable its interrupt. */
static int cyttsp_disable(struct cyttsp *ts)
{
	int error;

	error = ttsp_send_command(ts, CY_LOW_POWER_MODE);
	if (error)
		return error;

	disable_irq(ts->irq);

	return 0;
}

/*
 * PM suspend: disable the device only when the input device is open
 * (has users); records the suspended state for open()/close().
 */
static int __maybe_unused cyttsp_suspend(struct device *dev)
{
	struct cyttsp *ts = dev_get_drvdata(dev);
	int retval = 0;

	mutex_lock(&ts->input->mutex);

	if (ts->input->users) {
		retval = cyttsp_disable(ts);
		if (retval == 0)
			ts->suspended = true;
	}

	mutex_unlock(&ts->input->mutex);

	return retval;
}

/*
 * PM resume: re-enable the device when the input device is open.
 * The suspended flag is cleared unconditionally.
 */
static int __maybe_unused cyttsp_resume(struct device *dev)
{
	struct cyttsp *ts = dev_get_drvdata(dev);

	mutex_lock(&ts->input->mutex);

	if (ts->input->users)
		cyttsp_enable(ts);

	ts->suspended = false;

	mutex_unlock(&ts->input->mutex);

	return 0;
}

SIMPLE_DEV_PM_OPS(cyttsp_pm_ops, cyttsp_suspend, cyttsp_resume);
EXPORT_SYMBOL_GPL(cyttsp_pm_ops);

/* input_dev open(): power up the device unless we are suspended. */
static int cyttsp_open(struct input_dev *dev)
{
	struct cyttsp *ts = input_get_drvdata(dev);
	int retval = 0;

	if (!ts->suspended)
		retval = cyttsp_enable(ts);

	return retval;
}

/* input_dev close(): power down the device unless already suspended. */
static void cyttsp_close(struct input_dev *dev)
{
	struct cyttsp *ts = input_get_drvdata(dev);

	if (!ts->suspended)
		cyttsp_disable(ts);
}

/*
 * Shared probe entry point for the I2C/SPI bus glue drivers.
 * Allocates the driver state (with a bus-specific transfer buffer tail),
 * sets up the input device, requests the (initially disabled) threaded
 * IRQ, powers the device on and registers the input device.
 * Returns the new cyttsp instance or an ERR_PTR on failure.
 */
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
			    struct device *dev, int irq, size_t xfer_buf_size)
{
	const struct cyttsp_platform_data *pdata = dev_get_platdata(dev);
	struct cyttsp *ts;
	struct input_dev *input_dev;
	int error;

	if (!pdata || !pdata->name || irq <= 0) {
		error = -EINVAL;
		goto err_out;
	}

	/* single allocation: struct cyttsp followed by the xfer buffer */
	ts = kzalloc(sizeof(*ts) + xfer_buf_size, GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!ts || !input_dev) {
		error = -ENOMEM;
		goto err_free_mem;
	}

	ts->dev = dev;
	ts->input = input_dev;
	ts->pdata = dev_get_platdata(dev);
	ts->bus_ops = bus_ops;
	ts->irq = irq;

	init_completion(&ts->bl_ready);
	snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));

	if (pdata->init) {
		error = pdata->init();
		if (error) {
			dev_err(ts->dev, "platform init failed, err: %d\n",
				error);
			goto err_free_mem;
		}
	}

	input_dev->name = pdata->name;
	input_dev->phys = ts->phys;
	input_dev->id.bustype = bus_ops->bustype;
	input_dev->dev.parent = ts->dev;

	input_dev->open = cyttsp_open;
	input_dev->close = cyttsp_close;

	input_set_drvdata(input_dev, ts);

	__set_bit(EV_ABS, input_dev->evbit);
	input_set_abs_params(input_dev, ABS_MT_POSITION_X,
			     0, pdata->maxx, 0, 0);
	input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
			     0, pdata->maxy, 0, 0);
	input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
			     0, CY_MAXZ, 0, 0);

	input_mt_init_slots(input_dev, CY_MAX_ID, 0);

	/* IRQ is requested, then immediately disabled; cyttsp_soft_reset()
	 * and cyttsp_enable() toggle it as needed. */
	error = request_threaded_irq(ts->irq, NULL, cyttsp_irq,
				     IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				     pdata->name, ts);
	if (error) {
		dev_err(ts->dev, "failed to request IRQ %d, err: %d\n",
			ts->irq, error);
		goto err_platform_exit;
	}

	disable_irq(ts->irq);

	error = cyttsp_power_on(ts);
	if (error)
		goto err_free_irq;

	error = input_register_device(input_dev);
	if (error) {
		dev_err(ts->dev, "failed to register input device: %d\n",
			error);
		goto err_free_irq;
	}

	return ts;

err_free_irq:
	free_irq(ts->irq, ts);
err_platform_exit:
	if (pdata->exit)
		pdata->exit();
err_free_mem:
	input_free_device(input_dev);
	kfree(ts);
err_out:
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(cyttsp_probe);

/* Tear down everything cyttsp_probe() set up, in reverse order. */
void cyttsp_remove(struct cyttsp *ts)
{
	free_irq(ts->irq, ts);
	input_unregister_device(ts->input);
	if (ts->pdata->exit)
		ts->pdata->exit();
	kfree(ts);
}
EXPORT_SYMBOL_GPL(cyttsp_remove);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver core");
MODULE_AUTHOR("Cypress");
gpl-2.0
mayli/wrapfs-latest
drivers/net/ethernet/altera/altera_utils.c
2044
1368
/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "altera_tse.h"
#include "altera_utils.h"

/* Set the bits in @bit_mask at register offset @offs (read-modify-write). */
void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
	csrwr32(csrrd32(ioaddr, offs) | bit_mask, ioaddr, offs);
}

/* Clear the bits in @bit_mask at register offset @offs (read-modify-write). */
void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
	csrwr32(csrrd32(ioaddr, offs) & ~bit_mask, ioaddr, offs);
}

/* Return 1 if any bit of @bit_mask is set in the register, 0 otherwise. */
int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
	return !!(csrrd32(ioaddr, offs) & bit_mask);
}

/* Return 1 if no bit of @bit_mask is set in the register, 0 otherwise. */
int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
	return !(csrrd32(ioaddr, offs) & bit_mask);
}
gpl-2.0
CyanogenMod/android_kernel_samsung_manta
drivers/net/ethernet/intel/e1000e/phy.c
2044
91915
/******************************************************************************* Intel PRO/1000 Linux driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "e1000.h" static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); static s32 e1000_wait_autoneg(struct e1000_hw *hw); static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read, bool page_set); static u32 e1000_get_phy_addr_for_hv_page(u32 page); static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_m88_cable_length_table) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124}; #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ ARRAY_SIZE(e1000_igp_2_cable_length_table) #define BM_PHY_REG_PAGE(offset) \ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) #define BM_PHY_REG_NUM(offset) \ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ ~MAX_PHY_REG_ADDRESS))) #define HV_INTC_FC_PAGE_START 768 #define I82578_ADDR_REG 29 #define I82577_ADDR_REG 16 #define I82577_CFG_REG 22 #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 
*/ #define I82577_CTRL_REG 23 /* 82577 specific PHY registers */ #define I82577_PHY_CTRL_2 18 #define I82577_PHY_STATUS_2 26 #define I82577_PHY_DIAG_STATUS 31 /* I82577 PHY Status 2 */ #define I82577_PHY_STATUS2_REV_POLARITY 0x0400 #define I82577_PHY_STATUS2_MDIX 0x0800 #define I82577_PHY_STATUS2_SPEED_MASK 0x0300 #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 /* I82577 PHY Control 2 */ #define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 #define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 /* I82577 PHY Diagnostics Status */ #define I82577_DSTATUS_CABLE_LENGTH 0x03FC #define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 /* BM PHY Copper Specific Control 1 */ #define BM_CS_CTRL1 16 #define HV_MUX_DATA_CTRL PHY_REG(776, 16) #define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 #define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 /** * e1000e_check_reset_block_generic - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Read the PHY management control register and check whether a PHY reset * is blocked. If a reset is not blocked return 0, otherwise * return E1000_BLK_PHY_RESET (12). **/ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) { u32 manc; manc = er32(MANC); return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; } /** * e1000e_get_phy_id - Retrieve the PHY ID and revision * @hw: pointer to the HW structure * * Reads the PHY registers and stores the PHY ID and possibly the PHY * revision in the hardware structure. 
**/ s32 e1000e_get_phy_id(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = 0; u16 phy_id; u16 retry_count = 0; if (!phy->ops.read_reg) return 0; while (retry_count < 2) { ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); if (ret_val) return ret_val; phy->id = (u32)(phy_id << 16); udelay(20); ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); if (ret_val) return ret_val; phy->id |= (u32)(phy_id & PHY_REVISION_MASK); phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); if (phy->id != 0 && phy->id != PHY_REVISION_MASK) return 0; retry_count++; } return 0; } /** * e1000e_phy_reset_dsp - Reset PHY DSP * @hw: pointer to the HW structure * * Reset the digital signal processor. **/ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) { s32 ret_val; ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); if (ret_val) return ret_val; return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); } /** * e1000e_read_phy_reg_mdic - Read MDI control register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; if (offset > MAX_PHY_REG_ADDRESS) { e_dbg("PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } /* * Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
*/ mdic = ((offset << E1000_MDIC_REG_SHIFT) | (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_READ)); ew32(MDIC, mdic); /* * Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { e_dbg("MDI Read did not complete\n"); return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } *data = (u16) mdic; /* * Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) udelay(100); return 0; } /** * e1000e_write_phy_reg_mdic - Write MDI control register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write to register at offset * * Writes data to MDI control register in the PHY at offset. **/ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; if (offset > MAX_PHY_REG_ADDRESS) { e_dbg("PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } /* * Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
*/ mdic = (((u32)data) | (offset << E1000_MDIC_REG_SHIFT) | (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_WRITE)); ew32(MDIC, mdic); /* * Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { e_dbg("MDI Write did not complete\n"); return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { e_dbg("MDI Error\n"); return -E1000_ERR_PHY; } /* * Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) udelay(100); return 0; } /** * e1000e_read_phy_reg_m88 - Read m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. **/ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); hw->phy.ops.release(hw); return ret_val; } /** * e1000e_write_phy_reg_m88 - Write m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
**/ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); hw->phy.ops.release(hw); return ret_val; } /** * e1000_set_page_igp - Set page as on IGP-like PHY(s) * @hw: pointer to the HW structure * @page: page to set (shifted left when necessary) * * Sets PHY page required for PHY register access. Assumes semaphore is * already acquired. Note, this function sets phy.addr to 1 so the caller * must set it appropriately (if necessary) after this function returns. **/ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) { e_dbg("Setting page 0x%x\n", page); hw->phy.addr = 1; return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); } /** * __e1000e_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset * and stores the retrieved information in data. Release any acquired * semaphores before exiting. **/ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, bool locked) { s32 ret_val = 0; if (!locked) { if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); if (!ret_val) ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000e_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore then reads the PHY register at offset and stores the * retrieved information in data. 
* Release the acquired semaphore before exiting. **/ s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000e_read_phy_reg_igp(hw, offset, data, false); } /** * e1000e_read_phy_reg_igp_locked - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired. **/ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000e_read_phy_reg_igp(hw, offset, data, true); } /** * e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, bool locked) { s32 ret_val = 0; if (!locked) { if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); if (!ret_val) ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
**/ s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000e_write_phy_reg_igp(hw, offset, data, false); } /** * e1000e_write_phy_reg_igp_locked - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset. * Assumes semaphore already acquired. **/ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000e_write_phy_reg_igp(hw, offset, data, true); } /** * __e1000_read_kmrn_reg - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then reads the PHY register at offset * using the kumeran interface. The information retrieved is stored in data. * Release any acquired semaphores before exiting. **/ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, bool locked) { u32 kmrnctrlsta; if (!locked) { s32 ret_val = 0; if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); udelay(2); kmrnctrlsta = er32(KMRNCTRLSTA); *data = (u16)kmrnctrlsta; if (!locked) hw->phy.ops.release(hw); return 0; } /** * e1000e_read_kmrn_reg - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore then reads the PHY register at offset using the * kumeran interface. The information retrieved is stored in data. * Release the acquired semaphore before exiting. 
**/ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_kmrn_reg(hw, offset, data, false); } /** * e1000e_read_kmrn_reg_locked - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset using the kumeran interface. The * information retrieved is stored in data. * Assumes semaphore already acquired. **/ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_kmrn_reg(hw, offset, data, true); } /** * __e1000_write_kmrn_reg - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then write the data to PHY register * at the offset using the kumeran interface. Release any acquired semaphores * before exiting. **/ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, bool locked) { u32 kmrnctrlsta; if (!locked) { s32 ret_val = 0; if (!hw->phy.ops.acquire) return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); e1e_flush(); udelay(2); if (!locked) hw->phy.ops.release(hw); return 0; } /** * e1000e_write_kmrn_reg - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to the PHY register at the offset * using the kumeran interface. Release the acquired semaphore before exiting. 
**/ s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, false); } /** * e1000e_write_kmrn_reg_locked - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Write the data to PHY register at the offset using the kumeran interface. * Assumes semaphore already acquired. **/ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, true); } /** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * * Sets up Carrier-sense on Transmit and downshift values. **/ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) { s32 ret_val; u16 phy_data; /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); if (ret_val) return ret_val; phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; return e1e_wphy(hw, I82577_CFG_REG, phy_data); } /** * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock * and downshift values are set also. **/ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* For BM PHY this bit is downshift enable */ if (phy->type != e1000_phy_bm) phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; /* * Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode * 2 - MDI-X mode * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) */ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; switch (phy->mdix) { case 1: phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; break; case 2: phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; break; case 3: phy_data |= M88E1000_PSCR_AUTO_X_1000T; break; case 0: default: phy_data |= M88E1000_PSCR_AUTO_X_MODE; break; } /* * Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; if (phy->disable_polarity_correction == 1) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift on BM (disabled by default) */ if (phy->type == e1000_phy_bm) phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; if ((phy->type == e1000_phy_m88) && (phy->revision < E1000_REVISION_4) && (phy->id != BME1000_E_PHY_ID_R2)) { /* * Force TX_CLK in the Extended PHY Specific Control Register * to 25MHz clock. */ ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data |= M88E1000_EPSCR_TX_CLK_25; if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { /* 82573L PHY - set the downshift counter to 5x. 
*/ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; } else { /* Configure Master and Slave downshift values */ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); } ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; } if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { /* Set PHY page 0, register 29 to 0x0003 */ ret_val = e1e_wphy(hw, 29, 0x0003); if (ret_val) return ret_val; /* Set PHY page 0, register 30 to 0x0000 */ ret_val = e1e_wphy(hw, 30, 0x0000); if (ret_val) return ret_val; } /* Commit the changes. */ ret_val = e1000e_commit_phy(hw); if (ret_val) { e_dbg("Error committing the PHY changes\n"); return ret_val; } if (phy->type == e1000_phy_82578) { ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* 82578 PHY - set the downshift count to 1x. */ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; } return 0; } /** * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link * @hw: pointer to the HW structure * * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for * igp PHY's. **/ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1000_phy_hw_reset(hw); if (ret_val) { e_dbg("Error resetting the PHY.\n"); return ret_val; } /* * Wait 100ms for MAC to configure PHY from NVM settings, to avoid * timeout issues when LFS is enabled. 
*/ msleep(100); /* disable lplu d0 during driver init */ ret_val = e1000_set_d0_lplu_state(hw, false); if (ret_val) { e_dbg("Error Disabling LPLU D0\n"); return ret_val; } /* Configure mdi-mdix settings */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCR_AUTO_MDIX; switch (phy->mdix) { case 1: data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; break; case 2: data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; break; case 0: default: data |= IGP01E1000_PSCR_AUTO_MDIX; break; } ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); if (ret_val) return ret_val; /* set auto-master slave resolution settings */ if (hw->mac.autoneg) { /* * when autonegotiation advertisement is only 1000Mbps then we * should disable SmartSpeed and enable Auto MasterSlave * resolution as hardware default. */ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { /* Disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; /* Set auto Master/Slave resolution process */ ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); if (ret_val) return ret_val; data &= ~CR_1000T_MS_ENABLE; ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); if (ret_val) return ret_val; } ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); if (ret_val) return ret_val; /* load defaults for future use */ phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? ((data & CR_1000T_MS_VALUE) ? 
e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; switch (phy->ms_type) { case e1000_ms_force_master: data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); break; case e1000_ms_force_slave: data |= CR_1000T_MS_ENABLE; data &= ~(CR_1000T_MS_VALUE); break; case e1000_ms_auto: data &= ~CR_1000T_MS_ENABLE; default: break; } ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); } return ret_val; } /** * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation * @hw: pointer to the HW structure * * Reads the MII auto-neg advertisement register and/or the 1000T control * register and if the PHY is already setup for auto-negotiation, then * return successful. Otherwise, setup advertisement and flow control to * the appropriate values for the wanted auto-negotiation. **/ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 mii_autoneg_adv_reg; u16 mii_1000t_ctrl_reg = 0; phy->autoneg_advertised &= phy->autoneg_mask; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); if (ret_val) return ret_val; if (phy->autoneg_mask & ADVERTISE_1000_FULL) { /* Read the MII 1000Base-T Control Register (Address 9). */ ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); if (ret_val) return ret_val; } /* * Need to parse both autoneg_advertised and fc and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ /* * First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). 
*/ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | NWAY_AR_100TX_HD_CAPS | NWAY_AR_10T_FD_CAPS | NWAY_AR_10T_HD_CAPS); mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); /* Do we want to advertise 10 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_HALF) { e_dbg("Advertise 10mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; } /* Do we want to advertise 10 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_FULL) { e_dbg("Advertise 10mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; } /* Do we want to advertise 100 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_HALF) { e_dbg("Advertise 100mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; } /* Do we want to advertise 100 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_FULL) { e_dbg("Advertise 100mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; } /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ if (phy->autoneg_advertised & ADVERTISE_1000_HALF) e_dbg("Advertise 1000mb Half duplex request denied!\n"); /* Do we want to advertise 1000 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { e_dbg("Advertise 1000mb Full duplex\n"); mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; } /* * Check for a software override of the flow control settings, and * setup the PHY advertisement registers accordingly. If * auto-negotiation is enabled, then software will have to set the * "PAUSE" bits to the correct value in the Auto-Negotiation * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- * negotiation. * * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames * but we do not support receiving pause frames). 
* 3: Both Rx and Tx flow control (symmetric) are enabled. * other: No software override. The flow control configuration * in the EEPROM is used. */ switch (hw->fc.current_mode) { case e1000_fc_none: /* * Flow control (Rx & Tx) is completely disabled by a * software over-ride. */ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_rx_pause: /* * Rx Flow control is enabled, and Tx Flow control is * disabled, by a software over-ride. * * Since there really isn't a way to advertise that we are * capable of Rx Pause ONLY, we will advertise that we * support both symmetric and asymmetric Rx PAUSE. Later * (in e1000e_config_fc_after_link_up) we will disable the * hw's ability to send PAUSE frames. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled, by a software over-ride. */ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; break; case e1000_fc_full: /* * Flow control (both Rx and Tx) is enabled by a software * over-ride. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; default: e_dbg("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; } ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); if (ret_val) return ret_val; e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); if (phy->autoneg_mask & ADVERTISE_1000_FULL) ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); return ret_val; } /** * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link * @hw: pointer to the HW structure * * Performs initial bounds checking on autoneg advertisement parameter, then * configure to advertise the full capability. Setup the PHY to autoneg * and restart the negotiation process between the link partner. If * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. 
**/ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_ctrl; /* * Perform some bounds checking on the autoneg advertisement * parameter. */ phy->autoneg_advertised &= phy->autoneg_mask; /* * If autoneg_advertised is zero, we assume it was not defaulted * by the calling code so we set to advertise full capability. */ if (phy->autoneg_advertised == 0) phy->autoneg_advertised = phy->autoneg_mask; e_dbg("Reconfiguring auto-neg advertisement params\n"); ret_val = e1000_phy_setup_autoneg(hw); if (ret_val) { e_dbg("Error Setting up Auto-Negotiation\n"); return ret_val; } e_dbg("Restarting Auto-Neg\n"); /* * Restart auto-negotiation by setting the Auto Neg Enable bit and * the Auto Neg Restart bit in the PHY control register. */ ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); if (ret_val) return ret_val; phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); if (ret_val) return ret_val; /* * Does the user want to wait for Auto-Neg to complete here, or * check at a later time (for example, callback routine). */ if (phy->autoneg_wait_to_complete) { ret_val = e1000_wait_autoneg(hw); if (ret_val) { e_dbg("Error while waiting for autoneg to complete\n"); return ret_val; } } hw->mac.get_link_status = true; return ret_val; } /** * e1000e_setup_copper_link - Configure copper link settings * @hw: pointer to the HW structure * * Calls the appropriate function to configure the link for auto-neg or forced * speed and duplex. Then we check for link, once link is established calls * to configure collision distance and flow control are called. If link is * not established, we return -E1000_ERR_PHY (-2). **/ s32 e1000e_setup_copper_link(struct e1000_hw *hw) { s32 ret_val; bool link; if (hw->mac.autoneg) { /* * Setup autoneg and flow control advertisement and perform * autonegotiation. 
*/ ret_val = e1000_copper_link_autoneg(hw); if (ret_val) return ret_val; } else { /* * PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ e_dbg("Forcing Speed and Duplex\n"); ret_val = e1000_phy_force_speed_duplex(hw); if (ret_val) { e_dbg("Error Forcing Speed and Duplex\n"); return ret_val; } } /* * Check link status. Wait up to 100 microseconds for link to become * valid. */ ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, &link); if (ret_val) return ret_val; if (link) { e_dbg("Valid link established!!!\n"); hw->mac.ops.config_collision_dist(hw); ret_val = e1000e_config_fc_after_link_up(hw); } else { e_dbg("Unable to establish link!!!\n"); } return ret_val; } /** * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Waits for link and returns * successful if link up is successful, else -E1000_ERR_PHY (-2). **/ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); if (ret_val) return ret_val; /* * Clear Auto-Crossover to force MDI manually. IGP requires MDI * forced whenever speed and duplex are forced. 
*/ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); if (ret_val) return ret_val; e_dbg("IGP PSCR: %X\n", phy_data); udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); } return ret_val; } /** * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Resets the PHY to commit the * changes. If time expires while waiting for link up, we reset the DSP. * After reset, TX_CLK and CRS on Tx must be set. Return successful upon * successful completion, else return corresponding error code. **/ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; /* * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI * forced whenever speed and duplex are forced. */ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; e_dbg("M88E1000 PSCR: %X\n", phy_data); ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); if (ret_val) return ret_val; /* Reset the phy to commit changes. 
*/ ret_val = e1000e_commit_phy(hw); if (ret_val) return ret_val; if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) { if (hw->phy.type != e1000_phy_m88) { e_dbg("Link taking longer than expected.\n"); } else { /* * We didn't get link. * Reset the DSP and cross our fingers. */ ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); if (ret_val) return ret_val; ret_val = e1000e_phy_reset_dsp(hw); if (ret_val) return ret_val; } } /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; } if (hw->phy.type != e1000_phy_m88) return 0; ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* * Resetting the phy means we need to re-force TX_CLK in the * Extended PHY Specific Control Register to 25MHz clock from * the reset value of 2.5MHz. */ phy_data |= M88E1000_EPSCR_TX_CLK_25; ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; /* * In addition, we must re-enable CRS on Tx for both half and full * duplex. */ ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); return ret_val; } /** * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex * @hw: pointer to the HW structure * * Forces the speed and duplex settings of the PHY. * This is a function pointer entry point only called by * PHY setup routines. 
**/ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; ret_val = e1e_rphy(hw, PHY_CONTROL, &data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &data); ret_val = e1e_wphy(hw, PHY_CONTROL, data); if (ret_val) return ret_val; /* Disable MDI-X support for 10/100 */ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) return ret_val; data &= ~IFE_PMC_AUTO_MDIX; data &= ~IFE_PMC_FORCE_MDIX; ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); if (ret_val) return ret_val; e_dbg("IFE PMC: %X\n", data); udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; } return 0; } /** * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex * @hw: pointer to the HW structure * @phy_ctrl: pointer to current value of PHY_CONTROL * * Forces speed and duplex on the PHY by doing the following: disable flow * control, force speed/duplex on the MAC, disable auto speed detection, * disable auto-negotiation, configure duplex, configure speed, configure * the collision distance, write configuration to CTRL register. The * caller must write to the PHY_CONTROL register for these settings to * take affect. 
**/ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) { struct e1000_mac_info *mac = &hw->mac; u32 ctrl; /* Turn off flow control when forcing speed/duplex */ hw->fc.current_mode = e1000_fc_none; /* Force speed/duplex on the mac */ ctrl = er32(CTRL); ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ctrl &= ~E1000_CTRL_SPD_SEL; /* Disable Auto Speed Detection */ ctrl &= ~E1000_CTRL_ASDE; /* Disable autoneg on the phy */ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; /* Forcing Full or Half Duplex? */ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { ctrl &= ~E1000_CTRL_FD; *phy_ctrl &= ~MII_CR_FULL_DUPLEX; e_dbg("Half Duplex\n"); } else { ctrl |= E1000_CTRL_FD; *phy_ctrl |= MII_CR_FULL_DUPLEX; e_dbg("Full Duplex\n"); } /* Forcing 10mb or 100mb? */ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { ctrl |= E1000_CTRL_SPD_100; *phy_ctrl |= MII_CR_SPEED_100; *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); e_dbg("Forcing 100mb\n"); } else { ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); *phy_ctrl |= MII_CR_SPEED_10; *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); e_dbg("Forcing 10mb\n"); } hw->mac.ops.config_collision_dist(hw); ew32(CTRL, ctrl); } /** * e1000e_set_d3_lplu_state - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D3 * and SmartSpeed is disabled when active is true, else clear lplu for D3 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. 
**/ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) return ret_val; if (!active) { data &= ~IGP02E1000_PM_D3_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= IGP02E1000_PM_D3_LPLU; ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); } return ret_val; } /** * e1000e_check_downshift - Checks whether a downshift in speed occurred * @hw: pointer to the HW structure * * Success returns 0, Failure returns 1 * * A downshift is detected by querying the PHY link health. 
**/ s32 e1000e_check_downshift(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, offset, mask; switch (phy->type) { case e1000_phy_m88: case e1000_phy_gg82563: case e1000_phy_bm: case e1000_phy_82578: offset = M88E1000_PHY_SPEC_STATUS; mask = M88E1000_PSSR_DOWNSHIFT; break; case e1000_phy_igp_2: case e1000_phy_igp_3: offset = IGP01E1000_PHY_LINK_HEALTH; mask = IGP01E1000_PLHR_SS_DOWNGRADE; break; default: /* speed downshift not supported */ phy->speed_downgraded = false; return 0; } ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) phy->speed_downgraded = (phy_data & mask); return ret_val; } /** * e1000_check_polarity_m88 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY specific status register. **/ s32 e1000_check_polarity_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal; return ret_val; } /** * e1000_check_polarity_igp - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY port status register, and the * current speed (since there is no polarity at 100Mbps). **/ s32 e1000_check_polarity_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data, offset, mask; /* * Polarity is determined based on the speed of * our connection. 
*/ ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) return ret_val; if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { offset = IGP01E1000_PHY_PCS_INIT_REG; mask = IGP01E1000_PHY_POLARITY_MASK; } else { /* * This really only applies to 10Mbps since * there is no polarity for 100Mbps (always 0). */ offset = IGP01E1000_PHY_PORT_STATUS; mask = IGP01E1000_PSSR_POLARITY_REVERSED; } ret_val = e1e_rphy(hw, offset, &data); if (!ret_val) phy->cable_polarity = (data & mask) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal; return ret_val; } /** * e1000_check_polarity_ife - Check cable polarity for IFE PHY * @hw: pointer to the HW structure * * Polarity is determined on the polarity reversal feature being enabled. **/ s32 e1000_check_polarity_ife(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, offset, mask; /* * Polarity is determined based on the reversal feature being enabled. */ if (phy->polarity_correction) { offset = IFE_PHY_EXTENDED_STATUS_CONTROL; mask = IFE_PESC_POLARITY_REVERSED; } else { offset = IFE_PHY_SPECIAL_CONTROL; mask = IFE_PSC_FORCE_POLARITY; } ret_val = e1e_rphy(hw, offset, &phy_data); if (!ret_val) phy->cable_polarity = (phy_data & mask) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal; return ret_val; } /** * e1000_wait_autoneg - Wait for auto-neg completion * @hw: pointer to the HW structure * * Waits for auto-negotiation to complete or for the auto-negotiation time * limit to expire, which ever happens first. **/ static s32 e1000_wait_autoneg(struct e1000_hw *hw) { s32 ret_val = 0; u16 i, phy_status; /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. 
*/ for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) break; ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) break; if (phy_status & MII_SR_AUTONEG_COMPLETE) break; msleep(100); } /* * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation * has completed. */ return ret_val; } /** * e1000e_phy_has_link_generic - Polls PHY for link * @hw: pointer to the HW structure * @iterations: number of times to poll for link * @usec_interval: delay between polling attempts * @success: pointer to whether polling was successful or not * * Polls the PHY status register for link, 'iterations' number of times. **/ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, u32 usec_interval, bool *success) { s32 ret_val = 0; u16 i, phy_status; for (i = 0; i < iterations; i++) { /* * Some PHYs require the PHY_STATUS register to be read * twice due to the link bit being sticky. No harm doing * it across the board. */ ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) /* * If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ udelay(usec_interval); ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); if (ret_val) break; if (phy_status & MII_SR_LINK_STATUS) break; if (usec_interval >= 1000) mdelay(usec_interval/1000); else udelay(usec_interval); } *success = (i < iterations); return ret_val; } /** * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY * @hw: pointer to the HW structure * * Reads the PHY specific status register to retrieve the cable length * information. The cable length is determined by averaging the minimum and * maximum values to get the "average" cable length. 
The m88 PHY has four * possible cable length values, which are: * Register Value Cable Length * 0 < 50 meters * 1 50 - 80 meters * 2 80 - 110 meters * 3 110 - 140 meters * 4 > 140 meters **/ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, index; ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) return ret_val; index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; return 0; } /** * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY * @hw: pointer to the HW structure * * The automatic gain control (agc) normalizes the amplitude of the * received signal, adjusting for the attenuation produced by the * cable. By reading the AGC registers, which represent the * combination of coarse and fine gain value, the value can be put * into a lookup table to obtain the approximate cable length * for each channel. **/ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { IGP02E1000_PHY_AGC_A, IGP02E1000_PHY_AGC_B, IGP02E1000_PHY_AGC_C, IGP02E1000_PHY_AGC_D }; /* Read the AGC registers for all channels */ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); if (ret_val) return ret_val; /* * Getting bits 15:9, which represent the combination of * coarse and fine gain values. The result is a number * that can be put into the lookup table to obtain the * approximate cable length. 
*/ cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & IGP02E1000_AGC_LENGTH_MASK; /* Array index bound check. */ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || (cur_agc_index == 0)) return -E1000_ERR_PHY; /* Remove min & max AGC values from calculation. */ if (e1000_igp_2_cable_length_table[min_agc_index] > e1000_igp_2_cable_length_table[cur_agc_index]) min_agc_index = cur_agc_index; if (e1000_igp_2_cable_length_table[max_agc_index] < e1000_igp_2_cable_length_table[cur_agc_index]) max_agc_index = cur_agc_index; agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; } agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + e1000_igp_2_cable_length_table[max_agc_index]); agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); /* Calculate cable length with the error range of +/- 10 meters. */ phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? (agc_value - IGP02E1000_AGC_RANGE) : 0; phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; return 0; } /** * e1000e_get_phy_info_m88 - Retrieve PHY information * @hw: pointer to the HW structure * * Valid for only copper links. Read the PHY status register (sticky read) * to verify that link is up. Read the PHY special control register to * determine the polarity and 10base-T extended distance. Read the PHY * special status register to determine MDI/MDIx and current speed. If * speed is 1000, then determine cable length, local and remote receiver. 
**/ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; if (phy->media_type != e1000_media_type_copper) { e_dbg("Phy info is only valid for copper media\n"); return -E1000_ERR_CONFIG; } ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL); ret_val = e1000_check_polarity_m88(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) return ret_val; phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { ret_val = e1000_get_cable_length(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) return ret_val; phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { /* Set values to "undefined" */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return ret_val; } /** * e1000e_get_phy_info_igp - Retrieve igp PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then * set/determine 10base-T extended distance and polarity correction. Read * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. 
**/
s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	/* IGP PHYs always perform polarity correction */
	phy->polarity_correction = true;

	ret_val = e1000_check_polarity_igp(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);

	/* Cable length and 1000T receiver status only apply at 1000 Mb/s */
	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		ret_val = e1000_get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
		    ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
		    ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
	} else {
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	/* ret_val is 0 on every path that reaches here */
	return ret_val;
}

/**
 * e1000_get_phy_info_ife - Retrieves various IFE PHY states
 * @hw: pointer to the HW structure
 *
 * Populates "phy" structure with various feature states.
 **/
s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		e_dbg("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
	if (ret_val)
		return ret_val;

	/* Auto-polarity disabled means polarity is forced, not corrected */
	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
	    ? false : true;

	if (phy->polarity_correction) {
		ret_val = e1000_check_polarity_ife(hw);
		if (ret_val)
			return ret_val;
	} else {
		/* Polarity is forced */
		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
		    ? e1000_rev_polarity_reversed
		    : e1000_rev_polarity_normal;
	}

	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;

	/* The following parameters are undefined for 10/100 operation. */
	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
	phy->local_rx = e1000_1000t_rx_status_undefined;
	phy->remote_rx = e1000_1000t_rx_status_undefined;

	return 0;
}

/**
 * e1000e_phy_sw_reset - PHY software reset
 * @hw: pointer to the HW structure
 *
 * Does a software reset of the PHY by reading the PHY control register and
 * setting/write the control register reset bit to the PHY.
 **/
s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 phy_ctrl;

	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
	if (ret_val)
		return ret_val;

	phy_ctrl |= MII_CR_RESET;
	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
	if (ret_val)
		return ret_val;

	/* Brief settle time after asserting the reset bit */
	udelay(1);

	return ret_val;
}

/**
 * e1000e_phy_hw_reset_generic - PHY hardware reset
 * @hw: pointer to the HW structure
 *
 * Verify the reset block is not blocking us from resetting.  Acquire
 * semaphore (if necessary) and read/set/write the device control reset
 * bit in the PHY.  Wait the appropriate delay time for the device to
 * reset and release the semaphore (if necessary).
 **/
s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 ctrl;

	/* If the reset is blocked, report success without touching the PHY */
	if (phy->ops.check_reset_block) {
		ret_val = phy->ops.check_reset_block(hw);
		if (ret_val)
			return 0;
	}

	ret_val = phy->ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Pulse the PHY reset bit in the device control register */
	ctrl = er32(CTRL);
	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
	e1e_flush();

	udelay(phy->reset_delay_us);

	ew32(CTRL, ctrl);
	e1e_flush();

	udelay(150);

	phy->ops.release(hw);

	return e1000_get_phy_cfg_done(hw);
}

/**
 * e1000e_get_cfg_done - Generic configuration done
 * @hw: pointer to the HW structure
 *
 * Generic function to wait 10 milli-seconds for configuration to complete
 * and return success.
**/
s32 e1000e_get_cfg_done(struct e1000_hw *hw)
{
	mdelay(10);
	return 0;
}

/**
 * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
 * @hw: pointer to the HW structure
 *
 * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
 *
 * NOTE(review): the register addresses and values below are
 * vendor-prescribed magic for the IGP3 PHY; do not alter them without
 * the corresponding Intel documentation.
 **/
s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
{
	e_dbg("Running IGP 3 PHY init script\n");

	/* PHY init IGP 3 */
	/* Enable rise/fall, 10-mode work in class-A */
	e1e_wphy(hw, 0x2F5B, 0x9018);
	/* Remove all caps from Replica path filter */
	e1e_wphy(hw, 0x2F52, 0x0000);
	/* Bias trimming for ADC, AFE and Driver (Default) */
	e1e_wphy(hw, 0x2FB1, 0x8B24);
	/* Increase Hybrid poly bias */
	e1e_wphy(hw, 0x2FB2, 0xF8F0);
	/* Add 4% to Tx amplitude in Gig mode */
	e1e_wphy(hw, 0x2010, 0x10B0);
	/* Disable trimming (TTT) */
	e1e_wphy(hw, 0x2011, 0x0000);
	/* Poly DC correction to 94.6% + 2% for all channels */
	e1e_wphy(hw, 0x20DD, 0x249A);
	/* ABS DC correction to 95.9% */
	e1e_wphy(hw, 0x20DE, 0x00D3);
	/* BG temp curve trim */
	e1e_wphy(hw, 0x28B4, 0x04CE);
	/* Increasing ADC OPAMP stage 1 currents to max */
	e1e_wphy(hw, 0x2F70, 0x29E4);
	/* Force 1000 ( required for enabling PHY regs configuration) */
	e1e_wphy(hw, 0x0000, 0x0140);
	/* Set upd_freq to 6 */
	e1e_wphy(hw, 0x1F30, 0x1606);
	/* Disable NPDFE */
	e1e_wphy(hw, 0x1F31, 0xB814);
	/* Disable adaptive fixed FFE (Default) */
	e1e_wphy(hw, 0x1F35, 0x002A);
	/* Enable FFE hysteresis */
	e1e_wphy(hw, 0x1F3E, 0x0067);
	/* Fixed FFE for short cable lengths */
	e1e_wphy(hw, 0x1F54, 0x0065);
	/* Fixed FFE for medium cable lengths */
	e1e_wphy(hw, 0x1F55, 0x002A);
	/* Fixed FFE for long cable lengths */
	e1e_wphy(hw, 0x1F56, 0x002A);
	/* Enable Adaptive Clip Threshold */
	e1e_wphy(hw, 0x1F72, 0x3FB0);
	/* AHT reset limit to 1 */
	e1e_wphy(hw, 0x1F76, 0xC0FF);
	/* Set AHT master delay to 127 msec */
	e1e_wphy(hw, 0x1F77, 0x1DEC);
	/* Set scan bits for AHT */
	e1e_wphy(hw, 0x1F78, 0xF9EF);
	/* Set AHT Preset bits */
	e1e_wphy(hw, 0x1F79, 0x0210);
	/* Change integ_factor of channel A to 3 */
	e1e_wphy(hw, 0x1895, 0x0003);
	/* Change prop_factor of channels BCD to 8 */
	e1e_wphy(hw, 0x1796, 0x0008);
	/* Change cg_icount + enable integbp for channels BCD */
	e1e_wphy(hw, 0x1798, 0xD008);
	/*
	 * Change cg_icount + enable integbp + change prop_factor_master
	 * to 8 for channel A
	 */
	e1e_wphy(hw, 0x1898, 0xD918);
	/* Disable AHT in Slave mode on channel A */
	e1e_wphy(hw, 0x187A, 0x0800);
	/*
	 * Enable LPLU and disable AN to 1000 in non-D0a states,
	 * Enable SPD+B2B
	 */
	e1e_wphy(hw, 0x0019, 0x008D);
	/* Enable restart AN on an1000_dis change */
	e1e_wphy(hw, 0x001B, 0x2080);
	/* Enable wh_fifo read clock in 10/100 modes */
	e1e_wphy(hw, 0x0014, 0x0045);
	/* Restart AN, Speed selection is 1000 */
	e1e_wphy(hw, 0x0000, 0x1340);

	return 0;
}

/* Internal function pointers */

/**
 * e1000_get_phy_cfg_done - Generic PHY configuration done
 * @hw: pointer to the HW structure
 *
 * Return success if silicon family did not implement a family specific
 * get_cfg_done function.
 **/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
	if (hw->phy.ops.get_cfg_done)
		return hw->phy.ops.get_cfg_done(hw);

	return 0;
}

/**
 * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
 * @hw: pointer to the HW structure
 *
 * When the silicon family has not implemented a forced speed/duplex
 * function for the PHY, simply return 0.
 **/
static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
{
	if (hw->phy.ops.force_speed_duplex)
		return hw->phy.ops.force_speed_duplex(hw);

	return 0;
}

/**
 * e1000e_get_phy_type_from_id - Get PHY type from id
 * @phy_id: phy_id read from the phy
 *
 * Returns the phy type from the id.
**/ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) { enum e1000_phy_type phy_type = e1000_phy_unknown; switch (phy_id) { case M88E1000_I_PHY_ID: case M88E1000_E_PHY_ID: case M88E1111_I_PHY_ID: case M88E1011_I_PHY_ID: phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ phy_type = e1000_phy_igp_2; break; case GG82563_E_PHY_ID: phy_type = e1000_phy_gg82563; break; case IGP03E1000_E_PHY_ID: phy_type = e1000_phy_igp_3; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy_type = e1000_phy_ife; break; case BME1000_E_PHY_ID: case BME1000_E_PHY_ID_R2: phy_type = e1000_phy_bm; break; case I82578_E_PHY_ID: phy_type = e1000_phy_82578; break; case I82577_E_PHY_ID: phy_type = e1000_phy_82577; break; case I82579_E_PHY_ID: phy_type = e1000_phy_82579; break; default: phy_type = e1000_phy_unknown; break; } return phy_type; } /** * e1000e_determine_phy_address - Determines PHY address. * @hw: pointer to the HW structure * * This uses a trial and error method to loop through possible PHY * addresses. It tests each by reading the PHY ID registers and * checking for a match. **/ s32 e1000e_determine_phy_address(struct e1000_hw *hw) { u32 phy_addr = 0; u32 i; enum e1000_phy_type phy_type = e1000_phy_unknown; hw->phy.id = phy_type; for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { hw->phy.addr = phy_addr; i = 0; do { e1000e_get_phy_id(hw); phy_type = e1000e_get_phy_type_from_id(hw->phy.id); /* * If phy_type is valid, break - we found our * PHY address */ if (phy_type != e1000_phy_unknown) return 0; usleep_range(1000, 2000); i++; } while (i < 10); } return -E1000_ERR_PHY_TYPE; } /** * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address * @page: page to access * * Returns the phy address for the page requested. 
**/ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) { u32 phy_addr = 2; if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) phy_addr = 1; return phy_addr; } /** * e1000e_write_phy_reg_bm - Write BM PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; u32 page = offset >> IGP_PAGE_SHIFT; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false, false); goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; /* * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. */ if (hw->phy.addr == 1) { page_shift = IGP_PAGE_SHIFT; page_select = IGP01E1000_PHY_PAGE_SELECT; } else { page_shift = 0; page_select = BM_PHY_PAGE_SELECT; } /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000e_read_phy_reg_bm - Read BM PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. 
**/ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; u32 page = offset >> IGP_PAGE_SHIFT; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, false); goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; /* * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. */ if (hw->phy.addr == 1) { page_shift = IGP_PAGE_SHIFT; page_select = IGP01E1000_PHY_PAGE_SELECT; } else { page_shift = 0; page_select = BM_PHY_PAGE_SELECT; } /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000e_read_phy_reg_bm2 - Read BM PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. 
**/ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, false); goto release; } hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); if (ret_val) goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000e_write_phy_reg_bm2 - Write BM PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
**/
s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;
	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
							 false, false);
		goto release;
	}

	/* BM2 PHYs are always accessed at PHY address 1 */
	hw->phy.addr = 1;

	if (offset > MAX_PHY_MULTI_PAGE_REG) {
		/* Page is shifted left, PHY expects (page x 32) */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
						    page);
		if (ret_val)
			goto release;
	}

	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
					    data);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
 * @hw: pointer to the HW structure
 * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
 *
 * Assumes semaphore already acquired and phy_reg points to a valid memory
 * address to store contents of the BM_WUC_ENABLE_REG register.
 **/
s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
	s32 ret_val;
	u16 temp;

	/* All page select, port ctrl and wakeup registers use phy address 1 */
	hw->phy.addr = 1;

	/* Select Port Control Registers page */
	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
	if (ret_val) {
		e_dbg("Could not set Port Control page\n");
		return ret_val;
	}

	/* Save the current register value so the caller can restore it */
	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
	if (ret_val) {
		e_dbg("Could not read PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
		return ret_val;
	}

	/*
	 * Enable both PHY wakeup mode and Wakeup register page writes.
	 * Prevent a power state change by disabling ME and Host PHY wakeup.
	 */
	temp = *phy_reg;
	temp |= BM_WUC_ENABLE_BIT;
	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);

	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
	if (ret_val) {
		e_dbg("Could not write PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
		return ret_val;
	}

	/*
	 * Select Host Wakeup Registers page - caller now able to write
	 * registers on the Wakeup registers page
	 */
	return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
}

/**
 * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
 * @hw: pointer to the HW structure
 * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
 *
 * Restore BM_WUC_ENABLE_REG to its original value.
 *
 * Assumes semaphore already acquired and *phy_reg is the contents of the
 * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
 * caller.
 **/
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
	s32 ret_val = 0;

	/* Select Port Control Registers page */
	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
	if (ret_val) {
		e_dbg("Could not set Port Control page\n");
		return ret_val;
	}

	/* Restore 769.17 to its original value */
	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
	if (ret_val)
		e_dbg("Could not restore PHY register %d.%d\n",
		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);

	return ret_val;
}

/**
 * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read or written
 * @data: pointer to the data to read or write
 * @read: determines if operation is read or write
 * @page_set: BM_WUC_PAGE already set and access enabled
 *
 * Read the PHY register at offset and store the retrieved information in
 * data, or write data to PHY register at offset.  Note the procedure to
 * access the PHY wakeup registers is different than reading the other PHY
 * registers.
It works as such:
 * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 * 2) Set page to 800 for host (801 if we were manageability)
 * 3) Write the address using the address opcode (0x11)
 * 4) Read or write the data using the data opcode (0x12)
 * 5) Restore 769.17.2 to its original value
 *
 * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
 * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
 *
 * Assumes semaphore is already acquired.  When page_set==true, assumes
 * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()).
 **/
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
					  u16 *data, bool read, bool page_set)
{
	s32 ret_val;
	u16 reg = BM_PHY_REG_NUM(offset);
	u16 page = BM_PHY_REG_PAGE(offset);
	u16 phy_reg = 0;

	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
	if ((hw->mac.type == e1000_pchlan) &&
	    (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
		e_dbg("Attempting to access page %d while gig enabled.\n",
		      page);

	if (!page_set) {
		/* Enable access to PHY wakeup registers */
		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
		if (ret_val) {
			e_dbg("Could not enable PHY wakeup reg access\n");
			return ret_val;
		}
	}

	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);

	/* Write the Wakeup register page offset value using opcode 0x11 */
	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
	if (ret_val) {
		e_dbg("Could not write address opcode to page %d\n", page);
		return ret_val;
	}

	if (read) {
		/* Read the Wakeup register page value using opcode 0x12 */
		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						   data);
	} else {
		/* Write the Wakeup register page value using opcode 0x12 */
		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						    *data);
	}

	if (ret_val) {
		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
		return ret_val;
	}

	if (!page_set)
		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

	return ret_val;
}

/**
 * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, restore the link to previous
 * settings.
 **/
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
	mii_reg &= ~MII_CR_POWER_DOWN;
	e1e_wphy(hw, PHY_CONTROL, mii_reg);
}

/**
 * e1000_power_down_phy_copper - Power down copper PHY
 * @hw: pointer to the HW structure
 *
 * Power down the PHY to save power, e.g. during driver unload or when wake
 * on lan is not enabled, by setting the power-down bit in PHY_CONTROL.
 **/
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;
	e1e_wphy(hw, PHY_CONTROL, mii_reg);
	usleep_range(1000, 2000);
}

/**
 * e1000e_commit_phy - Soft PHY reset
 * @hw: pointer to the HW structure
 *
 * Performs a soft PHY reset on those that apply.  This is a function pointer
 * entry point called by drivers.
 **/
s32 e1000e_commit_phy(struct e1000_hw *hw)
{
	if (hw->phy.ops.commit)
		return hw->phy.ops.commit(hw);

	return 0;
}

/**
 * e1000_set_d0_lplu_state - Sets low power link up state for D0
 * @hw: pointer to the HW structure
 * @active: boolean used to enable/disable lplu
 *
 * Success returns 0, Failure returns 1
 *
 * The low power link up (lplu) state is set to the power management level D0
 * and SmartSpeed is disabled when active is true, else clear lplu for D0
 * and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
 * is used during Dx states where the power conservation is most important.
* During driver activity, SmartSpeed should be enabled so performance is
 * maintained.  This is a function pointer entry point called by drivers.
 **/
static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
{
	if (hw->phy.ops.set_d0_lplu_state)
		return hw->phy.ops.set_d0_lplu_state(hw, active);

	return 0;
}

/**
 * __e1000_read_phy_reg_hv -  Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 * @locked: semaphore has already been acquired or not
 * @page_set: the HV page is already set and must not be re-selected
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and stores the retrieved information in data.  Release any acquired
 * semaphore before exiting.
 **/
static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
				   bool locked, bool page_set)
{
	s32 ret_val;
	u16 page = BM_PHY_REG_PAGE(offset);
	u16 reg = BM_PHY_REG_NUM(offset);
	/* Side effect intended: selects the PHY address for this page */
	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);

	if (!locked) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
							 true, page_set);
		goto out;
	}

	/* Low pages are reached through the vendor debug register pair */
	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
							 data, true);
		goto out;
	}

	if (!page_set) {
		if (page == HV_INTC_FC_PAGE_START)
			page = 0;

		if (reg > MAX_PHY_MULTI_PAGE_REG) {
			/* Page is shifted left, PHY expects (page x 32) */
			ret_val = e1000_set_page_igp(hw,
						     (page << IGP_PAGE_SHIFT));

			/* Restore the address that set_page may have changed */
			hw->phy.addr = phy_addr;

			if (ret_val)
				goto out;
		}
	}

	e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
	      page << IGP_PAGE_SHIFT, reg);

	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
					   data);
out:
	if (!locked)
		hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_read_phy_reg_hv -  Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore then reads the PHY register at offset and stores
 * the retrieved information in data.  Release the acquired semaphore
 * before exiting.
 **/
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
}

/**
 * e1000_read_phy_reg_hv_locked -  Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset and stores the retrieved information
 * in data.  Assumes semaphore already acquired.
 **/
s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
}

/**
 * e1000_read_phy_reg_page_hv - Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset and stores the retrieved information
 * in data.  Assumes semaphore already acquired and page already set.
 **/
s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
}

/**
 * __e1000_write_phy_reg_hv - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 * @locked: semaphore has already been acquired or not
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
**/
static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
				    bool locked, bool page_set)
{
	s32 ret_val;
	u16 page = BM_PHY_REG_PAGE(offset);
	u16 reg = BM_PHY_REG_NUM(offset);
	/* Side effect intended: selects the PHY address for this page */
	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);

	if (!locked) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
							 false, page_set);
		goto out;
	}

	/* Low pages are reached through the vendor debug register pair */
	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
							 &data, false);
		goto out;
	}

	if (!page_set) {
		if (page == HV_INTC_FC_PAGE_START)
			page = 0;

		/*
		 * Workaround MDIO accesses being disabled after entering IEEE
		 * Power Down (when bit 11 of the PHY Control register is set)
		 */
		if ((hw->phy.type == e1000_phy_82578) &&
		    (hw->phy.revision >= 1) &&
		    (hw->phy.addr == 2) &&
		    ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
		    (data & (1 << 11))) {
			u16 data2 = 0x7EFF;
			ret_val = e1000_access_phy_debug_regs_hv(hw,
								 (1 << 6) | 0x3,
								 &data2, false);
			if (ret_val)
				goto out;
		}

		if (reg > MAX_PHY_MULTI_PAGE_REG) {
			/* Page is shifted left, PHY expects (page x 32) */
			ret_val = e1000_set_page_igp(hw,
						     (page << IGP_PAGE_SHIFT));

			/* Restore the address that set_page may have changed */
			hw->phy.addr = phy_addr;

			if (ret_val)
				goto out;
		}
	}

	e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
	      page << IGP_PAGE_SHIFT, reg);

	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
					    data);

out:
	if (!locked)
		hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_write_phy_reg_hv - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore then writes the data to PHY register at the offset.
 * Release the acquired semaphores before exiting.
 **/
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
}

/**
 * e1000_write_phy_reg_hv_locked - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset.  Assumes semaphore
 * already acquired.
 **/
s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
}

/**
 * e1000_write_phy_reg_page_hv - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset.  Assumes semaphore
 * already acquired and page already set.
 **/
s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
}

/**
 * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
 * @page: page to be accessed
 **/
static u32 e1000_get_phy_addr_for_hv_page(u32 page)
{
	u32 phy_addr = 2;

	/* Interconnect/flow-control pages and above live at PHY address 1 */
	if (page >= HV_INTC_FC_PAGE_START)
		phy_addr = 1;

	return phy_addr;
}

/**
 * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
 * @hw: pointer to the HW structure
 * @offset: register offset to be read or written
 * @data: pointer to the data to be read or written
 * @read: determines if operation is read or write
 *
 * Reads the PHY register at offset and stores the retreived information
 * in data.  Assumes semaphore already acquired.  Note that the procedure
 * to access these regs uses the address port and data port to read/write.
 * These accesses done with PHY address 2 and without using pages.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
					  u16 *data, bool read)
{
	s32 ret_val;
	u32 addr_reg = 0;
	u32 data_reg = 0;

	/* This takes care of the difference with desktop vs mobile phy */
	addr_reg = (hw->phy.type == e1000_phy_82578) ?
	    I82578_ADDR_REG : I82577_ADDR_REG;
	/* The data port is the register immediately after the address port */
	data_reg = addr_reg + 1;

	/* All operations in this function are phy address 2 */
	hw->phy.addr = 2;

	/* masking with 0x3F to remove the page from offset */
	ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
	if (ret_val) {
		e_dbg("Could not write the Address Offset port register\n");
		return ret_val;
	}

	/* Read or write the data value next */
	if (read)
		ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
	else
		ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);

	if (ret_val)
		e_dbg("Could not access the Data port register\n");

	return ret_val;
}

/**
 * e1000_link_stall_workaround_hv - Si workaround
 * @hw: pointer to the HW structure
 *
 * This function works around a Si bug where the link partner can get
 * a link up indication before the PHY does.  If small packets are sent
 * by the link partner they can be placed in the packet buffer without
 * being properly accounted for by the PHY and will stall preventing
 * further packets from being received.  The workaround is to clear the
 * packet buffer after the PHY detects link up.
 **/
s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 data;

	/* Workaround applies only to the 82578 PHY */
	if (hw->phy.type != e1000_phy_82578)
		return 0;

	/*
	 * Do not apply workaround if in PHY loopback bit 14 set
	 * NOTE(review): this e1e_rphy return value is intentionally
	 * ignored; on failure 'data' may be stale - confirm acceptable.
	 */
	e1e_rphy(hw, PHY_CONTROL, &data);
	if (data & PHY_CONTROL_LB)
		return 0;

	/* check if link is up and at 1Gbps */
	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
	if (ret_val)
		return ret_val;

	data &= BM_CS_STATUS_LINK_UP |
	    BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_MASK;

	if (data != (BM_CS_STATUS_LINK_UP |
		     BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000))
		return 0;

	msleep(200);

	/* flush the packets in the fifo buffer */
	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
			   HV_MUX_DATA_CTRL_FORCE_SPEED);
	if (ret_val)
		return ret_val;

	return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
}

/**
 * e1000_check_polarity_82577 - Checks the polarity.
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 * Polarity is determined based on the PHY specific status register.
 **/
s32 e1000_check_polarity_82577(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;

	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);

	if (!ret_val)
		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
		    ? e1000_rev_polarity_reversed
		    : e1000_rev_polarity_normal;

	return ret_val;
}

/**
 * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
 * @hw: pointer to the HW structure
 *
 * Calls the PHY setup function to force speed and duplex.
**/ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); if (ret_val) return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); if (ret_val) return ret_val; udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); } return ret_val; } /** * e1000_get_phy_info_82577 - Retrieve I82577 PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then * set/determine 10base-T extended distance and polarity correction. Read * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. **/ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } phy->polarity_correction = true; ret_val = e1000_check_polarity_82577(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (ret_val) return ret_val; phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; if ((data & I82577_PHY_STATUS2_SPEED_MASK) == I82577_PHY_STATUS2_SPEED_1000MBPS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) return ret_val; ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); if (ret_val) return ret_val; phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) ? 
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return 0; } /** * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY * @hw: pointer to the HW structure * * Reads the diagnostic status register and verifies result is valid before * placing it in the phy_cable_length field. **/ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, length; ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); if (ret_val) return ret_val; length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> I82577_DSTATUS_CABLE_LENGTH_SHIFT; if (length == E1000_CABLE_LENGTH_UNDEFINED) ret_val = -E1000_ERR_PHY; phy->cable_length = length; return 0; }
gpl-2.0
gpillusion/Samsung_Kernel_Source_SM-G710K
drivers/staging/android/switch/switch_class.c
2300
4371
/* * switch_class.c * * Copyright (C) 2008 Google, Inc. * Author: Mike Lockwood <lockwood@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/err.h> #include "switch.h" struct class *switch_class; static atomic_t device_count; static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct switch_dev *sdev = (struct switch_dev *) dev_get_drvdata(dev); if (sdev->print_state) { int ret = sdev->print_state(sdev, buf); if (ret >= 0) return ret; } return sprintf(buf, "%d\n", sdev->state); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct switch_dev *sdev = (struct switch_dev *) dev_get_drvdata(dev); if (sdev->print_name) { int ret = sdev->print_name(sdev, buf); if (ret >= 0) return ret; } return sprintf(buf, "%s\n", sdev->name); } static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL); static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL); void switch_set_state(struct switch_dev *sdev, int state) { char name_buf[120]; char state_buf[120]; char *prop_buf; char *envp[3]; int env_offset = 0; int length; if (sdev->state != state) { sdev->state = state; prop_buf = (char *)get_zeroed_page(GFP_KERNEL); if (prop_buf) { length = name_show(sdev->dev, NULL, prop_buf); if (length > 0) { if (prop_buf[length - 1] == '\n') prop_buf[length - 1] = 0; snprintf(name_buf, sizeof(name_buf), "SWITCH_NAME=%s", prop_buf); envp[env_offset++] = name_buf; } 
length = state_show(sdev->dev, NULL, prop_buf); if (length > 0) { if (prop_buf[length - 1] == '\n') prop_buf[length - 1] = 0; snprintf(state_buf, sizeof(state_buf), "SWITCH_STATE=%s", prop_buf); envp[env_offset++] = state_buf; } envp[env_offset] = NULL; kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp); free_page((unsigned long)prop_buf); } else { printk(KERN_ERR "out of memory in switch_set_state\n"); kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE); } } } EXPORT_SYMBOL_GPL(switch_set_state); static int create_switch_class(void) { if (!switch_class) { switch_class = class_create(THIS_MODULE, "switch"); if (IS_ERR(switch_class)) return PTR_ERR(switch_class); atomic_set(&device_count, 0); } return 0; } int switch_dev_register(struct switch_dev *sdev) { int ret; if (!switch_class) { ret = create_switch_class(); if (ret < 0) return ret; } sdev->index = atomic_inc_return(&device_count); sdev->dev = device_create(switch_class, NULL, MKDEV(0, sdev->index), NULL, sdev->name); if (IS_ERR(sdev->dev)) return PTR_ERR(sdev->dev); ret = device_create_file(sdev->dev, &dev_attr_state); if (ret < 0) goto err_create_file_1; ret = device_create_file(sdev->dev, &dev_attr_name); if (ret < 0) goto err_create_file_2; dev_set_drvdata(sdev->dev, sdev); sdev->state = 0; return 0; err_create_file_2: device_remove_file(sdev->dev, &dev_attr_state); err_create_file_1: device_destroy(switch_class, MKDEV(0, sdev->index)); printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name); return ret; } EXPORT_SYMBOL_GPL(switch_dev_register); void switch_dev_unregister(struct switch_dev *sdev) { device_remove_file(sdev->dev, &dev_attr_name); device_remove_file(sdev->dev, &dev_attr_state); dev_set_drvdata(sdev->dev, NULL); device_destroy(switch_class, MKDEV(0, sdev->index)); } EXPORT_SYMBOL_GPL(switch_dev_unregister); static int __init switch_class_init(void) { return create_switch_class(); } static void __exit switch_class_exit(void) { class_destroy(switch_class); } 
/* Module plumbing: the class is created at init time here, or lazily by
 * the first switch_dev_register() call, whichever happens first.
 */
module_init(switch_class_init);
module_exit(switch_class_exit);

MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
MODULE_DESCRIPTION("Switch class driver");
MODULE_LICENSE("GPL");
gpl-2.0
tbalden/android_kernel_htc_endeavoru_jb
drivers/net/usb/mcs7830.c
3068
18476
/*
 * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices
 *
 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver
 *
 * Copyright (C) 2010 Andreas Mohr <andi@lisas.de>
 * Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
 * Copyright (c) 2002-2003 TiVo Inc.
 *
 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
 *
 * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"),
 *             per active notification by manufacturer
 *
 * TODO:
 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
 * - implement ethtool_ops get_pauseparam/set_pauseparam
 *   via HIF_REG_PAUSE_THRESHOLD (>= revision C only!)
 * - implement get_eeprom/[set_eeprom]
 * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII)
 * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs,
 *   can access only ~ 24, remaining user buffer is uninitialized garbage
 * - anything else?
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

/* requests */
#define MCS7830_RD_BMREQ	(USB_DIR_IN  | USB_TYPE_VENDOR | \
				 USB_RECIP_DEVICE)
#define MCS7830_WR_BMREQ	(USB_DIR_OUT | USB_TYPE_VENDOR | \
				 USB_RECIP_DEVICE)
#define MCS7830_RD_BREQ		0x0E
#define MCS7830_WR_BREQ		0x0D

#define MCS7830_CTRL_TIMEOUT	1000
#define MCS7830_MAX_MCAST	64

#define MCS7830_VENDOR_ID	0x9710
#define MCS7832_PRODUCT_ID	0x7832
#define MCS7830_PRODUCT_ID	0x7830
#define MCS7730_PRODUCT_ID	0x7730

#define SITECOM_VENDOR_ID	0x0DF6
#define LN_030_PRODUCT_ID	0x0021

#define MCS7830_MII_ADVERTISE	(ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \
				 ADVERTISE_100HALF | ADVERTISE_10FULL | \
				 ADVERTISE_10HALF | ADVERTISE_CSMA)

/* HIF_REG_XX corresponding index value */
enum {
	HIF_REG_MULTICAST_HASH			= 0x00,
	HIF_REG_PACKET_GAP1			= 0x08,
	HIF_REG_PACKET_GAP2			= 0x09,
	HIF_REG_PHY_DATA			= 0x0a,
	HIF_REG_PHY_CMD1			= 0x0c,
	   HIF_REG_PHY_CMD1_READ		= 0x40,
	   HIF_REG_PHY_CMD1_WRITE		= 0x20,
	   HIF_REG_PHY_CMD1_PHYADDR		= 0x01,
	HIF_REG_PHY_CMD2			= 0x0d,
	   HIF_REG_PHY_CMD2_PEND_FLAG_BIT	= 0x80,
	   HIF_REG_PHY_CMD2_READY_FLAG_BIT	= 0x40,
	HIF_REG_CONFIG				= 0x0e,
	/* hmm, spec sez: "R/W", "Except bit 3" (likely TXENABLE). */
	   HIF_REG_CONFIG_CFG			= 0x80,
	   HIF_REG_CONFIG_SPEED100		= 0x40,
	   HIF_REG_CONFIG_FULLDUPLEX_ENABLE	= 0x20,
	   HIF_REG_CONFIG_RXENABLE		= 0x10,
	   HIF_REG_CONFIG_TXENABLE		= 0x08,
	   HIF_REG_CONFIG_SLEEPMODE		= 0x04,
	   HIF_REG_CONFIG_ALLMULTICAST		= 0x02,
	   HIF_REG_CONFIG_PROMISCUOUS		= 0x01,
	HIF_REG_ETHERNET_ADDR			= 0x0f,
	HIF_REG_FRAME_DROP_COUNTER		= 0x15, /* 0..ff; reset: 0 */
	HIF_REG_PAUSE_THRESHOLD			= 0x16,
	   HIF_REG_PAUSE_THRESHOLD_DEFAULT	= 0,
};

/* Trailing status byte in Ethernet Rx frame */
enum {
	MCS7830_RX_SHORT_FRAME		= 0x01, /* < 64 bytes */
	MCS7830_RX_LENGTH_ERROR		= 0x02, /* framelen != Ethernet length field */
	MCS7830_RX_ALIGNMENT_ERROR	= 0x04, /* non-even number of nibbles */
	MCS7830_RX_CRC_ERROR		= 0x08,
	MCS7830_RX_LARGE_FRAME		= 0x10, /* > 1518 bytes */
	MCS7830_RX_FRAME_CORRECT	= 0x20, /* frame is correct */
	/* [7:6] reserved */
};

struct mcs7830_data {
	u8 multi_filter[8];	/* 64-bit multicast hash filter */
	u8 config;		/* shadow of HIF_REG_CONFIG */
};

static const char driver_name[] = "MOSCHIP usb-ethernet driver";

/*
 * Read @size bytes from vendor register @index into @data.
 *
 * BUGFIX: the original unconditionally memcpy'd the bounce buffer into
 * @data, so on a failed or short usb_control_msg() the caller received
 * uninitialized bytes (e.g. a bogus MAC address or PHY status).  Now the
 * copy happens only on a complete transfer; a short read is reported as
 * -EIO.  Returns @size on success or a negative errno.
 */
static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
{
	struct usb_device *xdev = dev->udev;
	int ret;
	void *buffer;

	buffer = kmalloc(size, GFP_NOIO);
	if (buffer == NULL)
		return -ENOMEM;

	ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
			      MCS7830_RD_BMREQ, 0x0000, index, buffer,
			      size, MCS7830_CTRL_TIMEOUT);
	if (ret == size)
		memcpy(data, buffer, size);
	else if (ret >= 0)
		ret = -EIO;
	kfree(buffer);
	return ret;
}

/* Write @size bytes from @data to vendor register @index (via a DMA-safe
 * bounce buffer).  Returns bytes written or a negative errno.
 */
static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
{
	struct usb_device *xdev = dev->udev;
	int ret;
	void *buffer;

	buffer = kmemdup(data, size, GFP_NOIO);
	if (buffer == NULL)
		return -ENOMEM;

	ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
			      MCS7830_WR_BMREQ, 0x0000, index, buffer,
			      size, MCS7830_CTRL_TIMEOUT);
	kfree(buffer);
	return ret;
}

/* Completion handler for mcs7830_set_reg_async(): releases the request
 * and the URB allocated by the submitter.
 */
static void mcs7830_async_cmd_callback(struct urb *urb)
{
	struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
	int status = urb->status;

	if (status < 0)
		printk(KERN_DEBUG "%s() failed with %d\n",
		       __func__, status);

	kfree(req);
	usb_free_urb(urb);
}

/* Fire-and-forget register write, usable from atomic context (e.g. the
 * ndo_set_multicast_list path).  @data must remain valid until the URB
 * completes; errors are logged but not reported to the caller.
 */
static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data)
{
	struct usb_ctrlrequest *req;
	int ret;
	struct urb *urb;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		dev_dbg(&dev->udev->dev,
			"Error allocating URB in write_cmd_async!\n");
		return;
	}

	req = kmalloc(sizeof *req, GFP_ATOMIC);
	if (!req) {
		dev_err(&dev->udev->dev,
			"Failed to allocate memory for control request\n");
		goto out;
	}
	req->bRequestType = MCS7830_WR_BMREQ;
	req->bRequest = MCS7830_WR_BREQ;
	req->wValue = 0;
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, data, size,
			     mcs7830_async_cmd_callback, req);

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret < 0) {
		dev_err(&dev->udev->dev,
			"Error submitting the control message: ret=%d\n", ret);
		goto out;
	}
	return;
out:
	kfree(req);
	usb_free_urb(urb);
}

/* Read the MAC address from chip registers; 0 on success. */
static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
{
	int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
	if (ret < 0)
		return ret;
	return 0;
}

/* Program the MAC address into chip registers; 0 on success. */
static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
{
	int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
	if (ret < 0)
		return ret;
	return 0;
}

/* ndo_set_mac_address: validate, push to hardware, then adopt locally. */
static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
{
	int ret;
	struct usbnet *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
	if (ret < 0)
		return ret;

	/* it worked --> adopt it on netdev side */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return 0;
}

/* Read MII register @index of the internal PHY.  Returns the (host-order)
 * register value or a negative errno.  Serialized by phy_mutex.
 */
static int mcs7830_read_phy(struct usbnet *dev, u8 index)
{
	int ret;
	int i;
	__le16 val;

	u8 cmd[2] = {
		HIF_REG_PHY_CMD1_READ | HIF_REG_PHY_CMD1_PHYADDR,
		HIF_REG_PHY_CMD2_PEND_FLAG_BIT | index,
	};

	mutex_lock(&dev->phy_mutex);
	/* write the MII command */
	ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
	if (ret < 0)
		goto out;

	/* wait for the data to become valid, should be within < 1ms */
	for (i = 0; i < 10; i++) {
		ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
		if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT))
			break;
		ret = -EIO;
		msleep(1);
	}
	if (ret < 0)
		goto out;

	/* read actual register contents */
	ret = mcs7830_get_reg(dev, HIF_REG_PHY_DATA, 2, &val);
	if (ret < 0)
		goto out;
	ret = le16_to_cpu(val);
	dev_dbg(&dev->udev->dev, "read PHY reg %02x: %04x (%d tries)\n",
		index, val, i);
out:
	mutex_unlock(&dev->phy_mutex);
	return ret;
}

/* Write @val to MII register @index of the internal PHY.  Returns 0 or a
 * negative errno.  Serialized by phy_mutex.
 */
static int mcs7830_write_phy(struct usbnet *dev, u8 index, u16 val)
{
	int ret;
	int i;
	__le16 le_val;

	u8 cmd[2] = {
		HIF_REG_PHY_CMD1_WRITE | HIF_REG_PHY_CMD1_PHYADDR,
		HIF_REG_PHY_CMD2_PEND_FLAG_BIT | (index & 0x1F),
	};

	mutex_lock(&dev->phy_mutex);

	/* write the new register contents */
	le_val = cpu_to_le16(val);
	ret = mcs7830_set_reg(dev, HIF_REG_PHY_DATA, 2, &le_val);
	if (ret < 0)
		goto out;

	/* write the MII command */
	ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
	if (ret < 0)
		goto out;

	/* wait for the command to be accepted by the PHY */
	for (i = 0; i < 10; i++) {
		ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd);
		if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT))
			break;
		ret = -EIO;
		msleep(1);
	}
	if (ret < 0)
		goto out;

	ret = 0;
	dev_dbg(&dev->udev->dev, "write PHY reg %02x: %04x (%d tries)\n",
		index, val, i);
out:
	mutex_unlock(&dev->phy_mutex);
	return ret;
}

/*
 * This algorithm comes from the original mcs7830 version 1.4 driver,
 * not sure if it is needed.
*/ static int mcs7830_set_autoneg(struct usbnet *dev, int ptrUserPhyMode) { int ret; /* Enable all media types */ ret = mcs7830_write_phy(dev, MII_ADVERTISE, MCS7830_MII_ADVERTISE); /* First reset BMCR */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, 0x0000); /* Enable Auto Neg */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, BMCR_ANENABLE); /* Restart Auto Neg (Keep the Enable Auto Neg Bit Set) */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART ); return ret; } /* * if we can read register 22, the chip revision is C or higher */ static int mcs7830_get_rev(struct usbnet *dev) { u8 dummy[2]; int ret; ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy); if (ret > 0) return 2; /* Rev C or later */ return 1; /* earlier revision */ } /* * On rev. C we need to set the pause threshold */ static void mcs7830_rev_C_fixup(struct usbnet *dev) { u8 pause_threshold = HIF_REG_PAUSE_THRESHOLD_DEFAULT; int retry; for (retry = 0; retry < 2; retry++) { if (mcs7830_get_rev(dev) == 2) { dev_info(&dev->udev->dev, "applying rev.C fixup\n"); mcs7830_set_reg(dev, HIF_REG_PAUSE_THRESHOLD, 1, &pause_threshold); } msleep(1); } } static int mcs7830_mdio_read(struct net_device *netdev, int phy_id, int location) { struct usbnet *dev = netdev_priv(netdev); return mcs7830_read_phy(dev, location); } static void mcs7830_mdio_write(struct net_device *netdev, int phy_id, int location, int val) { struct usbnet *dev = netdev_priv(netdev); mcs7830_write_phy(dev, location, val); } static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(net); return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev) { return (struct mcs7830_data *)&dev->data; } static void mcs7830_hif_update_multicast_hash(struct usbnet *dev) { struct mcs7830_data *data = mcs7830_get_data(dev); mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH, sizeof 
data->multi_filter, data->multi_filter); } static void mcs7830_hif_update_config(struct usbnet *dev) { /* implementation specific to data->config (argument needs to be heap-based anyway - USB DMA!) */ struct mcs7830_data *data = mcs7830_get_data(dev); mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config); } static void mcs7830_data_set_multicast(struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct mcs7830_data *data = mcs7830_get_data(dev); memset(data->multi_filter, 0, sizeof data->multi_filter); data->config = HIF_REG_CONFIG_TXENABLE; /* this should not be needed, but it doesn't work otherwise */ data->config |= HIF_REG_CONFIG_ALLMULTICAST; if (net->flags & IFF_PROMISC) { data->config |= HIF_REG_CONFIG_PROMISCUOUS; } else if (net->flags & IFF_ALLMULTI || netdev_mc_count(net) > MCS7830_MAX_MCAST) { data->config |= HIF_REG_CONFIG_ALLMULTICAST; } else if (netdev_mc_empty(net)) { /* just broadcast and directed */ } else { /* We use the 20 byte dev->data * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ struct netdev_hw_addr *ha; u32 crc_bits; /* Build the multicast hash filter. */ netdev_for_each_mc_addr(ha, net) { crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } } } static int mcs7830_apply_base_config(struct usbnet *dev) { int ret; /* re-configure known MAC (suspend case etc.) 
*/ ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr); if (ret) { dev_info(&dev->udev->dev, "Cannot set MAC address\n"); goto out; } /* Set up PHY */ ret = mcs7830_set_autoneg(dev, 0); if (ret) { dev_info(&dev->udev->dev, "Cannot set autoneg\n"); goto out; } mcs7830_hif_update_multicast_hash(dev); mcs7830_hif_update_config(dev); mcs7830_rev_C_fixup(dev); ret = 0; out: return ret; } /* credits go to asix_set_multicast */ static void mcs7830_set_multicast(struct net_device *net) { struct usbnet *dev = netdev_priv(net); mcs7830_data_set_multicast(net); mcs7830_hif_update_multicast_hash(dev); mcs7830_hif_update_config(dev); } static int mcs7830_get_regs_len(struct net_device *net) { struct usbnet *dev = netdev_priv(net); switch (mcs7830_get_rev(dev)) { case 1: return 21; case 2: return 32; } return 0; } static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo) { usbnet_get_drvinfo(net, drvinfo); drvinfo->regdump_len = mcs7830_get_regs_len(net); } static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data) { struct usbnet *dev = netdev_priv(net); regs->version = mcs7830_get_rev(dev); mcs7830_get_reg(dev, 0, regs->len, data); } static const struct ethtool_ops mcs7830_ethtool_ops = { .get_drvinfo = mcs7830_get_drvinfo, .get_regs_len = mcs7830_get_regs_len, .get_regs = mcs7830_get_regs, /* common usbnet calls */ .get_link = usbnet_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_settings = usbnet_get_settings, .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, }; static const struct net_device_ops mcs7830_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = mcs7830_ioctl, .ndo_set_multicast_list = mcs7830_set_multicast, .ndo_set_mac_address = mcs7830_set_mac_address, }; 
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) { struct net_device *net = dev->net; int ret; int retry; /* Initial startup: Gather MAC address setting from EEPROM */ ret = -EINVAL; for (retry = 0; retry < 5 && ret; retry++) ret = mcs7830_hif_get_mac_address(dev, net->dev_addr); if (ret) { dev_warn(&dev->udev->dev, "Cannot read MAC address\n"); goto out; } mcs7830_data_set_multicast(net); ret = mcs7830_apply_base_config(dev); if (ret) goto out; net->ethtool_ops = &mcs7830_ethtool_ops; net->netdev_ops = &mcs7830_netdev_ops; /* reserve space for the status byte on rx */ dev->rx_urb_size = ETH_FRAME_LEN + 1; dev->mii.mdio_read = mcs7830_mdio_read; dev->mii.mdio_write = mcs7830_mdio_write; dev->mii.dev = net; dev->mii.phy_id_mask = 0x3f; dev->mii.reg_num_mask = 0x1f; dev->mii.phy_id = *((u8 *) net->dev_addr + 1); ret = usbnet_get_endpoints(dev, udev); out: return ret; } /* The chip always appends a status byte that we need to strip */ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { u8 status; if (skb->len == 0) { dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); return 0; } skb_trim(skb, skb->len - 1); status = skb->data[skb->len]; if (status != MCS7830_RX_FRAME_CORRECT) { dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status); /* hmm, perhaps usbnet.c already sees a globally visible frame error and increments rx_errors on its own already? 
*/ dev->net->stats.rx_errors++; if (status & (MCS7830_RX_SHORT_FRAME |MCS7830_RX_LENGTH_ERROR |MCS7830_RX_LARGE_FRAME)) dev->net->stats.rx_length_errors++; if (status & MCS7830_RX_ALIGNMENT_ERROR) dev->net->stats.rx_frame_errors++; if (status & MCS7830_RX_CRC_ERROR) dev->net->stats.rx_crc_errors++; } return skb->len > 0; } static const struct driver_info moschip_info = { .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, .flags = FLAG_ETHER, .in = 1, .out = 2, }; static const struct driver_info sitecom_info = { .description = "Sitecom LN-30 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, .flags = FLAG_ETHER, .in = 1, .out = 2, }; static const struct usb_device_id products[] = { { USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID), .driver_info = (unsigned long) &sitecom_info, }, {}, }; MODULE_DEVICE_TABLE(usb, products); static int mcs7830_reset_resume (struct usb_interface *intf) { /* YES, this function is successful enough that ethtool -d does show same output pre-/post-suspend */ struct usbnet *dev = usb_get_intfdata(intf); mcs7830_apply_base_config(dev); usbnet_resume(intf); return 0; } static struct usb_driver mcs7830_driver = { .name = driver_name, .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .reset_resume = mcs7830_reset_resume, }; static int __init mcs7830_init(void) { return usb_register(&mcs7830_driver); } module_init(mcs7830_init); static void __exit mcs7830_exit(void) { usb_deregister(&mcs7830_driver); } module_exit(mcs7830_exit); MODULE_DESCRIPTION("USB to network adapter MCS7830)"); 
MODULE_LICENSE("GPL");
gpl-2.0
AdiPat/android_kernel_htc_pico
drivers/infiniband/hw/mthca/mthca_provider.c
3580
36511
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> #include <rdma/ib_user_verbs.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/mm.h> #include "mthca_dev.h" #include "mthca_cmd.h" #include "mthca_user.h" #include "mthca_memfree.h" static void init_query_mad(struct ib_smp *mad) { mad->base_version = 1; mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; mad->class_version = 1; mad->method = IB_MGMT_METHOD_GET; } static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; struct mthca_dev *mdev = to_mdev(ibdev); u8 status; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; memset(props, 0, sizeof *props); props->fw_ver = mdev->fw_ver; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(mdev, 1, 1, 1, NULL, NULL, in_mad, out_mad, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } props->device_cap_flags = mdev->device_cap_flags; props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); props->max_mr_size = ~0ull; props->page_size_cap = mdev->limits.page_size_cap; props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; props->max_qp_wr = mdev->limits.max_wqes; props->max_sge = mdev->limits.max_sg; props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs; props->max_cqe = mdev->limits.max_cqes; props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws; props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds; props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma; props->max_res_rd_atom = props->max_qp_rd_atom * 
props->max_qp; props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs; props->max_srq_wr = mdev->limits.max_srq_wqes; props->max_srq_sge = mdev->limits.max_srq_sge; props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay; props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; props->max_pkeys = mdev->limits.pkey_table_len; props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms; props->max_mcast_qp_attach = MTHCA_QP_PER_MGM; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; /* * If Sinai memory key optimization is being used, then only * the 8-bit key portion will change. For other HCAs, the * unused index bits will also be used for FMR remapping. */ if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) props->max_map_per_fmr = 255; else props->max_map_per_fmr = (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1; err = 0; out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; u8 status; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; memset(props, 0, sizeof *props); init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); props->lmc = out_mad->data[34] & 0x7; props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); props->sm_sl = out_mad->data[36] & 0xf; props->state = out_mad->data[32] & 0xf; props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; 
props->max_msg_sz = 0x80000000; props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; props->active_speed = out_mad->data[35] >> 4; props->max_mtu = out_mad->data[41] & 0xf; props->active_mtu = out_mad->data[36] >> 4; props->subnet_timeout = out_mad->data[51] & 0x1f; props->max_vl_num = out_mad->data[37] >> 4; props->init_type_reply = out_mad->data[41] >> 4; out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; if (mask & IB_DEVICE_MODIFY_NODE_DESC) { if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) return -ERESTARTSYS; memcpy(ibdev->node_desc, props->node_desc, 64); mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); } return 0; } static int mthca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, struct ib_port_modify *props) { struct mthca_set_ib_param set_ib; struct ib_port_attr attr; int err; u8 status; if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) return -ERESTARTSYS; err = mthca_query_port(ibdev, port, &attr); if (err) goto out; set_ib.set_si_guid = 0; set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR); set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & ~props->clr_port_cap_mask; err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } out: mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); return err; } static int mthca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; u8 status; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if 
(!in_mad || !out_mad) goto out; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = cpu_to_be32(index / 32); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); out: kfree(in_mad); kfree(out_mad); return err; } static int mthca_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; u8 status; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } memcpy(gid->raw, out_mad->data + 8, 8); init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); out: kfree(in_mad); kfree(out_mad); return err; } static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { struct mthca_alloc_ucontext_resp uresp; struct mthca_ucontext *context; int err; if (!(to_mdev(ibdev)->active)) return ERR_PTR(-EAGAIN); memset(&uresp, 0, sizeof uresp); uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; if (mthca_is_memfree(to_mdev(ibdev))) uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size; else uresp.uarc_size = 0; context = kmalloc(sizeof *context, GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); if (err) { kfree(context); return ERR_PTR(err); } 
context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); if (IS_ERR(context->db_tab)) { err = PTR_ERR(context->db_tab); mthca_uar_free(to_mdev(ibdev), &context->uar); kfree(context); return ERR_PTR(err); } if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) { mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab); mthca_uar_free(to_mdev(ibdev), &context->uar); kfree(context); return ERR_PTR(-EFAULT); } context->reg_mr_warned = 0; return &context->ibucontext; } static int mthca_dealloc_ucontext(struct ib_ucontext *context) { mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar, to_mucontext(context)->db_tab); mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar); kfree(to_mucontext(context)); return 0; } static int mthca_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma) { if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, to_mucontext(context)->uar.pfn, PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; return 0; } static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct mthca_pd *pd; int err; pd = kmalloc(sizeof *pd, GFP_KERNEL); if (!pd) return ERR_PTR(-ENOMEM); err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); if (err) { kfree(pd); return ERR_PTR(err); } if (context) { if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { mthca_pd_free(to_mdev(ibdev), pd); kfree(pd); return ERR_PTR(-EFAULT); } } return &pd->ibpd; } static int mthca_dealloc_pd(struct ib_pd *pd) { mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); kfree(pd); return 0; } static struct ib_ah *mthca_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { int err; struct mthca_ah *ah; ah = kmalloc(sizeof *ah, GFP_ATOMIC); if (!ah) return ERR_PTR(-ENOMEM); err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah); if (err) { kfree(ah); 
return ERR_PTR(err); } return &ah->ibah; } static int mthca_ah_destroy(struct ib_ah *ah) { mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); kfree(ah); return 0; } static struct ib_srq *mthca_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) { struct mthca_create_srq ucmd; struct mthca_ucontext *context = NULL; struct mthca_srq *srq; int err; srq = kmalloc(sizeof *srq, GFP_KERNEL); if (!srq) return ERR_PTR(-ENOMEM); if (pd->uobject) { context = to_mucontext(pd->uobject->context); if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { err = -EFAULT; goto err_free; } err = mthca_map_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.db_index, ucmd.db_page); if (err) goto err_free; srq->mr.ibmr.lkey = ucmd.lkey; srq->db_index = ucmd.db_index; } err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), &init_attr->attr, srq); if (err && pd->uobject) mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.db_index); if (err) goto err_free; if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { mthca_free_srq(to_mdev(pd->device), srq); err = -EFAULT; goto err_free; } return &srq->ibsrq; err_free: kfree(srq); return ERR_PTR(err); } static int mthca_destroy_srq(struct ib_srq *srq) { struct mthca_ucontext *context; if (srq->uobject) { context = to_mucontext(srq->uobject->context); mthca_unmap_user_db(to_mdev(srq->device), &context->uar, context->db_tab, to_msrq(srq)->db_index); } mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); kfree(srq); return 0; } static struct ib_qp *mthca_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct mthca_create_qp ucmd; struct mthca_qp *qp; int err; if (init_attr->create_flags) return ERR_PTR(-EINVAL); switch (init_attr->qp_type) { case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: { struct mthca_ucontext *context; qp = kmalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); if (pd->uobject) { context = 
to_mucontext(pd->uobject->context); if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { kfree(qp); return ERR_PTR(-EFAULT); } err = mthca_map_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.sq_db_index, ucmd.sq_db_page); if (err) { kfree(qp); return ERR_PTR(err); } err = mthca_map_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.rq_db_index, ucmd.rq_db_page); if (err) { mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.sq_db_index); kfree(qp); return ERR_PTR(err); } qp->mr.ibmr.lkey = ucmd.lkey; qp->sq.db_index = ucmd.sq_db_index; qp->rq.db_index = ucmd.rq_db_index; } err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq), init_attr->qp_type, init_attr->sq_sig_type, &init_attr->cap, qp); if (err && pd->uobject) { context = to_mucontext(pd->uobject->context); mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.sq_db_index); mthca_unmap_user_db(to_mdev(pd->device), &context->uar, context->db_tab, ucmd.rq_db_index); } qp->ibqp.qp_num = qp->qpn; break; } case IB_QPT_SMI: case IB_QPT_GSI: { /* Don't allow userspace to create special QPs */ if (pd->uobject) return ERR_PTR(-EINVAL); qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 
0 : 1; err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), to_mcq(init_attr->send_cq), to_mcq(init_attr->recv_cq), init_attr->sq_sig_type, &init_attr->cap, qp->ibqp.qp_num, init_attr->port_num, to_msqp(qp)); break; } default: /* Don't support raw QPs */ return ERR_PTR(-ENOSYS); } if (err) { kfree(qp); return ERR_PTR(err); } init_attr->cap.max_send_wr = qp->sq.max; init_attr->cap.max_recv_wr = qp->rq.max; init_attr->cap.max_send_sge = qp->sq.max_gs; init_attr->cap.max_recv_sge = qp->rq.max_gs; init_attr->cap.max_inline_data = qp->max_inline_data; return &qp->ibqp; } static int mthca_destroy_qp(struct ib_qp *qp) { if (qp->uobject) { mthca_unmap_user_db(to_mdev(qp->device), &to_mucontext(qp->uobject->context)->uar, to_mucontext(qp->uobject->context)->db_tab, to_mqp(qp)->sq.db_index); mthca_unmap_user_db(to_mdev(qp->device), &to_mucontext(qp->uobject->context)->uar, to_mucontext(qp->uobject->context)->db_tab, to_mqp(qp)->rq.db_index); } mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); kfree(qp); return 0; } static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { struct mthca_create_cq ucmd; struct mthca_cq *cq; int nent; int err; if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) return ERR_PTR(-EINVAL); if (context) { if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return ERR_PTR(-EFAULT); err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.set_db_index, ucmd.set_db_page); if (err) return ERR_PTR(err); err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.arm_db_index, ucmd.arm_db_page); if (err) goto err_unmap_set; } cq = kmalloc(sizeof *cq, GFP_KERNEL); if (!cq) { err = -ENOMEM; goto err_unmap_arm; } if (context) { cq->buf.mr.ibmr.lkey = ucmd.lkey; cq->set_ci_db_index = ucmd.set_db_index; cq->arm_db_index = ucmd.arm_db_index; } for (nent = 1; nent <= entries; 
nent <<= 1) ; /* nothing */ err = mthca_init_cq(to_mdev(ibdev), nent, context ? to_mucontext(context) : NULL, context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, cq); if (err) goto err_free; if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { mthca_free_cq(to_mdev(ibdev), cq); goto err_free; } cq->resize_buf = NULL; return &cq->ibcq; err_free: kfree(cq); err_unmap_arm: if (context) mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.arm_db_index); err_unmap_set: if (context) mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, to_mucontext(context)->db_tab, ucmd.set_db_index); return ERR_PTR(err); } static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq, int entries) { int ret; spin_lock_irq(&cq->lock); if (cq->resize_buf) { ret = -EBUSY; goto unlock; } cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC); if (!cq->resize_buf) { ret = -ENOMEM; goto unlock; } cq->resize_buf->state = CQ_RESIZE_ALLOC; ret = 0; unlock: spin_unlock_irq(&cq->lock); if (ret) return ret; ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); if (ret) { spin_lock_irq(&cq->lock); kfree(cq->resize_buf); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); return ret; } cq->resize_buf->cqe = entries - 1; spin_lock_irq(&cq->lock); cq->resize_buf->state = CQ_RESIZE_READY; spin_unlock_irq(&cq->lock); return 0; } static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibcq->device); struct mthca_cq *cq = to_mcq(ibcq); struct mthca_resize_cq ucmd; u32 lkey; u8 status; int ret; if (entries < 1 || entries > dev->limits.max_cqes) return -EINVAL; mutex_lock(&cq->mutex); entries = roundup_pow_of_two(entries + 1); if (entries == ibcq->cqe + 1) { ret = 0; goto out; } if (cq->is_kernel) { ret = mthca_alloc_resize_buf(dev, cq, entries); if (ret) goto out; lkey = cq->resize_buf->buf.mr.ibmr.lkey; } else { if (ib_copy_from_udata(&ucmd, 
udata, sizeof ucmd)) { ret = -EFAULT; goto out; } lkey = ucmd.lkey; } ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status); if (status) ret = -EINVAL; if (ret) { if (cq->resize_buf) { mthca_free_cq_buf(dev, &cq->resize_buf->buf, cq->resize_buf->cqe); kfree(cq->resize_buf); spin_lock_irq(&cq->lock); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); } goto out; } if (cq->is_kernel) { struct mthca_cq_buf tbuf; int tcqe; spin_lock_irq(&cq->lock); if (cq->resize_buf->state == CQ_RESIZE_READY) { mthca_cq_resize_copy_cqes(cq); tbuf = cq->buf; tcqe = cq->ibcq.cqe; cq->buf = cq->resize_buf->buf; cq->ibcq.cqe = cq->resize_buf->cqe; } else { tbuf = cq->resize_buf->buf; tcqe = cq->resize_buf->cqe; } kfree(cq->resize_buf); cq->resize_buf = NULL; spin_unlock_irq(&cq->lock); mthca_free_cq_buf(dev, &tbuf, tcqe); } else ibcq->cqe = entries - 1; out: mutex_unlock(&cq->mutex); return ret; } static int mthca_destroy_cq(struct ib_cq *cq) { if (cq->uobject) { mthca_unmap_user_db(to_mdev(cq->device), &to_mucontext(cq->uobject->context)->uar, to_mucontext(cq->uobject->context)->db_tab, to_mcq(cq)->arm_db_index); mthca_unmap_user_db(to_mdev(cq->device), &to_mucontext(cq->uobject->context)->uar, to_mucontext(cq->uobject->context)->db_tab, to_mcq(cq)->set_ci_db_index); } mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); kfree(cq); return 0; } static inline u32 convert_access(int acc) { return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) | (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) | (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0) | MTHCA_MPT_FLAG_LOCAL_READ; } static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc) { struct mthca_mr *mr; int err; mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); err = mthca_mr_alloc_notrans(to_mdev(pd->device), to_mpd(pd)->pd_num, convert_access(acc), mr); if (err) { kfree(mr); return ERR_PTR(err); } mr->umem = NULL; return &mr->ibmr; } static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) { struct mthca_mr *mr; u64 *page_list; u64 total_size; unsigned long mask; int shift; int npages; int err; int i, j, n; mask = buffer_list[0].addr ^ *iova_start; total_size = 0; for (i = 0; i < num_phys_buf; ++i) { if (i != 0) mask |= buffer_list[i].addr; if (i != num_phys_buf - 1) mask |= buffer_list[i].addr + buffer_list[i].size; total_size += buffer_list[i].size; } if (mask & ~PAGE_MASK) return ERR_PTR(-EINVAL); shift = __ffs(mask | 1 << 31); buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1); buffer_list[0].addr &= ~0ull << shift; mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); npages = 0; for (i = 0; i < num_phys_buf; ++i) npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift; if (!npages) return &mr->ibmr; page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); if (!page_list) { kfree(mr); return ERR_PTR(-ENOMEM); } n = 0; for (i = 0; i < num_phys_buf; ++i) for (j = 0; j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift; ++j) page_list[n++] = buffer_list[i].addr + ((u64) j << shift); mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) " "in PD %x; shift %d, npages %d.\n", (unsigned long long) buffer_list[0].addr, (unsigned long long) *iova_start, to_mpd(pd)->pd_num, shift, npages); err = mthca_mr_alloc_phys(to_mdev(pd->device), to_mpd(pd)->pd_num, page_list, shift, npages, *iova_start, total_size, convert_access(acc), mr); if (err) { 
kfree(page_list); kfree(mr); return ERR_PTR(err); } kfree(page_list); mr->umem = NULL; return &mr->ibmr; } static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(pd->device); struct ib_umem_chunk *chunk; struct mthca_mr *mr; struct mthca_reg_mr ucmd; u64 *pages; int shift, n, len; int i, j, k; int err = 0; int write_mtt_size; if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) { if (!to_mucontext(pd->uobject->context)->reg_mr_warned) { mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n", current->comm); mthca_warn(dev, " Update libmthca to fix this.\n"); } ++to_mucontext(pd->uobject->context)->reg_mr_warned; ucmd.mr_attrs = 0; } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) return ERR_PTR(-EFAULT); mr = kmalloc(sizeof *mr, GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, ucmd.mr_attrs & MTHCA_MR_DMASYNC); if (IS_ERR(mr->umem)) { err = PTR_ERR(mr->umem); goto err; } shift = ffs(mr->umem->page_size) - 1; n = 0; list_for_each_entry(chunk, &mr->umem->chunk_list, list) n += chunk->nents; mr->mtt = mthca_alloc_mtt(dev, n); if (IS_ERR(mr->mtt)) { err = PTR_ERR(mr->mtt); goto err_umem; } pages = (u64 *) __get_free_page(GFP_KERNEL); if (!pages) { err = -ENOMEM; goto err_mtt; } i = n = 0; write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); list_for_each_entry(chunk, &mr->umem->chunk_list, list) for (j = 0; j < chunk->nmap; ++j) { len = sg_dma_len(&chunk->page_list[j]) >> shift; for (k = 0; k < len; ++k) { pages[i++] = sg_dma_address(&chunk->page_list[j]) + mr->umem->page_size * k; /* * Be friendly to write_mtt and pass it chunks * of appropriate size. 
*/ if (i == write_mtt_size) { err = mthca_write_mtt(dev, mr->mtt, n, pages, i); if (err) goto mtt_done; n += i; i = 0; } } } if (i) err = mthca_write_mtt(dev, mr->mtt, n, pages, i); mtt_done: free_page((unsigned long) pages); if (err) goto err_mtt; err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length, convert_access(acc), mr); if (err) goto err_mtt; return &mr->ibmr; err_mtt: mthca_free_mtt(dev, mr->mtt); err_umem: ib_umem_release(mr->umem); err: kfree(mr); return ERR_PTR(err); } static int mthca_dereg_mr(struct ib_mr *mr) { struct mthca_mr *mmr = to_mmr(mr); mthca_free_mr(to_mdev(mr->device), mmr); if (mmr->umem) ib_umem_release(mmr->umem); kfree(mmr); return 0; } static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_fmr_attr *fmr_attr) { struct mthca_fmr *fmr; int err; fmr = kmalloc(sizeof *fmr, GFP_KERNEL); if (!fmr) return ERR_PTR(-ENOMEM); memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr); err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, convert_access(mr_access_flags), fmr); if (err) { kfree(fmr); return ERR_PTR(err); } return &fmr->ibmr; } static int mthca_dealloc_fmr(struct ib_fmr *fmr) { struct mthca_fmr *mfmr = to_mfmr(fmr); int err; err = mthca_free_fmr(to_mdev(fmr->device), mfmr); if (err) return err; kfree(mfmr); return 0; } static int mthca_unmap_fmr(struct list_head *fmr_list) { struct ib_fmr *fmr; int err; u8 status; struct mthca_dev *mdev = NULL; list_for_each_entry(fmr, fmr_list, list) { if (mdev && to_mdev(fmr->device) != mdev) return -EINVAL; mdev = to_mdev(fmr->device); } if (!mdev) return 0; if (mthca_is_memfree(mdev)) { list_for_each_entry(fmr, fmr_list, list) mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr)); wmb(); } else list_for_each_entry(fmr, fmr_list, list) mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr)); err = mthca_SYNC_TPT(mdev, &status); if (err) return err; if (status) return -EINVAL; return 0; } static ssize_t show_rev(struct device *device, struct device_attribute *attr, char *buf) { 
struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev.dev); return sprintf(buf, "%x\n", dev->rev_id); } static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev.dev); return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32), (int) (dev->fw_ver >> 16) & 0xffff, (int) dev->fw_ver & 0xffff); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev.dev); switch (dev->pdev->device) { case PCI_DEVICE_ID_MELLANOX_TAVOR: return sprintf(buf, "MT23108\n"); case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT: return sprintf(buf, "MT25208 (MT23108 compat mode)\n"); case PCI_DEVICE_ID_MELLANOX_ARBEL: return sprintf(buf, "MT25208\n"); case PCI_DEVICE_ID_MELLANOX_SINAI: case PCI_DEVICE_ID_MELLANOX_SINAI_OLD: return sprintf(buf, "MT25204\n"); default: return sprintf(buf, "unknown\n"); } } static ssize_t show_board(struct device *device, struct device_attribute *attr, char *buf) { struct mthca_dev *dev = container_of(device, struct mthca_dev, ib_dev.dev); return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id); } static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); static struct device_attribute *mthca_dev_attributes[] = { &dev_attr_hw_rev, &dev_attr_fw_ver, &dev_attr_hca_type, &dev_attr_board_id }; static int mthca_init_node_data(struct mthca_dev *dev) { struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; int err = -ENOMEM; u8 status; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); if (!in_mad || !out_mad) goto out; init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; err = mthca_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, 
out_mad, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } memcpy(dev->ib_dev.node_desc, out_mad->data, 64); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad, &status); if (err) goto out; if (status) { err = -EINVAL; goto out; } if (mthca_is_memfree(dev)) dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); out: kfree(in_mad); kfree(out_mad); return err; } int mthca_register_device(struct mthca_dev *dev) { int ret; int i; ret = mthca_init_node_data(dev); if (ret) return ret; strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION; dev->ib_dev.uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.num_comp_vectors = 1; dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; dev->ib_dev.modify_device = mthca_modify_device; dev->ib_dev.modify_port = mthca_modify_port; dev->ib_dev.query_pkey = mthca_query_pkey; dev->ib_dev.query_gid = mthca_query_gid; dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext; 
dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext; dev->ib_dev.mmap = mthca_mmap_uar; dev->ib_dev.alloc_pd = mthca_alloc_pd; dev->ib_dev.dealloc_pd = mthca_dealloc_pd; dev->ib_dev.create_ah = mthca_ah_create; dev->ib_dev.query_ah = mthca_ah_query; dev->ib_dev.destroy_ah = mthca_ah_destroy; if (dev->mthca_flags & MTHCA_FLAG_SRQ) { dev->ib_dev.create_srq = mthca_create_srq; dev->ib_dev.modify_srq = mthca_modify_srq; dev->ib_dev.query_srq = mthca_query_srq; dev->ib_dev.destroy_srq = mthca_destroy_srq; dev->ib_dev.uverbs_cmd_mask |= (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ); if (mthca_is_memfree(dev)) dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; else dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; } dev->ib_dev.create_qp = mthca_create_qp; dev->ib_dev.modify_qp = mthca_modify_qp; dev->ib_dev.query_qp = mthca_query_qp; dev->ib_dev.destroy_qp = mthca_destroy_qp; dev->ib_dev.create_cq = mthca_create_cq; dev->ib_dev.resize_cq = mthca_resize_cq; dev->ib_dev.destroy_cq = mthca_destroy_cq; dev->ib_dev.poll_cq = mthca_poll_cq; dev->ib_dev.get_dma_mr = mthca_get_dma_mr; dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr; dev->ib_dev.reg_user_mr = mthca_reg_user_mr; dev->ib_dev.dereg_mr = mthca_dereg_mr; if (dev->mthca_flags & MTHCA_FLAG_FMR) { dev->ib_dev.alloc_fmr = mthca_alloc_fmr; dev->ib_dev.unmap_fmr = mthca_unmap_fmr; dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr; if (mthca_is_memfree(dev)) dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr; else dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr; } dev->ib_dev.attach_mcast = mthca_multicast_attach; dev->ib_dev.detach_mcast = mthca_multicast_detach; dev->ib_dev.process_mad = mthca_process_mad; if (mthca_is_memfree(dev)) { dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq; dev->ib_dev.post_send = mthca_arbel_post_send; dev->ib_dev.post_recv = mthca_arbel_post_receive; } else { 
dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq; dev->ib_dev.post_send = mthca_tavor_post_send; dev->ib_dev.post_recv = mthca_tavor_post_receive; } mutex_init(&dev->cap_mask_mutex); ret = ib_register_device(&dev->ib_dev, NULL); if (ret) return ret; for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) { ret = device_create_file(&dev->ib_dev.dev, mthca_dev_attributes[i]); if (ret) { ib_unregister_device(&dev->ib_dev); return ret; } } mthca_start_catas_poll(dev); return 0; } void mthca_unregister_device(struct mthca_dev *dev) { mthca_stop_catas_poll(dev); ib_unregister_device(&dev->ib_dev); }
gpl-2.0
andrea9a/oslab
net/9p/util.c
4604
3266
/* * net/9p/util.c * * This file contains some helper functions * * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/parser.h> #include <linux/idr.h> #include <linux/slab.h> #include <net/9p/9p.h> /** * struct p9_idpool - per-connection accounting for tag idpool * @lock: protects the pool * @pool: idr to allocate tag id from * */ struct p9_idpool { spinlock_t lock; struct idr pool; }; /** * p9_idpool_create - create a new per-connection id pool * */ struct p9_idpool *p9_idpool_create(void) { struct p9_idpool *p; p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); spin_lock_init(&p->lock); idr_init(&p->pool); return p; } EXPORT_SYMBOL(p9_idpool_create); /** * p9_idpool_destroy - create a new per-connection id pool * @p: idpool to destroy */ void p9_idpool_destroy(struct p9_idpool *p) { idr_destroy(&p->pool); kfree(p); } EXPORT_SYMBOL(p9_idpool_destroy); /** * p9_idpool_get - allocate numeric id from pool * @p: pool to allocate from * * Bugs: This seems to be an awful generic function, should it be in idr.c with * the lock included in struct idr? 
*/ int p9_idpool_get(struct p9_idpool *p) { int i; unsigned long flags; idr_preload(GFP_NOFS); spin_lock_irqsave(&p->lock, flags); /* no need to store exactly p, we just need something non-null */ i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT); spin_unlock_irqrestore(&p->lock, flags); idr_preload_end(); if (i < 0) return -1; p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p); return i; } EXPORT_SYMBOL(p9_idpool_get); /** * p9_idpool_put - release numeric id from pool * @id: numeric id which is being released * @p: pool to release id into * * Bugs: This seems to be an awful generic function, should it be in idr.c with * the lock included in struct idr? */ void p9_idpool_put(int id, struct p9_idpool *p) { unsigned long flags; p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p); spin_lock_irqsave(&p->lock, flags); idr_remove(&p->pool, id); spin_unlock_irqrestore(&p->lock, flags); } EXPORT_SYMBOL(p9_idpool_put); /** * p9_idpool_check - check if the specified id is available * @id: id to check * @p: pool to check */ int p9_idpool_check(int id, struct p9_idpool *p) { return idr_find(&p->pool, id) != NULL; } EXPORT_SYMBOL(p9_idpool_check);
gpl-2.0
sev3n85/android_kernel_samsung_s3ve3g
drivers/net/ethernet/neterion/vxge/vxge-main.c
4860
131486
/****************************************************************************** * This software may be used and distributed according to the terms of * the GNU General Public License (GPL), incorporated herein by reference. * Drivers based on or derived from this code fall under the GPL and must * retain the authorship, copyright and license notice. This file is not * a complete program and may only be used when the entire operating * system is licensed under the GPL. * See the file COPYING in this distribution for more information. * * vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O * Virtualized Server Adapter. * Copyright(c) 2002-2010 Exar Corp. * * The module loadable parameters that are supported by the driver and a brief * explanation of all the variables: * vlan_tag_strip: * Strip VLAN Tag enable/disable. Instructs the device to remove * the VLAN tag from all received tagged frames that are not * replicated at the internal L2 switch. * 0 - Do not strip the VLAN tag. * 1 - Strip the VLAN tag. * * addr_learn_en: * Enable learning the mac address of the guest OS interface in * a virtualization environment. * 0 - DISABLE * 1 - ENABLE * * max_config_port: * Maximum number of port to be supported. * MIN -1 and MAX - 2 * * max_config_vpath: * This configures the maximum no of VPATH configures for each * device function. * MIN - 1 and MAX - 17 * * max_config_dev: * This configures maximum no of Device function to be enabled. 
 * MIN - 1 and MAX - 17
 *
 ******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/net_tstamp.h>
#include <linux/prefetch.h>
#include <linux/module.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
	"Virtualized Server Adapter");

/* PCI IDs claimed by this driver: Titan adapters in WIN and UNI function modes. */
static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

/* Module-load parameters; see the header comment above for their meaning. */
VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

/*
 * Selector masks used by vxge_get_vpath_no() to spread TCP flows over the
 * active vpaths; entry [n-1] is the mask applied when n vpaths are in use.
 */
static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
		{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
/* Per-vpath bandwidth percentage; 0xFF appears to mean "use default" --
 * TODO confirm against vxge-config. */
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;

/* Returns non-zero while the adapter is marked up (card-up state bit set). */
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

/*
 * Drain completed TxDs of one fifo and free their skbs.  Completions are
 * polled under the tx queue lock in batches of NR_SKB_COMPLETED; the skbs
 * are freed after the lock is dropped.
 */
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}

/* Run Tx completion processing on every vpath's fifo. */
static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

/* Poll every vpath's ring for received frames. */
static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives*/
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.  Updates stats and restarts the carrier / tx queues.
 */
static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.  Updates stats and stops the carrier / tx queues.
 */
static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
	VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	/* Reserve headroom so the Ethernet II header ends up aligned. */
	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 *
 * DMA-map the skb data buffer of an RxD for device-to-host DMA and program
 * the descriptor with the bus address.  Returns 0 on success, -EIO when the
 * PCI mapping fails (skb remains owned by the caller).
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
			VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	/* On a mapping failure the just-allocated skb must be freed here. */
	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

/*
 * Hand a completed receive up the stack: record rx queue and protocol,
 * update per-ring u64 stats, attach the stripped VLAN tag when hardware
 * VLAN stripping is enabled, then pass the skb to GRO.
 */
static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	u64_stats_update_begin(&ring->stats.syncp);
	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;
	u64_stats_update_end(&ring->stats.syncp);

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ext_info->vlan &&
	    ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
		__vlan_hwaccel_put_tag(skb, ext_info->vlan);
	napi_gro_receive(ring->napi_p, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

/* Re-arm an RxD for reuse: sync its buffer back to the device, re-program
 * the descriptor and pre-post it to the ring. */
static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

/*
 * Post an RxD back to hardware.  Every VXGE_HW_RXSYNC_FREQ_CNT posts the
 * previously saved descriptor is posted with a write memory barrier and the
 * current one is remembered in *first_dtr for the next barrier post.
 */
static inline void vxge_post(int *dtr_cnt, void **first_dtr, void *post_dtr,
			     struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt %
VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);

		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}

/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh as yet un-processed frames, this function is called.
 * Large frames (> VXGE_LL_RX_COPY_THRESHOLD) are handed up zero-copy and
 * the RxD gets a fresh buffer; small frames are copied into a new skb and
 * the original buffer is recycled.  Processing stops when ring->budget
 * is exhausted.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		/* Hardware includes the FCS in the reported length. */
		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s :%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown UPV6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			/* Zero-copy path: attach a new buffer to the RxD and
			 * pass the filled skb up as-is. */
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					/* New buffer could not be mapped:
					 * keep the old one and drop frame. */
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			/* Copy path for small frames: memcpy into a fresh skb
			 * and recycle the original DMA buffer. */
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
				    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if (ring->rx_hwts) {
			/* Hardware rx timestamp is stored just past the
			 * frame data (after the stripped FCS). */
			struct skb_shared_hwtstamps *skb_hwts;
			u32 ns = *(u32 *)(skb->head +
				pkt_length);

			skb_hwts = skb_hwtstamps(skb);
			skb_hwts->hwtstamp = ns_to_ktime(ns);
			skb_hwts->syststamp.tv64 = 0;
		}

		/* rth_hash_type and rth_it_hit are non-zero regardless of
		 * whether rss is enabled. Only the rth_value is zero/non-zero
		 * if rss is disabled/enabled, so key off of that.
		 */
		if (ext_info.rth_value)
			skb->rxhash = ext_info.rth_value;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...",
		__func__, __LINE__);
	return VXGE_HW_OK;
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data have already DMA'ed into the NICs
 * internal memory.  Up to nr_skb skbs are appended to *skb_ptr for the
 * caller to free; *more is set when completions remain.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);

		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);

		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		u64_stats_update_begin(&fifo->stats.syncp);
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;
		u64_stats_update_end(&fifo->stats.syncp);

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
				&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	/* Descriptors were freed; the queue may be restartable now. */
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet: hash non-fragmented IPv4 TCP/UDP
 * port sums through the vpath_selector mask; everything else goes to 0. */
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if (!ip_is_fragment(ip)) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}

/* Returns TRUE when del_mac is already present in the vpath's software
 * mac address list, FALSE otherwise. */
static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}

/*
 * Append a mac address to the vpath's software list (bounded by
 * VXGE_MAX_LEARN_MAC_ADDR_CNT); multicast entries are counted separately.
 * Returns TRUE on success (or when the list is already full), FALSE on
 * allocation failure.
 */
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;
	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	if (is_multicast_ether_addr(mac->macaddr))
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
static enum vxge_hw_status
vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	/* Multicast entries may legitimately exist on several vpaths;
	 * a unicast entry replaces an existing duplicate. */
	if (is_multicast_ether_addr(mac->macaddr))
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

/*
 * Learn the mac address found at mac_header: find (or create) a vpath
 * DA-table entry for it, falling back to catch-basin mode on vpath 0 when
 * every DA table is full.  Returns the vpath index chosen, or -EPERM on
 * failure.
 */
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Optionally learn the frame's source mac (starts at offset ETH_ALEN,
	 * after the destination address). */
	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors .", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	/* Map the linear part of the skb as buffer 0 of the TxDL. */
	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p txdl_priv = %p "
			"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
			__func__, __LINE__, skb, txdl_priv,
			frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	/* Map and program each page fragment after the linear part. */
	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!skb_frag_size(frag))
			continue;

		dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag,
						    0, skb_frag_size(frag),
						    DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
				dev->name, __func__, __LINE__, i,
				(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					skb_frag_size(frag));
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			/* GSO set but no MSS: should not happen for frames
			 * within MTU; assert and unwind. */
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	/* Unwind: unmap the linear part and fragments 0..i-1, then free
	 * the reserved descriptor and drop the skb. */
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
			ring->ndev->name, __func__, __LINE__);
	/* Only posted descriptors own a mapped buffer to release. */
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	/* Only posted descriptors own mapped buffers to release. */
	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       skb_frag_size(frag), PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/* Remove a mac address from the vpath's software list; returns TRUE when
 * the entry was found and freed, FALSE otherwise. */
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			if (is_multicast_ether_addr(mac->macaddr))
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
static enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		/* Keep the software list in sync with the hardware table. */
		vxge_mac_list_del(vpath, mac);
	return status;
}

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the deivce flag, we
 * determine, if multicast address must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	/* Track IFF_ALLMULTI transitions and mirror them on every vpath. */
	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	/* Promiscuous mode is honoured only when mac learning is off. */
	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		/* Not enough DA-table room for the new set? fall back to
		 * all-multicast mode. */
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address,
						ETH_ALEN);

				if (is_multicast_ether_addr(
							mac_info.macaddr)) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"%s:%d Setting individual"
						"multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address,
						ETH_ALEN);

				if (is_multicast_ether_addr(
						mac_info.macaddr))
					break;
			}

			for (vpath_idx = 0; vpath_idx <
					vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id = 0;
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = VXGE_ALARM_MSIX_ID;

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	struct
__vxge_hw_device *hldev; int msix_id; hldev = pci_get_drvdata(vdev->pdev); vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id); vxge_hw_vpath_intr_disable(vpath->handle); if (vdev->config.intr_type == INTA) vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); else { msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; vxge_hw_vpath_msix_mask(vpath->handle, msix_id); vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); /* disable the alarm vector */ msix_id = (vpath->handle->vpath->hldev->first_vp_id * VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; vxge_hw_vpath_msix_mask(vpath->handle, msix_id); } } /* list all mac addresses from DA table */ static enum vxge_hw_status vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac) { enum vxge_hw_status status = VXGE_HW_OK; unsigned char macmask[ETH_ALEN]; unsigned char macaddr[ETH_ALEN]; status = vxge_hw_vpath_mac_addr_get(vpath->handle, macaddr, macmask); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "DA config list entry failed for vpath:%d", vpath->device_id); return status; } while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, macaddr, macmask); if (status != VXGE_HW_OK) break; } return status; } /* Store all mac addresses from the list to the DA table */ static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) { enum vxge_hw_status status = VXGE_HW_OK; struct macInfo mac_info; u8 *mac_address = NULL; struct list_head *entry, *next; memset(&mac_info, 0, sizeof(struct macInfo)); if (vpath->is_open) { list_for_each_safe(entry, next, &vpath->mac_addr_list) { mac_address = (u8 *)& ((struct vxge_mac_addrs *)entry)->macaddr; memcpy(mac_info.macaddr, mac_address, ETH_ALEN); ((struct vxge_mac_addrs *)entry)->state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; /* does this mac address already exist in da table? 
*/ status = vxge_search_mac_addr_in_da_table(vpath, &mac_info); if (status != VXGE_HW_OK) { /* Add this mac address to the DA table */ status = vxge_hw_vpath_mac_addr_add( vpath->handle, mac_info.macaddr, mac_info.macmask, VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "DA add entry failed for vpath:%d", vpath->device_id); ((struct vxge_mac_addrs *)entry)->state = VXGE_LL_MAC_ADDR_IN_LIST; } } } } return status; } /* Store all vlan ids from the list to the vid table */ static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) { enum vxge_hw_status status = VXGE_HW_OK; struct vxgedev *vdev = vpath->vdev; u16 vid; if (!vpath->is_open) return status; for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID) status = vxge_hw_vpath_vid_add(vpath->handle, vid); return status; } /* * vxge_reset_vpath * @vdev: pointer to vdev * @vp_id: vpath to reset * * Resets the vpath */ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) { enum vxge_hw_status status = VXGE_HW_OK; struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; int ret = 0; /* check if device is down already */ if (unlikely(!is_vxge_card_up(vdev))) return 0; /* is device reset already scheduled */ if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) return 0; if (vpath->handle) { if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { if (is_vxge_card_up(vdev) && vxge_hw_vpath_recover_from_reset(vpath->handle) != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_recover_from_reset" "failed for vpath:%d", vp_id); return status; } } else { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_reset failed for" "vpath:%d", vp_id); return status; } } else return VXGE_HW_FAIL; vxge_restore_vpath_mac_addr(vpath); vxge_restore_vpath_vid_table(vpath); /* Enable all broadcast */ vxge_hw_vpath_bcast_enable(vpath->handle); /* Enable all multicast */ if (vdev->all_multi_flg) { status = vxge_hw_vpath_mcast_enable(vpath->handle); if (status != VXGE_HW_OK) 
vxge_debug_init(VXGE_ERR, "%s:%d Enabling multicast failed", __func__, __LINE__); } /* Enable the interrupts */ vxge_vpath_intr_enable(vdev, vp_id); smp_wmb(); /* Enable the flow of traffic through the vpath */ vxge_hw_vpath_enable(vpath->handle); smp_wmb(); vxge_hw_vpath_rx_doorbell_init(vpath->handle); vpath->ring.last_status = VXGE_HW_OK; /* Vpath reset done */ clear_bit(vp_id, &vdev->vp_reset); /* Start the vpath queue */ if (netif_tx_queue_stopped(vpath->fifo.txq)) netif_tx_wake_queue(vpath->fifo.txq); return ret; } /* Configure CI */ static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev) { int i = 0; /* Enable CI for RTI */ if (vdev->config.intr_type == MSI_X) { for (i = 0; i < vdev->no_of_vpath; i++) { struct __vxge_hw_ring *hw_ring; hw_ring = vdev->vpaths[i].ring.handle; vxge_hw_vpath_dynamic_rti_ci_set(hw_ring); } } /* Enable CI for TTI */ for (i = 0; i < vdev->no_of_vpath; i++) { struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle; vxge_hw_vpath_tti_ci_set(hw_fifo); /* * For Inta (with or without napi), Set CI ON for only one * vpath. (Have only one free running timer). 
*/ if ((vdev->config.intr_type == INTA) && (i == 0)) break; } return; } static int do_vxge_reset(struct vxgedev *vdev, int event) { enum vxge_hw_status status; int ret = 0, vp_id, i; vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) { /* check if device is down already */ if (unlikely(!is_vxge_card_up(vdev))) return 0; /* is reset already scheduled */ if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) return 0; } if (event == VXGE_LL_FULL_RESET) { netif_carrier_off(vdev->ndev); /* wait for all the vpath reset to complete */ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { while (test_bit(vp_id, &vdev->vp_reset)) msleep(50); } netif_carrier_on(vdev->ndev); /* if execution mode is set to debug, don't reset the adapter */ if (unlikely(vdev->exec_mode)) { vxge_debug_init(VXGE_ERR, "%s: execution mode is debug, returning..", vdev->ndev->name); clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); netif_tx_stop_all_queues(vdev->ndev); return 0; } } if (event == VXGE_LL_FULL_RESET) { vxge_hw_device_wait_receive_idle(vdev->devh); vxge_hw_device_intr_disable(vdev->devh); switch (vdev->cric_err_event) { case VXGE_HW_EVENT_UNKNOWN: netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "unknown error", vdev->ndev->name); ret = -EPERM; goto out; case VXGE_HW_EVENT_RESET_START: break; case VXGE_HW_EVENT_RESET_COMPLETE: case VXGE_HW_EVENT_LINK_DOWN: case VXGE_HW_EVENT_LINK_UP: case VXGE_HW_EVENT_ALARM_CLEARED: case VXGE_HW_EVENT_ECCERR: case VXGE_HW_EVENT_MRPCIM_ECCERR: ret = -EPERM; goto out; case VXGE_HW_EVENT_FIFO_ERR: case VXGE_HW_EVENT_VPATH_ERR: break; case VXGE_HW_EVENT_CRITICAL_ERR: netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "serious error", vdev->ndev->name); /* SOP or device reset required */ /* This event is not currently used */ ret = -EPERM; goto out; case VXGE_HW_EVENT_SERR: 
netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "serious error", vdev->ndev->name); ret = -EPERM; goto out; case VXGE_HW_EVENT_SRPCIM_SERR: case VXGE_HW_EVENT_MRPCIM_SERR: ret = -EPERM; goto out; case VXGE_HW_EVENT_SLOT_FREEZE: netif_tx_stop_all_queues(vdev->ndev); vxge_debug_init(VXGE_ERR, "fatal: %s: Disabling device due to" "slot freeze", vdev->ndev->name); ret = -EPERM; goto out; default: break; } } if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) netif_tx_stop_all_queues(vdev->ndev); if (event == VXGE_LL_FULL_RESET) { status = vxge_reset_all_vpaths(vdev); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "fatal: %s: can not reset vpaths", vdev->ndev->name); ret = -EPERM; goto out; } } if (event == VXGE_LL_COMPL_RESET) { for (i = 0; i < vdev->no_of_vpath; i++) if (vdev->vpaths[i].handle) { if (vxge_hw_vpath_recover_from_reset( vdev->vpaths[i].handle) != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_recover_" "from_reset failed for vpath: " "%d", i); ret = -EPERM; goto out; } } else { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_reset failed for " "vpath:%d", i); ret = -EPERM; goto out; } } if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) { /* Reprogram the DA table with populated mac addresses */ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]); vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]); } /* enable vpath interrupts */ for (i = 0; i < vdev->no_of_vpath; i++) vxge_vpath_intr_enable(vdev, i); vxge_hw_device_intr_enable(vdev->devh); smp_wmb(); /* Indicate card up */ set_bit(__VXGE_STATE_CARD_UP, &vdev->state); /* Get the traffic to flow through the vpaths */ for (i = 0; i < vdev->no_of_vpath; i++) { vxge_hw_vpath_enable(vdev->vpaths[i].handle); smp_wmb(); vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); } netif_tx_wake_all_queues(vdev->ndev); } /* configure CI */ 
vxge_config_ci_for_tti_rti(vdev); out: vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); /* Indicate reset done */ if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); return ret; } /* * vxge_reset * @vdev: pointer to ll device * * driver may reset the chip on events of serr, eccerr, etc */ static void vxge_reset(struct work_struct *work) { struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task); if (!netif_running(vdev->ndev)) return; do_vxge_reset(vdev, VXGE_LL_FULL_RESET); } /** * vxge_poll - Receive handler when Receive Polling is used. * @dev: pointer to the device structure. * @budget: Number of packets budgeted to be processed in this iteration. * * This function comes into picture only if Receive side is being handled * through polling (called NAPI in linux). It mostly does what the normal * Rx interrupt handler does in terms of descriptor and packet processing * but not in an interrupt context. Also it will process a specified number * of packets at most in one iteration. This value is passed down by the * kernel as the function argument 'budget'. 
*/ static int vxge_poll_msix(struct napi_struct *napi, int budget) { struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); int pkts_processed; int budget_org = budget; ring->budget = budget; ring->pkts_processed = 0; vxge_hw_vpath_poll_rx(ring->handle); pkts_processed = ring->pkts_processed; if (ring->pkts_processed < budget_org) { napi_complete(napi); /* Re enable the Rx interrupts for the vpath */ vxge_hw_channel_msix_unmask( (struct __vxge_hw_channel *)ring->handle, ring->rx_vector_no); mmiowb(); } /* We are copying and returning the local variable, in case if after * clearing the msix interrupt above, if the interrupt fires right * away which can preempt this NAPI thread */ return pkts_processed; } static int vxge_poll_inta(struct napi_struct *napi, int budget) { struct vxgedev *vdev = container_of(napi, struct vxgedev, napi); int pkts_processed = 0; int i; int budget_org = budget; struct vxge_ring *ring; struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev); for (i = 0; i < vdev->no_of_vpath; i++) { ring = &vdev->vpaths[i].ring; ring->budget = budget; ring->pkts_processed = 0; vxge_hw_vpath_poll_rx(ring->handle); pkts_processed += ring->pkts_processed; budget -= ring->pkts_processed; if (budget <= 0) break; } VXGE_COMPLETE_ALL_TX(vdev); if (pkts_processed < budget_org) { napi_complete(napi); /* Re enable the Rx interrupts for the ring */ vxge_hw_device_unmask_all(hldev); vxge_hw_device_flush_io(hldev); } return pkts_processed; } #ifdef CONFIG_NET_POLL_CONTROLLER /** * vxge_netpoll - netpoll event handler entry point * @dev : pointer to the device structure. * Description: * This function will be called by upper layer to check for events on the * interface in situations where interrupts are disabled. It is used for * specific in-kernel networking tasks, such as remote consoles and kernel * debugging over the network (example netdump in RedHat). 
*/ static void vxge_netpoll(struct net_device *dev) { struct __vxge_hw_device *hldev; struct vxgedev *vdev; vdev = netdev_priv(dev); hldev = pci_get_drvdata(vdev->pdev); vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); if (pci_channel_offline(vdev->pdev)) return; disable_irq(dev->irq); vxge_hw_device_clear_tx_rx(hldev); vxge_hw_device_clear_tx_rx(hldev); VXGE_COMPLETE_ALL_RX(vdev); VXGE_COMPLETE_ALL_TX(vdev); enable_irq(dev->irq); vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); } #endif /* RTH configuration */ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) { enum vxge_hw_status status = VXGE_HW_OK; struct vxge_hw_rth_hash_types hash_types; u8 itable[256] = {0}; /* indirection table */ u8 mtable[256] = {0}; /* CPU to vpath mapping */ int index; /* * Filling * - itable with bucket numbers * - mtable with bucket-to-vpath mapping */ for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) { itable[index] = index; mtable[index] = index % vdev->no_of_vpath; } /* set indirection table, bucket-to-vpath mapping */ status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, vdev->no_of_vpath, mtable, itable, vdev->config.rth_bkt_sz); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "RTH indirection table configuration failed " "for vpath:%d", vdev->vpaths[0].device_id); return status; } /* Fill RTH hash types */ hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; hash_types.hash_type_tcpipv6ex_en = vdev->config.rth_hash_type_tcpipv6ex; hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; /* * Because the itable_set() method uses the active_table field * for the target virtual path the RTH config should be updated * for all VPATHs. 
The h/w only uses the lowest numbered VPATH * when steering frames. */ for (index = 0; index < vdev->no_of_vpath; index++) { status = vxge_hw_vpath_rts_rth_set( vdev->vpaths[index].handle, vdev->config.rth_algorithm, &hash_types, vdev->config.rth_bkt_sz); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "RTH configuration failed for vpath:%d", vdev->vpaths[index].device_id); return status; } } return status; } /* reset vpaths */ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) { enum vxge_hw_status status = VXGE_HW_OK; struct vxge_vpath *vpath; int i; for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &vdev->vpaths[i]; if (vpath->handle) { if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { if (is_vxge_card_up(vdev) && vxge_hw_vpath_recover_from_reset( vpath->handle) != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_recover_" "from_reset failed for vpath: " "%d", i); return status; } } else { vxge_debug_init(VXGE_ERR, "vxge_hw_vpath_reset failed for " "vpath:%d", i); return status; } } } return status; } /* close vpaths */ static void vxge_close_vpaths(struct vxgedev *vdev, int index) { struct vxge_vpath *vpath; int i; for (i = index; i < vdev->no_of_vpath; i++) { vpath = &vdev->vpaths[i]; if (vpath->handle && vpath->is_open) { vxge_hw_vpath_close(vpath->handle); vdev->stats.vpaths_open--; } vpath->is_open = 0; vpath->handle = NULL; } } /* open vpaths */ static int vxge_open_vpaths(struct vxgedev *vdev) { struct vxge_hw_vpath_attr attr; enum vxge_hw_status status; struct vxge_vpath *vpath; u32 vp_id = 0; int i; for (i = 0; i < vdev->no_of_vpath; i++) { vpath = &vdev->vpaths[i]; vxge_assert(vpath->is_configured); if (!vdev->titan1) { struct vxge_hw_vp_config *vcfg; vcfg = &vdev->devh->config.vp_config[vpath->device_id]; vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A; vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B; vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C; vcfg->tti.uec_a = TTI_T1A_TX_UFC_A; vcfg->tti.uec_b = TTI_T1A_TX_UFC_B; vcfg->tti.uec_c = 
TTI_T1A_TX_UFC_C(vdev->mtu); vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu); vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL; vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL; } attr.vp_id = vpath->device_id; attr.fifo_attr.callback = vxge_xmit_compl; attr.fifo_attr.txdl_term = vxge_tx_term; attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); attr.fifo_attr.userdata = &vpath->fifo; attr.ring_attr.callback = vxge_rx_1b_compl; attr.ring_attr.rxd_init = vxge_rx_initial_replenish; attr.ring_attr.rxd_term = vxge_rx_term; attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); attr.ring_attr.userdata = &vpath->ring; vpath->ring.ndev = vdev->ndev; vpath->ring.pdev = vdev->pdev; status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); if (status == VXGE_HW_OK) { vpath->fifo.handle = (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; vpath->ring.handle = (struct __vxge_hw_ring *)attr.ring_attr.userdata; vpath->fifo.tx_steering_type = vdev->config.tx_steering_type; vpath->fifo.ndev = vdev->ndev; vpath->fifo.pdev = vdev->pdev; if (vdev->config.tx_steering_type) vpath->fifo.txq = netdev_get_tx_queue(vdev->ndev, i); else vpath->fifo.txq = netdev_get_tx_queue(vdev->ndev, 0); vpath->fifo.indicate_max_pkts = vdev->config.fifo_indicate_max_pkts; vpath->fifo.tx_vector_no = 0; vpath->ring.rx_vector_no = 0; vpath->ring.rx_hwts = vdev->rx_hwts; vpath->is_open = 1; vdev->vp_handles[i] = vpath->handle; vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; vdev->stats.vpaths_open++; } else { vdev->stats.vpath_open_fail++; vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to " "open with status: %d", vdev->ndev->name, vpath->device_id, status); vxge_close_vpaths(vdev, 0); return -EPERM; } vp_id = vpath->handle->vpath->vp_id; vdev->vpaths_deployed |= vxge_mBIT(vp_id); } return VXGE_HW_OK; } /** * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing * if the interrupts are not within a range * @fifo: pointer to transmit fifo structure * Description: The function 
changes boundary timer and restriction timer * value depends on the traffic * Return Value: None */ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) { fifo->interrupt_count++; if (jiffies > fifo->jiffies + HZ / 100) { struct __vxge_hw_fifo *hw_fifo = fifo->handle; fifo->jiffies = jiffies; if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT && hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) { hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL; vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); } else if (hw_fifo->rtimer != 0) { hw_fifo->rtimer = 0; vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); } fifo->interrupt_count = 0; } } /** * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing * if the interrupts are not within a range * @ring: pointer to receive ring structure * Description: The function increases of decreases the packet counts within * the ranges of traffic utilization, if the interrupts due to this ring are * not within a fixed range. * Return Value: Nothing */ static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) { ring->interrupt_count++; if (jiffies > ring->jiffies + HZ / 100) { struct __vxge_hw_ring *hw_ring = ring->handle; ring->jiffies = jiffies; if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) { hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL; vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); } else if (hw_ring->rtimer != 0) { hw_ring->rtimer = 0; vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); } ring->interrupt_count = 0; } } /* * vxge_isr_napi * @irq: the irq of the device. * @dev_id: a void pointer to the hldev structure of the Titan device * @ptregs: pointer to the registers pushed on the stack. * * This function is the ISR handler of the device when napi is enabled. It * identifies the reason for the interrupt and calls the relevant service * routines. 
*/ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) { struct net_device *dev; struct __vxge_hw_device *hldev; u64 reason; enum vxge_hw_status status; struct vxgedev *vdev = (struct vxgedev *)dev_id; vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); dev = vdev->ndev; hldev = pci_get_drvdata(vdev->pdev); if (pci_channel_offline(vdev->pdev)) return IRQ_NONE; if (unlikely(!is_vxge_card_up(vdev))) return IRQ_HANDLED; status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason); if (status == VXGE_HW_OK) { vxge_hw_device_mask_all(hldev); if (reason & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT( vdev->vpaths_deployed >> (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) { vxge_hw_device_clear_tx_rx(hldev); napi_schedule(&vdev->napi); vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); return IRQ_HANDLED; } else vxge_hw_device_unmask_all(hldev); } else if (unlikely((status == VXGE_HW_ERR_VPATH) || (status == VXGE_HW_ERR_CRITICAL) || (status == VXGE_HW_ERR_FIFO))) { vxge_hw_device_mask_all(hldev); vxge_hw_device_flush_io(hldev); return IRQ_HANDLED; } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE)) return IRQ_HANDLED; vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); return IRQ_NONE; } #ifdef CONFIG_PCI_MSI static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) { struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; adaptive_coalesce_tx_interrupts(fifo); vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle, fifo->tx_vector_no); vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle, fifo->tx_vector_no); VXGE_COMPLETE_VPATH_TX(fifo); vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, fifo->tx_vector_no); mmiowb(); return IRQ_HANDLED; } static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id) { struct vxge_ring *ring = (struct vxge_ring *)dev_id; adaptive_coalesce_rx_interrupts(ring); vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, 
ring->rx_vector_no); vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, ring->rx_vector_no); napi_schedule(&ring->napi); return IRQ_HANDLED; } static irqreturn_t vxge_alarm_msix_handle(int irq, void *dev_id) { int i; enum vxge_hw_status status; struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; struct vxgedev *vdev = vpath->vdev; int msix_id = (vpath->handle->vpath->vp_id * VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; for (i = 0; i < vdev->no_of_vpath; i++) { /* Reduce the chance of losing alarm interrupts by masking * the vector. A pending bit will be set if an alarm is * generated and on unmask the interrupt will be fired. */ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); mmiowb(); status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, vdev->exec_mode); if (status == VXGE_HW_OK) { vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, msix_id); mmiowb(); continue; } vxge_debug_intr(VXGE_ERR, "%s: vxge_hw_vpath_alarm_process failed %x ", VXGE_DRIVER_NAME, status); } return IRQ_HANDLED; } static int vxge_alloc_msix(struct vxgedev *vdev) { int j, i, ret = 0; int msix_intr_vect = 0, temp; vdev->intr_cnt = 0; start: /* Tx/Rx MSIX Vectors count */ vdev->intr_cnt = vdev->no_of_vpath * 2; /* Alarm MSIX Vectors count */ vdev->intr_cnt++; vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry), GFP_KERNEL); if (!vdev->entries) { vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", VXGE_DRIVER_NAME); ret = -ENOMEM; goto alloc_entries_failed; } vdev->vxge_entries = kcalloc(vdev->intr_cnt, sizeof(struct vxge_msix_entry), GFP_KERNEL); if (!vdev->vxge_entries) { vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", VXGE_DRIVER_NAME); ret = -ENOMEM; goto alloc_vxge_entries_failed; } for (i = 0, j = 0; i < vdev->no_of_vpath; i++) { msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; /* Initialize the fifo vector */ vdev->entries[j].entry = msix_intr_vect; 
vdev->vxge_entries[j].entry = msix_intr_vect; vdev->vxge_entries[j].in_use = 0; j++; /* Initialize the ring vector */ vdev->entries[j].entry = msix_intr_vect + 1; vdev->vxge_entries[j].entry = msix_intr_vect + 1; vdev->vxge_entries[j].in_use = 0; j++; } /* Initialize the alarm vector */ vdev->entries[j].entry = VXGE_ALARM_MSIX_ID; vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; vdev->vxge_entries[j].in_use = 0; ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); if (ret > 0) { vxge_debug_init(VXGE_ERR, "%s: MSI-X enable failed for %d vectors, ret: %d", VXGE_DRIVER_NAME, vdev->intr_cnt, ret); if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) { ret = -ENODEV; goto enable_msix_failed; } kfree(vdev->entries); kfree(vdev->vxge_entries); vdev->entries = NULL; vdev->vxge_entries = NULL; /* Try with less no of vector by reducing no of vpaths count */ temp = (ret - 1)/2; vxge_close_vpaths(vdev, temp); vdev->no_of_vpath = temp; goto start; } else if (ret < 0) { ret = -ENODEV; goto enable_msix_failed; } return 0; enable_msix_failed: kfree(vdev->vxge_entries); alloc_vxge_entries_failed: kfree(vdev->entries); alloc_entries_failed: return ret; } static int vxge_enable_msix(struct vxgedev *vdev) { int i, ret = 0; /* 0 - Tx, 1 - Rx */ int tim_msix_id[4] = {0, 1, 0, 0}; vdev->intr_cnt = 0; /* allocate msix vectors */ ret = vxge_alloc_msix(vdev); if (!ret) { for (i = 0; i < vdev->no_of_vpath; i++) { struct vxge_vpath *vpath = &vdev->vpaths[i]; /* If fifo or ring are not enabled, the MSIX vector for * it should be set to 0. 
*/ vpath->ring.rx_vector_no = (vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE) + 1; vpath->fifo.tx_vector_no = (vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE); vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, VXGE_ALARM_MSIX_ID); } } return ret; } static void vxge_rem_msix_isr(struct vxgedev *vdev) { int intr_cnt; for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1); intr_cnt++) { if (vdev->vxge_entries[intr_cnt].in_use) { synchronize_irq(vdev->entries[intr_cnt].vector); free_irq(vdev->entries[intr_cnt].vector, vdev->vxge_entries[intr_cnt].arg); vdev->vxge_entries[intr_cnt].in_use = 0; } } kfree(vdev->entries); kfree(vdev->vxge_entries); vdev->entries = NULL; vdev->vxge_entries = NULL; if (vdev->config.intr_type == MSI_X) pci_disable_msix(vdev->pdev); } #endif static void vxge_rem_isr(struct vxgedev *vdev) { struct __vxge_hw_device *hldev; hldev = pci_get_drvdata(vdev->pdev); #ifdef CONFIG_PCI_MSI if (vdev->config.intr_type == MSI_X) { vxge_rem_msix_isr(vdev); } else #endif if (vdev->config.intr_type == INTA) { synchronize_irq(vdev->pdev->irq); free_irq(vdev->pdev->irq, vdev); } } static int vxge_add_isr(struct vxgedev *vdev) { int ret = 0; #ifdef CONFIG_PCI_MSI int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; int pci_fun = PCI_FUNC(vdev->pdev->devfn); if (vdev->config.intr_type == MSI_X) ret = vxge_enable_msix(vdev); if (ret) { vxge_debug_init(VXGE_ERR, "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); vxge_debug_init(VXGE_ERR, "%s: Defaulting to INTA", VXGE_DRIVER_NAME); vdev->config.intr_type = INTA; } if (vdev->config.intr_type == MSI_X) { for (intr_idx = 0; intr_idx < (vdev->no_of_vpath * VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE; irq_req = 0; switch (msix_idx) { case 0: snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d", vdev->ndev->name, vdev->entries[intr_cnt].entry, pci_fun, vp_idx); ret = request_irq( vdev->entries[intr_cnt].vector, 
vxge_tx_msix_handle, 0, vdev->desc[intr_cnt], &vdev->vpaths[vp_idx].fifo); vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx].fifo; irq_req = 1; break; case 1: snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d", vdev->ndev->name, vdev->entries[intr_cnt].entry, pci_fun, vp_idx); ret = request_irq( vdev->entries[intr_cnt].vector, vxge_rx_msix_napi_handle, 0, vdev->desc[intr_cnt], &vdev->vpaths[vp_idx].ring); vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx].ring; irq_req = 1; break; } if (ret) { vxge_debug_init(VXGE_ERR, "%s: MSIX - %d Registration failed", vdev->ndev->name, intr_cnt); vxge_rem_msix_isr(vdev); vdev->config.intr_type = INTA; vxge_debug_init(VXGE_ERR, "%s: Defaulting to INTA" , vdev->ndev->name); goto INTA_MODE; } if (irq_req) { /* We requested for this msix interrupt */ vdev->vxge_entries[intr_cnt].in_use = 1; msix_idx += vdev->vpaths[vp_idx].device_id * VXGE_HW_VPATH_MSIX_ACTIVE; vxge_hw_vpath_msix_unmask( vdev->vpaths[vp_idx].handle, msix_idx); intr_cnt++; } /* Point to next vpath handler */ if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) && (vp_idx < (vdev->no_of_vpath - 1))) vp_idx++; } intr_cnt = vdev->no_of_vpath * 2; snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, "%s:vxge:MSI-X %d - Alarm - fn:%d", vdev->ndev->name, vdev->entries[intr_cnt].entry, pci_fun); /* For Alarm interrupts */ ret = request_irq(vdev->entries[intr_cnt].vector, vxge_alarm_msix_handle, 0, vdev->desc[intr_cnt], &vdev->vpaths[0]); if (ret) { vxge_debug_init(VXGE_ERR, "%s: MSIX - %d Registration failed", vdev->ndev->name, intr_cnt); vxge_rem_msix_isr(vdev); vdev->config.intr_type = INTA; vxge_debug_init(VXGE_ERR, "%s: Defaulting to INTA", vdev->ndev->name); goto INTA_MODE; } msix_idx = (vdev->vpaths[0].handle->vpath->vp_id * VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, msix_idx); vdev->vxge_entries[intr_cnt].in_use = 1; vdev->vxge_entries[intr_cnt].arg = 
&vdev->vpaths[0]; } INTA_MODE: #endif if (vdev->config.intr_type == INTA) { snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge:INTA", vdev->ndev->name); vxge_hw_device_set_intr_type(vdev->devh, VXGE_HW_INTR_MODE_IRQLINE); vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle); ret = request_irq((int) vdev->pdev->irq, vxge_isr_napi, IRQF_SHARED, vdev->desc[0], vdev); if (ret) { vxge_debug_init(VXGE_ERR, "%s %s-%d: ISR registration failed", VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq); return -ENODEV; } vxge_debug_init(VXGE_TRACE, "new %s-%d line allocated", "IRQ", vdev->pdev->irq); } return VXGE_HW_OK; } static void vxge_poll_vp_reset(unsigned long data) { struct vxgedev *vdev = (struct vxgedev *)data; int i, j = 0; for (i = 0; i < vdev->no_of_vpath; i++) { if (test_bit(i, &vdev->vp_reset)) { vxge_reset_vpath(vdev, i); j++; } } if (j && (vdev->config.intr_type != MSI_X)) { vxge_hw_device_unmask_all(vdev->devh); vxge_hw_device_flush_io(vdev->devh); } mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2); } static void vxge_poll_vp_lockup(unsigned long data) { struct vxgedev *vdev = (struct vxgedev *)data; enum vxge_hw_status status = VXGE_HW_OK; struct vxge_vpath *vpath; struct vxge_ring *ring; int i; unsigned long rx_frms; for (i = 0; i < vdev->no_of_vpath; i++) { ring = &vdev->vpaths[i].ring; /* Truncated to machine word size number of frames */ rx_frms = ACCESS_ONCE(ring->stats.rx_frms); /* Did this vpath received any packets */ if (ring->stats.prev_rx_frms == rx_frms) { status = vxge_hw_vpath_check_leak(ring->handle); /* Did it received any packets last time */ if ((VXGE_HW_FAIL == status) && (VXGE_HW_FAIL == ring->last_status)) { /* schedule vpath reset */ if (!test_and_set_bit(i, &vdev->vp_reset)) { vpath = &vdev->vpaths[i]; /* disable interrupts for this vpath */ vxge_vpath_intr_disable(vdev, i); /* stop the queue for this vpath */ netif_tx_stop_queue(vpath->fifo.txq); continue; } } } ring->stats.prev_rx_frms = rx_frms; ring->last_status = status; } /* Check every 1 
milli second */ mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); } static netdev_features_t vxge_fix_features(struct net_device *dev, netdev_features_t features) { netdev_features_t changed = dev->features ^ features; /* Enabling RTH requires some of the logic in vxge_device_register and a * vpath reset. Due to these restrictions, only allow modification * while the interface is down. */ if ((changed & NETIF_F_RXHASH) && netif_running(dev)) features ^= NETIF_F_RXHASH; return features; } static int vxge_set_features(struct net_device *dev, netdev_features_t features) { struct vxgedev *vdev = netdev_priv(dev); netdev_features_t changed = dev->features ^ features; if (!(changed & NETIF_F_RXHASH)) return 0; /* !netif_running() ensured by vxge_fix_features() */ vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH); if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) { dev->features = features ^ NETIF_F_RXHASH; vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH); return -EIO; } return 0; } /** * vxge_open * @dev: pointer to the device structure. * * This function is the open entry point of the driver. It mainly calls a * function to allocate Rx buffers and inserts them into the buffer * descriptors and then enables the Rx part of the NIC. * Return value: '0' on success and an appropriate (-)ve integer as * defined in errno.h file on failure. 
*/
static int vxge_open(struct net_device *dev)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	int ret = 0;
	int i;
	u64 val64, function_mode;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);
	function_mode = vdev->config.device_hw_info.function_mode;

	/* make sure you have link off by default every time Nic is
	 * initialized */
	netif_carrier_off(dev);

	/* Open VPATHs */
	status = vxge_open_vpaths(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: Vpath open failed", vdev->ndev->name);
		ret = -EPERM;
		goto out0;
	}

	vdev->mtu = dev->mtu;

	status = vxge_add_isr(vdev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: fatal: ISR add failed", dev->name);
		ret = -EPERM;
		goto out1;
	}

	/* One shared NAPI context for INTA, one per vpath ring for MSI-X. */
	if (vdev->config.intr_type != MSI_X) {
		netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
			vdev->config.napi_weight);
		napi_enable(&vdev->napi);
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vpath->ring.napi_p = &vdev->napi;
		}
	} else {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			netif_napi_add(dev, &vpath->ring.napi,
			    vxge_poll_msix, vdev->config.napi_weight);
			napi_enable(&vpath->ring.napi);
			vpath->ring.napi_p = &vpath->ring.napi;
		}
	}

	/* configure RTH */
	if (vdev->config.rth_steering) {
		status = vxge_rth_configure(vdev);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: RTH configuration failed",
				dev->name);
			ret = -EPERM;
			goto out2;
		}
	}
	printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
	       hldev->config.rth_en ? "enabled" : "disabled");

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		/* set initial mtu before enabling the device */
		status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
		if (status != VXGE_HW_OK) {
			vxge_debug_init(VXGE_ERR,
				"%s: fatal: can not set new MTU", dev->name);
			ret = -EPERM;
			goto out2;
		}
	}

	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
	vxge_debug_init(vdev->level_trace,
		"%s: MTU is %d", vdev->ndev->name, vdev->mtu);
	VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);

	/* Restore the DA, VID table and also multicast and promiscuous mode
	 * states
	 */
	if (vdev->all_multi_flg) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_restore_vpath_mac_addr(vpath);
			vxge_restore_vpath_vid_table(vpath);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling multicast failed",
					__func__, __LINE__);
		}
	}

	/* Enable vpath to sniff all unicast/multicast traffic that not
	 * addressed to them. We allow promiscuous mode for PF only
	 */
	val64 = 0;
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_addr),
		val64);

	vxge_hw_mgmt_reg_write(vdev->devh,
		vxge_hw_mgmt_reg_type_mrpcim,
		0,
		(ulong)offsetof(struct vxge_hw_mrpcim_reg,
			rxmac_authorize_all_vid),
		val64);

	vxge_set_multicast(dev);

	/* Enabling Bcast and mcast for all vpath */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];
		status = vxge_hw_vpath_bcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s : Can not enable bcast for vpath "
				"id %d", dev->name, i);
		if (vdev->config.addr_learn_en) {
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR,
					"%s : Can not enable mcast for vpath "
					"id %d", dev->name, i);
		}
	}

	vxge_hw_device_setpause_data(vdev->devh, 0,
		vdev->config.tx_pause_enable,
		vdev->config.rx_pause_enable);

	/* Arm the maintenance timers only if not already initialized. */
	if (vdev->vp_reset_timer.function == NULL)
		vxge_os_timer(vdev->vp_reset_timer,
			vxge_poll_vp_reset, vdev, (HZ/2));

	/* There is no need to check for RxD leak and RxD lookup on Titan1A */
	if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
		vxge_os_timer(vdev->vp_lockup_timer,
			vxge_poll_vp_lockup, vdev, HZ / 2);

	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	smp_wmb();

	if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
		netif_carrier_on(vdev->ndev);
		netdev_notice(vdev->ndev, "Link Up\n");
		vdev->stats.link_up++;
	}

	vxge_hw_device_intr_enable(vdev->devh);

	smp_wmb();

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath = &vdev->vpaths[i];

		vxge_hw_vpath_enable(vpath->handle);
		smp_wmb();
		vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	}

	netif_tx_start_all_queues(vdev->ndev);

	/* configure CI */
	vxge_config_ci_for_tti_rti(vdev);

	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
	return ret;
}

/* Loop through the mac address list and delete all the entries */
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;

	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		/* entry is cast straight to the containing object; assumes
		 * the list node is the first member of struct
		 * vxge_mac_addrs -- TODO confirm against its definition */
		kfree((struct vxge_mac_addrs *)entry);
	}
}

/* Delete every NAPI context created by vxge_open(). */
static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;

	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}

/*
 * do_vxge_close - common interface-down path.
 * @dev:   net device being closed.
 * @do_io: non-zero when touching the hardware is safe; PCI error
 *         recovery passes 0 to skip all register access.
 */
static int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);
		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}

	/* The lockup timer is only armed on Titan1 (see vxge_open). */
	if (vdev->titan1)
		del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	if (do_io)
		vxge_hw_device_wait_receive_idle(hldev);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	netdev_notice(vdev->ndev, "Link Down\n");
	netif_tx_stop_all_queues(vdev->ndev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}

/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver.
It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}

/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

	/* Full close/open cycle: the HW MTU is only programmed during
	 * vxge_open() (vxge_hw_vpath_mtu_set). */
	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d Exiting...", __func__, __LINE__);

	return 0;
}

/**
 * vxge_get_stats64
 * @dev: pointer to the device structure
 * @net_stats: pointer to struct rtnl_link_stats64 to fill in
 *
 * Aggregates per-vpath rx/tx counters into @net_stats.
 */
static struct rtnl_link_stats64 *
vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int k;

	/* net_stats already zeroed by caller */
	for (k = 0; k < vdev->no_of_vpath; k++) {
		struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
		unsigned int start;
		u64 packets, bytes, multicast;

		/* u64_stats retry loops yield a consistent 64-bit snapshot
		 * even on 32-bit hosts */
		do {
			start = u64_stats_fetch_begin(&rxstats->syncp);

			packets   = rxstats->rx_frms;
			multicast = rxstats->rx_mcast;
			bytes     = rxstats->rx_bytes;
		} while (u64_stats_fetch_retry(&rxstats->syncp, start));

		net_stats->rx_packets += packets;
		net_stats->rx_bytes += bytes;
		net_stats->multicast += multicast;

		net_stats->rx_errors += rxstats->rx_errors;
		net_stats->rx_dropped += rxstats->rx_dropped;

		do {
			start = u64_stats_fetch_begin(&txstats->syncp);

			packets = txstats->tx_frms;
			bytes   = txstats->tx_bytes;
		} while (u64_stats_fetch_retry(&txstats->syncp, start));

		net_stats->tx_packets += packets;
		net_stats->tx_bytes += bytes;
		net_stats->tx_errors += txstats->tx_errors;
	}

	return net_stats;
}

/* Enable device-wide hardware rx timestamping (HWTS). */
static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
{
	enum vxge_hw_status status;
	u64 val64;

	/* Timestamp is passed to the driver via the FCS, therefore we
	 * must disable the FCS stripping by the adapter.  Since this is
	 * required for the driver to load (due to a hardware bug),
	 * there is no need to do anything special here.
*/
	val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
		VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
		VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);

	status = vxge_hw_mgmt_reg_write(devh, vxge_hw_mgmt_reg_type_mrpcim, 0,
				offsetof(struct vxge_hw_mrpcim_reg,
					xmac_timestamp),
				val64);
	vxge_hw_device_flush_io(devh);
	devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
	return status;
}

/*
 * vxge_hwtstamp_ioctl - SIOCSHWTSTAMP handler.
 * Only "timestamp nothing" or "timestamp all rx packets" is supported;
 * all PTP filters collapse to HWTSTAMP_FILTER_ALL. Tx timestamping is
 * rejected. The (possibly adjusted) config is copied back to user space.
 */
static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
{
	struct hwtstamp_config config;
	int i;

	if (copy_from_user(&config, data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* Transmit HW Timestamp not supported */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ON:
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		vdev->rx_hwts = 0;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Requires HWTS to have been enabled at probe time. */
		if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
			return -EFAULT;

		vdev->rx_hwts = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;

	default:
		return -ERANGE;
	}

	/* Propagate the new setting to every rx ring. */
	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;

	if (copy_to_user(data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 *       a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *       can be passed to the IOCTL functions.
*
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	/* Defer the actual device reset to process context. */
	schedule_work(&vdev->reset_task);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Add the vlan id to the devices vlan id table
 */
static int
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath;
	int vp_id;

	/* Add these vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
	set_bit(vid, vdev->active_vlans);
	return 0;
}

/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
* @vid: vid * * Remove the vlan id from the device's vlan id table */ static int vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct vxgedev *vdev = netdev_priv(dev); struct vxge_vpath *vpath; int vp_id; vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); /* Delete this vlan from the vid table */ for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { vpath = &vdev->vpaths[vp_id]; if (!vpath->is_open) continue; vxge_hw_vpath_vid_delete(vpath->handle, vid); } vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); clear_bit(vid, vdev->active_vlans); return 0; } static const struct net_device_ops vxge_netdev_ops = { .ndo_open = vxge_open, .ndo_stop = vxge_close, .ndo_get_stats64 = vxge_get_stats64, .ndo_start_xmit = vxge_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = vxge_set_multicast, .ndo_do_ioctl = vxge_ioctl, .ndo_set_mac_address = vxge_set_mac_addr, .ndo_change_mtu = vxge_change_mtu, .ndo_fix_features = vxge_fix_features, .ndo_set_features = vxge_set_features, .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, .ndo_tx_timeout = vxge_tx_watchdog, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vxge_netpoll, #endif }; static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, struct vxge_config *config, int high_dma, int no_of_vpath, struct vxgedev **vdev_out) { struct net_device *ndev; enum vxge_hw_status status = VXGE_HW_OK; struct vxgedev *vdev; int ret = 0, no_of_queue = 1; u64 stat; *vdev_out = NULL; if (config->tx_steering_type) no_of_queue = no_of_vpath; ndev = alloc_etherdev_mq(sizeof(struct vxgedev), no_of_queue); if (ndev == NULL) { vxge_debug_init( vxge_hw_device_trace_level_get(hldev), "%s : device allocation failed", __func__); ret = -ENODEV; goto _out0; } vxge_debug_entryexit( vxge_hw_device_trace_level_get(hldev), "%s: %s:%d Entering...", ndev->name, __func__, __LINE__); vdev = netdev_priv(ndev); memset(vdev, 0, 
sizeof(struct vxgedev)); vdev->ndev = ndev; vdev->devh = hldev; vdev->pdev = hldev->pdev; memcpy(&vdev->config, config, sizeof(struct vxge_config)); vdev->rx_hwts = 0; vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION); SET_NETDEV_DEV(ndev, &vdev->pdev->dev); ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; if (vdev->config.rth_steering != NO_STEERING) ndev->hw_features |= NETIF_F_RXHASH; ndev->features |= ndev->hw_features | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; /* Driver entry points */ ndev->irq = vdev->pdev->irq; ndev->base_addr = (unsigned long) hldev->bar0; ndev->netdev_ops = &vxge_netdev_ops; ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; INIT_WORK(&vdev->reset_task, vxge_reset); vxge_initialize_ethtool_ops(ndev); /* Allocate memory for vpath */ vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * no_of_vpath, GFP_KERNEL); if (!vdev->vpaths) { vxge_debug_init(VXGE_ERR, "%s: vpath memory allocation failed", vdev->ndev->name); ret = -ENOMEM; goto _out1; } vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s : checksuming enabled", __func__); if (high_dma) { ndev->features |= NETIF_F_HIGHDMA; vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s : using High DMA", __func__); } ret = register_netdev(ndev); if (ret) { vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s: %s : device registration failed!", ndev->name, __func__); goto _out2; } /* Set the factory defined MAC address initially */ ndev->addr_len = ETH_ALEN; /* Make Link state as off at this point, when the Link change * interrupt comes the state will be automatically changed to * the right state. 
*/ netif_carrier_off(ndev); vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s: Ethernet device registered", ndev->name); hldev->ndev = ndev; *vdev_out = vdev; /* Resetting the Device stats */ status = vxge_hw_mrpcim_stats_access( hldev, VXGE_HW_STATS_OP_CLEAR_ALL_STATS, 0, 0, &stat); if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION) vxge_debug_init( vxge_hw_device_trace_level_get(hldev), "%s: device stats clear returns" "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name); vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev), "%s: %s:%d Exiting...", ndev->name, __func__, __LINE__); return ret; _out2: kfree(vdev->vpaths); _out1: free_netdev(ndev); _out0: return ret; } /* * vxge_device_unregister * * This function will unregister and free network device */ static void vxge_device_unregister(struct __vxge_hw_device *hldev) { struct vxgedev *vdev; struct net_device *dev; char buf[IFNAMSIZ]; dev = hldev->ndev; vdev = netdev_priv(dev); vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); strncpy(buf, dev->name, IFNAMSIZ); flush_work_sync(&vdev->reset_task); /* in 2.6 will call stop() if device is up */ unregister_netdev(dev); kfree(vdev->vpaths); /* we are safe to free it now */ free_netdev(dev); vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", buf); vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, __func__, __LINE__); } /* * vxge_callback_crit_err * * This function is called by the alarm handler in interrupt context. * Driver must analyze it based on the event type. 
*/
static void vxge_callback_crit_err(struct __vxge_hw_device *hldev,
					enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath = NULL;
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	/* Find the vpath the event was reported against (if any). */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->device_id == vp_id)
			break;
	}

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
		(type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		  (type == VXGE_HW_EVENT_VPATH_ERR)) {

		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {

				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				netif_tx_stop_queue(vpath->fifo.txq);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}

/* Sanitize the module-parameter bandwidth split across all vpaths. */
static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1.
If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs*/
				band_width =
				    (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else {
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] =
							band_width;
				}
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
			VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}
}

/*
 * Vpath configuration
 * Decides how many vpaths to use (possibly from the online CPU count)
 * and fills in per-vpath fifo/ring/tti/rti parameters; returns the
 * number of vpaths actually configured.
 */
static int __devinit vxge_config_vpaths(
			struct vxge_hw_device_config *device_config,
			u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
		(max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPU.
Return vpath number as zero.*/
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus = num_online_cpus();

		/* Budget roughly half the remaining CPUs as vpaths. */
		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (!vxge_bVALn(vpath_mask, i, 1))
				continue;
			else
				default_no_vpath++;

		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth =
				bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}

		/* Configure Tx fifo's */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
				MAX_SKB_FRAGS + 1;
		device_config->vp_config[i].fifo.memblock_size =
			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

		txdl_size = device_config->vp_config[i].fifo.max_frags *
				sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector has a handler of its own) -
		 * Set CI to OFF for all vpaths
		 */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000)/272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a = RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b = RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c = RTI_RX_URANGE_C;
		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag = vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}

/* initialize device configurations */
static void __devinit vxge_device_config_init(
				struct vxge_hw_device_config *device_config,
				int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
	vxge_debug_init(VXGE_ERR,
		"%s: This Kernel does not support "
		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
	*intr_type = INTA;
#endif

	/* Configure whether MSI-X or IRQL. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
		break;
	}

	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}

/* Log the effective interrupt/steering/per-vpath settings at probe time. */
static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
		break;
	}
}

#ifdef CONFIG_PM
/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}
/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}

#endif

/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. * At this point, the card has exprienced a hard reset, * followed by fixups by BIOS, and has its config space * set up identically to what it was at cold boot. */ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) { struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); struct net_device *netdev = hldev->ndev; struct vxgedev *vdev = netdev_priv(netdev); if (pci_enable_device(pdev)) { netdev_err(netdev, "Cannot re-enable device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); do_vxge_reset(vdev, VXGE_LL_FULL_RESET); return PCI_ERS_RESULT_RECOVERED; } /** * vxge_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells * us that its OK to resume normal operation. */ static void vxge_io_resume(struct pci_dev *pdev) { struct __vxge_hw_device *hldev = pci_get_drvdata(pdev); struct net_device *netdev = hldev->ndev; if (netif_running(netdev)) { if (vxge_open(netdev)) { netdev_err(netdev, "Can't bring device back up after reset\n"); return; } } netif_device_attach(netdev); } static inline u32 vxge_get_num_vfs(u64 function_mode) { u32 num_functions = 0; switch (function_mode) { case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: case VXGE_HW_FUNCTION_MODE_SRIOV_8: num_functions = 8; break; case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: num_functions = 1; break; case VXGE_HW_FUNCTION_MODE_SRIOV: case VXGE_HW_FUNCTION_MODE_MRIOV: case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17: num_functions = 17; break; case VXGE_HW_FUNCTION_MODE_SRIOV_4: num_functions = 4; break; case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2: num_functions = 2; break; case VXGE_HW_FUNCTION_MODE_MRIOV_8: num_functions = 8; /* TODO */ break; } return num_functions; } int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override) { struct __vxge_hw_device *hldev = vdev->devh; u32 maj, min, bld, 
cmaj, cmin, cbld; enum vxge_hw_status status; const struct firmware *fw; int ret; ret = request_firmware(&fw, fw_name, &vdev->pdev->dev); if (ret) { vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found", VXGE_DRIVER_NAME, fw_name); goto out; } /* Load the new firmware onto the adapter */ status = vxge_update_fw_image(hldev, fw->data, fw->size); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: FW image download to adapter failed '%s'.", VXGE_DRIVER_NAME, fw_name); ret = -EIO; goto out; } /* Read the version of the new firmware */ status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: Upgrade read version failed '%s'.", VXGE_DRIVER_NAME, fw_name); ret = -EIO; goto out; } cmaj = vdev->config.device_hw_info.fw_version.major; cmin = vdev->config.device_hw_info.fw_version.minor; cbld = vdev->config.device_hw_info.fw_version.build; /* It's possible the version in /lib/firmware is not the latest version. * If so, we could get into a loop of trying to upgrade to the latest * and flashing the older version. */ if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) && !override) { ret = -EINVAL; goto out; } printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n", maj, min, bld); /* Flash the adapter with the new firmware */ status = vxge_hw_flash_fw(hldev); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.", VXGE_DRIVER_NAME, fw_name); ret = -EIO; goto out; } printk(KERN_NOTICE "Upgrade of firmware successful! 
Adapter must be " "hard reset before using, thus requiring a system reboot or a " "hotplug event.\n"); out: release_firmware(fw); return ret; } static int vxge_probe_fw_update(struct vxgedev *vdev) { u32 maj, min, bld; int ret, gpxe = 0; char *fw_name; maj = vdev->config.device_hw_info.fw_version.major; min = vdev->config.device_hw_info.fw_version.minor; bld = vdev->config.device_hw_info.fw_version.build; if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER) return 0; /* Ignore the build number when determining if the current firmware is * "too new" to load the driver */ if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) { vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known " "version, unable to load driver\n", VXGE_DRIVER_NAME); return -EINVAL; } /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to * work with this driver. */ if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) { vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be " "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld); return -EINVAL; } /* If file not specified, determine gPXE or not */ if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) { int i; for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) if (vdev->devh->eprom_versions[i]) { gpxe = 1; break; } } if (gpxe) fw_name = "vxge/X3fw-pxe.ncf"; else fw_name = "vxge/X3fw.ncf"; ret = vxge_fw_upgrade(vdev, fw_name, 0); /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on * probe, so ignore them */ if (ret != -EINVAL && ret != -ENOENT) return -EIO; else ret = 0; if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) > VXGE_FW_VER(maj, min, 0)) { vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to" " be used with this driver.\n" "Please get the latest version from " "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE", VXGE_DRIVER_NAME, maj, min, bld); return -EINVAL; } return ret; } static int __devinit is_sriov_initialized(struct pci_dev *pdev) { int pos; u16 ctrl; pos = pci_find_ext_capability(pdev, 
PCI_EXT_CAP_ID_SRIOV); if (pos) { pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl); if (ctrl & PCI_SRIOV_CTRL_VFE) return 1; } return 0; } static const struct vxge_hw_uld_cbs vxge_callbacks = { .link_up = vxge_callback_link_up, .link_down = vxge_callback_link_down, .crit_err = vxge_callback_crit_err, }; /** * vxge_probe * @pdev : structure containing the PCI related information of the device. * @pre: List of PCI devices supported by the driver listed in vxge_id_table. * Description: * This function is called when a new PCI device gets detected and initializes * it. * Return value: * returns 0 on success and negative on failure. * */ static int __devinit vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) { struct __vxge_hw_device *hldev; enum vxge_hw_status status; int ret; int high_dma = 0; u64 vpath_mask = 0; struct vxgedev *vdev; struct vxge_config *ll_config = NULL; struct vxge_hw_device_config *device_config = NULL; struct vxge_hw_device_attr attr; int i, j, no_of_vpath = 0, max_vpath_supported = 0; u8 *macaddr; struct vxge_mac_addrs *entry; static int bus = -1, device = -1; u32 host_type; u8 new_device = 0; enum vxge_hw_status is_privileged; u32 function_mode; u32 num_vfs = 0; vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); attr.pdev = pdev; /* In SRIOV-17 mode, functions of the same adapter * can be deployed on different buses */ if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) && !pdev->is_virtfn) new_device = 1; bus = pdev->bus->number; device = PCI_SLOT(pdev->devfn); if (new_device) { if (driver_config->config_dev_cnt && (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) vxge_debug_init(VXGE_ERR, "%s: Configured %d of %d devices", VXGE_DRIVER_NAME, driver_config->config_dev_cnt, driver_config->total_dev_cnt); driver_config->config_dev_cnt = 0; driver_config->total_dev_cnt = 0; } /* Now making the CPU based no of vpath calculation * applicable for individual functions as well. 
*/ driver_config->g_no_cpus = 0; driver_config->vpath_per_dev = max_config_vpath; driver_config->total_dev_cnt++; if (++driver_config->config_dev_cnt > max_config_dev) { ret = 0; goto _exit0; } device_config = kzalloc(sizeof(struct vxge_hw_device_config), GFP_KERNEL); if (!device_config) { ret = -ENOMEM; vxge_debug_init(VXGE_ERR, "device_config : malloc failed %s %d", __FILE__, __LINE__); goto _exit0; } ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL); if (!ll_config) { ret = -ENOMEM; vxge_debug_init(VXGE_ERR, "device_config : malloc failed %s %d", __FILE__, __LINE__); goto _exit0; } ll_config->tx_steering_type = TX_MULTIQ_STEERING; ll_config->intr_type = MSI_X; ll_config->napi_weight = NEW_NAPI_WEIGHT; ll_config->rth_steering = RTH_STEERING; /* get the default configuration parameters */ vxge_hw_device_config_default_get(device_config); /* initialize configuration parameters */ vxge_device_config_init(device_config, &ll_config->intr_type); ret = pci_enable_device(pdev); if (ret) { vxge_debug_init(VXGE_ERR, "%s : can not enable PCI device", __func__); goto _exit0; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { vxge_debug_ll_config(VXGE_TRACE, "%s : using 64bit DMA", __func__); high_dma = 1; if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { vxge_debug_init(VXGE_ERR, "%s : unable to obtain 64bit DMA for " "consistent allocations", __func__); ret = -ENOMEM; goto _exit1; } } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { vxge_debug_ll_config(VXGE_TRACE, "%s : using 32bit DMA", __func__); } else { ret = -ENOMEM; goto _exit1; } ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME); if (ret) { vxge_debug_init(VXGE_ERR, "%s : request regions failed", __func__); goto _exit1; } pci_set_master(pdev); attr.bar0 = pci_ioremap_bar(pdev, 0); if (!attr.bar0) { vxge_debug_init(VXGE_ERR, "%s : cannot remap io memory bar0", __func__); ret = -ENODEV; goto _exit2; } vxge_debug_ll_config(VXGE_TRACE, "pci ioremap bar0: %p:0x%llx", attr.bar0, (unsigned long 
long)pci_resource_start(pdev, 0)); status = vxge_hw_device_hw_info_get(attr.bar0, &ll_config->device_hw_info); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: Reading of hardware info failed." "Please try upgrading the firmware.", VXGE_DRIVER_NAME); ret = -EINVAL; goto _exit3; } vpath_mask = ll_config->device_hw_info.vpath_mask; if (vpath_mask == 0) { vxge_debug_ll_config(VXGE_TRACE, "%s: No vpaths available in device", VXGE_DRIVER_NAME); ret = -EINVAL; goto _exit3; } vxge_debug_ll_config(VXGE_TRACE, "%s:%d Vpath mask = %llx", __func__, __LINE__, (unsigned long long)vpath_mask); function_mode = ll_config->device_hw_info.function_mode; host_type = ll_config->device_hw_info.host_type; is_privileged = __vxge_hw_device_is_privilaged(host_type, ll_config->device_hw_info.func_id); /* Check how many vpaths are available */ for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!((vpath_mask) & vxge_mBIT(i))) continue; max_vpath_supported++; } if (new_device) num_vfs = vxge_get_num_vfs(function_mode) - 1; /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ if (is_sriov(function_mode) && !is_sriov_initialized(pdev) && (ll_config->intr_type != INTA)) { ret = pci_enable_sriov(pdev, num_vfs); if (ret) vxge_debug_ll_config(VXGE_ERR, "Failed in enabling SRIOV mode: %d\n", ret); /* No need to fail out, as an error here is non-fatal */ } /* * Configure vpaths and get driver configured number of vpaths * which is less than or equal to the maximum vpaths per function. 
*/ no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config); if (!no_of_vpath) { vxge_debug_ll_config(VXGE_ERR, "%s: No more vpaths to configure", VXGE_DRIVER_NAME); ret = 0; goto _exit3; } /* Setting driver callbacks */ attr.uld_callbacks = &vxge_callbacks; status = vxge_hw_device_initialize(&hldev, &attr, device_config); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "Failed to initialize device (%d)", status); ret = -EINVAL; goto _exit3; } if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major, ll_config->device_hw_info.fw_version.minor, ll_config->device_hw_info.fw_version.build) >= VXGE_EPROM_FW_VER) { struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES]; status = vxge_hw_vpath_eprom_img_ver_get(hldev, img); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed", VXGE_DRIVER_NAME); /* This is a non-fatal error, continue */ } for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) { hldev->eprom_versions[i] = img[i].version; if (!img[i].is_valid) break; vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " "%d.%d.%d.%d", VXGE_DRIVER_NAME, i, VXGE_EPROM_IMG_MAJOR(img[i].version), VXGE_EPROM_IMG_MINOR(img[i].version), VXGE_EPROM_IMG_FIX(img[i].version), VXGE_EPROM_IMG_BUILD(img[i].version)); } } /* if FCS stripping is not disabled in MAC fail driver load */ status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC" " failing driver load", VXGE_DRIVER_NAME); ret = -EINVAL; goto _exit4; } /* Always enable HWTS. This will always cause the FCS to be invalid, * due to the fact that HWTS is using the FCS as the location of the * timestamp. The HW FCS checking will still correctly determine if * there is a valid checksum, and the FCS is being removed by the driver * anyway. So no fucntionality is being lost. Since it is always * enabled, we now simply use the ioctl call to set whether or not the * driver should be paying attention to the HWTS. 
*/ if (is_privileged == VXGE_HW_OK) { status = vxge_timestamp_config(hldev); if (status != VXGE_HW_OK) { vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed", VXGE_DRIVER_NAME); ret = -EFAULT; goto _exit4; } } vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); /* set private device info */ pci_set_drvdata(pdev, hldev); ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; ll_config->addr_learn_en = addr_learn_en; ll_config->rth_algorithm = RTH_ALG_JENKINS; ll_config->rth_hash_type_tcpipv4 = 1; ll_config->rth_hash_type_ipv4 = 0; ll_config->rth_hash_type_tcpipv6 = 0; ll_config->rth_hash_type_ipv6 = 0; ll_config->rth_hash_type_tcpipv6ex = 0; ll_config->rth_hash_type_ipv6ex = 0; ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, &vdev); if (ret) { ret = -EINVAL; goto _exit4; } ret = vxge_probe_fw_update(vdev); if (ret) goto _exit5; vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), vxge_hw_device_trace_level_get(hldev)); /* set private HW device info */ vdev->mtu = VXGE_HW_DEFAULT_MTU; vdev->bar0 = attr.bar0; vdev->max_vpath_supported = max_vpath_supported; vdev->no_of_vpath = no_of_vpath; /* Virtual Path count */ for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { if (!vxge_bVALn(vpath_mask, i, 1)) continue; if (j >= vdev->no_of_vpath) break; vdev->vpaths[j].is_configured = 1; vdev->vpaths[j].device_id = i; vdev->vpaths[j].ring.driver_id = j; vdev->vpaths[j].vdev = vdev; vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; memcpy((u8 *)vdev->vpaths[j].macaddr, ll_config->device_hw_info.mac_addrs[i], ETH_ALEN); /* Initialize the mac address list header */ INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list); vdev->vpaths[j].mac_addr_cnt = 0; vdev->vpaths[j].mcast_addr_cnt = 0; j++; } vdev->exec_mode = 
VXGE_EXEC_MODE_DISABLE; vdev->max_config_port = max_config_port; vdev->vlan_tag_strip = vlan_tag_strip; /* map the hashing selector table to the configured vpaths */ for (i = 0; i < vdev->no_of_vpath; i++) vdev->vpath_selector[i] = vpath_selector[i]; macaddr = (u8 *)vdev->vpaths[0].macaddr; ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", vdev->ndev->name, ll_config->device_hw_info.serial_number); vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", vdev->ndev->name, ll_config->device_hw_info.part_number); vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", vdev->ndev->name, ll_config->device_hw_info.product_desc); vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", vdev->ndev->name, macaddr); vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); vxge_debug_init(VXGE_TRACE, "%s: Firmware version : %s Date : %s", vdev->ndev->name, ll_config->device_hw_info.fw_version.version, ll_config->device_hw_info.fw_date.date); if (new_device) { switch (ll_config->device_hw_info.function_mode) { case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: vxge_debug_init(VXGE_TRACE, "%s: Single Function Mode Enabled", vdev->ndev->name); break; case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: vxge_debug_init(VXGE_TRACE, "%s: Multi Function Mode Enabled", vdev->ndev->name); break; case VXGE_HW_FUNCTION_MODE_SRIOV: vxge_debug_init(VXGE_TRACE, "%s: Single Root IOV Mode Enabled", vdev->ndev->name); break; case VXGE_HW_FUNCTION_MODE_MRIOV: vxge_debug_init(VXGE_TRACE, "%s: Multi Root IOV Mode Enabled", vdev->ndev->name); break; } } vxge_print_parm(vdev, vpath_mask); /* Store the fw version for ethttool option */ strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, 
ETH_ALEN); memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); /* Copy the station mac address to the list */ for (i = 0; i < vdev->no_of_vpath; i++) { entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL); if (NULL == entry) { vxge_debug_init(VXGE_ERR, "%s: mac_addr_list : memory allocation failed", vdev->ndev->name); ret = -EPERM; goto _exit6; } macaddr = (u8 *)&entry->macaddr; memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); list_add(&entry->item, &vdev->vpaths[i].mac_addr_list); vdev->vpaths[i].mac_addr_cnt = 1; } kfree(device_config); /* * INTA is shared in multi-function mode. This is unlike the INTA * implementation in MR mode, where each VH has its own INTA message. * - INTA is masked (disabled) as long as at least one function sets * its TITAN_MASK_ALL_INT.ALARM bit. * - INTA is unmasked (enabled) when all enabled functions have cleared * their own TITAN_MASK_ALL_INT.ALARM bit. * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up. * Though this driver leaves the top level interrupts unmasked while * leaving the required module interrupt bits masked on exit, there * could be a rougue driver around that does not follow this procedure * resulting in a failure to generate interrupts. The following code is * present to prevent such a failure. 
*/ if (ll_config->device_hw_info.function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) if (vdev->config.intr_type == INTA) vxge_hw_device_unmask_all(hldev); vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), vxge_hw_device_trace_level_get(hldev)); kfree(ll_config); return 0; _exit6: for (i = 0; i < vdev->no_of_vpath; i++) vxge_free_mac_add_list(&vdev->vpaths[i]); _exit5: vxge_device_unregister(hldev); _exit4: pci_set_drvdata(pdev, NULL); vxge_hw_device_terminate(hldev); pci_disable_sriov(pdev); _exit3: iounmap(attr.bar0); _exit2: pci_release_region(pdev, 0); _exit1: pci_disable_device(pdev); _exit0: kfree(ll_config); kfree(device_config); driver_config->config_dev_cnt--; driver_config->total_dev_cnt--; return ret; } /** * vxge_rem_nic - Free the PCI device * @pdev: structure containing the PCI related information of the device. * Description: This function is called by the Pci subsystem to release a * PCI device and free up all resource held up by the device. 
*/ static void __devexit vxge_remove(struct pci_dev *pdev) { struct __vxge_hw_device *hldev; struct vxgedev *vdev; int i; hldev = pci_get_drvdata(pdev); if (hldev == NULL) return; vdev = netdev_priv(hldev->ndev); vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", __func__); for (i = 0; i < vdev->no_of_vpath; i++) vxge_free_mac_add_list(&vdev->vpaths[i]); vxge_device_unregister(hldev); pci_set_drvdata(pdev, NULL); /* Do not call pci_disable_sriov here, as it will break child devices */ vxge_hw_device_terminate(hldev); iounmap(vdev->bar0); pci_release_region(pdev, 0); pci_disable_device(pdev); driver_config->config_dev_cnt--; driver_config->total_dev_cnt--; vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", __func__, __LINE__); vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, __LINE__); } static struct pci_error_handlers vxge_err_handler = { .error_detected = vxge_io_error_detected, .slot_reset = vxge_io_slot_reset, .resume = vxge_io_resume, }; static struct pci_driver vxge_driver = { .name = VXGE_DRIVER_NAME, .id_table = vxge_id_table, .probe = vxge_probe, .remove = __devexit_p(vxge_remove), #ifdef CONFIG_PM .suspend = vxge_pm_suspend, .resume = vxge_pm_resume, #endif .err_handler = &vxge_err_handler, }; static int __init vxge_starter(void) { int ret = 0; pr_info("Copyright(c) 2002-2010 Exar Corp.\n"); pr_info("Driver version: %s\n", DRV_VERSION); verify_bandwidth(); driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL); if (!driver_config) return -ENOMEM; ret = pci_register_driver(&vxge_driver); if (ret) { kfree(driver_config); goto err; } if (driver_config->config_dev_cnt && (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) vxge_debug_init(VXGE_ERR, "%s: Configured %d of %d devices", VXGE_DRIVER_NAME, driver_config->config_dev_cnt, driver_config->total_dev_cnt); err: return ret; } static void __exit vxge_closer(void) { 
pci_unregister_driver(&vxge_driver); kfree(driver_config); } module_init(vxge_starter); module_exit(vxge_closer);
gpl-2.0
CandyDevices/kernel_motorola_msm8226
kernel/debug/kdb/kdb_debugger.c
5116
4340
/*
 * Created by: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (c) 2009 Wind River Systems, Inc.  All Rights Reserved.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include "kdb_private.h"
#include "../debug_core.h"

/*
 * KDB interface to KGDB internals
 */

/*
 * Table of character-poll callbacks used by kdb to read console input.
 * Slot 0 is the kgdb I/O driver's polled getchar; the remaining NULL
 * slots are placeholders for additional input sources.
 */
get_char_func kdb_poll_funcs[] = {
	dbg_io_get_char,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};
EXPORT_SYMBOL_GPL(kdb_poll_funcs);

/* Number of entries of kdb_poll_funcs currently in use. */
int kdb_poll_idx = 1;
EXPORT_SYMBOL_GPL(kdb_poll_idx);

/* Saved kgdb state so kdb_gdb_state_pass() can reach the gdbstub later. */
static struct kgdb_state *kdb_ks;

/*
 * kdb_stub - kdb entry point called from the kgdb debug core when an
 * exception is routed to kdb.
 * @ks: kgdb state for the exception (vector, signal, cpu, registers).
 *
 * Classifies the reason kdb was entered (breakpoint, single step,
 * keyboard, cpu switch, oops), runs the kdb main loop, and then restores
 * breakpoints and tells the gdbstub how to resume.  Returns the gdbstub
 * ret_state, or DBG_PASS_EVENT / DBG_SWITCH_CPU_EVENT for the special
 * "hand off to gdb" and "switch cpu" exits.
 */
int kdb_stub(struct kgdb_state *ks)
{
	int error = 0;
	kdb_bp_t *bp;
	unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	kdb_reason_t reason = KDB_REASON_OOPS;
	kdb_dbtrap_t db_result = KDB_DB_NOBPT;
	int i;

	kdb_ks = ks;
	if (KDB_STATE(REENTRY)) {
		/* Re-entry after a cpu switch: not an oops. */
		reason = KDB_REASON_SWITCH;
		KDB_STATE_CLEAR(REENTRY);
		addr = instruction_pointer(ks->linux_regs);
	}
	ks->pass_exception = 0;
	if (atomic_read(&kgdb_setting_breakpoint))
		reason = KDB_REASON_KEYBOARD;

	/* Did we stop on one of kdb's own enabled breakpoints? */
	for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
		if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
			reason = KDB_REASON_BREAK;
			db_result = KDB_DB_BPT;
			if (addr != instruction_pointer(ks->linux_regs))
				kgdb_arch_set_pc(ks->linux_regs, addr);
			break;
		}
	}
	if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
		/* Mark any breakpoint at this address for delayed re-arm. */
		for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
			if (bp->bp_free)
				continue;
			if (bp->bp_addr == addr) {
				bp->bp_delay = 1;
				bp->bp_delayed = 1;
	/*
	 * SSBPT is set when the kernel debugger must single step a
	 * task in order to re-establish an instruction breakpoint
	 * which uses the instruction replacement mechanism.  It is
	 * cleared by any action that removes the need to single-step
	 * the breakpoint.
	 */
				reason = KDB_REASON_BREAK;
				db_result = KDB_DB_BPT;
				KDB_STATE_SET(SSBPT);
				break;
			}
		}
	}
	/* A SIGTRAP with no vector and no bp hit means a single step. */
	if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
	    ks->signo == SIGTRAP) {
		reason = KDB_REASON_SSTEP;
		db_result = KDB_DB_BPT;
	}
	/* Set initial kdb state variables */
	KDB_STATE_CLEAR(KGDB_TRANS);
	kdb_initial_cpu = atomic_read(&kgdb_active);
	kdb_current_task = kgdb_info[ks->cpu].task;
	kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
	/* Remove any breakpoints as needed by kdb and clear single step */
	kdb_bp_remove();
	KDB_STATE_CLEAR(DOING_SS);
	KDB_STATE_CLEAR(DOING_SSB);
	KDB_STATE_SET(PAGER);
	/* zero out any offline cpu data */
	for_each_present_cpu(i) {
		if (!cpu_online(i)) {
			kgdb_info[i].debuggerinfo = NULL;
			kgdb_info[i].task = NULL;
		}
	}
	if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
		/* Oopses must be passed back to the original handler. */
		ks->pass_exception = 1;
		KDB_FLAG_SET(CATASTROPHIC);
	}
	if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
		/*
		 * The single step needed only to re-install a breakpoint;
		 * no user interaction required, skip the main loop.
		 */
		KDB_STATE_CLEAR(SSBPT);
		KDB_STATE_CLEAR(DOING_SS);
	} else {
		/* Start kdb main loop */
		error = kdb_main_loop(KDB_REASON_ENTER, reason,
				      ks->err_code, db_result, ks->linux_regs);
	}
	/*
	 * Upon exit from the kdb main loop setup break points and restart
	 * the system based on the requested continue state
	 */
	kdb_initial_cpu = -1;
	kdb_current_task = NULL;
	kdb_current_regs = NULL;
	KDB_STATE_CLEAR(PAGER);
	kdbnearsym_cleanup();
	if (error == KDB_CMD_KGDB) {
		/* User asked to drop into gdb; hand the event back. */
		if (KDB_STATE(DOING_KGDB))
			KDB_STATE_CLEAR(DOING_KGDB);
		return DBG_PASS_EVENT;
	}
	kdb_bp_install(ks->linux_regs);
	dbg_activate_sw_breakpoints();
	/* Set the exit state to a single step or a continue */
	if (KDB_STATE(DOING_SS))
		gdbstub_state(ks, "s");
	else
		gdbstub_state(ks, "c");
	KDB_FLAG_CLEAR(CATASTROPHIC);

	/* Invoke arch specific exception handling prior to system resume */
	kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
	if (ks->pass_exception)
		kgdb_info[ks->cpu].ret_state = 1;
	if (error == KDB_CMD_CPU) {
		KDB_STATE_SET(REENTRY);
		/*
		 * Force clear the single step bit because kdb emulates this
		 * differently vs the gdbstub
		 */
		kgdb_single_step = 0;
		dbg_deactivate_sw_breakpoints();
		return DBG_SWITCH_CPU_EVENT;
	}
	return kgdb_info[ks->cpu].ret_state;
}

/*
 * kdb_gdb_state_pass - forward a raw gdb packet buffer to the gdbstub
 * using the kgdb state saved by the most recent kdb_stub() entry.
 * @buf: packet text to pass through.
 */
void kdb_gdb_state_pass(char *buf)
{
	gdbstub_state(kdb_ks, buf);
}
gpl-2.0
Donny3000/Gumstix-Overo-Kernel
arch/sh/boards/board-apsh4a3a.c
5116
4078
/*
 * ALPHAPROJECT AP-SH4A-3A Support.
 *
 * Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
 * Copyright (C) 2008 Yoshihiro Shimoda
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <asm/sizes.h>
#include <asm/clock.h>

/*
 * NOR flash layout: fixed loader and boot-environment areas, a kernel
 * area, and the remainder of the chip for data.
 */
static struct mtd_partition nor_flash_partitions[] = {
	{
		.name		= "loader",
		.offset		= 0x00000000,
		.size		= 512 * 1024,
	},
	{
		.name		= "bootenv",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 512 * 1024,
	},
	{
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 4 * 1024 * 1024,
	},
	{
		.name		= "data",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
	},
};

/* 32-bit wide physmap flash with the partition table above. */
static struct physmap_flash_data nor_flash_data = {
	.width		= 4,
	.parts		= nor_flash_partitions,
	.nr_parts	= ARRAY_SIZE(nor_flash_partitions),
};

/* 16 MiB NOR flash window starting at physical address 0. */
static struct resource nor_flash_resources[] = {
	[0]	= {
		.start	= 0x00000000,
		.end	= 0x01000000 - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device nor_flash_device = {
	.name		= "physmap-flash",
	.dev		= {
		.platform_data	= &nor_flash_data,
	},
	.num_resources	= ARRAY_SIZE(nor_flash_resources),
	.resource	= nor_flash_resources,
};

/* SMSC911x ethernet controller: register window plus its interrupt. */
static struct resource smsc911x_resources[] = {
	[0] = {
		.name		= "smsc911x-memory",
		.start		= 0xA4000000,
		.end		= 0xA4000000 + SZ_256 - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.name		= "smsc911x-irq",
		.start		= evt2irq(0x200),
		.end		= evt2irq(0x200),
		.flags		= IORESOURCE_IRQ,
	},
};

/* Active-low open-drain IRQ, 16-bit bus, MII PHY. */
static struct smsc911x_platform_config smsc911x_config = {
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
	.flags		= SMSC911X_USE_16BIT,
	.phy_interface	= PHY_INTERFACE_MODE_MII,
};

static struct platform_device smsc911x_device = {
	.name		= "smsc911x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(smsc911x_resources),
	.resource	= smsc911x_resources,
	.dev = {
		.platform_data = &smsc911x_config,
	},
};

/* All on-board devices registered at init time. */
static struct platform_device *apsh4a3a_devices[] __initdata = {
	&nor_flash_device,
	&smsc911x_device,
};

/* Register the board's platform devices. */
static int __init apsh4a3a_devices_setup(void)
{
	return platform_add_devices(apsh4a3a_devices,
				    ARRAY_SIZE(apsh4a3a_devices));
}
device_initcall(apsh4a3a_devices_setup);

/* Set the external crystal (extal) clock rate for this board. */
static int apsh4a3a_clk_init(void)
{
	struct clk *clk;
	int ret;

	clk = clk_get(NULL, "extal");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	ret = clk_set_rate(clk, 33333000);
	clk_put(clk);

	return ret;
}

/* Initialize the board */
static void __init apsh4a3a_setup(char **cmdline_p)
{
	printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n");
}

/* Route external interrupt pins IRQ7..IRQ4 for this board. */
static void __init apsh4a3a_init_irq(void)
{
	plat_irq_setup_pins(IRQ_MODE_IRQ7654);
}

/* Return the board specific boot mode pin configuration */
static int apsh4a3a_mode_pins(void)
{
	int value = 0;

	/* These are the factory default settings of SW1 and SW2.
	 * If you change these dip switches then you will need to
	 * adjust the values below as well.
	 */
	value &= ~MODE_PIN0;	/* Clock Mode 16 */
	value &= ~MODE_PIN1;
	value &= ~MODE_PIN2;
	value &= ~MODE_PIN3;
	value |= MODE_PIN4;
	value &= ~MODE_PIN5;	/* 16-bit Area0 bus width */
	value |= MODE_PIN6;	/* Area 0 SRAM interface */
	value |= MODE_PIN7;
	value |= MODE_PIN8;	/* Little Endian */
	value |= MODE_PIN9;	/* Master Mode */
	value |= MODE_PIN10;	/* Crystal resonator */
	value |= MODE_PIN11;	/* Display Unit */
	value |= MODE_PIN12;
	value &= ~MODE_PIN13;	/* 29-bit address mode */
	value |= MODE_PIN14;	/* No PLL step-up */

	return value;
}

/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_apsh4a3a __initmv = {
	.mv_name	= "AP-SH4A-3A",
	.mv_setup	= apsh4a3a_setup,
	.mv_clk_init	= apsh4a3a_clk_init,
	.mv_init_irq	= apsh4a3a_init_irq,
	.mv_mode_pins	= apsh4a3a_mode_pins,
};
gpl-2.0
ChangYeoun/bbbb
tools/perf/util/sysfs.c
5116
1082
#include "util.h"
#include "sysfs.h"

/*
 * Candidate sysfs mount points tried first, before falling back to
 * scanning /proc/mounts.  NULL-terminated.
 */
static const char * const sysfs_known_mountpoints[] = {
	"/sys",
	NULL,	/* pointer sentinel: NULL, not the integer 0 */
};

/* Non-zero once sysfs_mountpoint holds a validated result. */
static int sysfs_found;
char sysfs_mountpoint[PATH_MAX];

/*
 * sysfs_valid_mountpoint - check that @sysfs really has sysfs mounted there
 *
 * Returns 0 when the path can be statfs()'d and its filesystem magic is
 * SYSFS_MAGIC, -ENOENT otherwise.
 */
static int sysfs_valid_mountpoint(const char *sysfs)
{
	struct statfs st_fs;

	if (statfs(sysfs, &st_fs) < 0)
		return -ENOENT;
	else if (st_fs.f_type != (long) SYSFS_MAGIC)
		return -ENOENT;

	return 0;
}

/*
 * sysfs_find_mountpoint - locate the sysfs mount point
 *
 * The well-known locations are probed first; failing that, /proc/mounts
 * is parsed.  The result is cached in sysfs_mountpoint, so subsequent
 * calls return the cached value.  Returns NULL if sysfs is not mounted.
 */
const char *sysfs_find_mountpoint(void)
{
	const char * const *ptr;
	/*
	 * One byte larger than the destination: a "%<PATH_MAX>s" conversion
	 * stores up to PATH_MAX characters *plus* the terminating NUL, which
	 * would overflow a buffer of exactly PATH_MAX bytes.  The original
	 * code scanned straight into sysfs_mountpoint[PATH_MAX] and had
	 * precisely that off-by-one overflow.
	 */
	char mountpoint[PATH_MAX + 1];
	char type[100];
	FILE *fp;

	if (sysfs_found)
		return (const char *) sysfs_mountpoint;

	for (ptr = sysfs_known_mountpoints; *ptr; ptr++) {
		if (sysfs_valid_mountpoint(*ptr) == 0) {
			sysfs_found = 1;
			strcpy(sysfs_mountpoint, *ptr);
			return sysfs_mountpoint;
		}
	}

	/* give up and parse /proc/mounts */
	fp = fopen("/proc/mounts", "r");
	if (fp == NULL)
		return NULL;

	while (!sysfs_found &&
	       fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
		      mountpoint, type) == 2) {
		/* Only accept paths that fit the cached global buffer. */
		if (strcmp(type, "sysfs") == 0 &&
		    strlen(mountpoint) < sizeof(sysfs_mountpoint)) {
			sysfs_found = 1;
			strcpy(sysfs_mountpoint, mountpoint);
		}
	}

	fclose(fp);

	return sysfs_found ? sysfs_mountpoint : NULL;
}
gpl-2.0
rosenpapazov/one_plus_one
drivers/s390/char/con3270.c
9212
15918
/* * IBM/3270 Driver - console view. * * Author(s): * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> * Copyright IBM Corp. 2003, 2009 */ #include <linux/console.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/reboot.h> #include <asm/ccwdev.h> #include <asm/cio.h> #include <asm/cpcmd.h> #include <asm/ebcdic.h> #include "raw3270.h" #include "tty3270.h" #include "ctrlchar.h" #define CON3270_OUTPUT_BUFFER_SIZE 1024 #define CON3270_STRING_PAGES 4 static struct raw3270_fn con3270_fn; /* * Main 3270 console view data structure. */ struct con3270 { struct raw3270_view view; spinlock_t lock; struct list_head freemem; /* list of free memory for strings. */ /* Output stuff. */ struct list_head lines; /* list of lines. */ struct list_head update; /* list of lines to update. */ int line_nr; /* line number for next update. */ int nr_lines; /* # lines in list. */ int nr_up; /* # lines up in history. */ unsigned long update_flags; /* Update indication bits. */ struct string *cline; /* current output line. */ struct string *status; /* last line of display. */ struct raw3270_request *write; /* single write request. */ struct timer_list timer; /* Input stuff. */ struct string *input; /* input string for read request. */ struct raw3270_request *read; /* single read request. */ struct raw3270_request *kreset; /* single keyboard reset request. */ struct tasklet_struct readlet; /* tasklet to issue read request. */ }; static struct con3270 *condev; /* con3270->update_flags. See con3270_update for details. */ #define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */ #define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */ #define CON_UPDATE_STATUS 4 /* Update status line. */ #define CON_UPDATE_ALL 8 /* Recreate screen. 
*/ static void con3270_update(struct con3270 *); /* * Setup timeout for a device. On timeout trigger an update. */ static void con3270_set_timer(struct con3270 *cp, int expires) { if (expires == 0) del_timer(&cp->timer); else mod_timer(&cp->timer, jiffies + expires); } /* * The status line is the last line of the screen. It shows the string * "console view" in the lower left corner and "Running"/"More..."/"Holding" * in the lower right corner of the screen. */ static void con3270_update_status(struct con3270 *cp) { char *str; str = (cp->nr_up != 0) ? "History" : "Running"; memcpy(cp->status->string + 24, str, 7); codepage_convert(cp->view.ascebc, cp->status->string + 24, 7); cp->update_flags |= CON_UPDATE_STATUS; } static void con3270_create_status(struct con3270 *cp) { static const unsigned char blueprint[] = { TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN, 'c','o','n','s','o','l','e',' ','v','i','e','w', TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG }; cp->status = alloc_string(&cp->freemem, sizeof(blueprint)); /* Copy blueprint to status line */ memcpy(cp->status->string, blueprint, sizeof(blueprint)); /* Set TO_RA addresses. */ raw3270_buffer_address(cp->view.dev, cp->status->string + 1, cp->view.cols * (cp->view.rows - 1)); raw3270_buffer_address(cp->view.dev, cp->status->string + 21, cp->view.cols * cp->view.rows - 8); /* Convert strings to ebcdic. */ codepage_convert(cp->view.ascebc, cp->status->string + 8, 12); codepage_convert(cp->view.ascebc, cp->status->string + 24, 7); } /* * Set output offsets to 3270 datastream fragment of a console string. */ static void con3270_update_string(struct con3270 *cp, struct string *s, int nr) { if (s->len >= cp->view.cols - 5) return; raw3270_buffer_address(cp->view.dev, s->string + s->len - 3, cp->view.cols * (nr + 1)); } /* * Rebuild update list to print all lines. 
*/ static void con3270_rebuild_update(struct con3270 *cp) { struct string *s, *n; int nr; /* * Throw away update list and create a new one, * containing all lines that will fit on the screen. */ list_for_each_entry_safe(s, n, &cp->update, update) list_del_init(&s->update); nr = cp->view.rows - 2 + cp->nr_up; list_for_each_entry_reverse(s, &cp->lines, list) { if (nr < cp->view.rows - 1) list_add(&s->update, &cp->update); if (--nr < 0) break; } cp->line_nr = 0; cp->update_flags |= CON_UPDATE_LIST; } /* * Alloc string for size bytes. Free strings from history if necessary. */ static struct string * con3270_alloc_string(struct con3270 *cp, size_t size) { struct string *s, *n; s = alloc_string(&cp->freemem, size); if (s) return s; list_for_each_entry_safe(s, n, &cp->lines, list) { list_del(&s->list); if (!list_empty(&s->update)) list_del(&s->update); cp->nr_lines--; if (free_string(&cp->freemem, s) >= size) break; } s = alloc_string(&cp->freemem, size); BUG_ON(!s); if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) { cp->nr_up = cp->nr_lines - cp->view.rows + 1; con3270_rebuild_update(cp); con3270_update_status(cp); } return s; } /* * Write completion callback. */ static void con3270_write_callback(struct raw3270_request *rq, void *data) { raw3270_request_reset(rq); xchg(&((struct con3270 *) rq->view)->write, rq); } /* * Update console display. 
*/ static void con3270_update(struct con3270 *cp) { struct raw3270_request *wrq; char wcc, prolog[6]; unsigned long flags; unsigned long updated; struct string *s, *n; int rc; if (cp->view.dev) raw3270_activate_view(&cp->view); wrq = xchg(&cp->write, 0); if (!wrq) { con3270_set_timer(cp, 1); return; } spin_lock_irqsave(&cp->view.lock, flags); updated = 0; if (cp->update_flags & CON_UPDATE_ALL) { con3270_rebuild_update(cp); con3270_update_status(cp); cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST | CON_UPDATE_STATUS; } if (cp->update_flags & CON_UPDATE_ERASE) { /* Use erase write alternate to initialize display. */ raw3270_request_set_cmd(wrq, TC_EWRITEA); updated |= CON_UPDATE_ERASE; } else raw3270_request_set_cmd(wrq, TC_WRITE); wcc = TW_NONE; raw3270_request_add_data(wrq, &wcc, 1); /* * Update status line. */ if (cp->update_flags & CON_UPDATE_STATUS) if (raw3270_request_add_data(wrq, cp->status->string, cp->status->len) == 0) updated |= CON_UPDATE_STATUS; if (cp->update_flags & CON_UPDATE_LIST) { prolog[0] = TO_SBA; prolog[3] = TO_SA; prolog[4] = TAT_COLOR; prolog[5] = TAC_TURQ; raw3270_buffer_address(cp->view.dev, prolog + 1, cp->view.cols * cp->line_nr); raw3270_request_add_data(wrq, prolog, 6); /* Write strings in the update list to the screen. */ list_for_each_entry_safe(s, n, &cp->update, update) { if (s != cp->cline) con3270_update_string(cp, s, cp->line_nr); if (raw3270_request_add_data(wrq, s->string, s->len) != 0) break; list_del_init(&s->update); if (s != cp->cline) cp->line_nr++; } if (list_empty(&cp->update)) updated |= CON_UPDATE_LIST; } wrq->callback = con3270_write_callback; rc = raw3270_start(&cp->view, wrq); if (rc == 0) { cp->update_flags &= ~updated; if (cp->update_flags) con3270_set_timer(cp, 1); } else { raw3270_request_reset(wrq); xchg(&cp->write, wrq); } spin_unlock_irqrestore(&cp->view.lock, flags); } /* * Read tasklet. 
*/ static void con3270_read_tasklet(struct raw3270_request *rrq) { static char kreset_data = TW_KR; struct con3270 *cp; unsigned long flags; int nr_up, deactivate; cp = (struct con3270 *) rrq->view; spin_lock_irqsave(&cp->view.lock, flags); nr_up = cp->nr_up; deactivate = 0; /* Check aid byte. */ switch (cp->input->string[0]) { case 0x7d: /* enter: jump to bottom. */ nr_up = 0; break; case 0xf3: /* PF3: deactivate the console view. */ deactivate = 1; break; case 0x6d: /* clear: start from scratch. */ cp->update_flags = CON_UPDATE_ALL; con3270_set_timer(cp, 1); break; case 0xf7: /* PF7: do a page up in the console log. */ nr_up += cp->view.rows - 2; if (nr_up + cp->view.rows - 1 > cp->nr_lines) { nr_up = cp->nr_lines - cp->view.rows + 1; if (nr_up < 0) nr_up = 0; } break; case 0xf8: /* PF8: do a page down in the console log. */ nr_up -= cp->view.rows - 2; if (nr_up < 0) nr_up = 0; break; } if (nr_up != cp->nr_up) { cp->nr_up = nr_up; con3270_rebuild_update(cp); con3270_update_status(cp); con3270_set_timer(cp, 1); } spin_unlock_irqrestore(&cp->view.lock, flags); /* Start keyboard reset command. */ raw3270_request_reset(cp->kreset); raw3270_request_set_cmd(cp->kreset, TC_WRITE); raw3270_request_add_data(cp->kreset, &kreset_data, 1); raw3270_start(&cp->view, cp->kreset); if (deactivate) raw3270_deactivate_view(&cp->view); raw3270_request_reset(rrq); xchg(&cp->read, rrq); raw3270_put_view(&cp->view); } /* * Read request completion callback. */ static void con3270_read_callback(struct raw3270_request *rq, void *data) { raw3270_get_view(rq->view); /* Schedule tasklet to pass input to tty. */ tasklet_schedule(&((struct con3270 *) rq->view)->readlet); } /* * Issue a read request. Called only from interrupt function. */ static void con3270_issue_read(struct con3270 *cp) { struct raw3270_request *rrq; int rc; rrq = xchg(&cp->read, 0); if (!rrq) /* Read already scheduled. 
*/ return; rrq->callback = con3270_read_callback; rrq->callback_data = cp; raw3270_request_set_cmd(rrq, TC_READMOD); raw3270_request_set_data(rrq, cp->input->string, cp->input->len); /* Issue the read modified request. */ rc = raw3270_start_irq(&cp->view, rrq); if (rc) raw3270_request_reset(rrq); } /* * Switch to the console view. */ static int con3270_activate(struct raw3270_view *view) { struct con3270 *cp; cp = (struct con3270 *) view; cp->update_flags = CON_UPDATE_ALL; con3270_set_timer(cp, 1); return 0; } static void con3270_deactivate(struct raw3270_view *view) { struct con3270 *cp; cp = (struct con3270 *) view; del_timer(&cp->timer); } static int con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) { /* Handle ATTN. Schedule tasklet to read aid. */ if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) con3270_issue_read(cp); if (rq) { if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) rq->rc = -EIO; else /* Normal end. Copy residual count. */ rq->rescnt = irb->scsw.cmd.count; } return RAW3270_IO_DONE; } /* Console view to a 3270 device. */ static struct raw3270_fn con3270_fn = { .activate = con3270_activate, .deactivate = con3270_deactivate, .intv = (void *) con3270_irq }; static inline void con3270_cline_add(struct con3270 *cp) { if (!list_empty(&cp->cline->list)) /* Already added. */ return; list_add_tail(&cp->cline->list, &cp->lines); cp->nr_lines++; con3270_rebuild_update(cp); } static inline void con3270_cline_insert(struct con3270 *cp, unsigned char c) { cp->cline->string[cp->cline->len++] = cp->view.ascebc[(c < ' ') ? ' ' : c]; if (list_empty(&cp->cline->update)) { list_add_tail(&cp->cline->update, &cp->update); cp->update_flags |= CON_UPDATE_LIST; } } static inline void con3270_cline_end(struct con3270 *cp) { struct string *s; unsigned int size; /* Copy cline. */ size = (cp->cline->len < cp->view.cols - 5) ? 
cp->cline->len + 4 : cp->view.cols; s = con3270_alloc_string(cp, size); memcpy(s->string, cp->cline->string, cp->cline->len); if (s->len < cp->view.cols - 5) { s->string[s->len - 4] = TO_RA; s->string[s->len - 1] = 0; } else { while (--size > cp->cline->len) s->string[size] = cp->view.ascebc[' ']; } /* Replace cline with allocated line s and reset cline. */ list_add(&s->list, &cp->cline->list); list_del_init(&cp->cline->list); if (!list_empty(&cp->cline->update)) { list_add(&s->update, &cp->cline->update); list_del_init(&cp->cline->update); } cp->cline->len = 0; } /* * Write a string to the 3270 console */ static void con3270_write(struct console *co, const char *str, unsigned int count) { struct con3270 *cp; unsigned long flags; unsigned char c; cp = condev; spin_lock_irqsave(&cp->view.lock, flags); while (count-- > 0) { c = *str++; if (cp->cline->len == 0) con3270_cline_add(cp); if (c != '\n') con3270_cline_insert(cp, c); if (c == '\n' || cp->cline->len >= cp->view.cols) con3270_cline_end(cp); } /* Setup timer to output current console buffer after 1/10 second */ cp->nr_up = 0; if (cp->view.dev && !timer_pending(&cp->timer)) con3270_set_timer(cp, HZ/10); spin_unlock_irqrestore(&cp->view.lock,flags); } static struct tty_driver * con3270_device(struct console *c, int *index) { *index = c->index; return tty3270_driver; } /* * Wait for end of write request. */ static void con3270_wait_write(struct con3270 *cp) { while (!cp->write) { raw3270_wait_cons_dev(cp->view.dev); barrier(); } } /* * panic() calls con3270_flush through a panic_notifier * before the system enters a disabled, endless loop. 
*/ static void con3270_flush(void) { struct con3270 *cp; unsigned long flags; cp = condev; if (!cp->view.dev) return; raw3270_pm_unfreeze(&cp->view); spin_lock_irqsave(&cp->view.lock, flags); con3270_wait_write(cp); cp->nr_up = 0; con3270_rebuild_update(cp); con3270_update_status(cp); while (cp->update_flags != 0) { spin_unlock_irqrestore(&cp->view.lock, flags); con3270_update(cp); spin_lock_irqsave(&cp->view.lock, flags); con3270_wait_write(cp); } spin_unlock_irqrestore(&cp->view.lock, flags); } static int con3270_notify(struct notifier_block *self, unsigned long event, void *data) { con3270_flush(); return NOTIFY_OK; } static struct notifier_block on_panic_nb = { .notifier_call = con3270_notify, .priority = 0, }; static struct notifier_block on_reboot_nb = { .notifier_call = con3270_notify, .priority = 0, }; /* * The console structure for the 3270 console */ static struct console con3270 = { .name = "tty3270", .write = con3270_write, .device = con3270_device, .flags = CON_PRINTBUFFER, }; /* * 3270 console initialization code called from console_init(). 
*/ static int __init con3270_init(void) { struct ccw_device *cdev; struct raw3270 *rp; void *cbuf; int i; /* Check if 3270 is to be the console */ if (!CONSOLE_IS_3270) return -ENODEV; /* Set the console mode for VM */ if (MACHINE_IS_VM) { cpcmd("TERM CONMODE 3270", NULL, 0, NULL); cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); } cdev = ccw_device_probe_console(); if (IS_ERR(cdev)) return -ENODEV; rp = raw3270_setup_console(cdev); if (IS_ERR(rp)) return PTR_ERR(rp); condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA); condev->view.dev = rp; condev->read = raw3270_request_alloc(0); condev->read->callback = con3270_read_callback; condev->read->callback_data = condev; condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE); condev->kreset = raw3270_request_alloc(1); INIT_LIST_HEAD(&condev->lines); INIT_LIST_HEAD(&condev->update); setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update, (unsigned long) condev); tasklet_init(&condev->readlet, (void (*)(unsigned long)) con3270_read_tasklet, (unsigned long) condev->read); raw3270_add_view(&condev->view, &con3270_fn, 1); INIT_LIST_HEAD(&condev->freemem); for (i = 0; i < CON3270_STRING_PAGES; i++) { cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); add_string_memory(&condev->freemem, cbuf, PAGE_SIZE); } condev->cline = alloc_string(&condev->freemem, condev->view.cols); condev->cline->len = 0; con3270_create_status(condev); condev->input = alloc_string(&condev->freemem, 80); atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); register_reboot_notifier(&on_reboot_nb); register_console(&con3270); return 0; } console_initcall(con3270_init);
gpl-2.0
motoTurboZ/kernel-msm
sound/oss/dev_table.c
11772
5590
/* * sound/oss/dev_table.c * * Device call tables. * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. */ #include <linux/init.h> #include "sound_config.h" struct audio_operations *audio_devs[MAX_AUDIO_DEV]; EXPORT_SYMBOL(audio_devs); int num_audiodevs; EXPORT_SYMBOL(num_audiodevs); struct mixer_operations *mixer_devs[MAX_MIXER_DEV]; EXPORT_SYMBOL(mixer_devs); int num_mixers; EXPORT_SYMBOL(num_mixers); struct synth_operations *synth_devs[MAX_SYNTH_DEV+MAX_MIDI_DEV]; EXPORT_SYMBOL(synth_devs); int num_synths; struct midi_operations *midi_devs[MAX_MIDI_DEV]; EXPORT_SYMBOL(midi_devs); int num_midis; EXPORT_SYMBOL(num_midis); struct sound_timer_operations *sound_timer_devs[MAX_TIMER_DEV] = { &default_sound_timer, NULL }; EXPORT_SYMBOL(sound_timer_devs); int num_sound_timers = 1; static int sound_alloc_audiodev(void); int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver, int driver_size, int flags, unsigned int format_mask, void *devc, int dma1, int dma2) { struct audio_driver *d; struct audio_operations *op; int num; if (vers != AUDIO_DRIVER_VERSION || driver_size > sizeof(struct audio_driver)) { printk(KERN_ERR "Sound: Incompatible audio driver for %s\n", name); return -(EINVAL); } num = sound_alloc_audiodev(); if (num == -1) { printk(KERN_ERR "sound: Too many audio drivers\n"); return -(EBUSY); } d = (struct audio_driver *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_driver))); sound_nblocks++; if (sound_nblocks >= MAX_MEM_BLOCKS) sound_nblocks = MAX_MEM_BLOCKS - 1; op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct audio_operations))); sound_nblocks++; if (sound_nblocks >= MAX_MEM_BLOCKS) sound_nblocks = MAX_MEM_BLOCKS - 1; if (d == NULL || op == NULL) { printk(KERN_ERR "Sound: Can't allocate driver for (%s)\n", 
name); sound_unload_audiodev(num); return -(ENOMEM); } init_waitqueue_head(&op->in_sleeper); init_waitqueue_head(&op->out_sleeper); init_waitqueue_head(&op->poll_sleeper); if (driver_size < sizeof(struct audio_driver)) memset((char *) d, 0, sizeof(struct audio_driver)); memcpy((char *) d, (char *) driver, driver_size); op->d = d; strlcpy(op->name, name, sizeof(op->name)); op->flags = flags; op->format_mask = format_mask; op->devc = devc; /* * Hardcoded defaults */ audio_devs[num] = op; DMAbuf_init(num, dma1, dma2); audio_init_devices(); return num; } EXPORT_SYMBOL(sound_install_audiodrv); int sound_install_mixer(int vers, char *name, struct mixer_operations *driver, int driver_size, void *devc) { struct mixer_operations *op; int n = sound_alloc_mixerdev(); if (n == -1) { printk(KERN_ERR "Sound: Too many mixer drivers\n"); return -EBUSY; } if (vers != MIXER_DRIVER_VERSION || driver_size > sizeof(struct mixer_operations)) { printk(KERN_ERR "Sound: Incompatible mixer driver for %s\n", name); return -EINVAL; } /* FIXME: This leaks a mixer_operations struct every time its called until you unload sound! 
*/ op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct mixer_operations))); sound_nblocks++; if (sound_nblocks >= MAX_MEM_BLOCKS) sound_nblocks = MAX_MEM_BLOCKS - 1; if (op == NULL) { printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name); return -ENOMEM; } memcpy((char *) op, (char *) driver, driver_size); strlcpy(op->name, name, sizeof(op->name)); op->devc = devc; mixer_devs[n] = op; return n; } EXPORT_SYMBOL(sound_install_mixer); void sound_unload_audiodev(int dev) { if (dev != -1) { DMAbuf_deinit(dev); audio_devs[dev] = NULL; unregister_sound_dsp((dev<<4)+3); } } EXPORT_SYMBOL(sound_unload_audiodev); static int sound_alloc_audiodev(void) { int i = register_sound_dsp(&oss_sound_fops, -1); if(i==-1) return i; i>>=4; if(i>=num_audiodevs) num_audiodevs = i + 1; return i; } int sound_alloc_mididev(void) { int i = register_sound_midi(&oss_sound_fops, -1); if(i==-1) return i; i>>=4; if(i>=num_midis) num_midis = i + 1; return i; } EXPORT_SYMBOL(sound_alloc_mididev); int sound_alloc_synthdev(void) { int i; for (i = 0; i < MAX_SYNTH_DEV; i++) { if (synth_devs[i] == NULL) { if (i >= num_synths) num_synths++; return i; } } return -1; } EXPORT_SYMBOL(sound_alloc_synthdev); int sound_alloc_mixerdev(void) { int i = register_sound_mixer(&oss_sound_fops, -1); if(i==-1) return -1; i>>=4; if(i>=num_mixers) num_mixers = i + 1; return i; } EXPORT_SYMBOL(sound_alloc_mixerdev); int sound_alloc_timerdev(void) { int i; for (i = 0; i < MAX_TIMER_DEV; i++) { if (sound_timer_devs[i] == NULL) { if (i >= num_sound_timers) num_sound_timers++; return i; } } return -1; } EXPORT_SYMBOL(sound_alloc_timerdev); void sound_unload_mixerdev(int dev) { if (dev != -1) { mixer_devs[dev] = NULL; unregister_sound_mixer(dev<<4); num_mixers--; } } EXPORT_SYMBOL(sound_unload_mixerdev); void sound_unload_mididev(int dev) { if (dev != -1) { midi_devs[dev] = NULL; unregister_sound_midi((dev<<4)+2); } } EXPORT_SYMBOL(sound_unload_mididev); void 
sound_unload_synthdev(int dev) { if (dev != -1) synth_devs[dev] = NULL; } EXPORT_SYMBOL(sound_unload_synthdev); void sound_unload_timerdev(int dev) { if (dev != -1) sound_timer_devs[dev] = NULL; } EXPORT_SYMBOL(sound_unload_timerdev);
gpl-2.0
harunjo/galaxsih-kernel-JB-S3
arch/arm/mach-exynos/p8-gpio.c
253
21932
#include <linux/gpio.h> #include <linux/serial_core.h> #include <plat/devs.h> #include <plat/gpio-cfg.h> #include <plat/regs-serial.h> #include <mach/gpio.h> #include "px.h" struct gpio_init_data { uint num; uint cfg; uint val; uint pud; uint drv; }; static struct gpio_init_data p8_init_gpios[] = { { .num = EXYNOS4_GPD0(2), /* MSENSOR_MHL_SDA_2.8V */ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_NONE, .drv = S5P_GPIO_DRVSTR_LV1, }, { .num = EXYNOS4_GPD0(3), /* MSENSOR_MHL_SCL_2.8V */ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_NONE, .drv = S5P_GPIO_DRVSTR_LV1, }, { .num = EXYNOS4_GPD1(2), /* SENSE_SDA_2.8V */ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_NONE, .drv = S5P_GPIO_DRVSTR_LV1, }, { .num = EXYNOS4_GPD1(3), /* SENSE_SCL_2.8V */ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_NONE, .drv = S5P_GPIO_DRVSTR_LV1, }, { .num = EXYNOS4_GPK2(2), /* PS_ALS_SDA_2.8V */ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_NONE, .drv = S5P_GPIO_DRVSTR_LV1, }, { .num = EXYNOS4_GPK3(2), /* PS_ALS_SCL_2.8V */ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_NONE, .drv = S5P_GPIO_DRVSTR_LV1, }, { .num = EXYNOS4210_GPJ1(3), /* GPIO_CAM_MCLK */ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_DOWN, .drv = S5P_GPIO_DRVSTR_LV3, }, { .num = EXYNOS4_GPB(4), /* GPIO_IRDA_nINT */ .cfg = S3C_GPIO_OUTPUT, .val = S3C_GPIO_SETPIN_ZERO, .pud = S3C_GPIO_PULL_NONE, .drv = S5P_GPIO_DRVSTR_LV2, }, { .num = EXYNOS4_GPX0(4), /*TA_nCHG*/ .cfg = S3C_GPIO_INPUT, .val = S3C_GPIO_SETPIN_NONE, .pud = S3C_GPIO_PULL_UP, .drv = S5P_GPIO_DRVSTR_LV4, }, /* BT UART */ {GPIO_BT_RXD, S3C_GPIO_SFN(2), 2, S3C_GPIO_PULL_UP}, {GPIO_BT_TXD, S3C_GPIO_SFN(2), 2, S3C_GPIO_PULL_NONE}, {GPIO_BT_CTS, S3C_GPIO_SFN(2), 2, S3C_GPIO_PULL_NONE}, {GPIO_BT_RTS, S3C_GPIO_SFN(2), 2, S3C_GPIO_PULL_NONE}, /* GPS UART */ {GPIO_GPS_RXD, S3C_GPIO_SFN(2), 2, 
S3C_GPIO_PULL_UP}, {GPIO_GPS_TXD, S3C_GPIO_SFN(2), 2, S3C_GPIO_PULL_NONE}, {GPIO_GPS_CTS, S3C_GPIO_SFN(2), 2, S3C_GPIO_PULL_NONE}, {GPIO_GPS_RTS, S3C_GPIO_SFN(2), 2, S3C_GPIO_PULL_NONE}, {GPIO_GPS_nRST, S3C_GPIO_OUTPUT, 1, S3C_GPIO_PULL_UP}, {GPIO_GPS_PWR_EN, S3C_GPIO_OUTPUT, 0, S3C_GPIO_PULL_NONE}, /* UART switch: configure as output */ {GPIO_UART_SEL, S3C_GPIO_OUTPUT, 2, S3C_GPIO_PULL_NONE}, /* USB switch: configure as output */ {GPIO_USB_SEL1, S3C_GPIO_OUTPUT, 2, S3C_GPIO_PULL_NONE}, {GPIO_USB_SEL2, S3C_GPIO_OUTPUT, 2, S3C_GPIO_PULL_NONE}, {GPIO_USB_SEL3, S3C_GPIO_OUTPUT, 2, S3C_GPIO_PULL_NONE}, /* JIG On */ {GPIO_IF_CON_SENSE, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, /* 30PIN CONNECTOR */ {GPIO_DOCK_INT, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, /* MIC */ {GPIO_EAR_MIC_BIAS_EN, S3C_GPIO_OUTPUT, 0, S3C_GPIO_PULL_NONE}, /* TSP */ {GPIO_TSP_VENDOR, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, /*** GPX ***/ {GPIO_GYRO_INT, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, {GPIO_PS_VOUT_WAKE, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, /* REMOTE_SENSE_IRQ */ {EXYNOS4_GPX0(2), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_UP}, {GPIO_ACCESSORY_INT, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, {GPIO_MSENSE_INT, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, {GPIO_FUEL_ALERT, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, /* {GPIO_BT_HOST_WAKE, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, */ {GPIO_DET_35, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, {GPIO_USB_OTG_EN, S3C_GPIO_OUTPUT, 0, S3C_GPIO_PULL_NONE}, /* T_FLASH_DETECT */ {EXYNOS4_GPX3(4), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, /* TA_nCONNECTED */ {EXYNOS4_GPX3(5), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, {GPIO_HDMI_HPD, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_NONE}, /* NC */ {EXYNOS4_GPY0(0), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY0(1), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY0(2), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY0(3), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY0(4), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY0(5), 
S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY1(0), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY1(1), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY1(2), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY1(3), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY2(0), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY2(1), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY2(2), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY2(3), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY2(4), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY2(5), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY3(1), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY3(3), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY6(0), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY6(2), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY6(3), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {EXYNOS4_GPY6(4), S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, /* for WIFI version */ #ifndef CONFIG_LINK_DEVICE_HSIC {GPIO_PHONE_ON, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_SIM_DETECT, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_IPC_SLAVE_WAKEUP, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_IPC_HOST_WAKEUP, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_CP_DUMP_INT, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_SUSPEND_REQUEST, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_CP_RST, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_PHONE_ACTIVE, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_ACTIVE_STATE, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_PDA_ACTIVE, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, {GPIO_CP_REQ_RESET, S3C_GPIO_INPUT, 2, S3C_GPIO_PULL_DOWN}, #endif }; void p8_config_gpio_table(void) { u32 i, gpio; printk(KERN_DEBUG "%s\n", __func__); for (i = 0; i < ARRAY_SIZE(p8_init_gpios); i++) { gpio = p8_init_gpios[i].num; s3c_gpio_cfgpin(gpio, p8_init_gpios[i].cfg); s3c_gpio_setpull(gpio, p8_init_gpios[i].pud); if (p8_init_gpios[i].val != S3C_GPIO_SETPIN_NONE) 
gpio_set_value(gpio, p8_init_gpios[i].val); s5p_gpio_set_drvstr(gpio, p8_init_gpios[i].drv); } } /* this table only for c1 board */ static unsigned int p8_sleep_gpio_table[][3] = { { EXYNOS4_GPA0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPA0(1), S3C_GPIO_SLP_OUT1, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPA0(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPA0(3), S3C_GPIO_SLP_OUT1, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPA0(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPA0(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPA0(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPA0(7), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPA1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPA1(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPA1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPA1(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPA1(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPA1(5), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPB(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPB(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPB(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPB(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPB(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPB(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPB(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPB(7), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPC0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPC0(1), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPC0(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPC0(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPC0(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPC1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPC1(1), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPC1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { 
EXYNOS4_GPC1(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPC1(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPD0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPD0(1), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPD0(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPD0(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPD1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPD1(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPD1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPD1(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, /* NC */ { EXYNOS4210_GPE0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4210_GPE0(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE0(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE0(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE0(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4210_GPE1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE1(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE1(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ /* NC */ { EXYNOS4210_GPE1(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE1(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE1(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE1(7), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE2(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE2(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE2(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE2(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE2(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPE2(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4210_GPE3(0), 
S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE3(1), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE3(2), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE3(3), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE3(4), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE3(5), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE3(6), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE3(7), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(0), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(1), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(2), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(3), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(4), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(5), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(6), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPE4(7), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPF0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* AMOLED_RESET_1.8V */ { EXYNOS4_GPF0(1), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, /* PANEL_CRACK_DET_1.8V */ { EXYNOS4_GPF0(2), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPF0(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF0(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF0(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF0(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF0(7), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF1(7), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(0), 
S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF2(7), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF3(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF3(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF3(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF3(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPF3(4), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPF3(5), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, /* NC */ { EXYNOS4210_GPJ0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ0(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ0(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ0(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ0(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ0(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ0(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ0(7), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ1(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4210_GPJ1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4210_GPJ1(3), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4210_GPJ1(4), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPK0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK0(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK0(2), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPK0(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK0(4), 
S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK0(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK0(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK1(1), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPK1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK1(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK1(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK1(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK1(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK2(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK2(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK2(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPK2(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK2(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK2(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK2(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPK3(0), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPK3(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPK3(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPK3(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPK3(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPK3(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPK3(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, { EXYNOS4_GPL0(0), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL0(1), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL0(2), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL0(3), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL0(4), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL0(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* WLAN_EN2 */ { EXYNOS4_GPL0(6), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL0(7), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL1(0), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL1(1), S3C_GPIO_SLP_OUT0, 
S3C_GPIO_PULL_NONE}, /* WLAN_EN */ { EXYNOS4_GPL1(2), S3C_GPIO_SLP_OUT1, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL2(0), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL2(1), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL2(2), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL2(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL2(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPL2(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPL2(6), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPL2(7), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPX0(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_UP}, /* TA_nCHG */ { EXYNOS4_GPY0(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY0(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY0(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY0(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY0(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY0(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY1(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY1(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY1(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY1(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY2(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY2(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY2(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY2(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY2(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY2(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ /* MHL_SDA_1.8V */ { EXYNOS4_GPY3(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY3(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ /* MHL_SCL_1.8V */ { EXYNOS4_GPY3(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { 
EXYNOS4_GPY3(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY3(4), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, #ifndef CONFIG_LINK_DEVICE_HSIC { EXYNOS4_GPY3(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, #else /* GPIO_ACTIVE_STATE, EHCI on/off state to CP */ { EXYNOS4_GPY3(5), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, #endif { EXYNOS4_GPY3(6), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, /* USB_SEL2 */ { EXYNOS4_GPY3(7), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY4(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY4(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, #ifndef CONFIG_LINK_DEVICE_HSIC { EXYNOS4_GPY4(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, #else /* GPIO_PDA_ACTIVE, AP Sleep, LPA state to CP */ { EXYNOS4_GPY4(2), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, #endif { EXYNOS4_GPY4(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, /* V_ACCESSORY_5V CHECK */ { EXYNOS4_GPY4(4), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, /* USB_SEL3 */ { EXYNOS4_GPY4(5), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, #ifndef CONFIG_LINK_DEVICE_HSIC { EXYNOS4_GPY4(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, #else /* GPIO_CP_REQ_RESET */ { EXYNOS4_GPY4(6), S3C_GPIO_SLP_OUT1, S3C_GPIO_PULL_NONE}, #endif /* GPIO_UART_SEL */ { EXYNOS4_GPY4(7), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, /* HW_REV0 */ { EXYNOS4_GPY5(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, /* HW_REV1 */ { EXYNOS4_GPY5(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, /* HW_REV2 */ { EXYNOS4_GPY5(2), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, /* HW_REV3 */ { EXYNOS4_GPY5(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY5(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, { EXYNOS4_GPY5(5), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY5(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY5(7), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY6(0), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ /* ACCESSORY_EN */ { EXYNOS4_GPY6(1), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY6(2), 
S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY6(3), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY6(4), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPY6(5), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPY6(6), S3C_GPIO_SLP_OUT0, S3C_GPIO_PULL_NONE}, /* TA_EN */ { EXYNOS4_GPY6(7), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPZ(0), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPZ(1), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPZ(2), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPZ(3), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPZ(4), S3C_GPIO_SLP_PREV, S3C_GPIO_PULL_NONE}, { EXYNOS4_GPZ(5), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ { EXYNOS4_GPZ(6), S3C_GPIO_SLP_INPUT, S3C_GPIO_PULL_DOWN}, /* NC */ }; static void config_sleep_gpio_table(int array_size, unsigned int (*gpio_table)[3]) { u32 i, gpio; for (i = 0; i < array_size; i++) { gpio = gpio_table[i][0]; s3c_gpio_slp_cfgpin(gpio, gpio_table[i][1]); s3c_gpio_slp_setpull_updown(gpio, gpio_table[i][2]); } } void p8_config_sleep_gpio_table(void) { config_sleep_gpio_table(ARRAY_SIZE(p8_sleep_gpio_table), p8_sleep_gpio_table); }
gpl-2.0
htc-mirror/ville-u-ics-3.0.8-e2a40ab
net/ipv6/udp.c
253
38614
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "udp_impl.h"

/*
 * Decide whether two sockets' bound receive addresses conflict for
 * port-sharing purposes.  Handles the mixed cases: v4-mapped vs v4,
 * wildcard (ANY) on either side subject to IPV6_V6ONLY, and plain
 * IPv6 address equality.  Returns non-zero on a conflict.
 */
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
	const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
	__be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
	__be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
	int sk_ipv6only = ipv6_only_sock(sk);
	int sk2_ipv6only = inet_v6_ipv6only(sk2);
	int addr_type = ipv6_addr_type(sk_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
		return (!sk2_ipv6only &&
			(!sk1_rcv_saddr || !sk2_rcv_saddr ||
			  sk1_rcv_saddr == sk2_rcv_saddr));

	if (addr_type2 == IPV6_ADDR_ANY &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return 1;

	if (addr_type == IPV6_ADDR_ANY &&
	    !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return 1;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
		return 1;

	return 0;
}

/*
 * Secondary (address, port) hash used for the hash2 lookup tables.
 * ANY hashes as zero and v4-mapped addresses hash only their IPv4
 * word, so they collide with the corresponding IPv4 hashing.
 */
static unsigned int udp6_portaddr_hash(struct net *net,
				       const struct in6_addr *addr6,
				       unsigned int port)
{
	unsigned int hash, mix = net_hash_mix(net);

	if (ipv6_addr_any(addr6))
		hash = jhash_1word(0, mix);
	else if (ipv6_addr_v4mapped(addr6))
		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
	else
		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

	return hash ^ port;
}

/* Bind helper: pick/validate a local port via the generic UDP code. */
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}

/* Recompute the secondary hash after the bound address/port changed. */
static void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = udp6_portaddr_hash(sock_net(sk),
					  &inet6_sk(sk)->rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

/*
 * Score a socket against an incoming 4-tuple for the primary (port-only
 * hashed) lookup.  -1 means "does not match at all"; otherwise one point
 * per specifically-bound attribute (dport, rcv_saddr, daddr, device).
 */
static inline int compute_score(struct sock *sk, struct net *net,
				unsigned short hnum,
				const struct in6_addr *saddr, __be16 sport,
				const struct in6_addr *daddr, __be16 dport,
				int dif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
			sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);
		struct inet_sock *inet = inet_sk(sk);

		score = 0;
		if (inet->inet_dport) {
			if (inet->inet_dport != sport)
				return -1;
			score++;
		}
		if (!ipv6_addr_any(&np->rcv_saddr)) {
			if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
				return -1;
			score++;
		}
		if (!ipv6_addr_any(&np->daddr)) {
			if (!ipv6_addr_equal(&np->daddr, saddr))
				return -1;
			score++;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score++;
		}
	}
	return score;
}

/* Maximum compute_score2() value: dport + daddr + bound device all match. */
#define SCORE2_MAX (1 + 1 + 1)

/*
 * Score for the secondary (address+port hashed) lookup: rcv_saddr must
 * equal daddr exactly, the remaining attributes score as above.
 */
static inline int compute_score2(struct sock *sk, struct net *net,
				const struct in6_addr *saddr, __be16 sport,
				const struct in6_addr *daddr, unsigned short hnum,
				int dif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
			sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);
		struct inet_sock *inet = inet_sk(sk);

		if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
			return -1;
		score = 0;
		if (inet->inet_dport) {
			if (inet->inet_dport != sport)
				return -1;
			score++;
		}
		if (!ipv6_addr_any(&np->daddr)) {
			if (!ipv6_addr_equal(&np->daddr, saddr))
				return -1;
			score++;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score++;
		}
	}
	return score;
}

/* called with read_rcu_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum, int dif,
		struct udp_hslot *hslot2, unsigned int slot2)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	int score, badness;

begin:
	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		score = compute_score2(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			if (score == SCORE2_MAX)
				goto exact_match;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot2)
		goto begin;
	if (result) {
exact_match:
		/* Take a reference, then re-score: the socket may have been
		 * rebound while we were not holding a reference. */
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score2(result, net, saddr, sport,
				  daddr, hnum, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	return result;
}

/*
 * Lockless (RCU) lookup of the best-matching UDPv6 socket for an
 * incoming datagram.  For busy primary slots (>10 entries), try the
 * secondary address+port hash first (exact daddr, then wildcard).
 * Returns a referenced socket or NULL.
 */
static struct sock *__udp6_lib_lookup(struct net *net,
				      const struct in6_addr *saddr, __be16 sport,
				      const struct in6_addr *daddr, __be16 dport,
				      int dif, struct udp_table *udptable)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness;

	rcu_read_lock();
	if (hslot->count > 10) {
		hash2 = udp6_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp6_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, slot2);
		if (!result) {
			hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
			slot2 = hash2 & udptable->mask;
			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp6_lib_lookup2(net, saddr, sport,
						  &in6addr_any, hnum, dif,
						  hslot2, slot2);
		}
		rcu_read_unlock();
		return result;
	}
begin:
	result = NULL;
	badness = -1;
	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	if (result) {
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, saddr, sport,
					daddr, dport, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}

/* skb-based lookup wrapper; honors an early-demuxed ("stolen") socket. */
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	struct sock *sk;
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	if (unlikely(sk = skb_steal_sock(skb)))
		return sk;
	return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 udptable);
}

/* Exported lookup against the global UDP table. */
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */
int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
		  struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen;
	int peeked;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int is_udp4;
	bool slow;

	if (addr_len)
		*addr_len=sizeof(struct sockaddr_in6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len);

try_again:
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &err);
	if (!skb)
		goto out;

	ulen = skb->len - sizeof(struct udphdr);
	if (len > ulen)
		len = ulen;
	else if (len < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
		if (udp_lib_checksum_complete(skb))
			goto csum_copy_err;
	}

	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov,len);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	if (!peeked) {
		if (is_udp4)
			UDP_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INDATAGRAMS, is_udplite);
		else
			UDP6_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INDATAGRAMS, is_udplite);
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *) msg->msg_name;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;

		if (is_udp4)
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
		else {
			ipv6_addr_copy(&sin6->sin6_addr,
				       &ipv6_hdr(skb)->saddr);
			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				sin6->sin6_scope_id = IP6CB(skb)->iif;
		}

	}
	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	} else {
		if (np->rxopt.all)
			datagram_recv_ctl(sk, msg, skb);
	}

	err = len;
	if (flags & MSG_TRUNC)
		err = ulen;

out_free:
	skb_free_datagram_locked(sk, skb);
out:
	return err;

csum_copy_err:
	slow = lock_sock_fast(sk);
	if (!skb_kill_datagram(sk, skb, flags)) {
		if (is_udp4)
			UDP_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INERRORS, is_udplite);
		else
			UDP6_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INERRORS, is_udplite);
	}
	unlock_sock_fast(sk, slow);

	if (noblock)
		return -EAGAIN;

	/* starting over for a new packet */
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

/*
 * ICMPv6 error handler: look up the socket the offending datagram was
 * sent from and propagate the error (queued on the error queue when
 * IPV6_RECVERR is enabled).
 */
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info,
		    struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr*)(skb->data+offset);
	struct sock *sk;
	int err;

	sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
			       saddr, uh->source, inet6_iif(skb), udptable);
	if (sk == NULL)
		return;

	np = inet6_sk(sk);

	if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
		goto out;

	if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
		goto out;

	if (np->recverr)
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

/* Error handler for plain UDP over the global table. */
static __inline__ void udpv6_err(struct sk_buff *skb,
				 struct inet6_skb_parm *opt, u8 type,
				 u8 code, int offset, __be32 info )
{
	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

/*
 * Deliver one skb into a socket's receive queue: XFRM policy check,
 * UDP-Lite partial-coverage validation, eager checksum when a socket
 * filter is attached, then queue (charging stats on failure).
 * Returns 0 on success, -1 when the skb was dropped.
 */
int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
				" %d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
						    "too small, need min %d\n",
				       UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	/* A socket filter will look at payload, so verify checksum first. */
	if (rcu_dereference_raw(sk->sk_filter)) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS_BH(sock_net(sk),
					UDP_MIB_RCVBUFERRORS, is_udplite);
		goto drop_no_sk_drops_inc;
	}

	return 0;
drop:
	atomic_inc(&sk->sk_drops);
drop_no_sk_drops_inc:
	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return -1;
}

/*
 * Walk the hash chain starting at @sk and return the next socket that
 * should receive a multicast datagram matching the given addresses,
 * ports and device, or NULL when the chain is exhausted.
 */
static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
				      __be16 loc_port, const struct in6_addr *loc_addr,
				      __be16 rmt_port, const struct in6_addr *rmt_addr,
				      int dif)
{
	struct hlist_nulls_node *node;
	struct sock *s = sk;
	unsigned short num = ntohs(loc_port);

	sk_nulls_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (udp_sk(s)->udp_port_hash == num &&
		    s->sk_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(s);
			if (inet->inet_dport) {
				if (inet->inet_dport != rmt_port)
					continue;
			}
			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					continue;
			}
			if (!inet6_mc_check(s, loc_addr, rmt_addr))
				continue;
			return s;
		}
	}
	return NULL;
}

/*
 * Deliver @skb to @count sockets collected in @stack; entry @final gets
 * the original skb, every other one a clone.  Failed deliveries are
 * counted as drops against the target socket.
 */
static void flush_stack(struct sock **stack, unsigned int count,
			struct sk_buff *skb, unsigned int final)
{
	unsigned int i;
	struct sock *sk;
	struct sk_buff *skb1;

	for (i = 0; i < count; i++) {
		skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

		sk = stack[i];
		if (skb1) {
			if (sk_rcvqueues_full(sk, skb1)) {
				kfree_skb(skb1);
				goto drop;
			}
			bh_lock_sock(sk);
			if (!sock_owned_by_user(sk))
				udpv6_queue_rcv_skb(sk, skb1);
			else if (sk_add_backlog(sk, skb1)) {
				kfree_skb(skb1);
				bh_unlock_sock(sk);
				goto drop;
			}
			bh_unlock_sock(sk);
			continue;
		}
drop:
		atomic_inc(&sk->sk_drops);
		UDP6_INC_STATS_BH(sock_net(sk),
				UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
		UDP6_INC_STATS_BH(sock_net(sk),
				UDP_MIB_INERRORS, IS_UDPLITE(sk));
	}
}
/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable)
{
	struct sock *sk, *stack[256 / sizeof(struct sock *)];
	const struct udphdr *uh = udp_hdr(skb);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
	int dif;
	unsigned int i, count = 0;

	spin_lock(&hslot->lock);
	sk = sk_nulls_head(&hslot->head);
	dif = inet6_iif(skb);
	sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
	while (sk) {
		stack[count++] = sk;
		sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
				       uh->source, saddr, dif);
		if (unlikely(count == ARRAY_SIZE(stack))) {
			if (!sk)
				break;
			/* Stack full and more to come: flush now (every entry
			 * is cloned, ~0 matches no index). */
			flush_stack(stack, count, skb, ~0);
			count = 0;
		}
	}
	/*
	 * before releasing the lock, we must take reference on sockets
	 */
	for (i = 0; i < count; i++)
		sock_hold(stack[i]);

	spin_unlock(&hslot->lock);

	if (count) {
		flush_stack(stack, count, skb, count - 1);

		for (i = 0; i < count; i++)
			sock_put(stack[i]);
	} else {
		kfree_skb(skb);
	}
	return 0;
}

/*
 * Validate/prepare the receive checksum state of an incoming datagram.
 * Returns 0 when the packet may proceed, non-zero when it must be
 * dropped (e.g. zero checksum, which is illegal for UDP over IPv6).
 */
static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	if (uh->check == 0) {
		/* RFC 2460 section 8.1 says that we SHOULD log
		   this error. Well, it is reasonable.
		 */
		LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
		return 1;
	}
	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     skb->len, proto, skb->csum))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!skb_csum_unnecessary(skb))
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len, proto, 0));

	return 0;
}

/*
 * Main receive entry for UDPv6/UDP-Lite: header/length validation,
 * checksum init, multicast vs unicast dispatch, and delivery (or
 * ICMPv6 port-unreachable when no socket matches).
 */
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	struct udphdr *uh;
	const struct in6_addr *saddr, *daddr;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			/* Re-derive header pointers: trimming may have
			 * reallocated skb data. */
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto discard;

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable);

	/* Unicast */

	/*
	 * check socket cache ... must talk to Alan about his plans
	 * for sock caches... i'll skip this for now.
	 */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);

	if (sk == NULL) {
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;

		if (udp_lib_checksum_complete(skb))
			goto discard;
		UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
				proto == IPPROTO_UDPLITE);

		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

		kfree_skb(skb);
		return 0;
	}

	/* deliver */

	if (sk_rcvqueues_full(sk, skb)) {
		sock_put(sk);
		goto discard;
	}
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		udpv6_queue_rcv_skb(sk, skb);
	else if (sk_add_backlog(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		bh_unlock_sock(sk);
		sock_put(sk);
		goto discard;
	}
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
		       proto == IPPROTO_UDPLITE ? "-Lite" : "",
		       saddr,
		       ntohs(uh->source),
		       ulen,
		       skb->len,
		       daddr,
		       ntohs(uh->dest));

discard:
	UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* Protocol handler for plain UDP over the global table. */
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 * Throw away all pending data and cancel the corking.
 * Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udphdr *uh;
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (is_udplite)
		csum = udplite_csum_outgoing(sk, skb);
	else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
				     up->len);
		goto send;
	} else
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    up->len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_push_pending_frames(sk);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS_USER(sock_net(sk),
					    UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP6_INC_STATS_USER(sock_net(sk),
				    UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

/*
 * sendmsg() for UDPv6: validates the destination (falling back to the
 * IPv4 udp_sendmsg for v4-mapped targets), resolves flow label/options/
 * route, appends data (possibly corked with MSG_MORE), and pushes the
 * datagram.  Contains vendor CONFIG_HTC_NETWORK_MODIFY debug hunks.
 */
int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
		  struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_len = msg->msg_namelen;
	int ulen = len;
	int hlimit = -1;
	int tclass = -1;
	int dontfrag = -1;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int connected = 0;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &np->daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(iocb, sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(iocb, sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &np->daddr;
		fl6.flowlabel = np->flow_label;
		connected = 1;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = sk->sk_mark;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);

		err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit,
					&tclass, &dontfrag);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = 0;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);

	fl6.flowi6_proto = sk->sk_protocol;
	if (!ipv6_addr_any(daddr))
		ipv6_addr_copy(&fl6.daddr, daddr);
	else
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		ipv6_addr_copy(&fl6.saddr, &np->saddr);
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = 0;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = 0;
	}

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	/* Vendor (HTC) debug hunk: cannot trigger here — IS_ERR(dst)
	 * already returned above, so this never fires on this path. */
#ifdef CONFIG_HTC_NETWORK_MODIFY
	if (IS_ERR(dst) || (!dst))
		printk(KERN_ERR "[NET] dst is NULL in %s!\n", __func__);
#endif

	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl6.daddr))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = ip6_dst_hoplimit(dst);
	}

	if (tclass < 0)
		tclass = np->tclass;

	if (dontfrag < 0)
		dontfrag = np->dontfrag;

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	up->len += ulen;
	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;

	/* Vendor (HTC) debug hunk: dst is legitimately NULL when we got
	 * here via the corked do_append_data path above. */
#ifdef CONFIG_HTC_NETWORK_MODIFY
	if (IS_ERR(dst) || (!dst))
		printk(KERN_ERR "[NET] dst is NULL in %s!\n", __func__);
#endif

	err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
		sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
		(struct rt6_info*)dst,
		corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (dst) {
		if (connected) {
			ip6_dst_store(sk, dst,
				      ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
				      &np->daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
				      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
				      &np->saddr :
#endif
				      NULL);
		} else {
			dst_release(dst);
		}
		dst = NULL;
	}

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);
out:
	dst_release(dst);
	fl6_sock_release(flowlabel);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS_USER(sock_net(sk),
				UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(dst);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

/* Socket teardown: flush any corked frames, then common IPv6 cleanup. */
void udpv6_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user 
*optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); } #endif static int udp6_ufo_send_check(struct sk_buff *skb) { const struct ipv6hdr *ipv6h; struct udphdr *uh; if (!pskb_may_pull(skb, sizeof(*uh))) return -EINVAL; ipv6h = ipv6_hdr(skb); uh = udp_hdr(skb); uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, IPPROTO_UDP, 0); skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; return 0; } static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; unsigned int unfrag_ip6hlen, unfrag_len; struct frag_hdr *fptr; u8 *mac_start, *prevhdr; u8 nexthdr; u8 frag_hdr_sz = sizeof(struct frag_hdr); int offset; __wsum csum; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. */ int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) || !(type & (SKB_GSO_UDP)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; } /* Do software UFO. Complete and fill in the UDP checksum as HW cannot * do checksum of UDP packets sent as multiple IP fragments. */ offset = skb_checksum_start_offset(skb); csum = skb_checksum(skb, offset, skb->len- offset, 0); offset += skb->csum_offset; *(__sum16 *)(skb->data + offset) = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. 
*/ if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out; /* Find the unfragmentable header and shift it left by frag_hdr_sz * bytes to insert fragment header. */ unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; *prevhdr = NEXTHDR_FRAGMENT; unfrag_len = skb_network_header(skb) - skb_mac_header(skb) + unfrag_ip6hlen; mac_start = skb_mac_header(skb); memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len); skb->mac_header -= frag_hdr_sz; skb->network_header -= frag_hdr_sz; fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; ipv6_select_ident(fptr, rt ? &rt->rt6i_dst.addr : &ipv6_hdr(skb)->daddr); /* Fragment the skb. ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() */ segs = skb_segment(skb, features); out: return segs; } static const struct inet6_protocol udpv6_protocol = { .handler = udpv6_rcv, .err_handler = udpv6_err, .gso_send_check = udp6_ufo_send_check, .gso_segment = udp6_ufo_fragment, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket) { struct inet_sock *inet = inet_sk(sp); struct ipv6_pinfo *np = inet6_sk(sp); const struct in6_addr *dest, *src; __u16 destp, srcp; dest = &np->daddr; src = &np->rcv_saddr; destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); seq_printf(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n", bucket, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } int udp6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode ref pointer drops\n"); else udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket); return 0; } static struct udp_seq_afinfo udp6_seq_afinfo = { .name = "udp6", .family = AF_INET6, .udp_table = &udp_table, .seq_fops = { .owner = THIS_MODULE, }, .seq_ops = { .show = udp6_seq_show, }, }; int __net_init udp6_proc_init(struct net *net) { return udp_proc_register(net, &udp6_seq_afinfo); } void udp6_proc_exit(struct net *net) { udp_proc_unregister(net, &udp6_seq_afinfo); } #endif /* CONFIG_PROC_FS */ /* ------------------------------------------------------------------------ */ struct proto udpv6_prot = { .name = "UDPv6", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .destroy = udpv6_destroy_sock, .setsockopt = udpv6_setsockopt, .getsockopt = udpv6_getsockopt, .sendmsg = udpv6_sendmsg, .recvmsg = udpv6_recvmsg, .backlog_rcv = udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v6_rehash, .get_port = udp_v6_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, .sysctl_wmem = &sysctl_udp_wmem_min, .sysctl_rmem = &sysctl_udp_rmem_min, .obj_size = sizeof(struct udp6_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .h.udp_table = &udp_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif .clear_sk = sk_prot_clear_portaddr_nulls, }; static struct inet_protosw udpv6_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_UDP, .prot = &udpv6_prot, .ops = &inet6_dgram_ops, .no_check = UDP_CSUM_DEFAULT, .flags = INET_PROTOSW_PERMANENT, }; int __init udpv6_init(void) { int ret; ret = 
inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP); if (ret) goto out; ret = inet6_register_protosw(&udpv6_protosw); if (ret) goto out_udpv6_protocol; out: return ret; out_udpv6_protocol: inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); goto out; } void udpv6_exit(void) { inet6_unregister_protosw(&udpv6_protosw); inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP); }
gpl-2.0
rabeeh/linux-2.6.32.9
drivers/staging/comedi/drivers/cb_pcidio.c
509
9441
/*
    comedi/drivers/cb_pcidio.c
    A Comedi driver for PCI-DIO24H & PCI-DIO48H of ComputerBoards
    (currently MeasurementComputing)

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 2000 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: cb_pcidio
Description: ComputerBoards' DIO boards with PCI interface
Devices: [Measurement Computing] PCI-DIO24 (cb_pcidio), PCI-DIO24H, PCI-DIO48H
Author: Yoshiya Matsuzaka
Updated: Mon, 29 Oct 2007 15:40:47 +0000
Status: experimental

This driver has been modified from skel.c of comedi-0.7.70.

Configuration Options:
  [0] - PCI bus of device (optional)
  [1] - PCI slot of device (optional)
  If bus/slot is not specified, the first available PCI
  device will be used.

Passing a zero for an option is the same as leaving it unspecified.
*/

/*------------------------------ HEADER FILES ---------------------------------*/
#include "../comedidev.h"
#include "comedi_pci.h"
#include "8255.h"

/*-------------------------- MACROS and DATATYPES -----------------------------*/
#define PCI_VENDOR_ID_CB	0x1307

/*
 * Static board description table.  Describing the boards in this way is
 * optional and completely driver-dependent; this driver uses one entry
 * per supported card, matched against the PCI device ID during attach.
 */
struct pcidio_board {
	const char *name;	/* name of the board */
	int dev_id;		/* PCI device ID matched in pcidio_attach() */
	int n_8255;		/* number of 8255 chips on board */

	/* indices of base address regions (PCI BARs) */
	int pcicontroler_badrindex;	/* BAR holding the PCI controller regs */
	int dioregs_badrindex;		/* BAR holding the 8255 DIO registers */
};

static const struct pcidio_board pcidio_boards[] = {
	{
	 .name = "pci-dio24",
	 .dev_id = 0x0028,
	 .n_8255 = 1,
	 .pcicontroler_badrindex = 1,
	 .dioregs_badrindex = 2,
	 },
	{
	 .name = "pci-dio24h",
	 .dev_id = 0x0014,
	 .n_8255 = 1,
	 .pcicontroler_badrindex = 1,
	 .dioregs_badrindex = 2,
	 },
	{
	 .name = "pci-dio48h",
	 .dev_id = 0x000b,
	 .n_8255 = 2,
	 .pcicontroler_badrindex = 0,
	 .dioregs_badrindex = 1,
	 },
};

/* This is used by modprobe to translate PCI IDs to drivers.  Should
 * only be used for PCI and ISA-PnP devices */
/* Please add your PCI vendor ID to comedidev.h, and it will be forwarded
 * upstream. */
static DEFINE_PCI_DEVICE_TABLE(pcidio_pci_table) = {
	{ PCI_VENDOR_ID_CB, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_CB, 0x0014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_CB, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ 0}
};

MODULE_DEVICE_TABLE(pci, pcidio_pci_table);

/*
 * Shorthand access to the board description for this device.
 * Valid only after dev->board_ptr has been set (see pcidio_attach()).
 */
#define thisboard ((const struct pcidio_board *)dev->board_ptr)

/* this structure is for data unique to this hardware driver.  If
   several hardware drivers keep similar information in this structure,
   feel free to suggest moving the variable to the struct comedi_device
   struct. */
struct pcidio_private {
	int data;		/* currently unused */

	/* would be useful for a PCI device */
	struct pci_dev *pci_dev;

	/* used for DO readback, currently unused */
	unsigned int do_readback[4];	/* up to 4 unsigned int suffice to hold
					   96 bits for PCI-DIO96 */

	/* address of port A of the first 8255 chip on board; also serves as
	 * the "PCI regions requested" flag checked in pcidio_detach() */
	unsigned long dio_reg_base;
};

/*
 * most drivers define the following macro to make it easy to
 * access the private structure.
 */
#define devpriv ((struct pcidio_private *)dev->private)

/*
 * The struct comedi_driver structure tells the Comedi core module
 * which functions to call to configure/deconfigure (attach/detach)
 * the board, and also about the kernel module that contains
 * the device code.
 */
static int pcidio_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int pcidio_detach(struct comedi_device *dev);
static struct comedi_driver driver_cb_pcidio = {
	.driver_name = "cb_pcidio",
	.module = THIS_MODULE,
	.attach = pcidio_attach,
	.detach = pcidio_detach,
/* It is not necessary to implement the following members if you are
 * writing a driver for a ISA PnP or PCI card */
	/* Most drivers will support multiple types of boards by
	 * having an array of board structures.  These were defined
	 * in pcidio_boards[] above.  Note that the element 'name'
	 * was first in the structure -- Comedi uses this fact to
	 * extract the name of the board without knowing any details
	 * about the structure except for its length.
	 * When a device is attached (by comedi_config), the name
	 * of the device is given to Comedi, and Comedi tries to
	 * match it by going through the list of board names.  If
	 * there is a match, the address of the pointer is put
	 * into dev->board_ptr and driver->attach() is called.
	 *
	 * Note that these are not necessary if you can determine
	 * the type of board in software.  ISA PnP, PCI, and PCMCIA
	 * devices are such boards.
	 */

/* The following fields should NOT be initialized if you are dealing
 * with PCI devices
 *
 *	.board_name = pcidio_boards,
 *	.offset = sizeof(struct pcidio_board),
 *	.num_names = sizeof(pcidio_boards) / sizeof(struct pcidio_board),
 */
};

/*------------------------------- FUNCTIONS -----------------------------------*/

/*
 * Attach is called by the Comedi core to configure the driver
 * for a particular board.  This driver probes the PCI bus itself
 * (optionally constrained to the bus/slot given in it->options[0..1]),
 * so dev->board_ptr is set here rather than by name matching.
 *
 * Returns 1 on success, -ENOMEM or -EIO on failure.
 */
static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct pci_dev *pcidev = NULL;
	int index;
	int i;

	printk("comedi%d: cb_pcidio: \n", dev->minor);

/*
 * Allocate the private structure area.  alloc_private() is a
 * convenient macro defined in comedidev.h.
 */
	if (alloc_private(dev, sizeof(struct pcidio_private)) < 0)
		return -ENOMEM;
/*
 * Probe the device to determine what device in the series it is.
 */
	for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	     pcidev != NULL;
	     pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
		/* is it not a computer boards card? */
		if (pcidev->vendor != PCI_VENDOR_ID_CB)
			continue;
		/* loop through cards supported by this driver */
		for (index = 0; index < ARRAY_SIZE(pcidio_boards); index++) {
			if (pcidio_boards[index].dev_id != pcidev->device)
				continue;
			/* was a particular bus/slot requested? */
			if (it->options[0] || it->options[1]) {
				/* are we on the wrong bus/slot? */
				if (pcidev->bus->number != it->options[0] ||
				    PCI_SLOT(pcidev->devfn) != it->options[1]) {
					continue;
				}
			}
			dev->board_ptr = pcidio_boards + index;
			goto found;
		}
	}
	printk("No supported ComputerBoards/MeasurementComputing card found on "
	       "requested position\n");
	return -EIO;

found:
/*
 * Initialize dev->board_name.  Note that we can use the "thisboard"
 * macro now, since we just initialized it in the last line.
 */
	dev->board_name = thisboard->name;

	devpriv->pci_dev = pcidev;
	printk("Found %s on bus %i, slot %i\n", thisboard->name,
	       devpriv->pci_dev->bus->number, PCI_SLOT(devpriv->pci_dev->devfn));
	if (comedi_pci_enable(pcidev, thisboard->name)) {
		printk
		    ("cb_pcidio: failed to enable PCI device and request regions\n");
		return -EIO;
	}
	/* remember port A of the first 8255; nonzero also marks that the
	 * PCI regions were requested (see pcidio_detach()) */
	devpriv->dio_reg_base
	    =
	    pci_resource_start(devpriv->pci_dev,
			       pcidio_boards[index].dioregs_badrindex);

/*
 * Allocate the subdevice structures.  alloc_subdevices() is a
 * convenient macro defined in comedidev.h.
 */
	if (alloc_subdevices(dev, thisboard->n_8255) < 0)
		return -ENOMEM;

	/* one 8255 digital-I/O subdevice per chip, 4 registers apart */
	for (i = 0; i < thisboard->n_8255; i++) {
		subdev_8255_init(dev, dev->subdevices + i,
				 NULL, devpriv->dio_reg_base + i * 4);
		printk(" subdev %d: base = 0x%lx\n", i,
		       devpriv->dio_reg_base + i * 4);
	}

	printk("attached\n");
	return 1;
}

/*
 * _detach is called to deconfigure a device.  It should deallocate
 * resources.
 * This function is also called when _attach() fails, so it should be
 * careful not to release resources that were not necessarily
 * allocated by _attach().  dev->private and dev->subdevices are
 * deallocated automatically by the core.
 */
static int pcidio_detach(struct comedi_device *dev)
{
	printk("comedi%d: cb_pcidio: remove\n", dev->minor);
	if (devpriv) {
		if (devpriv->pci_dev) {
			/* dio_reg_base != 0 means comedi_pci_enable()
			 * succeeded, so the regions must be released */
			if (devpriv->dio_reg_base) {
				comedi_pci_disable(devpriv->pci_dev);
			}
			pci_dev_put(devpriv->pci_dev);
		}
	}
	if (dev->subdevices) {
		int i;
		for (i = 0; i < thisboard->n_8255; i++) {
			subdev_8255_cleanup(dev, dev->subdevices + i);
		}
	}
	return 0;
}

/*
 * A convenient macro that defines init_module() and cleanup_module(),
 * as necessary.
 */
COMEDI_PCI_INITCLEANUP(driver_cb_pcidio, pcidio_pci_table);
gpl-2.0
linux4kix/VF845kernelFroyo
drivers/usb/host/ohci-pxa27x.c
509
14647
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 * (C) Copyright 2002 Hewlett-Packard Company
 *
 * Bus Glue for pxa27x
 *
 * Written by Christopher Hoover <ch@hpl.hp.com>
 * Based on fragments of previous driver by Russell King et al.
 *
 * Modified for LH7A404 from ohci-sa1111.c
 *  by Durgesh Pattamatta <pattamattad@sharpsec.com>
 *
 * Modified for pxa27x from ohci-lh7a404.c
 *  by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
 *
 * This file is licenced under the GPL.
 */

#include <linux/device.h>
#include <linux/signal.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <mach/ohci.h>

/*
 * UHC: USB Host Controller (OHCI-like) register definitions
 * (offsets from the controller's MMIO base)
 */
#define UHCREV		(0x0000) /* UHC HCI Spec Revision */
#define UHCHCON		(0x0004) /* UHC Host Control Register */
#define UHCCOMS		(0x0008) /* UHC Command Status Register */
#define UHCINTS		(0x000C) /* UHC Interrupt Status Register */
#define UHCINTE		(0x0010) /* UHC Interrupt Enable */
#define UHCINTD		(0x0014) /* UHC Interrupt Disable */
#define UHCHCCA		(0x0018) /* UHC Host Controller Comm. Area */
#define UHCPCED		(0x001C) /* UHC Period Current Endpt Descr */
#define UHCCHED		(0x0020) /* UHC Control Head Endpt Descr */
#define UHCCCED		(0x0024) /* UHC Control Current Endpt Descr */
#define UHCBHED		(0x0028) /* UHC Bulk Head Endpt Descr */
#define UHCBCED		(0x002C) /* UHC Bulk Current Endpt Descr */
#define UHCDHEAD	(0x0030) /* UHC Done Head */
#define UHCFMI		(0x0034) /* UHC Frame Interval */
#define UHCFMR		(0x0038) /* UHC Frame Remaining */
#define UHCFMN		(0x003C) /* UHC Frame Number */
#define UHCPERS		(0x0040) /* UHC Periodic Start */
#define UHCLS		(0x0044) /* UHC Low Speed Threshold */

#define UHCRHDA		(0x0048) /* UHC Root Hub Descriptor A */
#define UHCRHDA_NOCP	(1 << 12)	/* No over current protection */
#define UHCRHDA_OCPM	(1 << 11)	/* Over Current Protection Mode */
#define UHCRHDA_POTPGT(x) \
			(((x) & 0xff) << 24)	/* Power On To Power Good Time */

#define UHCRHDB		(0x004C) /* UHC Root Hub Descriptor B */
#define UHCRHS		(0x0050) /* UHC Root Hub Status */
#define UHCRHPS1	(0x0054) /* UHC Root Hub Port 1 Status */
#define UHCRHPS2	(0x0058) /* UHC Root Hub Port 2 Status */
#define UHCRHPS3	(0x005C) /* UHC Root Hub Port 3 Status */

#define UHCSTAT		(0x0060) /* UHC Status Register */
#define UHCSTAT_UPS3	(1 << 16)	/* USB Power Sense Port3 */
#define UHCSTAT_SBMAI	(1 << 15)	/* System Bus Master Abort Interrupt*/
#define UHCSTAT_SBTAI	(1 << 14)	/* System Bus Target Abort Interrupt*/
#define UHCSTAT_UPRI	(1 << 13)	/* USB Port Resume Interrupt */
#define UHCSTAT_UPS2	(1 << 12)	/* USB Power Sense Port 2 */
#define UHCSTAT_UPS1	(1 << 11)	/* USB Power Sense Port 1 */
#define UHCSTAT_HTA	(1 << 10)	/* HCI Target Abort */
#define UHCSTAT_HBA	(1 << 8)	/* HCI Buffer Active */
#define UHCSTAT_RWUE	(1 << 7)	/* HCI Remote Wake Up Event */

#define UHCHR		(0x0064) /* UHC Reset Register */
#define UHCHR_SSEP3	(1 << 11)	/* Sleep Standby Enable for Port3 */
#define UHCHR_SSEP2	(1 << 10)	/* Sleep Standby Enable for Port2 */
#define UHCHR_SSEP1	(1 << 9)	/* Sleep Standby Enable for Port1 */
#define UHCHR_PCPL	(1 << 7)	/* Power control polarity low */
#define UHCHR_PSPL	(1 << 6)	/* Power sense polarity low */
#define UHCHR_SSE	(1 << 5)	/* Sleep Standby Enable */
#define UHCHR_UIT	(1 << 4)	/* USB Interrupt Test */
#define UHCHR_SSDC	(1 << 3)	/* Simulation Scale Down Clock */
#define UHCHR_CGR	(1 << 2)	/* Clock Generation Reset */
#define UHCHR_FHR	(1 << 1)	/* Force Host Controller Reset */
#define UHCHR_FSBIR	(1 << 0)	/* Force System Bus Iface Reset */

#define UHCHIE		(0x0068) /* UHC Interrupt Enable Register*/
#define UHCHIE_UPS3IE	(1 << 14)	/* Power Sense Port3 IntEn */
#define UHCHIE_UPRIE	(1 << 13)	/* Port Resume IntEn */
#define UHCHIE_UPS2IE	(1 << 12)	/* Power Sense Port2 IntEn */
#define UHCHIE_UPS1IE	(1 << 11)	/* Power Sense Port1 IntEn */
#define UHCHIE_TAIE	(1 << 10)	/* HCI Interface Transfer Abort
					   Interrupt Enable*/
#define UHCHIE_HBAIE	(1 << 8)	/* HCI Buffer Active IntEn */
#define UHCHIE_RWIE	(1 << 7)	/* Remote Wake-up IntEn */

#define UHCHIT		(0x006C) /* UHC Interrupt Test register */

#define PXA_UHC_MAX_PORTNUM	3

struct pxa27x_ohci {
	/* must be 1st member here for hcd_to_ohci() to work */
	struct ohci_hcd ohci;

	struct device	*dev;		/* the platform device's dev */
	struct clk	*clk;		/* USB host clock */
	void __iomem	*mmio_base;	/* mapped UHC register window */
};

#define to_pxa27x_ohci(hcd)	(struct pxa27x_ohci *)hcd_to_ohci(hcd)

/*
  PMM_NPS_MODE -- PMM Non-power switching mode
      Ports are powered continuously.

  PMM_GLOBAL_MODE -- PMM global switching mode
      All ports are powered at the same time.

  PMM_PERPORT_MODE -- PMM per port switching mode
      Ports are powered individually.
 */
static int pxa27x_ohci_select_pmm(struct pxa27x_ohci *ohci, int mode)
{
	uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);
	uint32_t uhcrhdb = __raw_readl(ohci->mmio_base + UHCRHDB);

	switch (mode) {
	case PMM_NPS_MODE:
		uhcrhda |= RH_A_NPS;
		break;
	case PMM_GLOBAL_MODE:
		/* NOTE(review): ~(RH_A_NPS & RH_A_PSM) clears only the bits
		 * common to both masks; clearing both bits would need
		 * ~(RH_A_NPS | RH_A_PSM).  This expression is carried over
		 * from the original driver -- verify intent before changing. */
		uhcrhda &= ~(RH_A_NPS & RH_A_PSM);
		break;
	case PMM_PERPORT_MODE:
		uhcrhda &= ~(RH_A_NPS);
		uhcrhda |= RH_A_PSM;

		/* Set port power control mask bits, only 3 ports. */
		uhcrhdb |= (0x7<<17);
		break;
	default:
		printk( KERN_ERR
			"Invalid mode %d, set to non-power switch mode.\n",
			mode );
		uhcrhda |= RH_A_NPS;
	}

	__raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
	__raw_writel(uhcrhdb, ohci->mmio_base + UHCRHDB);
	return 0;
}

extern int usb_disabled(void);

/*-------------------------------------------------------------------------*/

/* Apply the board-specific flags from platform data to the UHCHR and
 * UHCRHDA registers: per-port enables, polarity, over-current handling
 * and the power-on-to-power-good delay (in units of 2 ms). */
static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
				   struct pxaohci_platform_data *inf)
{
	uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);
	uint32_t uhcrhda = __raw_readl(ohci->mmio_base + UHCRHDA);

	if (inf->flags & ENABLE_PORT1)
		uhchr &= ~UHCHR_SSEP1;

	if (inf->flags & ENABLE_PORT2)
		uhchr &= ~UHCHR_SSEP2;

	if (inf->flags & ENABLE_PORT3)
		uhchr &= ~UHCHR_SSEP3;

	if (inf->flags & POWER_CONTROL_LOW)
		uhchr |= UHCHR_PCPL;

	if (inf->flags & POWER_SENSE_LOW)
		uhchr |= UHCHR_PSPL;

	if (inf->flags & NO_OC_PROTECTION)
		uhcrhda |= UHCRHDA_NOCP;
	else
		uhcrhda &= ~UHCRHDA_NOCP;

	if (inf->flags & OC_MODE_PERPORT)
		uhcrhda |= UHCRHDA_OCPM;
	else
		uhcrhda &= ~UHCRHDA_OCPM;

	if (inf->power_on_delay) {
		uhcrhda &= ~UHCRHDA_POTPGT(0xff);
		uhcrhda |= UHCRHDA_POTPGT(inf->power_on_delay / 2);
	}

	__raw_writel(uhchr, ohci->mmio_base + UHCHR);
	__raw_writel(uhcrhda, ohci->mmio_base + UHCRHDA);
}

/* Pulse the Force Host Controller Reset bit (held for 11 us). */
static inline void pxa27x_reset_hc(struct pxa27x_ohci *ohci)
{
	uint32_t uhchr = __raw_readl(ohci->mmio_base + UHCHR);

	__raw_writel(uhchr | UHCHR_FHR, ohci->mmio_base + UHCHR);
	udelay(11);
	__raw_writel(uhchr & ~UHCHR_FHR, ohci->mmio_base + UHCHR);
}

#ifdef CONFIG_CPU_PXA27x
extern void pxa27x_clear_otgph(void);
#else
#define pxa27x_clear_otgph()	do {} while (0)
#endif

/*
 * Enable the clock, reset the controller and the system bus interface,
 * apply board flags, run the board's init hook, then take the controller
 * out of sleep-standby and enable resume/remote-wakeup interrupts.
 * Returns 0 on success, or the board init hook's negative error code
 * (the clock is left enabled in that case -- matches original behavior).
 */
static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev)
{
	int retval = 0;
	struct pxaohci_platform_data *inf;
	uint32_t uhchr;

	inf = dev->platform_data;

	clk_enable(ohci->clk);

	pxa27x_reset_hc(ohci);

	uhchr = __raw_readl(ohci->mmio_base + UHCHR) | UHCHR_FSBIR;
	__raw_writel(uhchr, ohci->mmio_base + UHCHR);

	/* FSBIR self-clears when the system bus interface reset completes */
	while (__raw_readl(ohci->mmio_base + UHCHR) & UHCHR_FSBIR)
		cpu_relax();

	pxa27x_setup_hc(ohci, inf);

	if (inf->init)
		retval = inf->init(dev);

	if (retval < 0)
		return retval;

	uhchr = __raw_readl(ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
	__raw_writel(uhchr, ohci->mmio_base + UHCHR);
	__raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, ohci->mmio_base + UHCHIE);

	/* Clear any OTG Pin Hold */
	pxa27x_clear_otgph();
	return 0;
}

/* Run the board's exit hook, reset the controller and gate its clock. */
static void pxa27x_stop_hc(struct pxa27x_ohci *ohci, struct device *dev)
{
	struct pxaohci_platform_data *inf;
	uint32_t uhccoms;

	inf = dev->platform_data;

	if (inf->exit)
		inf->exit(dev);

	pxa27x_reset_hc(ohci);

	/* Host Controller Reset */
	uhccoms = __raw_readl(ohci->mmio_base + UHCCOMS) | 0x01;
	__raw_writel(uhccoms, ohci->mmio_base + UHCCOMS);
	udelay(10);

	clk_disable(ohci->clk);
}

/*-------------------------------------------------------------------------*/

/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */

/**
 * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 */
int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device *pdev)
{
	int retval, irq;
	struct usb_hcd *hcd;
	struct pxaohci_platform_data *inf;
	struct pxa27x_ohci *ohci;
	struct resource *r;
	struct clk *usb_clk;

	inf = pdev->dev.platform_data;
	if (!inf)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		pr_err("no resource of IORESOURCE_IRQ");
		return -ENXIO;
	}

	usb_clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(usb_clk))
		return PTR_ERR(usb_clk);

	hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
	if (!hcd)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		pr_err("no resource of IORESOURCE_MEM");
		retval = -ENXIO;
		goto err1;
	}

	hcd->rsrc_start = r->start;
	hcd->rsrc_len = resource_size(r);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		pr_debug("request_mem_region failed");
		retval = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		pr_debug("ioremap failed");
		retval = -ENOMEM;
		goto err2;
	}

	/* initialize "struct pxa27x_ohci" */
	ohci = (struct pxa27x_ohci *)hcd_to_ohci(hcd);
	ohci->dev = &pdev->dev;
	ohci->clk = usb_clk;
	ohci->mmio_base = (void __iomem *)hcd->regs;

	if ((retval = pxa27x_start_hc(ohci, &pdev->dev)) < 0) {
		pr_debug("pxa27x_start_hc failed");
		goto err3;
	}

	/* Select Power Management Mode */
	pxa27x_ohci_select_pmm(ohci, inf->port_mode);

	if (inf->power_budget)
		hcd->power_budget = inf->power_budget;

	ohci_hcd_init(hcd_to_ohci(hcd));

	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (retval == 0)
		return retval;

	/* usb_add_hcd() failed: unwind everything acquired above */
	pxa27x_stop_hc(ohci, &pdev->dev);
 err3:
	iounmap(hcd->regs);
 err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 err1:
	usb_put_hcd(hcd);
	clk_put(usb_clk);
	return retval;
}

/* may be called without controller electrically present */
/* may be called with controller, bus, and devices active */

/**
 * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
 * @dev: USB Host Controller being removed
 * Context: !in_interrupt()
 *
 * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking
 * the HCD's stop() method.  It is always called from a thread
 * context, normally "rmmod", "apmd", or something similar.
 *
 */
void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
	struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);

	usb_remove_hcd(hcd);
	pxa27x_stop_hc(ohci, &pdev->dev);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
	clk_put(ohci->clk);
}

/*-------------------------------------------------------------------------*/

static int __devinit
ohci_pxa27x_start (struct usb_hcd *hcd)
{
	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
	int		ret;

	ohci_dbg (ohci, "ohci_pxa27x_start, ohci:%p", ohci);

	/* The value of NDP in roothub_a is incorrect on this hardware */
	ohci->num_ports = 3;

	if ((ret = ohci_init(ohci)) < 0)
		return ret;

	if ((ret = ohci_run (ohci)) < 0) {
		err ("can't start %s", hcd->self.bus_name);
		ohci_stop (hcd);
		return ret;
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

static const struct hc_driver ohci_pxa27x_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"PXA27x OHCI",
	.hcd_priv_size =	sizeof(struct pxa27x_ohci),

	/*
	 * generic hardware linkage
	 */
	.irq =			ohci_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	/*
	 * basic lifecycle operations
	 */
	.start =		ohci_pxa27x_start,
	.stop =			ohci_stop,
	.shutdown =		ohci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		ohci_urb_enqueue,
	.urb_dequeue =		ohci_urb_dequeue,
	.endpoint_disable =	ohci_endpoint_disable,

	/*
	 * scheduling support
	 */
	.get_frame_number =	ohci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data =	ohci_hub_status_data,
	.hub_control =		ohci_hub_control,
#ifdef	CONFIG_PM
	.bus_suspend =		ohci_bus_suspend,
	.bus_resume =		ohci_bus_resume,
#endif
	.start_port_reset =	ohci_start_port_reset,
};

/*-------------------------------------------------------------------------*/

static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
{
	pr_debug ("In ohci_hcd_pxa27x_drv_probe");

	if (usb_disabled())
		return -ENODEV;

	return usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, pdev);
}

static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	usb_hcd_pxa27x_remove(hcd, pdev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

#ifdef CONFIG_PM
static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);

	/* honor any pending root-hub state-change delay */
	if (time_before(jiffies, ohci->ohci.next_statechange))
		msleep(5);
	ohci->ohci.next_statechange = jiffies;

	pxa27x_stop_hc(ohci, dev);
	hcd->state = HC_STATE_SUSPENDED;

	return 0;
}

static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
	struct pxaohci_platform_data *inf = dev->platform_data;
	int status;

	/* honor any pending root-hub state-change delay */
	if (time_before(jiffies, ohci->ohci.next_statechange))
		msleep(5);
	ohci->ohci.next_statechange = jiffies;

	if ((status = pxa27x_start_hc(ohci, dev)) < 0)
		return status;

	/* Select Power Management Mode */
	pxa27x_ohci_select_pmm(ohci, inf->port_mode);

	ohci_finish_controller_resume(hcd);
	return 0;
}

static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
	.suspend	= ohci_hcd_pxa27x_drv_suspend,
	.resume		= ohci_hcd_pxa27x_drv_resume,
};
#endif

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:pxa27x-ohci");

static struct platform_driver ohci_hcd_pxa27x_driver = {
	.probe		= ohci_hcd_pxa27x_drv_probe,
	.remove		= ohci_hcd_pxa27x_drv_remove,
	.shutdown	= usb_hcd_platform_shutdown,
	.driver		= {
		.name	= "pxa27x-ohci",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &ohci_hcd_pxa27x_pm_ops,
#endif
	},
};
gpl-2.0
bebek15/samsung_kernel_msm7x27
drivers/staging/cx25821/cx25821-video3.c
765
12058
/* * Driver for the Conexant CX25821 PCIe bridge * * Copyright (C) 2009 Conexant Systems Inc. * Authors <shu.lin@conexant.com>, <hiep.huynh@conexant.com> * Based on Steven Toth <stoth@linuxtv.org> cx23885 driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx25821-video.h" static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct cx25821_buffer *buf = container_of(vb, struct cx25821_buffer, vb); struct cx25821_buffer *prev; struct cx25821_fh *fh = vq->priv_data; struct cx25821_dev *dev = fh->dev; struct cx25821_dmaqueue *q = &dev->vidq[SRAM_CH03]; /* add jump to stopper */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma); buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */ dprintk(2, "jmp to stopper (0x%x)\n", buf->risc.jmp[1]); if (!list_empty(&q->queued)) { list_add_tail(&buf->vb.queue, &q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buf, buf->vb.i); } else if (list_empty(&q->active)) { list_add_tail(&buf->vb.queue, &q->active); cx25821_start_video_dma(dev, q, buf, &dev->sram_channels[SRAM_CH03]); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT); dprintk(2, "[%p/%d] buffer_queue - first active, buf cnt = %d, 
q->count = %d\n", buf, buf->vb.i, buf->count, q->count); } else { prev = list_entry(q->active.prev, struct cx25821_buffer, vb.queue); if (prev->vb.width == buf->vb.width && prev->vb.height == buf->vb.height && prev->fmt == buf->fmt) { list_add_tail(&buf->vb.queue, &q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); /* 64 bit bits 63-32 */ prev->risc.jmp[2] = cpu_to_le32(0); dprintk(2, "[%p/%d] buffer_queue - append to active, buf->count=%d\n", buf, buf->vb.i, buf->count); } else { list_add_tail(&buf->vb.queue, &q->queued); buf->vb.state = VIDEOBUF_QUEUED; dprintk(2, "[%p/%d] buffer_queue - first queued\n", buf, buf->vb.i); } } if (list_empty(&q->active)) { dprintk(2, "active queue empty!\n"); } } static struct videobuf_queue_ops cx25821_video_qops = { .buf_setup = cx25821_buffer_setup, .buf_prepare = cx25821_buffer_prepare, .buf_queue = buffer_queue, .buf_release = cx25821_buffer_release, }; static int video_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct cx25821_dev *dev = video_drvdata(file); struct cx25821_fh *fh; enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE; u32 pix_format; printk("open dev=%s type=%s\n", video_device_node_name(vdev), v4l2_type_names[type]); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (NULL == fh) return -ENOMEM; lock_kernel(); file->private_data = fh; fh->dev = dev; fh->type = type; fh->width = 720; if (dev->tvnorm & V4L2_STD_PAL_BG || dev->tvnorm & V4L2_STD_PAL_DK) fh->height = 576; else fh->height = 480; dev->channel_opened = SRAM_CH03; pix_format = (dev->pixel_formats[dev->channel_opened] == PIXEL_FRMT_411) ? 
V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV; fh->fmt = format_by_fourcc(pix_format); v4l2_prio_open(&dev->prio, &fh->prio); videobuf_queue_sg_init(&fh->vidq, &cx25821_video_qops, &dev->pci->dev, &dev->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct cx25821_buffer), fh); dprintk(1, "post videobuf_queue_init()\n"); unlock_kernel(); return 0; } static ssize_t video_read(struct file *file, char __user * data, size_t count, loff_t * ppos) { struct cx25821_fh *fh = file->private_data; switch (fh->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: if (cx25821_res_locked(fh->dev, RESOURCE_VIDEO3)) return -EBUSY; return videobuf_read_one(&fh->vidq, data, count, ppos, file->f_flags & O_NONBLOCK); default: BUG(); return 0; } } static unsigned int video_poll(struct file *file, struct poll_table_struct *wait) { struct cx25821_fh *fh = file->private_data; struct cx25821_buffer *buf; if (cx25821_res_check(fh, RESOURCE_VIDEO3)) { /* streaming capture */ if (list_empty(&fh->vidq.stream)) return POLLERR; buf = list_entry(fh->vidq.stream.next, struct cx25821_buffer, vb.stream); } else { /* read() capture */ buf = (struct cx25821_buffer *)fh->vidq.read_buf; if (NULL == buf) return POLLERR; } poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) { if (buf->vb.state == VIDEOBUF_DONE) { struct cx25821_dev *dev = fh->dev; if (dev && dev->use_cif_resolution[SRAM_CH03]) { u8 cam_id = *((char *)buf->vb.baddr + 3); memcpy((char *)buf->vb.baddr, (char *)buf->vb.baddr + (fh->width * 2), (fh->width * 2)); *((char *)buf->vb.baddr + 3) = cam_id; } } return POLLIN | POLLRDNORM; } return 0; } static int video_release(struct file *file) { struct cx25821_fh *fh = file->private_data; struct cx25821_dev *dev = fh->dev; //stop the risc engine and fifo cx_write(channel3->dma_ctl, 0); /* FIFO and RISC disable */ /* stop video capture */ if (cx25821_res_check(fh, RESOURCE_VIDEO3)) { videobuf_queue_cancel(&fh->vidq); cx25821_res_free(dev, fh, 
RESOURCE_VIDEO3); } if (fh->vidq.read_buf) { cx25821_buffer_release(&fh->vidq, fh->vidq.read_buf); kfree(fh->vidq.read_buf); } videobuf_mmap_free(&fh->vidq); v4l2_prio_close(&dev->prio, fh->prio); file->private_data = NULL; kfree(fh); return 0; } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx25821_fh *fh = priv; struct cx25821_dev *dev = fh->dev; if (unlikely(fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)) { return -EINVAL; } if (unlikely(i != fh->type)) { return -EINVAL; } if (unlikely(!cx25821_res_get(dev, fh, cx25821_get_resource(fh, RESOURCE_VIDEO3)))) { return -EBUSY; } return videobuf_streamon(get_queue(fh)); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i) { struct cx25821_fh *fh = priv; struct cx25821_dev *dev = fh->dev; int err, res; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (i != fh->type) return -EINVAL; res = cx25821_get_resource(fh, RESOURCE_VIDEO3); err = videobuf_streamoff(get_queue(fh)); if (err < 0) return err; cx25821_res_free(dev, fh, res); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx25821_fh *fh = priv; struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev; int err; int pix_format = 0; if (fh) { err = v4l2_prio_check(&dev->prio, fh->prio); if (0 != err) return err; } dprintk(2, "%s()\n", __func__); err = cx25821_vidioc_try_fmt_vid_cap(file, priv, f); if (0 != err) return err; fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); fh->vidq.field = f->fmt.pix.field; // check if width and height is valid based on set standard if (cx25821_is_valid_width(f->fmt.pix.width, dev->tvnorm)) { fh->width = f->fmt.pix.width; } if (cx25821_is_valid_height(f->fmt.pix.height, dev->tvnorm)) { fh->height = f->fmt.pix.height; } if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_Y41P) pix_format = PIXEL_FRMT_411; else if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) pix_format = PIXEL_FRMT_422; else return 
-EINVAL; cx25821_set_pixel_format(dev, SRAM_CH03, pix_format); // check if cif resolution if (fh->width == 320 || fh->width == 352) { dev->use_cif_resolution[SRAM_CH03] = 1; } else { dev->use_cif_resolution[SRAM_CH03] = 0; } dev->cif_width[SRAM_CH03] = fh->width; medusa_set_resolution(dev, fh->width, SRAM_CH03); dprintk(2, "%s() width=%d height=%d field=%d\n", __func__, fh->width, fh->height, fh->vidq.field); cx25821_call_all(dev, video, s_fmt, f); return 0; } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) { int ret_val = 0; struct cx25821_fh *fh = priv; struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev; ret_val = videobuf_dqbuf(get_queue(fh), p, file->f_flags & O_NONBLOCK); p->sequence = dev->vidq[SRAM_CH03].count; return ret_val; } static int vidioc_log_status(struct file *file, void *priv) { struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev; char name[32 + 2]; struct sram_channel *sram_ch = &dev->sram_channels[SRAM_CH03]; u32 tmp = 0; snprintf(name, sizeof(name), "%s/2", dev->name); printk(KERN_INFO "%s/2: ============ START LOG STATUS ============\n", dev->name); cx25821_call_all(dev, core, log_status); tmp = cx_read(sram_ch->dma_ctl); printk(KERN_INFO "Video input 3 is %s\n", (tmp & 0x11) ? 
"streaming" : "stopped"); printk(KERN_INFO "%s/2: ============= END LOG STATUS =============\n", dev->name); return 0; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctl) { struct cx25821_fh *fh = priv; struct cx25821_dev *dev = ((struct cx25821_fh *)priv)->dev; int err; if (fh) { err = v4l2_prio_check(&dev->prio, fh->prio); if (0 != err) return err; } return cx25821_set_control(dev, ctl, SRAM_CH03); } // exported stuff static const struct v4l2_file_operations video_fops = { .owner = THIS_MODULE, .open = video_open, .release = video_release, .read = video_read, .poll = video_poll, .mmap = cx25821_video_mmap, .ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = cx25821_vidioc_querycap, .vidioc_enum_fmt_vid_cap = cx25821_vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = cx25821_vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = cx25821_vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = cx25821_vidioc_reqbufs, .vidioc_querybuf = cx25821_vidioc_querybuf, .vidioc_qbuf = cx25821_vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, #ifdef TUNER_FLAG .vidioc_s_std = cx25821_vidioc_s_std, .vidioc_querystd = cx25821_vidioc_querystd, #endif .vidioc_cropcap = cx25821_vidioc_cropcap, .vidioc_s_crop = cx25821_vidioc_s_crop, .vidioc_g_crop = cx25821_vidioc_g_crop, .vidioc_enum_input = cx25821_vidioc_enum_input, .vidioc_g_input = cx25821_vidioc_g_input, .vidioc_s_input = cx25821_vidioc_s_input, .vidioc_g_ctrl = cx25821_vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_queryctrl = cx25821_vidioc_queryctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_log_status = vidioc_log_status, .vidioc_g_priority = cx25821_vidioc_g_priority, .vidioc_s_priority = cx25821_vidioc_s_priority, #ifdef CONFIG_VIDEO_V4L1_COMPAT .vidiocgmbuf = cx25821_vidiocgmbuf, #endif #ifdef TUNER_FLAG .vidioc_g_tuner = cx25821_vidioc_g_tuner, .vidioc_s_tuner = 
cx25821_vidioc_s_tuner, .vidioc_g_frequency = cx25821_vidioc_g_frequency, .vidioc_s_frequency = cx25821_vidioc_s_frequency, #endif #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = cx25821_vidioc_g_register, .vidioc_s_register = cx25821_vidioc_s_register, #endif }; struct video_device cx25821_video_template3 = { .name = "cx25821-video", .fops = &video_fops, .ioctl_ops = &video_ioctl_ops, .tvnorms = CX25821_NORMS, .current_norm = V4L2_STD_NTSC_M, };
gpl-2.0
hsavit1/linux
arch/s390/kvm/gaccess.c
765
24095
/* * guest access functions * * Copyright IBM Corp. 2014 * */ #include <linux/vmalloc.h> #include <linux/err.h> #include <asm/pgtable.h> #include "kvm-s390.h" #include "gaccess.h" #include <asm/switch_to.h> union asce { unsigned long val; struct { unsigned long origin : 52; /* Region- or Segment-Table Origin */ unsigned long : 2; unsigned long g : 1; /* Subspace Group Control */ unsigned long p : 1; /* Private Space Control */ unsigned long s : 1; /* Storage-Alteration-Event Control */ unsigned long x : 1; /* Space-Switch-Event Control */ unsigned long r : 1; /* Real-Space Control */ unsigned long : 1; unsigned long dt : 2; /* Designation-Type Control */ unsigned long tl : 2; /* Region- or Segment-Table Length */ }; }; enum { ASCE_TYPE_SEGMENT = 0, ASCE_TYPE_REGION3 = 1, ASCE_TYPE_REGION2 = 2, ASCE_TYPE_REGION1 = 3 }; union region1_table_entry { unsigned long val; struct { unsigned long rto: 52;/* Region-Table Origin */ unsigned long : 2; unsigned long p : 1; /* DAT-Protection Bit */ unsigned long : 1; unsigned long tf : 2; /* Region-Second-Table Offset */ unsigned long i : 1; /* Region-Invalid Bit */ unsigned long : 1; unsigned long tt : 2; /* Table-Type Bits */ unsigned long tl : 2; /* Region-Second-Table Length */ }; }; union region2_table_entry { unsigned long val; struct { unsigned long rto: 52;/* Region-Table Origin */ unsigned long : 2; unsigned long p : 1; /* DAT-Protection Bit */ unsigned long : 1; unsigned long tf : 2; /* Region-Third-Table Offset */ unsigned long i : 1; /* Region-Invalid Bit */ unsigned long : 1; unsigned long tt : 2; /* Table-Type Bits */ unsigned long tl : 2; /* Region-Third-Table Length */ }; }; struct region3_table_entry_fc0 { unsigned long sto: 52;/* Segment-Table Origin */ unsigned long : 1; unsigned long fc : 1; /* Format-Control */ unsigned long p : 1; /* DAT-Protection Bit */ unsigned long : 1; unsigned long tf : 2; /* Segment-Table Offset */ unsigned long i : 1; /* Region-Invalid Bit */ unsigned long cr : 1; /* Common-Region 
Bit */ unsigned long tt : 2; /* Table-Type Bits */ unsigned long tl : 2; /* Segment-Table Length */ }; struct region3_table_entry_fc1 { unsigned long rfaa : 33; /* Region-Frame Absolute Address */ unsigned long : 14; unsigned long av : 1; /* ACCF-Validity Control */ unsigned long acc: 4; /* Access-Control Bits */ unsigned long f : 1; /* Fetch-Protection Bit */ unsigned long fc : 1; /* Format-Control */ unsigned long p : 1; /* DAT-Protection Bit */ unsigned long co : 1; /* Change-Recording Override */ unsigned long : 2; unsigned long i : 1; /* Region-Invalid Bit */ unsigned long cr : 1; /* Common-Region Bit */ unsigned long tt : 2; /* Table-Type Bits */ unsigned long : 2; }; union region3_table_entry { unsigned long val; struct region3_table_entry_fc0 fc0; struct region3_table_entry_fc1 fc1; struct { unsigned long : 53; unsigned long fc : 1; /* Format-Control */ unsigned long : 4; unsigned long i : 1; /* Region-Invalid Bit */ unsigned long cr : 1; /* Common-Region Bit */ unsigned long tt : 2; /* Table-Type Bits */ unsigned long : 2; }; }; struct segment_entry_fc0 { unsigned long pto: 53;/* Page-Table Origin */ unsigned long fc : 1; /* Format-Control */ unsigned long p : 1; /* DAT-Protection Bit */ unsigned long : 3; unsigned long i : 1; /* Segment-Invalid Bit */ unsigned long cs : 1; /* Common-Segment Bit */ unsigned long tt : 2; /* Table-Type Bits */ unsigned long : 2; }; struct segment_entry_fc1 { unsigned long sfaa : 44; /* Segment-Frame Absolute Address */ unsigned long : 3; unsigned long av : 1; /* ACCF-Validity Control */ unsigned long acc: 4; /* Access-Control Bits */ unsigned long f : 1; /* Fetch-Protection Bit */ unsigned long fc : 1; /* Format-Control */ unsigned long p : 1; /* DAT-Protection Bit */ unsigned long co : 1; /* Change-Recording Override */ unsigned long : 2; unsigned long i : 1; /* Segment-Invalid Bit */ unsigned long cs : 1; /* Common-Segment Bit */ unsigned long tt : 2; /* Table-Type Bits */ unsigned long : 2; }; union segment_table_entry { 
unsigned long val; struct segment_entry_fc0 fc0; struct segment_entry_fc1 fc1; struct { unsigned long : 53; unsigned long fc : 1; /* Format-Control */ unsigned long : 4; unsigned long i : 1; /* Segment-Invalid Bit */ unsigned long cs : 1; /* Common-Segment Bit */ unsigned long tt : 2; /* Table-Type Bits */ unsigned long : 2; }; }; enum { TABLE_TYPE_SEGMENT = 0, TABLE_TYPE_REGION3 = 1, TABLE_TYPE_REGION2 = 2, TABLE_TYPE_REGION1 = 3 }; union page_table_entry { unsigned long val; struct { unsigned long pfra : 52; /* Page-Frame Real Address */ unsigned long z : 1; /* Zero Bit */ unsigned long i : 1; /* Page-Invalid Bit */ unsigned long p : 1; /* DAT-Protection Bit */ unsigned long co : 1; /* Change-Recording Override */ unsigned long : 8; }; }; /* * vaddress union in order to easily decode a virtual address into its * region first index, region second index etc. parts. */ union vaddress { unsigned long addr; struct { unsigned long rfx : 11; unsigned long rsx : 11; unsigned long rtx : 11; unsigned long sx : 11; unsigned long px : 8; unsigned long bx : 12; }; struct { unsigned long rfx01 : 2; unsigned long : 9; unsigned long rsx01 : 2; unsigned long : 9; unsigned long rtx01 : 2; unsigned long : 9; unsigned long sx01 : 2; unsigned long : 29; }; }; /* * raddress union which will contain the result (real or absolute address) * after a page table walk. The rfaa, sfaa and pfra members are used to * simply assign them the value of a region, segment or page table entry. 
*/ union raddress { unsigned long addr; unsigned long rfaa : 33; /* Region-Frame Absolute Address */ unsigned long sfaa : 44; /* Segment-Frame Absolute Address */ unsigned long pfra : 52; /* Page-Frame Real Address */ }; union alet { u32 val; struct { u32 reserved : 7; u32 p : 1; u32 alesn : 8; u32 alen : 16; }; }; union ald { u32 val; struct { u32 : 1; u32 alo : 24; u32 all : 7; }; }; struct ale { unsigned long i : 1; /* ALEN-Invalid Bit */ unsigned long : 5; unsigned long fo : 1; /* Fetch-Only Bit */ unsigned long p : 1; /* Private Bit */ unsigned long alesn : 8; /* Access-List-Entry Sequence Number */ unsigned long aleax : 16; /* Access-List-Entry Authorization Index */ unsigned long : 32; unsigned long : 1; unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */ unsigned long : 6; unsigned long astesn : 32; /* ASTE Sequence Number */ } __packed; struct aste { unsigned long i : 1; /* ASX-Invalid Bit */ unsigned long ato : 29; /* Authority-Table Origin */ unsigned long : 1; unsigned long b : 1; /* Base-Space Bit */ unsigned long ax : 16; /* Authorization Index */ unsigned long atl : 12; /* Authority-Table Length */ unsigned long : 2; unsigned long ca : 1; /* Controlled-ASN Bit */ unsigned long ra : 1; /* Reusable-ASN Bit */ unsigned long asce : 64; /* Address-Space-Control Element */ unsigned long ald : 32; unsigned long astesn : 32; /* .. 
more fields there */ } __packed; int ipte_lock_held(struct kvm_vcpu *vcpu) { union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control; if (vcpu->arch.sie_block->eca & 1) return ic->kh != 0; return vcpu->kvm->arch.ipte_lock_count != 0; } static void ipte_lock_simple(struct kvm_vcpu *vcpu) { union ipte_control old, new, *ic; mutex_lock(&vcpu->kvm->arch.ipte_mutex); vcpu->kvm->arch.ipte_lock_count++; if (vcpu->kvm->arch.ipte_lock_count > 1) goto out; ic = &vcpu->kvm->arch.sca->ipte_control; do { old = READ_ONCE(*ic); while (old.k) { cond_resched(); old = READ_ONCE(*ic); } new = old; new.k = 1; } while (cmpxchg(&ic->val, old.val, new.val) != old.val); out: mutex_unlock(&vcpu->kvm->arch.ipte_mutex); } static void ipte_unlock_simple(struct kvm_vcpu *vcpu) { union ipte_control old, new, *ic; mutex_lock(&vcpu->kvm->arch.ipte_mutex); vcpu->kvm->arch.ipte_lock_count--; if (vcpu->kvm->arch.ipte_lock_count) goto out; ic = &vcpu->kvm->arch.sca->ipte_control; do { old = READ_ONCE(*ic); new = old; new.k = 0; } while (cmpxchg(&ic->val, old.val, new.val) != old.val); wake_up(&vcpu->kvm->arch.ipte_wq); out: mutex_unlock(&vcpu->kvm->arch.ipte_mutex); } static void ipte_lock_siif(struct kvm_vcpu *vcpu) { union ipte_control old, new, *ic; ic = &vcpu->kvm->arch.sca->ipte_control; do { old = READ_ONCE(*ic); while (old.kg) { cond_resched(); old = READ_ONCE(*ic); } new = old; new.k = 1; new.kh++; } while (cmpxchg(&ic->val, old.val, new.val) != old.val); } static void ipte_unlock_siif(struct kvm_vcpu *vcpu) { union ipte_control old, new, *ic; ic = &vcpu->kvm->arch.sca->ipte_control; do { old = READ_ONCE(*ic); new = old; new.kh--; if (!new.kh) new.k = 0; } while (cmpxchg(&ic->val, old.val, new.val) != old.val); if (!new.kh) wake_up(&vcpu->kvm->arch.ipte_wq); } void ipte_lock(struct kvm_vcpu *vcpu) { if (vcpu->arch.sie_block->eca & 1) ipte_lock_siif(vcpu); else ipte_lock_simple(vcpu); } void ipte_unlock(struct kvm_vcpu *vcpu) { if (vcpu->arch.sie_block->eca & 1) ipte_unlock_siif(vcpu); 
else ipte_unlock_simple(vcpu); } static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, int write) { union alet alet; struct ale ale; struct aste aste; unsigned long ald_addr, authority_table_addr; union ald ald; int eax, rc; u8 authority_table; if (ar >= NUM_ACRS) return -EINVAL; save_access_regs(vcpu->run->s.regs.acrs); alet.val = vcpu->run->s.regs.acrs[ar]; if (ar == 0 || alet.val == 0) { asce->val = vcpu->arch.sie_block->gcr[1]; return 0; } else if (alet.val == 1) { asce->val = vcpu->arch.sie_block->gcr[7]; return 0; } if (alet.reserved) return PGM_ALET_SPECIFICATION; if (alet.p) ald_addr = vcpu->arch.sie_block->gcr[5]; else ald_addr = vcpu->arch.sie_block->gcr[2]; ald_addr &= 0x7fffffc0; rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald)); if (rc) return rc; if (alet.alen / 8 > ald.all) return PGM_ALEN_TRANSLATION; if (0x7fffffff - ald.alo * 128 < alet.alen * 16) return PGM_ADDRESSING; rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale, sizeof(struct ale)); if (rc) return rc; if (ale.i == 1) return PGM_ALEN_TRANSLATION; if (ale.alesn != alet.alesn) return PGM_ALE_SEQUENCE; rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste)); if (rc) return rc; if (aste.i) return PGM_ASTE_VALIDITY; if (aste.astesn != ale.astesn) return PGM_ASTE_SEQUENCE; if (ale.p == 1) { eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff; if (ale.aleax != eax) { if (eax / 16 > aste.atl) return PGM_EXTENDED_AUTHORITY; authority_table_addr = aste.ato * 4 + eax / 4; rc = read_guest_real(vcpu, authority_table_addr, &authority_table, sizeof(u8)); if (rc) return rc; if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0) return PGM_EXTENDED_AUTHORITY; } } if (ale.fo == 1 && write) return PGM_PROTECTION; asce->val = aste.asce; return 0; } struct trans_exc_code_bits { unsigned long addr : 52; /* Translation-exception Address */ unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ unsigned long : 6; unsigned long 
b60 : 1; unsigned long b61 : 1; unsigned long as : 2; /* ASCE Identifier */ }; enum { FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ FSI_STORE = 1, /* Exception was due to store operation */ FSI_FETCH = 2 /* Exception was due to fetch operation */ }; static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, int write) { int rc; psw_t *psw = &vcpu->arch.sie_block->gpsw; struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; struct trans_exc_code_bits *tec_bits; memset(pgm, 0, sizeof(*pgm)); tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; tec_bits->as = psw_bits(*psw).as; if (!psw_bits(*psw).t) { asce->val = 0; asce->r = 1; return 0; } switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { case PSW_AS_PRIMARY: asce->val = vcpu->arch.sie_block->gcr[1]; return 0; case PSW_AS_SECONDARY: asce->val = vcpu->arch.sie_block->gcr[7]; return 0; case PSW_AS_HOME: asce->val = vcpu->arch.sie_block->gcr[13]; return 0; case PSW_AS_ACCREG: rc = ar_translation(vcpu, asce, ar, write); switch (rc) { case PGM_ALEN_TRANSLATION: case PGM_ALE_SEQUENCE: case PGM_ASTE_VALIDITY: case PGM_ASTE_SEQUENCE: case PGM_EXTENDED_AUTHORITY: vcpu->arch.pgm.exc_access_id = ar; break; case PGM_PROTECTION: tec_bits->b60 = 1; tec_bits->b61 = 1; break; } if (rc > 0) pgm->code = rc; return rc; } return 0; } static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) { return kvm_read_guest(kvm, gpa, val, sizeof(*val)); } /** * guest_translate - translate a guest virtual into a guest absolute address * @vcpu: virtual cpu * @gva: guest virtual address * @gpa: points to where guest physical (absolute) address should be stored * @asce: effective asce * @write: indicates if access is a write access * * Translate a guest virtual address into a guest absolute address by means * of dynamic address translation as specified by the architecture. 
* If the resulting absolute address is not available in the configuration * an addressing exception is indicated and @gpa will not be changed. * * Returns: - zero on success; @gpa contains the resulting absolute address * - a negative value if guest access failed due to e.g. broken * guest mapping * - a positve value if an access exception happened. In this case * the returned value is the program interruption code as defined * by the architecture */ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, unsigned long *gpa, const union asce asce, int write) { union vaddress vaddr = {.addr = gva}; union raddress raddr = {.addr = gva}; union page_table_entry pte; int dat_protection = 0; union ctlreg0 ctlreg0; unsigned long ptr; int edat1, edat2; ctlreg0.val = vcpu->arch.sie_block->gcr[0]; edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); if (asce.r) goto real_address; ptr = asce.origin * 4096; switch (asce.dt) { case ASCE_TYPE_REGION1: if (vaddr.rfx01 > asce.tl) return PGM_REGION_FIRST_TRANS; ptr += vaddr.rfx * 8; break; case ASCE_TYPE_REGION2: if (vaddr.rfx) return PGM_ASCE_TYPE; if (vaddr.rsx01 > asce.tl) return PGM_REGION_SECOND_TRANS; ptr += vaddr.rsx * 8; break; case ASCE_TYPE_REGION3: if (vaddr.rfx || vaddr.rsx) return PGM_ASCE_TYPE; if (vaddr.rtx01 > asce.tl) return PGM_REGION_THIRD_TRANS; ptr += vaddr.rtx * 8; break; case ASCE_TYPE_SEGMENT: if (vaddr.rfx || vaddr.rsx || vaddr.rtx) return PGM_ASCE_TYPE; if (vaddr.sx01 > asce.tl) return PGM_SEGMENT_TRANSLATION; ptr += vaddr.sx * 8; break; } switch (asce.dt) { case ASCE_TYPE_REGION1: { union region1_table_entry rfte; if (kvm_is_error_gpa(vcpu->kvm, ptr)) return PGM_ADDRESSING; if (deref_table(vcpu->kvm, ptr, &rfte.val)) return -EFAULT; if (rfte.i) return PGM_REGION_FIRST_TRANS; if (rfte.tt != TABLE_TYPE_REGION1) return PGM_TRANSLATION_SPEC; if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl) return PGM_REGION_SECOND_TRANS; if 
(edat1) dat_protection |= rfte.p; ptr = rfte.rto * 4096 + vaddr.rsx * 8; } /* fallthrough */ case ASCE_TYPE_REGION2: { union region2_table_entry rste; if (kvm_is_error_gpa(vcpu->kvm, ptr)) return PGM_ADDRESSING; if (deref_table(vcpu->kvm, ptr, &rste.val)) return -EFAULT; if (rste.i) return PGM_REGION_SECOND_TRANS; if (rste.tt != TABLE_TYPE_REGION2) return PGM_TRANSLATION_SPEC; if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl) return PGM_REGION_THIRD_TRANS; if (edat1) dat_protection |= rste.p; ptr = rste.rto * 4096 + vaddr.rtx * 8; } /* fallthrough */ case ASCE_TYPE_REGION3: { union region3_table_entry rtte; if (kvm_is_error_gpa(vcpu->kvm, ptr)) return PGM_ADDRESSING; if (deref_table(vcpu->kvm, ptr, &rtte.val)) return -EFAULT; if (rtte.i) return PGM_REGION_THIRD_TRANS; if (rtte.tt != TABLE_TYPE_REGION3) return PGM_TRANSLATION_SPEC; if (rtte.cr && asce.p && edat2) return PGM_TRANSLATION_SPEC; if (rtte.fc && edat2) { dat_protection |= rtte.fc1.p; raddr.rfaa = rtte.fc1.rfaa; goto absolute_address; } if (vaddr.sx01 < rtte.fc0.tf) return PGM_SEGMENT_TRANSLATION; if (vaddr.sx01 > rtte.fc0.tl) return PGM_SEGMENT_TRANSLATION; if (edat1) dat_protection |= rtte.fc0.p; ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8; } /* fallthrough */ case ASCE_TYPE_SEGMENT: { union segment_table_entry ste; if (kvm_is_error_gpa(vcpu->kvm, ptr)) return PGM_ADDRESSING; if (deref_table(vcpu->kvm, ptr, &ste.val)) return -EFAULT; if (ste.i) return PGM_SEGMENT_TRANSLATION; if (ste.tt != TABLE_TYPE_SEGMENT) return PGM_TRANSLATION_SPEC; if (ste.cs && asce.p) return PGM_TRANSLATION_SPEC; if (ste.fc && edat1) { dat_protection |= ste.fc1.p; raddr.sfaa = ste.fc1.sfaa; goto absolute_address; } dat_protection |= ste.fc0.p; ptr = ste.fc0.pto * 2048 + vaddr.px * 8; } } if (kvm_is_error_gpa(vcpu->kvm, ptr)) return PGM_ADDRESSING; if (deref_table(vcpu->kvm, ptr, &pte.val)) return -EFAULT; if (pte.i) return PGM_PAGE_TRANSLATION; if (pte.z) return PGM_TRANSLATION_SPEC; if (pte.co && !edat1) return 
PGM_TRANSLATION_SPEC; dat_protection |= pte.p; raddr.pfra = pte.pfra; real_address: raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr); absolute_address: if (write && dat_protection) return PGM_PROTECTION; if (kvm_is_error_gpa(vcpu->kvm, raddr.addr)) return PGM_ADDRESSING; *gpa = raddr.addr; return 0; } static inline int is_low_address(unsigned long ga) { /* Check for address ranges 0..511 and 4096..4607 */ return (ga & ~0x11fful) == 0; } static int low_address_protection_enabled(struct kvm_vcpu *vcpu, const union asce asce) { union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; psw_t *psw = &vcpu->arch.sie_block->gpsw; if (!ctlreg0.lap) return 0; if (psw_bits(*psw).t && asce.p) return 0; return 1; } static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, unsigned long *pages, unsigned long nr_pages, const union asce asce, int write) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; psw_t *psw = &vcpu->arch.sie_block->gpsw; struct trans_exc_code_bits *tec_bits; int lap_enabled, rc; tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; lap_enabled = low_address_protection_enabled(vcpu, asce); while (nr_pages) { ga = kvm_s390_logical_to_effective(vcpu, ga); tec_bits->addr = ga >> PAGE_SHIFT; if (write && lap_enabled && is_low_address(ga)) { pgm->code = PGM_PROTECTION; return pgm->code; } ga &= PAGE_MASK; if (psw_bits(*psw).t) { rc = guest_translate(vcpu, ga, pages, asce, write); if (rc < 0) return rc; if (rc == PGM_PROTECTION) tec_bits->b61 = 1; if (rc) pgm->code = rc; } else { *pages = kvm_s390_real_to_abs(vcpu, ga); if (kvm_is_error_gpa(vcpu->kvm, *pages)) pgm->code = PGM_ADDRESSING; } if (pgm->code) return pgm->code; ga += PAGE_SIZE; pages++; nr_pages--; } return 0; } int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len, int write) { psw_t *psw = &vcpu->arch.sie_block->gpsw; unsigned long _len, nr_pages, gpa, idx; unsigned long pages_array[2]; unsigned long *pages; int need_ipte_lock; 
union asce asce; int rc; if (!len) return 0; rc = get_vcpu_asce(vcpu, &asce, ar, write); if (rc) return rc; nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; pages = pages_array; if (nr_pages > ARRAY_SIZE(pages_array)) pages = vmalloc(nr_pages * sizeof(unsigned long)); if (!pages) return -ENOMEM; need_ipte_lock = psw_bits(*psw).t && !asce.r; if (need_ipte_lock) ipte_lock(vcpu); rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); for (idx = 0; idx < nr_pages && !rc; idx++) { gpa = *(pages + idx) + (ga & ~PAGE_MASK); _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); if (write) rc = kvm_write_guest(vcpu->kvm, gpa, data, _len); else rc = kvm_read_guest(vcpu->kvm, gpa, data, _len); len -= _len; ga += _len; data += _len; } if (need_ipte_lock) ipte_unlock(vcpu); if (nr_pages > ARRAY_SIZE(pages_array)) vfree(pages); return rc; } int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, unsigned long len, int write) { unsigned long _len, gpa; int rc = 0; while (len && !rc) { gpa = kvm_s390_real_to_abs(vcpu, gra); _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); if (write) rc = write_guest_abs(vcpu, gpa, data, _len); else rc = read_guest_abs(vcpu, gpa, data, _len); len -= _len; gra += _len; data += _len; } return rc; } /** * guest_translate_address - translate guest logical into guest absolute address * * Parameter semantics are the same as the ones from guest_translate. * The memory contents at the guest address are not changed. * * Note: The IPTE lock is not taken during this function, so the caller * has to take care of this. 
 */
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
			    unsigned long *gpa, int write)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec;
	union asce asce;
	int rc;

	gva = kvm_s390_logical_to_effective(vcpu, gva);
	tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	rc = get_vcpu_asce(vcpu, &asce, ar, write);
	/* Fill in the page index unconditionally so any exception raised
	 * below already carries the faulting address.
	 */
	tec->addr = gva >> PAGE_SHIFT;
	if (rc)
		return rc;
	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
		if (write) {
			rc = pgm->code = PGM_PROTECTION;
			return rc;
		}
	}

	if (psw_bits(*psw).t && !asce.r) {	/* Use DAT? */
		rc = guest_translate(vcpu, gva, gpa, asce, write);
		if (rc > 0) {
			/* rc > 0 is a program interruption code */
			if (rc == PGM_PROTECTION)
				tec->b61 = 1;
			pgm->code = rc;
		}
	} else {
		/* DAT off (or real-space ASCE): treat gva as real address */
		rc = 0;
		*gpa = kvm_s390_real_to_abs(vcpu, gva);
		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
			rc = pgm->code = PGM_ADDRESSING;
	}

	return rc;
}

/**
 * check_gva_range - test a range of guest virtual addresses for accessibility
 *
 * Walks the range page by page; stops at the first translation failure and
 * returns that error code (0 if the whole range translates).  The IPTE lock
 * is taken here, unlike in guest_translate_address() itself.
 */
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
		    unsigned long length, int is_write)
{
	unsigned long gpa;
	unsigned long currlen;
	int rc = 0;

	ipte_lock(vcpu);
	while (length > 0 && !rc) {
		/* Advance at most to the next page boundary per iteration */
		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
		rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
		gva += currlen;
		length -= currlen;
	}
	ipte_unlock(vcpu);

	return rc;
}

/**
 * kvm_s390_check_low_addr_prot_real - check for low-address protection
 * @gra: Guest real address
 *
 * Checks whether an address is subject to low-address protection and set
 * up vcpu->arch.pgm accordingly if necessary.
 *
 * Return: 0 if no protection exception, or PGM_PROTECTION if protected.
 */
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
{
	struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	struct trans_exc_code_bits *tec_bits;
	union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]};

	/* Low-address protection must be enabled via CR0.LAP */
	if (!ctlreg0.lap || !is_low_address(gra))
		return 0;

	/* Set up the program interruption info for the caller to inject */
	memset(pgm, 0, sizeof(*pgm));
	tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
	tec_bits->fsi = FSI_STORE;
	tec_bits->as = psw_bits(*psw).as;
	tec_bits->addr = gra >> PAGE_SHIFT;
	pgm->code = PGM_PROTECTION;

	return pgm->code;
}
gpl-2.0
CM-zenfone2/android_kernel_asus_moorefield
drivers/tty/serial/sunzilog.c
2045
41442
/* sunzilog.c: Zilog serial driver for Sparc systems. * * Driver for Zilog serial chips found on Sun workstations and * servers. This driver could actually be made more generic. * * This is based on the old drivers/sbus/char/zs.c code. A lot * of code has been simply moved over directly from there but * much has been rewritten. Credits therefore go out to Eddie * C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their * work there. * * Copyright (C) 2002, 2006, 2007 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/string.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/circ_buf.h> #include <linux/serial.h> #include <linux/sysrq.h> #include <linux/console.h> #include <linux/spinlock.h> #ifdef CONFIG_SERIO #include <linux/serio.h> #endif #include <linux/init.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/setup.h> #if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> #include <linux/sunserialcore.h> #include "sunzilog.h" /* On 32-bit sparcs we need to delay after register accesses * to accommodate sun4 systems, but we do not need to flush writes. * On 64-bit sparc we only need to flush single writes to ensure * completion. */ #ifndef CONFIG_SPARC64 #define ZSDELAY() udelay(5) #define ZSDELAY_LONG() udelay(20) #define ZS_WSYNC(channel) do { } while (0) #else #define ZSDELAY() #define ZSDELAY_LONG() #define ZS_WSYNC(__channel) \ readb(&((__channel)->control)) #endif #define ZS_CLOCK 4915200 /* Zilog input clock rate. */ #define ZS_CLOCK_DIVISOR 16 /* Divisor this driver uses. */ /* * We wrap our port structure around the generic uart_port. 
*/ struct uart_sunzilog_port { struct uart_port port; /* IRQ servicing chain. */ struct uart_sunzilog_port *next; /* Current values of Zilog write registers. */ unsigned char curregs[NUM_ZSREGS]; unsigned int flags; #define SUNZILOG_FLAG_CONS_KEYB 0x00000001 #define SUNZILOG_FLAG_CONS_MOUSE 0x00000002 #define SUNZILOG_FLAG_IS_CONS 0x00000004 #define SUNZILOG_FLAG_IS_KGDB 0x00000008 #define SUNZILOG_FLAG_MODEM_STATUS 0x00000010 #define SUNZILOG_FLAG_IS_CHANNEL_A 0x00000020 #define SUNZILOG_FLAG_REGS_HELD 0x00000040 #define SUNZILOG_FLAG_TX_STOPPED 0x00000080 #define SUNZILOG_FLAG_TX_ACTIVE 0x00000100 #define SUNZILOG_FLAG_ESCC 0x00000200 #define SUNZILOG_FLAG_ISR_HANDLER 0x00000400 unsigned int cflag; unsigned char parity_mask; unsigned char prev_status; #ifdef CONFIG_SERIO struct serio serio; int serio_open; #endif }; static void sunzilog_putchar(struct uart_port *port, int ch); #define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel __iomem *)((PORT)->membase)) #define UART_ZILOG(PORT) ((struct uart_sunzilog_port *)(PORT)) #define ZS_IS_KEYB(UP) ((UP)->flags & SUNZILOG_FLAG_CONS_KEYB) #define ZS_IS_MOUSE(UP) ((UP)->flags & SUNZILOG_FLAG_CONS_MOUSE) #define ZS_IS_CONS(UP) ((UP)->flags & SUNZILOG_FLAG_IS_CONS) #define ZS_IS_KGDB(UP) ((UP)->flags & SUNZILOG_FLAG_IS_KGDB) #define ZS_WANTS_MODEM_STATUS(UP) ((UP)->flags & SUNZILOG_FLAG_MODEM_STATUS) #define ZS_IS_CHANNEL_A(UP) ((UP)->flags & SUNZILOG_FLAG_IS_CHANNEL_A) #define ZS_REGS_HELD(UP) ((UP)->flags & SUNZILOG_FLAG_REGS_HELD) #define ZS_TX_STOPPED(UP) ((UP)->flags & SUNZILOG_FLAG_TX_STOPPED) #define ZS_TX_ACTIVE(UP) ((UP)->flags & SUNZILOG_FLAG_TX_ACTIVE) /* Reading and writing Zilog8530 registers. The delays are to make this * driver work on the Sun4 which needs a settling delay after each chip * register access, other machines handle this in hardware via auxiliary * flip-flops which implement the settle time we do in software. 
 *
 * The port lock must be held and local IRQs must be disabled
 * when {read,write}_zsreg is invoked.
 */
static unsigned char read_zsreg(struct zilog_channel __iomem *channel,
				unsigned char reg)
{
	unsigned char retval;

	/* Select the register via the control port, then read it back. */
	writeb(reg, &channel->control);
	ZSDELAY();
	retval = readb(&channel->control);
	ZSDELAY();

	return retval;
}

static void write_zsreg(struct zilog_channel __iomem *channel,
			unsigned char reg, unsigned char value)
{
	/* Register select write followed by data write, both to control. */
	writeb(reg, &channel->control);
	ZSDELAY();
	writeb(value, &channel->control);
	ZSDELAY();
}

/* Drain stale receive data/errors before reprogramming the channel. */
static void sunzilog_clear_fifo(struct zilog_channel __iomem *channel)
{
	int i;

	for (i = 0; i < 32; i++) {
		unsigned char regval;

		regval = readb(&channel->control);
		ZSDELAY();
		/* NOTE(review): this breaks when a character IS available,
		 * which looks inverted for a FIFO drain; it matches the
		 * long-standing driver behavior — confirm against upstream.
		 */
		if (regval & Rx_CH_AV)
			break;

		regval = read_zsreg(channel, R1);
		readb(&channel->data);
		ZSDELAY();

		if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) {
			writeb(ERR_RES, &channel->control);
			ZSDELAY();
			ZS_WSYNC(channel);
		}
	}
}

/* This function must only be called when the TX is not busy.  The UART
 * port lock must be held and local interrupts disabled.
 *
 * Returns non-zero if the channel is an ESCC (extended 85230) part.
 */
static int __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *regs)
{
	int i;
	int escc;
	unsigned char r15;

	/* Let pending transmits finish.  */
	for (i = 0; i < 1000; i++) {
		unsigned char stat = read_zsreg(channel, R1);
		if (stat & ALL_SNT)
			break;
		udelay(100);
	}

	writeb(ERR_RES, &channel->control);
	ZSDELAY();
	ZS_WSYNC(channel);

	sunzilog_clear_fifo(channel);

	/* Disable all interrupts.  */
	write_zsreg(channel, R1,
		    regs[R1] & ~(RxINT_MASK | TxINT_ENAB | EXT_INT_ENAB));

	/* Set parity, sync config, stop bits, and clock divisor.  */
	write_zsreg(channel, R4, regs[R4]);

	/* Set misc. TX/RX control bits.  */
	write_zsreg(channel, R10, regs[R10]);

	/* Set TX/RX controls sans the enable bits.  */
	write_zsreg(channel, R3, regs[R3] & ~RxENAB);
	write_zsreg(channel, R5, regs[R5] & ~TxENAB);

	/* Synchronous mode config.  */
	write_zsreg(channel, R6, regs[R6]);
	write_zsreg(channel, R7, regs[R7]);

	/* Don't mess with the interrupt vector (R2, unused by us) and
	 * master interrupt control (R9).  We make sure this is setup
	 * properly at probe time then never touch it again.
	 */

	/* Disable baud generator.  */
	write_zsreg(channel, R14, regs[R14] & ~BRENAB);

	/* Clock mode control.  */
	write_zsreg(channel, R11, regs[R11]);

	/* Lower and upper byte of baud rate generator divisor.  */
	write_zsreg(channel, R12, regs[R12]);
	write_zsreg(channel, R13, regs[R13]);

	/* Now rewrite R14, with BRENAB (if set).  */
	write_zsreg(channel, R14, regs[R14]);

	/* External status interrupt control.  */
	write_zsreg(channel, R15, (regs[R15] | WR7pEN) & ~FIFOEN);

	/* ESCC Extension Register */
	r15 = read_zsreg(channel, R15);
	if (r15 & 0x01)	{
		write_zsreg(channel, R7, regs[R7p]);

		/* External status interrupt and FIFO control.  */
		write_zsreg(channel, R15, regs[R15] & ~WR7pEN);
		escc = 1;
	} else {
		/* Clear FIFO bit case it is an issue */
		regs[R15] &= ~FIFOEN;
		escc = 0;
	}

	/* Reset external status interrupts.  */
	write_zsreg(channel, R0, RES_EXT_INT); /* First Latch  */
	write_zsreg(channel, R0, RES_EXT_INT); /* Second Latch */

	/* Rewrite R3/R5, this time without enables masked.  */
	write_zsreg(channel, R3, regs[R3]);
	write_zsreg(channel, R5, regs[R5]);

	/* Rewrite R1, this time without IRQ enabled masked.  */
	write_zsreg(channel, R1, regs[R1]);

	return escc;
}

/* Reprogram the Zilog channel HW registers with the copies found in the
 * software state struct.  If the transmitter is busy, we defer this update
 * until the next TX complete interrupt.  Else, we do it right now.
 *
 * The UART port lock must be held and local interrupts disabled.
 */
static void sunzilog_maybe_update_regs(struct uart_sunzilog_port *up,
				       struct zilog_channel __iomem *channel)
{
	if (!ZS_REGS_HELD(up)) {
		if (ZS_TX_ACTIVE(up)) {
			/* TX busy: defer to the next TX-done interrupt. */
			up->flags |= SUNZILOG_FLAG_REGS_HELD;
		} else {
			__load_zsregs(channel, up->curregs);
		}
	}
}

/* Step the mouse line to the next baud rate in the autodetect cycle. */
static void sunzilog_change_mouse_baud(struct uart_sunzilog_port *up)
{
	unsigned int cur_cflag = up->cflag;
	int brg, new_baud;

	up->cflag &= ~CBAUD;
	up->cflag |= suncore_mouse_baud_cflag_next(cur_cflag, &new_baud);

	brg = BPS_TO_BRG(new_baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
	up->curregs[R12] = (brg & 0xff);
	up->curregs[R13] = (brg >> 8) & 0xff;
	sunzilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(&up->port));
}

/* Route received bytes for the console keyboard/mouse lines to serio. */
static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
					 unsigned char ch, int is_break)
{
	if (ZS_IS_KEYB(up)) {
		/* Stop-A is handled by drivers/char/keyboard.c now. */
#ifdef CONFIG_SERIO
		if (up->serio_open)
			serio_interrupt(&up->serio, ch, 0);
#endif
	} else if (ZS_IS_MOUSE(up)) {
		int ret = suncore_mouse_baud_detection(ch, is_break);

		switch (ret) {
		case 2:
			sunzilog_change_mouse_baud(up);
			/* fallthru */
		case 1:
			break;

		case 0:
#ifdef CONFIG_SERIO
			if (up->serio_open)
				serio_interrupt(&up->serio, ch, 0);
#endif
			break;
		};
	}
}

/* Drain the receiver; returns the tty_port to push to, or NULL. */
static struct tty_port *
sunzilog_receive_chars(struct uart_sunzilog_port *up,
		       struct zilog_channel __iomem *channel)
{
	struct tty_port *port = NULL;
	unsigned char ch, r1, flag;

	if (up->port.state != NULL)		/* Unopened serial console */
		port = &up->port.state->port;

	for (;;) {
		r1 = read_zsreg(channel, R1);
		if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
			writeb(ERR_RES, &channel->control);
			ZSDELAY();
			ZS_WSYNC(channel);
		}

		ch = readb(&channel->control);
		ZSDELAY();

		/* This funny hack depends upon BRK_ABRT not interfering
		 * with the other bits we care about in R1.
		 */
		if (ch & BRK_ABRT)
			r1 |= BRK_ABRT;

		if (!(ch & Rx_CH_AV))
			break;

		ch = readb(&channel->data);
		ZSDELAY();

		ch &= up->parity_mask;

		/* Keyboard/mouse lines bypass the tty layer entirely. */
		if (unlikely(ZS_IS_KEYB(up)) || unlikely(ZS_IS_MOUSE(up))) {
			sunzilog_kbdms_receive_chars(up, ch, 0);
			continue;
		}

		/* A real serial line, record the character and status.  */
		flag = TTY_NORMAL;
		up->port.icount.rx++;
		if (r1 & (BRK_ABRT | PAR_ERR | Rx_OVR | CRC_ERR)) {
			if (r1 & BRK_ABRT) {
				r1 &= ~(PAR_ERR | CRC_ERR);
				up->port.icount.brk++;
				if (uart_handle_break(&up->port))
					continue;
			}
			else if (r1 & PAR_ERR)
				up->port.icount.parity++;
			else if (r1 & CRC_ERR)
				up->port.icount.frame++;
			if (r1 & Rx_OVR)
				up->port.icount.overrun++;
			r1 &= up->port.read_status_mask;
			if (r1 & BRK_ABRT)
				flag = TTY_BREAK;
			else if (r1 & PAR_ERR)
				flag = TTY_PARITY;
			else if (r1 & CRC_ERR)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&up->port, ch) || !port)
			continue;

		if (up->port.ignore_status_mask == 0xff ||
		    (r1 & up->port.ignore_status_mask) == 0) {
			tty_insert_flip_char(port, ch, flag);
		}
		if (r1 & Rx_OVR)
			tty_insert_flip_char(port, 0, TTY_OVERRUN);
	}
	return port;
}

/* Handle an external/status interrupt: BREAK and modem-line changes. */
static void sunzilog_status_handle(struct uart_sunzilog_port *up,
				   struct zilog_channel __iomem *channel)
{
	unsigned char status;

	status = readb(&channel->control);
	ZSDELAY();

	writeb(RES_EXT_INT, &channel->control);
	ZSDELAY();
	ZS_WSYNC(channel);

	if (status & BRK_ABRT) {
		if (ZS_IS_MOUSE(up))
			sunzilog_kbdms_receive_chars(up, 0, 1);
		if (ZS_IS_CONS(up)) {
			/* Wait for BREAK to deassert to avoid potentially
			 * confusing the PROM.
			 */
			while (1) {
				status = readb(&channel->control);
				ZSDELAY();
				if (!(status & BRK_ABRT))
					break;
			}
			sun_do_break();
			return;
		}
	}

	if (ZS_WANTS_MODEM_STATUS(up)) {
		if (status & SYNC)
			up->port.icount.dsr++;

		/* The Zilog just gives us an interrupt when DCD/CTS/etc. change.
		 * But it does not tell us which bit has changed, we have to keep
		 * track of this ourselves.
		 *
		 * NOTE(review): the '^ DCD' / '^ CTS' below look like they
		 * should be '& DCD' / '& CTS' to test the changed bit; this
		 * matches the long-standing driver text — confirm upstream.
		 */
		if ((status ^ up->prev_status) ^ DCD)
			uart_handle_dcd_change(&up->port,
					       (status & DCD));
		if ((status ^ up->prev_status) ^ CTS)
			uart_handle_cts_change(&up->port,
					       (status & CTS));

		wake_up_interruptible(&up->port.state->port.delta_msr_wait);
	}

	up->prev_status = status;
}

/* Push the next character(s) to the transmitter from the TX IRQ path. */
static void sunzilog_transmit_chars(struct uart_sunzilog_port *up,
				    struct zilog_channel __iomem *channel)
{
	struct circ_buf *xmit;

	if (ZS_IS_CONS(up)) {
		unsigned char status = readb(&channel->control);
		ZSDELAY();

		/* TX still busy?  Just wait for the next TX done interrupt.
		 *
		 * It can occur because of how we do serial console writes.  It would
		 * be nice to transmit console writes just like we normally would for
		 * a TTY line. (ie. buffered and TX interrupt driven).  That is not
		 * easy because console writes cannot sleep.  One solution might be
		 * to poll on enough port->xmit space becoming free.  -DaveM
		 */
		if (!(status & Tx_BUF_EMP))
			return;
	}

	up->flags &= ~SUNZILOG_FLAG_TX_ACTIVE;

	if (ZS_REGS_HELD(up)) {
		/* A register update was deferred while TX was busy: apply it. */
		__load_zsregs(channel, up->curregs);
		up->flags &= ~SUNZILOG_FLAG_REGS_HELD;
	}

	if (ZS_TX_STOPPED(up)) {
		up->flags &= ~SUNZILOG_FLAG_TX_STOPPED;
		goto ack_tx_int;
	}

	/* High-priority out-of-band character (e.g. flow control). */
	if (up->port.x_char) {
		up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
		writeb(up->port.x_char, &channel->data);
		ZSDELAY();
		ZS_WSYNC(channel);

		up->port.icount.tx++;
		up->port.x_char = 0;
		return;
	}

	if (up->port.state == NULL)
		goto ack_tx_int;

	xmit = &up->port.state->xmit;
	if (uart_circ_empty(xmit))
		goto ack_tx_int;

	if (uart_tx_stopped(&up->port))
		goto ack_tx_int;

	up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
	writeb(xmit->buf[xmit->tail], &channel->data);
	ZSDELAY();
	ZS_WSYNC(channel);

	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	up->port.icount.tx++;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);

	return;

ack_tx_int:
	writeb(RES_Tx_P, &channel->control);
	ZSDELAY();
	ZS_WSYNC(channel);
}

/* Top-level IRQ handler: walks the chain of chips, servicing channel A
 * then channel B of each (R3 on channel A reports both channels).
 */
static irqreturn_t sunzilog_interrupt(int irq, void *dev_id)
{
	struct uart_sunzilog_port *up = dev_id;

	while (up) {
		struct zilog_channel __iomem *channel
			= ZILOG_CHANNEL_FROM_PORT(&up->port);
		struct tty_port *port;
		unsigned char r3;

		spin_lock(&up->port.lock);
		r3 = read_zsreg(channel, R3);

		/* Channel A */
		port = NULL;
		if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
			writeb(RES_H_IUS, &channel->control);
			ZSDELAY();
			ZS_WSYNC(channel);

			if (r3 & CHARxIP)
				port = sunzilog_receive_chars(up, channel);
			if (r3 & CHAEXT)
				sunzilog_status_handle(up, channel);
			if (r3 & CHATxIP)
				sunzilog_transmit_chars(up, channel);
		}
		spin_unlock(&up->port.lock);

		/* Push outside the port lock. */
		if (port)
			tty_flip_buffer_push(port);

		/* Channel B */
		up = up->next;
		channel = ZILOG_CHANNEL_FROM_PORT(&up->port);

		spin_lock(&up->port.lock);
		port = NULL;
		if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
			writeb(RES_H_IUS, &channel->control);
			ZSDELAY();
			ZS_WSYNC(channel);

			if (r3 & CHBRxIP)
				port = sunzilog_receive_chars(up, channel);
			if (r3 & CHBEXT)
				sunzilog_status_handle(up, channel);
			if (r3 & CHBTxIP)
				sunzilog_transmit_chars(up, channel);
		}
		spin_unlock(&up->port.lock);

		if (port)
			tty_flip_buffer_push(port);

		up = up->next;
	}

	return IRQ_HANDLED;
}

/* A convenient way to quickly get R0 status.  The caller must _not_ hold the
 * port lock, it is acquired here.
 */
static __inline__ unsigned char sunzilog_read_channel_status(struct uart_port *port)
{
	struct zilog_channel __iomem *channel;
	unsigned char status;

	channel = ZILOG_CHANNEL_FROM_PORT(port);
	status = readb(&channel->control);
	ZSDELAY();

	return status;
}

/* The port lock is not held.  */
static unsigned int sunzilog_tx_empty(struct uart_port *port)
{
	unsigned long flags;
	unsigned char status;
	unsigned int ret;

	spin_lock_irqsave(&port->lock, flags);

	status = sunzilog_read_channel_status(port);

	spin_unlock_irqrestore(&port->lock, flags);

	if (status & Tx_BUF_EMP)
		ret = TIOCSER_TEMT;
	else
		ret = 0;

	return ret;
}

/* The port lock is held and interrupts are disabled.
 */
static unsigned int sunzilog_get_mctrl(struct uart_port *port)
{
	unsigned char status;
	unsigned int ret;

	status = sunzilog_read_channel_status(port);

	/* Map Zilog R0 status bits to TIOCM_* modem bits. */
	ret = 0;
	if (status & DCD)
		ret |= TIOCM_CAR;
	if (status & SYNC)
		ret |= TIOCM_DSR;
	if (status & CTS)
		ret |= TIOCM_CTS;

	return ret;
}

/* The port lock is held and interrupts are disabled.  */
static void sunzilog_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
	struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
	unsigned char set_bits, clear_bits;

	set_bits = clear_bits = 0;

	if (mctrl & TIOCM_RTS)
		set_bits |= RTS;
	else
		clear_bits |= RTS;
	if (mctrl & TIOCM_DTR)
		set_bits |= DTR;
	else
		clear_bits |= DTR;

	/* NOTE: Not subject to 'transmitter active' rule.  */
	up->curregs[R5] |= set_bits;
	up->curregs[R5] &= ~clear_bits;
	write_zsreg(channel, R5, up->curregs[R5]);
}

/* The port lock is held and interrupts are disabled.  */
static void sunzilog_stop_tx(struct uart_port *port)
{
	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;

	/* Just flag it; the TX IRQ path honours SUNZILOG_FLAG_TX_STOPPED. */
	up->flags |= SUNZILOG_FLAG_TX_STOPPED;
}

/* The port lock is held and interrupts are disabled.  */
static void sunzilog_start_tx(struct uart_port *port)
{
	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
	struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
	unsigned char status;

	up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
	up->flags &= ~SUNZILOG_FLAG_TX_STOPPED;

	status = readb(&channel->control);
	ZSDELAY();

	/* TX busy?  Just wait for the TX done interrupt.  */
	if (!(status & Tx_BUF_EMP))
		return;

	/* Send the first character to jump-start the TX done
	 * IRQ sending engine.
	 */
	if (port->x_char) {
		writeb(port->x_char, &channel->data);
		ZSDELAY();
		ZS_WSYNC(channel);

		port->icount.tx++;
		port->x_char = 0;
	} else {
		struct circ_buf *xmit = &port->state->xmit;

		writeb(xmit->buf[xmit->tail], &channel->data);
		ZSDELAY();
		ZS_WSYNC(channel);

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;

		if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
			uart_write_wakeup(&up->port);
	}
}

/* The port lock is held.  */
static void sunzilog_stop_rx(struct uart_port *port)
{
	struct uart_sunzilog_port *up = UART_ZILOG(port);
	struct zilog_channel __iomem *channel;

	/* Never shut down receive on the console port. */
	if (ZS_IS_CONS(up))
		return;

	channel = ZILOG_CHANNEL_FROM_PORT(port);

	/* Disable all RX interrupts.  */
	up->curregs[R1] &= ~RxINT_MASK;
	sunzilog_maybe_update_regs(up, channel);
}

/* The port lock is held.  */
static void sunzilog_enable_ms(struct uart_port *port)
{
	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
	struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
	unsigned char new_reg;

	/* Enable DCD/SYNC/CTS external status interrupts. */
	new_reg = up->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
	if (new_reg != up->curregs[R15]) {
		up->curregs[R15] = new_reg;

		/* NOTE: Not subject to 'transmitter active' rule.  */
		write_zsreg(channel, R15, up->curregs[R15] & ~WR7pEN);
	}
}

/* The port lock is not held.  */
static void sunzilog_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
	struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
	unsigned char set_bits, clear_bits, new_reg;
	unsigned long flags;

	set_bits = clear_bits = 0;

	if (break_state)
		set_bits |= SND_BRK;
	else
		clear_bits |= SND_BRK;

	spin_lock_irqsave(&port->lock, flags);

	new_reg = (up->curregs[R5] | set_bits) & ~clear_bits;
	if (new_reg != up->curregs[R5]) {
		up->curregs[R5] = new_reg;

		/* NOTE: Not subject to 'transmitter active' rule.  */
		write_zsreg(channel, R5, up->curregs[R5]);
	}

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Enable RX/TX and the interrupt sources; shared with console setup. */
static void __sunzilog_startup(struct uart_sunzilog_port *up)
{
	struct zilog_channel __iomem *channel;

	channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
	up->prev_status = readb(&channel->control);

	/* Enable receiver and transmitter.  */
	up->curregs[R3] |= RxENAB;
	up->curregs[R5] |= TxENAB;

	up->curregs[R1] |= EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
	sunzilog_maybe_update_regs(up, channel);
}

static int sunzilog_startup(struct uart_port *port)
{
	struct uart_sunzilog_port *up = UART_ZILOG(port);
	unsigned long flags;

	/* The console port is kept running permanently; see comment below. */
	if (ZS_IS_CONS(up))
		return 0;

	spin_lock_irqsave(&port->lock, flags);
	__sunzilog_startup(up);
	spin_unlock_irqrestore(&port->lock, flags);
	return 0;
}

/*
 * The test for ZS_IS_CONS is explained by the following e-mail:
 *****
 * From:    Russell King <rmk@arm.linux.org.uk>
 * Date:    Sun, 8 Dec 2002 10:18:38 +0000
 *
 * On Sun, Dec 08, 2002 at 02:43:36AM -0500, Pete Zaitcev wrote:
 * > I boot my 2.5 boxes using "console=ttyS0,9600" argument,
 * > and I noticed that something is not right with reference
 * > counting in this case. It seems that when the console
 * > is open by kernel initially, this is not accounted
 * > as an open, and uart_startup is not called.
 *
 * That is correct.  We are unable to call uart_startup when the serial
 * console is initialised because it may need to allocate memory (as
 * request_irq does) and the memory allocators may not have been
 * initialised.
 *
 * 1. initialise the port into a state where it can send characters in the
 *    console write method.
 *
 * 2.
 don't do the actual hardware shutdown in your shutdown() method (but
 *    do the normal software shutdown - ie, free irqs etc)
 *****
 */
static void sunzilog_shutdown(struct uart_port *port)
{
	struct uart_sunzilog_port *up = UART_ZILOG(port);
	struct zilog_channel __iomem *channel;
	unsigned long flags;

	/* Leave the console port fully alive (see e-mail above). */
	if (ZS_IS_CONS(up))
		return;

	spin_lock_irqsave(&port->lock, flags);

	channel = ZILOG_CHANNEL_FROM_PORT(port);

	/* Disable receiver and transmitter.  */
	up->curregs[R3] &= ~RxENAB;
	up->curregs[R5] &= ~TxENAB;

	/* Disable all interrupts and BRK assertion.  */
	up->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
	up->curregs[R5] &= ~SND_BRK;
	sunzilog_maybe_update_regs(up, channel);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Shared by TTY driver and serial console setup.  The port lock is held
 * and local interrupts are disabled.
 *
 * Translates termios cflag/iflag plus a precomputed BRG divisor into the
 * software register image (curregs) and the read/ignore status masks.
 */
static void
sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
		       unsigned int iflag, int brg)
{

	up->curregs[R10] = NRZ;
	up->curregs[R11] = TCBR | RCBR;

	/* Program BAUD and clock source.  */
	up->curregs[R4] &= ~XCLK_MASK;
	up->curregs[R4] |= X16CLK;
	up->curregs[R12] = brg & 0xff;
	up->curregs[R13] = (brg >> 8) & 0xff;
	up->curregs[R14] = BRSRC | BRENAB;

	/* Character size, stop bits, and parity.  */
	up->curregs[R3] &= ~RxN_MASK;
	up->curregs[R5] &= ~TxN_MASK;
	switch (cflag & CSIZE) {
	case CS5:
		up->curregs[R3] |= Rx5;
		up->curregs[R5] |= Tx5;
		up->parity_mask = 0x1f;
		break;
	case CS6:
		up->curregs[R3] |= Rx6;
		up->curregs[R5] |= Tx6;
		up->parity_mask = 0x3f;
		break;
	case CS7:
		up->curregs[R3] |= Rx7;
		up->curregs[R5] |= Tx7;
		up->parity_mask = 0x7f;
		break;
	case CS8:
	default:
		up->curregs[R3] |= Rx8;
		up->curregs[R5] |= Tx8;
		up->parity_mask = 0xff;
		break;
	};
	up->curregs[R4] &= ~0x0c;
	if (cflag & CSTOPB)
		up->curregs[R4] |= SB2;
	else
		up->curregs[R4] |= SB1;
	if (cflag & PARENB)
		up->curregs[R4] |= PAR_ENAB;
	else
		up->curregs[R4] &= ~PAR_ENAB;
	if (!(cflag & PARODD))
		up->curregs[R4] |= PAR_EVEN;
	else
		up->curregs[R4] &= ~PAR_EVEN;

	up->port.read_status_mask = Rx_OVR;
	if (iflag & INPCK)
		up->port.read_status_mask |= CRC_ERR | PAR_ERR;
	if (iflag & (BRKINT | PARMRK))
		up->port.read_status_mask |= BRK_ABRT;

	up->port.ignore_status_mask = 0;
	if (iflag & IGNPAR)
		up->port.ignore_status_mask |= CRC_ERR | PAR_ERR;
	if (iflag & IGNBRK) {
		up->port.ignore_status_mask |= BRK_ABRT;
		if (iflag & IGNPAR)
			up->port.ignore_status_mask |= Rx_OVR;
	}

	/* CREAD off: ignore everything (receiver logically disabled). */
	if ((cflag & CREAD) == 0)
		up->port.ignore_status_mask = 0xff;
}

/* The port lock is not held.
*/ static void sunzilog_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port; unsigned long flags; int baud, brg; baud = uart_get_baud_rate(port, termios, old, 1200, 76800); spin_lock_irqsave(&up->port.lock, flags); brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); sunzilog_convert_to_zs(up, termios->c_cflag, termios->c_iflag, brg); if (UART_ENABLE_MS(&up->port, termios->c_cflag)) up->flags |= SUNZILOG_FLAG_MODEM_STATUS; else up->flags &= ~SUNZILOG_FLAG_MODEM_STATUS; up->cflag = termios->c_cflag; sunzilog_maybe_update_regs(up, ZILOG_CHANNEL_FROM_PORT(port)); uart_update_timeout(port, termios->c_cflag, baud); spin_unlock_irqrestore(&up->port.lock, flags); } static const char *sunzilog_type(struct uart_port *port) { struct uart_sunzilog_port *up = UART_ZILOG(port); return (up->flags & SUNZILOG_FLAG_ESCC) ? "zs (ESCC)" : "zs"; } /* We do not request/release mappings of the registers here, this * happens at early serial probe time. */ static void sunzilog_release_port(struct uart_port *port) { } static int sunzilog_request_port(struct uart_port *port) { return 0; } /* These do not need to do anything interesting either. */ static void sunzilog_config_port(struct uart_port *port, int flags) { } /* We do not support letting the user mess with the divisor, IRQ, etc. 
 */
static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	return -EINVAL;
}

#ifdef CONFIG_CONSOLE_POLL
/* kgdb/kdb polling read: non-blocking, returns NO_POLL_CHAR if empty. */
static int sunzilog_get_poll_char(struct uart_port *port)
{
	unsigned char ch, r1;
	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
	struct zilog_channel __iomem *channel
		= ZILOG_CHANNEL_FROM_PORT(&up->port);

	r1 = read_zsreg(channel, R1);
	if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
		writeb(ERR_RES, &channel->control);
		ZSDELAY();
		ZS_WSYNC(channel);
	}

	ch = readb(&channel->control);
	ZSDELAY();

	/* This funny hack depends upon BRK_ABRT not interfering
	 * with the other bits we care about in R1.
	 */
	if (ch & BRK_ABRT)
		r1 |= BRK_ABRT;

	if (!(ch & Rx_CH_AV))
		return NO_POLL_CHAR;

	ch = readb(&channel->data);
	ZSDELAY();

	ch &= up->parity_mask;
	return ch;
}

static void sunzilog_put_poll_char(struct uart_port *port,
			unsigned char ch)
{
	struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;

	sunzilog_putchar(&up->port, ch);
}
#endif /* CONFIG_CONSOLE_POLL */

static struct uart_ops sunzilog_pops = {
	.tx_empty	=	sunzilog_tx_empty,
	.set_mctrl	=	sunzilog_set_mctrl,
	.get_mctrl	=	sunzilog_get_mctrl,
	.stop_tx	=	sunzilog_stop_tx,
	.start_tx	=	sunzilog_start_tx,
	.stop_rx	=	sunzilog_stop_rx,
	.enable_ms	=	sunzilog_enable_ms,
	.break_ctl	=	sunzilog_break_ctl,
	.startup	=	sunzilog_startup,
	.shutdown	=	sunzilog_shutdown,
	.set_termios	=	sunzilog_set_termios,
	.type		=	sunzilog_type,
	.release_port	=	sunzilog_release_port,
	.request_port	=	sunzilog_request_port,
	.config_port	=	sunzilog_config_port,
	.verify_port	=	sunzilog_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	=	sunzilog_get_poll_char,
	.poll_put_char	=	sunzilog_put_poll_char,
#endif
};

static int uart_chip_count;
static struct uart_sunzilog_port *sunzilog_port_table;
static struct zilog_layout __iomem **sunzilog_chip_regs;

/* Head of the per-IRQ list walked by sunzilog_interrupt(). */
static struct uart_sunzilog_port *sunzilog_irq_chain;

static struct uart_driver sunzilog_reg = {
	.owner		=	THIS_MODULE,
	.driver_name	=	"sunzilog",
	.dev_name	=	"ttyS",
	.major		=	TTY_MAJOR,
};

/* Allocate the port table (2 ports per chip) and the chip register
 * array, and link the ports into the IRQ servicing chain.
 */
static int __init sunzilog_alloc_tables(int num_sunzilog)
{
	struct uart_sunzilog_port *up;
	unsigned long size;
	int num_channels = num_sunzilog * 2;
	int i;

	size = num_channels * sizeof(struct uart_sunzilog_port);
	sunzilog_port_table = kzalloc(size, GFP_KERNEL);
	if (!sunzilog_port_table)
		return -ENOMEM;

	for (i = 0; i < num_channels; i++) {
		up = &sunzilog_port_table[i];

		spin_lock_init(&up->port.lock);

		if (i == 0)
			sunzilog_irq_chain = up;

		if (i < num_channels - 1)
			up->next = up + 1;
		else
			up->next = NULL;
	}

	size = num_sunzilog * sizeof(struct zilog_layout __iomem *);
	sunzilog_chip_regs = kzalloc(size, GFP_KERNEL);
	if (!sunzilog_chip_regs) {
		kfree(sunzilog_port_table);
		sunzilog_irq_chain = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void sunzilog_free_tables(void)
{
	kfree(sunzilog_port_table);
	sunzilog_irq_chain = NULL;
	kfree(sunzilog_chip_regs);
}

#define ZS_PUT_CHAR_MAX_DELAY	2000	/* 10 ms */

/* Busy-wait for TX buffer empty (bounded), then write one character. */
static void sunzilog_putchar(struct uart_port *port, int ch)
{
	struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(port);
	int loops = ZS_PUT_CHAR_MAX_DELAY;

	/* This is a timed polling loop so do not switch the explicit
	 * udelay with ZSDELAY as that is a NOP on some platforms.  -DaveM
	 */
	do {
		unsigned char val = readb(&channel->control);
		if (val & Tx_BUF_EMP) {
			ZSDELAY();
			break;
		}
		udelay(5);
	} while (--loops);

	writeb(ch, &channel->data);
	ZSDELAY();
	ZS_WSYNC(channel);
}

#ifdef CONFIG_SERIO

static DEFINE_SPINLOCK(sunzilog_serio_lock);

static int sunzilog_serio_write(struct serio *serio, unsigned char ch)
{
	struct uart_sunzilog_port *up = serio->port_data;
	unsigned long flags;

	spin_lock_irqsave(&sunzilog_serio_lock, flags);

	sunzilog_putchar(&up->port, ch);

	spin_unlock_irqrestore(&sunzilog_serio_lock, flags);

	return 0;
}

static int sunzilog_serio_open(struct serio *serio)
{
	struct uart_sunzilog_port *up = serio->port_data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&sunzilog_serio_lock, flags);
	/* Only a single opener at a time. */
	if (!up->serio_open) {
		up->serio_open = 1;
		ret = 0;
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&sunzilog_serio_lock, flags);

	return ret;
}

static void sunzilog_serio_close(struct serio *serio)
{
	struct uart_sunzilog_port *up = serio->port_data;
	unsigned long flags;

	spin_lock_irqsave(&sunzilog_serio_lock, flags);
	up->serio_open = 0;
	spin_unlock_irqrestore(&sunzilog_serio_lock, flags);
}

#endif /* CONFIG_SERIO */

#ifdef CONFIG_SERIAL_SUNZILOG_CONSOLE
static void
sunzilog_console_write(struct console *con, const char *s, unsigned int count)
{
	struct uart_sunzilog_port *up = &sunzilog_port_table[con->index];
	unsigned long flags;
	int locked = 1;

	local_irq_save(flags);
	/* Avoid deadlocking against the IRQ path during sysrq/oops. */
	if (up->port.sysrq) {
		locked = 0;
	} else if (oops_in_progress) {
		locked = spin_trylock(&up->port.lock);
	} else
		spin_lock(&up->port.lock);

	uart_console_write(&up->port, s, count, sunzilog_putchar);
	udelay(2);

	if (locked)
		spin_unlock(&up->port.lock);
	local_irq_restore(flags);
}

static int __init sunzilog_console_setup(struct console *con, char *options)
{
	struct uart_sunzilog_port *up = &sunzilog_port_table[con->index];
	unsigned long flags;
	int baud, brg;

	if (up->port.type != PORT_SUNZILOG)
		return -1;

	printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n",
	       (sunzilog_reg.minor - 64) + con->index, con->index);

	/* Get firmware console settings.  */
	sunserial_console_termios(con, up->port.dev->of_node);

	/* Firmware console speed is limited to 150-->38400 baud so
	 * this hackish cflag thing is OK.
	 */
	switch (con->cflag & CBAUD) {
	case B150: baud = 150; break;
	case B300: baud = 300; break;
	case B600: baud = 600; break;
	case B1200: baud = 1200; break;
	case B2400: baud = 2400; break;
	case B4800: baud = 4800; break;
	default: case B9600: baud = 9600; break;
	case B19200: baud = 19200; break;
	case B38400: baud = 38400; break;
	};

	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);

	spin_lock_irqsave(&up->port.lock, flags);

	up->curregs[R15] |= BRKIE;
	sunzilog_convert_to_zs(up, con->cflag, 0, brg);

	sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
	__sunzilog_startup(up);

	spin_unlock_irqrestore(&up->port.lock, flags);

	return 0;
}

static struct console sunzilog_console_ops = {
	.name	=	"ttyS",
	.write	=	sunzilog_console_write,
	.device	=	uart_console_device,
	.setup	=	sunzilog_console_setup,
	.flags	=	CON_PRINTBUFFER,
	.index	=	-1,
	.data   =	&sunzilog_reg,
};

static inline struct console *SUNZILOG_CONSOLE(void)
{
	return &sunzilog_console_ops;
}

#else
#define SUNZILOG_CONSOLE()	(NULL)
#endif

/* Fixed line settings for the console keyboard (1200) / mouse (4800). */
static void sunzilog_init_kbdms(struct uart_sunzilog_port *up)
{
	int baud, brg;

	if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
		up->cflag = B1200 | CS8 | CLOCAL | CREAD;
		baud = 1200;
	} else {
		up->cflag = B4800 | CS8 | CLOCAL | CREAD;
		baud = 4800;
	}

	up->curregs[R15] |= BRKIE;
	brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
	sunzilog_convert_to_zs(up, up->cflag, 0, brg);
	sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
	__sunzilog_startup(up);
}

#ifdef CONFIG_SERIO
/* Expose the keyboard/mouse line to the input layer via serio. */
static void sunzilog_register_serio(struct uart_sunzilog_port *up)
{
	struct serio *serio = &up->serio;

	serio->port_data = up;

	serio->id.type = SERIO_RS232;
	if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
		serio->id.proto = SERIO_SUNKBD;
		strlcpy(serio->name, "zskbd", sizeof(serio->name));
	} else {
		serio->id.proto = SERIO_SUN;
		serio->id.extra = 1;
		strlcpy(serio->name, "zsms", sizeof(serio->name));
	}
	strlcpy(serio->phys,
		((up->flags & SUNZILOG_FLAG_CONS_KEYB) ?
		 "zs/serio0" : "zs/serio1"),
		sizeof(serio->phys));

	serio->write = sunzilog_serio_write;
	serio->open = sunzilog_serio_open;
	serio->close = sunzilog_serio_close;
	serio->dev.parent = up->port.dev;

	serio_register_port(serio);
}
#endif

/* Hardware-reset (channel A only) and program a channel's initial state. */
static void sunzilog_init_hw(struct uart_sunzilog_port *up)
{
	struct zilog_channel __iomem *channel;
	unsigned long flags;
	int baud, brg;

	channel = ZILOG_CHANNEL_FROM_PORT(&up->port);

	spin_lock_irqsave(&up->port.lock, flags);
	if (ZS_IS_CHANNEL_A(up)) {
		/* Full hardware reset of the chip, done once per chip. */
		write_zsreg(channel, R9, FHWRES);
		ZSDELAY_LONG();
		(void) read_zsreg(channel, R0);
	}

	if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
			 SUNZILOG_FLAG_CONS_MOUSE)) {
		up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
		up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
		up->curregs[R3] = RxENAB | Rx8;
		up->curregs[R5] = TxENAB | Tx8;
		up->curregs[R6] = 0x00; /* SDLC Address */
		up->curregs[R7] = 0x7E; /* SDLC Flag    */
		up->curregs[R9] = NV;
		up->curregs[R7p] = 0x00;
		sunzilog_init_kbdms(up);
		/* Only enable interrupts if an ISR handler available */
		if (up->flags & SUNZILOG_FLAG_ISR_HANDLER)
			up->curregs[R9] |= MIE;
		write_zsreg(channel, R9, up->curregs[R9]);
	} else {
		/* Normal serial TTY. */
		up->parity_mask = 0xff;
		up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
		up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
		up->curregs[R3] = RxENAB | Rx8;
		up->curregs[R5] = TxENAB | Tx8;
		up->curregs[R6] = 0x00; /* SDLC Address */
		up->curregs[R7] = 0x7E; /* SDLC Flag    */
		up->curregs[R9] = NV;
		up->curregs[R10] = NRZ;
		up->curregs[R11] = TCBR | RCBR;
		baud = 9600;
		brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
		up->curregs[R12] = (brg & 0xff);
		up->curregs[R13] = (brg >> 8) & 0xff;
		up->curregs[R14] = BRSRC | BRENAB;
		up->curregs[R15] = FIFOEN; /* Use FIFO if on ESCC */
		up->curregs[R7p] = TxFIFO_LVL | RxFIFO_LVL;
		if (__load_zsregs(channel, up->curregs)) {
			up->flags |= SUNZILOG_FLAG_ESCC;
		}
		/* Only enable interrupts if an ISR handler available */
		if (up->flags & SUNZILOG_FLAG_ISR_HANDLER)
			up->curregs[R9] |= MIE;
		write_zsreg(channel, R9, up->curregs[R9]);
	}

	spin_unlock_irqrestore(&up->port.lock, flags);

#ifdef CONFIG_SERIO
	if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
			 SUNZILOG_FLAG_CONS_MOUSE))
		sunzilog_register_serio(up);
#endif
}

static int zilog_irq;

/* Probe one Zilog chip: map its registers, initialise both channels and
 * register them either as ttyS ports or as keyboard/mouse serio lines.
 */
static int zs_probe(struct platform_device *op)
{
	static int kbm_inst, uart_inst;
	int inst;
	struct uart_sunzilog_port *up;
	struct zilog_layout __iomem *rp;
	int keyboard_mouse = 0;
	int err;

	if (of_find_property(op->dev.of_node, "keyboard", NULL))
		keyboard_mouse = 1;

	/* uarts must come before keyboards/mice */
	if (keyboard_mouse)
		inst = uart_chip_count + kbm_inst;
	else
		inst = uart_inst;

	sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0,
					      sizeof(struct zilog_layout),
					      "zs");
	if (!sunzilog_chip_regs[inst])
		return -ENOMEM;

	rp = sunzilog_chip_regs[inst];

	if (!zilog_irq)
		zilog_irq = op->archdata.irqs[0];

	up = &sunzilog_port_table[inst * 2];

	/* Channel A */
	up[0].port.mapbase = op->resource[0].start + 0x00;
	up[0].port.membase = (void __iomem *) &rp->channelA;
	up[0].port.iotype = UPIO_MEM;
	up[0].port.irq = op->archdata.irqs[0];
	up[0].port.uartclk = ZS_CLOCK;
	up[0].port.fifosize = 1;
	up[0].port.ops = &sunzilog_pops;
	up[0].port.type = PORT_SUNZILOG;
	up[0].port.flags = 0;
	up[0].port.line = (inst * 2) + 0;
	up[0].port.dev = &op->dev;
	up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
	if (keyboard_mouse)
		up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
	sunzilog_init_hw(&up[0]);

	/* Channel B */
	up[1].port.mapbase = op->resource[0].start + 0x04;
	up[1].port.membase = (void __iomem *) &rp->channelB;
	up[1].port.iotype = UPIO_MEM;
	up[1].port.irq = op->archdata.irqs[0];
	up[1].port.uartclk = ZS_CLOCK;
	up[1].port.fifosize = 1;
	up[1].port.ops = &sunzilog_pops;
	up[1].port.type = PORT_SUNZILOG;
	up[1].port.flags = 0;
	up[1].port.line = (inst * 2) + 1;
	up[1].port.dev = &op->dev;
	up[1].flags |= 0;
	if (keyboard_mouse)
		up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
	sunzilog_init_hw(&up[1]);

	if (!keyboard_mouse) {
		if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
					    &sunzilog_reg, up[0].port.line,
					    false))
			up->flags |= SUNZILOG_FLAG_IS_CONS;
		err = uart_add_one_port(&sunzilog_reg, &up[0].port);
		if (err) {
			of_iounmap(&op->resource[0],
				   rp, sizeof(struct zilog_layout));
			return err;
		}
		if (sunserial_console_match(SUNZILOG_CONSOLE(), op->dev.of_node,
					    &sunzilog_reg, up[1].port.line,
					    false))
			up->flags |= SUNZILOG_FLAG_IS_CONS;
		err = uart_add_one_port(&sunzilog_reg, &up[1].port);
		if (err) {
			uart_remove_one_port(&sunzilog_reg, &up[0].port);
			of_iounmap(&op->resource[0],
				   rp, sizeof(struct zilog_layout));
			return err;
		}
		uart_inst++;
	} else {
		printk(KERN_INFO "%s: Keyboard at MMIO 0x%llx (irq = %d) "
		       "is a %s\n",
		       dev_name(&op->dev),
		       (unsigned long long) up[0].port.mapbase,
		       op->archdata.irqs[0], sunzilog_type(&up[0].port));
		printk(KERN_INFO "%s: Mouse at MMIO 0x%llx (irq = %d) "
		       "is a %s\n",
		       dev_name(&op->dev),
		       (unsigned long long) up[1].port.mapbase,
		       op->archdata.irqs[0], sunzilog_type(&up[1].port));
		kbm_inst++;
	}

	dev_set_drvdata(&op->dev, &up[0]);

	return 0;
}

static void zs_remove_one(struct uart_sunzilog_port *up)
{
	if (ZS_IS_KEYB(up) || ZS_IS_MOUSE(up)) {
#ifdef CONFIG_SERIO
		serio_unregister_port(&up->serio);
#endif
	} else
uart_remove_one_port(&sunzilog_reg, &up->port); } static int zs_remove(struct platform_device *op) { struct uart_sunzilog_port *up = dev_get_drvdata(&op->dev); struct zilog_layout __iomem *regs; zs_remove_one(&up[0]); zs_remove_one(&up[1]); regs = sunzilog_chip_regs[up[0].port.line / 2]; of_iounmap(&op->resource[0], regs, sizeof(struct zilog_layout)); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id zs_match[] = { { .name = "zs", }, {}, }; MODULE_DEVICE_TABLE(of, zs_match); static struct platform_driver zs_driver = { .driver = { .name = "zs", .owner = THIS_MODULE, .of_match_table = zs_match, }, .probe = zs_probe, .remove = zs_remove, }; static int __init sunzilog_init(void) { struct device_node *dp; int err; int num_keybms = 0; int num_sunzilog = 0; for_each_node_by_name(dp, "zs") { num_sunzilog++; if (of_find_property(dp, "keyboard", NULL)) num_keybms++; } if (num_sunzilog) { err = sunzilog_alloc_tables(num_sunzilog); if (err) goto out; uart_chip_count = num_sunzilog - num_keybms; err = sunserial_register_minors(&sunzilog_reg, uart_chip_count * 2); if (err) goto out_free_tables; } err = platform_driver_register(&zs_driver); if (err) goto out_unregister_uart; if (zilog_irq) { struct uart_sunzilog_port *up = sunzilog_irq_chain; err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED, "zs", sunzilog_irq_chain); if (err) goto out_unregister_driver; /* Enable Interrupts */ while (up) { struct zilog_channel __iomem *channel; /* printk (KERN_INFO "Enable IRQ for ZILOG Hardware %p\n", up); */ channel = ZILOG_CHANNEL_FROM_PORT(&up->port); up->flags |= SUNZILOG_FLAG_ISR_HANDLER; up->curregs[R9] |= MIE; write_zsreg(channel, R9, up->curregs[R9]); up = up->next; } } out: return err; out_unregister_driver: platform_driver_unregister(&zs_driver); out_unregister_uart: if (num_sunzilog) { sunserial_unregister_minors(&sunzilog_reg, num_sunzilog); sunzilog_reg.cons = NULL; } out_free_tables: sunzilog_free_tables(); goto out; } static void __exit 
sunzilog_exit(void) { platform_driver_unregister(&zs_driver); if (zilog_irq) { struct uart_sunzilog_port *up = sunzilog_irq_chain; /* Disable Interrupts */ while (up) { struct zilog_channel __iomem *channel; /* printk (KERN_INFO "Disable IRQ for ZILOG Hardware %p\n", up); */ channel = ZILOG_CHANNEL_FROM_PORT(&up->port); up->flags &= ~SUNZILOG_FLAG_ISR_HANDLER; up->curregs[R9] &= ~MIE; write_zsreg(channel, R9, up->curregs[R9]); up = up->next; } free_irq(zilog_irq, sunzilog_irq_chain); zilog_irq = 0; } if (sunzilog_reg.nr) { sunserial_unregister_minors(&sunzilog_reg, sunzilog_reg.nr); sunzilog_free_tables(); } } module_init(sunzilog_init); module_exit(sunzilog_exit); MODULE_AUTHOR("David S. Miller"); MODULE_DESCRIPTION("Sun Zilog serial port driver"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
cbolumar/android_kernel_samsung_msm8916
arch/arm/mach-davinci/board-dm365-evm.c
2045
18891
/* * TI DaVinci DM365 EVM board support * * Copyright (C) 2009 Texas Instruments Incorporated * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/i2c/at24.h> #include <linux/leds.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/slab.h> #include <linux/mtd/nand.h> #include <linux/input.h> #include <linux/spi/spi.h> #include <linux/spi/eeprom.h> #include <linux/v4l2-dv-timings.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/mux.h> #include <mach/common.h> #include <linux/platform_data/i2c-davinci.h> #include <mach/serial.h> #include <linux/platform_data/mmc-davinci.h> #include <linux/platform_data/mtd-davinci.h> #include <linux/platform_data/keyscan-davinci.h> #include <media/ths7303.h> #include <media/tvp514x.h> #include "davinci.h" static inline int have_imager(void) { /* REVISIT when it's supported, trigger via Kconfig */ return 0; } static inline int have_tvp7002(void) { /* REVISIT when it's supported, trigger via Kconfig */ return 0; } #define DM365_EVM_PHY_ID "davinci_mdio-0:01" /* * A MAX-II CPLD is used for various board control functions. 
*/ #define CPLD_OFFSET(a13a8,a2a1) (((a13a8) << 10) + ((a2a1) << 3)) #define CPLD_VERSION CPLD_OFFSET(0,0) /* r/o */ #define CPLD_TEST CPLD_OFFSET(0,1) #define CPLD_LEDS CPLD_OFFSET(0,2) #define CPLD_MUX CPLD_OFFSET(0,3) #define CPLD_SWITCH CPLD_OFFSET(1,0) /* r/o */ #define CPLD_POWER CPLD_OFFSET(1,1) #define CPLD_VIDEO CPLD_OFFSET(1,2) #define CPLD_CARDSTAT CPLD_OFFSET(1,3) /* r/o */ #define CPLD_DILC_OUT CPLD_OFFSET(2,0) #define CPLD_DILC_IN CPLD_OFFSET(2,1) /* r/o */ #define CPLD_IMG_DIR0 CPLD_OFFSET(2,2) #define CPLD_IMG_MUX0 CPLD_OFFSET(2,3) #define CPLD_IMG_MUX1 CPLD_OFFSET(3,0) #define CPLD_IMG_DIR1 CPLD_OFFSET(3,1) #define CPLD_IMG_MUX2 CPLD_OFFSET(3,2) #define CPLD_IMG_MUX3 CPLD_OFFSET(3,3) #define CPLD_IMG_DIR2 CPLD_OFFSET(4,0) #define CPLD_IMG_MUX4 CPLD_OFFSET(4,1) #define CPLD_IMG_MUX5 CPLD_OFFSET(4,2) #define CPLD_RESETS CPLD_OFFSET(4,3) #define CPLD_CCD_DIR1 CPLD_OFFSET(0x3e,0) #define CPLD_CCD_IO1 CPLD_OFFSET(0x3e,1) #define CPLD_CCD_DIR2 CPLD_OFFSET(0x3e,2) #define CPLD_CCD_IO2 CPLD_OFFSET(0x3e,3) #define CPLD_CCD_DIR3 CPLD_OFFSET(0x3f,0) #define CPLD_CCD_IO3 CPLD_OFFSET(0x3f,1) static void __iomem *cpld; /* NOTE: this is geared for the standard config, with a socketed * 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors. If you * swap chips with a different block size, partitioning will * need to be changed. 
This NAND chip MT29F16G08FAA is the default * NAND shipped with the Spectrum Digital DM365 EVM */ #define NAND_BLOCK_SIZE SZ_128K static struct mtd_partition davinci_nand_partitions[] = { { /* UBL (a few copies) plus U-Boot */ .name = "bootloader", .offset = 0, .size = 30 * NAND_BLOCK_SIZE, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { /* U-Boot environment */ .name = "params", .offset = MTDPART_OFS_APPEND, .size = 2 * NAND_BLOCK_SIZE, .mask_flags = 0, }, { .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = SZ_4M, .mask_flags = 0, }, { .name = "filesystem1", .offset = MTDPART_OFS_APPEND, .size = SZ_512M, .mask_flags = 0, }, { .name = "filesystem2", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0, } /* two blocks with bad block table (and mirror) at the end */ }; static struct davinci_nand_pdata davinci_nand_data = { .mask_chipsel = BIT(14), .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), .ecc_mode = NAND_ECC_HW, .bbt_options = NAND_BBT_USE_FLASH, .ecc_bits = 4, }; static struct resource davinci_nand_resources[] = { { .start = DM365_ASYNC_EMIF_DATA_CE0_BASE, .end = DM365_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1, .flags = IORESOURCE_MEM, }, { .start = DM365_ASYNC_EMIF_CONTROL_BASE, .end = DM365_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device davinci_nand_device = { .name = "davinci_nand", .id = 0, .num_resources = ARRAY_SIZE(davinci_nand_resources), .resource = davinci_nand_resources, .dev = { .platform_data = &davinci_nand_data, }, }; static struct at24_platform_data eeprom_info = { .byte_len = (256*1024) / 8, .page_size = 64, .flags = AT24_FLAG_ADDR16, .setup = davinci_get_mac_addr, .context = (void *)0x7f00, }; static struct snd_platform_data dm365_evm_snd_data = { .asp_chan_q = EVENTQ_3, }; static struct i2c_board_info i2c_info[] = { { I2C_BOARD_INFO("24c256", 0x50), .platform_data = &eeprom_info, }, { I2C_BOARD_INFO("tlv320aic3x", 0x18), }, }; 
static struct davinci_i2c_platform_data i2c_pdata = { .bus_freq = 400 /* kHz */, .bus_delay = 0 /* usec */, }; static int dm365evm_keyscan_enable(struct device *dev) { return davinci_cfg_reg(DM365_KEYSCAN); } static unsigned short dm365evm_keymap[] = { KEY_KP2, KEY_LEFT, KEY_EXIT, KEY_DOWN, KEY_ENTER, KEY_UP, KEY_KP1, KEY_RIGHT, KEY_MENU, KEY_RECORD, KEY_REWIND, KEY_KPMINUS, KEY_STOP, KEY_FASTFORWARD, KEY_KPPLUS, KEY_PLAYPAUSE, 0 }; static struct davinci_ks_platform_data dm365evm_ks_data = { .device_enable = dm365evm_keyscan_enable, .keymap = dm365evm_keymap, .keymapsize = ARRAY_SIZE(dm365evm_keymap), .rep = 1, /* Scan period = strobe + interval */ .strobe = 0x5, .interval = 0x2, .matrix_type = DAVINCI_KEYSCAN_MATRIX_4X4, }; static int cpld_mmc_get_cd(int module) { if (!cpld) return -ENXIO; /* low == card present */ return !(__raw_readb(cpld + CPLD_CARDSTAT) & BIT(module ? 4 : 0)); } static int cpld_mmc_get_ro(int module) { if (!cpld) return -ENXIO; /* high == card's write protect switch active */ return !!(__raw_readb(cpld + CPLD_CARDSTAT) & BIT(module ? 
5 : 1)); } static struct davinci_mmc_config dm365evm_mmc_config = { .get_cd = cpld_mmc_get_cd, .get_ro = cpld_mmc_get_ro, .wires = 4, .max_freq = 50000000, .caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED, }; static void dm365evm_emac_configure(void) { /* * EMAC pins are multiplexed with GPIO and UART * Further details are available at the DM365 ARM * Subsystem Users Guide(sprufg5.pdf) pages 125 - 127 */ davinci_cfg_reg(DM365_EMAC_TX_EN); davinci_cfg_reg(DM365_EMAC_TX_CLK); davinci_cfg_reg(DM365_EMAC_COL); davinci_cfg_reg(DM365_EMAC_TXD3); davinci_cfg_reg(DM365_EMAC_TXD2); davinci_cfg_reg(DM365_EMAC_TXD1); davinci_cfg_reg(DM365_EMAC_TXD0); davinci_cfg_reg(DM365_EMAC_RXD3); davinci_cfg_reg(DM365_EMAC_RXD2); davinci_cfg_reg(DM365_EMAC_RXD1); davinci_cfg_reg(DM365_EMAC_RXD0); davinci_cfg_reg(DM365_EMAC_RX_CLK); davinci_cfg_reg(DM365_EMAC_RX_DV); davinci_cfg_reg(DM365_EMAC_RX_ER); davinci_cfg_reg(DM365_EMAC_CRS); davinci_cfg_reg(DM365_EMAC_MDIO); davinci_cfg_reg(DM365_EMAC_MDCLK); /* * EMAC interrupts are multiplexed with GPIO interrupts * Details are available at the DM365 ARM * Subsystem Users Guide(sprufg5.pdf) pages 133 - 134 */ davinci_cfg_reg(DM365_INT_EMAC_RXTHRESH); davinci_cfg_reg(DM365_INT_EMAC_RXPULSE); davinci_cfg_reg(DM365_INT_EMAC_TXPULSE); davinci_cfg_reg(DM365_INT_EMAC_MISCPULSE); } static void dm365evm_mmc_configure(void) { /* * MMC/SD pins are multiplexed with GPIO and EMIF * Further details are available at the DM365 ARM * Subsystem Users Guide(sprufg5.pdf) pages 118, 128 - 131 */ davinci_cfg_reg(DM365_SD1_CLK); davinci_cfg_reg(DM365_SD1_CMD); davinci_cfg_reg(DM365_SD1_DATA3); davinci_cfg_reg(DM365_SD1_DATA2); davinci_cfg_reg(DM365_SD1_DATA1); davinci_cfg_reg(DM365_SD1_DATA0); } static struct tvp514x_platform_data tvp5146_pdata = { .clk_polarity = 0, .hs_polarity = 1, .vs_polarity = 1 }; #define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL) /* Inputs available at the TVP5146 */ static struct v4l2_input tvp5146_inputs[] = { { .index = 0, .name = 
"Composite", .type = V4L2_INPUT_TYPE_CAMERA, .std = TVP514X_STD_ALL, }, { .index = 1, .name = "S-Video", .type = V4L2_INPUT_TYPE_CAMERA, .std = TVP514X_STD_ALL, }, }; /* * this is the route info for connecting each input to decoder * ouput that goes to vpfe. There is a one to one correspondence * with tvp5146_inputs */ static struct vpfe_route tvp5146_routes[] = { { .input = INPUT_CVBS_VI2B, .output = OUTPUT_10BIT_422_EMBEDDED_SYNC, }, { .input = INPUT_SVIDEO_VI2C_VI1C, .output = OUTPUT_10BIT_422_EMBEDDED_SYNC, }, }; static struct vpfe_subdev_info vpfe_sub_devs[] = { { .name = "tvp5146", .grp_id = 0, .num_inputs = ARRAY_SIZE(tvp5146_inputs), .inputs = tvp5146_inputs, .routes = tvp5146_routes, .can_route = 1, .ccdc_if_params = { .if_type = VPFE_BT656, .hdpol = VPFE_PINPOL_POSITIVE, .vdpol = VPFE_PINPOL_POSITIVE, }, .board_info = { I2C_BOARD_INFO("tvp5146", 0x5d), .platform_data = &tvp5146_pdata, }, }, }; static struct vpfe_config vpfe_cfg = { .num_subdevs = ARRAY_SIZE(vpfe_sub_devs), .sub_devs = vpfe_sub_devs, .i2c_adapter_id = 1, .card_name = "DM365 EVM", .ccdc = "ISIF", }; /* venc standards timings */ static struct vpbe_enc_mode_info dm365evm_enc_std_timing[] = { { .name = "ntsc", .timings_type = VPBE_ENC_STD, .std_id = V4L2_STD_NTSC, .interlaced = 1, .xres = 720, .yres = 480, .aspect = {11, 10}, .fps = {30000, 1001}, .left_margin = 0x79, .upper_margin = 0x10, }, { .name = "pal", .timings_type = VPBE_ENC_STD, .std_id = V4L2_STD_PAL, .interlaced = 1, .xres = 720, .yres = 576, .aspect = {54, 59}, .fps = {25, 1}, .left_margin = 0x7E, .upper_margin = 0x16, }, }; /* venc dv timings */ static struct vpbe_enc_mode_info dm365evm_enc_preset_timing[] = { { .name = "480p59_94", .timings_type = VPBE_ENC_DV_TIMINGS, .dv_timings = V4L2_DV_BT_CEA_720X480P59_94, .interlaced = 0, .xres = 720, .yres = 480, .aspect = {1, 1}, .fps = {5994, 100}, .left_margin = 0x8F, .upper_margin = 0x2D, }, { .name = "576p50", .timings_type = VPBE_ENC_DV_TIMINGS, .dv_timings = 
V4L2_DV_BT_CEA_720X576P50, .interlaced = 0, .xres = 720, .yres = 576, .aspect = {1, 1}, .fps = {50, 1}, .left_margin = 0x8C, .upper_margin = 0x36, }, { .name = "720p60", .timings_type = VPBE_ENC_DV_TIMINGS, .dv_timings = V4L2_DV_BT_CEA_1280X720P60, .interlaced = 0, .xres = 1280, .yres = 720, .aspect = {1, 1}, .fps = {60, 1}, .left_margin = 0x117, .right_margin = 70, .upper_margin = 38, .lower_margin = 3, .hsync_len = 80, .vsync_len = 5, }, { .name = "1080i60", .timings_type = VPBE_ENC_DV_TIMINGS, .dv_timings = V4L2_DV_BT_CEA_1920X1080I60, .interlaced = 1, .xres = 1920, .yres = 1080, .aspect = {1, 1}, .fps = {30, 1}, .left_margin = 0xc9, .right_margin = 80, .upper_margin = 30, .lower_margin = 3, .hsync_len = 88, .vsync_len = 5, }, }; #define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL) /* * The outputs available from VPBE + ecnoders. Keep the * the order same as that of encoders. First those from venc followed by that * from encoders. Index in the output refers to index on a particular * encoder.Driver uses this index to pass it to encoder when it supports more * than one output. Application uses index of the array to set an output. 
*/ static struct vpbe_output dm365evm_vpbe_outputs[] = { { .output = { .index = 0, .name = "Composite", .type = V4L2_OUTPUT_TYPE_ANALOG, .std = VENC_STD_ALL, .capabilities = V4L2_OUT_CAP_STD, }, .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME, .default_mode = "ntsc", .num_modes = ARRAY_SIZE(dm365evm_enc_std_timing), .modes = dm365evm_enc_std_timing, .if_params = V4L2_MBUS_FMT_FIXED, }, { .output = { .index = 1, .name = "Component", .type = V4L2_OUTPUT_TYPE_ANALOG, .capabilities = V4L2_OUT_CAP_DV_TIMINGS, }, .subdev_name = DM365_VPBE_VENC_SUBDEV_NAME, .default_mode = "480p59_94", .num_modes = ARRAY_SIZE(dm365evm_enc_preset_timing), .modes = dm365evm_enc_preset_timing, .if_params = V4L2_MBUS_FMT_FIXED, }, }; /* * Amplifiers on the board */ struct ths7303_platform_data ths7303_pdata = { .ch_1 = 3, .ch_2 = 3, .ch_3 = 3, .init_enable = 1, }; static struct amp_config_info vpbe_amp = { .module_name = "ths7303", .is_i2c = 1, .board_info = { I2C_BOARD_INFO("ths7303", 0x2c), .platform_data = &ths7303_pdata, } }; static struct vpbe_config dm365evm_display_cfg = { .module_name = "dm365-vpbe-display", .i2c_adapter_id = 1, .amp = &vpbe_amp, .osd = { .module_name = DM365_VPBE_OSD_SUBDEV_NAME, }, .venc = { .module_name = DM365_VPBE_VENC_SUBDEV_NAME, }, .num_outputs = ARRAY_SIZE(dm365evm_vpbe_outputs), .outputs = dm365evm_vpbe_outputs, }; static void __init evm_init_i2c(void) { davinci_init_i2c(&i2c_pdata); i2c_register_board_info(1, i2c_info, ARRAY_SIZE(i2c_info)); } static struct platform_device *dm365_evm_nand_devices[] __initdata = { &davinci_nand_device, }; static inline int have_leds(void) { #ifdef CONFIG_LEDS_CLASS return 1; #else return 0; #endif } struct cpld_led { struct led_classdev cdev; u8 mask; }; static const struct { const char *name; const char *trigger; } cpld_leds[] = { { "dm365evm::ds2", }, { "dm365evm::ds3", }, { "dm365evm::ds4", }, { "dm365evm::ds5", }, { "dm365evm::ds6", "nand-disk", }, { "dm365evm::ds7", "mmc1", }, { "dm365evm::ds8", "mmc0", }, { "dm365evm::ds9", 
"heartbeat", }, }; static void cpld_led_set(struct led_classdev *cdev, enum led_brightness b) { struct cpld_led *led = container_of(cdev, struct cpld_led, cdev); u8 reg = __raw_readb(cpld + CPLD_LEDS); if (b != LED_OFF) reg &= ~led->mask; else reg |= led->mask; __raw_writeb(reg, cpld + CPLD_LEDS); } static enum led_brightness cpld_led_get(struct led_classdev *cdev) { struct cpld_led *led = container_of(cdev, struct cpld_led, cdev); u8 reg = __raw_readb(cpld + CPLD_LEDS); return (reg & led->mask) ? LED_OFF : LED_FULL; } static int __init cpld_leds_init(void) { int i; if (!have_leds() || !cpld) return 0; /* setup LEDs */ __raw_writeb(0xff, cpld + CPLD_LEDS); for (i = 0; i < ARRAY_SIZE(cpld_leds); i++) { struct cpld_led *led; led = kzalloc(sizeof(*led), GFP_KERNEL); if (!led) break; led->cdev.name = cpld_leds[i].name; led->cdev.brightness_set = cpld_led_set; led->cdev.brightness_get = cpld_led_get; led->cdev.default_trigger = cpld_leds[i].trigger; led->mask = BIT(i); if (led_classdev_register(NULL, &led->cdev) < 0) { kfree(led); break; } } return 0; } /* run after subsys_initcall() for LEDs */ fs_initcall(cpld_leds_init); static void __init evm_init_cpld(void) { u8 mux, resets; const char *label; struct clk *aemif_clk; /* Make sure we can configure the CPLD through CS1. Then * leave it on for later access to MMC and LED registers. */ aemif_clk = clk_get(NULL, "aemif"); if (IS_ERR(aemif_clk)) return; clk_prepare_enable(aemif_clk); if (request_mem_region(DM365_ASYNC_EMIF_DATA_CE1_BASE, SECTION_SIZE, "cpld") == NULL) goto fail; cpld = ioremap(DM365_ASYNC_EMIF_DATA_CE1_BASE, SECTION_SIZE); if (!cpld) { release_mem_region(DM365_ASYNC_EMIF_DATA_CE1_BASE, SECTION_SIZE); fail: pr_err("ERROR: can't map CPLD\n"); clk_disable_unprepare(aemif_clk); return; } /* External muxing for some signals */ mux = 0; /* Read SW5 to set up NAND + keypad _or_ OneNAND (sync read). * NOTE: SW4 bus width setting must match! 
*/ if ((__raw_readb(cpld + CPLD_SWITCH) & BIT(5)) == 0) { /* external keypad mux */ mux |= BIT(7); platform_add_devices(dm365_evm_nand_devices, ARRAY_SIZE(dm365_evm_nand_devices)); } else { /* no OneNAND support yet */ } /* Leave external chips in reset when unused. */ resets = BIT(3) | BIT(2) | BIT(1) | BIT(0); /* Static video input config with SN74CBT16214 1-of-3 mux: * - port b1 == tvp7002 (mux lowbits == 1 or 6) * - port b2 == imager (mux lowbits == 2 or 7) * - port b3 == tvp5146 (mux lowbits == 5) * * Runtime switching could work too, with limitations. */ if (have_imager()) { label = "HD imager"; mux |= 2; /* externally mux MMC1/ENET/AIC33 to imager */ mux |= BIT(6) | BIT(5) | BIT(3); } else { struct davinci_soc_info *soc_info = &davinci_soc_info; /* we can use MMC1 ... */ dm365evm_mmc_configure(); davinci_setup_mmc(1, &dm365evm_mmc_config); /* ... and ENET ... */ dm365evm_emac_configure(); soc_info->emac_pdata->phy_id = DM365_EVM_PHY_ID; resets &= ~BIT(3); /* ... and AIC33 */ resets &= ~BIT(1); if (have_tvp7002()) { mux |= 1; resets &= ~BIT(2); label = "tvp7002 HD"; } else { /* default to tvp5146 */ mux |= 5; resets &= ~BIT(0); label = "tvp5146 SD"; } } __raw_writeb(mux, cpld + CPLD_MUX); __raw_writeb(resets, cpld + CPLD_RESETS); pr_info("EVM: %s video input\n", label); /* REVISIT export switches: NTSC/PAL (SW5.6), EXTRA1 (SW5.2), etc */ } static struct davinci_uart_config uart_config __initdata = { .enabled_uarts = (1 << 0), }; static void __init dm365_evm_map_io(void) { dm365_init(); } static struct spi_eeprom at25640 = { .byte_len = SZ_64K / 8, .name = "at25640", .page_size = 32, .flags = EE_ADDR2, }; static struct spi_board_info dm365_evm_spi_info[] __initconst = { { .modalias = "at25", .platform_data = &at25640, .max_speed_hz = 10 * 1000 * 1000, .bus_num = 0, .chip_select = 0, .mode = SPI_MODE_0, }, }; static __init void dm365_evm_init(void) { evm_init_i2c(); davinci_serial_init(&uart_config); dm365evm_emac_configure(); dm365evm_mmc_configure(); 
davinci_setup_mmc(0, &dm365evm_mmc_config); dm365_init_video(&vpfe_cfg, &dm365evm_display_cfg); /* maybe setup mmc1/etc ... _after_ mmc0 */ evm_init_cpld(); #ifdef CONFIG_SND_DM365_AIC3X_CODEC dm365_init_asp(&dm365_evm_snd_data); #elif defined(CONFIG_SND_DM365_VOICE_CODEC) dm365_init_vc(&dm365_evm_snd_data); #endif dm365_init_rtc(); dm365_init_ks(&dm365evm_ks_data); dm365_init_spi0(BIT(0), dm365_evm_spi_info, ARRAY_SIZE(dm365_evm_spi_info)); } MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM") .atag_offset = 0x100, .map_io = dm365_evm_map_io, .init_irq = davinci_irq_init, .init_time = davinci_timer_init, .init_machine = dm365_evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, .restart = davinci_restart, MACHINE_END
gpl-2.0
MeshSr/linux-meshsr
net/sched/cls_flow.c
2301
15797
/* * net/sched/cls_flow.c Generic flow classifier * * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/pkt_cls.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/if_vlan.h> #include <linux/slab.h> #include <linux/module.h> #include <net/pkt_cls.h> #include <net/ip.h> #include <net/route.h> #include <net/flow_keys.h> #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include <net/netfilter/nf_conntrack.h> #endif struct flow_head { struct list_head filters; }; struct flow_filter { struct list_head list; struct tcf_exts exts; struct tcf_ematch_tree ematches; struct timer_list perturb_timer; u32 perturb_period; u32 handle; u32 nkeys; u32 keymask; u32 mode; u32 mask; u32 xor; u32 rshift; u32 addend; u32 divisor; u32 baseclass; u32 hashrnd; }; static const struct tcf_ext_map flow_ext_map = { .action = TCA_FLOW_ACT, .police = TCA_FLOW_POLICE, }; static inline u32 addr_fold(void *addr) { unsigned long a = (unsigned long)addr; return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? 
a >> 32 : 0); } static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) { if (flow->src) return ntohl(flow->src); return addr_fold(skb->sk); } static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) { if (flow->dst) return ntohl(flow->dst); return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; } static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow) { return flow->ip_proto; } static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) { if (flow->ports) return ntohs(flow->port16[0]); return addr_fold(skb->sk); } static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) { if (flow->ports) return ntohs(flow->port16[1]); return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; } static u32 flow_get_iif(const struct sk_buff *skb) { return skb->skb_iif; } static u32 flow_get_priority(const struct sk_buff *skb) { return skb->priority; } static u32 flow_get_mark(const struct sk_buff *skb) { return skb->mark; } static u32 flow_get_nfct(const struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) return addr_fold(skb->nfct); #else return 0; #endif } #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #define CTTUPLE(skb, member) \ ({ \ enum ip_conntrack_info ctinfo; \ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \ if (ct == NULL) \ goto fallback; \ ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \ }) #else #define CTTUPLE(skb, member) \ ({ \ goto fallback; \ 0; \ }) #endif static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) { switch (skb->protocol) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, src.u3.ip)); case htons(ETH_P_IPV6): return ntohl(CTTUPLE(skb, src.u3.ip6[3])); } fallback: return flow_get_src(skb, flow); } static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) { switch (skb->protocol) { 
case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, dst.u3.ip)); case htons(ETH_P_IPV6): return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); } fallback: return flow_get_dst(skb, flow); } static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) { return ntohs(CTTUPLE(skb, src.u.all)); fallback: return flow_get_proto_src(skb, flow); } static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) { return ntohs(CTTUPLE(skb, dst.u.all)); fallback: return flow_get_proto_dst(skb, flow); } static u32 flow_get_rtclassid(const struct sk_buff *skb) { #ifdef CONFIG_IP_ROUTE_CLASSID if (skb_dst(skb)) return skb_dst(skb)->tclassid; #endif return 0; } static u32 flow_get_skuid(const struct sk_buff *skb) { if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid; return from_kuid(&init_user_ns, skuid); } return 0; } static u32 flow_get_skgid(const struct sk_buff *skb) { if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) { kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid; return from_kgid(&init_user_ns, skgid); } return 0; } static u32 flow_get_vlan_tag(const struct sk_buff *skb) { u16 uninitialized_var(tag); if (vlan_get_tag(skb, &tag) < 0) return 0; return tag & VLAN_VID_MASK; } static u32 flow_get_rxhash(struct sk_buff *skb) { return skb_get_rxhash(skb); } static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) { switch (key) { case FLOW_KEY_SRC: return flow_get_src(skb, flow); case FLOW_KEY_DST: return flow_get_dst(skb, flow); case FLOW_KEY_PROTO: return flow_get_proto(skb, flow); case FLOW_KEY_PROTO_SRC: return flow_get_proto_src(skb, flow); case FLOW_KEY_PROTO_DST: return flow_get_proto_dst(skb, flow); case FLOW_KEY_IIF: return flow_get_iif(skb); case FLOW_KEY_PRIORITY: return flow_get_priority(skb); case FLOW_KEY_MARK: return flow_get_mark(skb); case FLOW_KEY_NFCT: return flow_get_nfct(skb); case FLOW_KEY_NFCT_SRC: return 
flow_get_nfct_src(skb, flow); case FLOW_KEY_NFCT_DST: return flow_get_nfct_dst(skb, flow); case FLOW_KEY_NFCT_PROTO_SRC: return flow_get_nfct_proto_src(skb, flow); case FLOW_KEY_NFCT_PROTO_DST: return flow_get_nfct_proto_dst(skb, flow); case FLOW_KEY_RTCLASSID: return flow_get_rtclassid(skb); case FLOW_KEY_SKUID: return flow_get_skuid(skb); case FLOW_KEY_SKGID: return flow_get_skgid(skb); case FLOW_KEY_VLAN_TAG: return flow_get_vlan_tag(skb); case FLOW_KEY_RXHASH: return flow_get_rxhash(skb); default: WARN_ON(1); return 0; } } #define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \ (1 << FLOW_KEY_DST) | \ (1 << FLOW_KEY_PROTO) | \ (1 << FLOW_KEY_PROTO_SRC) | \ (1 << FLOW_KEY_PROTO_DST) | \ (1 << FLOW_KEY_NFCT_SRC) | \ (1 << FLOW_KEY_NFCT_DST) | \ (1 << FLOW_KEY_NFCT_PROTO_SRC) | \ (1 << FLOW_KEY_NFCT_PROTO_DST)) static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { struct flow_head *head = tp->root; struct flow_filter *f; u32 keymask; u32 classid; unsigned int n, key; int r; list_for_each_entry(f, &head->filters, list) { u32 keys[FLOW_KEY_MAX + 1]; struct flow_keys flow_keys; if (!tcf_em_tree_match(skb, &f->ematches, NULL)) continue; keymask = f->keymask; if (keymask & FLOW_KEYS_NEEDED) skb_flow_dissect(skb, &flow_keys); for (n = 0; n < f->nkeys; n++) { key = ffs(keymask) - 1; keymask &= ~(1 << key); keys[n] = flow_key_get(skb, key, &flow_keys); } if (f->mode == FLOW_MODE_HASH) classid = jhash2(keys, f->nkeys, f->hashrnd); else { classid = keys[0]; classid = (classid & f->mask) ^ f->xor; classid = (classid >> f->rshift) + f->addend; } if (f->divisor) classid %= f->divisor; res->class = 0; res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid); r = tcf_exts_exec(skb, &f->exts, res); if (r < 0) continue; return r; } return -1; } static void flow_perturbation(unsigned long arg) { struct flow_filter *f = (struct flow_filter *)arg; get_random_bytes(&f->hashrnd, 4); if (f->perturb_period) mod_timer(&f->perturb_timer, 
jiffies + f->perturb_period); } static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { [TCA_FLOW_KEYS] = { .type = NLA_U32 }, [TCA_FLOW_MODE] = { .type = NLA_U32 }, [TCA_FLOW_BASECLASS] = { .type = NLA_U32 }, [TCA_FLOW_RSHIFT] = { .type = NLA_U32 }, [TCA_FLOW_ADDEND] = { .type = NLA_U32 }, [TCA_FLOW_MASK] = { .type = NLA_U32 }, [TCA_FLOW_XOR] = { .type = NLA_U32 }, [TCA_FLOW_DIVISOR] = { .type = NLA_U32 }, [TCA_FLOW_ACT] = { .type = NLA_NESTED }, [TCA_FLOW_POLICE] = { .type = NLA_NESTED }, [TCA_FLOW_EMATCHES] = { .type = NLA_NESTED }, [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, }; static int flow_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg) { struct flow_head *head = tp->root; struct flow_filter *f; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FLOW_MAX + 1]; struct tcf_exts e; struct tcf_ematch_tree t; unsigned int nkeys = 0; unsigned int perturb_period = 0; u32 baseclass = 0; u32 keymask = 0; u32 mode; int err; if (opt == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy); if (err < 0) return err; if (tb[TCA_FLOW_BASECLASS]) { baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]); if (TC_H_MIN(baseclass) == 0) return -EINVAL; } if (tb[TCA_FLOW_KEYS]) { keymask = nla_get_u32(tb[TCA_FLOW_KEYS]); nkeys = hweight32(keymask); if (nkeys == 0) return -EINVAL; if (fls(keymask) - 1 > FLOW_KEY_MAX) return -EOPNOTSUPP; if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) && sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns) return -EOPNOTSUPP; } err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &flow_ext_map); if (err < 0) return err; err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t); if (err < 0) goto err1; f = (struct flow_filter *)*arg; if (f != NULL) { err = -EINVAL; if (f->handle != handle && handle) goto err2; mode = f->mode; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && 
nkeys > 1) goto err2; if (mode == FLOW_MODE_HASH) perturb_period = f->perturb_period; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } } else { err = -EINVAL; if (!handle) goto err2; if (!tb[TCA_FLOW_KEYS]) goto err2; mode = FLOW_MODE_MAP; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) goto err2; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } if (TC_H_MAJ(baseclass) == 0) baseclass = TC_H_MAKE(tp->q->handle, baseclass); if (TC_H_MIN(baseclass) == 0) baseclass = TC_H_MAKE(baseclass, 1); err = -ENOBUFS; f = kzalloc(sizeof(*f), GFP_KERNEL); if (f == NULL) goto err2; f->handle = handle; f->mask = ~0U; get_random_bytes(&f->hashrnd, 4); f->perturb_timer.function = flow_perturbation; f->perturb_timer.data = (unsigned long)f; init_timer_deferrable(&f->perturb_timer); } tcf_exts_change(tp, &f->exts, &e); tcf_em_tree_change(tp, &f->ematches, &t); tcf_tree_lock(tp); if (tb[TCA_FLOW_KEYS]) { f->keymask = keymask; f->nkeys = nkeys; } f->mode = mode; if (tb[TCA_FLOW_MASK]) f->mask = nla_get_u32(tb[TCA_FLOW_MASK]); if (tb[TCA_FLOW_XOR]) f->xor = nla_get_u32(tb[TCA_FLOW_XOR]); if (tb[TCA_FLOW_RSHIFT]) f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); if (tb[TCA_FLOW_ADDEND]) f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); if (tb[TCA_FLOW_DIVISOR]) f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); if (baseclass) f->baseclass = baseclass; f->perturb_period = perturb_period; del_timer(&f->perturb_timer); if (perturb_period) mod_timer(&f->perturb_timer, jiffies + perturb_period); if (*arg == 0) list_add_tail(&f->list, &head->filters); tcf_tree_unlock(tp); *arg = (unsigned long)f; return 0; err2: tcf_em_tree_destroy(tp, &t); err1: tcf_exts_destroy(tp, &e); return err; } static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f) { del_timer_sync(&f->perturb_timer); 
tcf_exts_destroy(tp, &f->exts); tcf_em_tree_destroy(tp, &f->ematches); kfree(f); } static int flow_delete(struct tcf_proto *tp, unsigned long arg) { struct flow_filter *f = (struct flow_filter *)arg; tcf_tree_lock(tp); list_del(&f->list); tcf_tree_unlock(tp); flow_destroy_filter(tp, f); return 0; } static int flow_init(struct tcf_proto *tp) { struct flow_head *head; head = kzalloc(sizeof(*head), GFP_KERNEL); if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD(&head->filters); tp->root = head; return 0; } static void flow_destroy(struct tcf_proto *tp) { struct flow_head *head = tp->root; struct flow_filter *f, *next; list_for_each_entry_safe(f, next, &head->filters, list) { list_del(&f->list); flow_destroy_filter(tp, f); } kfree(head); } static unsigned long flow_get(struct tcf_proto *tp, u32 handle) { struct flow_head *head = tp->root; struct flow_filter *f; list_for_each_entry(f, &head->filters, list) if (f->handle == handle) return (unsigned long)f; return 0; } static void flow_put(struct tcf_proto *tp, unsigned long f) { } static int flow_dump(struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct flow_filter *f = (struct flow_filter *)fh; struct nlattr *nest; if (f == NULL) return skb->len; t->tcm_handle = f->handle; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) || nla_put_u32(skb, TCA_FLOW_MODE, f->mode)) goto nla_put_failure; if (f->mask != ~0 || f->xor != 0) { if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) || nla_put_u32(skb, TCA_FLOW_XOR, f->xor)) goto nla_put_failure; } if (f->rshift && nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift)) goto nla_put_failure; if (f->addend && nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend)) goto nla_put_failure; if (f->divisor && nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor)) goto nla_put_failure; if (f->baseclass && nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass)) goto nla_put_failure; if (f->perturb_period && 
nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ)) goto nla_put_failure; if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) goto nla_put_failure; #ifdef CONFIG_NET_EMATCH if (f->ematches.hdr.nmatches && tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0) goto nla_put_failure; #endif nla_nest_end(skb, nest); if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0) goto nla_put_failure; return skb->len; nla_put_failure: nlmsg_trim(skb, nest); return -1; } static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg) { struct flow_head *head = tp->root; struct flow_filter *f; list_for_each_entry(f, &head->filters, list) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long)f, arg) < 0) { arg->stop = 1; break; } skip: arg->count++; } } static struct tcf_proto_ops cls_flow_ops __read_mostly = { .kind = "flow", .classify = flow_classify, .init = flow_init, .destroy = flow_destroy, .change = flow_change, .delete = flow_delete, .get = flow_get, .put = flow_put, .dump = flow_dump, .walk = flow_walk, .owner = THIS_MODULE, }; static int __init cls_flow_init(void) { return register_tcf_proto_ops(&cls_flow_ops); } static void __exit cls_flow_exit(void) { unregister_tcf_proto_ops(&cls_flow_ops); } module_init(cls_flow_init); module_exit(cls_flow_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("TC flow classifier");
gpl-2.0
Koloses/kernel_j5
drivers/net/Space.c
2301
5377
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Holds initial configuration information for devices. * * Version: @(#)Space.c 1.0.7 08/12/93 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Donald J. Becker, <becker@scyld.com> * * Changelog: * Stephen Hemminger (09/2003) * - get rid of pre-linked dev list, dynamic device allocation * Paul Gortmaker (03/2002) * - struct init cleanup, enable multiple ISA autoprobes. * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 09/1999 * - fix sbni: s/device/net_device/ * Paul Gortmaker (06/98): * - sort probes in a sane way, make sure all (safe) probes * get run once & failed autoprobes don't autoprobe again. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/netlink.h> /* A unified ethernet device probe. This is the easiest way to have every ethernet adaptor have the name "eth[0123...]". 
*/ extern struct net_device *hp100_probe(int unit); extern struct net_device *ultra_probe(int unit); extern struct net_device *wd_probe(int unit); extern struct net_device *ne_probe(int unit); extern struct net_device *fmv18x_probe(int unit); extern struct net_device *i82596_probe(int unit); extern struct net_device *ni65_probe(int unit); extern struct net_device *sonic_probe(int unit); extern struct net_device *smc_init(int unit); extern struct net_device *atarilance_probe(int unit); extern struct net_device *sun3lance_probe(int unit); extern struct net_device *sun3_82586_probe(int unit); extern struct net_device *apne_probe(int unit); extern struct net_device *cs89x0_probe(int unit); extern struct net_device *mvme147lance_probe(int unit); extern struct net_device *tc515_probe(int unit); extern struct net_device *lance_probe(int unit); extern struct net_device *mac8390_probe(int unit); extern struct net_device *mac89x0_probe(int unit); extern struct net_device *cops_probe(int unit); extern struct net_device *ltpc_probe(void); /* Fibre Channel adapters */ extern int iph5526_probe(struct net_device *dev); /* SBNI adapters */ extern int sbni_probe(int unit); struct devprobe2 { struct net_device *(*probe)(int unit); int status; /* non-zero if autoprobe has failed */ }; static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe) { struct net_device *dev; for (; p->probe; p++) { if (autoprobe && p->status) continue; dev = p->probe(unit); if (!IS_ERR(dev)) return 0; if (autoprobe) p->status = PTR_ERR(dev); } return -ENODEV; } /* * ISA probes that touch addresses < 0x400 (including those that also * look for EISA/PCI cards in addition to ISA cards). 
*/ static struct devprobe2 isa_probes[] __initdata = { #if defined(CONFIG_HP100) && defined(CONFIG_ISA) /* ISA, EISA */ {hp100_probe, 0}, #endif #ifdef CONFIG_3C515 {tc515_probe, 0}, #endif #ifdef CONFIG_ULTRA {ultra_probe, 0}, #endif #ifdef CONFIG_WD80x3 {wd_probe, 0}, #endif #if defined(CONFIG_NE2000) || \ defined(CONFIG_NE_H8300) /* ISA (use ne2k-pci for PCI cards) */ {ne_probe, 0}, #endif #ifdef CONFIG_LANCE /* ISA/VLB (use pcnet32 for PCI cards) */ {lance_probe, 0}, #endif #ifdef CONFIG_SMC9194 {smc_init, 0}, #endif #ifdef CONFIG_CS89x0 #ifndef CONFIG_CS89x0_PLATFORM {cs89x0_probe, 0}, #endif #endif #if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */ {i82596_probe, 0}, #endif #ifdef CONFIG_NI65 {ni65_probe, 0}, #endif {NULL, 0}, }; static struct devprobe2 m68k_probes[] __initdata = { #ifdef CONFIG_ATARILANCE /* Lance-based Atari ethernet boards */ {atarilance_probe, 0}, #endif #ifdef CONFIG_SUN3LANCE /* sun3 onboard Lance chip */ {sun3lance_probe, 0}, #endif #ifdef CONFIG_SUN3_82586 /* sun3 onboard Intel 82586 chip */ {sun3_82586_probe, 0}, #endif #ifdef CONFIG_APNE /* A1200 PCMCIA NE2000 */ {apne_probe, 0}, #endif #ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */ {mvme147lance_probe, 0}, #endif #ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */ {mac8390_probe, 0}, #endif #ifdef CONFIG_MAC89x0 {mac89x0_probe, 0}, #endif {NULL, 0}, }; /* * Unified ethernet device probe, segmented per architecture and * per bus interface. This drives the legacy devices only for now. */ static void __init ethif_probe2(int unit) { unsigned long base_addr = netdev_boot_base("eth", unit); if (base_addr == 1) return; (void)( probe_list2(unit, m68k_probes, base_addr == 0) && probe_list2(unit, isa_probes, base_addr == 0)); } /* Statically configured drivers -- order matters here. 
*/ static int __init net_olddevs_init(void) { int num; #ifdef CONFIG_SBNI for (num = 0; num < 8; ++num) sbni_probe(num); #endif for (num = 0; num < 8; ++num) ethif_probe2(num); #ifdef CONFIG_COPS cops_probe(0); cops_probe(1); cops_probe(2); #endif #ifdef CONFIG_LTPC ltpc_probe(); #endif return 0; } device_initcall(net_olddevs_init);
gpl-2.0
linux4kix/linux-linaro-stable-mx6
drivers/scsi/in2000.c
2557
72895
/* * in2000.c - Linux device driver for the * Always IN2000 ISA SCSI card. * * Copyright (c) 1996 John Shifflett, GeoLog Consulting * john@geolog.com * jshiffle@netcom.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. * * Drew Eckhardt's excellent 'Generic NCR5380' sources provided * much of the inspiration and some of the code for this driver. * The Linux IN2000 driver distributed in the Linux kernels through * version 1.2.13 was an extremely valuable reference on the arcane * (and still mysterious) workings of the IN2000's fifo. It also * is where I lifted in2000_biosparam(), the gist of the card * detection scheme, and other bits of code. Many thanks to the * talented and courageous people who wrote, contributed to, and * maintained that driver (including Brad McLean, Shaun Savage, * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey, * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric * Youngdale). I should also mention the driver written by * Hamish Macdonald for the (GASP!) Amiga A2091 card, included * in the Linux-m68k distribution; it gave me a good initial * understanding of the proper way to run a WD33c93 chip, and I * ended up stealing lots of code from it. 
* * _This_ driver is (I feel) an improvement over the old one in * several respects: * - All problems relating to the data size of a SCSI request are * gone (as far as I know). The old driver couldn't handle * swapping to partitions because that involved 4k blocks, nor * could it deal with the st.c tape driver unmodified, because * that usually involved 4k - 32k blocks. The old driver never * quite got away from a morbid dependence on 2k block sizes - * which of course is the size of the card's fifo. * * - Target Disconnection/Reconnection is now supported. Any * system with more than one device active on the SCSI bus * will benefit from this. The driver defaults to what I'm * calling 'adaptive disconnect' - meaning that each command * is evaluated individually as to whether or not it should * be run with the option to disconnect/reselect (if the * device chooses), or as a "SCSI-bus-hog". * * - Synchronous data transfers are now supported. Because there * are a few devices (and many improperly terminated systems) * that choke when doing sync, the default is sync DISABLED * for all devices. This faster protocol can (and should!) * be enabled on selected devices via the command-line. * * - Runtime operating parameters can now be specified through * either the LILO or the 'insmod' command line. For LILO do: * "in2000=blah,blah,blah" * and with insmod go like: * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah" * The defaults should be good for most people. See the comment * for 'setup_strings' below for more details. * * - The old driver relied exclusively on what the Western Digital * docs call "Combination Level 2 Commands", which are a great * idea in that the CPU is relieved of a lot of interrupt * overhead. However, by accepting a certain (user-settable) * amount of additional interrupts, this driver achieves * better control over the SCSI bus, and data transfers are * almost as fast while being much easier to define, track, * and debug. 
* * - You can force detection of a card whose BIOS has been disabled. * * - Multiple IN2000 cards might almost be supported. I've tried to * keep it in mind, but have no way to test... * * * TODO: * tagged queuing. multiple cards. * * * NOTE: * When using this or any other SCSI driver as a module, you'll * find that with the stock kernel, at most _two_ SCSI hard * drives will be linked into the device list (ie, usable). * If your IN2000 card has more than 2 disks on its bus, you * might want to change the define of 'SD_EXTRA_DEVS' in the * 'hosts.h' file from 2 to whatever is appropriate. It took * me a while to track down this surprisingly obscure and * undocumented little "feature". * * * People with bug reports, wish-lists, complaints, comments, * or improvements are asked to pah-leeez email me (John Shifflett) * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get * this thing into as good a shape as possible, and I'm positive * there are lots of lurking bugs and "Stupid Places". * * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk> * - Using new_eh handler * - Hopefully got all the locking right again * See "FIXME" notes for items that could do with more work */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/ioport.h> #include <linux/stat.h> #include <asm/io.h> #include "scsi.h" #include <scsi/scsi_host.h> #define IN2000_VERSION "1.33-2.5" #define IN2000_DATE "2002/11/03" #include "in2000.h" /* * 'setup_strings' is a single string used to pass operating parameters and * settings from the kernel/module command-line to the driver. 'setup_args[]' * is an array of strings that define the compile-time default values for * these settings. If Linux boots with a LILO or insmod command-line, those * settings are combined with 'setup_args[]'. 
Note that LILO command-lines * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix. * The driver recognizes the following keywords (lower case required) and * arguments: * * - ioport:addr -Where addr is IO address of a (usually ROM-less) card. * - noreset -No optional args. Prevents SCSI bus reset at boot time. * - nosync:x -x is a bitmask where the 1st 7 bits correspond with * the 7 possible SCSI devices (bit 0 for device #0, etc). * Set a bit to PREVENT sync negotiation on that device. * The driver default is sync DISABLED on all devices. * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer * period. Default is 500; acceptable values are 250 - 1000. * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them. * x = 1 does 'adaptive' disconnects, which is the default * and generally the best choice. * - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes * various types of debug output to printed - see the DB_xxx * defines in in2000.h * - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that * determines how the /proc interface works and what it * does - see the PR_xxx defines in in2000.h * * Syntax Notes: * - Numeric arguments can be decimal or the '0x' form of hex notation. There * _must_ be a colon between a keyword and its numeric argument, with no * spaces. * - Keywords are separated by commas, no spaces, in the standard kernel * command-line manner. * - A keyword in the 'nth' comma-separated command-line member will overwrite * the 'nth' element of setup_args[]. A blank command-line member (in * other words, a comma with no preceding keyword) will _not_ overwrite * the corresponding setup_args[] element. * * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'): * - in2000=ioport:0x220,noreset * - in2000=period:250,disconnect:2,nosync:0x03 * - in2000=debug:0x1e * - in2000=proc:3 */ /* Normally, no defaults are specified... 
*/ static char *setup_args[] = { "", "", "", "", "", "", "", "", "" }; /* filled in by 'insmod' */ static char *setup_strings; module_param(setup_strings, charp, 0); static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num) { write1_io(reg_num, IO_WD_ADDR); return read1_io(IO_WD_DATA); } #define READ_AUX_STAT() read1_io(IO_WD_ASR) static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value) { write1_io(reg_num, IO_WD_ADDR); write1_io(value, IO_WD_DATA); } static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd) { /* while (READ_AUX_STAT() & ASR_CIP) printk("|");*/ write1_io(WD_COMMAND, IO_WD_ADDR); write1_io(cmd, IO_WD_DATA); } static uchar read_1_byte(struct IN2000_hostdata *hostdata) { uchar asr, x = 0; write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80); do { asr = READ_AUX_STAT(); if (asr & ASR_DBR) x = read_3393(hostdata, WD_DATA); } while (!(asr & ASR_INT)); return x; } static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value) { write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR); write1_io((value >> 16), IO_WD_DATA); write1_io((value >> 8), IO_WD_DATA); write1_io(value, IO_WD_DATA); } static unsigned long read_3393_count(struct IN2000_hostdata *hostdata) { unsigned long value; write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR); value = read1_io(IO_WD_DATA) << 16; value |= read1_io(IO_WD_DATA) << 8; value |= read1_io(IO_WD_DATA); return value; } /* The 33c93 needs to be told which direction a command transfers its * data; we use this function to figure it out. Returns true if there * will be a DATA_OUT phase with this command, false otherwise. * (Thanks to Joerg Dorchain for the research and suggestion.) 
*/ static int is_dir_out(Scsi_Cmnd * cmd) { switch (cmd->cmnd[0]) { case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER: case WRITE_VERIFY: case WRITE_VERIFY_12: case COMPARE: case COPY: case COPY_VERIFY: case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12: case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT: case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK: case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG: case 0xea: return 1; default: return 0; } } static struct sx_period sx_table[] = { {1, 0x20}, {252, 0x20}, {376, 0x30}, {500, 0x40}, {624, 0x50}, {752, 0x60}, {876, 0x70}, {1000, 0x00}, {0, 0} }; static int round_period(unsigned int period) { int x; for (x = 1; sx_table[x].period_ns; x++) { if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) { return x; } } return 7; } static uchar calc_sync_xfer(unsigned int period, unsigned int offset) { uchar result; period *= 4; /* convert SDTR code to ns */ result = sx_table[round_period(period)].reg_value; result |= (offset < OPTIMUM_SX_OFF) ? 
offset : OPTIMUM_SX_OFF; return result; } static void in2000_execute(struct Scsi_Host *instance); static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; Scsi_Cmnd *tmp; instance = cmd->device->host; hostdata = (struct IN2000_hostdata *) instance->hostdata; DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0])) /* Set up a few fields in the Scsi_Cmnd structure for our own use: * - host_scribble is the pointer to the next cmd in the input queue * - scsi_done points to the routine we call when a cmd is finished * - result is what you'd expect */ cmd->host_scribble = NULL; cmd->scsi_done = done; cmd->result = 0; /* We use the Scsi_Pointer structure that's included with each command * as a scratchpad (as it's intended to be used!). The handy thing about * the SCp.xxx fields is that they're always associated with a given * cmd, and are preserved across disconnect-reselect. This means we * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages * if we keep all the critical pointers and counters in SCp: * - SCp.ptr is the pointer into the RAM buffer * - SCp.this_residual is the size of that buffer * - SCp.buffer points to the current scatter-gather buffer * - SCp.buffers_residual tells us how many S.G. 
buffers there are * - SCp.have_data_in helps keep track of >2048 byte transfers * - SCp.sent_command is not used * - SCp.phase records this command's SRCID_ER bit setting */ if (scsi_bufflen(cmd)) { cmd->SCp.buffer = scsi_sglist(cmd); cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; } else { cmd->SCp.buffer = NULL; cmd->SCp.buffers_residual = 0; cmd->SCp.ptr = NULL; cmd->SCp.this_residual = 0; } cmd->SCp.have_data_in = 0; /* We don't set SCp.phase here - that's done in in2000_execute() */ /* WD docs state that at the conclusion of a "LEVEL2" command, the * status byte can be retrieved from the LUN register. Apparently, * this is the case only for *uninterrupted* LEVEL2 commands! If * there are any unexpected phases entered, even if they are 100% * legal (different devices may choose to do things differently), * the LEVEL2 command sequence is exited. This often occurs prior * to receiving the status byte, in which case the driver does a * status phase interrupt and gets the status byte on its own. * While such a command can then be "resumed" (ie restarted to * finish up as a LEVEL2 command), the LUN register will NOT be * a valid status byte at the command's conclusion, and we must * use the byte obtained during the earlier interrupt. Here, we * preset SCp.Status to an illegal value (0xff) so that when * this command finally completes, we can tell where the actual * status byte is stored. */ cmd->SCp.Status = ILLEGAL_STATUS_BYTE; /* We need to disable interrupts before messing with the input * queue and calling in2000_execute(). */ /* * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE * commands are added to the head of the queue so that the desired * sense data is not lost before REQUEST_SENSE executes. 
*/ if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) { cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { /* find the end of the queue */ for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble); tmp->host_scribble = (uchar *) cmd; } /* We know that there's at least one command in 'input_Q' now. * Go see if any of them are runnable! */ in2000_execute(cmd->device->host); DB(DB_QUEUE_COMMAND, printk(")Q ")) return 0; } static DEF_SCSI_QCMD(in2000_queuecommand) /* * This routine attempts to start a scsi command. If the host_card is * already connected, we give up immediately. Otherwise, look through * the input_Q, using the first command we find that's intended * for a currently non-busy target/lun. * Note that this function is always called with interrupts already * disabled (either from in2000_queuecommand() or in2000_intr()). */ static void in2000_execute(struct Scsi_Host *instance) { struct IN2000_hostdata *hostdata; Scsi_Cmnd *cmd, *prev; int i; unsigned short *sp; unsigned short f; unsigned short flushbuf[16]; hostdata = (struct IN2000_hostdata *) instance->hostdata; DB(DB_EXECUTE, printk("EX(")) if (hostdata->selecting || hostdata->connected) { DB(DB_EXECUTE, printk(")EX-0 ")) return; } /* * Search through the input_Q for a command destined * for an idle target/lun. 
*/ cmd = (Scsi_Cmnd *) hostdata->input_Q; prev = NULL; while (cmd) { if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))) break; prev = cmd; cmd = (Scsi_Cmnd *) cmd->host_scribble; } /* quit if queue empty or all possible targets are busy */ if (!cmd) { DB(DB_EXECUTE, printk(")EX-1 ")) return; } /* remove command from queue */ if (prev) prev->host_scribble = cmd->host_scribble; else hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble; #ifdef PROC_STATISTICS hostdata->cmd_cnt[cmd->device->id]++; #endif /* * Start the selection process */ if (is_dir_out(cmd)) write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id); else write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); /* Now we need to figure out whether or not this command is a good * candidate for disconnect/reselect. We guess to the best of our * ability, based on a set of hierarchical rules. When several * devices are operating simultaneously, disconnects are usually * an advantage. In a single device system, or if only 1 device * is being accessed, transfers usually go faster if disconnects * are not allowed: * * + Commands should NEVER disconnect if hostdata->disconnect = * DIS_NEVER (this holds for tape drives also), and ALWAYS * disconnect if hostdata->disconnect = DIS_ALWAYS. * + Tape drive commands should always be allowed to disconnect. * + Disconnect should be allowed if disconnected_Q isn't empty. * + Commands should NOT disconnect if input_Q is empty. * + Disconnect should be allowed if there are commands in input_Q * for a different target/lun. In this case, the other commands * should be made disconnect-able, if not already. * * I know, I know - this code would flunk me out of any * "C Programming 101" class ever offered. But it's easy * to change around and experiment with for now. 
*/ cmd->SCp.phase = 0; /* assume no disconnect */ if (hostdata->disconnect == DIS_NEVER) goto no; if (hostdata->disconnect == DIS_ALWAYS) goto yes; if (cmd->device->type == 1) /* tape drive? */ goto yes; if (hostdata->disconnected_Q) /* other commands disconnected? */ goto yes; if (!(hostdata->input_Q)) /* input_Q empty? */ goto no; for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) { if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) { for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) prev->SCp.phase = 1; goto yes; } } goto no; yes: cmd->SCp.phase = 1; #ifdef PROC_STATISTICS hostdata->disc_allowed_cnt[cmd->device->id]++; #endif no: write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0)); write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun); write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { /* * Do a 'Select-With-ATN' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * CSR_SELECT: success - proceed. */ hostdata->selecting = cmd; /* Every target has its own synchronous transfer setting, kept in * the sync_xfer array, and a corresponding status byte in sync_stat[]. * Each target's sync_stat[] entry is initialized to SS_UNSET, and its * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET * means that the parameters are undetermined as yet, and that we * need to send an SDTR message to this device after selection is * complete. We set SS_FIRST to tell the interrupt routine to do so, * unless we don't want to even _try_ synchronous transfers: In this * case we set SS_SET to make the defaults final. 
*/ if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) { if (hostdata->sync_off & (1 << cmd->device->id)) hostdata->sync_stat[cmd->device->id] = SS_SET; else hostdata->sync_stat[cmd->device->id] = SS_FIRST; } hostdata->state = S_SELECTING; write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */ write_3393_cmd(hostdata, WD_CMD_SEL_ATN); } else { /* * Do a 'Select-With-ATN-Xfer' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * anything else: success - proceed. */ hostdata->connected = cmd; write_3393(hostdata, WD_COMMAND_PHASE, 0); /* copy command_descriptor_block into WD chip * (take advantage of auto-incrementing) */ write1_io(WD_CDB_1, IO_WD_ADDR); for (i = 0; i < cmd->cmd_len; i++) write1_io(cmd->cmnd[i], IO_WD_DATA); /* The wd33c93 only knows about Group 0, 1, and 5 commands when * it's doing a 'select-and-transfer'. To be safe, we write the * size of the CDB into the OWN_ID register for every case. This * way there won't be problems with vendor-unique, audio, etc. */ write_3393(hostdata, WD_OWN_ID, cmd->cmd_len); /* When doing a non-disconnect command, we can save ourselves a DATA * phase interrupt later by setting everything up now. With writes we * need to pre-fill the fifo; if there's room for the 32 flush bytes, * put them in there too - that'll avoid a fifo interrupt. Reads are * somewhat simpler. * KLUDGE NOTE: It seems that you can't completely fill the fifo here: * This results in the IO_FIFO_COUNT register rolling over to zero, * and apparently the gate array logic sees this as empty, not full, * so the 3393 chip is never signalled to start reading from the * fifo. Or maybe it's seen as a permanent fifo interrupt condition. * Regardless, we fix this by temporarily pretending that the fifo * is 16 bytes smaller. (I see now that the old driver has a comment * about "don't fill completely" in an analogous place - must be the * same deal.) 
   This results in CDROM, swap partitions, and tape drives
 * needing an extra interrupt per write command - I think we can live
 * with that! */

		if (!(cmd->SCp.phase)) {

			/* Non-disconnecting command: preload the transfer count
			 * now and, for a write, as much data as fits, so the
			 * DATA phase needs no further attention.
			 */
			write_3393_count(hostdata, cmd->SCp.this_residual);
			write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
			write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter, write mode */

			if (is_dir_out(cmd)) {
				hostdata->fifo = FI_FIFO_WRITING;

				/* Stay 16 bytes short of a full fifo - see the
				 * KLUDGE NOTE above. */
				if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16))
					i = IN2000_FIFO_SIZE - 16;
				cmd->SCp.have_data_in = i;	/* this much data in fifo */
				i >>= 1;	/* Gulp. Assuming modulo 2. */
				sp = (unsigned short *) cmd->SCp.ptr;
				f = hostdata->io_base + IO_FIFO;

#ifdef FAST_WRITE_IO

				FAST_WRITE2_IO();
#else
				while (i--)
					write2_io(*sp++, IO_FIFO);

#endif

				/* Is there room for the flush bytes? */

				if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
					sp = flushbuf;
					i = 16;

#ifdef FAST_WRITE_IO

					FAST_WRITE2_IO();
#else
					while (i--)
						write2_io(0, IO_FIFO);

#endif

				}
			}

			else {
				write1_io(0, IO_FIFO_READ);	/* put fifo in read mode */
				hostdata->fifo = FI_FIFO_READING;
				cmd->SCp.have_data_in = 0;	/* nothing transferred yet */
			}

		} else {
			write_3393_count(hostdata, 0);	/* this guarantees a DATA_PHASE interrupt */
		}
		hostdata->state = S_RUNNING_LEVEL2;
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
	}

/*
 * Since the SCSI bus can handle only 1 connection at a time,
 * we get out of here now. If the selection fails, or when
 * the command disconnects, we'll come back to this routine
 * to search the input_Q again...
 */

	DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))

}


/*
 * transfer_pio() - polled transfer of 'cnt' bytes between 'buf' and the
 * WD3393's data register, one byte at a time.  'data_in_dir' nonzero
 * means read from the chip into 'buf'.  Spins on the auxiliary status
 * register until the chip raises its interrupt condition; the interrupt
 * is deliberately left uncleared (see note at the bottom).
 */
static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata)
{
	uchar asr;

	DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? "in" : "out"))

	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393_count(hostdata, cnt);
	write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
	if (data_in_dir) {
		do {
			asr = READ_AUX_STAT();
			if (asr & ASR_DBR)	/* data buffer ready */
				*buf++ = read_3393(hostdata, WD_DATA);
		} while (!(asr & ASR_INT));
	} else {
		do {
			asr = READ_AUX_STAT();
			if (asr & ASR_DBR)	/* data buffer ready */
				write_3393(hostdata, WD_DATA, *buf++);
		} while (!(asr & ASR_INT));
	}

	/* Note: we are returning with the interrupt UN-cleared.
	 * Since (presumably) an entire I/O operation has
	 * completed, the bus phase is probably different, and
	 * the interrupt routine will discover this when it
	 * responds to the uncleared int.
	 */

}


/*
 * transfer_bytes() - start a DATA phase transfer for 'cmd' through the
 * card's 2k fifo.  Advances to the next scatter-gather segment if the
 * current one is exhausted, programs the WD3393's sync register and
 * count, then either arms a read (data arrives via later fifo/WD
 * interrupts) or starts a write and stuffs as much of the buffer into
 * the fifo as fits right now.
 */
static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
{
	struct IN2000_hostdata *hostdata;
	unsigned short *sp;
	unsigned short f;
	int i;

	hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata;

/* Normally, you'd expect 'this_residual' to be non-zero here.
 * In a series of scatter-gather transfers, however, this
 * routine will usually be called with 'this_residual' equal
 * to 0 and 'buffers_residual' non-zero. This means that a
 * previous transfer completed, clearing 'this_residual', and
 * now we need to setup the next scatter-gather buffer as the
 * source or destination for THIS transfer.
 */
	if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
		++cmd->SCp.buffer;
		--cmd->SCp.buffers_residual;
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
	}

/* Set up hardware registers */

	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
	write_3393_count(hostdata, cmd->SCp.this_residual);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
	write1_io(0, IO_FIFO_WRITE);	/* zero counter, assume write */

/* Reading is easy. Just issue the command and return - we'll
 * get an interrupt later when we have actual data to worry about.
 */
	if (data_in_dir) {
		write1_io(0, IO_FIFO_READ);
		if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
			hostdata->state = S_RUNNING_LEVEL2;
		} else
			write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
		hostdata->fifo = FI_FIFO_READING;
		cmd->SCp.have_data_in = 0;
		return;
	}

/* Writing is more involved - we'll start the WD chip and write as
 * much data to the fifo as we can right now. Later interrupts will
 * write any bytes that don't make it at this stage.
 */

	if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
		write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		hostdata->state = S_RUNNING_LEVEL2;
	} else
		write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
	hostdata->fifo = FI_FIFO_WRITING;
	sp = (unsigned short *) cmd->SCp.ptr;

	if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
		i = IN2000_FIFO_SIZE;
	cmd->SCp.have_data_in = i;
	i >>= 1;		/* Gulp. We assume this_residual is modulo 2 */
	f = hostdata->io_base + IO_FIFO;

#ifdef FAST_WRITE_IO

	FAST_WRITE2_IO();
#else
	while (i--)
		write2_io(*sp++, IO_FIFO);

#endif

}


/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
 * function in order to work in an SMP environment. (I'd be surprised
 * if the driver is ever used by anyone on a real multi-CPU motherboard,
 * but it _does_ need to be able to compile and run in an SMP kernel.)
 */

/*
 * in2000_intr() - interrupt handler for both interrupt sources on the
 * card: the WD3393 chip and the 2k fifo logic.  Dispatches on the
 * WD3393 status register through a large state machine; a fifo-only
 * interrupt (no ASR_INT) is serviced first and returns early.
 * Runs entirely under the host lock.
 */
static irqreturn_t in2000_intr(int irqnum, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *patch, *cmd;
	uchar asr, sr, phs, id, lun, *ucp, msg;
	int i, j;
	unsigned long length;
	unsigned short *sp;
	unsigned short f;
	unsigned long flags;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

/* Get the spin_lock and disable further ints, for SMP */

	spin_lock_irqsave(instance->host_lock, flags);

#ifdef PROC_STATISTICS
	hostdata->int_cnt++;
#endif

/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
 * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
 * with a big logic array, so it's a little different than what you might
 * expect). As far as I know, there's no reason that BOTH can't be active
 * at the same time, but there's a problem: while we can read the 3393
 * to tell if _it_ wants an interrupt, I don't know of a way to ask the
 * fifo the same question. The best we can do is check the 3393 and if
 * it _isn't_ the source of the interrupt, then we can be pretty sure
 * that the fifo is the culprit.
 *  UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
 *          IO_FIFO_COUNT register mirrors the fifo interrupt state. I
 *          assume that bit clear means interrupt active. As it turns
 *          out, the driver really doesn't need to check for this after
 *          all, so my remarks above about a 'problem' can safely be
 *          ignored. The way the logic is set up, there's no advantage
 *          (that I can see) to worrying about it.
 *
 * It seems that the fifo interrupt signal is negated when we extract
 * bytes during read or write bytes during write.
 *  - fifo will interrupt when data is moving from it to the 3393, and
 *    there are 31 (or less?) bytes left to go. This is sort of short-
 *    sighted: what if you don't WANT to do more? In any case, our
 *    response is to push more into the fifo - either actual data or
 *    dummy bytes if need be. Note that we apparently have to write at
 *    least 32 additional bytes to the fifo after an interrupt in order
 *    to get it to release the ones it was holding on to - writing fewer
 *    than 32 will result in another fifo int.
 *  UPDATE: Again, info from Bill Earnest makes this more understandable:
 *          32 bytes = two counts of the fifo counter register. He tells
 *          me that the fifo interrupt is a non-latching signal derived
 *          from a straightforward boolean interpretation of the 7
 *          highest bits of the fifo counter and the fifo-read/fifo-write
 *          state. Who'd a thought?
 */

	write1_io(0, IO_LED_ON);
	asr = READ_AUX_STAT();
	if (!(asr & ASR_INT)) {	/* no WD33c93 interrupt? */

/* Ok. This is definitely a FIFO-only interrupt.
 *
 * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
 * maybe more to come from the SCSI bus. Read as many as we can out of the
 * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
 * update have_data_in afterwards.
 *
 * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
 * into the WD3393 chip (I think the interrupt happens when there are 31
 * bytes left, but it may be fewer...). The 3393 is still waiting, so we
 * shove some more into the fifo, which gets things moving again. If the
 * original SCSI command specified more than 2048 bytes, there may still
 * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
 * Don't forget to update have_data_in. If we've already written out the
 * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
 * push out the remaining real data.
 *    (Big thanks to Bill Earnest for getting me out of the mud in here.)
 */

		cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
		CHECK_NULL(cmd, "fifo_int")

		if (hostdata->fifo == FI_FIFO_READING) {

			DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))

			sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			i = read1_io(IO_FIFO_COUNT) & 0xfe;
			i <<= 2;	/* # of words waiting in the fifo */
			f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

			FAST_READ2_IO();
#else
			while (i--)
				*sp++ = read2_io(IO_FIFO);

#endif

			i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			i <<= 1;	/* words transferred -> bytes */
			cmd->SCp.have_data_in += i;

		}

		else if (hostdata->fifo == FI_FIFO_WRITING) {

			DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))

/* If all bytes have been written to the fifo, flush out the stragglers.
 * Note that while writing 16 dummy words seems arbitrary, we don't
 * have another choice that I can see. What we really want is to read
 * the 3393 transfer count register (that would tell us how many bytes
 * needed flushing), but the TRANSFER_INFO command hasn't completed
 * yet (not enough bytes!) and that register won't be accessible. So,
 * we use 16 words - a number obtained through trial and error.
 *  UPDATE: Bill says this is exactly what Always does, so there.
 *          More thanks due him for help in this section.
 */

			if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
				i = 16;
				while (i--)	/* write 32 dummy bytes */
					write2_io(0, IO_FIFO);
			}

/* If there are still bytes left in the SCSI buffer, write as many as we
 * can out to the fifo.
 */

			else {
				sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i = cmd->SCp.this_residual - cmd->SCp.have_data_in;	/* bytes yet to go */
				j = read1_io(IO_FIFO_COUNT) & 0xfe;
				j <<= 2;	/* how many words the fifo has room for */
				if ((j << 1) > i)
					j = (i >> 1);
				while (j--)
					write2_io(*sp++, IO_FIFO);

				i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i <<= 1;	/* words written -> bytes */
				cmd->SCp.have_data_in += i;
			}

		}

		else {
			printk("*** Spurious FIFO interrupt ***");
		}

		write1_io(0, IO_LED_OFF);

/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
 * may also be asserted, but we don't bother to check it: we get more
 * detailed info from FIFO_READING and FIFO_WRITING (see below).
 */

	cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
	sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear the interrupt */
	phs = read_3393(hostdata, WD_COMMAND_PHASE);

	if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
		printk("\nNR:wd-intr-1\n");
		write1_io(0, IO_LED_OFF);

/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

	DB(DB_INTR, printk("{%02x:%02x-", asr, sr))

/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
 * guaranteed to be in response to the completion of the transfer.
 * If we were reading, there's probably data in the fifo that needs
 * to be copied into RAM - do that here. Also, we have to update
 * 'this_residual' and 'ptr' based on the contents of the
 * TRANSFER_COUNT register, in case the device decided to do an
 * intermediate disconnect (a device may do this if it has to
 * do a seek, or just to be nice and let other devices have
 * some bus time during long transfers).
 * After doing whatever is necessary with the fifo, we go on and
 * service the WD3393 interrupt normally.
 */

	if (hostdata->fifo == FI_FIFO_READING) {

/* buffer index = start-of-buffer + #-of-bytes-already-read */

		sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);

/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */

		i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
		i >>= 1;	/* Gulp. We assume this will always be modulo 2 */
		f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

		FAST_READ2_IO();
#else
		while (i--)
			*sp++ = read2_io(IO_FIFO);

#endif

		hostdata->fifo = FI_FIFO_UNUSED;
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

	else if (hostdata->fifo == FI_FIFO_WRITING) {
		hostdata->fifo = FI_FIFO_UNUSED;
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

/* Respond to the specific WD3393 interrupt - there are quite a few! */

	switch (sr) {

	case CSR_TIMEOUT:
		DB(DB_INTR, printk("TIMEOUT"))

		if (hostdata->state == S_RUNNING_LEVEL2)
			hostdata->connected = NULL;
		else {
			cmd = (Scsi_Cmnd *) hostdata->selecting;	/* get a valid cmd */
			CHECK_NULL(cmd, "csr_timeout")
			hostdata->selecting = NULL;
		}

		cmd->result = DID_NO_CONNECT << 16;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		cmd->scsi_done(cmd);

/* We are not connected to a target - check to see if there
 * are commands waiting to be executed.
 */

		in2000_execute(instance);
		break;

/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_SELECT:
		DB(DB_INTR, printk("SELECT"))
		hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
		CHECK_NULL(cmd, "csr_select")
		hostdata->selecting = NULL;

		/* construct an IDENTIFY message with correct disconnect bit */

		hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
		if (cmd->SCp.phase)
			hostdata->outgoing_msg[0] |= 0x40;

		if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
#ifdef SYNC_DEBUG
			printk(" sending SDTR ");
#endif

			hostdata->sync_stat[cmd->device->id] = SS_WAITING;

			/* tack on a 2nd message to ask about synchronous transfers */

			hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
			hostdata->outgoing_msg[2] = 3;
			hostdata->outgoing_msg[3] = EXTENDED_SDTR;
			hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
			hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
			hostdata->outgoing_len = 6;
		} else
			hostdata->outgoing_len = 1;

		hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_DATA_IN:
	case CSR_UNEXP | PHS_DATA_IN:
	case CSR_SRV_REQ | PHS_DATA_IN:
		DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		transfer_bytes(cmd, DATA_IN_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_DATA_OUT:
	case CSR_UNEXP | PHS_DATA_OUT:
	case CSR_SRV_REQ | PHS_DATA_OUT:
		DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		transfer_bytes(cmd, DATA_OUT_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;

/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_XFER_DONE | PHS_COMMAND:
	case CSR_UNEXP | PHS_COMMAND:
	case CSR_SRV_REQ | PHS_COMMAND:
		DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
		transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
		hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_STATUS:
	case CSR_UNEXP | PHS_STATUS:
	case CSR_SRV_REQ | PHS_STATUS:
		DB(DB_INTR, printk("STATUS="))
		cmd->SCp.Status = read_1_byte(hostdata);
		DB(DB_INTR, printk("%02x", cmd->SCp.Status))
		if (hostdata->level2 >= L2_BASIC) {
			sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */
			hostdata->state = S_RUNNING_LEVEL2;
			write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		} else {
			hostdata->state = S_CONNECTED;
		}
		break;

	case CSR_XFER_DONE | PHS_MESS_IN:
	case CSR_UNEXP | PHS_MESS_IN:
	case CSR_SRV_REQ | PHS_MESS_IN:
		DB(DB_INTR, printk("MSG_IN="))

		msg = read_1_byte(hostdata);
		sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

		hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
		if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
			msg = EXTENDED_MESSAGE;
		else
			hostdata->incoming_ptr = 0;

		cmd->SCp.Message = msg;
		switch (msg) {

		case COMMAND_COMPLETE:
			DB(DB_INTR, printk("CCMP"))
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_CMP_DISC;
			break;

		case SAVE_POINTERS:
			DB(DB_INTR, printk("SDP"))
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case RESTORE_POINTERS:
			DB(DB_INTR, printk("RDP"))
			if (hostdata->level2 >= L2_BASIC) {
				write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
				write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
				hostdata->state = S_RUNNING_LEVEL2;
			} else {
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		case DISCONNECT:
			DB(DB_INTR, printk("DIS"))
			cmd->device->disconnect = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_TMP_DISC;
			break;

		case MESSAGE_REJECT:
			DB(DB_INTR, printk("REJ"))
#ifdef SYNC_DEBUG
			printk("-REJ-");
#endif
			if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
				hostdata->sync_stat[cmd->device->id] = SS_SET;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case EXTENDED_MESSAGE:
			DB(DB_INTR, printk("EXT"))

			ucp = hostdata->incoming_msg;

#ifdef SYNC_DEBUG
			printk("%02x", ucp[hostdata->incoming_ptr]);
#endif
			/* Is this the last byte of the extended message? */

			if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {

				switch (ucp[2]) {	/* what's the EXTENDED code? */
				case EXTENDED_SDTR:
					id = calc_sync_xfer(ucp[3], ucp[4]);
					if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {

/* A device has sent an unsolicited SDTR message; rather than go
 * through the effort of decoding it and then figuring out what
 * our reply should be, we're just gonna say that we have a
 * synchronous fifo depth of 0. This will result in asynchronous
 * transfers - not ideal but so much easier.
 * Actually, this is OK because it assures us that if we don't
 * specifically ask for sync transfers, we won't do any.
 */

						write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
						hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
						hostdata->outgoing_msg[1] = 3;
						hostdata->outgoing_msg[2] = EXTENDED_SDTR;
						hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
						hostdata->outgoing_msg[4] = 0;
						hostdata->outgoing_len = 5;
						hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
					} else {
						hostdata->sync_xfer[cmd->device->id] = id;
					}
#ifdef SYNC_DEBUG
					printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
#endif
					hostdata->sync_stat[cmd->device->id] = SS_SET;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				case EXTENDED_WDTR:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("sending WDTR ");
					hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
					hostdata->outgoing_msg[1] = 2;
					hostdata->outgoing_msg[2] = EXTENDED_WDTR;
					hostdata->outgoing_msg[3] = 0;	/* 8 bit transfer width */
					hostdata->outgoing_len = 4;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				default:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
					hostdata->outgoing_msg[0] = MESSAGE_REJECT;
					hostdata->outgoing_len = 1;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				}
				hostdata->incoming_ptr = 0;
			}

			/* We need to read more MESS_IN bytes for the extended message */

			else {
				hostdata->incoming_ptr++;
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		default:
			printk("Rejecting Unknown Message(%02x) ", msg);
			write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
			hostdata->outgoing_msg[0] = MESSAGE_REJECT;
			hostdata->outgoing_len = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
		}
		break;

/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SEL_XFER_DONE:

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (phs == 0x60) {
			DB(DB_INTR, printk("SX-DONE"))
			cmd->SCp.Message = COMMAND_COMPLETE;
			lun = read_3393(hostdata, WD_TARGET_LUN);
			DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
			hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
				cmd->SCp.Status = lun;
			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */

			in2000_execute(instance);
		} else {
			printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
		}
		break;

/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SDP:
		DB(DB_INTR, printk("SDP"))
		hostdata->state = S_RUNNING_LEVEL2;
		write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		break;

	case CSR_XFER_DONE | PHS_MESS_OUT:
	case CSR_UNEXP | PHS_MESS_OUT:
	case CSR_SRV_REQ | PHS_MESS_OUT:
		DB(DB_INTR, printk("MSG_OUT="))

/* To get here, we've probably requested MESSAGE_OUT and have
 * already put the correct bytes in outgoing_msg[] and filled
 * in outgoing_len. We simply send them out to the SCSI bus.
 * Sometimes we get MESSAGE_OUT phase when we're not expecting
 * it - like when our SDTR message is rejected by a target. Some
 * targets send the REJECT before receiving all of the extended
 * message, and then seem to go back to MESSAGE_OUT for a byte
 * or two. Not sure why, or if I'm doing something wrong to
 * cause this to happen. Regardless, it seems that sending
 * NOP messages in these situations results in no harm and
 * makes everyone happy.
 */

		if (hostdata->outgoing_len == 0) {
			hostdata->outgoing_len = 1;
			hostdata->outgoing_msg[0] = NOP;
		}
		transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
		DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
		hostdata->outgoing_len = 0;
		hostdata->state = S_CONNECTED;
		break;

	case CSR_UNEXP_DISC:

/* I think I've seen this after a request-sense that was in response
 * to an error condition, but not sure. We certainly need to do
 * something when we get this interrupt - the question is 'what?'.
 * Let's think positively, and assume some command has finished
 * in a legal manner (like a command that provokes a request-sense),
 * so we treat it as a normal command-complete-disconnect.
 */

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;

/* release the SMP spin_lock and restore irq state */
			spin_unlock_irqrestore(instance->host_lock, flags);
			return IRQ_HANDLED;
		}
		DB(DB_INTR, printk("UNEXP_DISC"))
		hostdata->connected = NULL;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
			cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
		else
			cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
		cmd->scsi_done(cmd);

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */

		in2000_execute(instance);
		break;

	case CSR_DISC:

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		DB(DB_INTR, printk("DISC"))
		if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;
		}
		switch (hostdata->state) {
		case S_PRE_CMP_DISC:
			hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			DB(DB_INTR, printk(":%d", cmd->SCp.Status))
			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);
			break;
		case S_PRE_TMP_DISC:
		case S_RUNNING_LEVEL2:
			cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
			hostdata->disconnected_Q = cmd;
			hostdata->connected = NULL;
			hostdata->state = S_UNCONNECTED;

#ifdef PROC_STATISTICS
			hostdata->disc_done_cnt[cmd->device->id]++;
#endif

			break;
		default:
			printk("*** Unexpected DISCONNECT interrupt! ***");
			hostdata->state = S_UNCONNECTED;
		}

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */

		in2000_execute(instance);
		break;

	case CSR_RESEL_AM:
		DB(DB_INTR, printk("RESEL"))

		/* First we have to make sure this reselection didn't */
		/* happen during Arbitration/Selection of some other device. */
		/* If yes, put losing command back on top of input_Q. */

		if (hostdata->level2 <= L2_NONE) {

			if (hostdata->selecting) {
				cmd = (Scsi_Cmnd *) hostdata->selecting;
				hostdata->selecting = NULL;
				hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
				cmd->host_scribble = (uchar *) hostdata->input_Q;
				hostdata->input_Q = cmd;
			}
		}

		else {

			if (cmd) {
				if (phs == 0x00) {
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
					cmd->host_scribble = (uchar *) hostdata->input_Q;
					hostdata->input_Q = cmd;
				} else {
					printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
					while (1)
						printk("\r");
				}
			}

		}

		/* OK - find out which device reselected us. */

		id = read_3393(hostdata, WD_SOURCE_ID);
		id &= SRCID_MASK;

		/* and extract the lun from the ID message. (Note that we don't
		 * bother to check for a valid message here - I guess this is
		 * not the right way to go, but....)
		 */

		lun = read_3393(hostdata, WD_DATA);
		if (hostdata->level2 < L2_RESELECT)
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
		lun &= 7;

		/* Now we look for the command that's reconnecting. */

		cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
		patch = NULL;
		while (cmd) {
			if (id == cmd->device->id && lun == cmd->device->lun)
				break;
			patch = cmd;
			cmd = (Scsi_Cmnd *) cmd->host_scribble;
		}

		/* Hmm. Couldn't find a valid command.... What to do? */

		if (!cmd) {
			printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
			break;
		}

		/* Ok, found the command - now start it up again. */

		if (patch)
			patch->host_scribble = cmd->host_scribble;
		else
			hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
		hostdata->connected = cmd;

		/* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
		 * because these things are preserved over a disconnect.
		 * But we DO need to fix the DPD bit so it's correct for this command.
		 */

		if (is_dir_out(cmd))
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
		else
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
		if (hostdata->level2 >= L2_RESELECT) {
			write_3393_count(hostdata, 0);	/* we want a DATA_PHASE interrupt */
			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
			hostdata->state = S_RUNNING_LEVEL2;
		} else
			hostdata->state = S_CONNECTED;

		break;

	default:
		printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
	}

	write1_io(0, IO_LED_OFF);

	DB(DB_INTR, printk("} "))

/* release the SMP spin_lock and restore irq state */
	spin_unlock_irqrestore(instance->host_lock, flags);
	return IRQ_HANDLED;
}



#define RESET_CARD         0
#define RESET_CARD_AND_BUS 1
#define B_FLAG 0x80

/*
 *	Caller must hold instance lock!
 */

/* reset_hardware() - reset the IN2000 card (and optionally the SCSI bus)
 * and reprogram the WD3393 to its default operating state.  Returns the
 * last WD_SCSI_STATUS value read, with B_FLAG or'ed in if the chip's
 * QUEUE_TAG register holds a written value (presumably distinguishing
 * chip revisions - TODO confirm against the WD33C93 datasheet).
 */
static int reset_hardware(struct Scsi_Host *instance, int type)
{
	struct IN2000_hostdata *hostdata;
	int qt, x;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	write1_io(0, IO_LED_ON);
	if (type == RESET_CARD_AND_BUS) {
		write1_io(0, IO_CARD_RESET);
		x = read1_io(IO_HARDWARE);
	}
	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear any WD intrpt */
	write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));

	write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
	write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
	write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
	/* FIXME: timeout ??
 */
	while (!(READ_AUX_STAT() & ASR_INT))
		cpu_relax();	/* wait for RESET to complete */

	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

	write_3393(hostdata, WD_QUEUE_TAG, 0xa5);	/* any random number */
	qt = read_3393(hostdata, WD_QUEUE_TAG);
	if (qt == 0xa5) {
		x |= B_FLAG;
		write_3393(hostdata, WD_QUEUE_TAG, 0);
	}
	write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write1_io(0, IO_LED_OFF);
	return x;
}



/* in2000_bus_reset() - SCSI error-handler bus-reset entry point.
 * Resets both the card and the bus, then re-initializes all per-target
 * and queue state in hostdata.  Always returns SUCCESS.
 */
static int in2000_bus_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	int x;
	unsigned long flags;

	instance = cmd->device->host;
	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);

	spin_lock_irqsave(instance->host_lock, flags);

	/* do scsi-reset here */
	reset_hardware(instance, RESET_CARD_AND_BUS);
	for (x = 0; x < 8; x++) {
		hostdata->busy[x] = 0;
		hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
		hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
	}
	hostdata->input_Q = NULL;
	hostdata->selecting = NULL;
	hostdata->connected = NULL;
	hostdata->disconnected_Q = NULL;
	hostdata->state = S_UNCONNECTED;
	hostdata->fifo = FI_FIFO_UNUSED;
	hostdata->incoming_ptr = 0;
	hostdata->outgoing_len = 0;

	cmd->result = DID_RESET << 16;

	spin_unlock_irqrestore(instance->host_lock, flags);
	return SUCCESS;
}

/* __in2000_abort() - attempt to abort 'cmd'.  Caller must hold the host
 * lock (see in2000_abort()).  Handles four cases: command still queued
 * (remove and complete with DID_ABORT), command connected (send ABORT
 * then DISCONNECT to the chip), command disconnected (give up, FAILED),
 * or command not found anywhere (assume it already completed, SUCCESS).
 */
static int __in2000_abort(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *tmp, *prev;
	uchar sr, asr;
	unsigned long timeout;

	instance = cmd->device->host;
	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
	printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));

/*
 * Case 1 : If the command hasn't been issued yet, we simply remove it
 *     from the input_Q.
 */

	tmp = (Scsi_Cmnd *) hostdata->input_Q;
	prev = NULL;
	while (tmp) {
		if (tmp == cmd) {
			if (prev)
				prev->host_scribble = cmd->host_scribble;
			cmd->host_scribble = NULL;
			cmd->result = DID_ABORT << 16;
			printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
			cmd->scsi_done(cmd);
			return SUCCESS;
		}
		prev = tmp;
		tmp = (Scsi_Cmnd *) tmp->host_scribble;
	}

/*
 * Case 2 : If the command is connected, we're going to fail the abort
 *     and let the high level SCSI driver retry at a later time or
 *     issue a reset.
 *
 *     Timeouts, and therefore aborted commands, will be highly unlikely
 *     and handling them cleanly in this situation would make the common
 *     case of noresets less efficient, and would pollute our code. So,
 *     we fail.
 */

	if (hostdata->connected == cmd) {

		printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);

		printk("sending wd33c93 ABORT command - ");
		write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
		write_3393_cmd(hostdata, WD_CMD_ABORT);

/* Now we have to attempt to flush out the FIFO... */

		printk("flushing fifo - ");
		timeout = 1000000;
		do {
			asr = READ_AUX_STAT();
			if (asr & ASR_DBR)
				read_3393(hostdata, WD_DATA);
		} while (!(asr & ASR_INT) && timeout-- > 0);
		sr = read_3393(hostdata, WD_SCSI_STATUS);
		printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);

		/*
		 * Abort command processed.
		 * Still connected.
		 * We must disconnect.
		 */

		printk("sending wd33c93 DISCONNECT command - ");
		write_3393_cmd(hostdata, WD_CMD_DISCONNECT);

		timeout = 1000000;
		asr = READ_AUX_STAT();
		while ((asr & ASR_CIP) && timeout-- > 0)
			asr = READ_AUX_STAT();
		sr = read_3393(hostdata, WD_SCSI_STATUS);
		printk("asr=%02x, sr=%02x.", asr, sr);

		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->connected = NULL;
		hostdata->state = S_UNCONNECTED;
		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		in2000_execute(instance);

		return SUCCESS;
	}

/*
 * Case 3: If the command is currently disconnected from the bus,
 * we're not going to expend much effort here: Let's just return
 * an ABORT_SNOOZE and hope for the best...
 */

	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
		if (cmd == tmp) {
			printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
			return FAILED;
		}

/*
 * Case 4 : If we reached this point, the command was not found in any of
 *     the queues.
 *
 * We probably reached this point because of an unlikely race condition
 * between the command completing successfully and the abortion code,
 * so we won't panic, but we will notify the user in case something really
 * broke.
 */

	in2000_execute(instance);

	printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no);
	return SUCCESS;
}

/* in2000_abort() - SCSI error-handler abort entry point.  Just wraps
 * __in2000_abort() with the host lock held.
 */
static int in2000_abort(Scsi_Cmnd * cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __in2000_abort(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}


#define MAX_IN2000_HOSTS 3
#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
#define SETUP_BUFFER_SIZE 200
static char setup_buffer[SETUP_BUFFER_SIZE];
static char setup_used[MAX_SETUP_ARGS];
static int done_setup = 0;

/* in2000_setup() - parse the comma-separated kernel command-line options
 * for this driver into setup_args[] (declared elsewhere in this file).
 * 'ints' is unused.  Also clears the setup_used[] bookkeeping and marks
 * setup as done.
 */
static void __init in2000_setup(char *str, int *ints)
{
	int i;
	char *p1, *p2;

	strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);
	p1 = setup_buffer;
	i = 0;
	while (*p1 && (i < MAX_SETUP_ARGS)) {
		p2 = strchr(p1, ',');
		if (p2) {
			*p2 = '\0';
			if (p1 != p2)
				setup_args[i] = p1;
			p1 = p2 + 1;
			i++;
		} else {
			setup_args[i] = p1;
			break;
		}
	}
	for (i = 0; i < MAX_SETUP_ARGS; i++)
		setup_used[i] = 0;
	done_setup = 1;
}


/* check_setup_args() returns index if key found, 0 if not
 * On a hit, marks the argument consumed in setup_used[] and, if the
 * argument has a ':<digits>' suffix, parses it into *val (otherwise
 * *val is set to -1).  'buf' is unused here.
 */
static int __init check_setup_args(char *key, int *val, char *buf)
{
	int x;
	char *cp;

	for (x = 0; x < MAX_SETUP_ARGS; x++) {
		if (setup_used[x])
			continue;
		if (!strncmp(setup_args[x], key, strlen(key)))
			break;
	}
	if (x == MAX_SETUP_ARGS)
		return 0;
	setup_used[x] = 1;
	cp = setup_args[x] + strlen(key);
	*val = -1;
	if (*cp != ':')
		return ++x;
	cp++;
	if ((*cp >= '0') && (*cp <= '9')) {
		*val = simple_strtoul(cp, NULL, 0);
	}
	return ++x;
}


/* The "correct" (ie portable) way to access memory-mapped hardware
 * such as the IN2000 EPROM and dip switch is through the use of
 * special macros declared in 'asm/io.h'. We use readb() and readl()
 * when reading from the card's BIOS area in in2000_detect().
*/ static u32 bios_tab[] in2000__INITDATA = { 0xc8000, 0xd0000, 0xd8000, 0 }; static unsigned short base_tab[] in2000__INITDATA = { 0x220, 0x200, 0x110, 0x100, }; static int int_tab[] in2000__INITDATA = { 15, 14, 11, 10 }; static int probe_bios(u32 addr, u32 *s1, uchar *switches) { void __iomem *p = ioremap(addr, 0x34); if (!p) return 0; *s1 = readl(p + 0x10); if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) { /* Read the switch image that's mapped into EPROM space */ *switches = ~readb(p + 0x20); iounmap(p); return 1; } iounmap(p); return 0; } static int __init in2000_detect(struct scsi_host_template * tpnt) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; int detect_count; int bios; int x; unsigned short base; uchar switches; uchar hrev; unsigned long flags; int val; char buf[32]; /* Thanks to help from Bill Earnest, probing for IN2000 cards is a * pretty straightforward and fool-proof operation. There are 3 * possible locations for the IN2000 EPROM in memory space - if we * find a BIOS signature, we can read the dip switch settings from * the byte at BIOS+32 (shadowed in by logic on the card). From 2 * of the switch bits we get the card's address in IO space. There's * an image of the dip switch there, also, so we have a way to back- * check that this really is an IN2000 card. Very nifty. Use the * 'ioport:xx' command-line parameter if your BIOS EPROM is absent * or disabled. */ if (!done_setup && setup_strings) in2000_setup(setup_strings, NULL); detect_count = 0; for (bios = 0; bios_tab[bios]; bios++) { u32 s1 = 0; if (check_setup_args("ioport", &val, buf)) { base = val; switches = ~inb(base + IO_SWITCHES) & 0xff; printk("Forcing IN2000 detection at IOport 0x%x ", base); bios = 2; } /* * There have been a couple of BIOS versions with different layouts * for the obvious ID strings. We look for the 2 most common ones and * hope that they cover all the cases... 
*/ else if (probe_bios(bios_tab[bios], &s1, &switches)) { printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]); /* Find out where the IO space is */ x = switches & (SW_ADDR0 | SW_ADDR1); base = base_tab[x]; /* Check for the IN2000 signature in IO space. */ x = ~inb(base + IO_SWITCHES) & 0xff; if (x != switches) { printk("Bad IO signature: %02x vs %02x.\n", x, switches); continue; } } else continue; /* OK. We have a base address for the IO ports - run a few safety checks */ if (!(switches & SW_BIT7)) { /* I _think_ all cards do this */ printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base); continue; } /* Let's assume any hardware version will work, although the driver * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll * print out the rev number for reference later, but accept them all. */ hrev = inb(base + IO_HARDWARE); /* Bit 2 tells us if interrupts are disabled */ if (switches & SW_DISINT) { printk("The IN-2000 SCSI card at IOport 0x%03x ", base); printk("is not configured for interrupt operation!\n"); printk("This driver requires an interrupt: cancelling detection.\n"); continue; } /* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now * initialize it. 
*/ tpnt->proc_name = "in2000"; instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata)); if (instance == NULL) continue; detect_count++; hostdata = (struct IN2000_hostdata *) instance->hostdata; instance->io_port = hostdata->io_base = base; hostdata->dip_switch = switches; hostdata->hrev = hrev; write1_io(0, IO_FIFO_WRITE); /* clear fifo counter */ write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */ write1_io(0, IO_INTR_MASK); /* allow all ints */ x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT]; if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) { printk("in2000_detect: Unable to allocate IRQ.\n"); detect_count--; continue; } instance->irq = x; instance->n_io_port = 13; request_region(base, 13, "in2000"); /* lock in this IO space for our use */ for (x = 0; x < 8; x++) { hostdata->busy[x] = 0; hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF); hostdata->sync_stat[x] = SS_UNSET; /* using default sync values */ #ifdef PROC_STATISTICS hostdata->cmd_cnt[x] = 0; hostdata->disc_allowed_cnt[x] = 0; hostdata->disc_done_cnt[x] = 0; #endif } hostdata->input_Q = NULL; hostdata->selecting = NULL; hostdata->connected = NULL; hostdata->disconnected_Q = NULL; hostdata->state = S_UNCONNECTED; hostdata->fifo = FI_FIFO_UNUSED; hostdata->level2 = L2_BASIC; hostdata->disconnect = DIS_ADAPTIVE; hostdata->args = DEBUG_DEFAULTS; hostdata->incoming_ptr = 0; hostdata->outgoing_len = 0; hostdata->default_sx_per = DEFAULT_SX_PER; /* Older BIOS's had a 'sync on/off' switch - use its setting */ if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5)) hostdata->sync_off = 0x00; /* sync defaults to on */ else hostdata->sync_off = 0xff; /* sync defaults to off */ #ifdef PROC_INTERFACE hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP; #ifdef PROC_STATISTICS hostdata->int_cnt = 0; #endif #endif if (check_setup_args("nosync", &val, buf)) hostdata->sync_off = val; if 
(check_setup_args("period", &val, buf)) hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns; if (check_setup_args("disconnect", &val, buf)) { if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS)) hostdata->disconnect = val; else hostdata->disconnect = DIS_ADAPTIVE; } if (check_setup_args("noreset", &val, buf)) hostdata->args ^= A_NO_SCSI_RESET; if (check_setup_args("level2", &val, buf)) hostdata->level2 = val; if (check_setup_args("debug", &val, buf)) hostdata->args = (val & DB_MASK); #ifdef PROC_INTERFACE if (check_setup_args("proc", &val, buf)) hostdata->proc = val; #endif /* FIXME: not strictly needed I think but the called code expects to be locked */ spin_lock_irqsave(instance->host_lock, flags); x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS); spin_unlock_irqrestore(instance->host_lock, flags); hostdata->microcode = read_3393(hostdata, WD_CDB_1); if (x & 0x01) { if (x & B_FLAG) hostdata->chip = C_WD33C93B; else hostdata->chip = C_WD33C93A; } else hostdata->chip = C_WD33C93; printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No"); printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? 
"WD33c93B" : "unknown", hostdata->microcode); #ifdef DEBUGGING_ON printk("setup_args = "); for (x = 0; x < MAX_SETUP_ARGS; x++) printk("%s,", setup_args[x]); printk("\n"); #endif if (hostdata->sync_off == 0xff) printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n"); printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE); } return detect_count; } static int in2000_release(struct Scsi_Host *shost) { if (shost->irq) free_irq(shost->irq, shost); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); return 0; } /* NOTE: I lifted this function straight out of the old driver, * and have not tested it. Presumably it does what it's * supposed to do... */ static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo) { int size; size = capacity; iinfo[0] = 64; iinfo[1] = 32; iinfo[2] = size >> 11; /* This should approximate the large drive handling that the DOS ASPI manager uses. Drives very near the boundaries may not be handled correctly (i.e. 
near 2.0 Gb and 4.0 Gb) */ if (iinfo[2] > 1024) { iinfo[0] = 64; iinfo[1] = 63; iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]); } if (iinfo[2] > 1024) { iinfo[0] = 128; iinfo[1] = 63; iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]); } if (iinfo[2] > 1024) { iinfo[0] = 255; iinfo[1] = 63; iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]); } return 0; } static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len) { #ifdef PROC_INTERFACE char *bp; struct IN2000_hostdata *hd; int x, i; hd = (struct IN2000_hostdata *) instance->hostdata; buf[len] = '\0'; bp = buf; if (!strncmp(bp, "debug:", 6)) { bp += 6; hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK; } else if (!strncmp(bp, "disconnect:", 11)) { bp += 11; x = simple_strtoul(bp, NULL, 0); if (x < DIS_NEVER || x > DIS_ALWAYS) x = DIS_ADAPTIVE; hd->disconnect = x; } else if (!strncmp(bp, "period:", 7)) { bp += 7; x = simple_strtoul(bp, NULL, 0); hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns; } else if (!strncmp(bp, "resync:", 7)) { bp += 7; x = simple_strtoul(bp, NULL, 0); for (i = 0; i < 7; i++) if (x & (1 << i)) hd->sync_stat[i] = SS_UNSET; } else if (!strncmp(bp, "proc:", 5)) { bp += 5; hd->proc = simple_strtoul(bp, NULL, 0); } else if (!strncmp(bp, "level2:", 7)) { bp += 7; hd->level2 = simple_strtoul(bp, NULL, 0); } #endif return len; } static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) { #ifdef PROC_INTERFACE unsigned long flags; struct IN2000_hostdata *hd; Scsi_Cmnd *cmd; int x; hd = (struct IN2000_hostdata *) instance->hostdata; spin_lock_irqsave(instance->host_lock, flags); if (hd->proc & PR_VERSION) seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE); if (hd->proc & PR_INFO) { seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? 
"Yes" : "No"); seq_printf(m, "\nsync_xfer[] = "); for (x = 0; x < 7; x++) seq_printf(m, "\t%02x", hd->sync_xfer[x]); seq_printf(m, "\nsync_stat[] = "); for (x = 0; x < 7; x++) seq_printf(m, "\t%02x", hd->sync_stat[x]); } #ifdef PROC_STATISTICS if (hd->proc & PR_STATISTICS) { seq_printf(m, "\ncommands issued: "); for (x = 0; x < 7; x++) seq_printf(m, "\t%ld", hd->cmd_cnt[x]); seq_printf(m, "\ndisconnects allowed:"); for (x = 0; x < 7; x++) seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); seq_printf(m, "\ndisconnects done: "); for (x = 0; x < 7; x++) seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt); } #endif if (hd->proc & PR_CONNECTED) { seq_printf(m, "\nconnected: "); if (hd->connected) { cmd = (Scsi_Cmnd *) hd->connected; seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); } } if (hd->proc & PR_INPUTQ) { seq_printf(m, "\ninput_Q: "); cmd = (Scsi_Cmnd *) hd->input_Q; while (cmd) { seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_DISCQ) { seq_printf(m, "\ndisconnected_Q:"); cmd = (Scsi_Cmnd *) hd->disconnected_Q; while (cmd) { seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_TEST) { ; /* insert your own custom function here */ } seq_printf(m, "\n"); spin_unlock_irqrestore(instance->host_lock, flags); #endif /* PROC_INTERFACE */ return 0; } MODULE_LICENSE("GPL"); static struct scsi_host_template driver_template = { .proc_name = "in2000", .write_info = in2000_write_info, .show_info = in2000_show_info, .name = "Always IN2000", .detect = in2000_detect, .release = in2000_release, .queuecommand = in2000_queuecommand, .eh_abort_handler = in2000_abort, .eh_bus_reset_handler = in2000_bus_reset, .bios_param = in2000_biosparam, .can_queue = IN2000_CAN_Q, .this_id = IN2000_HOST_ID, .sg_tablesize = IN2000_SG, .cmd_per_lun 
= IN2000_CPL, .use_clustering = DISABLE_CLUSTERING, }; #include "scsi_module.c"
gpl-2.0
kbc-developers/sc04d_kernel
arch/arm/mach-kirkwood/sheevaplug-setup.c
2813
3982
/* * arch/arm/mach-kirkwood/sheevaplug-setup.c * * Marvell SheevaPlug Reference Board Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mtd/partitions.h> #include <linux/mv643xx_eth.h> #include <linux/gpio.h> #include <linux/leds.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/kirkwood.h> #include <plat/mvsdio.h> #include "common.h" #include "mpp.h" static struct mtd_partition sheevaplug_nand_parts[] = { { .name = "u-boot", .offset = 0, .size = SZ_1M }, { .name = "uImage", .offset = MTDPART_OFS_NXTBLK, .size = SZ_4M }, { .name = "root", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL }, }; static struct mv643xx_eth_platform_data sheevaplug_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(0), }; static struct mv_sata_platform_data sheeva_esata_sata_data = { .n_ports = 2, }; static struct mvsdio_platform_data sheevaplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ }; static struct mvsdio_platform_data sheeva_esata_mvsdio_data = { .gpio_write_protect = 44, /* MPP44 used as SD write protect */ .gpio_card_detect = 47, /* MPP47 used as SD card detect */ }; static struct gpio_led sheevaplug_led_pins[] = { { .name = "plug:red:misc", .default_trigger = "none", .gpio = 46, .active_low = 1, }, { .name = "plug:green:health", .default_trigger = "default-on", .gpio = 49, .active_low = 1, }, }; static struct gpio_led_platform_data sheevaplug_led_data = { .leds = sheevaplug_led_pins, .num_leds = ARRAY_SIZE(sheevaplug_led_pins), }; static struct platform_device sheevaplug_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &sheevaplug_led_data, } }; static unsigned int sheevaplug_mpp_config[] __initdata = { MPP29_GPIO, /* USB Power Enable 
*/ MPP46_GPIO, /* LED Red */ MPP49_GPIO, /* LED */ 0 }; static unsigned int sheeva_esata_mpp_config[] __initdata = { MPP29_GPIO, /* USB Power Enable */ MPP44_GPIO, /* SD Write Protect */ MPP47_GPIO, /* SD Card Detect */ MPP49_GPIO, /* LED Green */ 0 }; static void __init sheevaplug_init(void) { /* * Basic setup. Needs to be called early. */ kirkwood_init(); /* setup gpio pin select */ if (machine_is_sheeva_esata()) kirkwood_mpp_conf(sheeva_esata_mpp_config); else kirkwood_mpp_conf(sheevaplug_mpp_config); kirkwood_uart0_init(); kirkwood_nand_init(ARRAY_AND_SIZE(sheevaplug_nand_parts), 25); if (gpio_request(29, "USB Power Enable") != 0 || gpio_direction_output(29, 1) != 0) printk(KERN_ERR "can't set up GPIO 29 (USB Power Enable)\n"); kirkwood_ehci_init(); kirkwood_ge00_init(&sheevaplug_ge00_data); /* honor lower power consumption for plugs with out eSATA */ if (machine_is_sheeva_esata()) kirkwood_sata_init(&sheeva_esata_sata_data); /* enable sd wp and sd cd on plugs with esata */ if (machine_is_sheeva_esata()) kirkwood_sdio_init(&sheeva_esata_mvsdio_data); else kirkwood_sdio_init(&sheevaplug_mvsdio_data); platform_device_register(&sheevaplug_leds); } #ifdef CONFIG_MACH_SHEEVAPLUG MACHINE_START(SHEEVAPLUG, "Marvell SheevaPlug Reference Board") /* Maintainer: shadi Ammouri <shadi@marvell.com> */ .boot_params = 0x00000100, .init_machine = sheevaplug_init, .map_io = kirkwood_map_io, .init_early = kirkwood_init_early, .init_irq = kirkwood_init_irq, .timer = &kirkwood_timer, MACHINE_END #endif #ifdef CONFIG_MACH_ESATA_SHEEVAPLUG MACHINE_START(ESATA_SHEEVAPLUG, "Marvell eSATA SheevaPlug Reference Board") .boot_params = 0x00000100, .init_machine = sheevaplug_init, .map_io = kirkwood_map_io, .init_early = kirkwood_init_early, .init_irq = kirkwood_init_irq, .timer = &kirkwood_timer, MACHINE_END #endif
gpl-2.0
kamarush/yuga_aosp_kernel_lp
net/netfilter/xt_NFQUEUE.c
4605
4143
/* iptables module for using new netfilter netlink queue * * (C) 2005 by Harald Welte <laforge@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/jhash.h> #include <linux/netfilter.h> #include <linux/netfilter_arp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_NFQUEUE.h> MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("Xtables: packet forwarding to netlink"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_NFQUEUE"); MODULE_ALIAS("ip6t_NFQUEUE"); MODULE_ALIAS("arpt_NFQUEUE"); static u32 jhash_initval __read_mostly; static bool rnd_inited __read_mostly; static unsigned int nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info *tinfo = par->targinfo; return NF_QUEUE_NR(tinfo->queuenum); } static u32 hash_v4(const struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); __be32 ipaddr; /* packets in either direction go into same queue */ ipaddr = iph->saddr ^ iph->daddr; return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval); } #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static u32 hash_v6(const struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); __be32 addr[4]; addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0]; addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1]; addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2]; addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3]; return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval); } #endif static unsigned int nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v1 *info = par->targinfo; u32 queue = info->queuenum; if (info->queues_total > 1) { if 
(par->family == NFPROTO_IPV4) queue = (((u64) hash_v4(skb) * info->queues_total) >> 32) + queue; #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) else if (par->family == NFPROTO_IPV6) queue = (((u64) hash_v6(skb) * info->queues_total) >> 32) + queue; #endif } return NF_QUEUE_NR(queue); } static unsigned int nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v2 *info = par->targinfo; unsigned int ret = nfqueue_tg_v1(skb, par); if (info->bypass) ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; return ret; } static int nfqueue_tg_check(const struct xt_tgchk_param *par) { const struct xt_NFQ_info_v2 *info = par->targinfo; u32 maxid; if (unlikely(!rnd_inited)) { get_random_bytes(&jhash_initval, sizeof(jhash_initval)); rnd_inited = true; } if (info->queues_total == 0) { pr_err("NFQUEUE: number of total queues is 0\n"); return -EINVAL; } maxid = info->queues_total - 1 + info->queuenum; if (maxid > 0xffff) { pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n", info->queues_total, maxid); return -ERANGE; } if (par->target->revision == 2 && info->bypass > 1) return -EINVAL; return 0; } static struct xt_target nfqueue_tg_reg[] __read_mostly = { { .name = "NFQUEUE", .family = NFPROTO_UNSPEC, .target = nfqueue_tg, .targetsize = sizeof(struct xt_NFQ_info), .me = THIS_MODULE, }, { .name = "NFQUEUE", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = nfqueue_tg_check, .target = nfqueue_tg_v1, .targetsize = sizeof(struct xt_NFQ_info_v1), .me = THIS_MODULE, }, { .name = "NFQUEUE", .revision = 2, .family = NFPROTO_UNSPEC, .checkentry = nfqueue_tg_check, .target = nfqueue_tg_v2, .targetsize = sizeof(struct xt_NFQ_info_v2), .me = THIS_MODULE, }, }; static int __init nfqueue_tg_init(void) { return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); } static void __exit nfqueue_tg_exit(void) { xt_unregister_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); } module_init(nfqueue_tg_init); module_exit(nfqueue_tg_exit);
gpl-2.0
tycoo/moto_x_kernel
net/batman-adv/gateway_common.c
4861
4184
/*
 * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "gateway_common.h"
#include "gateway_client.h"

/*
 * kbit_to_gw_bandwidth - encode down/up speeds (kbit) as a gateway class
 * @down: requested download speed in kbit
 * @up: requested upload speed in kbit
 * @gw_srv_class: output; encoded class byte (bit 7: scale, bits 6-3:
 *	download exponent, bits 2-0: upload fraction of the download speed)
 *
 * Searches every representable class value and picks the one whose
 * decoded speeds are closest to the requested ones.
 */
static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
{
	int mdown = 0, tdown, tup, difference;
	uint8_t sbit, part;

	*gw_srv_class = 0;
	difference = 0x0FFFFFFF;

	/* test all downspeeds */
	for (sbit = 0; sbit < 2; sbit++) {
		for (part = 0; part < 16; part++) {
			tdown = 32 * (sbit + 2) * (1 << part);

			if (abs(tdown - down) < difference) {
				*gw_srv_class = (sbit << 7) + (part << 3);
				difference = abs(tdown - down);
				/* remember the chosen downspeed: the
				 * upspeed is encoded relative to it */
				mdown = tdown;
			}
		}
	}

	/* test all upspeeds */
	difference = 0x0FFFFFFF;

	for (part = 0; part < 8; part++) {
		tup = ((part + 1) * (mdown)) / 8;

		if (abs(tup - up) < difference) {
			*gw_srv_class = (*gw_srv_class & 0xF8) | part;
			difference = abs(tup - up);
		}
	}
}

/* returns the up and downspeeds in kbit, calculated from the class */
void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
{
	int sbit = (gw_srv_class & 0x80) >> 7;
	int dpart = (gw_srv_class & 0x78) >> 3;
	int upart = (gw_srv_class & 0x07);

	/* class 0 means "no gateway" - report zero bandwidth */
	if (!gw_srv_class) {
		*down = 0;
		*up = 0;
		return;
	}

	*down = 32 * (sbit + 2) * (1 << dpart);
	*up = ((upart + 1) * (*down)) / 8;
}

/*
 * parse_throughput_multi - handle an optional "kbit"/"mbit" unit suffix
 * @str: NUL-terminated number-plus-optional-suffix string (modified:
 *	a recognized suffix is stripped in place)
 *
 * Returns the multiplier to apply to the parsed number: 1024 for
 * "mbit", 1 otherwise.  Shared by the download and upload halves of
 * parse_gw_bandwidth() which previously duplicated this logic.
 */
static int parse_throughput_multi(char *str)
{
	int multi = 1;
	char *tmp_ptr;

	if (strlen(str) > 4) {
		tmp_ptr = str + strlen(str) - 4;

		if (strnicmp(tmp_ptr, "mbit", 4) == 0)
			multi = 1024;

		if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
		    (multi > 1))
			*tmp_ptr = '\0';
	}

	return multi;
}

/*
 * parse_gw_bandwidth - parse "<down>[kbit|mbit][/<up>[kbit|mbit]]"
 * @net_dev: device used only for error log attribution
 * @buff: user-supplied string (modified in place)
 * @up/@down: outputs in kbit; @up is left untouched when no '/' part
 *	is present
 *
 * Returns true on success, false (with a logged error) on malformed
 * numbers.
 */
static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
			       int *up, int *down)
{
	int ret, multi;
	char *slash_ptr;
	long ldown, lup;

	slash_ptr = strchr(buff, '/');
	if (slash_ptr)
		*slash_ptr = 0;

	multi = parse_throughput_multi(buff);

	ret = kstrtol(buff, 10, &ldown);
	if (ret) {
		bat_err(net_dev,
			"Download speed of gateway mode invalid: %s\n",
			buff);
		return false;
	}

	*down = ldown * multi;

	/* we also got some upload info */
	if (slash_ptr) {
		multi = parse_throughput_multi(slash_ptr + 1);

		ret = kstrtol(slash_ptr + 1, 10, &lup);
		if (ret) {
			bat_err(net_dev,
				"Upload speed of gateway mode invalid: %s\n",
				slash_ptr + 1);
			return false;
		}

		*up = lup * multi;
	}

	return true;
}

/*
 * gw_bandwidth_set - sysfs write handler for the gateway bandwidth
 * @net_dev: soft interface the setting belongs to
 * @buff: user string, e.g. "2000kbit/400kbit"
 * @count: length of @buff; always returned, even on parse failure
 */
ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
{
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	long gw_bandwidth_tmp = 0;
	int up = 0, down = 0;
	bool ret;

	ret = parse_gw_bandwidth(net_dev, buff, &up, &down);
	if (!ret)
		goto end;

	/* enforce a sane minimum; this also covers down == 0
	 * (the old "(!down) ||" test was redundant) */
	if (down < 256)
		down = 2000;

	if (!up)
		up = down / 5;

	kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);

	/*
	 * the gw bandwidth we guessed above might not match the given
	 * speeds, hence we need to calculate it back to show the number
	 * that is going to be propagated
	 */
	gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);

	gw_deselect(bat_priv);
	bat_info(net_dev,
		 "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
		 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
		 (down > 2048 ? down / 1024 : down),
		 (down > 2048 ? "MBit" : "KBit"),
		 (up > 2048 ? up / 1024 : up),
		 (up > 2048 ? "MBit" : "KBit"));

	atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);

end:
	return count;
}
gpl-2.0
bilalliberty/android_kernel_htc_liberty-villec2
drivers/input/misc/mc13783-pwrbutton.c
5117
7504
/**
 * Copyright (C) 2011 Philippe Rétornaz
 *
 * Based on twl4030-pwrbutton driver by:
 *     Peter De Schrijver <peter.de-schrijver@nokia.com>
 *     Felipe Balbi <felipe.balbi@nokia.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Per-device state: input device, PMIC handle, polarity flags and the
 * keycode reported for each of the up to three ON buttons. */
struct mc13783_pwrb {
	struct input_dev *pwr;
	struct mc13xxx *mc13783;
/* polarity-inversion flags stored in ->flags below */
#define MC13783_PWRB_B1_POL_INVERT (1 << 0)
#define MC13783_PWRB_B2_POL_INVERT (1 << 1)
#define MC13783_PWRB_B3_POL_INVERT (1 << 2)
	int flags;
	unsigned short keymap[3];
};

/* MC13783 register numbers and bit positions used by this driver */
#define MC13783_REG_INTERRUPT_SENSE_1		5
#define MC13783_IRQSENSE1_ONOFD1S		(1 << 3)
#define MC13783_IRQSENSE1_ONOFD2S		(1 << 4)
#define MC13783_IRQSENSE1_ONOFD3S		(1 << 5)

#define MC13783_REG_POWER_CONTROL_2		15
/* debounce-time fields (2 bits each) and reset-enable bits */
#define MC13783_POWER_CONTROL_2_ON1BDBNC	4
#define MC13783_POWER_CONTROL_2_ON2BDBNC	6
#define MC13783_POWER_CONTROL_2_ON3BDBNC	8
#define MC13783_POWER_CONTROL_2_ON1BRSTEN	(1 << 1)
#define MC13783_POWER_CONTROL_2_ON2BRSTEN	(1 << 2)
#define MC13783_POWER_CONTROL_2_ON3BRSTEN	(1 << 3)

/*
 * Shared IRQ handler for all three ON buttons: read the current button
 * level from the sense register, apply the configured polarity and
 * report the mapped key.  Always returns IRQ_HANDLED.
 */
static irqreturn_t button_irq(int irq, void *_priv)
{
	struct mc13783_pwrb *priv = _priv;
	int val;

	mc13xxx_irq_ack(priv->mc13783, irq);
	mc13xxx_reg_read(priv->mc13783, MC13783_REG_INTERRUPT_SENSE_1, &val);

	switch (irq) {
	case MC13783_IRQ_ONOFD1:
		val = val & MC13783_IRQSENSE1_ONOFD1S ? 1 : 0;
		if (priv->flags & MC13783_PWRB_B1_POL_INVERT)
			val ^= 1;
		input_report_key(priv->pwr, priv->keymap[0], val);
		break;

	case MC13783_IRQ_ONOFD2:
		val = val & MC13783_IRQSENSE1_ONOFD2S ? 1 : 0;
		if (priv->flags & MC13783_PWRB_B2_POL_INVERT)
			val ^= 1;
		input_report_key(priv->pwr, priv->keymap[1], val);
		break;

	case MC13783_IRQ_ONOFD3:
		val = val & MC13783_IRQSENSE1_ONOFD3S ? 1 : 0;
		if (priv->flags & MC13783_PWRB_B3_POL_INVERT)
			val ^= 1;
		input_report_key(priv->pwr, priv->keymap[2], val);
		break;
	}

	input_sync(priv->pwr);

	return IRQ_HANDLED;
}

/*
 * Probe: allocate the input device and private state, then - under the
 * PMIC lock - request an IRQ for every enabled button while
 * accumulating the POWER_CONTROL_2 debounce/reset bits in 'reg',
 * written in one rmw at the end.  The error unwind frees IRQs in
 * reverse acquisition order (labels free_irq -> free_irq_b2 ->
 * free_irq_b1 -> free_priv), so the label cascade order is load-bearing.
 */
static int __devinit mc13783_pwrbutton_probe(struct platform_device *pdev)
{
	const struct mc13xxx_buttons_platform_data *pdata;
	struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
	struct input_dev *pwr;
	struct mc13783_pwrb *priv;
	int err = 0;
	int reg = 0;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "missing platform data\n");
		return -ENODEV;
	}

	pwr = input_allocate_device();
	if (!pwr) {
		dev_dbg(&pdev->dev, "Can't allocate power button\n");
		return -ENOMEM;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		dev_dbg(&pdev->dev, "Can't allocate power button\n");
		goto free_input_dev;
	}

	/* low two bits of each bNon_flags select the debounce time */
	reg |= (pdata->b1on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON1BDBNC;
	reg |= (pdata->b2on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON2BDBNC;
	reg |= (pdata->b3on_flags & 0x3) << MC13783_POWER_CONTROL_2_ON3BDBNC;

	priv->pwr = pwr;
	priv->mc13783 = mc13783;

	mc13xxx_lock(mc13783);

	if (pdata->b1on_flags & MC13783_BUTTON_ENABLE) {
		priv->keymap[0] = pdata->b1on_key;
		if (pdata->b1on_key != KEY_RESERVED)
			__set_bit(pdata->b1on_key, pwr->keybit);

		if (pdata->b1on_flags & MC13783_BUTTON_POL_INVERT)
			priv->flags |= MC13783_PWRB_B1_POL_INVERT;

		if (pdata->b1on_flags & MC13783_BUTTON_RESET_EN)
			reg |= MC13783_POWER_CONTROL_2_ON1BRSTEN;

		err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD1,
					  button_irq, "b1on", priv);
		if (err) {
			dev_dbg(&pdev->dev, "Can't request irq\n");
			goto free_priv;
		}
	}

	if (pdata->b2on_flags & MC13783_BUTTON_ENABLE) {
		priv->keymap[1] = pdata->b2on_key;
		if (pdata->b2on_key != KEY_RESERVED)
			__set_bit(pdata->b2on_key, pwr->keybit);

		if (pdata->b2on_flags & MC13783_BUTTON_POL_INVERT)
			priv->flags |= MC13783_PWRB_B2_POL_INVERT;

		if (pdata->b2on_flags & MC13783_BUTTON_RESET_EN)
			reg |= MC13783_POWER_CONTROL_2_ON2BRSTEN;

		err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD2,
					  button_irq, "b2on", priv);
		if (err) {
			dev_dbg(&pdev->dev, "Can't request irq\n");
			goto free_irq_b1;
		}
	}

	if (pdata->b3on_flags & MC13783_BUTTON_ENABLE) {
		priv->keymap[2] = pdata->b3on_key;
		if (pdata->b3on_key != KEY_RESERVED)
			__set_bit(pdata->b3on_key, pwr->keybit);

		if (pdata->b3on_flags & MC13783_BUTTON_POL_INVERT)
			priv->flags |= MC13783_PWRB_B3_POL_INVERT;

		if (pdata->b3on_flags & MC13783_BUTTON_RESET_EN)
			reg |= MC13783_POWER_CONTROL_2_ON3BRSTEN;

		err = mc13xxx_irq_request(mc13783, MC13783_IRQ_ONOFD3,
					  button_irq, "b3on", priv);
		if (err) {
			dev_dbg(&pdev->dev, "Can't request irq: %d\n", err);
			goto free_irq_b2;
		}
	}

	/* 0x3FE masks all debounce + reset-enable bits written above */
	mc13xxx_reg_rmw(mc13783, MC13783_REG_POWER_CONTROL_2, 0x3FE, reg);

	mc13xxx_unlock(mc13783);

	pwr->name = "mc13783_pwrbutton";
	pwr->phys = "mc13783_pwrbutton/input0";
	pwr->dev.parent = &pdev->dev;

	pwr->keycode = priv->keymap;
	pwr->keycodemax = ARRAY_SIZE(priv->keymap);
	pwr->keycodesize = sizeof(priv->keymap[0]);
	__set_bit(EV_KEY, pwr->evbit);

	err = input_register_device(pwr);
	if (err) {
		dev_dbg(&pdev->dev, "Can't register power button: %d\n", err);
		goto free_irq;
	}

	platform_set_drvdata(pdev, priv);

	return 0;

free_irq:
	mc13xxx_lock(mc13783);

	if (pdata->b3on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD3, priv);

free_irq_b2:
	if (pdata->b2on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD2, priv);

free_irq_b1:
	if (pdata->b1on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(mc13783, MC13783_IRQ_ONOFD1, priv);

free_priv:
	mc13xxx_unlock(mc13783);
	kfree(priv);

free_input_dev:
	input_free_device(pwr);

	return err;
}

/*
 * Remove: free the button IRQs under the PMIC lock, then unregister
 * the input device (which also frees it) and release the private data.
 */
static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
{
	struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
	const struct mc13xxx_buttons_platform_data *pdata;

	pdata = dev_get_platdata(&pdev->dev);

	mc13xxx_lock(priv->mc13783);

	if (pdata->b3on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD3, priv);
	if (pdata->b2on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD2, priv);
	if (pdata->b1on_flags & MC13783_BUTTON_ENABLE)
		mc13xxx_irq_free(priv->mc13783, MC13783_IRQ_ONOFD1, priv);

	mc13xxx_unlock(priv->mc13783);

	input_unregister_device(priv->pwr);
	kfree(priv);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver mc13783_pwrbutton_driver = {
	.probe		= mc13783_pwrbutton_probe,
	.remove		= __devexit_p(mc13783_pwrbutton_remove),
	.driver		= {
		.name	= "mc13783-pwrbutton",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(mc13783_pwrbutton_driver);

MODULE_ALIAS("platform:mc13783-pwrbutton");
MODULE_DESCRIPTION("MC13783 Power Button");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Philippe Retornaz");
gpl-2.0
placiano/NBKernel_Lollipop
sound/i2c/i2c.c
9469
8516
/* * Generic i2c interface for ALSA * * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> * Modified for the ALSA driver by Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/string.h> #include <linux/errno.h> #include <sound/core.h> #include <sound/i2c.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Generic i2c interface for ALSA"); MODULE_LICENSE("GPL"); static int snd_i2c_bit_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count); static int snd_i2c_bit_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count); static int snd_i2c_bit_probeaddr(struct snd_i2c_bus *bus, unsigned short addr); static struct snd_i2c_ops snd_i2c_bit_ops = { .sendbytes = snd_i2c_bit_sendbytes, .readbytes = snd_i2c_bit_readbytes, .probeaddr = snd_i2c_bit_probeaddr, }; static int snd_i2c_bus_free(struct snd_i2c_bus *bus) { struct snd_i2c_bus *slave; struct snd_i2c_device *device; if (snd_BUG_ON(!bus)) return -EINVAL; while (!list_empty(&bus->devices)) { device = snd_i2c_device(bus->devices.next); snd_i2c_device_free(device); } if (bus->master) list_del(&bus->buses); else { while (!list_empty(&bus->buses)) { slave = snd_i2c_slave_bus(bus->buses.next); 
snd_device_free(bus->card, slave); } } if (bus->private_free) bus->private_free(bus); kfree(bus); return 0; } static int snd_i2c_bus_dev_free(struct snd_device *device) { struct snd_i2c_bus *bus = device->device_data; return snd_i2c_bus_free(bus); } int snd_i2c_bus_create(struct snd_card *card, const char *name, struct snd_i2c_bus *master, struct snd_i2c_bus **ri2c) { struct snd_i2c_bus *bus; int err; static struct snd_device_ops ops = { .dev_free = snd_i2c_bus_dev_free, }; *ri2c = NULL; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (bus == NULL) return -ENOMEM; mutex_init(&bus->lock_mutex); INIT_LIST_HEAD(&bus->devices); INIT_LIST_HEAD(&bus->buses); bus->card = card; bus->ops = &snd_i2c_bit_ops; if (master) { list_add_tail(&bus->buses, &master->buses); bus->master = master; } strlcpy(bus->name, name, sizeof(bus->name)); err = snd_device_new(card, SNDRV_DEV_BUS, bus, &ops); if (err < 0) { snd_i2c_bus_free(bus); return err; } *ri2c = bus; return 0; } EXPORT_SYMBOL(snd_i2c_bus_create); int snd_i2c_device_create(struct snd_i2c_bus *bus, const char *name, unsigned char addr, struct snd_i2c_device **rdevice) { struct snd_i2c_device *device; *rdevice = NULL; if (snd_BUG_ON(!bus)) return -EINVAL; device = kzalloc(sizeof(*device), GFP_KERNEL); if (device == NULL) return -ENOMEM; device->addr = addr; strlcpy(device->name, name, sizeof(device->name)); list_add_tail(&device->list, &bus->devices); device->bus = bus; *rdevice = device; return 0; } EXPORT_SYMBOL(snd_i2c_device_create); int snd_i2c_device_free(struct snd_i2c_device *device) { if (device->bus) list_del(&device->list); if (device->private_free) device->private_free(device); kfree(device); return 0; } EXPORT_SYMBOL(snd_i2c_device_free); int snd_i2c_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { return device->bus->ops->sendbytes(device, bytes, count); } EXPORT_SYMBOL(snd_i2c_sendbytes); int snd_i2c_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { return 
device->bus->ops->readbytes(device, bytes, count); } EXPORT_SYMBOL(snd_i2c_readbytes); int snd_i2c_probeaddr(struct snd_i2c_bus *bus, unsigned short addr) { return bus->ops->probeaddr(bus, addr); } EXPORT_SYMBOL(snd_i2c_probeaddr); /* * bit-operations */ static inline void snd_i2c_bit_hw_start(struct snd_i2c_bus *bus) { if (bus->hw_ops.bit->start) bus->hw_ops.bit->start(bus); } static inline void snd_i2c_bit_hw_stop(struct snd_i2c_bus *bus) { if (bus->hw_ops.bit->stop) bus->hw_ops.bit->stop(bus); } static void snd_i2c_bit_direction(struct snd_i2c_bus *bus, int clock, int data) { if (bus->hw_ops.bit->direction) bus->hw_ops.bit->direction(bus, clock, data); } static void snd_i2c_bit_set(struct snd_i2c_bus *bus, int clock, int data) { bus->hw_ops.bit->setlines(bus, clock, data); } #if 0 static int snd_i2c_bit_clock(struct snd_i2c_bus *bus) { if (bus->hw_ops.bit->getclock) return bus->hw_ops.bit->getclock(bus); return -ENXIO; } #endif static int snd_i2c_bit_data(struct snd_i2c_bus *bus, int ack) { return bus->hw_ops.bit->getdata(bus, ack); } static void snd_i2c_bit_start(struct snd_i2c_bus *bus) { snd_i2c_bit_hw_start(bus); snd_i2c_bit_direction(bus, 1, 1); /* SCL - wr, SDA - wr */ snd_i2c_bit_set(bus, 1, 1); snd_i2c_bit_set(bus, 1, 0); snd_i2c_bit_set(bus, 0, 0); } static void snd_i2c_bit_stop(struct snd_i2c_bus *bus) { snd_i2c_bit_set(bus, 0, 0); snd_i2c_bit_set(bus, 1, 0); snd_i2c_bit_set(bus, 1, 1); snd_i2c_bit_hw_stop(bus); } static void snd_i2c_bit_send(struct snd_i2c_bus *bus, int data) { snd_i2c_bit_set(bus, 0, data); snd_i2c_bit_set(bus, 1, data); snd_i2c_bit_set(bus, 0, data); } static int snd_i2c_bit_ack(struct snd_i2c_bus *bus) { int ack; snd_i2c_bit_set(bus, 0, 1); snd_i2c_bit_set(bus, 1, 1); snd_i2c_bit_direction(bus, 1, 0); /* SCL - wr, SDA - rd */ ack = snd_i2c_bit_data(bus, 1); snd_i2c_bit_direction(bus, 1, 1); /* SCL - wr, SDA - wr */ snd_i2c_bit_set(bus, 0, 1); return ack ? 
-EIO : 0; } static int snd_i2c_bit_sendbyte(struct snd_i2c_bus *bus, unsigned char data) { int i, err; for (i = 7; i >= 0; i--) snd_i2c_bit_send(bus, !!(data & (1 << i))); err = snd_i2c_bit_ack(bus); if (err < 0) return err; return 0; } static int snd_i2c_bit_readbyte(struct snd_i2c_bus *bus, int last) { int i; unsigned char data = 0; snd_i2c_bit_set(bus, 0, 1); snd_i2c_bit_direction(bus, 1, 0); /* SCL - wr, SDA - rd */ for (i = 7; i >= 0; i--) { snd_i2c_bit_set(bus, 1, 1); if (snd_i2c_bit_data(bus, 0)) data |= (1 << i); snd_i2c_bit_set(bus, 0, 1); } snd_i2c_bit_direction(bus, 1, 1); /* SCL - wr, SDA - wr */ snd_i2c_bit_send(bus, !!last); return data; } static int snd_i2c_bit_sendbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { struct snd_i2c_bus *bus = device->bus; int err, res = 0; if (device->flags & SND_I2C_DEVICE_ADDRTEN) return -EIO; /* not yet implemented */ snd_i2c_bit_start(bus); err = snd_i2c_bit_sendbyte(bus, device->addr << 1); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } while (count-- > 0) { err = snd_i2c_bit_sendbyte(bus, *bytes++); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } res++; } snd_i2c_bit_stop(bus); return res; } static int snd_i2c_bit_readbytes(struct snd_i2c_device *device, unsigned char *bytes, int count) { struct snd_i2c_bus *bus = device->bus; int err, res = 0; if (device->flags & SND_I2C_DEVICE_ADDRTEN) return -EIO; /* not yet implemented */ snd_i2c_bit_start(bus); err = snd_i2c_bit_sendbyte(bus, (device->addr << 1) | 1); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } while (count-- > 0) { err = snd_i2c_bit_readbyte(bus, count == 0); if (err < 0) { snd_i2c_bit_hw_stop(bus); return err; } *bytes++ = (unsigned char)err; res++; } snd_i2c_bit_stop(bus); return res; } static int snd_i2c_bit_probeaddr(struct snd_i2c_bus *bus, unsigned short addr) { int err; if (addr & 0x8000) /* 10-bit address */ return -EIO; /* not yet implemented */ if (addr & 0x7f80) /* invalid address */ return -EINVAL; 
snd_i2c_bit_start(bus); err = snd_i2c_bit_sendbyte(bus, addr << 1); snd_i2c_bit_stop(bus); return err; } static int __init alsa_i2c_init(void) { return 0; } static void __exit alsa_i2c_exit(void) { } module_init(alsa_i2c_init) module_exit(alsa_i2c_exit)
gpl-2.0
bossino/panda_es_kernel
kernel/debug/kdb/kdb_keyboard.c
9725
5680
/* * Kernel Debugger Architecture Dependent Console I/O handler * * This file is subject to the terms and conditions of the GNU General Public * License. * * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. */ #include <linux/kdb.h> #include <linux/keyboard.h> #include <linux/ctype.h> #include <linux/module.h> #include <linux/io.h> /* Keyboard Controller Registers on normal PCs. */ #define KBD_STATUS_REG 0x64 /* Status register (R) */ #define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */ /* Status Register Bits */ #define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */ #define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */ static int kbd_exists; static int kbd_last_ret; /* * Check if the keyboard controller has a keypress for us. * Some parts (Enter Release, LED change) are still blocking polled here, * but hopefully they are all short. */ int kdb_get_kbd_char(void) { int scancode, scanstatus; static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */ static int shift_key; /* Shift next keypress */ static int ctrl_key; u_short keychar; if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) || (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) { kbd_exists = 0; return -1; } kbd_exists = 1; if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) return -1; /* * Fetch the scancode */ scancode = inb(KBD_DATA_REG); scanstatus = inb(KBD_STATUS_REG); /* * Ignore mouse events. */ if (scanstatus & KBD_STAT_MOUSE_OBF) return -1; /* * Ignore release, trigger on make * (except for shift keys, where we want to * keep the shift state so long as the key is * held down). 
*/ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) { /* * Next key may use shift table */ if ((scancode & 0x80) == 0) shift_key = 1; else shift_key = 0; return -1; } if ((scancode&0x7f) == 0x1d) { /* * Left ctrl key */ if ((scancode & 0x80) == 0) ctrl_key = 1; else ctrl_key = 0; return -1; } if ((scancode & 0x80) != 0) { if (scancode == 0x9c) kbd_last_ret = 0; return -1; } scancode &= 0x7f; /* * Translate scancode */ if (scancode == 0x3a) { /* * Toggle caps lock */ shift_lock ^= 1; #ifdef KDB_BLINK_LED kdb_toggleled(0x4); #endif return -1; } if (scancode == 0x0e) { /* * Backspace */ return 8; } /* Special Key */ switch (scancode) { case 0xF: /* Tab */ return 9; case 0x53: /* Del */ return 4; case 0x47: /* Home */ return 1; case 0x4F: /* End */ return 5; case 0x4B: /* Left */ return 2; case 0x48: /* Up */ return 16; case 0x50: /* Down */ return 14; case 0x4D: /* Right */ return 6; } if (scancode == 0xe0) return -1; /* * For Japanese 86/106 keyboards * See comment in drivers/char/pc_keyb.c. * - Masahiro Adegawa */ if (scancode == 0x73) scancode = 0x59; else if (scancode == 0x7d) scancode = 0x7c; if (!shift_lock && !shift_key && !ctrl_key) { keychar = plain_map[scancode]; } else if ((shift_lock || shift_key) && key_maps[1]) { keychar = key_maps[1][scancode]; } else if (ctrl_key && key_maps[4]) { keychar = key_maps[4][scancode]; } else { keychar = 0x0020; kdb_printf("Unknown state/scancode (%d)\n", scancode); } keychar &= 0x0fff; if (keychar == '\t') keychar = ' '; switch (KTYP(keychar)) { case KT_LETTER: case KT_LATIN: if (isprint(keychar)) break; /* printable characters */ /* drop through */ case KT_SPEC: if (keychar == K_ENTER) break; /* drop through */ default: return -1; /* ignore unprintables */ } if (scancode == 0x1c) { kbd_last_ret = 1; return 13; } return keychar & 0xff; } EXPORT_SYMBOL_GPL(kdb_get_kbd_char); /* * Best effort cleanup of ENTER break codes on leaving KDB. 
Called on * exiting KDB, when we know we processed an ENTER or KP ENTER scan * code. */ void kdb_kbd_cleanup_state(void) { int scancode, scanstatus; /* * Nothing to clean up, since either * ENTER was never pressed, or has already * gotten cleaned up. */ if (!kbd_last_ret) return; kbd_last_ret = 0; /* * Enter key. Need to absorb the break code here, lest it gets * leaked out if we exit KDB as the result of processing 'g'. * * This has several interesting implications: * + Need to handle KP ENTER, which has break code 0xe0 0x9c. * + Need to handle repeat ENTER and repeat KP ENTER. Repeats * only get a break code at the end of the repeated * sequence. This means we can't propagate the repeated key * press, and must swallow it away. * + Need to handle possible PS/2 mouse input. * + Need to handle mashed keys. */ while (1) { while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0) cpu_relax(); /* * Fetch the scancode. */ scancode = inb(KBD_DATA_REG); scanstatus = inb(KBD_STATUS_REG); /* * Skip mouse input. */ if (scanstatus & KBD_STAT_MOUSE_OBF) continue; /* * If we see 0xe0, this is either a break code for KP * ENTER, or a repeat make for KP ENTER. Either way, * since the second byte is equivalent to an ENTER, * skip the 0xe0 and try again. * * If we see 0x1c, this must be a repeat ENTER or KP * ENTER (and we swallowed 0xe0 before). Try again. * * We can also see make and break codes for other keys * mashed before or after pressing ENTER. Thus, if we * see anything other than 0x9c, we have to try again. * * Note, if you held some key as ENTER was depressed, * that break code would get leaked out. */ if (scancode != 0x9c) continue; return; } }
gpl-2.0
badbear1727/E120LKERNEL
drivers/mca/mca-device.c
9981
6771
/* -*- mode: c; c-basic-offset: 8 -*- */ /* * MCA device support functions * * These functions support the ongoing device access API. * * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com> * **----------------------------------------------------------------------------- ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ** ** You should have received a copy of the GNU General Public License ** along with this program; if not, write to the Free Software ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ** **----------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/device.h> #include <linux/mca.h> #include <linux/string.h> /** * mca_device_read_stored_pos - read POS register from stored data * @mca_dev: device to read from * @reg: register to read from * * Fetch a POS value that was stored at boot time by the kernel * when it scanned the MCA space. The register value is returned. * Missing or invalid registers report 0. */ unsigned char mca_device_read_stored_pos(struct mca_device *mca_dev, int reg) { if(reg < 0 || reg >= 8) return 0; return mca_dev->pos[reg]; } EXPORT_SYMBOL(mca_device_read_stored_pos); /** * mca_device_read_pos - read POS register from card * @mca_dev: device to read from * @reg: register to read from * * Fetch a POS value directly from the hardware to obtain the * current value. This is much slower than * mca_device_read_stored_pos and may not be invoked from * interrupt context. 
It handles the deep magic required for * onboard devices transparently. */ unsigned char mca_device_read_pos(struct mca_device *mca_dev, int reg) { struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent); return mca_bus->f.mca_read_pos(mca_dev, reg); return mca_dev->pos[reg]; } EXPORT_SYMBOL(mca_device_read_pos); /** * mca_device_write_pos - read POS register from card * @mca_dev: device to write pos register to * @reg: register to write to * @byte: byte to write to the POS registers * * Store a POS value directly to the hardware. You should not * normally need to use this function and should have a very good * knowledge of MCA bus before you do so. Doing this wrongly can * damage the hardware. * * This function may not be used from interrupt context. * */ void mca_device_write_pos(struct mca_device *mca_dev, int reg, unsigned char byte) { struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent); mca_bus->f.mca_write_pos(mca_dev, reg, byte); } EXPORT_SYMBOL(mca_device_write_pos); /** * mca_device_transform_irq - transform the ADF obtained IRQ * @mca_device: device whose irq needs transforming * @irq: input irq from ADF * * MCA Adapter Definition Files (ADF) contain irq, ioport, memory * etc. definitions. In systems with more than one bus, these need * to be transformed through bus mapping functions to get the real * system global quantities. * * This function transforms the interrupt number and returns the * transformed system global interrupt */ int mca_device_transform_irq(struct mca_device *mca_dev, int irq) { struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent); return mca_bus->f.mca_transform_irq(mca_dev, irq); } EXPORT_SYMBOL(mca_device_transform_irq); /** * mca_device_transform_ioport - transform the ADF obtained I/O port * @mca_device: device whose port needs transforming * @ioport: input I/O port from ADF * * MCA Adapter Definition Files (ADF) contain irq, ioport, memory * etc. definitions. 
In systems with more than one bus, these need * to be transformed through bus mapping functions to get the real * system global quantities. * * This function transforms the I/O port number and returns the * transformed system global port number. * * This transformation can be assumed to be linear for port ranges. */ int mca_device_transform_ioport(struct mca_device *mca_dev, int port) { struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent); return mca_bus->f.mca_transform_ioport(mca_dev, port); } EXPORT_SYMBOL(mca_device_transform_ioport); /** * mca_device_transform_memory - transform the ADF obtained memory * @mca_device: device whose memory region needs transforming * @mem: memory region start from ADF * * MCA Adapter Definition Files (ADF) contain irq, ioport, memory * etc. definitions. In systems with more than one bus, these need * to be transformed through bus mapping functions to get the real * system global quantities. * * This function transforms the memory region start and returns the * transformed system global memory region (physical). * * This transformation can be assumed to be linear for region ranges. 
*/ void *mca_device_transform_memory(struct mca_device *mca_dev, void *mem) { struct mca_bus *mca_bus = to_mca_bus(mca_dev->dev.parent); return mca_bus->f.mca_transform_memory(mca_dev, mem); } EXPORT_SYMBOL(mca_device_transform_memory); /** * mca_device_claimed - check if claimed by driver * @mca_dev: device to check * * Returns 1 if the slot has been claimed by a driver */ int mca_device_claimed(struct mca_device *mca_dev) { return mca_dev->driver_loaded; } EXPORT_SYMBOL(mca_device_claimed); /** * mca_device_set_claim - set the claim value of the driver * @mca_dev: device to set value for * @val: claim value to set (1 claimed, 0 unclaimed) */ void mca_device_set_claim(struct mca_device *mca_dev, int val) { mca_dev->driver_loaded = val; } EXPORT_SYMBOL(mca_device_set_claim); /** * mca_device_status - get the status of the device * @mca_device: device to get * * returns an enumeration of the device status: * * MCA_ADAPTER_NORMAL adapter is OK. * MCA_ADAPTER_NONE no adapter at device (should never happen). * MCA_ADAPTER_DISABLED adapter is disabled. * MCA_ADAPTER_ERROR adapter cannot be initialised. */ enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev) { return mca_dev->status; } EXPORT_SYMBOL(mca_device_status); /** * mca_device_set_name - set the name of the device * @mca_device: device to set the name of * @name: name to set */ void mca_device_set_name(struct mca_device *mca_dev, const char *name) { if(!mca_dev) return; strlcpy(mca_dev->name, name, sizeof(mca_dev->name)); } EXPORT_SYMBOL(mca_device_set_name);
gpl-2.0
luckasfb/android_kernel_iocean_x7
arch/parisc/kernel/ftrace.c
10493
4360
/* * Code for tracing calls in Linux kernel. * Copyright (C) 2009 Helge Deller <deller@gmx.de> * * based on code for x86 which is: * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * * future possible enhancements: * - add CONFIG_DYNAMIC_FTRACE * - add CONFIG_STACK_TRACER */ #include <linux/init.h> #include <linux/ftrace.h> #include <asm/sections.h> #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Add a function return address to the trace stack on thread info.*/ static int push_return_trace(unsigned long ret, unsigned long long time, unsigned long func, int *depth) { int index; if (!current->ret_stack) return -EBUSY; /* The return trace stack is full */ if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { atomic_inc(&current->trace_overrun); return -EBUSY; } index = ++current->curr_ret_stack; barrier(); current->ret_stack[index].ret = ret; current->ret_stack[index].func = func; current->ret_stack[index].calltime = time; *depth = index; return 0; } /* Retrieve a function return address to the trace stack on thread info.*/ static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) { int index; index = current->curr_ret_stack; if (unlikely(index < 0)) { ftrace_graph_stop(); WARN_ON(1); /* Might as well panic, otherwise we have no where to go */ *ret = (unsigned long) dereference_function_descriptor(&panic); return; } *ret = current->ret_stack[index].ret; trace->func = current->ret_stack[index].func; trace->calltime = current->ret_stack[index].calltime; trace->overrun = atomic_read(&current->trace_overrun); trace->depth = index; barrier(); current->curr_ret_stack--; } /* * Send the trace to the ring-buffer. * @return the original return address. 
*/ unsigned long ftrace_return_to_handler(unsigned long retval0, unsigned long retval1) { struct ftrace_graph_ret trace; unsigned long ret; pop_return_trace(&trace, &ret); trace.rettime = local_clock(); ftrace_graph_return(&trace); if (unlikely(!ret)) { ftrace_graph_stop(); WARN_ON(1); /* Might as well panic. What else to do? */ ret = (unsigned long) dereference_function_descriptor(&panic); } /* HACK: we hand over the old functions' return values in %r23 and %r24. Assembly in entry.S will take care and move those to their final registers %ret0 and %ret1 */ asm( "copy %0, %%r23 \n\t" "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) ); return ret; } /* * Hook the return address and push it in the stack of return addrs * in current thread info. */ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; unsigned long long calltime; struct ftrace_graph_ent trace; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; old = *parent; *parent = (unsigned long) dereference_function_descriptor(&return_to_handler); if (unlikely(!__kernel_text_address(old))) { ftrace_graph_stop(); *parent = old; WARN_ON(1); return; } calltime = local_clock(); if (push_return_trace(old, calltime, self_addr, &trace.depth) == -EBUSY) { *parent = old; return; } trace.func = self_addr; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; *parent = old; } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ void ftrace_function_trampoline(unsigned long parent, unsigned long self_addr, unsigned long org_sp_gr3) { extern ftrace_func_t ftrace_trace_function; if (function_trace_stop) return; if (ftrace_trace_function != ftrace_stub) { ftrace_trace_function(parent, self_addr); return; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER if (ftrace_graph_entry && ftrace_graph_return) { unsigned long sp; unsigned long *parent_rp; asm volatile ("copy %%r30, %0" : "=r"(sp)); /* sanity check: is stack pointer which we 
got from assembler function in entry.S in a reasonable range compared to current stack pointer? */ if ((sp - org_sp_gr3) > 0x400) return; /* calculate pointer to %rp in stack */ parent_rp = (unsigned long *) org_sp_gr3 - 0x10; /* sanity check: parent_rp should hold parent */ if (*parent_rp != parent) return; prepare_ftrace_return(parent_rp, self_addr); return; } #endif }
gpl-2.0
Huexxx/zeus_external_busybox
networking/ipcalc.c
254
6071
/* vi: set sw=4 ts=4: */ /* * Mini ipcalc implementation for busybox * * By Jordan Crouse <jordan@cosmicpenguin.net> * Stephan Linz <linz@li-pro.net> * * This is a complete reimplementation of the ipcalc program * from Red Hat. I didn't look at their source code, but there * is no denying that this is a loving reimplementation * * Licensed under GPLv2 or later, see file LICENSE in this source tree. */ //usage:#define ipcalc_trivial_usage //usage: "[OPTIONS] ADDRESS[[/]NETMASK] [NETMASK]" //usage:#define ipcalc_full_usage "\n\n" //usage: "Calculate IP network settings from a IP address\n" //usage: IF_FEATURE_IPCALC_LONG_OPTIONS( //usage: "\n -b,--broadcast Display calculated broadcast address" //usage: "\n -n,--network Display calculated network address" //usage: "\n -m,--netmask Display default netmask for IP" //usage: IF_FEATURE_IPCALC_FANCY( //usage: "\n -p,--prefix Display the prefix for IP/NETMASK" //usage: "\n -h,--hostname Display first resolved host name" //usage: "\n -s,--silent Don't ever display error messages" //usage: ) //usage: ) //usage: IF_NOT_FEATURE_IPCALC_LONG_OPTIONS( //usage: "\n -b Display calculated broadcast address" //usage: "\n -n Display calculated network address" //usage: "\n -m Display default netmask for IP" //usage: IF_FEATURE_IPCALC_FANCY( //usage: "\n -p Display the prefix for IP/NETMASK" //usage: "\n -h Display first resolved host name" //usage: "\n -s Don't ever display error messages" //usage: ) //usage: ) #include "libbb.h" /* After libbb.h, because on some systems it needs other includes */ #include <arpa/inet.h> #define CLASS_A_NETMASK ntohl(0xFF000000) #define CLASS_B_NETMASK ntohl(0xFFFF0000) #define CLASS_C_NETMASK ntohl(0xFFFFFF00) static unsigned long get_netmask(unsigned long ipaddr) { ipaddr = htonl(ipaddr); if ((ipaddr & 0xC0000000) == 0xC0000000) return CLASS_C_NETMASK; else if ((ipaddr & 0x80000000) == 0x80000000) return CLASS_B_NETMASK; else if ((ipaddr & 0x80000000) == 0) return CLASS_A_NETMASK; else return 0; } 
#if ENABLE_FEATURE_IPCALC_FANCY static int get_prefix(unsigned long netmask) { unsigned long msk = 0x80000000; int ret = 0; netmask = htonl(netmask); while (msk) { if (netmask & msk) ret++; msk >>= 1; } return ret; } #else int get_prefix(unsigned long netmask); #endif #define NETMASK 0x01 #define BROADCAST 0x02 #define NETWORK 0x04 #define NETPREFIX 0x08 #define HOSTNAME 0x10 #define SILENT 0x20 #if ENABLE_FEATURE_IPCALC_LONG_OPTIONS static const char ipcalc_longopts[] ALIGN1 = "netmask\0" No_argument "m" // netmask from IP (assuming complete class A, B, or C network) "broadcast\0" No_argument "b" // broadcast from IP [netmask] "network\0" No_argument "n" // network from IP [netmask] # if ENABLE_FEATURE_IPCALC_FANCY "prefix\0" No_argument "p" // prefix from IP[/prefix] [netmask] "hostname\0" No_argument "h" // hostname from IP "silent\0" No_argument "s" // don’t ever display error messages # endif ; #endif int ipcalc_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; int ipcalc_main(int argc UNUSED_PARAM, char **argv) { unsigned opt; bool have_netmask = 0; struct in_addr s_netmask, s_broadcast, s_network, s_ipaddr; /* struct in_addr { in_addr_t s_addr; } and in_addr_t * (which in turn is just a typedef to uint32_t) * are essentially the same type. A few macros for less verbosity: */ #define netmask (s_netmask.s_addr) #define broadcast (s_broadcast.s_addr) #define network (s_network.s_addr) #define ipaddr (s_ipaddr.s_addr) char *ipstr; #if ENABLE_FEATURE_IPCALC_LONG_OPTIONS applet_long_options = ipcalc_longopts; #endif opt_complementary = "-1:?2"; /* minimum 1 arg, maximum 2 args */ opt = getopt32(argv, "mbn" IF_FEATURE_IPCALC_FANCY("phs")); argv += optind; if (opt & SILENT) logmode = LOGMODE_NONE; /* suppress error_msg() output */ opt &= ~SILENT; if (!(opt & (BROADCAST | NETWORK | NETPREFIX))) { /* if no options at all or * (no broadcast,network,prefix) and (two args)... 
*/ if (!opt || argv[1]) bb_show_usage(); } ipstr = argv[0]; if (ENABLE_FEATURE_IPCALC_FANCY) { unsigned long netprefix = 0; char *prefixstr; prefixstr = ipstr; while (*prefixstr) { if (*prefixstr == '/') { *prefixstr++ = '\0'; if (*prefixstr) { unsigned msk; netprefix = xatoul_range(prefixstr, 0, 32); netmask = 0; msk = 0x80000000; while (netprefix > 0) { netmask |= msk; msk >>= 1; netprefix--; } netmask = htonl(netmask); /* Even if it was 0, we will signify that we have a netmask. This allows */ /* for specification of default routes, etc which have a 0 netmask/prefix */ have_netmask = 1; } break; } prefixstr++; } } if (inet_aton(ipstr, &s_ipaddr) == 0) { bb_error_msg_and_die("bad IP address: %s", argv[0]); } if (argv[1]) { if (ENABLE_FEATURE_IPCALC_FANCY && have_netmask) { bb_error_msg_and_die("use prefix or netmask, not both"); } if (inet_aton(argv[1], &s_netmask) == 0) { bb_error_msg_and_die("bad netmask: %s", argv[1]); } } else { /* JHC - If the netmask wasn't provided then calculate it */ if (!ENABLE_FEATURE_IPCALC_FANCY || !have_netmask) netmask = get_netmask(ipaddr); } if (opt & NETMASK) { printf("NETMASK=%s\n", inet_ntoa(s_netmask)); } if (opt & BROADCAST) { broadcast = (ipaddr & netmask) | ~netmask; printf("BROADCAST=%s\n", inet_ntoa(s_broadcast)); } if (opt & NETWORK) { network = ipaddr & netmask; printf("NETWORK=%s\n", inet_ntoa(s_network)); } if (ENABLE_FEATURE_IPCALC_FANCY) { if (opt & NETPREFIX) { printf("PREFIX=%i\n", get_prefix(netmask)); } if (opt & HOSTNAME) { struct hostent *hostinfo; hostinfo = gethostbyaddr((char *) &ipaddr, sizeof(ipaddr), AF_INET); if (!hostinfo) { bb_herror_msg_and_die("can't find hostname for %s", argv[0]); } str_tolower(hostinfo->h_name); printf("HOSTNAME=%s\n", hostinfo->h_name); } } return EXIT_SUCCESS; }
gpl-2.0
Exodius/WoWSource434-1
dep/mysqllite/mysys/mf_arr_appstr.c
510
1830
/* Copyright (C) 2007 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

#include "mysys_priv.h"
#include <m_string.h>                           /* strcmp() */

/**
  Append str to array, or move it to the last used position if it is
  already present.

  @param  str    String to be appended
  @param  array  The array, terminated by a NULL element, all unused
                 elements pre-initialized to NULL
  @param  size   Size of the array; the array must be terminated by a
                 NULL pointer, so it can hold at most size - 1 strings

  @retval FALSE  Success
  @retval TRUE   Failure, array is full
*/

my_bool array_append_string_unique(const char *str,
                                   const char **array, size_t size)
{
  size_t i;
  /* The last slot is reserved for the terminating NULL. */
  const char **last= array + size - 1;

  DBUG_ASSERT(*last == NULL);

  /* Find str, or stop at the NULL that terminates the used portion. */
  i= 0;
  while (array[i] != NULL && strcmp(array[i], str) != 0)
    i++;

  if (array + i >= last)
    return TRUE;                                /* Array is full */

  DBUG_ASSERT(array[i] == NULL || strcmp(array[i], str) == 0);

  /* Close the gap at slot i, shifting the tail one step left. */
  while (array[i + 1] != NULL)
  {
    array[i]= array[i + 1];
    i++;
  }
  DBUG_ASSERT(array + i < last);
  array[i]= str;

  return FALSE;                                 /* Success */
}
gpl-2.0
romracer/atrix-kernel
drivers/staging/rtl8192e/r8192E_dm.c
510
132146
/*++ Copyright-c Realtek Semiconductor Corp. All rights reserved. Module Name: r8192U_dm.c Abstract: HW dynamic mechanism. Major Change History: When Who What ---------- --------------- ------------------------------- 2008-05-14 amy create version 0 porting from windows code. --*/ #include "r8192E.h" #include "r8192E_dm.h" #include "r8192E_hw.h" #include "r819xE_phy.h" #include "r819xE_phyreg.h" #include "r8190_rtl8256.h" /*---------------------------Define Local Constant---------------------------*/ // // Indicate different AP vendor for IOT issue. // #ifdef RTL8190P static u32 edca_setting_DL[HT_IOT_PEER_MAX] = { 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5e4322}; static u32 edca_setting_UL[HT_IOT_PEER_MAX] = { 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5e4322, 0x5e4322}; #else #ifdef RTL8192E static u32 edca_setting_DL[HT_IOT_PEER_MAX] = { 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5e4322}; static u32 edca_setting_UL[HT_IOT_PEER_MAX] = { 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5e4322, 0x5e4322}; #else static u32 edca_setting_DL[HT_IOT_PEER_MAX] = { 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5ea44f}; static u32 edca_setting_UL[HT_IOT_PEER_MAX] = { 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f}; #endif #endif #define RTK_UL_EDCA 0xa44f #define RTK_DL_EDCA 0x5e4322 /*---------------------------Define Local Constant---------------------------*/ /*------------------------Define global variable-----------------------------*/ // Debug variable ? dig_t dm_digtable; // Store current shoftware write register content for MAC PHY. 
u8 dm_shadow[16][256] = {{0}}; // For Dynamic Rx Path Selection by Signal Strength DRxPathSel DM_RxPathSelTable; /*------------------------Define global variable-----------------------------*/ /*------------------------Define local variable------------------------------*/ /*------------------------Define local variable------------------------------*/ /*--------------------Define export function prototype-----------------------*/ extern void init_hal_dm(struct net_device *dev); extern void deinit_hal_dm(struct net_device *dev); extern void hal_dm_watchdog(struct net_device *dev); extern void init_rate_adaptive(struct net_device *dev); extern void dm_txpower_trackingcallback(struct work_struct *work); extern void dm_cck_txpower_adjust(struct net_device *dev,bool binch14); extern void dm_restore_dynamic_mechanism_state(struct net_device *dev); extern void dm_backup_dynamic_mechanism_state(struct net_device *dev); extern void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type, u32 dm_value); extern void DM_ChangeFsyncSetting(struct net_device *dev, s32 DM_Type, s32 DM_Value); extern void dm_force_tx_fw_info(struct net_device *dev, u32 force_type, u32 force_value); extern void dm_init_edca_turbo(struct net_device *dev); extern void dm_rf_operation_test_callback(unsigned long data); extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work); extern void dm_fsync_timer_callback(unsigned long data); extern void dm_check_fsync(struct net_device *dev); extern void dm_shadow_init(struct net_device *dev); extern void dm_initialize_txpower_tracking(struct net_device *dev); #ifdef RTL8192E extern void dm_gpio_change_rf_callback(struct work_struct *work); #endif /*--------------------Define export function prototype-----------------------*/ /*---------------------Define local function prototype-----------------------*/ // DM --> Rate Adaptive static void dm_check_rate_adaptive(struct net_device *dev); // DM --> Bandwidth switch static void 
dm_init_bandwidth_autoswitch(struct net_device *dev); static void dm_bandwidth_autoswitch( struct net_device *dev); // DM --> TX power control //static void dm_initialize_txpower_tracking(struct net_device *dev); static void dm_check_txpower_tracking(struct net_device *dev); //static void dm_txpower_reset_recovery(struct net_device *dev); // DM --> BB init gain restore #ifndef RTL8192U static void dm_bb_initialgain_restore(struct net_device *dev); // DM --> BB init gain backup static void dm_bb_initialgain_backup(struct net_device *dev); #endif // DM --> Dynamic Init Gain by RSSI static void dm_dig_init(struct net_device *dev); static void dm_ctrl_initgain_byrssi(struct net_device *dev); static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev); static void dm_ctrl_initgain_byrssi_by_driverrssi( struct net_device *dev); static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev); static void dm_initial_gain(struct net_device *dev); static void dm_pd_th(struct net_device *dev); static void dm_cs_ratio(struct net_device *dev); static void dm_init_ctstoself(struct net_device *dev); // DM --> EDCA turboe mode control static void dm_check_edca_turbo(struct net_device *dev); // DM --> HW RF control static void dm_check_rfctrl_gpio(struct net_device *dev); #ifndef RTL8190P //static void dm_gpio_change_rf(struct net_device *dev); #endif // DM --> Check PBC static void dm_check_pbc_gpio(struct net_device *dev); // DM --> Check current RX RF path state static void dm_check_rx_path_selection(struct net_device *dev); static void dm_init_rxpath_selection(struct net_device *dev); static void dm_rxpath_sel_byrssi(struct net_device *dev); // DM --> Fsync for broadcom ap static void dm_init_fsync(struct net_device *dev); static void dm_deInit_fsync(struct net_device *dev); //Added by vivi, 20080522 static void dm_check_txrateandretrycount(struct net_device *dev); /*---------------------Define local function prototype-----------------------*/ 
/*---------------------Define of Tx Power Control For Near/Far Range --------*/
//Add by Jacken 2008/02/18
static void dm_init_dynamic_txpower(struct net_device *dev);
static void dm_dynamic_txpower(struct net_device *dev);

// DM --> For rate adaptive and DIG, we must send RSSI to firmware
static void dm_send_rssi_tofw(struct net_device *dev);
static void dm_ctstoself(struct net_device *dev);
/*---------------------------Define function prototype------------------------*/

//================================================================================
//	HW Dynamic mechanism interface.
//================================================================================

//
//	Description:
//		Prepare SW resource for HW dynamic mechanism: resets the smoothed
//		RSSI and runs every dm_init_* sub-module initializer.
//
//	Assumption:
//		This function is only invoked at driver initialization once.
//
void init_hal_dm(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	// Undecorated Smoothed Signal Strength; -1 marks "no sample yet".
	priv->undecorated_smoothed_pwdb = -1;

	// Initial TX Power Control for near/far range.
	dm_init_dynamic_txpower(dev);
	init_rate_adaptive(dev);
	//dm_initialize_txpower_tracking(dev);
	dm_dig_init(dev);
	dm_init_edca_turbo(dev);
	dm_init_bandwidth_autoswitch(dev);
	dm_init_fsync(dev);
	dm_init_rxpath_selection(dev);
	dm_init_ctstoself(dev);
#ifdef RTL8192E
	INIT_DELAYED_WORK(&priv->gpio_change_rf_wq, dm_gpio_change_rf_callback);
#endif
}	// init_hal_dm

// Tear down dynamic-mechanism state; only the fsync timer needs explicit
// deinitialization.
void deinit_hal_dm(struct net_device *dev)
{
	dm_deInit_fsync(dev);
}

#ifdef USB_RX_AGGREGATION_SUPPORT
// Enable or disable USB RX firmware aggregation (register 0x1a8) based on
// the unicast TX/RX byte throughput observed since the previous call.
// NOTE(review): uses function-local statics for the previous counters, so
// this assumes a single device instance — TODO confirm.
void dm_CheckRxAggregation(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
	PRT_HIGH_THROUGHPUT	pHTInfo = priv->ieee80211->pHTInfo;
	static unsigned long	lastTxOkCnt = 0;
	static unsigned long	lastRxOkCnt = 0;
	unsigned long		curTxOkCnt = 0;
	unsigned long		curRxOkCnt = 0;

/*
	if (pHalData->bForcedUsbRxAggr) {
		if (pHalData->ForcedUsbRxAggrInfo == 0) {
			if (pHalData->bCurrentRxAggrEnable) {
				Adapter->HalFunc.HalUsbRxAggrHandler(Adapter, FALSE);
			}
		} else {
			if (!pHalData->bCurrentRxAggrEnable || (pHalData->ForcedUsbRxAggrInfo != pHalData->LastUsbRxAggrInfoSetting)) {
				Adapter->HalFunc.HalUsbRxAggrHandler(Adapter, TRUE);
			}
		}
		return;
	}
*/
	curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
	curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;

	// Below ~15 MB of combined traffic, leave the current setting alone.
	if((curTxOkCnt + curRxOkCnt) < 15000000)
	{
		return;
	}

	// TX-dominated traffic: disable RX aggregation; otherwise enable it
	// (unless RT2RT aggregation is already active).
	if(curTxOkCnt > 4*curRxOkCnt)
	{
		if (priv->bCurrentRxAggrEnable)
		{
			write_nic_dword(dev, 0x1a8, 0);
			priv->bCurrentRxAggrEnable = false;
		}
	}else{
		if (!priv->bCurrentRxAggrEnable && !pHTInfo->bCurrentRT2RTAggregation)
		{
			u32 ulValue;
			// Pack enable flag, page number, packet number and timeout
			// into the aggregation control register value.
			ulValue = (pHTInfo->UsbRxFwAggrEn<<24) | (pHTInfo->UsbRxFwAggrPageNum<<16) |
				(pHTInfo->UsbRxFwAggrPacketNum<<8) | (pHTInfo->UsbRxFwAggrTimeout);
			/*
			 * If usb rx firmware aggregation is enabled,
			 * when anyone of three threshold conditions above is reached,
			 * firmware will send aggregated packet to driver.
			 */
			write_nic_dword(dev, 0x1a8, ulValue);
			priv->bCurrentRxAggrEnable = true;
		}
	}

	lastTxOkCnt = priv->stats.txbytesunicast;
	lastRxOkCnt = priv->stats.rxbytesunicast;
}	// dm_CheckRxAggregation (was mislabeled dm_CheckEdcaTurbo)
#endif

// Periodic dynamic-mechanism watchdog: runs every DM check in sequence.
// Presumably called from a driver timer/workqueue — TODO confirm caller.
void hal_dm_watchdog(struct net_device *dev)
{
	//struct r8192_priv *priv = ieee80211_priv(dev);
	//static u8 	previous_bssid[6] ={0};

	dm_check_rate_adaptive(dev);
	dm_dynamic_txpower(dev);
	dm_check_txrateandretrycount(dev);
	dm_check_txpower_tracking(dev);
	dm_ctrl_initgain_byrssi(dev);
	dm_check_edca_turbo(dev);
	dm_bandwidth_autoswitch(dev);
	dm_check_rfctrl_gpio(dev);
	dm_check_rx_path_selection(dev);
	dm_check_fsync(dev);
	dm_check_pbc_gpio(dev);
	dm_send_rssi_tofw(dev);
	dm_ctstoself(dev);
#ifdef USB_RX_AGGREGATION_SUPPORT
	dm_CheckRxAggregation(dev);
#endif
}	// hal_dm_watchdog

/*
 * Decide Rate Adaptive Set according to distance (signal strength)
 *	01/11/2008	MHC	Modify input arguments and RATR table level.
 *	01/16/2008	MHC	RF_Type is assigned in ReadAdapterInfo(). We must call
 *				the function after making sure RF_Type.
 *
 * Seeds the RSSI thresholds (with +5 hysteresis between the high/low pairs)
 * and the per-RF-type RATR bitmaps used by dm_check_rate_adaptive().
 */
void init_rate_adaptive(struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	prate_adaptive	pra = (prate_adaptive)&priv->rate_adaptive;

	pra->ratr_state = DM_RATR_STA_MAX;
	pra->high2low_rssi_thresh_for_ra = RateAdaptiveTH_High;
	pra->low2high_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M+5;
	pra->low2high_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M+5;
	pra->high_rssi_thresh_for_ra = RateAdaptiveTH_High+5;
	pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
	pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;

	// Ping-RSSI workaround is only enabled for the Netcore customer ID.
	if(priv->CustomerID == RT_CID_819x_Netcore)
		pra->ping_rssi_enable = 1;
	else
		pra->ping_rssi_enable = 0;
	pra->ping_rssi_thresh_for_ra = 15;

	if (priv->rf_type == RF_2T4R)
	{
		// 2T RATR tables for different RSSI levels.
		pra->upper_rssi_threshold_ratr		= 0x8f0f0000;
		pra->middle_rssi_threshold_ratr		= 0x8f0ff000;
		pra->low_rssi_threshold_ratr		= 0x8f0ff001;
		pra->low_rssi_threshold_ratr_40M	= 0x8f0ff005;
		pra->low_rssi_threshold_ratr_20M	= 0x8f0ff001;
		pra->ping_rssi_ratr			= 0x0000000d;//cosa add for test
	}
	else if (priv->rf_type == RF_1T2R)
	{
		pra->upper_rssi_threshold_ratr		= 0x000f0000;
		pra->middle_rssi_threshold_ratr		= 0x000ff000;
		pra->low_rssi_threshold_ratr		= 0x000ff001;
		pra->low_rssi_threshold_ratr_40M	= 0x000ff005;
		pra->low_rssi_threshold_ratr_20M	= 0x000ff001;
		pra->ping_rssi_ratr			= 0x0000000d;//cosa add for test
	}
}	// init_rate_adaptive

/*-----------------------------------------------------------------------------
 * Function:	dm_check_rate_adaptive()
 *
 * Overview:	Pick a target RATR bitmap from the current smoothed RSSI
 *		(with hysteresis between the HIGH/MIDDLE/LOW states) and write
 *		it to the RATR0 register when it differs from the current one.
 *		Only runs in 11n modes while the interface is up and linked.
 *
 * Input:	NONE
 * Output:	NONE
 * Return:	NONE
 *---------------------------------------------------------------------------*/
static void dm_check_rate_adaptive(struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	PRT_HIGH_THROUGHPUT	pHTInfo = priv->ieee80211->pHTInfo;
	prate_adaptive		pra = (prate_adaptive)&priv->rate_adaptive;
	u32			currentRATR, targetRATR = 0;
	u32			LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
	bool			bshort_gi_enabled = false;
	static u8		ping_rssi_state=0;

	if(!priv->up)
	{
		RT_TRACE(COMP_RATE, "<---- dm_check_rate_adaptive(): driver is going to unload\n");
		return;
	}

	if(pra->rate_adaptive_disabled)//this variable is set by ioctl.
		return;

	// TODO: Only 11n mode is implemented currently,
	if( !(priv->ieee80211->mode == WIRELESS_MODE_N_24G ||
		 priv->ieee80211->mode == WIRELESS_MODE_N_5G))
		return;

	if( priv->ieee80211->state == IEEE80211_LINKED )
	{
	//	RT_TRACE(COMP_RATE, "dm_CheckRateAdaptive(): \t");

		//
		// Check whether Short GI is enabled for the current bandwidth;
		// BIT31 of each RATR value mirrors that flag.
		//
		bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI40MHz) ||
			(!pHTInfo->bCurTxBW40MHz && pHTInfo->bCurShortGI20MHz);

		pra->upper_rssi_threshold_ratr =
			(pra->upper_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;

		pra->middle_rssi_threshold_ratr =
			(pra->middle_rssi_threshold_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;

		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
		{
			pra->low_rssi_threshold_ratr =
				(pra->low_rssi_threshold_ratr_40M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
		}
		else
		{
			pra->low_rssi_threshold_ratr =
				(pra->low_rssi_threshold_ratr_20M & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;
		}
		//cosa add for test
		pra->ping_rssi_ratr =
			(pra->ping_rssi_ratr & (~BIT31)) | ((bshort_gi_enabled)? BIT31:0) ;

		/* RA smooth scheme: after first link we keep different up/down
		   thresholds so the state does not flap when RSSI sits near a
		   boundary (hysteresis). */
		if (pra->ratr_state == DM_RATR_STA_HIGH)
		{
			HighRSSIThreshForRA	= pra->high2low_rssi_thresh_for_ra;
			LowRSSIThreshForRA	= (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
					(pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
		}
		else if (pra->ratr_state == DM_RATR_STA_LOW)
		{
			HighRSSIThreshForRA	= pra->high_rssi_thresh_for_ra;
			LowRSSIThreshForRA	= (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
					(pra->low2high_rssi_thresh_for_ra40M):(pra->low2high_rssi_thresh_for_ra20M);
		}
		else
		{
			HighRSSIThreshForRA	= pra->high_rssi_thresh_for_ra;
			LowRSSIThreshForRA	= (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)?
					(pra->low_rssi_thresh_for_ra40M):(pra->low_rssi_thresh_for_ra20M);
		}

		if(priv->undecorated_smoothed_pwdb >= (long)HighRSSIThreshForRA)
		{
			pra->ratr_state = DM_RATR_STA_HIGH;
			targetRATR = pra->upper_rssi_threshold_ratr;
		}else if(priv->undecorated_smoothed_pwdb >= (long)LowRSSIThreshForRA)
		{
			pra->ratr_state = DM_RATR_STA_MIDDLE;
			targetRATR = pra->middle_rssi_threshold_ratr;
		}else
		{
			pra->ratr_state = DM_RATR_STA_LOW;
			targetRATR = pra->low_rssi_threshold_ratr;
		}

		//cosa add for test: force the low-rate ping RATR while RSSI is
		// near the ping threshold (sticky via ping_rssi_state).
		if(pra->ping_rssi_enable)
		{
			if(priv->undecorated_smoothed_pwdb < (long)(pra->ping_rssi_thresh_for_ra+5))
			{
				if( (priv->undecorated_smoothed_pwdb < (long)pra->ping_rssi_thresh_for_ra) ||
					ping_rssi_state )
				{
					pra->ratr_state = DM_RATR_STA_LOW;
					targetRATR = pra->ping_rssi_ratr;
					ping_rssi_state = 1;
				}
			}
			else
			{
				ping_rssi_state = 0;
			}
		}

		// 2008.04.01
#if 1
		// For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
		if(priv->ieee80211->GetHalfNmodeSupportByAPsHandler(dev))
			targetRATR &=  0xf00fffff;
#endif

		//
		// Check whether updating of RATR0 is required
		//
		currentRATR = read_nic_dword(dev, RATR0);
		if( targetRATR !=  currentRATR )
		{
			u32 ratr_value;
			ratr_value = targetRATR;
			RT_TRACE(COMP_RATE,"currentRATR = %x, targetRATR = %x\n", currentRATR, targetRATR);
			if(priv->rf_type == RF_1T2R)
			{
				// 1T2R cannot use the 2-spatial-stream OFDM rates.
				ratr_value &= ~(RATE_ALL_OFDM_2SS);
			}
			write_nic_dword(dev, RATR0, ratr_value);
			// Notify firmware of the updated RATR.
			write_nic_byte(dev, UFWP, 1);

			pra->last_ratr = targetRATR;
		}
	}
	else
	{
		pra->ratr_state = DM_RATR_STA_MAX;
	}
}	// dm_check_rate_adaptive

// Initialize the 20<->40 MHz auto-switch thresholds; the feature starts
// disabled and un-forced.
static void dm_init_bandwidth_autoswitch(struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz = BW_AUTO_SWITCH_LOW_HIGH;
	priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz = BW_AUTO_SWITCH_HIGH_LOW;
	priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
	priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable = false;
}	// dm_init_bandwidth_autoswitch

// Toggle forced 20 MHz TX in a 20/40 channel based on smoothed RSSI,
// with separate up/down thresholds for hysteresis. No-op when already in
// a pure 20 MHz channel or when auto-switch is disabled.
static void dm_bandwidth_autoswitch(struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||!priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable){
		return;
	}else{
		if(priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false){//If send packets in 40 Mhz in 20/40
			if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
				priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
		}else{//in force send packets in 20 Mhz in 20/40
			if(priv->undecorated_smoothed_pwdb >= priv->ieee80211->bandwidth_auto_switch.threshold_20Mhzto40Mhz)
				priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
		}
	}
}	// dm_bandwidth_autoswitch

//OFDM default at 0db, index=6.
#ifndef RTL8190P static u32 OFDMSwingTable[OFDM_Table_Length] = { 0x7f8001fe, // 0, +6db 0x71c001c7, // 1, +5db 0x65400195, // 2, +4db 0x5a400169, // 3, +3db 0x50800142, // 4, +2db 0x47c0011f, // 5, +1db 0x40000100, // 6, +0db ===> default, upper for higher temprature, lower for low temprature 0x390000e4, // 7, -1db 0x32c000cb, // 8, -2db 0x2d4000b5, // 9, -3db 0x288000a2, // 10, -4db 0x24000090, // 11, -5db 0x20000080, // 12, -6db 0x1c800072, // 13, -7db 0x19800066, // 14, -8db 0x26c0005b, // 15, -9db 0x24400051, // 16, -10db 0x12000048, // 17, -11db 0x10000040 // 18, -12db }; static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = { {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, // 0, +0db ===> CCK40M default {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 1, -1db {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 2, -2db {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, // 3, -3db {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, // 4, -4db {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, // 5, -5db {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, // 6, -6db ===> CCK20M default {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, // 7, -7db {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, // 8, -8db {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, // 9, -9db {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 10, -10db {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01} // 11, -11db }; static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = { {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, // 0, +0db ===> CCK40M default {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 1, -1db {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 2, -2db {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, // 3, -3db {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, // 4, -4db {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, // 5, -5db {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 6, -6db ===> CCK20M default {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, // 7, 
-7db {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 8, -8db {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 9, -9db {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 10, -10db {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00} // 11, -11db }; #endif #define Pw_Track_Flag 0x11d #define Tssi_Mea_Value 0x13c #define Tssi_Report_Value1 0x134 #define Tssi_Report_Value2 0x13e #define FW_Busy_Flag 0x13f static void dm_TXPowerTrackingCallback_TSSI(struct net_device * dev) { struct r8192_priv *priv = ieee80211_priv(dev); bool bHighpowerstate, viviflag = FALSE; DCMD_TXCMD_T tx_cmd; u8 powerlevelOFDM24G; int i =0, j = 0, k = 0; u8 RF_Type, tmp_report[5]={0, 0, 0, 0, 0}; u32 Value; u8 Pwr_Flag; u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver=0; #ifdef RTL8192U RT_STATUS rtStatus = RT_STATUS_SUCCESS; #endif // bool rtStatus = true; u32 delta=0; RT_TRACE(COMP_POWER_TRACKING,"%s()\n",__FUNCTION__); // write_nic_byte(dev, 0x1ba, 0); write_nic_byte(dev, Pw_Track_Flag, 0); write_nic_byte(dev, FW_Busy_Flag, 0); priv->ieee80211->bdynamic_txpower_enable = false; bHighpowerstate = priv->bDynamicTxHighPower; powerlevelOFDM24G = (u8)(priv->Pwr_Track>>24); RF_Type = priv->rf_type; Value = (RF_Type<<8) | powerlevelOFDM24G; RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n", powerlevelOFDM24G); for(j = 0; j<=30; j++) { //fill tx_cmd tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING; tx_cmd.Length = 4; tx_cmd.Value = Value; #ifdef RTL8192U rtStatus = SendTxCommandPacket(dev, &tx_cmd, 12); if (rtStatus == RT_STATUS_FAILURE) { RT_TRACE(COMP_POWER_TRACKING, "Set configuration with tx cmd queue fail!\n"); } #else cmpk_message_handle_tx(dev, (u8*)&tx_cmd, DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T)); #endif mdelay(1); //DbgPrint("hi, vivi, strange\n"); for(i = 0;i <= 30; i++) { Pwr_Flag = read_nic_byte(dev, Pw_Track_Flag); if (Pwr_Flag == 0) { mdelay(1); continue; } Avg_TSSI_Meas = read_nic_word(dev, Tssi_Mea_Value); if(Avg_TSSI_Meas == 0) { write_nic_byte(dev, Pw_Track_Flag, 0); 
write_nic_byte(dev, FW_Busy_Flag, 0); return; } for(k = 0;k < 5; k++) { if(k !=4) tmp_report[k] = read_nic_byte(dev, Tssi_Report_Value1+k); else tmp_report[k] = read_nic_byte(dev, Tssi_Report_Value2); RT_TRACE(COMP_POWER_TRACKING, "TSSI_report_value = %d\n", tmp_report[k]); } //check if the report value is right for(k = 0;k < 5; k++) { if(tmp_report[k] <= 20) { viviflag =TRUE; break; } } if(viviflag ==TRUE) { write_nic_byte(dev, Pw_Track_Flag, 0); viviflag = FALSE; RT_TRACE(COMP_POWER_TRACKING, "we filted this data\n"); for(k = 0;k < 5; k++) tmp_report[k] = 0; break; } for(k = 0;k < 5; k++) { Avg_TSSI_Meas_from_driver += tmp_report[k]; } Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver*100/5; RT_TRACE(COMP_POWER_TRACKING, "Avg_TSSI_Meas_from_driver = %d\n", Avg_TSSI_Meas_from_driver); TSSI_13dBm = priv->TSSI_13dBm; RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n", TSSI_13dBm); //if(abs(Avg_TSSI_Meas_from_driver - TSSI_13dBm) <= E_FOR_TX_POWER_TRACK) // For MacOS-compatible if(Avg_TSSI_Meas_from_driver > TSSI_13dBm) delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm; else delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver; if(delta <= E_FOR_TX_POWER_TRACK) { priv->ieee80211->bdynamic_txpower_enable = TRUE; write_nic_byte(dev, Pw_Track_Flag, 0); write_nic_byte(dev, FW_Busy_Flag, 0); RT_TRACE(COMP_POWER_TRACKING, "tx power track is done\n"); RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex); RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real); #ifdef RTL8190P RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex = %d\n", priv->rfc_txpowertrackingindex); RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex_real = %d\n", priv->rfc_txpowertrackingindex_real); #endif RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_difference = %d\n", priv->CCKPresentAttentuation_difference); RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation 
= %d\n", priv->CCKPresentAttentuation); return; } else { if(Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK) { if (RF_Type == RF_2T4R) { if((priv->rfa_txpowertrackingindex > 0) &&(priv->rfc_txpowertrackingindex > 0)) { priv->rfa_txpowertrackingindex--; if(priv->rfa_txpowertrackingindex_real > 4) { priv->rfa_txpowertrackingindex_real--; rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value); } priv->rfc_txpowertrackingindex--; if(priv->rfc_txpowertrackingindex_real > 4) { priv->rfc_txpowertrackingindex_real--; rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value); } } else { rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[4].txbbgain_value); rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[4].txbbgain_value); } } else { if(priv->rfc_txpowertrackingindex > 0) { priv->rfc_txpowertrackingindex--; if(priv->rfc_txpowertrackingindex_real > 4) { priv->rfc_txpowertrackingindex_real--; rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value); } } else rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[4].txbbgain_value); } } else { if (RF_Type == RF_2T4R) { if((priv->rfa_txpowertrackingindex < TxBBGainTableLength - 1) &&(priv->rfc_txpowertrackingindex < TxBBGainTableLength - 1)) { priv->rfa_txpowertrackingindex++; priv->rfa_txpowertrackingindex_real++; rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfa_txpowertrackingindex_real].txbbgain_value); priv->rfc_txpowertrackingindex++; priv->rfc_txpowertrackingindex_real++; rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value); } else { rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, 
bMaskDWord, priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value); rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value); } } else { if(priv->rfc_txpowertrackingindex < (TxBBGainTableLength - 1)) { priv->rfc_txpowertrackingindex++; priv->rfc_txpowertrackingindex_real++; rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex_real].txbbgain_value); } else rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[TxBBGainTableLength - 1].txbbgain_value); } } if (RF_Type == RF_2T4R) priv->CCKPresentAttentuation_difference = priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default; else priv->CCKPresentAttentuation_difference = priv->rfc_txpowertrackingindex - priv->rfc_txpowertracking_default; if(priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20) priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference; else priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference; if(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1)) priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; if(priv->CCKPresentAttentuation < 0) priv->CCKPresentAttentuation = 0; if(1) { if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = TRUE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = FALSE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex = %d\n", priv->rfa_txpowertrackingindex); RT_TRACE(COMP_POWER_TRACKING, "priv->rfa_txpowertrackingindex_real = %d\n", priv->rfa_txpowertrackingindex_real); #ifdef RTL8190P RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex 
= %d\n", priv->rfc_txpowertrackingindex); RT_TRACE(COMP_POWER_TRACKING, "priv->rfc_txpowertrackingindex_real = %d\n", priv->rfc_txpowertrackingindex_real); #endif RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation_difference = %d\n", priv->CCKPresentAttentuation_difference); RT_TRACE(COMP_POWER_TRACKING, "priv->CCKPresentAttentuation = %d\n", priv->CCKPresentAttentuation); if (priv->CCKPresentAttentuation_difference <= -12||priv->CCKPresentAttentuation_difference >= 24) { priv->ieee80211->bdynamic_txpower_enable = TRUE; write_nic_byte(dev, Pw_Track_Flag, 0); write_nic_byte(dev, FW_Busy_Flag, 0); RT_TRACE(COMP_POWER_TRACKING, "tx power track--->limited\n"); return; } } write_nic_byte(dev, Pw_Track_Flag, 0); Avg_TSSI_Meas_from_driver = 0; for(k = 0;k < 5; k++) tmp_report[k] = 0; break; } write_nic_byte(dev, FW_Busy_Flag, 0); } priv->ieee80211->bdynamic_txpower_enable = TRUE; write_nic_byte(dev, Pw_Track_Flag, 0); } #ifndef RTL8190P static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device * dev) { #define ThermalMeterVal 9 struct r8192_priv *priv = ieee80211_priv(dev); u32 tmpRegA, TempCCk; u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval; int i =0, CCKSwingNeedUpdate=0; if(!priv->btxpower_trackingInit) { //Query OFDM default setting tmpRegA= rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance, bMaskDWord); for(i=0; i<OFDM_Table_Length; i++) //find the index { if(tmpRegA == OFDMSwingTable[i]) { priv->OFDM_index= (u8)i; RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, OFDM_index=0x%x\n", rOFDM0_XATxIQImbalance, tmpRegA, priv->OFDM_index); } } //Query CCK default setting From 0xa22 TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2); for(i=0 ; i<CCK_Table_length ; i++) { if(TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) { priv->CCK_index =(u8) i; RT_TRACE(COMP_POWER_TRACKING, "Initial reg0x%x = 0x%x, CCK_index=0x%x\n", rCCK0_TxFilter1, TempCCk, priv->CCK_index); break; } } priv->btxpower_trackingInit = TRUE; 
//pHalData->TXPowercount = 0; return; } // read and filter out unreasonable value tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078); // 0x12: RF Reg[10:7] RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d \n", tmpRegA); if(tmpRegA < 3 || tmpRegA > 13) return; if(tmpRegA >= 12) // if over 12, TP will be bad when high temprature tmpRegA = 12; RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d \n", tmpRegA); priv->ThermalMeter[0] = ThermalMeterVal; //We use fixed value by Bryant's suggestion priv->ThermalMeter[1] = ThermalMeterVal; //We use fixed value by Bryant's suggestion //Get current RF-A temprature index if(priv->ThermalMeter[0] >= (u8)tmpRegA) //lower temprature { tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0]-(u8)tmpRegA); tmpCCK40Mindex = tmpCCK20Mindex - 6; if(tmpOFDMindex >= OFDM_Table_Length) tmpOFDMindex = OFDM_Table_Length-1; if(tmpCCK20Mindex >= CCK_Table_length) tmpCCK20Mindex = CCK_Table_length-1; if(tmpCCK40Mindex >= CCK_Table_length) tmpCCK40Mindex = CCK_Table_length-1; } else { tmpval = ((u8)tmpRegA - priv->ThermalMeter[0]); if(tmpval >= 6) // higher temprature tmpOFDMindex = tmpCCK20Mindex = 0; // max to +6dB else tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval; tmpCCK40Mindex = 0; } //DbgPrint("%ddb, tmpOFDMindex = %d, tmpCCK20Mindex = %d, tmpCCK40Mindex = %d", //((u1Byte)tmpRegA - pHalData->ThermalMeter[0]), //tmpOFDMindex, tmpCCK20Mindex, tmpCCK40Mindex); if(priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) //40M tmpCCKindex = tmpCCK40Mindex; else tmpCCKindex = tmpCCK20Mindex; //record for bandwidth swith priv->Record_CCK_20Mindex = tmpCCK20Mindex; priv->Record_CCK_40Mindex = tmpCCK40Mindex; RT_TRACE(COMP_POWER_TRACKING, "Record_CCK_20Mindex / Record_CCK_40Mindex = %d / %d.\n", priv->Record_CCK_20Mindex, priv->Record_CCK_40Mindex); if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = TRUE; CCKSwingNeedUpdate = 1; } else if(priv->ieee80211->current_network.channel != 14 && 
priv->bcck_in_ch14)
	{
		priv->bcck_in_ch14 = FALSE;
		CCKSwingNeedUpdate = 1;
	}

	/* Re-program CCK swing if the index or the ch14 flag changed. */
	if(priv->CCK_index != tmpCCKindex)
	{
		priv->CCK_index = tmpCCKindex;
		CCKSwingNeedUpdate = 1;
	}

	if(CCKSwingNeedUpdate)
	{
		//DbgPrint("Update CCK Swing, CCK_index = %d\n", pHalData->CCK_index);
		dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
	}
	/* Re-program the OFDM IQ-imbalance register only on index change. */
	if(priv->OFDM_index != tmpOFDMindex)
	{
		priv->OFDM_index = tmpOFDMindex;
		rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord, OFDMSwingTable[priv->OFDM_index]);
		RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
			priv->OFDM_index, OFDMSwingTable[priv->OFDM_index]);
	}
	priv->txpower_count = 0;
}
#endif

/*
 * dm_txpower_trackingcallback - delayed-work entry point for TX power
 * tracking.  Recovers the driver context from the embedded delayed_work
 * and dispatches to the TSSI routine (RTL8190P builds, or IC cut >= D)
 * or to the thermal-meter routine (older cuts).
 */
void dm_txpower_trackingcallback(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work,struct delayed_work,work);
	struct r8192_priv *priv = container_of(dwork,struct r8192_priv,txpower_tracking_wq);
	struct net_device *dev = priv->ieee80211->dev;

#ifdef RTL8190P
	dm_TXPowerTrackingCallback_TSSI(dev);
#else
	//if(priv->bDcut == TRUE)
	if(priv->IC_Cut >= IC_VersionCut_D)
		dm_TXPowerTrackingCallback_TSSI(dev);
	else
		dm_TXPowerTrackingCallback_ThermalMeter(dev);
#endif
}

/*
 * dm_InitializeTXPowerTracking_TSSI - build the static TSSI gain tables.
 *
 * Each txbbgain_table entry maps an IQ-amplify gain in dB (+12 down to
 * -24 over the following lines) to the raw OFDM BB register value; the
 * CCK tables that follow hold the 0xa22..0xa29 filter coefficients per
 * attenuation step (separate table for channel 14).
 */
static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	//Initial the Tx BB index and mapping value
	priv->txbbgain_table[0].txbb_iq_amplifygain = 12;
	priv->txbbgain_table[0].txbbgain_value=0x7f8001fe;
	priv->txbbgain_table[1].txbb_iq_amplifygain = 11;
	priv->txbbgain_table[1].txbbgain_value=0x788001e2;
	priv->txbbgain_table[2].txbb_iq_amplifygain = 10;
	priv->txbbgain_table[2].txbbgain_value=0x71c001c7;
	priv->txbbgain_table[3].txbb_iq_amplifygain = 9;
	priv->txbbgain_table[3].txbbgain_value=0x6b8001ae;
	priv->txbbgain_table[4].txbb_iq_amplifygain = 8;
	priv->txbbgain_table[4].txbbgain_value=0x65400195;
	priv->txbbgain_table[5].txbb_iq_amplifygain = 7;
	priv->txbbgain_table[5].txbbgain_value=0x5fc0017f;
	priv->txbbgain_table[6].txbb_iq_amplifygain = 6;
	priv->txbbgain_table[6].txbbgain_value=0x5a400169;
priv->txbbgain_table[7].txbb_iq_amplifygain = 5; priv->txbbgain_table[7].txbbgain_value=0x55400155; priv->txbbgain_table[8].txbb_iq_amplifygain = 4; priv->txbbgain_table[8].txbbgain_value=0x50800142; priv->txbbgain_table[9].txbb_iq_amplifygain = 3; priv->txbbgain_table[9].txbbgain_value=0x4c000130; priv->txbbgain_table[10].txbb_iq_amplifygain = 2; priv->txbbgain_table[10].txbbgain_value=0x47c0011f; priv->txbbgain_table[11].txbb_iq_amplifygain = 1; priv->txbbgain_table[11].txbbgain_value=0x43c0010f; priv->txbbgain_table[12].txbb_iq_amplifygain = 0; priv->txbbgain_table[12].txbbgain_value=0x40000100; priv->txbbgain_table[13].txbb_iq_amplifygain = -1; priv->txbbgain_table[13].txbbgain_value=0x3c8000f2; priv->txbbgain_table[14].txbb_iq_amplifygain = -2; priv->txbbgain_table[14].txbbgain_value=0x390000e4; priv->txbbgain_table[15].txbb_iq_amplifygain = -3; priv->txbbgain_table[15].txbbgain_value=0x35c000d7; priv->txbbgain_table[16].txbb_iq_amplifygain = -4; priv->txbbgain_table[16].txbbgain_value=0x32c000cb; priv->txbbgain_table[17].txbb_iq_amplifygain = -5; priv->txbbgain_table[17].txbbgain_value=0x300000c0; priv->txbbgain_table[18].txbb_iq_amplifygain = -6; priv->txbbgain_table[18].txbbgain_value=0x2d4000b5; priv->txbbgain_table[19].txbb_iq_amplifygain = -7; priv->txbbgain_table[19].txbbgain_value=0x2ac000ab; priv->txbbgain_table[20].txbb_iq_amplifygain = -8; priv->txbbgain_table[20].txbbgain_value=0x288000a2; priv->txbbgain_table[21].txbb_iq_amplifygain = -9; priv->txbbgain_table[21].txbbgain_value=0x26000098; priv->txbbgain_table[22].txbb_iq_amplifygain = -10; priv->txbbgain_table[22].txbbgain_value=0x24000090; priv->txbbgain_table[23].txbb_iq_amplifygain = -11; priv->txbbgain_table[23].txbbgain_value=0x22000088; priv->txbbgain_table[24].txbb_iq_amplifygain = -12; priv->txbbgain_table[24].txbbgain_value=0x20000080; priv->txbbgain_table[25].txbb_iq_amplifygain = -13; priv->txbbgain_table[25].txbbgain_value=0x1a00006c; priv->txbbgain_table[26].txbb_iq_amplifygain = 
-14; priv->txbbgain_table[26].txbbgain_value=0x1c800072; priv->txbbgain_table[27].txbb_iq_amplifygain = -15; priv->txbbgain_table[27].txbbgain_value=0x18000060; priv->txbbgain_table[28].txbb_iq_amplifygain = -16; priv->txbbgain_table[28].txbbgain_value=0x19800066; priv->txbbgain_table[29].txbb_iq_amplifygain = -17; priv->txbbgain_table[29].txbbgain_value=0x15800056; priv->txbbgain_table[30].txbb_iq_amplifygain = -18; priv->txbbgain_table[30].txbbgain_value=0x26c0005b; priv->txbbgain_table[31].txbb_iq_amplifygain = -19; priv->txbbgain_table[31].txbbgain_value=0x14400051; priv->txbbgain_table[32].txbb_iq_amplifygain = -20; priv->txbbgain_table[32].txbbgain_value=0x24400051; priv->txbbgain_table[33].txbb_iq_amplifygain = -21; priv->txbbgain_table[33].txbbgain_value=0x1300004c; priv->txbbgain_table[34].txbb_iq_amplifygain = -22; priv->txbbgain_table[34].txbbgain_value=0x12000048; priv->txbbgain_table[35].txbb_iq_amplifygain = -23; priv->txbbgain_table[35].txbbgain_value=0x11000044; priv->txbbgain_table[36].txbb_iq_amplifygain = -24; priv->txbbgain_table[36].txbbgain_value=0x10000040; //ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29 //This Table is for CH1~CH13 priv->cck_txbbgain_table[0].ccktxbb_valuearray[0] = 0x36; priv->cck_txbbgain_table[0].ccktxbb_valuearray[1] = 0x35; priv->cck_txbbgain_table[0].ccktxbb_valuearray[2] = 0x2e; priv->cck_txbbgain_table[0].ccktxbb_valuearray[3] = 0x25; priv->cck_txbbgain_table[0].ccktxbb_valuearray[4] = 0x1c; priv->cck_txbbgain_table[0].ccktxbb_valuearray[5] = 0x12; priv->cck_txbbgain_table[0].ccktxbb_valuearray[6] = 0x09; priv->cck_txbbgain_table[0].ccktxbb_valuearray[7] = 0x04; priv->cck_txbbgain_table[1].ccktxbb_valuearray[0] = 0x33; priv->cck_txbbgain_table[1].ccktxbb_valuearray[1] = 0x32; priv->cck_txbbgain_table[1].ccktxbb_valuearray[2] = 0x2b; priv->cck_txbbgain_table[1].ccktxbb_valuearray[3] = 0x23; priv->cck_txbbgain_table[1].ccktxbb_valuearray[4] = 0x1a; priv->cck_txbbgain_table[1].ccktxbb_valuearray[5] = 0x11; 
priv->cck_txbbgain_table[1].ccktxbb_valuearray[6] = 0x08; priv->cck_txbbgain_table[1].ccktxbb_valuearray[7] = 0x04; priv->cck_txbbgain_table[2].ccktxbb_valuearray[0] = 0x30; priv->cck_txbbgain_table[2].ccktxbb_valuearray[1] = 0x2f; priv->cck_txbbgain_table[2].ccktxbb_valuearray[2] = 0x29; priv->cck_txbbgain_table[2].ccktxbb_valuearray[3] = 0x21; priv->cck_txbbgain_table[2].ccktxbb_valuearray[4] = 0x19; priv->cck_txbbgain_table[2].ccktxbb_valuearray[5] = 0x10; priv->cck_txbbgain_table[2].ccktxbb_valuearray[6] = 0x08; priv->cck_txbbgain_table[2].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[3].ccktxbb_valuearray[0] = 0x2d; priv->cck_txbbgain_table[3].ccktxbb_valuearray[1] = 0x2d; priv->cck_txbbgain_table[3].ccktxbb_valuearray[2] = 0x27; priv->cck_txbbgain_table[3].ccktxbb_valuearray[3] = 0x1f; priv->cck_txbbgain_table[3].ccktxbb_valuearray[4] = 0x18; priv->cck_txbbgain_table[3].ccktxbb_valuearray[5] = 0x0f; priv->cck_txbbgain_table[3].ccktxbb_valuearray[6] = 0x08; priv->cck_txbbgain_table[3].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[4].ccktxbb_valuearray[0] = 0x2b; priv->cck_txbbgain_table[4].ccktxbb_valuearray[1] = 0x2a; priv->cck_txbbgain_table[4].ccktxbb_valuearray[2] = 0x25; priv->cck_txbbgain_table[4].ccktxbb_valuearray[3] = 0x1e; priv->cck_txbbgain_table[4].ccktxbb_valuearray[4] = 0x16; priv->cck_txbbgain_table[4].ccktxbb_valuearray[5] = 0x0e; priv->cck_txbbgain_table[4].ccktxbb_valuearray[6] = 0x07; priv->cck_txbbgain_table[4].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[5].ccktxbb_valuearray[0] = 0x28; priv->cck_txbbgain_table[5].ccktxbb_valuearray[1] = 0x28; priv->cck_txbbgain_table[5].ccktxbb_valuearray[2] = 0x22; priv->cck_txbbgain_table[5].ccktxbb_valuearray[3] = 0x1c; priv->cck_txbbgain_table[5].ccktxbb_valuearray[4] = 0x15; priv->cck_txbbgain_table[5].ccktxbb_valuearray[5] = 0x0d; priv->cck_txbbgain_table[5].ccktxbb_valuearray[6] = 0x07; priv->cck_txbbgain_table[5].ccktxbb_valuearray[7] = 0x03; 
priv->cck_txbbgain_table[6].ccktxbb_valuearray[0] = 0x26; priv->cck_txbbgain_table[6].ccktxbb_valuearray[1] = 0x25; priv->cck_txbbgain_table[6].ccktxbb_valuearray[2] = 0x21; priv->cck_txbbgain_table[6].ccktxbb_valuearray[3] = 0x1b; priv->cck_txbbgain_table[6].ccktxbb_valuearray[4] = 0x14; priv->cck_txbbgain_table[6].ccktxbb_valuearray[5] = 0x0d; priv->cck_txbbgain_table[6].ccktxbb_valuearray[6] = 0x06; priv->cck_txbbgain_table[6].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[7].ccktxbb_valuearray[0] = 0x24; priv->cck_txbbgain_table[7].ccktxbb_valuearray[1] = 0x23; priv->cck_txbbgain_table[7].ccktxbb_valuearray[2] = 0x1f; priv->cck_txbbgain_table[7].ccktxbb_valuearray[3] = 0x19; priv->cck_txbbgain_table[7].ccktxbb_valuearray[4] = 0x13; priv->cck_txbbgain_table[7].ccktxbb_valuearray[5] = 0x0c; priv->cck_txbbgain_table[7].ccktxbb_valuearray[6] = 0x06; priv->cck_txbbgain_table[7].ccktxbb_valuearray[7] = 0x03; priv->cck_txbbgain_table[8].ccktxbb_valuearray[0] = 0x22; priv->cck_txbbgain_table[8].ccktxbb_valuearray[1] = 0x21; priv->cck_txbbgain_table[8].ccktxbb_valuearray[2] = 0x1d; priv->cck_txbbgain_table[8].ccktxbb_valuearray[3] = 0x18; priv->cck_txbbgain_table[8].ccktxbb_valuearray[4] = 0x11; priv->cck_txbbgain_table[8].ccktxbb_valuearray[5] = 0x0b; priv->cck_txbbgain_table[8].ccktxbb_valuearray[6] = 0x06; priv->cck_txbbgain_table[8].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[9].ccktxbb_valuearray[0] = 0x20; priv->cck_txbbgain_table[9].ccktxbb_valuearray[1] = 0x20; priv->cck_txbbgain_table[9].ccktxbb_valuearray[2] = 0x1b; priv->cck_txbbgain_table[9].ccktxbb_valuearray[3] = 0x16; priv->cck_txbbgain_table[9].ccktxbb_valuearray[4] = 0x11; priv->cck_txbbgain_table[9].ccktxbb_valuearray[5] = 0x08; priv->cck_txbbgain_table[9].ccktxbb_valuearray[6] = 0x05; priv->cck_txbbgain_table[9].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[10].ccktxbb_valuearray[0] = 0x1f; priv->cck_txbbgain_table[10].ccktxbb_valuearray[1] = 0x1e; 
priv->cck_txbbgain_table[10].ccktxbb_valuearray[2] = 0x1a; priv->cck_txbbgain_table[10].ccktxbb_valuearray[3] = 0x15; priv->cck_txbbgain_table[10].ccktxbb_valuearray[4] = 0x10; priv->cck_txbbgain_table[10].ccktxbb_valuearray[5] = 0x0a; priv->cck_txbbgain_table[10].ccktxbb_valuearray[6] = 0x05; priv->cck_txbbgain_table[10].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[11].ccktxbb_valuearray[0] = 0x1d; priv->cck_txbbgain_table[11].ccktxbb_valuearray[1] = 0x1c; priv->cck_txbbgain_table[11].ccktxbb_valuearray[2] = 0x18; priv->cck_txbbgain_table[11].ccktxbb_valuearray[3] = 0x14; priv->cck_txbbgain_table[11].ccktxbb_valuearray[4] = 0x0f; priv->cck_txbbgain_table[11].ccktxbb_valuearray[5] = 0x0a; priv->cck_txbbgain_table[11].ccktxbb_valuearray[6] = 0x05; priv->cck_txbbgain_table[11].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[12].ccktxbb_valuearray[0] = 0x1b; priv->cck_txbbgain_table[12].ccktxbb_valuearray[1] = 0x1a; priv->cck_txbbgain_table[12].ccktxbb_valuearray[2] = 0x17; priv->cck_txbbgain_table[12].ccktxbb_valuearray[3] = 0x13; priv->cck_txbbgain_table[12].ccktxbb_valuearray[4] = 0x0e; priv->cck_txbbgain_table[12].ccktxbb_valuearray[5] = 0x09; priv->cck_txbbgain_table[12].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[12].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[13].ccktxbb_valuearray[0] = 0x1a; priv->cck_txbbgain_table[13].ccktxbb_valuearray[1] = 0x19; priv->cck_txbbgain_table[13].ccktxbb_valuearray[2] = 0x16; priv->cck_txbbgain_table[13].ccktxbb_valuearray[3] = 0x12; priv->cck_txbbgain_table[13].ccktxbb_valuearray[4] = 0x0d; priv->cck_txbbgain_table[13].ccktxbb_valuearray[5] = 0x09; priv->cck_txbbgain_table[13].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[13].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[14].ccktxbb_valuearray[0] = 0x18; priv->cck_txbbgain_table[14].ccktxbb_valuearray[1] = 0x17; priv->cck_txbbgain_table[14].ccktxbb_valuearray[2] = 0x15; priv->cck_txbbgain_table[14].ccktxbb_valuearray[3] = 
0x11; priv->cck_txbbgain_table[14].ccktxbb_valuearray[4] = 0x0c; priv->cck_txbbgain_table[14].ccktxbb_valuearray[5] = 0x08; priv->cck_txbbgain_table[14].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[14].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[15].ccktxbb_valuearray[0] = 0x17; priv->cck_txbbgain_table[15].ccktxbb_valuearray[1] = 0x16; priv->cck_txbbgain_table[15].ccktxbb_valuearray[2] = 0x13; priv->cck_txbbgain_table[15].ccktxbb_valuearray[3] = 0x10; priv->cck_txbbgain_table[15].ccktxbb_valuearray[4] = 0x0c; priv->cck_txbbgain_table[15].ccktxbb_valuearray[5] = 0x08; priv->cck_txbbgain_table[15].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[15].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[16].ccktxbb_valuearray[0] = 0x16; priv->cck_txbbgain_table[16].ccktxbb_valuearray[1] = 0x15; priv->cck_txbbgain_table[16].ccktxbb_valuearray[2] = 0x12; priv->cck_txbbgain_table[16].ccktxbb_valuearray[3] = 0x0f; priv->cck_txbbgain_table[16].ccktxbb_valuearray[4] = 0x0b; priv->cck_txbbgain_table[16].ccktxbb_valuearray[5] = 0x07; priv->cck_txbbgain_table[16].ccktxbb_valuearray[6] = 0x04; priv->cck_txbbgain_table[16].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[17].ccktxbb_valuearray[0] = 0x14; priv->cck_txbbgain_table[17].ccktxbb_valuearray[1] = 0x14; priv->cck_txbbgain_table[17].ccktxbb_valuearray[2] = 0x11; priv->cck_txbbgain_table[17].ccktxbb_valuearray[3] = 0x0e; priv->cck_txbbgain_table[17].ccktxbb_valuearray[4] = 0x0b; priv->cck_txbbgain_table[17].ccktxbb_valuearray[5] = 0x07; priv->cck_txbbgain_table[17].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[17].ccktxbb_valuearray[7] = 0x02; priv->cck_txbbgain_table[18].ccktxbb_valuearray[0] = 0x13; priv->cck_txbbgain_table[18].ccktxbb_valuearray[1] = 0x13; priv->cck_txbbgain_table[18].ccktxbb_valuearray[2] = 0x10; priv->cck_txbbgain_table[18].ccktxbb_valuearray[3] = 0x0d; priv->cck_txbbgain_table[18].ccktxbb_valuearray[4] = 0x0a; 
priv->cck_txbbgain_table[18].ccktxbb_valuearray[5] = 0x06; priv->cck_txbbgain_table[18].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[18].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[19].ccktxbb_valuearray[0] = 0x12; priv->cck_txbbgain_table[19].ccktxbb_valuearray[1] = 0x12; priv->cck_txbbgain_table[19].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_table[19].ccktxbb_valuearray[3] = 0x0c; priv->cck_txbbgain_table[19].ccktxbb_valuearray[4] = 0x09; priv->cck_txbbgain_table[19].ccktxbb_valuearray[5] = 0x06; priv->cck_txbbgain_table[19].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[19].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[20].ccktxbb_valuearray[0] = 0x11; priv->cck_txbbgain_table[20].ccktxbb_valuearray[1] = 0x11; priv->cck_txbbgain_table[20].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_table[20].ccktxbb_valuearray[3] = 0x0c; priv->cck_txbbgain_table[20].ccktxbb_valuearray[4] = 0x09; priv->cck_txbbgain_table[20].ccktxbb_valuearray[5] = 0x06; priv->cck_txbbgain_table[20].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[20].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[21].ccktxbb_valuearray[0] = 0x10; priv->cck_txbbgain_table[21].ccktxbb_valuearray[1] = 0x10; priv->cck_txbbgain_table[21].ccktxbb_valuearray[2] = 0x0e; priv->cck_txbbgain_table[21].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_table[21].ccktxbb_valuearray[4] = 0x08; priv->cck_txbbgain_table[21].ccktxbb_valuearray[5] = 0x05; priv->cck_txbbgain_table[21].ccktxbb_valuearray[6] = 0x03; priv->cck_txbbgain_table[21].ccktxbb_valuearray[7] = 0x01; priv->cck_txbbgain_table[22].ccktxbb_valuearray[0] = 0x0f; priv->cck_txbbgain_table[22].ccktxbb_valuearray[1] = 0x0f; priv->cck_txbbgain_table[22].ccktxbb_valuearray[2] = 0x0d; priv->cck_txbbgain_table[22].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_table[22].ccktxbb_valuearray[4] = 0x08; priv->cck_txbbgain_table[22].ccktxbb_valuearray[5] = 0x05; priv->cck_txbbgain_table[22].ccktxbb_valuearray[6] = 
0x03; priv->cck_txbbgain_table[22].ccktxbb_valuearray[7] = 0x01; //ccktxbb_valuearray[0] is 0xA22 [1] is 0xA24 ...[7] is 0xA29 //This Table is for CH14 priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[0] = 0x36; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[1] = 0x35; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[2] = 0x2e; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[3] = 0x1b; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[0].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[0] = 0x33; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[1] = 0x32; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[2] = 0x2b; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[3] = 0x19; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[1].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[0] = 0x30; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[1] = 0x2f; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[2] = 0x29; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[3] = 0x18; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[2].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[0] = 0x2d; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[1] = 0x2d; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[2] = 0x27; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[3] = 0x17; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[4] = 0x00; 
priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[3].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[0] = 0x2b; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[1] = 0x2a; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[2] = 0x25; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[3] = 0x15; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[4].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[0] = 0x28; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[1] = 0x28; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[2] = 0x22; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[3] = 0x14; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[5].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[0] = 0x26; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[1] = 0x25; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[2] = 0x21; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[3] = 0x13; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[6].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[0] = 0x24; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[1] = 0x23; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[2] = 0x1f; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[3] = 0x12; 
priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[7].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[0] = 0x22; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[1] = 0x21; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[2] = 0x1d; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[3] = 0x11; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[8].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[0] = 0x20; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[1] = 0x20; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[2] = 0x1b; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[3] = 0x10; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[9].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[0] = 0x1f; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[1] = 0x1e; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[2] = 0x1a; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[3] = 0x0f; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[10].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[0] = 0x1d; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[1] = 0x1c; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[2] = 0x18; 
priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[3] = 0x0e; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[11].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[0] = 0x1b; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[1] = 0x1a; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[2] = 0x17; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[3] = 0x0e; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[12].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[0] = 0x1a; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[1] = 0x19; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[2] = 0x16; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[3] = 0x0d; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[13].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[0] = 0x18; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[1] = 0x17; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[2] = 0x15; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[3] = 0x0c; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[14].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[0] = 0x17; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[1] = 0x16; 
priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[2] = 0x13; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[15].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[0] = 0x16; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[1] = 0x15; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[2] = 0x12; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[3] = 0x0b; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[16].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[0] = 0x14; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[1] = 0x14; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[2] = 0x11; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[3] = 0x0a; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[17].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[0] = 0x13; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[1] = 0x13; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[2] = 0x10; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[3] = 0x0a; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[18].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[0] = 0x12; 
priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[1] = 0x12; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[3] = 0x09; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[19].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[0] = 0x11; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[1] = 0x11; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[2] = 0x0f; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[3] = 0x09; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[20].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[0] = 0x10; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[1] = 0x10; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[2] = 0x0e; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[3] = 0x08; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[21].ccktxbb_valuearray[7] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[0] = 0x0f; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[1] = 0x0f; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[2] = 0x0d; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[3] = 0x08; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[4] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[5] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[6] = 0x00; priv->cck_txbbgain_ch14_table[22].ccktxbb_valuearray[7] = 0x00; 
priv->btxpower_tracking = TRUE;
	priv->txpower_count = 0;
	priv->btxpower_trackingInit = FALSE;
}

#ifndef RTL8190P
/*
 * dm_InitializeTXPowerTracking_ThermalMeter - arm thermal-meter tracking.
 * Enabled only when the firmware advertises 3-wire RF access (FwRWRF);
 * see the in-body comment for the rationale.
 */
static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	// Tx Power tracking by Thermal Meter requires Firmware R/W 3-wire. This mechanism
	// can be enabled only when Firmware R/W 3-wire is enabled. Otherwise, frequent r/w
	// 3-wire by the driver causes the RF to go into a wrong state.
	if(priv->ieee80211->FwRWRF)
		priv->btxpower_tracking = TRUE;
	else
		priv->btxpower_tracking = FALSE;
	priv->txpower_count = 0;
	priv->btxpower_trackingInit = FALSE;
}
#endif

/*
 * dm_initialize_txpower_tracking - public init entry point: selects TSSI
 * (RTL8190P builds, or IC cut >= D) or thermal-meter initialization.
 */
void dm_initialize_txpower_tracking(struct net_device *dev)
{
#ifndef RTL8190P
	struct r8192_priv *priv = ieee80211_priv(dev);
#endif
#ifdef RTL8190P
	dm_InitializeTXPowerTracking_TSSI(dev);
#else
	//if(priv->bDcut == TRUE)
	if(priv->IC_Cut >= IC_VersionCut_D)
		dm_InitializeTXPowerTracking_TSSI(dev);
	else
		dm_InitializeTXPowerTracking_ThermalMeter(dev);
#endif
}	// dm_InitializeTXPowerTracking

/*
 * dm_CheckTXPowerTracking_TSSI - periodic watchdog hook: queues the TSSI
 * tracking work once every ~90 invocations while tracking is enabled.
 */
static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 tx_power_track_counter = 0;

	RT_TRACE(COMP_POWER_TRACKING,"%s()\n",__FUNCTION__);
	/* NOTE(review): meaning of register 0x11e is not visible in this
	 * excerpt; non-zero appears to indicate a tracking round is still
	 * outstanding — confirm against the register map. */
	if(read_nic_byte(dev, 0x11e) ==1)
		return;
	if(!priv->btxpower_tracking)
		return;
	tx_power_track_counter++;
	if(tx_power_track_counter > 90)
	{
		queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0);
		tx_power_track_counter =0;
	}
}

#ifndef RTL8190P
/*
 * dm_CheckTXPowerTracking_ThermalMeter - two-phase trigger: one call arms
 * the RF thermal meter, the next queues the tracking work (TM_Trigger
 * alternates between the phases).
 */
static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u8 TM_Trigger=0;

	//DbgPrint("dm_CheckTXPowerTracking() \n");
	if(!priv->btxpower_tracking)
		return;
	else
	{
		/* Skip the first few rounds after (re)initialization. */
		if(priv->txpower_count <= 2)
		{
			priv->txpower_count++;
			return;
		}
	}

	if(!TM_Trigger)
	{
		//Attention!! You have to write all 12 bits of data to RF, or it may cause RF to crash
		//actually write reg0x02 bit1=0, then bit1=1.
//DbgPrint("Trigger ThermalMeter, write RF reg0x2 = 0x4d to 0x4f\n"); rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d); rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f); rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d); rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f); TM_Trigger = 1; return; } else { //DbgPrint("Schedule TxPowerTrackingWorkItem\n"); queue_delayed_work(priv->priv_wq,&priv->txpower_tracking_wq,0); TM_Trigger = 0; } } #endif static void dm_check_txpower_tracking(struct net_device *dev) { #ifndef RTL8190P struct r8192_priv *priv = ieee80211_priv(dev); //static u32 tx_power_track_counter = 0; #endif #ifdef RTL8190P dm_CheckTXPowerTracking_TSSI(dev); #else //if(priv->bDcut == TRUE) if(priv->IC_Cut >= IC_VersionCut_D) dm_CheckTXPowerTracking_TSSI(dev); else dm_CheckTXPowerTracking_ThermalMeter(dev); #endif } // dm_CheckTXPowerTracking static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14) { u32 TempVal; struct r8192_priv *priv = ieee80211_priv(dev); //Write 0xa22 0xa23 TempVal = 0; if(!bInCH14){ //Write 0xa22 0xa23 TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] + (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ; rtl8192_setBBreg(dev, rCCK0_TxFilter1,bMaskHWord, TempVal); //Write 0xa24 ~ 0xa27 TempVal = 0; TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] + (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) + (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16 )+ (priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24)); rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal); //Write 0xa28 0xa29 TempVal = 0; TempVal = (u32)(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] + 
				(priv->cck_txbbgain_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
		rtl8192_setBBreg(dev, rCCK0_DebugPort,bMaskLWord, TempVal);
	}
	else
	{
		/* Channel 14: same register sequence, separate ch14 gain table. */
		TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[0] +
				(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[1]<<8)) ;

		rtl8192_setBBreg(dev, rCCK0_TxFilter1,bMaskHWord, TempVal);
		//Write 0xa24 ~ 0xa27
		TempVal = 0;
		TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[2] +
				(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[3]<<8) +
				(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[4]<<16 )+
				(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[5]<<24));
		rtl8192_setBBreg(dev, rCCK0_TxFilter2,bMaskDWord, TempVal);
		//Write 0xa28  0xa29
		TempVal = 0;
		TempVal = (u32)(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[6] +
				(priv->cck_txbbgain_ch14_table[(u8)(priv->CCKPresentAttentuation)].ccktxbb_valuearray[7]<<8)) ;
		rtl8192_setBBreg(dev, rCCK0_DebugPort,bMaskLWord, TempVal);
	}
}

#ifndef RTL8190P
/* Thermal-meter variant of the CCK power adjust: same 0xa22..0xa29 register
 * sequence, but sourced from the static CCK swing tables indexed by
 * priv->CCK_index instead of the per-attenuation TSSI tables. */
static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev,	bool  bInCH14)
{
	u32 TempVal;
	struct r8192_priv *priv = ieee80211_priv(dev);

	TempVal = 0;
	if(!bInCH14)
	{
		//Write 0xa22 0xa23
		TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
				(CCKSwingTable_Ch1_Ch13[priv->CCK_index][1]<<8) ;
		rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
		RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
			rCCK0_TxFilter1, TempVal);
		//Write 0xa24 ~ 0xa27
		TempVal = 0;
		TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
				(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3]<<8) +
				(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4]<<16 )+
				(CCKSwingTable_Ch1_Ch13[priv->CCK_index][5]<<24);
		rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
		RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
			rCCK0_TxFilter2, TempVal);
		//Write 0xa28  0xa29
		TempVal = 0;
		TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
				(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7]<<8) ;
		rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
		RT_TRACE(COMP_POWER_TRACKING, "CCK not chnl 14, reg 0x%x = 0x%x\n",
			rCCK0_DebugPort, TempVal);
	}
	else
	{
//		priv->CCKTxPowerAdjustCntNotCh14++;	//cosa add for debug.
		//Write 0xa22 0xa23
		TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
				(CCKSwingTable_Ch14[priv->CCK_index][1]<<8) ;
		rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
		RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
			rCCK0_TxFilter1, TempVal);
		//Write 0xa24 ~ 0xa27
		TempVal = 0;
		TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
				(CCKSwingTable_Ch14[priv->CCK_index][3]<<8) +
				(CCKSwingTable_Ch14[priv->CCK_index][4]<<16 )+
				(CCKSwingTable_Ch14[priv->CCK_index][5]<<24);
		rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
		RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
			rCCK0_TxFilter2, TempVal);
		//Write 0xa28  0xa29
		TempVal = 0;
		TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
				(CCKSwingTable_Ch14[priv->CCK_index][7]<<8) ;
		rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
		RT_TRACE(COMP_POWER_TRACKING,"CCK chnl 14, reg 0x%x = 0x%x\n",
			rCCK0_DebugPort, TempVal);
	}
}
#endif

/* Public entry: adjust CCK Tx power, picking TSSI vs thermal-meter variant by
 * chip / IC cut (same dispatch rule as the tracking init/check above). */
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
{	//  dm_CCKTxPowerAdjust
#ifndef RTL8190P
	struct r8192_priv *priv = ieee80211_priv(dev);
#endif
#ifdef RTL8190P
	dm_CCKTxPowerAdjust_TSSI(dev, binch14);
#else
	//if(priv->bDcut == TRUE)
	if(priv->IC_Cut >= IC_VersionCut_D)
		dm_CCKTxPowerAdjust_TSSI(dev, binch14);
	else
		dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14);
#endif
}

#ifndef RTL8192U
/* After a silent reset, re-apply the saved tx-power-tracking state: restore
 * the RF-A/RF-C I/Q imbalance registers and the CCK attenuation. */
static	void	dm_txpower_reset_recovery(
	struct net_device *dev
)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
	rtl8192_setBBreg(dev,
			rOFDM0_XATxIQImbalance,
			bMaskDWord,
			priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);

	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbbgain_value);
	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n",priv->rfa_txpowertrackingindex);
	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF A I/Q Amplify Gain is %ld\n",priv->txbbgain_table[priv->rfa_txpowertrackingindex].txbb_iq_amplifygain);
	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: CCK Attenuation is %d dB\n",priv->CCKPresentAttentuation);
	dm_cck_txpower_adjust(dev,priv->bcck_in_ch14);

	/* Same restore for the RF-C path (0xc90). */
	rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord, priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbbgain_value);
	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n",priv->rfc_txpowertrackingindex);
	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery : RF C I/Q Amplify Gain is %ld\n",priv->txbbgain_table[priv->rfc_txpowertrackingindex].txbb_iq_amplifygain);

}	// dm_TXPowerResetRecovery

/* Re-apply dynamic-mechanism state after a reset: rate-adaptive RATR value,
 * tx-power-tracking registers, and the BB initial-gain backup. */
void dm_restore_dynamic_mechanism_state(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 reg_ratr = priv->rate_adaptive.last_ratr;

	if(!priv->up)
	{
		RT_TRACE(COMP_RATE, "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
		return;
	}

	//
	// Restore previous state for rate adaptive
	//
	if(priv->rate_adaptive.rate_adaptive_disabled)
		return;
	// TODO: Only 11n mode is implemented currently,
	if( !(priv->ieee80211->mode==WIRELESS_MODE_N_24G ||
		 priv->ieee80211->mode==WIRELESS_MODE_N_5G))
			return;
	{
		/* 2007/11/15 MH Copy from 8190PCI. */
		u32 ratr_value;
		ratr_value = reg_ratr;
		if(priv->rf_type == RF_1T2R)	// 1T2R, Spatial Stream 2 should be disabled
		{
			ratr_value &=~ (RATE_ALL_OFDM_2SS);
			//DbgPrint("HW_VAR_TATR_0 from 0x%x ==> 0x%x\n", ((pu4Byte)(val))[0], ratr_value);
		}
		//DbgPrint("set HW_VAR_TATR_0 = 0x%x\n", ratr_value);
		//cosa PlatformEFIOWrite4Byte(Adapter, RATR0, ((pu4Byte)(val))[0]);
		write_nic_dword(dev, RATR0, ratr_value);
		write_nic_byte(dev, UFWP, 1);
	}
	//Resore TX Power Tracking Index
	if(priv->btxpower_trackingInit && priv->btxpower_tracking){
		dm_txpower_reset_recovery(dev);
	}

	//
	//Restore BB Initial Gain
	//
	dm_bb_initialgain_restore(dev);

}	// DM_RestoreDynamicMechanismState

/* Write the saved AGC-core / CCK-CCA initial-gain values back to the BB
 * registers.  Skipped for the RSSI-driven DIG algorithm, which manages the
 * gain itself. */
static void dm_bb_initialgain_restore(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 bit_mask = 0x7f; //Bit0~ Bit6

	if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
		return;

	//Disable Initial Gain
	//PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);
	rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);	// Only clear byte 1 and rewrite.
	rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask, (u32)priv->initgain_backup.xaagccore1);
	rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask, (u32)priv->initgain_backup.xbagccore1);
	rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask, (u32)priv->initgain_backup.xcagccore1);
	rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bit_mask, (u32)priv->initgain_backup.xdagccore1);
	bit_mask  = bMaskByte2;
	rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask, (u32)priv->initgain_backup.cca);

	RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
	RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
	RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
	RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
	RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n",priv->initgain_backup.cca);

	//Enable Initial Gain
	//PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x100);
	rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);	// Only clear byte 1 and rewrite.

}	// dm_BBInitialGainRestore

/* Snapshot dynamic-mechanism state before a reset: clear fsync flags and
 * back up the BB initial gain. */
void dm_backup_dynamic_mechanism_state(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	// Fsync to avoid reset
	priv->bswitch_fsync  = false;
	priv->bfsync_processing = false;
	//Backup BB InitialGain
	dm_bb_initialgain_backup(dev);

}	// DM_BackupDynamicMechanismState

/* Read the current AGC-core / CCK-CCA gain values into initgain_backup so
 * dm_bb_initialgain_restore() can re-apply them after a reset. */
static void dm_bb_initialgain_backup(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 bit_mask = bMaskByte0; //Bit0~ Bit6

	if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
		return;

	//PHY_SetBBReg(Adapter, UFWP, bMaskLWord, 0x800);
	rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);	// Only clear byte 1 and rewrite.
	priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask);
	priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask);
	priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask);
	priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bit_mask);
	bit_mask  = bMaskByte2;
	priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask);

	RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n",priv->initgain_backup.xaagccore1);
	RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n",priv->initgain_backup.xbagccore1);
	RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n",priv->initgain_backup.xcagccore1);
	RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n",priv->initgain_backup.xdagccore1);
	RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n",priv->initgain_backup.cca);

}	// dm_BBInitialGainBakcup

#endif
/*-----------------------------------------------------------------------------
 * Function:	dm_change_dynamic_initgain_thresh()
 *
 * Overview:
 *
 * Input:		NONE
 *
 * Output:		NONE
 *
 * Return:		NONE
 *
 * Revised History:
 *	When		Who		Remark
 *	05/29/2008	amy		Create Version 0 porting from windows code.
* *---------------------------------------------------------------------------*/ void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type, u32 dm_value) { if (dm_type == DIG_TYPE_THRESH_HIGH) { dm_digtable.rssi_high_thresh = dm_value; } else if (dm_type == DIG_TYPE_THRESH_LOW) { dm_digtable.rssi_low_thresh = dm_value; } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) { dm_digtable.rssi_high_power_highthresh = dm_value; } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) { dm_digtable.rssi_high_power_highthresh = dm_value; } else if (dm_type == DIG_TYPE_ENABLE) { dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_enable_flag = true; } else if (dm_type == DIG_TYPE_DISABLE) { dm_digtable.dig_state = DM_STA_DIG_MAX; dm_digtable.dig_enable_flag = false; } else if (dm_type == DIG_TYPE_DBG_MODE) { if(dm_value >= DM_DBG_MAX) dm_value = DM_DBG_OFF; dm_digtable.dbg_mode = (u8)dm_value; } else if (dm_type == DIG_TYPE_RSSI) { if(dm_value > 100) dm_value = 30; dm_digtable.rssi_val = (long)dm_value; } else if (dm_type == DIG_TYPE_ALGORITHM) { if (dm_value >= DIG_ALGO_MAX) dm_value = DIG_ALGO_BY_FALSE_ALARM; if(dm_digtable.dig_algorithm != (u8)dm_value) dm_digtable.dig_algorithm_switch = 1; dm_digtable.dig_algorithm = (u8)dm_value; } else if (dm_type == DIG_TYPE_BACKOFF) { if(dm_value > 30) dm_value = 30; dm_digtable.backoff_val = (u8)dm_value; } else if(dm_type == DIG_TYPE_RX_GAIN_MIN) { if(dm_value == 0) dm_value = 0x1; dm_digtable.rx_gain_range_min = (u8)dm_value; } else if(dm_type == DIG_TYPE_RX_GAIN_MAX) { if(dm_value > 0x50) dm_value = 0x50; dm_digtable.rx_gain_range_max = (u8)dm_value; } } /* DM_ChangeDynamicInitGainThresh */ /*----------------------------------------------------------------------------- * Function: dm_dig_init() * * Overview: Set DIG scheme init value. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/15/2008 amy Create Version 0 porting from windows code. 
 *
 *---------------------------------------------------------------------------*/
static void dm_dig_init(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	/* 2007/10/05 MH Disable DIG scheme now. Not tested. It seems that
	 * PSD function will be disable automatically when DIG is enabled. */
	dm_digtable.dig_enable_flag	= true;
	dm_digtable.dig_algorithm = DIG_ALGO_BY_RSSI;
	dm_digtable.dbg_mode = DM_DBG_OFF;	//off=by real rssi value, on=by DM_DigTable.Rssi_val for new dig
	dm_digtable.dig_algorithm_switch = 0;

	/* 2007/10/04 MH Define init gain threshol. */
	dm_digtable.dig_state		= DM_STA_DIG_MAX;
	dm_digtable.dig_highpwr_state	= DM_STA_DIG_MAX;
	dm_digtable.initialgain_lowerbound_state = false;

	dm_digtable.rssi_low_thresh 	= DM_DIG_THRESH_LOW;
	dm_digtable.rssi_high_thresh 	= DM_DIG_THRESH_HIGH;

	dm_digtable.rssi_high_power_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
	dm_digtable.rssi_high_power_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;

	dm_digtable.rssi_val = 50;	//for new dig debug rssi value
	dm_digtable.backoff_val = DM_DIG_BACKOFF;
	dm_digtable.rx_gain_range_max = DM_DIG_MAX;
	/* Netcore boards get a different lower bound for the rx gain range. */
	if(priv->CustomerID == RT_CID_819x_Netcore)
		dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
	else
		dm_digtable.rx_gain_range_min = DM_DIG_MIN;

}	/* dm_dig_init */

/*-----------------------------------------------------------------------------
 * Function:	dm_ctrl_initgain_byrssi()
 *
 * Overview:	Driver must monitor RSSI and notify firmware to change initial
 *				gain according to different threshold. BB team provide the
 *				suggested solution.
 *
 * Input:		struct net_device *dev
 *
 * Output:		NONE
 *
 * Return:		NONE
 *
 * Revised History:
 *	When		Who		Remark
 *	05/27/2008	amy		Create Version 0 porting from windows code.
 *
 *---------------------------------------------------------------------------*/
/* Dispatch DIG control to the configured algorithm (FW false-alarm or
 * driver-side RSSI). */
static void dm_ctrl_initgain_byrssi(struct net_device *dev)
{
	if (dm_digtable.dig_enable_flag == false)
		return;

	if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
		dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
	else if(dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
		dm_ctrl_initgain_byrssi_by_driverrssi(dev);
	else
		return;
}

/* Driver-side RSSI DIG: disable the firmware DIG, track connect state, then
 * run the three sub-state machines (initial gain, PD threshold, CS ratio). */
static void dm_ctrl_initgain_byrssi_by_driverrssi(
	struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 i;
	static u8 	fw_dig=0;	/* counts FW-DIG-off passes; survives calls */

	if (dm_digtable.dig_enable_flag == false)
		return;

	//DbgPrint("Dig by Sw Rssi \n");
	if(dm_digtable.dig_algorithm_switch)	// if swithed algorithm, we have to disable FW Dig.
		fw_dig = 0;
	if(fw_dig <= 3)	// execute several times to make sure the FW Dig is disabled
	{// FW DIG Off
		for(i=0; i<3; i++)
			rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);	// Only clear byte 1 and rewrite.
		fw_dig++;
		dm_digtable.dig_state = DM_STA_DIG_OFF;	//fw dig off.
	}

	if(priv->ieee80211->state == IEEE80211_LINKED)
		dm_digtable.cur_connect_state = DIG_CONNECT;
	else
		dm_digtable.cur_connect_state = DIG_DISCONNECT;

	//DbgPrint("DM_DigTable.PreConnectState = %d, DM_DigTable.CurConnectState = %d \n",
	//DM_DigTable.PreConnectState, DM_DigTable.CurConnectState);

	/* In normal (non-debug) mode, drive the state machines from the real
	 * smoothed RSSI. */
	if(dm_digtable.dbg_mode == DM_DBG_OFF)
		dm_digtable.rssi_val = priv->undecorated_smoothed_pwdb;
	//DbgPrint("DM_DigTable.Rssi_val = %d \n", DM_DigTable.Rssi_val);
	dm_initial_gain(dev);
	dm_pd_th(dev);
	dm_cs_ratio(dev);
	if(dm_digtable.dig_algorithm_switch)
		dm_digtable.dig_algorithm_switch = 0;
	dm_digtable.pre_connect_state = dm_digtable.cur_connect_state;

}	/* dm_CtrlInitGainByRssi */

/* Firmware false-alarm DIG: hysteresis state machine keyed on the smoothed
 * RSSI crossing the low/high thresholds. */
static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(
	struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 reset_cnt = 0;	/* last seen priv->reset_count (silent reset detector) */
	u8 i;

	if (dm_digtable.dig_enable_flag == false)
		return;

	if(dm_digtable.dig_algorithm_switch)
	{
		dm_digtable.dig_state = DM_STA_DIG_MAX;
		// Fw DIG On.
		for(i=0; i<3; i++)
			rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);	// Only clear byte 1 and rewrite.
		dm_digtable.dig_algorithm_switch = 0;
	}

	if (priv->ieee80211->state != IEEE80211_LINKED)
		return;

	// For smooth, we can not change DIG state.
	if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_low_thresh) &&
		(priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_thresh))
	{
		return;
	}
	//DbgPrint("Dig by Fw False Alarm\n");
	//if (DM_DigTable.Dig_State == DM_STA_DIG_OFF)
	/*DbgPrint("DIG Check\n\r RSSI=%d LOW=%d HIGH=%d STATE=%d",
	pHalData->UndecoratedSmoothedPWDB, DM_DigTable.RssiLowThresh,
	DM_DigTable.RssiHighThresh, DM_DigTable.Dig_State);*/
	/* 1. When RSSI decrease, We have to judge if it is smaller than a treshold
		  and then execute below step. */
	if ((priv->undecorated_smoothed_pwdb <= dm_digtable.rssi_low_thresh))
	{
		/* 2008/02/05 MH When we execute silent reset, the DIG PHY parameters
		   will be reset to init value. We must prevent the condition. */
		if (dm_digtable.dig_state == DM_STA_DIG_OFF &&
			(priv->reset_count == reset_cnt))
		{
			return;
		}
		else
		{
			reset_cnt = priv->reset_count;
		}

		// If DIG is off, DIG high power state must reset.
		dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
		dm_digtable.dig_state = DM_STA_DIG_OFF;

		// 1.1 DIG Off.
		rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);	// Only clear byte 1 and rewrite.

		// 1.2 Set initial gain.
		write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17);
		write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17);
		write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17);
		write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x17);

		// 1.3 Lower PD_TH for OFDM.
		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
		{
			/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
			// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
			#ifdef RTL8190P
				write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
			#else
				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
			#endif
			/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
					write_nic_byte(pAdapter, rOFDM0_RxDetector1, 0x40);
			*/
			//else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
			//else
				//PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x40);
		}
		else
			write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);

		// 1.4 Lower CS ratio for CCK.
		write_nic_byte(dev, 0xa0a, 0x08);

		// 1.5 Higher EDCCA.
		//PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x325);
		return;
	}

	/* 2. When RSSI increase, We have to judge if it is larger than a treshold
		  and then execute below step.  */
	if ((priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh) )
	{
		u8 reset_flag = 0;
		if (dm_digtable.dig_state == DM_STA_DIG_ON &&
			(priv->reset_count == reset_cnt))
		{
			dm_ctrl_initgain_byrssi_highpwr(dev);
			return;
		}
		else
		{
			/* silent reset happened since last pass -> registers were wiped */
			if (priv->reset_count != reset_cnt)
				reset_flag = 1;
			reset_cnt = priv->reset_count;
		}

		dm_digtable.dig_state = DM_STA_DIG_ON;
		//DbgPrint("DIG ON\n\r");

		// 2.1 Set initial gain.
		// 2008/02/26 MH SD3-Jerry suggest to prevent dirty environment.
		if (reset_flag == 1)
		{
			write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c);
			write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c);
			write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c);
			write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c);
		}
		else
		{
			write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20);
			write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20);
			write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20);
			write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20);
		}

		// 2.2 Higher PD_TH for OFDM.
		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
		{
			/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
			// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
			#ifdef RTL8190P
				write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
			#else
				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
			#endif
			/* else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
				write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
			*/
			//else if (pAdapter->HardwareType == HARDWARE_TYPE_RTL8192E)
			//else
				//PlatformEFIOWrite1Byte(pAdapter, rOFDM0_RxDetector1, 0x42);
		}
		else
			write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);

		// 2.3 Higher CS ratio for CCK.
		write_nic_byte(dev, 0xa0a, 0xcd);

		// 2.4 Lower EDCCA.
		/* 2008/01/11 MH 90/92 series are the same. */
		//PlatformEFIOWrite4Byte(pAdapter, rOFDM0_ECCAThreshold, 0x346);

		// 2.5 DIG On.
		rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);	// Only clear byte 1 and rewrite.

	}

	dm_ctrl_initgain_byrssi_highpwr(dev);

}	/* dm_CtrlInitGainByRssi */

/*-----------------------------------------------------------------------------
 * Function:	dm_ctrl_initgain_byrssi_highpwr()
 *
 * Overview:
 *
 * Input:		NONE
 *
 * Output:		NONE
 *
 * Return:		NONE
 *
 * Revised History:
 *	When		Who		Remark
 *	05/28/2008	amy		Create Version 0 porting from windows code.
 *
 *---------------------------------------------------------------------------*/
/* High-power sub-state of the false-alarm DIG: adjusts the OFDM PD threshold
 * when RSSI enters/leaves the high-power band. */
static void dm_ctrl_initgain_byrssi_highpwr(
	struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 reset_cnt_highpwr = 0;	/* silent-reset detector, as in caller */

	// For smooth, we can not change high power DIG state in the range.
	if ((priv->undecorated_smoothed_pwdb > dm_digtable.rssi_high_power_lowthresh) &&
		(priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_highthresh))
	{
		return;
	}

	/* 3. When RSSI >75% or <70%, it is a high power issue. We have to judge if
		  it is larger than a treshold and then execute below step.  */
	// 2008/02/05 MH SD3-Jerry Modify PD_TH for high power issue.
	if (priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_power_highthresh)
	{
		/* already in high-power state and no silent reset since -> nothing to do */
		if (dm_digtable.dig_highpwr_state == DM_STA_DIG_ON &&
			(priv->reset_count == reset_cnt_highpwr))
			return;
		else
			dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;

		// 3.1 Higher PD_TH for OFDM for high power state.
		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
		{
			#ifdef RTL8190P
				write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
			#else
				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
			#endif
			/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
				write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
			*/
		}
		else
			write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
	}
	else
	{
		if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF&&
			(priv->reset_count == reset_cnt_highpwr))
			return;
		else
			dm_digtable.dig_highpwr_state = DM_STA_DIG_OFF;

		if (priv->undecorated_smoothed_pwdb < dm_digtable.rssi_high_power_lowthresh &&
			 priv->undecorated_smoothed_pwdb >= dm_digtable.rssi_high_thresh)
		{
			// 3.2 Recover PD_TH for OFDM for normal power region.
			if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
			{
				#ifdef RTL8190P
					write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
				#else
					write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
				#endif
				/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
					write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
				*/
			}
			else
				write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
		}
	}

	reset_cnt_highpwr = priv->reset_count;

}	/* dm_CtrlInitGainByRssiHighPwr */

/* Driver-RSSI DIG sub-machine 1/3: compute the target initial gain from
 * rssi_val (+10 - backoff, clamped to [rx_gain_range_min, max]) and write it
 * to the four AGC-core registers when it changed, on first run, or after a
 * silent reset. */
static void dm_initial_gain(
	struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 initial_gain=0;
	static u8 initialized=0, force_write=0;
	static u32 reset_cnt=0;

	if(dm_digtable.dig_algorithm_switch)
	{
		initialized = 0;
		reset_cnt = 0;
	}

	if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
	{
		if(dm_digtable.cur_connect_state == DIG_CONNECT)
		{
			if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) > dm_digtable.rx_gain_range_max)
				dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_max;
			else if((dm_digtable.rssi_val+10-dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
				dm_digtable.cur_ig_value = dm_digtable.rx_gain_range_min;
			else
				dm_digtable.cur_ig_value = dm_digtable.rssi_val+10-dm_digtable.backoff_val;
		}
		else	//current state is disconnected
		{
			if(dm_digtable.cur_ig_value == 0)
				dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
			else
				dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
		}
	}
	else	// disconnected -> connected or connected -> disconnected
	{
		dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
		dm_digtable.pre_ig_value = 0;
	}
	//DbgPrint("DM_DigTable.CurIGValue = 0x%x, DM_DigTable.PreIGValue = 0x%x\n", DM_DigTable.CurIGValue, DM_DigTable.PreIGValue);

	// if silent reset happened, we should rewrite the values back
	if(priv->reset_count != reset_cnt)
	{
		force_write = 1;
		reset_cnt = priv->reset_count;
	}

	/* hardware register drifted from what we last wrote -> rewrite */
	if(dm_digtable.pre_ig_value != read_nic_byte(dev, rOFDM0_XAAGCCore1))
		force_write = 1;

	{
		if((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
			|| !initialized || force_write)
		{
			initial_gain = (u8)dm_digtable.cur_ig_value;
			//DbgPrint("Write initial gain = 0x%x\n", initial_gain);
			// Set initial gain.
			write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
			write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
			write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
			write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
			dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
			initialized = 1;
			force_write = 0;
		}
	}
}

/* Driver-RSSI DIG sub-machine 2/3: pick the OFDM power-detection threshold
 * state (low/normal/high power) from rssi_val and program the detector
 * register when the state changes. */
static void dm_pd_th(
	struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u8 initialized=0, force_write=0;
	static u32 reset_cnt = 0;

	if(dm_digtable.dig_algorithm_switch)
	{
		initialized = 0;
		reset_cnt = 0;
	}

	if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
	{
		if(dm_digtable.cur_connect_state == DIG_CONNECT)
		{
			if (dm_digtable.rssi_val >= dm_digtable.rssi_high_power_highthresh)
				dm_digtable.curpd_thstate = DIG_PD_AT_HIGH_POWER;
			else if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
				dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
			else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) &&
					(dm_digtable.rssi_val < dm_digtable.rssi_high_power_lowthresh))
				dm_digtable.curpd_thstate = DIG_PD_AT_NORMAL_POWER;
			else
				dm_digtable.curpd_thstate = dm_digtable.prepd_thstate;
		}
		else
		{
			dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
		}
	}
	else	// disconnected -> connected or connected -> disconnected
	{
		dm_digtable.curpd_thstate = DIG_PD_AT_LOW_POWER;
	}

	// if silent reset happened, we should rewrite the values back
	if(priv->reset_count != reset_cnt)
	{
		force_write = 1;
		reset_cnt = priv->reset_count;
	}

	{
		if((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
			(initialized<=3) || force_write)
		{
			//DbgPrint("Write PD_TH state = %d\n", DM_DigTable.CurPD_THState);
			if(dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER)
			{
				// Lower PD_TH for OFDM.
				if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
				{
					/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
					// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
					#ifdef RTL8190P
						write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
					#else
						write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
					#endif
					/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
						write_nic_byte(dev, rOFDM0_RxDetector1, 0x40);
					*/
				}
				else
					write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
			}
			else if(dm_digtable.curpd_thstate == DIG_PD_AT_NORMAL_POWER)
			{
				// Higher PD_TH for OFDM.
				if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
				{
					/* 2008/01/11 MH 40MHZ 90/92 register are not the same. */
					// 2008/02/05 MH SD3-Jerry 92U/92E PD_TH are the same.
					#ifdef RTL8190P
						write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
					#else
						write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
					#endif
					/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
						write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
					*/
				}
				else
					write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
			}
			else if(dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER)
			{
				// Higher PD_TH for OFDM for high power state.
				if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
				{
					#ifdef RTL8190P
						write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
					#else
						write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
					#endif
					/*else if (priv->card_8192 == HARDWARE_TYPE_RTL8190P)
						write_nic_byte(dev, rOFDM0_RxDetector1, 0x41);
					*/
				}
				else
					write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
			}
			dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
			if(initialized <= 3)
				initialized++;
			force_write = 0;
		}
	}
}

/* Driver-RSSI DIG sub-machine 3/3: pick the CCK carrier-sense ratio
 * (lower/higher) from rssi_val and write register 0xa0a on change. */
static	void dm_cs_ratio(
	struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u8 initialized=0,force_write=0;
	static u32 reset_cnt = 0;

	if(dm_digtable.dig_algorithm_switch)
	{
		initialized = 0;
		reset_cnt = 0;
	}

	if(dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
	{
		if(dm_digtable.cur_connect_state == DIG_CONNECT)
		{
			if ((dm_digtable.rssi_val <= dm_digtable.rssi_low_thresh))
				dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
			else if ((dm_digtable.rssi_val >= dm_digtable.rssi_high_thresh) )
				dm_digtable.curcs_ratio_state = DIG_CS_RATIO_HIGHER;
			else
				dm_digtable.curcs_ratio_state = dm_digtable.precs_ratio_state;
		}
		else
		{
			dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
		}
	}
	else	// disconnected -> connected or connected -> disconnected
	{
		dm_digtable.curcs_ratio_state = DIG_CS_RATIO_LOWER;
	}

	// if silent reset happened, we should rewrite the values back
	if(priv->reset_count != reset_cnt)
	{
		force_write = 1;
		reset_cnt = priv->reset_count;
	}

	{
		if((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
			!initialized || force_write)
		{
			//DbgPrint("Write CS_ratio state = %d\n", DM_DigTable.CurCS_ratioState);
			if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
			{
				// Lower CS ratio for CCK.
				write_nic_byte(dev, 0xa0a, 0x08);
			}
			else if(dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
			{
				// Higher CS ratio for CCK.
				write_nic_byte(dev, 0xa0a, 0xcd);
			}
			dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
			initialized = 1;
			force_write = 0;
		}
	}
}

/* Reset the EDCA-turbo bookkeeping flags to "turbo off, no non-BE traffic". */
void dm_init_edca_turbo(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	priv->bcurrent_turbo_EDCA = false;
	priv->ieee80211->bis_any_nonbepkts = false;
	priv->bis_cur_rdlstate = false;
}	// dm_init_edca_turbo

#if 1
/* Periodically enable/disable EDCA turbo: when all traffic is best-effort,
 * program an uplink- or downlink-tuned EDCAPARA_BE value (Rx > 4*Tx means
 * downlink); otherwise restore the AP-declared EDCA parameters and the ACM
 * control bit. */
static void dm_check_edca_turbo(
	struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	PRT_HIGH_THROUGHPUT	pHTInfo = priv->ieee80211->pHTInfo;
	//PSTA_QOS			pStaQos = pMgntInfo->pStaQos;

	// Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.
	static unsigned long lastTxOkCnt = 0;
	static unsigned long lastRxOkCnt = 0;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	//
	// Do not be Turbo if it's under WiFi config and Qos Enabled, because the EDCA parameters
	// should follow the settings from QAP. By Bruce, 2007-12-07.
	//
	#if 1
	if(priv->ieee80211->state != IEEE80211_LINKED)
		goto dm_CheckEdcaTurbo_EXIT;
	#endif
	// We do not turn on EDCA turbo mode for some AP that has IOT issue
	if(priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
		goto dm_CheckEdcaTurbo_EXIT;

//	printk("========>%s():bis_any_nonbepkts is %d\n",__FUNCTION__,priv->bis_any_nonbepkts);
	// Check the status for current condition.
	if(!priv->ieee80211->bis_any_nonbepkts)
	{
		/* byte deltas since the previous check */
		curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
		curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
		// For RT-AP, we needs to turn it on when Rx>Tx
		if(curRxOkCnt > 4*curTxOkCnt)
		{
			//printk("%s():curRxOkCnt > 4*curTxOkCnt\n");
			if(!priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
			{
				write_nic_dword(dev, EDCAPARA_BE, edca_setting_DL[pHTInfo->IOTPeer]);
				priv->bis_cur_rdlstate = true;
			}
		}
		else
		{
			//printk("%s():curRxOkCnt < 4*curTxOkCnt\n");
			if(priv->bis_cur_rdlstate || !priv->bcurrent_turbo_EDCA)
			{
				write_nic_dword(dev, EDCAPARA_BE, edca_setting_UL[pHTInfo->IOTPeer]);
				priv->bis_cur_rdlstate = false;
			}
		}

		priv->bcurrent_turbo_EDCA = true;
	}
	else
	{
		//
		// Turn Off EDCA turbo here.
		// Restore original EDCA according to the declaration of AP.
		//
		 if(priv->bcurrent_turbo_EDCA)
		{
			{
				u8		u1bAIFS;
				u32		u4bAcParam;
				struct ieee80211_qos_parameters *qos_parameters = &priv->ieee80211->current_network.qos_data.parameters;
				u8 mode = priv->ieee80211->mode;

				// For Each time updating EDCA parameter, reset EDCA turbo mode status.
				dm_init_edca_turbo(dev);
				/* AIFS in us: slot time is 9us for G/N-2.4G, else 20us. */
				u1bAIFS = qos_parameters->aifs[0] * ((mode&(IEEE_G|IEEE_N_24G)) ?9:20) + aSifsTime;
				u4bAcParam = ((((u32)(qos_parameters->tx_op_limit[0]))<< AC_PARAM_TXOP_LIMIT_OFFSET)|
					(((u32)(qos_parameters->cw_max[0]))<< AC_PARAM_ECW_MAX_OFFSET)|
					(((u32)(qos_parameters->cw_min[0]))<< AC_PARAM_ECW_MIN_OFFSET)|
					((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
				printk("===>u4bAcParam:%x, ", u4bAcParam);
				//write_nic_dword(dev, WDCAPARA_ADD[i], u4bAcParam);
				write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);

				// Check ACM bit.
				// If it is set, immediately set ACM control bit to downgrading AC for passing WMM testplan. Annie, 2005-12-13.
				{
					// TODO:  Modified this part and try to set acm control in only 1 IO processing!!

					PACI_AIFSN	pAciAifsn = (PACI_AIFSN)&(qos_parameters->aifs[0]);
					u8		AcmCtrl = read_nic_byte( dev, AcmHwCtrl );
					if( pAciAifsn->f.ACM )
					{ // ACM bit is 1.
						AcmCtrl |= AcmHw_BeqEn;
					}
					else
					{ // ACM bit is 0.
						AcmCtrl &= (~AcmHw_BeqEn);
					}

					RT_TRACE( COMP_QOS,"SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl ) ;
					write_nic_byte(dev, AcmHwCtrl, AcmCtrl );
				}
			}
			priv->bcurrent_turbo_EDCA = false;
		}
	}

dm_CheckEdcaTurbo_EXIT:
	// Set variables for next time.
	priv->ieee80211->bis_any_nonbepkts = false;
	lastTxOkCnt = priv->stats.txbytesunicast;
	lastRxOkCnt = priv->stats.rxbytesunicast;
}	// dm_CheckEdcaTurbo
#endif

/* Enable CTS-to-self protection with the default threshold. */
static void dm_init_ctstoself(struct net_device * dev)
{
	struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);

	priv->ieee80211->bCTSToSelfEnable = TRUE;
	priv->ieee80211->CTSToSelfTH = CTSToSelfTHVal;
}

/* Broadcom-AP IOT workaround: force CTS-to-self on uplink-dominated traffic,
 * clear it on downlink (Rx > 4*Tx).  No-op for other peers. */
static void dm_ctstoself(struct net_device *dev)
{
	struct r8192_priv *priv = ieee80211_priv((struct net_device *)dev);
	PRT_HIGH_THROUGHPUT	pHTInfo = priv->ieee80211->pHTInfo;
	static unsigned long lastTxOkCnt = 0;
	static unsigned long lastRxOkCnt = 0;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	if(priv->ieee80211->bCTSToSelfEnable != TRUE)
	{
		pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
		return;
	}
	/*
	1. Uplink
	2. Linksys350/Linksys300N
	3.
<50 disable, >55 enable */ if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) { curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt; curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt; if(curRxOkCnt > 4*curTxOkCnt) //downlink, disable CTS to self { pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF; //DbgPrint("dm_CTSToSelf() ==> CTS to self disabled -- downlink\n"); } else //uplink { #if 1 pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF; #else if(priv->undecorated_smoothed_pwdb < priv->ieee80211->CTSToSelfTH) // disable CTS to self { pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF; //DbgPrint("dm_CTSToSelf() ==> CTS to self disabled\n"); } else if(priv->undecorated_smoothed_pwdb >= (priv->ieee80211->CTSToSelfTH+5)) // enable CTS to self { pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF; //DbgPrint("dm_CTSToSelf() ==> CTS to self enabled\n"); } #endif } lastTxOkCnt = priv->stats.txbytesunicast; lastRxOkCnt = priv->stats.rxbytesunicast; } } /*----------------------------------------------------------------------------- * Function: dm_check_rfctrl_gpio() * * Overview: Copy 8187B template for 9xseries. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/28/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ #if 1 static void dm_check_rfctrl_gpio(struct net_device * dev) { #ifdef RTL8192E struct r8192_priv *priv = ieee80211_priv(dev); #endif // Walk around for DTM test, we will not enable HW - radio on/off because r/w // page 1 register before Lextra bus is enabled cause system fails when resuming // from S4. 20080218, Emily // Stop to execute workitem to prevent S3/S4 bug. 
#ifdef RTL8190P return; #endif #ifdef RTL8192U return; #endif #ifdef RTL8192E queue_delayed_work(priv->priv_wq,&priv->gpio_change_rf_wq,0); #endif } /* dm_CheckRfCtrlGPIO */ #endif /*----------------------------------------------------------------------------- * Function: dm_check_pbc_gpio() * * Overview: Check if PBC button is pressed. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/28/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void dm_check_pbc_gpio(struct net_device *dev) { #ifdef RTL8192U struct r8192_priv *priv = ieee80211_priv(dev); u8 tmp1byte; tmp1byte = read_nic_byte(dev,GPI); if(tmp1byte == 0xff) return; if (tmp1byte&BIT6 || tmp1byte&BIT0) { // Here we only set bPbcPressed to TRUE // After trigger PBC, the variable will be set to FALSE RT_TRACE(COMP_IO, "CheckPbcGPIO - PBC is pressed\n"); priv->bpbc_pressed = true; } #endif } #ifdef RTL8192E /*----------------------------------------------------------------------------- * Function: dm_GPIOChangeRF * Overview: PCI will not support workitem call back HW radio on-off control. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 02/21/2008 MHC Create Version 0. * *---------------------------------------------------------------------------*/ void dm_gpio_change_rf_callback(struct work_struct *work) { struct delayed_work *dwork = container_of(work,struct delayed_work,work); struct r8192_priv *priv = container_of(dwork,struct r8192_priv,gpio_change_rf_wq); struct net_device *dev = priv->ieee80211->dev; u8 tmp1byte; RT_RF_POWER_STATE eRfPowerStateToSet; bool bActuallySet = false; bActuallySet=false; if(!priv->up) { RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),"dm_gpio_change_rf_callback(): Callback function breaks out!!\n"); } else { // 0x108 GPIO input register is read only //set 0x108 B1= 1: RF-ON; 0: RF-OFF. 
tmp1byte = read_nic_byte(dev,GPI); eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff; if( (priv->bHwRadioOff == true) && (eRfPowerStateToSet == eRfOn)) { RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n"); priv->bHwRadioOff = false; bActuallySet = true; } else if ( (priv->bHwRadioOff == false) && (eRfPowerStateToSet == eRfOff)) { RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n"); priv->bHwRadioOff = true; bActuallySet = true; } if(bActuallySet) { priv->bHwRfOffAction = 1; MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW); //DrvIFIndicateCurrentPhyStatus(pAdapter); } else { msleep(2000); } } } /* dm_GPIOChangeRF */ #endif /*----------------------------------------------------------------------------- * Function: DM_RFPathCheckWorkItemCallBack() * * Overview: Check if Current RF RX path is enabled * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 01/30/2008 MHC Create Version 0. * *---------------------------------------------------------------------------*/ void dm_rf_pathcheck_workitemcallback(struct work_struct *work) { struct delayed_work *dwork = container_of(work,struct delayed_work,work); struct r8192_priv *priv = container_of(dwork,struct r8192_priv,rfpath_check_wq); struct net_device *dev =priv->ieee80211->dev; //bool bactually_set = false; u8 rfpath = 0, i; /* 2008/01/30 MH After discussing with SD3 Jerry, 0xc04/0xd04 register will always be the same. We only read 0xc04 now. */ rfpath = read_nic_byte(dev, 0xc04); // Check Bit 0-3, it means if RF A-D is enabled. 
for (i = 0; i < RF90_PATH_MAX; i++) { if (rfpath & (0x01<<i)) priv->brfpath_rxenable[i] = 1; else priv->brfpath_rxenable[i] = 0; } if(!DM_RxPathSelTable.Enable) return; dm_rxpath_sel_byrssi(dev); } /* DM_RFPathCheckWorkItemCallBack */ static void dm_init_rxpath_selection(struct net_device * dev) { u8 i; struct r8192_priv *priv = ieee80211_priv(dev); DM_RxPathSelTable.Enable = 1; //default enabled DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low; DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH; if(priv->CustomerID == RT_CID_819x_Netcore) DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; else DM_RxPathSelTable.cck_method = CCK_Rx_Version_1; DM_RxPathSelTable.DbgMode = DM_DBG_OFF; DM_RxPathSelTable.disabledRF = 0; for(i=0; i<4; i++) { DM_RxPathSelTable.rf_rssi[i] = 50; DM_RxPathSelTable.cck_pwdb_sta[i] = -64; DM_RxPathSelTable.rf_enable_rssi_th[i] = 100; } } static void dm_rxpath_sel_byrssi(struct net_device * dev) { struct r8192_priv *priv = ieee80211_priv(dev); u8 i, max_rssi_index=0, min_rssi_index=0, sec_rssi_index=0, rf_num=0; u8 tmp_max_rssi=0, tmp_min_rssi=0, tmp_sec_rssi=0; u8 cck_default_Rx=0x2; //RF-C u8 cck_optional_Rx=0x3;//RF-D long tmp_cck_max_pwdb=0, tmp_cck_min_pwdb=0, tmp_cck_sec_pwdb=0; u8 cck_rx_ver2_max_index=0, cck_rx_ver2_min_index=0, cck_rx_ver2_sec_index=0; u8 cur_rf_rssi; long cur_cck_pwdb; static u8 disabled_rf_cnt=0, cck_Rx_Path_initialized=0; u8 update_cck_rx_path; if(priv->rf_type != RF_2T4R) return; if(!cck_Rx_Path_initialized) { DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(dev, 0xa07)&0xf); cck_Rx_Path_initialized = 1; } DM_RxPathSelTable.disabledRF = 0xf; DM_RxPathSelTable.disabledRF &=~ (read_nic_byte(dev, 0xc04)); if(priv->ieee80211->mode == WIRELESS_MODE_B) { DM_RxPathSelTable.cck_method = CCK_Rx_Version_2; //pure B mode, fixed cck version2 //DbgPrint("Pure B mode, use cck rx version2 \n"); } //decide max/sec/min rssi index for (i=0; i<RF90_PATH_MAX; i++) { if(!DM_RxPathSelTable.DbgMode) DM_RxPathSelTable.rf_rssi[i] = 
priv->stats.rx_rssi_percentage[i]; if(priv->brfpath_rxenable[i]) { rf_num++; cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i]; if(rf_num == 1) // find first enabled rf path and the rssi values { //initialize, set all rssi index to the same one max_rssi_index = min_rssi_index = sec_rssi_index = i; tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi; } else if(rf_num == 2) { // we pick up the max index first, and let sec and min to be the same one if(cur_rf_rssi >= tmp_max_rssi) { tmp_max_rssi = cur_rf_rssi; max_rssi_index = i; } else { tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi; sec_rssi_index = min_rssi_index = i; } } else { if(cur_rf_rssi > tmp_max_rssi) { tmp_sec_rssi = tmp_max_rssi; sec_rssi_index = max_rssi_index; tmp_max_rssi = cur_rf_rssi; max_rssi_index = i; } else if(cur_rf_rssi == tmp_max_rssi) { // let sec and min point to the different index tmp_sec_rssi = cur_rf_rssi; sec_rssi_index = i; } else if((cur_rf_rssi < tmp_max_rssi) &&(cur_rf_rssi > tmp_sec_rssi)) { tmp_sec_rssi = cur_rf_rssi; sec_rssi_index = i; } else if(cur_rf_rssi == tmp_sec_rssi) { if(tmp_sec_rssi == tmp_min_rssi) { // let sec and min point to the different index tmp_sec_rssi = cur_rf_rssi; sec_rssi_index = i; } else { // This case we don't need to set any index } } else if((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi)) { // This case we don't need to set any index } else if(cur_rf_rssi == tmp_min_rssi) { if(tmp_sec_rssi == tmp_min_rssi) { // let sec and min point to the different index tmp_min_rssi = cur_rf_rssi; min_rssi_index = i; } else { // This case we don't need to set any index } } else if(cur_rf_rssi < tmp_min_rssi) { tmp_min_rssi = cur_rf_rssi; min_rssi_index = i; } } } } rf_num = 0; // decide max/sec/min cck pwdb index if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) { for (i=0; i<RF90_PATH_MAX; i++) { if(priv->brfpath_rxenable[i]) { rf_num++; cur_cck_pwdb = DM_RxPathSelTable.cck_pwdb_sta[i]; if(rf_num == 1) // find first enabled rf path and the rssi values { 
//initialize, set all rssi index to the same one cck_rx_ver2_max_index = cck_rx_ver2_min_index = cck_rx_ver2_sec_index = i; tmp_cck_max_pwdb = tmp_cck_min_pwdb = tmp_cck_sec_pwdb = cur_cck_pwdb; } else if(rf_num == 2) { // we pick up the max index first, and let sec and min to be the same one if(cur_cck_pwdb >= tmp_cck_max_pwdb) { tmp_cck_max_pwdb = cur_cck_pwdb; cck_rx_ver2_max_index = i; } else { tmp_cck_sec_pwdb = tmp_cck_min_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = cck_rx_ver2_min_index = i; } } else { if(cur_cck_pwdb > tmp_cck_max_pwdb) { tmp_cck_sec_pwdb = tmp_cck_max_pwdb; cck_rx_ver2_sec_index = cck_rx_ver2_max_index; tmp_cck_max_pwdb = cur_cck_pwdb; cck_rx_ver2_max_index = i; } else if(cur_cck_pwdb == tmp_cck_max_pwdb) { // let sec and min point to the different index tmp_cck_sec_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = i; } else if((cur_cck_pwdb < tmp_cck_max_pwdb) &&(cur_cck_pwdb > tmp_cck_sec_pwdb)) { tmp_cck_sec_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = i; } else if(cur_cck_pwdb == tmp_cck_sec_pwdb) { if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb) { // let sec and min point to the different index tmp_cck_sec_pwdb = cur_cck_pwdb; cck_rx_ver2_sec_index = i; } else { // This case we don't need to set any index } } else if((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb)) { // This case we don't need to set any index } else if(cur_cck_pwdb == tmp_cck_min_pwdb) { if(tmp_cck_sec_pwdb == tmp_cck_min_pwdb) { // let sec and min point to the different index tmp_cck_min_pwdb = cur_cck_pwdb; cck_rx_ver2_min_index = i; } else { // This case we don't need to set any index } } else if(cur_cck_pwdb < tmp_cck_min_pwdb) { tmp_cck_min_pwdb = cur_cck_pwdb; cck_rx_ver2_min_index = i; } } } } } // Set CCK Rx path // reg0xA07[3:2]=cck default rx path, reg0xa07[1:0]=cck optional rx path. 
update_cck_rx_path = 0; if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) { cck_default_Rx = cck_rx_ver2_max_index; cck_optional_Rx = cck_rx_ver2_sec_index; if(tmp_cck_max_pwdb != -64) update_cck_rx_path = 1; } if(tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2) { if((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH) { //record the enabled rssi threshold DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] = tmp_max_rssi+5; //disable the BB Rx path, OFDM rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xc04[3:0] rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<min_rssi_index, 0x0); // 0xd04[3:0] disabled_rf_cnt++; } if(DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) { cck_default_Rx = max_rssi_index; cck_optional_Rx = sec_rssi_index; if(tmp_max_rssi) update_cck_rx_path = 1; } } if(update_cck_rx_path) { DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2)|(cck_optional_Rx); rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000, DM_RxPathSelTable.cck_Rx_path); } if(DM_RxPathSelTable.disabledRF) { for(i=0; i<4; i++) { if((DM_RxPathSelTable.disabledRF>>i) & 0x1) //disabled rf { if(tmp_max_rssi >= DM_RxPathSelTable.rf_enable_rssi_th[i]) { //enable the BB Rx path //DbgPrint("RF-%d is enabled. \n", 0x1<<i); rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x1<<i, 0x1); // 0xc04[3:0] rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x1<<i, 0x1); // 0xd04[3:0] DM_RxPathSelTable.rf_enable_rssi_th[i] = 100; disabled_rf_cnt--; } } } } } /*----------------------------------------------------------------------------- * Function: dm_check_rx_path_selection() * * Overview: Call a workitem to check current RXRF path and Rx Path selection by RSSI. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/28/2008 amy Create Version 0 porting from windows code. 
* *---------------------------------------------------------------------------*/ static void dm_check_rx_path_selection(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); queue_delayed_work(priv->priv_wq,&priv->rfpath_check_wq,0); } /* dm_CheckRxRFPath */ static void dm_init_fsync (struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); priv->ieee80211->fsync_time_interval = 500; priv->ieee80211->fsync_rate_bitmap = 0x0f000800; priv->ieee80211->fsync_rssi_threshold = 30; #ifdef RTL8190P priv->ieee80211->bfsync_enable = true; #else priv->ieee80211->bfsync_enable = false; #endif priv->ieee80211->fsync_multiple_timeinterval = 3; priv->ieee80211->fsync_firstdiff_ratethreshold= 100; priv->ieee80211->fsync_seconddiff_ratethreshold= 200; priv->ieee80211->fsync_state = Default_Fsync; priv->framesyncMonitor = 1; // current default 0xc38 monitor on init_timer(&priv->fsync_timer); priv->fsync_timer.data = (unsigned long)dev; priv->fsync_timer.function = dm_fsync_timer_callback; } static void dm_deInit_fsync(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); del_timer_sync(&priv->fsync_timer); } void dm_fsync_timer_callback(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct r8192_priv *priv = ieee80211_priv((struct net_device *)data); u32 rate_index, rate_count = 0, rate_count_diff=0; bool bSwitchFromCountDiff = false; bool bDoubleTimeInterval = false; if( priv->ieee80211->state == IEEE80211_LINKED && priv->ieee80211->bfsync_enable && (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) { // Count rate 54, MCS [7], [12, 13, 14, 15] u32 rate_bitmap; for(rate_index = 0; rate_index <= 27; rate_index++) { rate_bitmap = 1 << rate_index; if(priv->ieee80211->fsync_rate_bitmap & rate_bitmap) rate_count+= priv->stats.received_rate_histogram[1][rate_index]; } if(rate_count < priv->rate_record) rate_count_diff = 0xffffffff - rate_count + priv->rate_record; else rate_count_diff = rate_count 
- priv->rate_record; if(rate_count_diff < priv->rateCountDiffRecord) { u32 DiffNum = priv->rateCountDiffRecord - rate_count_diff; // Contiune count if(DiffNum >= priv->ieee80211->fsync_seconddiff_ratethreshold) priv->ContiuneDiffCount++; else priv->ContiuneDiffCount = 0; // Contiune count over if(priv->ContiuneDiffCount >=2) { bSwitchFromCountDiff = true; priv->ContiuneDiffCount = 0; } } else { // Stop contiune count priv->ContiuneDiffCount = 0; } //If Count diff <= FsyncRateCountThreshold if(rate_count_diff <= priv->ieee80211->fsync_firstdiff_ratethreshold) { bSwitchFromCountDiff = true; priv->ContiuneDiffCount = 0; } priv->rate_record = rate_count; priv->rateCountDiffRecord = rate_count_diff; RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync); // if we never receive those mcs rate and rssi > 30 % then switch fsyn if(priv->undecorated_smoothed_pwdb > priv->ieee80211->fsync_rssi_threshold && bSwitchFromCountDiff) { bDoubleTimeInterval = true; priv->bswitch_fsync = !priv->bswitch_fsync; if(priv->bswitch_fsync) { #ifdef RTL8190P write_nic_byte(dev,0xC36, 0x00); #else write_nic_byte(dev,0xC36, 0x1c); #endif write_nic_byte(dev, 0xC3e, 0x90); } else { #ifdef RTL8190P write_nic_byte(dev, 0xC36, 0x40); #else write_nic_byte(dev, 0xC36, 0x5c); #endif write_nic_byte(dev, 0xC3e, 0x96); } } else if(priv->undecorated_smoothed_pwdb <= priv->ieee80211->fsync_rssi_threshold) { if(priv->bswitch_fsync) { priv->bswitch_fsync = false; #ifdef RTL8190P write_nic_byte(dev, 0xC36, 0x40); #else write_nic_byte(dev, 0xC36, 0x5c); #endif write_nic_byte(dev, 0xC3e, 0x96); } } if(bDoubleTimeInterval){ if(timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval); add_timer(&priv->fsync_timer); } else{ if(timer_pending(&priv->fsync_timer)) 
del_timer_sync(&priv->fsync_timer); priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval); add_timer(&priv->fsync_timer); } } else { // Let Register return to default value; if(priv->bswitch_fsync) { priv->bswitch_fsync = false; #ifdef RTL8190P write_nic_byte(dev, 0xC36, 0x40); #else write_nic_byte(dev, 0xC36, 0x5c); #endif write_nic_byte(dev, 0xC3e, 0x96); } priv->ContiuneDiffCount = 0; #ifdef RTL8190P write_nic_dword(dev, rOFDM0_RxDetector2, 0x164052cd); #else write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd); #endif } RT_TRACE(COMP_HALDM, "ContiuneDiffCount %d\n", priv->ContiuneDiffCount); RT_TRACE(COMP_HALDM, "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n", priv->rate_record, rate_count, rate_count_diff , priv->bswitch_fsync); } static void dm_StartHWFsync(struct net_device *dev) { RT_TRACE(COMP_HALDM, "%s\n", __FUNCTION__); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cf); write_nic_byte(dev, 0xc3b, 0x41); } static void dm_EndSWFsync(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); RT_TRACE(COMP_HALDM, "%s\n", __FUNCTION__); del_timer_sync(&(priv->fsync_timer)); // Let Register return to default value; if(priv->bswitch_fsync) { priv->bswitch_fsync = false; #ifdef RTL8190P write_nic_byte(dev, 0xC36, 0x40); #else write_nic_byte(dev, 0xC36, 0x5c); #endif write_nic_byte(dev, 0xC3e, 0x96); } priv->ContiuneDiffCount = 0; #ifndef RTL8190P write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd); #endif } static void dm_StartSWFsync(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); u32 rateIndex; u32 rateBitmap; RT_TRACE(COMP_HALDM,"%s\n", __FUNCTION__); // Initial rate record to zero, start to record. priv->rate_record = 0; // Initial contiune diff count to zero, start to record. 
priv->ContiuneDiffCount = 0; priv->rateCountDiffRecord = 0; priv->bswitch_fsync = false; if(priv->ieee80211->mode == WIRELESS_MODE_N_24G) { priv->ieee80211->fsync_firstdiff_ratethreshold= 600; priv->ieee80211->fsync_seconddiff_ratethreshold = 0xffff; } else { priv->ieee80211->fsync_firstdiff_ratethreshold= 200; priv->ieee80211->fsync_seconddiff_ratethreshold = 200; } for(rateIndex = 0; rateIndex <= 27; rateIndex++) { rateBitmap = 1 << rateIndex; if(priv->ieee80211->fsync_rate_bitmap & rateBitmap) priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex]; } if(timer_pending(&priv->fsync_timer)) del_timer_sync(&priv->fsync_timer); priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval); add_timer(&priv->fsync_timer); #ifndef RTL8190P write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd); #endif } static void dm_EndHWFsync(struct net_device *dev) { RT_TRACE(COMP_HALDM,"%s\n", __FUNCTION__); write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd); write_nic_byte(dev, 0xc3b, 0x49); } void dm_check_fsync(struct net_device *dev) { #define RegC38_Default 0 #define RegC38_NonFsync_Other_AP 1 #define RegC38_Fsync_AP_BCM 2 struct r8192_priv *priv = ieee80211_priv(dev); //u32 framesyncC34; static u8 reg_c38_State=RegC38_Default; static u32 reset_cnt=0; RT_TRACE(COMP_HALDM, "RSSI %d TimeInterval %d MultipleTimeInterval %d\n", priv->ieee80211->fsync_rssi_threshold, priv->ieee80211->fsync_time_interval, priv->ieee80211->fsync_multiple_timeinterval); RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n", priv->ieee80211->fsync_rate_bitmap, priv->ieee80211->fsync_firstdiff_ratethreshold, priv->ieee80211->fsync_seconddiff_ratethreshold); if( priv->ieee80211->state == IEEE80211_LINKED && (priv->ieee80211->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) { if(priv->ieee80211->bfsync_enable == 0) { switch(priv->ieee80211->fsync_state) { case Default_Fsync: dm_StartHWFsync(dev); priv->ieee80211->fsync_state = 
HW_Fsync; break; case SW_Fsync: dm_EndSWFsync(dev); dm_StartHWFsync(dev); priv->ieee80211->fsync_state = HW_Fsync; break; case HW_Fsync: default: break; } } else { switch(priv->ieee80211->fsync_state) { case Default_Fsync: dm_StartSWFsync(dev); priv->ieee80211->fsync_state = SW_Fsync; break; case HW_Fsync: dm_EndHWFsync(dev); dm_StartSWFsync(dev); priv->ieee80211->fsync_state = SW_Fsync; break; case SW_Fsync: default: break; } } if(priv->framesyncMonitor) { if(reg_c38_State != RegC38_Fsync_AP_BCM) { //For broadcom AP we write different default value #ifdef RTL8190P write_nic_byte(dev, rOFDM0_RxDetector3, 0x15); #else write_nic_byte(dev, rOFDM0_RxDetector3, 0x95); #endif reg_c38_State = RegC38_Fsync_AP_BCM; } } } else { switch(priv->ieee80211->fsync_state) { case HW_Fsync: dm_EndHWFsync(dev); priv->ieee80211->fsync_state = Default_Fsync; break; case SW_Fsync: dm_EndSWFsync(dev); priv->ieee80211->fsync_state = Default_Fsync; break; case Default_Fsync: default: break; } if(priv->framesyncMonitor) { if(priv->ieee80211->state == IEEE80211_LINKED) { if(priv->undecorated_smoothed_pwdb <= RegC38_TH) { if(reg_c38_State != RegC38_NonFsync_Other_AP) { #ifdef RTL8190P write_nic_byte(dev, rOFDM0_RxDetector3, 0x10); #else write_nic_byte(dev, rOFDM0_RxDetector3, 0x90); #endif reg_c38_State = RegC38_NonFsync_Other_AP; #if 0//cosa if (Adapter->HardwareType == HARDWARE_TYPE_RTL8190P) DbgPrint("Fsync is idle, rssi<=35, write 0xc38 = 0x%x \n", 0x10); else DbgPrint("Fsync is idle, rssi<=35, write 0xc38 = 0x%x \n", 0x90); #endif } } else if(priv->undecorated_smoothed_pwdb >= (RegC38_TH+5)) { if(reg_c38_State) { write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; //DbgPrint("Fsync is idle, rssi>=40, write 0xc38 = 0x%x \n", pHalData->framesync); } } } else { if(reg_c38_State) { write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; //DbgPrint("Fsync is idle, not connected, write 0xc38 = 0x%x \n", 
pHalData->framesync); } } } } if(priv->framesyncMonitor) { if(priv->reset_count != reset_cnt) { //After silent reset, the reg_c38_State will be returned to default value write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; reset_cnt = priv->reset_count; //DbgPrint("reg_c38_State = 0 for silent reset. \n"); } } else { if(reg_c38_State) { write_nic_byte(dev, rOFDM0_RxDetector3, priv->framesync); reg_c38_State = RegC38_Default; //DbgPrint("framesync no monitor, write 0xc38 = 0x%x \n", pHalData->framesync); } } } /*----------------------------------------------------------------------------- * Function: dm_shadow_init() * * Overview: Store all NIC MAC/BB register content. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/29/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ void dm_shadow_init(struct net_device *dev) { u8 page; u16 offset; for (page = 0; page < 5; page++) for (offset = 0; offset < 256; offset++) { dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256); //DbgPrint("P-%d/O-%02x=%02x\r\n", page, offset, DM_Shadow[page][offset]); } for (page = 8; page < 11; page++) for (offset = 0; offset < 256; offset++) dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256); for (page = 12; page < 15; page++) for (offset = 0; offset < 256; offset++) dm_shadow[page][offset] = read_nic_byte(dev, offset+page*256); } /* dm_shadow_init */ /*---------------------------Define function prototype------------------------*/ /*----------------------------------------------------------------------------- * Function: DM_DynamicTxPower() * * Overview: Detect Signal strength to control TX Registry Tx Power Control For Near/Far Range * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 03/06/2008 Jacken Create Version 0. 
* *---------------------------------------------------------------------------*/ static void dm_init_dynamic_txpower(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); //Initial TX Power Control for near/far range , add by amy 2008/05/15, porting from windows code. priv->ieee80211->bdynamic_txpower_enable = true; //Default to enable Tx Power Control priv->bLastDTPFlag_High = false; priv->bLastDTPFlag_Low = false; priv->bDynamicTxHighPower = false; priv->bDynamicTxLowPower = false; } static void dm_dynamic_txpower(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); unsigned int txhipower_threshhold=0; unsigned int txlowpower_threshold=0; if(priv->ieee80211->bdynamic_txpower_enable != true) { priv->bDynamicTxHighPower = false; priv->bDynamicTxLowPower = false; return; } //printk("priv->ieee80211->current_network.unknown_cap_exist is %d ,priv->ieee80211->current_network.broadcom_cap_exist is %d\n",priv->ieee80211->current_network.unknown_cap_exist,priv->ieee80211->current_network.broadcom_cap_exist); if((priv->ieee80211->current_network.atheros_cap_exist ) && (priv->ieee80211->mode == IEEE_G)){ txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH; txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW; } else { txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH; txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW; } // printk("=======>%s(): txhipower_threshhold is %d,txlowpower_threshold is %d\n",__FUNCTION__,txhipower_threshhold,txlowpower_threshold); RT_TRACE(COMP_TXAGC,"priv->undecorated_smoothed_pwdb = %ld \n" , priv->undecorated_smoothed_pwdb); if(priv->ieee80211->state == IEEE80211_LINKED) { if(priv->undecorated_smoothed_pwdb >= txhipower_threshhold) { priv->bDynamicTxHighPower = true; priv->bDynamicTxLowPower = false; } else { // high power state check if(priv->undecorated_smoothed_pwdb < txlowpower_threshold && priv->bDynamicTxHighPower == true) { priv->bDynamicTxHighPower = false; } // low power state check 
if(priv->undecorated_smoothed_pwdb < 35) { priv->bDynamicTxLowPower = true; } else if(priv->undecorated_smoothed_pwdb >= 40) { priv->bDynamicTxLowPower = false; } } } else { //pHalData->bTXPowerCtrlforNearFarRange = !pHalData->bTXPowerCtrlforNearFarRange; priv->bDynamicTxHighPower = false; priv->bDynamicTxLowPower = false; } if( (priv->bDynamicTxHighPower != priv->bLastDTPFlag_High ) || (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low ) ) { RT_TRACE(COMP_TXAGC,"SetTxPowerLevel8190() channel = %d \n" , priv->ieee80211->current_network.channel); rtl8192_phy_setTxPower(dev,priv->ieee80211->current_network.channel); } priv->bLastDTPFlag_High = priv->bDynamicTxHighPower; priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower; } /* dm_dynamic_txpower */ //added by vivi, for read tx rate and retrycount static void dm_check_txrateandretrycount(struct net_device * dev) { struct r8192_priv *priv = ieee80211_priv(dev); struct ieee80211_device* ieee = priv->ieee80211; //for 11n tx rate // priv->stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg); ieee->softmac_stats.CurrentShowTxate = read_nic_byte(dev, Current_Tx_Rate_Reg); //printk("=============>tx_rate_reg:%x\n", ieee->softmac_stats.CurrentShowTxate); //for initial tx rate // priv->stats.last_packet_rate = read_nic_byte(dev, Initial_Tx_Rate_Reg); ieee->softmac_stats.last_packet_rate = read_nic_byte(dev ,Initial_Tx_Rate_Reg); //for tx tx retry count // priv->stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg); ieee->softmac_stats.txretrycount = read_nic_dword(dev, Tx_Retry_Count_Reg); } static void dm_send_rssi_tofw(struct net_device *dev) { DCMD_TXCMD_T tx_cmd; struct r8192_priv *priv = ieee80211_priv(dev); // If we test chariot, we should stop the TX command ? // Because 92E will always silent reset when we send tx command. We use register // 0x1e0(byte) to botify driver. 
write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb); return; #if 1 tx_cmd.Op = TXCMD_SET_RX_RSSI; tx_cmd.Length = 4; tx_cmd.Value = priv->undecorated_smoothed_pwdb; cmpk_message_handle_tx(dev, (u8*)&tx_cmd, DESC_PACKET_TYPE_INIT, sizeof(DCMD_TXCMD_T)); #endif } /*---------------------------Define function prototype------------------------*/
gpl-2.0
aimaletdinow/LABS
drivers/i2c/busses/i2c-pnx.c
766
21454
/* * Provides I2C support for Philips PNX010x/PNX4008 boards. * * Authors: Dennis Kovalev <dkovalev@ru.mvista.com> * Vitaly Wool <vwool@ru.mvista.com> * * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/timer.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/i2c-pnx.h> #include <linux/io.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/of.h> #define I2C_PNX_TIMEOUT_DEFAULT 10 /* msec */ #define I2C_PNX_SPEED_KHZ_DEFAULT 100 #define I2C_PNX_REGION_SIZE 0x100 enum { mstatus_tdi = 0x00000001, mstatus_afi = 0x00000002, mstatus_nai = 0x00000004, mstatus_drmi = 0x00000008, mstatus_active = 0x00000020, mstatus_scl = 0x00000040, mstatus_sda = 0x00000080, mstatus_rff = 0x00000100, mstatus_rfe = 0x00000200, mstatus_tff = 0x00000400, mstatus_tfe = 0x00000800, }; enum { mcntrl_tdie = 0x00000001, mcntrl_afie = 0x00000002, mcntrl_naie = 0x00000004, mcntrl_drmie = 0x00000008, mcntrl_drsie = 0x00000010, mcntrl_rffie = 0x00000020, mcntrl_daie = 0x00000040, mcntrl_tffie = 0x00000080, mcntrl_reset = 0x00000100, mcntrl_cdbmode = 0x00000400, }; enum { rw_bit = 1 << 0, start_bit = 1 << 8, stop_bit = 1 << 9, }; #define I2C_REG_RX(a) ((a)->ioaddr) /* Rx FIFO reg (RO) */ #define I2C_REG_TX(a) ((a)->ioaddr) /* Tx FIFO reg (WO) */ #define I2C_REG_STS(a) ((a)->ioaddr + 0x04) /* Status reg (RO) */ #define I2C_REG_CTL(a) ((a)->ioaddr + 0x08) /* Ctl reg */ #define I2C_REG_CKL(a) ((a)->ioaddr + 0x0c) /* Clock divider low */ #define I2C_REG_CKH(a) ((a)->ioaddr + 0x10) /* Clock divider high */ #define I2C_REG_ADR(a) ((a)->ioaddr + 0x14) /* I2C address */ #define I2C_REG_RFL(a) ((a)->ioaddr + 0x18) /* Rx FIFO 
level (RO) */ #define I2C_REG_TFL(a) ((a)->ioaddr + 0x1c) /* Tx FIFO level (RO) */ #define I2C_REG_RXB(a) ((a)->ioaddr + 0x20) /* Num of bytes Rx-ed (RO) */ #define I2C_REG_TXB(a) ((a)->ioaddr + 0x24) /* Num of bytes Tx-ed (RO) */ #define I2C_REG_TXS(a) ((a)->ioaddr + 0x28) /* Tx slave FIFO (RO) */ #define I2C_REG_STFL(a) ((a)->ioaddr + 0x2c) /* Tx slave FIFO level (RO) */ static inline int wait_timeout(struct i2c_pnx_algo_data *data) { long timeout = data->timeout; while (timeout > 0 && (ioread32(I2C_REG_STS(data)) & mstatus_active)) { mdelay(1); timeout--; } return (timeout <= 0); } static inline int wait_reset(struct i2c_pnx_algo_data *data) { long timeout = data->timeout; while (timeout > 0 && (ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) { mdelay(1); timeout--; } return (timeout <= 0); } static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) { struct timer_list *timer = &alg_data->mif.timer; unsigned long expires = msecs_to_jiffies(alg_data->timeout); if (expires <= 1) expires = 2; del_timer_sync(timer); dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n", jiffies, expires); timer->expires = jiffies + expires; timer->data = (unsigned long)alg_data; add_timer(timer); } /** * i2c_pnx_start - start a device * @slave_addr: slave address * @adap: pointer to adapter structure * * Generate a START signal in the desired mode. */ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_pnx_algo_data *alg_data) { dev_dbg(&alg_data->adapter.dev, "%s(): addr 0x%x mode %d\n", __func__, slave_addr, alg_data->mif.mode); /* Check for 7 bit slave addresses only */ if (slave_addr & ~0x7f) { dev_err(&alg_data->adapter.dev, "%s: Invalid slave address %x. Only 7-bit addresses are supported\n", alg_data->adapter.name, slave_addr); return -EINVAL; } /* First, make sure bus is idle */ if (wait_timeout(alg_data)) { /* Somebody else is monopolizing the bus */ dev_err(&alg_data->adapter.dev, "%s: Bus busy. 
Slave addr = %02x, cntrl = %x, stat = %x\n", alg_data->adapter.name, slave_addr, ioread32(I2C_REG_CTL(alg_data)), ioread32(I2C_REG_STS(alg_data))); return -EBUSY; } else if (ioread32(I2C_REG_STS(alg_data)) & mstatus_afi) { /* Sorry, we lost the bus */ dev_err(&alg_data->adapter.dev, "%s: Arbitration failure. Slave addr = %02x\n", alg_data->adapter.name, slave_addr); return -EIO; } /* * OK, I2C is enabled and we have the bus. * Clear the current TDI and AFI status flags. */ iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): sending %#x\n", __func__, (slave_addr << 1) | start_bit | alg_data->mif.mode); /* Write the slave address, START bit and R/W bit */ iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode, I2C_REG_TX(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): exit\n", __func__); return 0; } /** * i2c_pnx_stop - stop a device * @adap: pointer to I2C adapter structure * * Generate a STOP signal to terminate the master transaction. */ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data) { /* Only 1 msec max timeout due to interrupt context */ long timeout = 1000; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); /* Write a STOP bit to TX FIFO */ iowrite32(0xff | stop_bit, I2C_REG_TX(alg_data)); /* Wait until the STOP is seen. 
*/ while (timeout > 0 && (ioread32(I2C_REG_STS(alg_data)) & mstatus_active)) { /* may be called from interrupt context */ udelay(1); timeout--; } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); } /** * i2c_pnx_master_xmit - transmit data to slave * @adap: pointer to I2C adapter structure * * Sends one byte of data to the slave */ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) { u32 val; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); if (alg_data->mif.len > 0) { /* We still have something to talk about... */ val = *alg_data->mif.buf++; if (alg_data->mif.len == 1) val |= stop_bit; alg_data->mif.len--; iowrite32(val, I2C_REG_TX(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): xmit %#x [%d]\n", __func__, val, alg_data->mif.len + 1); if (alg_data->mif.len == 0) { if (alg_data->last) { /* Wait until the STOP is seen. */ if (wait_timeout(alg_data)) dev_err(&alg_data->adapter.dev, "The bus is still active after timeout\n"); } /* Disable master interrupts */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) & ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine.\n", __func__); complete(&alg_data->mif.complete); } } else if (alg_data->mif.len == 0) { /* zero-sized transfer */ i2c_pnx_stop(alg_data); /* Disable master interrupts. */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) & ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie), I2C_REG_CTL(alg_data)); /* Stop timer. 
*/ del_timer_sync(&alg_data->mif.timer); dev_dbg(&alg_data->adapter.dev, "%s(): Waking up xfer routine after zero-xfer.\n", __func__); complete(&alg_data->mif.complete); } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } /** * i2c_pnx_master_rcv - receive data from slave * @adap: pointer to I2C adapter structure * * Reads one byte data from the slave */ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) { unsigned int val = 0; u32 ctl = 0; dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); /* Check, whether there is already data, * or we didn't 'ask' for it yet. */ if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) { /* 'Asking' is done asynchronously, e.g. dummy TX of several * bytes is done before the first actual RX arrives in FIFO. * Therefore, ordered bytes (via TX) are counted separately. */ if (alg_data->mif.order) { dev_dbg(&alg_data->adapter.dev, "%s(): Write dummy data to fill Rx-fifo...\n", __func__); if (alg_data->mif.order == 1) { /* Last byte, do not acknowledge next rcv. */ val |= stop_bit; /* * Enable interrupt RFDAIE (data in Rx fifo), * and disable DRMIE (need data for Tx) */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl |= mcntrl_rffie | mcntrl_daie; ctl &= ~mcntrl_drmie; iowrite32(ctl, I2C_REG_CTL(alg_data)); } /* * Now we'll 'ask' for data: * For each byte we want to receive, we must * write a (dummy) byte to the Tx-FIFO. */ iowrite32(val, I2C_REG_TX(alg_data)); alg_data->mif.order--; } return 0; } /* Handle data. */ if (alg_data->mif.len > 0) { val = ioread32(I2C_REG_RX(alg_data)); *alg_data->mif.buf++ = (u8) (val & 0xff); dev_dbg(&alg_data->adapter.dev, "%s(): rcv 0x%x [%d]\n", __func__, val, alg_data->mif.len); alg_data->mif.len--; if (alg_data->mif.len == 0) { if (alg_data->last) /* Wait until the STOP is seen. 
*/ if (wait_timeout(alg_data)) dev_err(&alg_data->adapter.dev, "The bus is still active after timeout\n"); /* Disable master interrupts */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie | mcntrl_daie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Kill timer. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } } dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n", __func__, ioread32(I2C_REG_STS(alg_data))); return 0; } static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id) { struct i2c_pnx_algo_data *alg_data = dev_id; u32 stat, ctl; dev_dbg(&alg_data->adapter.dev, "%s(): mstat = %x mctrl = %x, mode = %d\n", __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data)), alg_data->mif.mode); stat = ioread32(I2C_REG_STS(alg_data)); /* let's see what kind of event this is */ if (stat & mstatus_afi) { /* We lost arbitration in the midst of a transfer */ alg_data->mif.ret = -EIO; /* Disable master interrupts. */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Stop timer, to prevent timeout. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else if (stat & mstatus_nai) { /* Slave did not acknowledge, generate a STOP */ dev_dbg(&alg_data->adapter.dev, "%s(): Slave did not acknowledge, generating a STOP.\n", __func__); i2c_pnx_stop(alg_data); /* Disable master interrupts. */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); /* Our return value. */ alg_data->mif.ret = -EIO; /* Stop timer, to prevent timeout. */ del_timer_sync(&alg_data->mif.timer); complete(&alg_data->mif.complete); } else { /* * Two options: * - Master Tx needs data. * - There is data in the Rx-fifo * The latter is only the case if we have requested for data, * via a dummy write. 
(See 'i2c_pnx_master_rcv'.) * We therefore check, as a sanity check, whether that interrupt * has been enabled. */ if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) { if (alg_data->mif.mode == I2C_SMBUS_WRITE) { i2c_pnx_master_xmit(alg_data); } else if (alg_data->mif.mode == I2C_SMBUS_READ) { i2c_pnx_master_rcv(alg_data); } } } /* Clear TDI and AFI bits */ stat = ioread32(I2C_REG_STS(alg_data)); iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x ctrl = %x.\n", __func__, ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data))); return IRQ_HANDLED; } static void i2c_pnx_timeout(unsigned long data) { struct i2c_pnx_algo_data *alg_data = (struct i2c_pnx_algo_data *)data; u32 ctl; dev_err(&alg_data->adapter.dev, "Master timed out. stat = %04x, cntrl = %04x. Resetting master...\n", ioread32(I2C_REG_STS(alg_data)), ioread32(I2C_REG_CTL(alg_data))); /* Reset master and disable interrupts */ ctl = ioread32(I2C_REG_CTL(alg_data)); ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie | mcntrl_drmie); iowrite32(ctl, I2C_REG_CTL(alg_data)); ctl |= mcntrl_reset; iowrite32(ctl, I2C_REG_CTL(alg_data)); wait_reset(alg_data); alg_data->mif.ret = -EIO; complete(&alg_data->mif.complete); } static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data) { u32 stat; if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_active) { dev_err(&alg_data->adapter.dev, "%s: Bus is still active after xfer. Reset it...\n", alg_data->adapter.name); iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(alg_data); } else if (!(stat & mstatus_rfe) || !(stat & mstatus_tfe)) { /* If there is data in the fifo's after transfer, * flush fifo's by reset. 
*/ iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(alg_data); } else if (stat & mstatus_nai) { iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset, I2C_REG_CTL(alg_data)); wait_reset(alg_data); } } /** * i2c_pnx_xfer - generic transfer entry point * @adap: pointer to I2C adapter structure * @msgs: array of messages * @num: number of messages * * Initiates the transfer */ static int i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *pmsg; int rc = 0, completed = 0, i; struct i2c_pnx_algo_data *alg_data = adap->algo_data; u32 stat = ioread32(I2C_REG_STS(alg_data)); dev_dbg(&alg_data->adapter.dev, "%s(): entering: %d messages, stat = %04x.\n", __func__, num, ioread32(I2C_REG_STS(alg_data))); bus_reset_if_active(alg_data); /* Process transactions in a loop. */ for (i = 0; rc >= 0 && i < num; i++) { u8 addr; pmsg = &msgs[i]; addr = pmsg->addr; if (pmsg->flags & I2C_M_TEN) { dev_err(&alg_data->adapter.dev, "%s: 10 bits addr not supported!\n", alg_data->adapter.name); rc = -EINVAL; break; } alg_data->mif.buf = pmsg->buf; alg_data->mif.len = pmsg->len; alg_data->mif.order = pmsg->len; alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ? I2C_SMBUS_READ : I2C_SMBUS_WRITE; alg_data->mif.ret = 0; alg_data->last = (i == num - 1); dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n", __func__, alg_data->mif.mode, alg_data->mif.len); i2c_pnx_arm_timer(alg_data); /* initialize the completion var */ init_completion(&alg_data->mif.complete); /* Enable master interrupt */ iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie | mcntrl_naie | mcntrl_drmie, I2C_REG_CTL(alg_data)); /* Put start-code and slave-address on the bus. 
*/ rc = i2c_pnx_start(addr, alg_data); if (rc < 0) break; /* Wait for completion */ wait_for_completion(&alg_data->mif.complete); if (!(rc = alg_data->mif.ret)) completed++; dev_dbg(&alg_data->adapter.dev, "%s(): Complete, return code = %d.\n", __func__, rc); /* Clear TDI and AFI bits in case they are set. */ if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) { dev_dbg(&alg_data->adapter.dev, "%s: TDI still set... clearing now.\n", alg_data->adapter.name); iowrite32(stat, I2C_REG_STS(alg_data)); } if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) { dev_dbg(&alg_data->adapter.dev, "%s: AFI still set... clearing now.\n", alg_data->adapter.name); iowrite32(stat, I2C_REG_STS(alg_data)); } } bus_reset_if_active(alg_data); /* Cleanup to be sure... */ alg_data->mif.buf = NULL; alg_data->mif.len = 0; alg_data->mif.order = 0; dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n", __func__, ioread32(I2C_REG_STS(alg_data))); if (completed != num) return ((rc < 0) ? rc : -EREMOTEIO); return num; } static u32 i2c_pnx_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm pnx_algorithm = { .master_xfer = i2c_pnx_xfer, .functionality = i2c_pnx_func, }; #ifdef CONFIG_PM_SLEEP static int i2c_pnx_controller_suspend(struct device *dev) { struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); clk_disable(alg_data->clk); return 0; } static int i2c_pnx_controller_resume(struct device *dev) { struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); return clk_enable(alg_data->clk); } static SIMPLE_DEV_PM_OPS(i2c_pnx_pm, i2c_pnx_controller_suspend, i2c_pnx_controller_resume); #define PNX_I2C_PM (&i2c_pnx_pm) #else #define PNX_I2C_PM NULL #endif static int i2c_pnx_probe(struct platform_device *pdev) { unsigned long tmp; int ret = 0; struct i2c_pnx_algo_data *alg_data; unsigned long freq; struct resource *res; u32 speed = I2C_PNX_SPEED_KHZ_DEFAULT * 1000; alg_data = devm_kzalloc(&pdev->dev, 
sizeof(*alg_data), GFP_KERNEL); if (!alg_data) return -ENOMEM; platform_set_drvdata(pdev, alg_data); alg_data->adapter.dev.parent = &pdev->dev; alg_data->adapter.algo = &pnx_algorithm; alg_data->adapter.algo_data = alg_data; alg_data->adapter.nr = pdev->id; alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT; #ifdef CONFIG_OF alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node); if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "clock-frequency", &speed); /* * At this point, it is planned to add an OF timeout property. * As soon as there is a consensus about how to call and handle * this, sth. like the following can be put here: * * of_property_read_u32(pdev->dev.of_node, "timeout", * &alg_data->timeout); */ } #endif alg_data->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(alg_data->clk)) return PTR_ERR(alg_data->clk); init_timer(&alg_data->mif.timer); alg_data->mif.timer.function = i2c_pnx_timeout; alg_data->mif.timer.data = (unsigned long)alg_data; snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name), "%s", pdev->name); /* Register I/O resource */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); alg_data->ioaddr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(alg_data->ioaddr)) return PTR_ERR(alg_data->ioaddr); ret = clk_enable(alg_data->clk); if (ret) return ret; freq = clk_get_rate(alg_data->clk); /* * Clock Divisor High This value is the number of system clocks * the serial clock (SCL) will be high. * For example, if the system clock period is 50 ns and the maximum * desired serial period is 10000 ns (100 kHz), then CLKHI would be * set to 0.5*(f_sys/f_i2c)-2=0.5*(20e6/100e3)-2=98. The actual value * programmed into CLKHI will vary from this slightly due to * variations in the output pad's rise and fall times as well as * the deglitching filter length. 
*/ tmp = (freq / speed) / 2 - 2; if (tmp > 0x3FF) tmp = 0x3FF; iowrite32(tmp, I2C_REG_CKH(alg_data)); iowrite32(tmp, I2C_REG_CKL(alg_data)); iowrite32(mcntrl_reset, I2C_REG_CTL(alg_data)); if (wait_reset(alg_data)) { ret = -ENODEV; goto out_clock; } init_completion(&alg_data->mif.complete); alg_data->irq = platform_get_irq(pdev, 0); if (alg_data->irq < 0) { dev_err(&pdev->dev, "Failed to get IRQ from platform resource\n"); ret = alg_data->irq; goto out_clock; } ret = devm_request_irq(&pdev->dev, alg_data->irq, i2c_pnx_interrupt, 0, pdev->name, alg_data); if (ret) goto out_clock; /* Register this adapter with the I2C subsystem */ ret = i2c_add_numbered_adapter(&alg_data->adapter); if (ret < 0) { dev_err(&pdev->dev, "I2C: Failed to add bus\n"); goto out_clock; } dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n", alg_data->adapter.name, res->start, alg_data->irq); return 0; out_clock: clk_disable(alg_data->clk); return ret; } static int i2c_pnx_remove(struct platform_device *pdev) { struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); i2c_del_adapter(&alg_data->adapter); clk_disable(alg_data->clk); return 0; } #ifdef CONFIG_OF static const struct of_device_id i2c_pnx_of_match[] = { { .compatible = "nxp,pnx-i2c" }, { }, }; MODULE_DEVICE_TABLE(of, i2c_pnx_of_match); #endif static struct platform_driver i2c_pnx_driver = { .driver = { .name = "pnx-i2c", .owner = THIS_MODULE, .of_match_table = of_match_ptr(i2c_pnx_of_match), .pm = PNX_I2C_PM, }, .probe = i2c_pnx_probe, .remove = i2c_pnx_remove, }; static int __init i2c_adap_pnx_init(void) { return platform_driver_register(&i2c_pnx_driver); } static void __exit i2c_adap_pnx_exit(void) { platform_driver_unregister(&i2c_pnx_driver); } MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>"); MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pnx-i2c"); /* We need to make sure I2C is initialized before USB */ 
subsys_initcall(i2c_adap_pnx_init); module_exit(i2c_adap_pnx_exit);
gpl-2.0
warped-rudi/linux-linaro-stable-mx6
drivers/input/keyboard/pxa930_rotary.c
766
4619
/*
 * Driver for the enhanced rotary controller on pxa930 and pxa935
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <linux/platform_data/keyboard-pxa930_rotary.h>

#define SBCR	(0x04)		/* control register offset */
#define ERCR	(0x0c)		/* rotary count register offset */

#define SBCR_ERSB	(1 << 5)	/* enhanced-rotary soft reset bit */

/* Per-device state. */
struct pxa930_rotary {
	struct input_dev	*input_dev;
	void __iomem		*mmio_base;
	int			 last_ercr;	/* ERCR value at previous irq */

	struct pxa930_rotary_platform_data *pdata;
};

/* Pulse the soft-reset bit, clearing the hardware rotary counter. */
static void clear_sbcr(struct pxa930_rotary *r)
{
	uint32_t sbcr = __raw_readl(r->mmio_base + SBCR);

	__raw_writel(sbcr | SBCR_ERSB, r->mmio_base + SBCR);
	__raw_writel(sbcr & ~SBCR_ERSB, r->mmio_base + SBCR);
}

/*
 * Interrupt handler: read the (4-bit) rotary count, reset it, and report
 * the delta either as up/down key events or as a relative axis event,
 * depending on the platform data.
 */
static irqreturn_t rotary_irq(int irq, void *dev_id)
{
	struct pxa930_rotary *r = dev_id;
	struct pxa930_rotary_platform_data *pdata = r->pdata;
	int ercr, delta, key;

	ercr = __raw_readl(r->mmio_base + ERCR) & 0xf;
	clear_sbcr(r);

	delta = ercr - r->last_ercr;
	if (delta == 0)
		return IRQ_HANDLED;

	r->last_ercr = ercr;

	if (pdata->up_key && pdata->down_key) {
		key = (delta > 0) ? pdata->up_key : pdata->down_key;
		input_report_key(r->input_dev, key, 1);
		input_sync(r->input_dev);
		input_report_key(r->input_dev, key, 0);
	} else
		input_report_rel(r->input_dev, pdata->rel_code, delta);

	input_sync(r->input_dev);

	return IRQ_HANDLED;
}

/* input_dev open callback: start from a cleared counter. */
static int pxa930_rotary_open(struct input_dev *dev)
{
	struct pxa930_rotary *r = input_get_drvdata(dev);

	clear_sbcr(r);

	return 0;
}

/* input_dev close callback: leave the counter cleared. */
static void pxa930_rotary_close(struct input_dev *dev)
{
	struct pxa930_rotary *r = input_get_drvdata(dev);

	clear_sbcr(r);
}

/*
 * Probe: validate resources and platform data, map registers, allocate
 * and configure the input device (key-pair or relative-axis mode),
 * request the irq and register with the input subsystem.
 */
static int pxa930_rotary_probe(struct platform_device *pdev)
{
	struct pxa930_rotary_platform_data *pdata =
			dev_get_platdata(&pdev->dev);
	struct pxa930_rotary *r;
	struct input_dev *input_dev;
	struct resource *res;
	int irq;
	int err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq for rotary controller\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no I/O memory defined\n");
		return -ENXIO;
	}

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	r = kzalloc(sizeof(struct pxa930_rotary), GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	r->mmio_base = ioremap_nocache(res->start, resource_size(res));
	if (r->mmio_base == NULL) {
		dev_err(&pdev->dev, "failed to remap IO memory\n");
		err = -ENXIO;
		goto failed_free;
	}

	r->pdata = pdata;
	platform_set_drvdata(pdev, r);

	/* allocate and register the input device */
	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&pdev->dev, "failed to allocate input device\n");
		err = -ENOMEM;
		goto failed_free_io;
	}

	input_dev->name = pdev->name;
	input_dev->id.bustype = BUS_HOST;
	input_dev->open = pxa930_rotary_open;
	input_dev->close = pxa930_rotary_close;
	input_dev->dev.parent = &pdev->dev;

	/* Key-pair mode if both keys are given; relative-axis mode otherwise. */
	if (pdata->up_key && pdata->down_key) {
		__set_bit(pdata->up_key, input_dev->keybit);
		__set_bit(pdata->down_key, input_dev->keybit);
		__set_bit(EV_KEY, input_dev->evbit);
	} else {
		__set_bit(pdata->rel_code, input_dev->relbit);
		__set_bit(EV_REL, input_dev->evbit);
	}

	r->input_dev = input_dev;
	input_set_drvdata(input_dev, r);

	err = request_irq(irq, rotary_irq, 0,
			"enhanced rotary", r);
	if (err) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto failed_free_input;
	}

	err = input_register_device(input_dev);
	if (err) {
		dev_err(&pdev->dev, "failed to register input device\n");
		goto failed_free_irq;
	}

	return 0;

failed_free_irq:
	free_irq(irq, r);
failed_free_input:
	input_free_device(input_dev);
failed_free_io:
	iounmap(r->mmio_base);
failed_free:
	kfree(r);
	return err;
}

/* Remove: release the irq, input device, mapping and driver state. */
static int pxa930_rotary_remove(struct platform_device *pdev)
{
	struct pxa930_rotary *r = platform_get_drvdata(pdev);

	free_irq(platform_get_irq(pdev, 0), r);
	input_unregister_device(r->input_dev);
	iounmap(r->mmio_base);
	kfree(r);

	return 0;
}

static struct platform_driver pxa930_rotary_driver = {
	.driver		= {
		.name	= "pxa930-rotary",
		.owner	= THIS_MODULE,
	},
	.probe		= pxa930_rotary_probe,
	.remove		= pxa930_rotary_remove,
};
module_platform_driver(pxa930_rotary_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for PXA93x Enhanced Rotary Controller");
MODULE_AUTHOR("Yao Yong <yaoyong@marvell.com>");
gpl-2.0