repo_name
string
path
string
copies
string
size
string
content
string
license
string
MasterAwesome/android_kernel_oneplus_msm8974
arch/arm/mm/nommu.c
3929
2573
/* * linux/arch/arm/mm/nommu.c * * ARM uCLinux supporting functions. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/io.h> #include <linux/memblock.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/page.h> #include <asm/setup.h> #include <asm/traps.h> #include <asm/mach/arch.h> #include "mm.h" void __init arm_mm_memblock_reserve(void) { /* * Register the exception vector page. * some architectures which the DRAM is the exception vector to trap, * alloc_page breaks with error, although it is not NULL, but "0." */ memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); } void __init sanity_check_meminfo(void) { phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); high_memory = __va(end - 1) + 1; } /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ void __init paging_init(struct machine_desc *mdesc) { early_trap_init((void *)CONFIG_VECTORS_BASE); bootmem_init(); } /* * We don't need to do anything here for nommu machines. 
*/ void setup_mm_for_reboot(void) { } void flush_dcache_page(struct page *page) { __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); } EXPORT_SYMBOL(flush_dcache_page); void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len) { memcpy(dst, src, len); if (vma->vm_flags & VM_EXEC) __cpuc_coherent_user_range(uaddr, uaddr + len); } void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype) { if (pfn >= (0x100000000ULL >> PAGE_SHIFT)) return NULL; return (void __iomem *) (offset + (pfn << PAGE_SHIFT)); } EXPORT_SYMBOL(__arm_ioremap_pfn); void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype, void *caller) { return __arm_ioremap_pfn(pfn, offset, size, mtype); } void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) { return (void __iomem *)phys_addr; } EXPORT_SYMBOL(__arm_ioremap); void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *); void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, unsigned int mtype, void *caller) { return __arm_ioremap(phys_addr, size, mtype); } void (*arch_iounmap)(volatile void __iomem *); void __arm_iounmap(volatile void __iomem *addr) { } EXPORT_SYMBOL(__arm_iounmap);
gpl-2.0
ProtouProject/android_kernel_htc_protou
arch/x86/pci/numaq_32.c
4953
3727
/* * numaq_32.c - Low-level PCI access for NUMA-Q machines */ #include <linux/pci.h> #include <linux/init.h> #include <linux/nodemask.h> #include <asm/apic.h> #include <asm/mpspec.h> #include <asm/pci_x86.h> #include <asm/numaq.h> #define BUS2QUAD(global) (mp_bus_id_to_node[global]) #define BUS2LOCAL(global) (mp_bus_id_to_local[global]) #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local]) #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \ (0x80000000 | (BUS2LOCAL(bus) << 16) | (devfn << 8) | (reg & ~3)) static void write_cf8(unsigned bus, unsigned devfn, unsigned reg) { unsigned val = PCI_CONF1_MQ_ADDRESS(bus, devfn, reg); if (xquad_portio) writel(val, XQUAD_PORT_ADDR(0xcf8, BUS2QUAD(bus))); else outl(val, 0xCF8); } static int pci_conf1_mq_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { unsigned long flags; void *adr __iomem = XQUAD_PORT_ADDR(0xcfc, BUS2QUAD(bus)); WARN_ON(seg); if (!value || (bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255)) return -EINVAL; raw_spin_lock_irqsave(&pci_config_lock, flags); write_cf8(bus, devfn, reg); switch (len) { case 1: if (xquad_portio) *value = readb(adr + (reg & 3)); else *value = inb(0xCFC + (reg & 3)); break; case 2: if (xquad_portio) *value = readw(adr + (reg & 2)); else *value = inw(0xCFC + (reg & 2)); break; case 4: if (xquad_portio) *value = readl(adr); else *value = inl(0xCFC); break; } raw_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } static int pci_conf1_mq_write(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 value) { unsigned long flags; void *adr __iomem = XQUAD_PORT_ADDR(0xcfc, BUS2QUAD(bus)); WARN_ON(seg); if ((bus >= MAX_MP_BUSSES) || (devfn > 255) || (reg > 255)) return -EINVAL; raw_spin_lock_irqsave(&pci_config_lock, flags); write_cf8(bus, devfn, reg); switch (len) { case 1: if (xquad_portio) writeb(value, adr + (reg & 3)); else outb((u8)value, 0xCFC + (reg & 3)); break; case 2: if (xquad_portio) 
writew(value, adr + (reg & 2)); else outw((u16)value, 0xCFC + (reg & 2)); break; case 4: if (xquad_portio) writel(value, adr + reg); else outl((u32)value, 0xCFC); break; } raw_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } #undef PCI_CONF1_MQ_ADDRESS static const struct pci_raw_ops pci_direct_conf1_mq = { .read = pci_conf1_mq_read, .write = pci_conf1_mq_write }; static void __devinit pci_fixup_i450nx(struct pci_dev *d) { /* * i450NX -- Find and scan all secondary buses on all PXB's. */ int pxb, reg; u8 busno, suba, subb; int quad = BUS2QUAD(d->bus->number); dev_info(&d->dev, "searching for i450NX host bridges\n"); reg = 0xd0; for(pxb=0; pxb<2; pxb++) { pci_read_config_byte(d, reg++, &busno); pci_read_config_byte(d, reg++, &suba); pci_read_config_byte(d, reg++, &subb); dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); if (busno) { /* Bus A */ pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, busno)); } if (suba < subb) { /* Bus B */ pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, suba+1)); } } pcibios_last_bus = -1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx); int __init pci_numaq_init(void) { int quad; raw_pci_ops = &pci_direct_conf1_mq; pci_root_bus = pcibios_scan_root(0); if (num_online_nodes() > 1) for_each_online_node(quad) { if (quad == 0) continue; printk("Scanning PCI bus %d for quad %d\n", QUADLOCAL2BUS(quad,0), quad); pci_scan_bus_with_sysdata(QUADLOCAL2BUS(quad, 0)); } return 0; }
gpl-2.0
miuihu/android_kernel_xiaomi_armor
arch/arm/mach-orion5x/d2net-setup.c
4953
9476
/* * arch/arm/mach-orion5x/d2net-setup.c * * LaCie d2Network and Big Disk Network NAS setup * * Copyright (C) 2009 Simon Guinot <sguinot@lacie.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/leds.h> #include <linux/gpio_keys.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/ata_platform.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * LaCie d2 Network Info ****************************************************************************/ /* * 512KB NOR flash Device bus boot chip select */ #define D2NET_NOR_BOOT_BASE 0xfff80000 #define D2NET_NOR_BOOT_SIZE SZ_512K /***************************************************************************** * 512KB NOR Flash on Boot Device ****************************************************************************/ /* * TODO: Check write support on flash MX29LV400CBTC-70G */ static struct mtd_partition d2net_partitions[] = { { .name = "Full512kb", .size = MTDPART_SIZ_FULL, .offset = 0, .mask_flags = MTD_WRITEABLE, }, }; static struct physmap_flash_data d2net_nor_flash_data = { .width = 1, .parts = d2net_partitions, .nr_parts = ARRAY_SIZE(d2net_partitions), }; static struct resource d2net_nor_flash_resource = { .flags = IORESOURCE_MEM, .start = D2NET_NOR_BOOT_BASE, .end = D2NET_NOR_BOOT_BASE + D2NET_NOR_BOOT_SIZE - 1, }; static struct platform_device d2net_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &d2net_nor_flash_data, }, .num_resources = 1, 
.resource = &d2net_nor_flash_resource, }; /***************************************************************************** * Ethernet ****************************************************************************/ static struct mv643xx_eth_platform_data d2net_eth_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; /***************************************************************************** * I2C devices ****************************************************************************/ /* * i2c addr | chip | description * 0x32 | Ricoh 5C372b | RTC * 0x3e | GMT G762 | PWM fan controller * 0x50 | HT24LC08 | eeprom (1kB) * * TODO: Add G762 support to the g760a driver. */ static struct i2c_board_info __initdata d2net_i2c_devices[] = { { I2C_BOARD_INFO("rs5c372b", 0x32), }, { I2C_BOARD_INFO("24c08", 0x50), }, }; /***************************************************************************** * SATA ****************************************************************************/ static struct mv_sata_platform_data d2net_sata_data = { .n_ports = 2, }; #define D2NET_GPIO_SATA0_POWER 3 #define D2NET_GPIO_SATA1_POWER 12 static void __init d2net_sata_power_init(void) { int err; err = gpio_request(D2NET_GPIO_SATA0_POWER, "SATA0 power"); if (err == 0) { err = gpio_direction_output(D2NET_GPIO_SATA0_POWER, 1); if (err) gpio_free(D2NET_GPIO_SATA0_POWER); } if (err) pr_err("d2net: failed to configure SATA0 power GPIO\n"); err = gpio_request(D2NET_GPIO_SATA1_POWER, "SATA1 power"); if (err == 0) { err = gpio_direction_output(D2NET_GPIO_SATA1_POWER, 1); if (err) gpio_free(D2NET_GPIO_SATA1_POWER); } if (err) pr_err("d2net: failed to configure SATA1 power GPIO\n"); } /***************************************************************************** * GPIO LED's ****************************************************************************/ /* * The blue front LED is wired to the CPLD and can blink in relation with the * SATA activity. 
* * The following array detail the different LED registers and the combination * of their possible values: * * led_off | blink_ctrl | SATA active | LED state * | | | * 1 | x | x | off * 0 | 0 | 0 | off * 0 | 1 | 0 | blink (rate 300ms) * 0 | x | 1 | on * * Notes: The blue and the red front LED's can't be on at the same time. * Red LED have priority. */ #define D2NET_GPIO_RED_LED 6 #define D2NET_GPIO_BLUE_LED_BLINK_CTRL 16 #define D2NET_GPIO_BLUE_LED_OFF 23 static struct gpio_led d2net_leds[] = { { .name = "d2net:blue:sata", .default_trigger = "default-on", .gpio = D2NET_GPIO_BLUE_LED_OFF, .active_low = 1, }, { .name = "d2net:red:fail", .gpio = D2NET_GPIO_RED_LED, }, }; static struct gpio_led_platform_data d2net_led_data = { .num_leds = ARRAY_SIZE(d2net_leds), .leds = d2net_leds, }; static struct platform_device d2net_gpio_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &d2net_led_data, }, }; static void __init d2net_gpio_leds_init(void) { int err; /* Configure GPIO over MPP max number. */ orion_gpio_set_valid(D2NET_GPIO_BLUE_LED_OFF, 1); /* Configure register blink_ctrl to allow SATA activity LED blinking. 
*/ err = gpio_request(D2NET_GPIO_BLUE_LED_BLINK_CTRL, "blue LED blink"); if (err == 0) { err = gpio_direction_output(D2NET_GPIO_BLUE_LED_BLINK_CTRL, 1); if (err) gpio_free(D2NET_GPIO_BLUE_LED_BLINK_CTRL); } if (err) pr_err("d2net: failed to configure blue LED blink GPIO\n"); platform_device_register(&d2net_gpio_leds); } /**************************************************************************** * GPIO keys ****************************************************************************/ #define D2NET_GPIO_PUSH_BUTTON 18 #define D2NET_GPIO_POWER_SWITCH_ON 8 #define D2NET_GPIO_POWER_SWITCH_OFF 9 #define D2NET_SWITCH_POWER_ON 0x1 #define D2NET_SWITCH_POWER_OFF 0x2 static struct gpio_keys_button d2net_buttons[] = { { .type = EV_SW, .code = D2NET_SWITCH_POWER_OFF, .gpio = D2NET_GPIO_POWER_SWITCH_OFF, .desc = "Power rocker switch (auto|off)", .active_low = 0, }, { .type = EV_SW, .code = D2NET_SWITCH_POWER_ON, .gpio = D2NET_GPIO_POWER_SWITCH_ON, .desc = "Power rocker switch (on|auto)", .active_low = 0, }, { .type = EV_KEY, .code = KEY_POWER, .gpio = D2NET_GPIO_PUSH_BUTTON, .desc = "Front Push Button", .active_low = 0, }, }; static struct gpio_keys_platform_data d2net_button_data = { .buttons = d2net_buttons, .nbuttons = ARRAY_SIZE(d2net_buttons), }; static struct platform_device d2net_gpio_buttons = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &d2net_button_data, }, }; /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int d2net_mpp_modes[] __initdata = { MPP0_GPIO, /* Board ID (bit 0) */ MPP1_GPIO, /* Board ID (bit 1) */ MPP2_GPIO, /* Board ID (bit 2) */ MPP3_GPIO, /* SATA 0 power */ MPP4_UNUSED, MPP5_GPIO, /* Fan fail detection */ MPP6_GPIO, /* Red front LED */ MPP7_UNUSED, MPP8_GPIO, /* Rear power switch (on|auto) */ MPP9_GPIO, /* Rear power switch (auto|off) */ MPP10_UNUSED, MPP11_UNUSED, MPP12_GPIO, /* SATA 1 power */ 
MPP13_UNUSED, MPP14_SATA_LED, /* SATA 0 active */ MPP15_SATA_LED, /* SATA 1 active */ MPP16_GPIO, /* Blue front LED blink control */ MPP17_UNUSED, MPP18_GPIO, /* Front button (0 = Released, 1 = Pushed ) */ MPP19_UNUSED, 0, /* 22: USB port 1 fuse (0 = Fail, 1 = Ok) */ /* 23: Blue front LED off */ /* 24: Inhibit board power off (0 = Disabled, 1 = Enabled) */ }; #define D2NET_GPIO_INHIBIT_POWER_OFF 24 static void __init d2net_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(d2net_mpp_modes); /* * Configure peripherals. */ orion5x_ehci0_init(); orion5x_eth_init(&d2net_eth_data); orion5x_i2c_init(); orion5x_uart0_init(); d2net_sata_power_init(); orion5x_sata_init(&d2net_sata_data); orion5x_setup_dev_boot_win(D2NET_NOR_BOOT_BASE, D2NET_NOR_BOOT_SIZE); platform_device_register(&d2net_nor_flash); platform_device_register(&d2net_gpio_buttons); d2net_gpio_leds_init(); pr_notice("d2net: Flash write are not yet supported.\n"); i2c_register_board_info(0, d2net_i2c_devices, ARRAY_SIZE(d2net_i2c_devices)); orion_gpio_set_valid(D2NET_GPIO_INHIBIT_POWER_OFF, 1); } /* Warning: LaCie use a wrong mach-type (0x20e=526) in their bootloader. */ #ifdef CONFIG_MACH_D2NET MACHINE_START(D2NET, "LaCie d2 Network") .atag_offset = 0x100, .init_machine = d2net_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END #endif #ifdef CONFIG_MACH_BIGDISK MACHINE_START(BIGDISK, "LaCie Big Disk Network") .atag_offset = 0x100, .init_machine = d2net_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END #endif
gpl-2.0
ion-storm/Unleashed-N4
drivers/staging/comedi/drivers/dyna_pci10xx.c
5465
12410
/*
 * comedi/drivers/dyna_pci10xx.c
 * Copyright (C) 2011 Prashant Shah, pshah.mumbai@gmail.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 Driver: dyna_pci10xx
 Devices: Dynalog India PCI DAQ Cards, http://www.dynalogindia.com/
 Author: Prashant Shah <pshah.mumbai@gmail.com>
 Developed at Automation Labs, Chemical Dept., IIT Bombay, India.
 Prof. Kannan Moudgalya <kannan@iitb.ac.in>
 http://www.iitb.ac.in
 Status: Stable
 Version: 1.0
 Device Supported :
 - Dynalog PCI 1050

 Notes :
 - Dynalog India Pvt. Ltd. does not have a registered PCI Vendor ID and
 they are using the PLX Technlogies Vendor ID since that is the PCI Chip
 used in the card.
 - Dynalog India Pvt. Ltd. has provided the internal register specification
 for their cards in their manuals.
*/

#include "../comedidev.h"
#include "comedi_pci.h"
#include <linux/mutex.h>

#define PCI_VENDOR_ID_DYNALOG		0x10b5
#define DRV_NAME			"dyna_pci10xx"

/* Max polls of the EOC bit before an AI conversion is declared lost. */
#define READ_TIMEOUT 50

/* Serializes attach/detach against each other. */
static DEFINE_MUTEX(start_stop_sem);

static DEFINE_PCI_DEVICE_TABLE(dyna_pci10xx_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_DYNALOG, 0x1050) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, dyna_pci10xx_pci_table);

static int dyna_pci10xx_attach(struct comedi_device *dev,
	struct comedi_devconfig *it);
static int dyna_pci10xx_detach(struct comedi_device *dev);

static const struct comedi_lrange range_pci1050_ai = { 3, {
							  BIP_RANGE(10),
							  BIP_RANGE(5),
							  UNI_RANGE(10)
							  }
};

/* Hardware range codes matching range_pci1050_ai, in the same order. */
static const char range_codes_pci1050_ai[] = { 0x00, 0x10, 0x30 };

static const struct comedi_lrange range_pci1050_ao = { 1, {
							  UNI_RANGE(10)
							  }
};

static const char range_codes_pci1050_ao[] = { 0x00 };

struct boardtype {
	const char *name;
	int device_id;
	int ai_chans;
	int ai_bits;
	int ao_chans;
	int ao_bits;
	int di_chans;
	int di_bits;
	int do_chans;
	int do_bits;
	const struct comedi_lrange *range_ai;
	const char *range_codes_ai;
	const struct comedi_lrange *range_ao;
	const char *range_codes_ao;
};

static const struct boardtype boardtypes[] = {
	{
	.name = "dyna_pci1050",
	.device_id = 0x1050,
	.ai_chans = 16,
	.ai_bits = 12,
	.ao_chans = 16,
	.ao_bits = 12,
	.di_chans = 16,
	.di_bits = 16,
	.do_chans = 16,
	.do_bits = 16,
	.range_ai = &range_pci1050_ai,
	.range_codes_ai = range_codes_pci1050_ai,
	.range_ao = &range_pci1050_ao,
	.range_codes_ao = range_codes_pci1050_ao,
	},
	/*  dummy entry corresponding to driver name */
	{.name = DRV_NAME},
};

static struct comedi_driver driver_dyna_pci10xx = {
	.driver_name = DRV_NAME,
	.module = THIS_MODULE,
	.attach = dyna_pci10xx_attach,
	.detach = dyna_pci10xx_detach,
	.board_name = &boardtypes[0].name,
	.offset = sizeof(struct boardtype),
	.num_names = ARRAY_SIZE(boardtypes),
};

struct dyna_pci10xx_private {
	struct pci_dev *pci_dev;	/*  ptr to PCI device */
	char valid;			/*  card is usable */
	struct mutex mutex;		/*  serializes register access */

	/* device base address registers */
	unsigned long BADR0, BADR1, BADR2, BADR3, BADR4, BADR5;
};

#define thisboard ((const struct boardtype *)dev->board_ptr)
#define devpriv ((struct dyna_pci10xx_private *)dev->private)

/******************************************************************************/
/************************** READ WRITE FUNCTIONS ******************************/
/******************************************************************************/

/*
 * Analog input callback.
 *
 * For each requested sample: trigger a conversion by writing the
 * range/channel code to BADR2+2, then poll BADR2 until the EOC bit
 * (bit 15) is set or READ_TIMEOUT polls have elapsed.  On timeout the
 * sample reads as 0 and a debug message is logged.
 */
static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
			struct comedi_subdevice *s,
			struct comedi_insn *insn, unsigned int *data)
{
	int n, counter;
	u16 d = 0;
	unsigned int chan, range;

	/* get the channel number and range */
	chan = CR_CHAN(insn->chanspec);
	range = thisboard->range_codes_ai[CR_RANGE((insn->chanspec))];

	mutex_lock(&devpriv->mutex);
	/* convert n samples */
	for (n = 0; n < insn->n; n++) {
		/* trigger conversion */
		smp_mb();
		outw_p(0x0000 + range + chan, devpriv->BADR2 + 2);
		udelay(10);
		/* read data */
		for (counter = 0; counter < READ_TIMEOUT; counter++) {
			d = inw_p(devpriv->BADR2);

			/* check if read is successfull if the EOC bit is set */
			if (d & (1 << 15))
				goto conv_finish;
		}
		data[n] = 0;
		printk(KERN_DEBUG "comedi: dyna_pci10xx: "
			"timeout reading analog input\n");
		continue;
conv_finish:
		/* mask the first 4 bits - EOC bits */
		d &= 0x0FFF;
		data[n] = d;
	}
	mutex_unlock(&devpriv->mutex);

	/* return the number of samples read/written */
	return n;
}

/*
 * Analog output callback.
 *
 * Each write to BADR2 both loads the DAC and triggers the conversion.
 * BUGFIX: the original code computed a "range" value from the *AI* range
 * code table (range_codes_ai) and a channel number, then never used
 * either - dead code that also read the wrong table.  Both locals have
 * been removed; behavior is unchanged.
 */
static int dyna_pci10xx_insn_write_ao(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	int n;

	mutex_lock(&devpriv->mutex);
	for (n = 0; n < insn->n; n++) {
		smp_mb();
		/* trigger conversion and write data */
		outw_p(data[n], devpriv->BADR2);
		udelay(10);
	}
	mutex_unlock(&devpriv->mutex);
	return n;
}

/*
 * Digital input bit interface.
 *
 * Reads the 16 input lines from BADR3.  On return data[0] carries the
 * current output state and data[1] the sampled inputs, per the comedi
 * insn_bits convention.
 */
static int dyna_pci10xx_di_insn_bits(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	u16 d = 0;

	if (insn->n != 2)
		return -EINVAL;

	mutex_lock(&devpriv->mutex);
	smp_mb();
	d = inw_p(devpriv->BADR3);
	udelay(10);

	/* on return the data[0] contains output and data[1] contains input */
	data[1] = d;
	data[0] = s->state;
	mutex_unlock(&devpriv->mutex);
	return 2;
}

/*
 * Digital output bit interface.
 *
 * data[0] is a mask of channels to change, data[1] holds the new bit
 * values; s->state caches the last value written to the port.
 */
static int dyna_pci10xx_do_insn_bits(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	if (insn->n != 2)
		return -EINVAL;

	/* The insn data is a mask in data[0] and the new data
	 * in data[1], each channel cooresponding to a bit.
	 * s->state contains the previous write data
	 */
	mutex_lock(&devpriv->mutex);
	if (data[0]) {
		s->state &= ~data[0];
		s->state |= (data[0] & data[1]);
		smp_mb();
		outw_p(s->state, devpriv->BADR3);
		udelay(10);
	}

	/*
	 * On return, data[1] contains the value of the digital
	 * input and output lines. We just return the software copy of the
	 * output values if it was a purely digital output subdevice.
	 */
	data[1] = s->state;
	mutex_unlock(&devpriv->mutex);
	return 2;
}

/******************************************************************************/
/*********************** INITIALIZATION FUNCTIONS *****************************/
/******************************************************************************/

/*
 * Probe the PCI bus for a supported Dynalog card (optionally constrained
 * to it->options[0]/[1] = bus/slot), enable it, record its BARs and set
 * up the four comedi subdevices (AI, AO, DI, DO).
 */
static int dyna_pci10xx_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	struct pci_dev *pcidev;
	unsigned int opt_bus, opt_slot;
	int board_index, i;

	mutex_lock(&start_stop_sem);

	if (alloc_private(dev, sizeof(struct dyna_pci10xx_private)) < 0) {
		printk(KERN_ERR "comedi: dyna_pci10xx: "
			"failed to allocate memory!\n");
		mutex_unlock(&start_stop_sem);
		return -ENOMEM;
	}

	opt_bus = it->options[0];
	opt_slot = it->options[1];
	dev->board_name = thisboard->name;
	dev->irq = 0;

	/*
	 * Probe the PCI bus and located the matching device
	 */
	for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
		pcidev != NULL;
		pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
		board_index = -1;
		for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {
			if ((pcidev->vendor == PCI_VENDOR_ID_DYNALOG) &&
				(pcidev->device == boardtypes[i].device_id)) {
					board_index = i;
					break;
				}
		}
		if (board_index < 0)
			continue;

		/* Found matching vendor/device. */
		if (opt_bus || opt_slot) {
			/* Check bus/slot. */
			if (opt_bus != pcidev->bus->number
			    || opt_slot != PCI_SLOT(pcidev->devfn))
				continue;	/* no match */
		}

		goto found;
	}
	printk(KERN_ERR "comedi: dyna_pci10xx: no supported device found!\n");
	mutex_unlock(&start_stop_sem);
	return -EIO;

found:
	if (!pcidev) {
		if (opt_bus || opt_slot) {
			printk(KERN_ERR "comedi: dyna_pci10xx: "
				"invalid PCI device at b:s %d:%d\n",
				opt_bus, opt_slot);
		} else {
			printk(KERN_ERR "comedi: dyna_pci10xx: "
				"invalid PCI device\n");
		}
		mutex_unlock(&start_stop_sem);
		return -EIO;
	}

	if (comedi_pci_enable(pcidev, DRV_NAME)) {
		printk(KERN_ERR "comedi: dyna_pci10xx: "
			"failed to enable PCI device and request regions!\n");
		mutex_unlock(&start_stop_sem);
		return -EIO;
	}

	mutex_init(&devpriv->mutex);
	dev->board_ptr = &boardtypes[board_index];
	devpriv->pci_dev = pcidev;

	printk(KERN_INFO "comedi: dyna_pci10xx: device found!\n");

	/* initialize device base address registers */
	devpriv->BADR0 = pci_resource_start(pcidev, 0);
	devpriv->BADR1 = pci_resource_start(pcidev, 1);
	devpriv->BADR2 = pci_resource_start(pcidev, 2);
	devpriv->BADR3 = pci_resource_start(pcidev, 3);
	devpriv->BADR4 = pci_resource_start(pcidev, 4);
	devpriv->BADR5 = pci_resource_start(pcidev, 5);

	if (alloc_subdevices(dev, 4) < 0) {
		printk(KERN_ERR "comedi: dyna_pci10xx: "
			"failed allocating subdevices\n");
		mutex_unlock(&start_stop_sem);
		return -ENOMEM;
	}

	/* analog input */
	s = dev->subdevices + 0;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
	s->n_chan = thisboard->ai_chans;
	s->maxdata = 0x0FFF;
	s->range_table = thisboard->range_ai;
	s->len_chanlist = 16;
	s->insn_read = dyna_pci10xx_insn_read_ai;

	/* analog output */
	s = dev->subdevices + 1;
	s->type = COMEDI_SUBD_AO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = thisboard->ao_chans;
	s->maxdata = 0x0FFF;
	s->range_table = thisboard->range_ao;
	s->len_chanlist = 16;
	s->insn_write = dyna_pci10xx_insn_write_ao;

	/* digital input */
	s = dev->subdevices + 2;
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND;
	s->n_chan = thisboard->di_chans;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->len_chanlist = thisboard->di_chans;
	s->insn_bits = dyna_pci10xx_di_insn_bits;

	/* digital output */
	s = dev->subdevices + 3;
	s->type = COMEDI_SUBD_DO;
	s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
	s->n_chan = thisboard->do_chans;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->len_chanlist = thisboard->do_chans;
	s->state = 0;
	s->insn_bits = dyna_pci10xx_do_insn_bits;

	devpriv->valid = 1;
	mutex_unlock(&start_stop_sem);

	printk(KERN_INFO "comedi: dyna_pci10xx: %s - device setup completed!\n",
		boardtypes[board_index].name);

	return 1;
}

/* Undo attach: release PCI regions and destroy the register mutex. */
static int dyna_pci10xx_detach(struct comedi_device *dev)
{
	if (devpriv && devpriv->pci_dev) {
		comedi_pci_disable(devpriv->pci_dev);
		mutex_destroy(&devpriv->mutex);
	}

	return 0;
}

static int __devinit driver_dyna_pci10xx_pci_probe(struct pci_dev *dev,
						   const struct pci_device_id
						   *ent)
{
	return comedi_pci_auto_config(dev, driver_dyna_pci10xx.driver_name);
}

static void __devexit driver_dyna_pci10xx_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver driver_dyna_pci10xx_pci_driver = {
	.id_table = dyna_pci10xx_pci_table,
	.probe = &driver_dyna_pci10xx_pci_probe,
	.remove = __devexit_p(&driver_dyna_pci10xx_pci_remove)
};

static int __init driver_dyna_pci10xx_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&driver_dyna_pci10xx);
	if (retval < 0)
		return retval;

	driver_dyna_pci10xx_pci_driver.name =
	    (char *)driver_dyna_pci10xx.driver_name;
	return pci_register_driver(&driver_dyna_pci10xx_pci_driver);
}

static void __exit driver_dyna_pci10xx_cleanup_module(void)
{
	pci_unregister_driver(&driver_dyna_pci10xx_pci_driver);
	comedi_driver_unregister(&driver_dyna_pci10xx);
}

module_init(driver_dyna_pci10xx_init_module);
module_exit(driver_dyna_pci10xx_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Prashant Shah <pshah.mumbai@gmail.com>");
MODULE_DESCRIPTION("Comedi based drivers for Dynalog PCI DAQ cards");
gpl-2.0
russelldias98/linux_kernel_3.0.68
arch/cris/arch-v32/kernel/process.c
7513
7026
/* * Copyright (C) 2000-2003 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * Mikael Starvik (starvik@axis.com) * Tobias Anderberg (tobiasa@axis.com), CRISv32 port. * * This file handles the architecture-dependent parts of process handling.. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/fs.h> #include <hwregs/reg_rdwr.h> #include <hwregs/reg_map.h> #include <hwregs/timer_defs.h> #include <hwregs/intr_vect_defs.h> extern void stop_watchdog(void); extern int cris_hlt_counter; /* We use this if we don't have any better idle routine. */ void default_idle(void) { local_irq_disable(); if (!need_resched() && !cris_hlt_counter) { /* Halt until exception. */ __asm__ volatile("ei \n\t" "halt "); } local_irq_enable(); } /* * Free current thread data structures etc.. */ extern void deconfigure_bp(long pid); void exit_thread(void) { deconfigure_bp(current->pid); } /* * If the watchdog is enabled, disable interrupts and enter an infinite loop. * The watchdog will reset the CPU after 0.1s. If the watchdog isn't enabled * then enable it and wait. */ extern void arch_enable_nmi(void); void hard_reset_now(void) { /* * Don't declare this variable elsewhere. We don't want any other * code to know about it than the watchdog handler in entry.S and * this code, implementing hard reset through the watchdog. */ #if defined(CONFIG_ETRAX_WATCHDOG) extern int cause_of_death; #endif printk("*** HARD RESET ***\n"); local_irq_disable(); #if defined(CONFIG_ETRAX_WATCHDOG) cause_of_death = 0xbedead; #else { reg_timer_rw_wd_ctrl wd_ctrl = {0}; stop_watchdog(); wd_ctrl.key = 16; /* Arbitrary key. */ wd_ctrl.cnt = 1; /* Minimum time. */ wd_ctrl.cmd = regk_timer_start; arch_enable_nmi(); REG_WR(timer, regi_timer0, rw_wd_ctrl, wd_ctrl); } #endif while (1) ; /* Wait for reset. */ } /* * Return saved PC of a blocked thread. 
*/ unsigned long thread_saved_pc(struct task_struct *t) { return task_pt_regs(t)->erp; } static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg) { fn(arg); do_exit(-1); /* Should never be called, return bad exit value. */ } /* Create a kernel thread. */ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); /* Don't use r10 since that is set to 0 in copy_thread. */ regs.r11 = (unsigned long) fn; regs.r12 = (unsigned long) arg; regs.erp = (unsigned long) kernel_thread_helper; regs.ccs = 1 << (I_CCS_BITNR + CCS_SHIFT); /* Create the new process. */ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } /* * Setup the child's kernel stack with a pt_regs and call switch_stack() on it. * It will be unnested during _resume and _ret_from_sys_call when the new thread * is scheduled. * * Also setup the thread switching structure which is used to keep * thread-specific data during _resumes. */ extern asmlinkage void ret_from_fork(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs; struct switch_stack *swstack; /* * Put the pt_regs structure at the end of the new kernel stack page and * fix it up. Note: the task_struct doubles as the kernel stack for the * task. */ childregs = task_pt_regs(p); *childregs = *regs; /* Struct copy of pt_regs. */ p->set_child_tid = p->clear_child_tid = NULL; childregs->r10 = 0; /* Child returns 0 after a fork/clone. */ /* Set a new TLS ? * The TLS is in $mof because it is the 5th argument to sys_clone. */ if (p->mm && (clone_flags & CLONE_SETTLS)) { task_thread_info(p)->tls = regs->mof; } /* Put the switch stack right below the pt_regs. */ swstack = ((struct switch_stack *) childregs) - 1; /* Parameter to ret_from_sys_call. 0 is don't restart the syscall. 
*/ swstack->r9 = 0; /* * We want to return into ret_from_sys_call after the _resume. * ret_from_fork will call ret_from_sys_call. */ swstack->return_ip = (unsigned long) ret_from_fork; /* Fix the user-mode and kernel-mode stackpointer. */ p->thread.usp = usp; p->thread.ksp = (unsigned long) swstack; return 0; } /* * Be aware of the "magic" 7th argument in the four system-calls below. * They need the latest stackframe, which is put as the 7th argument by * entry.S. The previous arguments are dummies or actually used, but need * to be defined to reach the 7th argument. * * N.B.: Another method to get the stackframe is to use current_regs(). But * it returns the latest stack-frame stacked when going from _user mode_ and * some of these (at least sys_clone) are called from kernel-mode sometimes * (for example during kernel_thread, above) and thus cannot use it. Thus, * to be sure not to get any surprises, we use the method for the other calls * as well. */ asmlinkage int sys_fork(long r10, long r11, long r12, long r13, long mof, long srp, struct pt_regs *regs) { return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL); } /* FIXME: Is parent_tid/child_tid really third/fourth argument? Update lib? */ asmlinkage int sys_clone(unsigned long newusp, unsigned long flags, int *parent_tid, int *child_tid, unsigned long tls, long srp, struct pt_regs *regs) { if (!newusp) newusp = rdusp(); return do_fork(flags, newusp, regs, 0, parent_tid, child_tid); } /* * vfork is a system call in i386 because of register-pressure - maybe * we can remove it and handle it in libc but we put it here until then. */ asmlinkage int sys_vfork(long r10, long r11, long r12, long r13, long mof, long srp, struct pt_regs *regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL); } /* sys_execve() executes a new program. 
*/ asmlinkage int sys_execve(const char *fname, const char *const *argv, const char *const *envp, long r13, long mof, long srp, struct pt_regs *regs) { int error; char *filename; filename = getname(fname); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, argv, envp, regs); putname(filename); out: return error; } unsigned long get_wchan(struct task_struct *p) { /* TODO */ return 0; } #undef last_sched #undef first_sched void show_regs(struct pt_regs * regs) { unsigned long usp = rdusp(); printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n", regs->erp, regs->srp, regs->ccs, usp, regs->mof); printk(" r0: %08lx r1: %08lx r2: %08lx r3: %08lx\n", regs->r0, regs->r1, regs->r2, regs->r3); printk(" r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n", regs->r4, regs->r5, regs->r6, regs->r7); printk(" r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n", regs->r8, regs->r9, regs->r10, regs->r11); printk("r12: %08lx r13: %08lx oR10: %08lx\n", regs->r12, regs->r13, regs->orig_r10); }
gpl-2.0
jorapi/android_kernel_lge_g3
arch/mips/sgi-ip22/ip22-setup.c
9049
2178
/* * ip22-setup.c: SGI specific setup, including init of the feature struct. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org) */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/kdev_t.h> #include <linux/types.h> #include <linux/module.h> #include <linux/console.h> #include <linux/sched.h> #include <linux/tty.h> #include <asm/addrspace.h> #include <asm/bcache.h> #include <asm/bootinfo.h> #include <asm/irq.h> #include <asm/reboot.h> #include <asm/time.h> #include <asm/io.h> #include <asm/traps.h> #include <asm/sgialib.h> #include <asm/sgi/mc.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ip22.h> extern void ip22_be_init(void) __init; void __init plat_mem_setup(void) { char *ctype; char *cserial; board_be_init = ip22_be_init; /* Init the INDY HPC I/O controller. Need to call this before * fucking with the memory controller because it needs to know the * boardID and whether this is a Guiness or a FullHouse machine. */ sgihpc_init(); /* Init INDY memory controller. */ sgimc_init(); #ifdef CONFIG_BOARD_SCACHE /* Now enable boardcaches, if any. */ indy_sc_init(); #endif /* Set EISA IO port base for Indigo2 * ioremap cannot fail */ set_io_port_base((unsigned long)ioremap(0x00080000, 0x1fffffff - 0x00080000)); /* ARCS console environment variable is set to "g?" for * graphics console, it is set to "d" for the first serial * line and "d2" for the second serial line. * * Need to check if the case is 'g' but no keyboard: * (ConsoleIn/Out = serial) */ ctype = ArcGetEnvironmentVariable("console"); cserial = ArcGetEnvironmentVariable("ConsoleOut"); if ((ctype && *ctype == 'd') || (cserial && *cserial == 's')) { static char options[8] __initdata; char *baud = ArcGetEnvironmentVariable("dbaud"); if (baud) strcpy(options, baud); add_preferred_console("ttyS", *(ctype + 1) == '2' ? 1 : 0, baud ? 
options : NULL); } else if (!ctype || *ctype != 'g') { /* Use ARC if we don't want serial ('d') or graphics ('g'). */ prom_flags |= PROM_FLAG_USE_AS_CONSOLE; add_preferred_console("arc", 0, NULL); } }
gpl-2.0
gengzh0016/kernel_BBxM
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
90
15540
/* * Linux network driver for Brocade Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * Copyright (c) 2005-2011 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com */ #include <linux/debugfs.h> #include <linux/module.h> #include "bnad.h" /* * BNA debufs interface * * To access the interface, debugfs file system should be mounted * if not already mounted using: * mount -t debugfs none /sys/kernel/debug * * BNA Hierarchy: * - bna/pci_dev:<pci_name> * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna * * Debugging service available per pci_dev: * fwtrc: To collect current firmware trace. * fwsave: To collect last saved fw trace as a result of firmware crash. * regwr: To write one word to chip register * regrd: To read one or more words from chip register. 
*/ struct bnad_debug_info { char *debug_buffer; void *i_private; int buffer_len; }; static int bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file) { struct bnad *bnad = inode->i_private; struct bnad_debug_info *fw_debug; unsigned long flags; int rc; fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); if (!fw_debug) return -ENOMEM; fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); if (!fw_debug->debug_buffer) { kfree(fw_debug); fw_debug = NULL; pr_warn("bna %s: Failed to allocate fwtrc buffer\n", pci_name(bnad->pcidev)); return -ENOMEM; } spin_lock_irqsave(&bnad->bna_lock, flags); rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc, fw_debug->debug_buffer, &fw_debug->buffer_len); spin_unlock_irqrestore(&bnad->bna_lock, flags); if (rc != BFA_STATUS_OK) { kfree(fw_debug->debug_buffer); fw_debug->debug_buffer = NULL; kfree(fw_debug); fw_debug = NULL; pr_warn("bnad %s: Failed to collect fwtrc\n", pci_name(bnad->pcidev)); return -ENOMEM; } file->private_data = fw_debug; return 0; } static int bnad_debugfs_open_fwsave(struct inode *inode, struct file *file) { struct bnad *bnad = inode->i_private; struct bnad_debug_info *fw_debug; unsigned long flags; int rc; fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); if (!fw_debug) return -ENOMEM; fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); if (!fw_debug->debug_buffer) { kfree(fw_debug); fw_debug = NULL; pr_warn("bna %s: Failed to allocate fwsave buffer\n", pci_name(bnad->pcidev)); return -ENOMEM; } spin_lock_irqsave(&bnad->bna_lock, flags); rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc, fw_debug->debug_buffer, &fw_debug->buffer_len); spin_unlock_irqrestore(&bnad->bna_lock, flags); if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) { kfree(fw_debug->debug_buffer); fw_debug->debug_buffer = NULL; kfree(fw_debug); fw_debug = NULL; pr_warn("bna %s: Failed to collect 
fwsave\n", pci_name(bnad->pcidev)); return -ENOMEM; } file->private_data = fw_debug; return 0; } static int bnad_debugfs_open_reg(struct inode *inode, struct file *file) { struct bnad_debug_info *reg_debug; reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); if (!reg_debug) return -ENOMEM; reg_debug->i_private = inode->i_private; file->private_data = reg_debug; return 0; } static int bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len) { struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer; struct bnad_iocmd_comp fcomp; unsigned long flags = 0; int ret = BFA_STATUS_FAILED; /* Get IOC info */ spin_lock_irqsave(&bnad->bna_lock, flags); bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr); spin_unlock_irqrestore(&bnad->bna_lock, flags); /* Retrieve CEE related info */ fcomp.bnad = bnad; fcomp.comp_status = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bnad->bna_lock, flags); ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr, bnad_cb_completion, &fcomp); if (ret != BFA_STATUS_OK) { spin_unlock_irqrestore(&bnad->bna_lock, flags); goto out; } spin_unlock_irqrestore(&bnad->bna_lock, flags); wait_for_completion(&fcomp.comp); drvinfo->cee_status = fcomp.comp_status; /* Retrieve flash partition info */ fcomp.comp_status = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bnad->bna_lock, flags); ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, bnad_cb_completion, &fcomp); if (ret != BFA_STATUS_OK) { spin_unlock_irqrestore(&bnad->bna_lock, flags); goto out; } spin_unlock_irqrestore(&bnad->bna_lock, flags); wait_for_completion(&fcomp.comp); drvinfo->flash_status = fcomp.comp_status; out: return ret; } static int bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file) { struct bnad *bnad = inode->i_private; struct bnad_debug_info *drv_info; int rc; drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); if (!drv_info) return -ENOMEM; drv_info->buffer_len = sizeof(struct 
bnad_drvinfo); drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL); if (!drv_info->debug_buffer) { kfree(drv_info); drv_info = NULL; pr_warn("bna %s: Failed to allocate drv info buffer\n", pci_name(bnad->pcidev)); return -ENOMEM; } mutex_lock(&bnad->conf_mutex); rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer, drv_info->buffer_len); mutex_unlock(&bnad->conf_mutex); if (rc != BFA_STATUS_OK) { kfree(drv_info->debug_buffer); drv_info->debug_buffer = NULL; kfree(drv_info); drv_info = NULL; pr_warn("bna %s: Failed to collect drvinfo\n", pci_name(bnad->pcidev)); return -ENOMEM; } file->private_data = drv_info; return 0; } /* Changes the current file position */ static loff_t bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) { loff_t pos = file->f_pos; struct bnad_debug_info *debug = file->private_data; if (!debug) return -EINVAL; switch (orig) { case 0: file->f_pos = offset; break; case 1: file->f_pos += offset; break; case 2: file->f_pos = debug->buffer_len - offset; break; default: return -EINVAL; } if (file->f_pos < 0 || file->f_pos > debug->buffer_len) { file->f_pos = pos; return -EINVAL; } return file->f_pos; } static ssize_t bnad_debugfs_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pos) { struct bnad_debug_info *debug = file->private_data; if (!debug || !debug->debug_buffer) return 0; return simple_read_from_buffer(buf, nbytes, pos, debug->debug_buffer, debug->buffer_len); } #define BFA_REG_CT_ADDRSZ (0x40000) #define BFA_REG_CB_ADDRSZ (0x20000) #define BFA_REG_ADDRSZ(__ioc) \ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) #define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) /* * Function to check if the register offset passed is valid. 
*/ static int bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len) { u8 area; /* check [16:15] */ area = (offset >> 15) & 0x7; if (area == 0) { /* PCIe core register */ if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */ return BFA_STATUS_EINVAL; } else if (area == 0x1) { /* CB 32 KB memory page */ if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */ return BFA_STATUS_EINVAL; } else { /* CB register space 64KB */ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc)) return BFA_STATUS_EINVAL; } return BFA_STATUS_OK; } static ssize_t bnad_debugfs_read_regrd(struct file *file, char __user *buf, size_t nbytes, loff_t *pos) { struct bnad_debug_info *regrd_debug = file->private_data; struct bnad *bnad = (struct bnad *)regrd_debug->i_private; ssize_t rc; if (!bnad->regdata) return 0; rc = simple_read_from_buffer(buf, nbytes, pos, bnad->regdata, bnad->reglen); if ((*pos + nbytes) >= bnad->reglen) { kfree(bnad->regdata); bnad->regdata = NULL; bnad->reglen = 0; } return rc; } static ssize_t bnad_debugfs_write_regrd(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct bnad_debug_info *regrd_debug = file->private_data; struct bnad *bnad = (struct bnad *)regrd_debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; int addr, len, rc, i; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; void *kern_buf; /* Allocate memory to store the user space buf */ kern_buf = kzalloc(nbytes, GFP_KERNEL); if (!kern_buf) { pr_warn("bna %s: Failed to allocate user buffer\n", pci_name(bnad->pcidev)); return -ENOMEM; } if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { kfree(kern_buf); return -ENOMEM; } rc = sscanf(kern_buf, "%x:%x", &addr, &len); if (rc < 2) { pr_warn("bna %s: Failed to read user buffer\n", pci_name(bnad->pcidev)); kfree(kern_buf); return -EINVAL; } kfree(kern_buf); kfree(bnad->regdata); bnad->regdata = NULL; bnad->reglen = 0; bnad->regdata = kzalloc(len << 2, GFP_KERNEL); if (!bnad->regdata) { pr_warn("bna 
%s: Failed to allocate regrd buffer\n", pci_name(bnad->pcidev)); return -ENOMEM; } bnad->reglen = len << 2; rb = bfa_ioc_bar0(ioc); addr &= BFA_REG_ADDRMSK(ioc); /* offset and len sanity check */ rc = bna_reg_offset_check(ioc, addr, len); if (rc) { pr_warn("bna %s: Failed reg offset check\n", pci_name(bnad->pcidev)); kfree(bnad->regdata); bnad->regdata = NULL; bnad->reglen = 0; return -EINVAL; } reg_addr = rb + addr; regbuf = (u32 *)bnad->regdata; spin_lock_irqsave(&bnad->bna_lock, flags); for (i = 0; i < len; i++) { *regbuf = readl(reg_addr); regbuf++; reg_addr += sizeof(u32); } spin_unlock_irqrestore(&bnad->bna_lock, flags); return nbytes; } static ssize_t bnad_debugfs_write_regwr(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct bnad_debug_info *debug = file->private_data; struct bnad *bnad = (struct bnad *)debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; int addr, val, rc; void __iomem *reg_addr; unsigned long flags; void *kern_buf; /* Allocate memory to store the user space buf */ kern_buf = kzalloc(nbytes, GFP_KERNEL); if (!kern_buf) { pr_warn("bna %s: Failed to allocate user buffer\n", pci_name(bnad->pcidev)); return -ENOMEM; } if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { kfree(kern_buf); return -ENOMEM; } rc = sscanf(kern_buf, "%x:%x", &addr, &val); if (rc < 2) { pr_warn("bna %s: Failed to read user buffer\n", pci_name(bnad->pcidev)); kfree(kern_buf); return -EINVAL; } kfree(kern_buf); addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ /* offset and len sanity check */ rc = bna_reg_offset_check(ioc, addr, 1); if (rc) { pr_warn("bna %s: Failed reg offset check\n", pci_name(bnad->pcidev)); return -EINVAL; } reg_addr = (bfa_ioc_bar0(ioc)) + addr; spin_lock_irqsave(&bnad->bna_lock, flags); writel(val, reg_addr); spin_unlock_irqrestore(&bnad->bna_lock, flags); return nbytes; } static int bnad_debugfs_release(struct inode *inode, struct file *file) { struct bnad_debug_info *debug = 
file->private_data; if (!debug) return 0; file->private_data = NULL; kfree(debug); return 0; } static int bnad_debugfs_buffer_release(struct inode *inode, struct file *file) { struct bnad_debug_info *debug = file->private_data; if (!debug) return 0; kfree(debug->debug_buffer); file->private_data = NULL; kfree(debug); debug = NULL; return 0; } static const struct file_operations bnad_debugfs_op_fwtrc = { .owner = THIS_MODULE, .open = bnad_debugfs_open_fwtrc, .llseek = bnad_debugfs_lseek, .read = bnad_debugfs_read, .release = bnad_debugfs_buffer_release, }; static const struct file_operations bnad_debugfs_op_fwsave = { .owner = THIS_MODULE, .open = bnad_debugfs_open_fwsave, .llseek = bnad_debugfs_lseek, .read = bnad_debugfs_read, .release = bnad_debugfs_buffer_release, }; static const struct file_operations bnad_debugfs_op_regrd = { .owner = THIS_MODULE, .open = bnad_debugfs_open_reg, .llseek = bnad_debugfs_lseek, .read = bnad_debugfs_read_regrd, .write = bnad_debugfs_write_regrd, .release = bnad_debugfs_release, }; static const struct file_operations bnad_debugfs_op_regwr = { .owner = THIS_MODULE, .open = bnad_debugfs_open_reg, .llseek = bnad_debugfs_lseek, .write = bnad_debugfs_write_regwr, .release = bnad_debugfs_release, }; static const struct file_operations bnad_debugfs_op_drvinfo = { .owner = THIS_MODULE, .open = bnad_debugfs_open_drvinfo, .llseek = bnad_debugfs_lseek, .read = bnad_debugfs_read, .release = bnad_debugfs_buffer_release, }; struct bnad_debugfs_entry { const char *name; mode_t mode; const struct file_operations *fops; }; static const struct bnad_debugfs_entry bnad_debugfs_files[] = { { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, }, { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, }, { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, }, { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, }, { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, }, }; static struct dentry *bna_debugfs_root; static atomic_t 
bna_debugfs_port_count; /* Initialize debugfs interface for BNA */ void bnad_debugfs_init(struct bnad *bnad) { const struct bnad_debugfs_entry *file; char name[64]; int i; /* Setup the BNA debugfs root directory*/ if (!bna_debugfs_root) { bna_debugfs_root = debugfs_create_dir("bna", NULL); atomic_set(&bna_debugfs_port_count, 0); if (!bna_debugfs_root) { pr_warn("BNA: debugfs root dir creation failed\n"); return; } } /* Setup the pci_dev debugfs directory for the port */ snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev)); if (!bnad->port_debugfs_root) { bnad->port_debugfs_root = debugfs_create_dir(name, bna_debugfs_root); if (!bnad->port_debugfs_root) { pr_warn("bna pci_dev %s: root dir creation failed\n", pci_name(bnad->pcidev)); return; } atomic_inc(&bna_debugfs_port_count); for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { file = &bnad_debugfs_files[i]; bnad->bnad_dentry_files[i] = debugfs_create_file(file->name, file->mode, bnad->port_debugfs_root, bnad, file->fops); if (!bnad->bnad_dentry_files[i]) { pr_warn( "BNA pci_dev:%s: create %s entry failed\n", pci_name(bnad->pcidev), file->name); return; } } } } /* Uninitialize debugfs interface for BNA */ void bnad_debugfs_uninit(struct bnad *bnad) { int i; for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { if (bnad->bnad_dentry_files[i]) { debugfs_remove(bnad->bnad_dentry_files[i]); bnad->bnad_dentry_files[i] = NULL; } } /* Remove the pci_dev debugfs directory for the port */ if (bnad->port_debugfs_root) { debugfs_remove(bnad->port_debugfs_root); bnad->port_debugfs_root = NULL; atomic_dec(&bna_debugfs_port_count); } /* Remove the BNA debugfs root directory */ if (atomic_read(&bna_debugfs_port_count) == 0) { debugfs_remove(bna_debugfs_root); bna_debugfs_root = NULL; } }
gpl-2.0
NEKTech-Labs/wrapfs-kernel-linux-3.17
drivers/iommu/amd_iommu_init.c
90
58778
/* * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel <joerg.roedel@amd.com> * Leo Duran <leo.duran@amd.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/pci.h> #include <linux/acpi.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/interrupt.h> #include <linux/msi.h> #include <linux/amd-iommu.h> #include <linux/export.h> #include <linux/iommu.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/x86_init.h> #include <asm/iommu_table.h> #include <asm/io_apic.h> #include <asm/irq_remapping.h> #include "amd_iommu_proto.h" #include "amd_iommu_types.h" #include "irq_remapping.h" /* * definitions for the ACPI scanning code */ #define IVRS_HEADER_LENGTH 48 #define ACPI_IVHD_TYPE 0x10 #define ACPI_IVMD_TYPE_ALL 0x20 #define ACPI_IVMD_TYPE 0x21 #define ACPI_IVMD_TYPE_RANGE 0x22 #define IVHD_DEV_ALL 0x01 #define IVHD_DEV_SELECT 0x02 #define IVHD_DEV_SELECT_RANGE_START 0x03 #define IVHD_DEV_RANGE_END 0x04 #define IVHD_DEV_ALIAS 0x42 #define IVHD_DEV_ALIAS_RANGE 0x43 #define IVHD_DEV_EXT_SELECT 0x46 #define IVHD_DEV_EXT_SELECT_RANGE 0x47 #define IVHD_DEV_SPECIAL 0x48 #define IVHD_SPECIAL_IOAPIC 1 #define IVHD_SPECIAL_HPET 2 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 #define IVHD_FLAG_PASSPW_EN_MASK 0x02 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 #define IVHD_FLAG_ISOC_EN_MASK 
0x08 #define IVMD_FLAG_EXCL_RANGE 0x08 #define IVMD_FLAG_UNITY_MAP 0x01 #define ACPI_DEVFLAG_INITPASS 0x01 #define ACPI_DEVFLAG_EXTINT 0x02 #define ACPI_DEVFLAG_NMI 0x04 #define ACPI_DEVFLAG_SYSMGT1 0x10 #define ACPI_DEVFLAG_SYSMGT2 0x20 #define ACPI_DEVFLAG_LINT0 0x40 #define ACPI_DEVFLAG_LINT1 0x80 #define ACPI_DEVFLAG_ATSDIS 0x10000000 /* * ACPI table definitions * * These data structures are laid over the table to parse the important values * out of it. */ /* * structure describing one IOMMU in the ACPI table. Typically followed by one * or more ivhd_entrys. */ struct ivhd_header { u8 type; u8 flags; u16 length; u16 devid; u16 cap_ptr; u64 mmio_phys; u16 pci_seg; u16 info; u32 efr; } __attribute__((packed)); /* * A device entry describing which devices a specific IOMMU translates and * which requestor ids they use. */ struct ivhd_entry { u8 type; u16 devid; u8 flags; u32 ext; } __attribute__((packed)); /* * An AMD IOMMU memory definition structure. It defines things like exclusion * ranges for devices and regions that should be unity mapped. */ struct ivmd_header { u8 type; u8 flags; u16 length; u16 devid; u16 aux; u64 resv; u64 range_start; u64 range_length; } __attribute__((packed)); bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; static bool amd_iommu_detected; static bool __initdata amd_iommu_disabled; u16 amd_iommu_last_bdf; /* largest PCI device id we have to handle */ LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings we find in ACPI */ u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */ /* Array to assign indices to IOMMUs*/ struct amd_iommu *amd_iommus[MAX_IOMMUS]; int amd_iommus_present; /* IOMMUs have a non-present cache? 
*/ bool amd_iommu_np_cache __read_mostly; bool amd_iommu_iotlb_sup __read_mostly = true; u32 amd_iommu_max_pasid __read_mostly = ~0; bool amd_iommu_v2_present __read_mostly; bool amd_iommu_pc_present __read_mostly; bool amd_iommu_force_isolation __read_mostly; /* * List of protection domains - used during resume */ LIST_HEAD(amd_iommu_pd_list); spinlock_t amd_iommu_pd_lock; /* * Pointer to the device table which is shared by all AMD IOMMUs * it is indexed by the PCI device id or the HT unit id and contains * information about the domain the device belongs to as well as the * page table root pointer. */ struct dev_table_entry *amd_iommu_dev_table; /* * The alias table is a driver specific data structure which contains the * mappings of the PCI device ids to the actual requestor ids on the IOMMU. * More than one device can share the same requestor id. */ u16 *amd_iommu_alias_table; /* * The rlookup table is used to find the IOMMU which is responsible * for a specific device. It is also indexed by the PCI device id. */ struct amd_iommu **amd_iommu_rlookup_table; /* * This table is used to find the irq remapping table for a given device id * quickly. */ struct irq_remap_table **irq_lookup_table; /* * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap * to know which ones are already in use. 
*/ unsigned long *amd_iommu_pd_alloc_bitmap; static u32 dev_table_size; /* size of the device table */ static u32 alias_table_size; /* size of the alias table */ static u32 rlookup_table_size; /* size if the rlookup table */ enum iommu_init_state { IOMMU_START_STATE, IOMMU_IVRS_DETECTED, IOMMU_ACPI_FINISHED, IOMMU_ENABLED, IOMMU_PCI_INIT, IOMMU_INTERRUPTS_EN, IOMMU_DMA_OPS, IOMMU_INITIALIZED, IOMMU_NOT_FOUND, IOMMU_INIT_ERROR, }; /* Early ioapic and hpet maps from kernel command line */ #define EARLY_MAP_SIZE 4 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; static int __initdata early_ioapic_map_size; static int __initdata early_hpet_map_size; static bool __initdata cmdline_maps; static enum iommu_init_state init_state = IOMMU_START_STATE; static int amd_iommu_enable_interrupts(void); static int __init iommu_go_to_state(enum iommu_init_state state); static inline void update_last_devid(u16 devid) { if (devid > amd_iommu_last_bdf) amd_iommu_last_bdf = devid; } static inline unsigned long tbl_size(int entry_size) { unsigned shift = PAGE_SHIFT + get_order(((int)amd_iommu_last_bdf + 1) * entry_size); return 1UL << shift; } /* Access to l1 and l2 indexed register spaces */ static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) { u32 val; pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); pci_read_config_dword(iommu->dev, 0xfc, &val); return val; } static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) { pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); pci_write_config_dword(iommu->dev, 0xfc, val); pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); } static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) { u32 val; pci_write_config_dword(iommu->dev, 0xf0, address); pci_read_config_dword(iommu->dev, 0xf4, &val); return val; } static void iommu_write_l2(struct amd_iommu *iommu, u8 
address, u32 val) { pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); pci_write_config_dword(iommu->dev, 0xf4, val); } /**************************************************************************** * * AMD IOMMU MMIO register space handling functions * * These functions are used to program the IOMMU device registers in * MMIO space required for that driver. * ****************************************************************************/ /* * This function set the exclusion range in the IOMMU. DMA accesses to the * exclusion range are passed through untranslated */ static void iommu_set_exclusion_range(struct amd_iommu *iommu) { u64 start = iommu->exclusion_start & PAGE_MASK; u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; u64 entry; if (!iommu->exclusion_start) return; entry = start | MMIO_EXCL_ENABLE_MASK; memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, &entry, sizeof(entry)); entry = limit; memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, &entry, sizeof(entry)); } /* Programs the physical address of the device table into the IOMMU hardware */ static void iommu_set_device_table(struct amd_iommu *iommu) { u64 entry; BUG_ON(iommu->mmio_base == NULL); entry = virt_to_phys(amd_iommu_dev_table); entry |= (dev_table_size >> 12) - 1; memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, &entry, sizeof(entry)); } /* Generic functions to enable/disable certain features of the IOMMU. 
*/ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) { u32 ctrl; ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); ctrl |= (1 << bit); writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) { u32 ctrl; ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); ctrl &= ~(1 << bit); writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) { u32 ctrl; ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); ctrl &= ~CTRL_INV_TO_MASK; ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); } /* Function to enable the hardware */ static void iommu_enable(struct amd_iommu *iommu) { iommu_feature_enable(iommu, CONTROL_IOMMU_EN); } static void iommu_disable(struct amd_iommu *iommu) { /* Disable command buffer */ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); /* Disable event logging and event interrupts */ iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); /* Disable IOMMU hardware itself */ iommu_feature_disable(iommu, CONTROL_IOMMU_EN); } /* * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in * the system has one. */ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) { if (!request_mem_region(address, end, "amd_iommu")) { pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n", address, end); pr_err("AMD-Vi: This is a BIOS bug. 
Please contact your hardware vendor\n"); return NULL; } return (u8 __iomem *)ioremap_nocache(address, end); } static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) { if (iommu->mmio_base) iounmap(iommu->mmio_base); release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); } /**************************************************************************** * * The functions below belong to the first pass of AMD IOMMU ACPI table * parsing. In this pass we try to find out the highest device id this * code has to handle. Upon this information the size of the shared data * structures is determined later. * ****************************************************************************/ /* * This function calculates the length of a given IVHD entry */ static inline int ivhd_entry_length(u8 *ivhd) { return 0x04 << (*ivhd >> 6); } /* * This function reads the last device id the IOMMU has to handle from the PCI * capability header for this IOMMU */ static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) { u32 cap; cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); return 0; } /* * After reading the highest device id from the IOMMU PCI capability header * this function looks if there is a higher device id defined in the ACPI table */ static int __init find_last_devid_from_ivhd(struct ivhd_header *h) { u8 *p = (void *)h, *end = (void *)h; struct ivhd_entry *dev; p += sizeof(*h); end += h->length; find_last_devid_on_pci(PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), PCI_FUNC(h->devid), h->cap_ptr); while (p < end) { dev = (struct ivhd_entry *)p; switch (dev->type) { case IVHD_DEV_SELECT: case IVHD_DEV_RANGE_END: case IVHD_DEV_ALIAS: case IVHD_DEV_EXT_SELECT: /* all the above subfield types refer to device ids */ update_last_devid(dev->devid); break; default: break; } p += ivhd_entry_length(p); } WARN_ON(p != end); return 0; } /* * Iterate over all IVHD entries in the ACPI 
table and find the highest device * id which we need to handle. This is the first of three functions which parse * the ACPI table. So we check the checksum here. */ static int __init find_last_devid_acpi(struct acpi_table_header *table) { int i; u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; struct ivhd_header *h; /* * Validate checksum here so we don't need to do it when * we actually parse the table */ for (i = 0; i < table->length; ++i) checksum += p[i]; if (checksum != 0) /* ACPI table corrupt */ return -ENODEV; p += IVRS_HEADER_LENGTH; end += table->length; while (p < end) { h = (struct ivhd_header *)p; switch (h->type) { case ACPI_IVHD_TYPE: find_last_devid_from_ivhd(h); break; default: break; } p += h->length; } WARN_ON(p != end); return 0; } /**************************************************************************** * * The following functions belong to the code path which parses the ACPI table * the second time. In this ACPI parsing iteration we allocate IOMMU specific * data structures, initialize the device/alias/rlookup table and also * basically initialize the hardware. * ****************************************************************************/ /* * Allocates the command buffer. This buffer is per AMD IOMMU. We can * write commands to that buffer later and the IOMMU will execute them * asynchronously */ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) { u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(CMD_BUFFER_SIZE)); if (cmd_buf == NULL) return NULL; iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; return cmd_buf; } /* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. 
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	/* Disable fetching, zero head/tail, then re-enable */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);

	/* Buffer is now known to the hardware - clear the marker flag */
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

/* Frees the command buffer; mask out the flag bit kept in cmd_buf_size */
static void __init free_command_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

/* Programs the event log base address into the hardware and enables it */
static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log peripheral page requests to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(PPR_LOG_SIZE));

	if (iommu->ppr_log == NULL)
		return NULL;

	return iommu->ppr_log;
}

/* Programs the PPR log into the hardware and enables PPR support */
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

/* Enables guest translation if the hardware advertises the GT feature */
static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	/* bit is a flat index into the 256-bit entry: select 64-bit word + bit */
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

/* reads a specific bit from the device table entry (0 or 1). */
static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}


/*
 * Erratum workaround: if exactly SYSMGT1 (and not SYSMGT2) is set for a
 * device, the IW bit must be set in its device table entry as well.
 */
void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

/*
 * Records the devid for a special device (IOAPIC or HPET) in the
 * corresponding map. An existing command-line override for the same id
 * wins over the firmware-provided entry.
 */
static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line)
{
	struct devid_map *entry;
	struct list_head *list;

	if (type == IVHD_SPECIAL_IOAPIC)
		list = &ioapic_map;
	else if (type == IVHD_SPECIAL_HPET)
		list = &hpet_map;
	else
		return -EINVAL;

	list_for_each_entry(entry, list, list) {
		if (!(entry->id == id && entry->cmd_line))
			continue;

		pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n",
			type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);

		return 0;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->id	= id;
	entry->devid	= devid;
	entry->cmd_line	= cmd_line;

	list_add_tail(&entry->list, list);

	return 0;
}

/*
 * Adds the IOAPIC/HPET mappings that were collected from the kernel
 * command line before the IVRS table was parsed.
 */
static int __init add_early_maps(void)
{
	int i, ret;

	for (i = 0; i < early_ioapic_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_IOAPIC,
					 early_ioapic_map[i].id,
					 early_ioapic_map[i].devid,
					 early_ioapic_map[i].cmd_line);
		if (ret)
			return ret;
	}

	for (i = 0; i < early_hpet_map_size; ++i) {
		ret = add_special_device(IVHD_SPECIAL_HPET,
					 early_hpet_map[i].id,
					 early_hpet_map[i].devid,
					 early_hpet_map[i].cmd_line);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Reads the device exclusion range from ACPI and initializes the IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
				       struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;
	int ret;

	ret = add_early_maps();
	if (ret)
		return ret;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:

			DUMP_printk("  DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS_NUM(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:

			DUMP_printk("  DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:

			DUMP_printk("  DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			/* Remember range start; entries applied at RANGE_END */
			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:

			DUMP_printk("  DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:

			DUMP_printk("  DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS_NUM(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:

			DUMP_printk("  DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:

			DUMP_printk("  DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:

			DUMP_printk("  DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS_NUM(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			/* Apply the flags saved at the range-start entry */
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		case IVHD_DEV_SPECIAL: {
			u8 handle, type;
			const char *var;
			u16 devid;
			int ret;

			handle = e->ext & 0xff;
			devid  = (e->ext >>  8) & 0xffff;
			type   = (e->ext >> 24) & 0xff;

			if (type == IVHD_SPECIAL_IOAPIC)
				var = "IOAPIC";
			else if (type == IVHD_SPECIAL_HPET)
				var = "HPET";
			else
				var = "UNKNOWN";

			DUMP_printk("  DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
				    var, (int)handle,
				    PCI_BUS_NUM(devid),
				    PCI_SLOT(devid),
				    PCI_FUNC(devid));

			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			ret = add_special_device(type, handle, devid, false);
			if (ret)
				return ret;
			break;
		}
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}

	return 0;
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

/* Releases all per-IOMMU buffers and the MMIO mapping */
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

/* Tears down every IOMMU on the global list */
static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
 * Workaround:
 *     BIOS should disable L2B micellaneous clock gating by setting
 *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
 */
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
{
	u32 value;

	if ((boot_cpu_data.x86 != 0x15) ||
	    (boot_cpu_data.x86_model < 0x10) ||
	    (boot_cpu_data.x86_model > 0x1f))
		return;

	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
	pci_read_config_dword(iommu->dev, 0xf4, &value);

	/* Already set by BIOS - nothing to do */
	if (value & BIT(2))
		return;

	/* Select NB indirect register 0x90 and enable writing */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));

	pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
	pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
		dev_name(&iommu->dev->dev));

	/* Clear the enable writing bit */
	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
}

/*
 * This function clues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	/* Check if IVHD EFR contains proper max banks/counters */
	if ((h->efr != 0) &&
	    ((h->efr & (0xF << 13)) != 0) &&
	    ((h->efr & (0x3F << 17)) != 0)) {
		iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
	} else {
		iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
	}

	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	/*
	 * Make sure IOMMU is not considered to translate itself. The IVRS
	 * table tells us so, but this is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	init_iommu_devices(iommu);

	return 0;
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk("       mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/*
 * Probes whether the performance counters advertised via FEATURE_PC are
 * actually writable; on success reads back the bank/counter limits.
 */
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
	u64 val = 0xabcd, val2 = 0;

	if (!iommu_feature(iommu, FEATURE_PC))
		return;

	amd_iommu_pc_present = true;

	/* Check if the performance counters can be written to */
	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
	    (val != val2)) {
		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
		amd_iommu_pc_present = false;
		return;
	}

	pr_info("AMD-Vi: IOMMU performance counters supported\n");

	val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
	iommu->max_banks = (u8) ((val >> 12) & 0x3f);
	iommu->max_counters = (u8) ((val >> 7) & 0xf);
}

/* sysfs: expose the IOMMU capability header */
static ssize_t amd_iommu_show_cap(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%x\n", iommu->cap);
}
static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);

/* sysfs: expose the extended feature register */
static ssize_t amd_iommu_show_features(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amd_iommu *iommu = dev_get_drvdata(dev);
	return sprintf(buf, "%llx\n", iommu->features);
}
static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);

static struct attribute *amd_iommu_attrs[] = {
	&dev_attr_cap.attr,
	&dev_attr_features.attr,
	NULL,
};

static struct attribute_group amd_iommu_group = {
	.name = "amd-iommu",
	.attrs = amd_iommu_attrs,
};

static const struct attribute_group *amd_iommu_groups[] = {
	&amd_iommu_group,
	NULL,
};

/*
 * Finds the PCI function of an IOMMU, reads its capability registers and
 * extended features, allocates the optional PPR log, saves RD890 state
 * for resume, and finally enables the PCI device.
 */
static int iommu_init_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;

	iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid),
					  iommu->devid & 0xff);
	if (!iommu->dev)
		return -ENODEV;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range),
					MMIO_GET_FD(range));
	iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range),
				       MMIO_GET_LD(range));

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 max_pasid;
		u64 pasmax;

		pasmax = iommu->features & FEATURE_PASID_MASK;
		pasmax >>= FEATURE_PASID_SHIFT;
		max_pasid  = (1 << (pasmax + 1)) - 1;

		/* Global maximum is the smallest supported by any IOMMU */
		amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);

		BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);

		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (iommu_feature(iommu, FEATURE_PPR)) {
		iommu->ppr_log = alloc_ppr_log(iommu);
		if (!iommu->ppr_log)
			return -ENOMEM;
	}

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	init_iommu_perf_ctr(iommu);

	if (is_rd890_iommu(iommu->dev)) {
		int i, j;

		iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
				PCI_DEVFN(0, 0));

		/*
		 * Some rd890 systems may not be fully reconfigured by the
		 * BIOS, so it's necessary for us to store this information so
		 * it can be reprogrammed on resume
		 */
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
				&iommu->stored_addr_lo);
		pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
				&iommu->stored_addr_hi);

		/* Low bit locks writes to configuration space */
		iommu->stored_addr_lo &= ~1;

		for (i = 0; i < 6; i++)
			for (j = 0; j < 0x12; j++)
				iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

		for (i = 0; i < 0x83; i++)
			iommu->stored_l2[i] = iommu_read_l2(iommu, i);
	}

	amd_iommu_erratum_746_workaround(iommu);

	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
					       amd_iommu_groups, "ivhd%d",
					       iommu->index);

	return pci_enable_device(iommu->dev);
}

/* Logs a summary line plus extended-feature names for every IOMMU found */
static void print_iommu_info(void)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC"
	};
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		int i;

		pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
			dev_name(&iommu->dev->dev), iommu->cap_ptr);

		if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
			pr_info("AMD-Vi: Extended features: ");
			for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
				if (iommu_feature(iommu, (1ULL << i)))
					pr_cont(" %s", feat_str[i]);
			}
			pr_cont("\n");
		}
	}
	if (irq_remapping_enabled)
		pr_info("AMD-Vi: Interrupt remapping enabled\n");
}

/* Runs the PCI-level initialization for every IOMMU on the list */
static int __init amd_iommu_init_pci(void)
{
	struct amd_iommu *iommu;
	int ret = 0;

	for_each_iommu(iommu) {
		ret = iommu_init_pci(iommu);
		if (ret)
			break;
	}

	ret = amd_iommu_init_devices();

	print_iommu_info();

	return ret;
}

/****************************************************************************
 *
 * The following functions
initialize the MSI interrupts for all IOMMUs
 * in the system. It's a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

/*
 * Enables MSI on the IOMMU's PCI function and installs the threaded
 * interrupt handler. MSI is disabled again if the request fails.
 */
static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu);

	if (r) {
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

/*
 * Sets up the interrupt for one IOMMU (MSI only) and enables the
 * event log - and PPR log, if present - interrupt sources.
 */
static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	/* Interrupt may already be set up (e.g. after resume) */
	if (iommu->int_enabled)
		goto enable_faults;

	if (iommu->dev->msi_cap)
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
* ****************************************************************************/ static void __init free_unity_maps(void) { struct unity_map_entry *entry, *next; list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { list_del(&entry->list); kfree(entry); } } /* called when we find an exclusion range definition in ACPI */ static int __init init_exclusion_range(struct ivmd_header *m) { int i; switch (m->type) { case ACPI_IVMD_TYPE: set_device_exclusion_range(m->devid, m); break; case ACPI_IVMD_TYPE_ALL: for (i = 0; i <= amd_iommu_last_bdf; ++i) set_device_exclusion_range(i, m); break; case ACPI_IVMD_TYPE_RANGE: for (i = m->devid; i <= m->aux; ++i) set_device_exclusion_range(i, m); break; default: break; } return 0; } /* called for unity map ACPI definition */ static int __init init_unity_map_range(struct ivmd_header *m) { struct unity_map_entry *e = NULL; char *s; e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) return -ENOMEM; switch (m->type) { default: kfree(e); return 0; case ACPI_IVMD_TYPE: s = "IVMD_TYPEi\t\t\t"; e->devid_start = e->devid_end = m->devid; break; case ACPI_IVMD_TYPE_ALL: s = "IVMD_TYPE_ALL\t\t"; e->devid_start = 0; e->devid_end = amd_iommu_last_bdf; break; case ACPI_IVMD_TYPE_RANGE: s = "IVMD_TYPE_RANGE\t\t"; e->devid_start = m->devid; e->devid_end = m->aux; break; } e->address_start = PAGE_ALIGN(m->range_start); e->address_end = e->address_start + PAGE_ALIGN(m->range_length); e->prot = m->flags >> 1; DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" " range_start: %016llx range_end: %016llx flags: %x\n", s, PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end), PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), e->address_start, e->address_end, m->flags); list_add_tail(&e->list, &amd_iommu_unity_map); return 0; } /* iterates over all memory definitions we find in the ACPI table */ static int __init init_memory_definitions(struct acpi_table_header *table) { u8 *p = 
(u8 *)table, *end = (u8 *)table; struct ivmd_header *m; end += table->length; p += IVRS_HEADER_LENGTH; while (p < end) { m = (struct ivmd_header *)p; if (m->flags & IVMD_FLAG_EXCL_RANGE) init_exclusion_range(m); else if (m->flags & IVMD_FLAG_UNITY_MAP) init_unity_map_range(m); p += m->length; } return 0; } /* * Init the device table to not allow DMA access for devices and * suppress all page faults */ static void init_device_table_dma(void) { u32 devid; for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { set_dev_entry_bit(devid, DEV_ENTRY_VALID); set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); } } static void __init uninit_device_table_dma(void) { u32 devid; for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { amd_iommu_dev_table[devid].data[0] = 0ULL; amd_iommu_dev_table[devid].data[1] = 0ULL; } } static void init_device_table(void) { u32 devid; if (!amd_iommu_irq_remap) return; for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); } static void iommu_init_flags(struct amd_iommu *iommu) { iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : iommu_feature_disable(iommu, CONTROL_PASSPW_EN); iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 
iommu_feature_enable(iommu, CONTROL_ISOC_EN) : iommu_feature_disable(iommu, CONTROL_ISOC_EN); /* * make IOMMU memory accesses cache coherent */ iommu_feature_enable(iommu, CONTROL_COHERENT_EN); /* Set IOTLB invalidation timeout to 1s */ iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); } static void iommu_apply_resume_quirks(struct amd_iommu *iommu) { int i, j; u32 ioc_feature_control; struct pci_dev *pdev = iommu->root_pdev; /* RD890 BIOSes may not have completely reconfigured the iommu */ if (!is_rd890_iommu(iommu->dev) || !pdev) return; /* * First, we need to ensure that the iommu is enabled. This is * controlled by a register in the northbridge */ /* Select Northbridge indirect register 0x75 and enable writing */ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); pci_read_config_dword(pdev, 0x64, &ioc_feature_control); /* Enable the iommu */ if (!(ioc_feature_control & 0x1)) pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); /* Restore the iommu BAR */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo); pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, iommu->stored_addr_hi); /* Restore the l1 indirect regs for each of the 6 l1s */ for (i = 0; i < 6; i++) for (j = 0; j < 0x12; j++) iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); /* Restore the l2 indirect regs */ for (i = 0; i < 0x83; i++) iommu_write_l2(iommu, i, iommu->stored_l2[i]); /* Lock PCI setup registers */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo | 1); } /* * This function finally enables all IOMMUs found in the system after * they have been initialized */ static void early_enable_iommus(void) { struct amd_iommu *iommu; for_each_iommu(iommu) { iommu_disable(iommu); iommu_init_flags(iommu); iommu_set_device_table(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_set_exclusion_range(iommu); iommu_enable(iommu); iommu_flush_all_caches(iommu); } } static void enable_iommus_v2(void) { 
struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_enable_ppr_log(iommu);
		iommu_enable_gt(iommu);
	}
}

/* Full enable: base feature programming plus the IOMMUv2 extras. */
static void enable_iommus(void)
{
	early_enable_iommus();
	enable_iommus_v2();
}

static void disable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu)
		iommu_disable(iommu);
}

/*
 * Suspend/Resume support
 * disable suspend until real resume implemented
 */

static void amd_iommu_resume(void)
{
	struct amd_iommu *iommu;

	/* RD890 quirks must be re-applied before the hardware re-enable. */
	for_each_iommu(iommu)
		iommu_apply_resume_quirks(iommu);

	/* re-load the hardware */
	enable_iommus();

	amd_iommu_enable_interrupts();
}

static int amd_iommu_suspend(void)
{
	/* disable IOMMUs to go out of the way for BIOS */
	disable_iommus();

	return 0;
}

static struct syscore_ops amd_iommu_syscore_ops = {
	.suspend = amd_iommu_suspend,
	.resume = amd_iommu_resume,
};

/*
 * Tear down everything allocated by early init after a failure:
 * irq lookup table, irq cache, rlookup/alias/device tables and all
 * per-IOMMU state.  free_pages(0, ...) is a safe no-op for tables
 * that were never allocated.
 */
static void __init free_on_init_error(void)
{
	free_pages((unsigned long)irq_lookup_table,
		   get_order(rlookup_table_size));

	if (amd_iommu_irq_cache) {
		kmem_cache_destroy(amd_iommu_irq_cache);
		amd_iommu_irq_cache = NULL;
	}

	free_pages((unsigned long)amd_iommu_rlookup_table,
		   get_order(rlookup_table_size));

	free_pages((unsigned long)amd_iommu_alias_table,
		   get_order(alias_table_size));

	free_pages((unsigned long)amd_iommu_dev_table,
		   get_order(dev_table_size));

	free_iommu_all();

#ifdef CONFIG_GART_IOMMU
	/*
	 * We failed to initialize the AMD IOMMU - try fallback to GART
	 * if possible.
*/ gart_iommu_init(); #endif } /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; bool ret, has_sb_ioapic; int idx; has_sb_ioapic = false; ret = false; /* * If we have map overrides on the kernel command line the * messages in this function might not describe firmware bugs * anymore - so be careful */ if (cmdline_maps) fw_bug = ""; for (idx = 0; idx < nr_ioapics; idx++) { int devid, id = mpc_ioapic_id(idx); devid = get_ioapic_devid(id); if (devid < 0) { pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; } else if (devid == IOAPIC_SB_DEVID) { has_sb_ioapic = true; ret = true; } } if (!has_sb_ioapic) { /* * We expect the SB IOAPIC to be listed in the IVRS * table. The system timer is connected to the SB IOAPIC * and if we don't have it in the list the system will * panic at boot time. This situation usually happens * when the BIOS is buggy and provides us the wrong * device id for the IOAPIC in the system. */ pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug); } if (!ret) pr_err("AMD-Vi: Disabling interrupt remapping\n"); return ret; } static void __init free_dma_resources(void) { amd_iommu_uninit_devices(); free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, get_order(MAX_DOMAIN_ID/8)); free_unity_maps(); } /* * This is the hardware init function for AMD IOMMU in the system. * This function is called either from amd_iommu_init or from the interrupt * remapping setup code. * * This function basically parses the ACPI table for AMD IOMMU (IVRS) * three times: * * 1 pass) Find the highest PCI device id the driver has to handle. * Upon this information the size of the data structures is * determined that needs to be allocated. * * 2 pass) Initialize the data structures just allocated with the * information in the ACPI table about available AMD IOMMUs * in the system. 
It also maps the PCI devices in the * system to specific IOMMUs * * 3 pass) After the basic data structures are allocated and * initialized we update them with information about memory * remapping requirements parsed out of the ACPI table in * this last pass. * * After everything is set up the IOMMUs are enabled and the necessary * hotplug and suspend notifiers are registered. */ static int __init early_amd_iommu_init(void) { struct acpi_table_header *ivrs_base; acpi_size ivrs_size; acpi_status status; int i, ret = 0; if (!amd_iommu_detected) return -ENODEV; status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); if (status == AE_NOT_FOUND) return -ENODEV; else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); pr_err("AMD-Vi: IVRS table error: %s\n", err); return -EINVAL; } /* * First parse ACPI tables to find the largest Bus/Dev/Func * we need to handle. Upon this information the shared data * structures for the IOMMUs in the system will be allocated */ ret = find_last_devid_acpi(ivrs_base); if (ret) goto out; dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); /* Device table - directly used by all IOMMUs */ ret = -ENOMEM; amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(dev_table_size)); if (amd_iommu_dev_table == NULL) goto out; /* * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the * IOMMU see for that device */ amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, get_order(alias_table_size)); if (amd_iommu_alias_table == NULL) goto out; /* IOMMU rlookup table - find the IOMMU for a specific device */ amd_iommu_rlookup_table = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(rlookup_table_size)); if (amd_iommu_rlookup_table == NULL) goto out; amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(MAX_DOMAIN_ID/8)); if 
(amd_iommu_pd_alloc_bitmap == NULL) goto out; /* * let all alias entries point to itself */ for (i = 0; i <= amd_iommu_last_bdf; ++i) amd_iommu_alias_table[i] = i; /* * never allocate domain 0 because its used as the non-allocated and * error value placeholder */ amd_iommu_pd_alloc_bitmap[0] = 1; spin_lock_init(&amd_iommu_pd_lock); /* * now the data structures are allocated and basically initialized * start the real acpi table scan */ ret = init_iommu_all(ivrs_base); if (ret) goto out; if (amd_iommu_irq_remap) amd_iommu_irq_remap = check_ioapic_information(); if (amd_iommu_irq_remap) { /* * Interrupt remapping enabled, create kmem_cache for the * remapping tables. */ ret = -ENOMEM; amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", MAX_IRQS_PER_TABLE * sizeof(u32), IRQ_TABLE_ALIGNMENT, 0, NULL); if (!amd_iommu_irq_cache) goto out; irq_lookup_table = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(rlookup_table_size)); if (!irq_lookup_table) goto out; } ret = init_memory_definitions(ivrs_base); if (ret) goto out; /* init the device table */ init_device_table(); out: /* Don't leak any ACPI memory */ early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); ivrs_base = NULL; return ret; } static int amd_iommu_enable_interrupts(void) { struct amd_iommu *iommu; int ret = 0; for_each_iommu(iommu) { ret = iommu_init_msi(iommu); if (ret) goto out; } out: return ret; } static bool detect_ivrs(void) { struct acpi_table_header *ivrs_base; acpi_size ivrs_size; acpi_status status; status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); if (status == AE_NOT_FOUND) return false; else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); pr_err("AMD-Vi: IVRS table error: %s\n", err); return false; } early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); /* Make sure ACS will be enabled during PCI probe */ pci_request_acs(); if (!disable_irq_remap) amd_iommu_irq_remap = true; return true; } static int 
amd_iommu_init_dma(void) { struct amd_iommu *iommu; int ret; if (iommu_pass_through) ret = amd_iommu_init_passthrough(); else ret = amd_iommu_init_dma_ops(); if (ret) return ret; init_device_table_dma(); for_each_iommu(iommu) iommu_flush_all_caches(iommu); amd_iommu_init_api(); amd_iommu_init_notifier(); return 0; } /**************************************************************************** * * AMD IOMMU Initialization State Machine * ****************************************************************************/ static int __init state_next(void) { int ret = 0; switch (init_state) { case IOMMU_START_STATE: if (!detect_ivrs()) { init_state = IOMMU_NOT_FOUND; ret = -ENODEV; } else { init_state = IOMMU_IVRS_DETECTED; } break; case IOMMU_IVRS_DETECTED: ret = early_amd_iommu_init(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; break; case IOMMU_ACPI_FINISHED: early_enable_iommus(); register_syscore_ops(&amd_iommu_syscore_ops); x86_platform.iommu_shutdown = disable_iommus; init_state = IOMMU_ENABLED; break; case IOMMU_ENABLED: ret = amd_iommu_init_pci(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; enable_iommus_v2(); break; case IOMMU_PCI_INIT: ret = amd_iommu_enable_interrupts(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; break; case IOMMU_INTERRUPTS_EN: ret = amd_iommu_init_dma(); init_state = ret ? 
IOMMU_INIT_ERROR : IOMMU_DMA_OPS; break; case IOMMU_DMA_OPS: init_state = IOMMU_INITIALIZED; break; case IOMMU_INITIALIZED: /* Nothing to do */ break; case IOMMU_NOT_FOUND: case IOMMU_INIT_ERROR: /* Error states => do nothing */ ret = -EINVAL; break; default: /* Unknown state */ BUG(); } return ret; } static int __init iommu_go_to_state(enum iommu_init_state state) { int ret = 0; while (init_state != state) { ret = state_next(); if (init_state == IOMMU_NOT_FOUND || init_state == IOMMU_INIT_ERROR) break; } return ret; } #ifdef CONFIG_IRQ_REMAP int __init amd_iommu_prepare(void) { return iommu_go_to_state(IOMMU_ACPI_FINISHED); } int __init amd_iommu_supported(void) { return amd_iommu_irq_remap ? 1 : 0; } int __init amd_iommu_enable(void) { int ret; ret = iommu_go_to_state(IOMMU_ENABLED); if (ret) return ret; irq_remapping_enabled = 1; return 0; } void amd_iommu_disable(void) { amd_iommu_suspend(); } int amd_iommu_reenable(int mode) { amd_iommu_resume(); return 0; } int __init amd_iommu_enable_faulting(void) { /* We enable MSI later when PCI is initialized */ return 0; } #endif /* * This is the core init function for AMD IOMMU hardware in the system. * This function is called from the generic x86 DMA layer initialization * code. */ static int __init amd_iommu_init(void) { int ret; ret = iommu_go_to_state(IOMMU_INITIALIZED); if (ret) { free_dma_resources(); if (!irq_remapping_enabled) { disable_iommus(); free_on_init_error(); } else { struct amd_iommu *iommu; uninit_device_table_dma(); for_each_iommu(iommu) iommu_flush_all_caches(iommu); } } return ret; } /**************************************************************************** * * Early detect code. This code runs at IOMMU detection time in the DMA * layer. 
It just looks if there is an IVRS ACPI table to detect AMD * IOMMUs * ****************************************************************************/ int __init amd_iommu_detect(void) { int ret; if (no_iommu || (iommu_detected && !gart_iommu_aperture)) return -ENODEV; if (amd_iommu_disabled) return -ENODEV; ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); if (ret) return ret; amd_iommu_detected = true; iommu_detected = 1; x86_init.iommu.iommu_init = amd_iommu_init; return 0; } /**************************************************************************** * * Parsing functions for the AMD IOMMU specific kernel command line * options. * ****************************************************************************/ static int __init parse_amd_iommu_dump(char *str) { amd_iommu_dump = true; return 1; } static int __init parse_amd_iommu_options(char *str) { for (; *str; ++str) { if (strncmp(str, "fullflush", 9) == 0) amd_iommu_unmap_flush = true; if (strncmp(str, "off", 3) == 0) amd_iommu_disabled = true; if (strncmp(str, "force_isolation", 15) == 0) amd_iommu_force_isolation = true; } return 1; } static int __init parse_ivrs_ioapic(char *str) { unsigned int bus, dev, fn; int ret, id, i; u16 devid; ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); if (ret != 4) { pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str); return 1; } if (early_ioapic_map_size == EARLY_MAP_SIZE) { pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", str); return 1; } devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); cmdline_maps = true; i = early_ioapic_map_size++; early_ioapic_map[i].id = id; early_ioapic_map[i].devid = devid; early_ioapic_map[i].cmd_line = true; return 1; } static int __init parse_ivrs_hpet(char *str) { unsigned int bus, dev, fn; int ret, id, i; u16 devid; ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); if (ret != 4) { pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str); return 1; } if (early_hpet_map_size == 
EARLY_MAP_SIZE) { pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n", str); return 1; } devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); cmdline_maps = true; i = early_hpet_map_size++; early_hpet_map[i].id = id; early_hpet_map[i].devid = devid; early_hpet_map[i].cmd_line = true; return 1; } __setup("amd_iommu_dump", parse_amd_iommu_dump); __setup("amd_iommu=", parse_amd_iommu_options); __setup("ivrs_ioapic", parse_ivrs_ioapic); __setup("ivrs_hpet", parse_ivrs_hpet); IOMMU_INIT_FINISH(amd_iommu_detect, gart_iommu_hole_init, NULL, NULL); bool amd_iommu_v2_supported(void) { return amd_iommu_v2_present; } EXPORT_SYMBOL(amd_iommu_v2_supported); /**************************************************************************** * * IOMMU EFR Performance Counter support functionality. This code allows * access to the IOMMU PC functionality. * ****************************************************************************/ u8 amd_iommu_pc_get_max_banks(u16 devid) { struct amd_iommu *iommu; u8 ret = 0; /* locate the iommu governing the devid */ iommu = amd_iommu_rlookup_table[devid]; if (iommu) ret = iommu->max_banks; return ret; } EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); bool amd_iommu_pc_supported(void) { return amd_iommu_pc_present; } EXPORT_SYMBOL(amd_iommu_pc_supported); u8 amd_iommu_pc_get_max_counters(u16 devid) { struct amd_iommu *iommu; u8 ret = 0; /* locate the iommu governing the devid */ iommu = amd_iommu_rlookup_table[devid]; if (iommu) ret = iommu->max_counters; return ret; } EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, u64 *value, bool is_write) { struct amd_iommu *iommu; u32 offset; u32 max_offset_lim; /* Make sure the IOMMU PC resource is available */ if (!amd_iommu_pc_present) return -ENODEV; /* Locate the iommu associated with the device ID */ iommu = amd_iommu_rlookup_table[devid]; /* Check for valid iommu and pc register indexing */ if (WARN_ON((iommu == NULL) 
|| (fxn > 0x28) || (fxn & 7))) return -ENODEV; offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); /* Limit the offset to the hw defined mmio region aperture */ max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) | (iommu->max_counters << 8) | 0x28); if ((offset < MMIO_CNTR_REG_OFFSET) || (offset > max_offset_lim)) return -EINVAL; if (is_write) { writel((u32)*value, iommu->mmio_base + offset); writel((*value >> 32), iommu->mmio_base + offset + 4); } else { *value = readl(iommu->mmio_base + offset + 4); *value <<= 32; *value = readl(iommu->mmio_base + offset); } return 0; } EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
gpl-2.0
morely/linux-xlnx
arch/s390/kvm/priv.c
90
26746
/* * handling privileged instructions * * Copyright IBM Corp. 2008, 2013 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) * as published by the Free Software Foundation. * * Author(s): Carsten Otte <cotte@de.ibm.com> * Christian Borntraeger <borntraeger@de.ibm.com> */ #include <linux/kvm.h> #include <linux/gfp.h> #include <linux/errno.h> #include <linux/compat.h> #include <asm/asm-offsets.h> #include <asm/facility.h> #include <asm/current.h> #include <asm/debug.h> #include <asm/ebcdic.h> #include <asm/sysinfo.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/io.h> #include <asm/ptrace.h> #include <asm/compat.h> #include "gaccess.h" #include "kvm-s390.h" #include "trace.h" /* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { struct kvm_vcpu *cpup; s64 hostclk, val; int i, rc; u64 op2; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); op2 = kvm_s390_get_base_disp_s(vcpu); if (op2 & 7) /* Operand must be on a doubleword boundary */ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); rc = read_guest(vcpu, op2, &val, sizeof(val)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); if (store_tod_clock(&hostclk)) { kvm_s390_set_psw_cc(vcpu, 3); return 0; } val = (val - hostclk) & ~0x3fUL; mutex_lock(&vcpu->kvm->lock); kvm_for_each_vcpu(i, cpup, vcpu->kvm) cpup->arch.sie_block->epoch = val; mutex_unlock(&vcpu->kvm->lock); kvm_s390_set_psw_cc(vcpu, 0); return 0; } static int handle_set_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address; int rc; vcpu->stat.instruction_spx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* 
get the value */ rc = read_guest(vcpu, operand2, &address, sizeof(address)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); address &= 0x7fffe000u; /* * Make sure the new value is valid memory. We only need to check the * first page, since address is 8k aligned and memory pieces are always * at least 1MB aligned and have at least a size of 1MB. */ if (kvm_is_error_gpa(vcpu->kvm, address)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_set_prefix(vcpu, address); VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 1, address); return 0; } static int handle_store_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address; int rc; vcpu->stat.instruction_stpx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); address = kvm_s390_get_prefix(vcpu); /* get the value */ rc = write_guest(vcpu, operand2, &address, sizeof(address)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 0, address); return 0; } static int handle_store_cpu_address(struct kvm_vcpu *vcpu) { u16 vcpu_id = vcpu->vcpu_id; u64 ga; int rc; vcpu->stat.instruction_stap++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); ga = kvm_s390_get_base_disp_s(vcpu); if (ga & 1) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga); trace_kvm_s390_handle_stap(vcpu, ga); return 0; } static void __skey_check_enable(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) return; 
s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	/* Drop the storage-key intercepts - keys are handled natively now. */
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

/*
 * Handle ISKE/SSKE/RRBE interception: enable storage-key support and
 * rewind the PSW so the guest re-executes the instruction without
 * intercepting again.
 */
static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Retry the instruction once the intercept reason is gone. */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

/* Wait until the IPTE lock is free, then let the guest retry. */
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

/*
 * Handle TEST BLOCK: validate and clear the guest page addressed by the
 * second operand register, then set condition code 0.
 */
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_protection(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
*/ if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE)) return -EFAULT; kvm_s390_set_psw_cc(vcpu, 0); vcpu->run->s.regs.gprs[0] = 0; return 0; } static int handle_tpi(struct kvm_vcpu *vcpu) { struct kvm_s390_interrupt_info *inti; unsigned long len; u32 tpi_data[3]; int cc, rc; u64 addr; rc = 0; addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); cc = 0; inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); if (!inti) goto no_interrupt; cc = 1; tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; tpi_data[1] = inti->io.io_int_parm; tpi_data[2] = inti->io.io_int_word; if (addr) { /* * Store the two-word I/O interruption code into the * provided area. */ len = sizeof(tpi_data) - 4; rc = write_guest(vcpu, addr, &tpi_data, len); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. */ len = sizeof(tpi_data); if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) rc = -EFAULT; } /* * If we encounter a problem storing the interruption code, the * instruction is suppressed from the guest's view: reinject the * interrupt. */ if (!rc) kfree(inti); else kvm_s390_reinject_io_int(vcpu->kvm, inti); no_interrupt: /* Set condition code and we're done. */ if (!rc) kvm_s390_set_psw_cc(vcpu, cc); return rc ? -EFAULT : 0; } static int handle_tsch(struct kvm_vcpu *vcpu) { struct kvm_s390_interrupt_info *inti; inti = kvm_s390_get_io_int(vcpu->kvm, 0, vcpu->run->s.regs.gprs[1]); /* * Prepare exit to userspace. * We indicate whether we dequeued a pending I/O interrupt * so that userspace can re-inject it if the instruction gets * a program check. While this may re-order the pending I/O * interrupts, this is no problem since the priority is kept * intact. 
*/ vcpu->run->exit_reason = KVM_EXIT_S390_TSCH; vcpu->run->s390_tsch.dequeued = !!inti; if (inti) { vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id; vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr; vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm; vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word; } vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb; kfree(inti); return -EREMOTE; } static int handle_io_inst(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (vcpu->kvm->arch.css_support) { /* * Most I/O instructions will be handled by userspace. * Exceptions are tpi and the interrupt portion of tsch. */ if (vcpu->arch.sie_block->ipa == 0xb236) return handle_tpi(vcpu); if (vcpu->arch.sie_block->ipa == 0xb235) return handle_tsch(vcpu); /* Handle in userspace. */ return -EOPNOTSUPP; } else { /* * Set condition code 3 to stop the guest from issuing channel * I/O instructions. 
*/
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

/* Handle STFL: store the first word of the facility list into lowcore. */
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

/*
 * Validate a guest PSW: reject unassigned mask bits, addresses that do
 * not fit the selected addressing mode, the invalid EA-only mode and
 * odd instruction addresses.  Returns 1 for a valid PSW, 0 otherwise.
 */
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

/*
 * Handle LPSW: load a 64-bit (compat format) PSW from guest memory and
 * expand it into the full guest PSW.
 */
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	/* Operand must be doubleword aligned. */
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* Widen the 32-bit compat PSW into the 64-bit guest PSW. */
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

/* Handle LPSWE: load a full 128-bit PSW from guest memory. */
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return
kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); vcpu->arch.sie_block->gpsw = new_psw; if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return 0; } static int handle_stidp(struct kvm_vcpu *vcpu) { u64 stidp_data = vcpu->arch.stidp_data; u64 operand2; int rc; vcpu->stat.instruction_stidp++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); operand2 = kvm_s390_get_base_disp_s(vcpu); if (operand2 & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); return 0; } static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) { int cpus = 0; int n; cpus = atomic_read(&vcpu->kvm->online_vcpus); /* deal with other level 3 hypervisors */ if (stsi(mem, 3, 2, 2)) mem->count = 0; if (mem->count < 8) mem->count++; for (n = mem->count - 1; n > 0 ; n--) memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); mem->vm[0].cpus_total = cpus; mem->vm[0].cpus_configured = cpus; mem->vm[0].cpus_standby = 0; mem->vm[0].cpus_reserved = 0; mem->vm[0].caf = 1000; memcpy(mem->vm[0].name, "KVMguest", 8); ASCEBC(mem->vm[0].name, 8); memcpy(mem->vm[0].cpi, "KVM/Linux ", 16); ASCEBC(mem->vm[0].cpi, 16); } static int handle_stsi(struct kvm_vcpu *vcpu) { int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; int sel1 = vcpu->run->s.regs.gprs[0] & 0xff; int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff; unsigned long mem = 0; u64 operand2; int rc = 0; vcpu->stat.instruction_stsi++; VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); if 
(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (fc > 3) { kvm_s390_set_psw_cc(vcpu, 3); return 0; } if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 || vcpu->run->s.regs.gprs[1] & 0xffff0000) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (fc == 0) { vcpu->run->s.regs.gprs[0] = 3 << 28; kvm_s390_set_psw_cc(vcpu, 0); return 0; } operand2 = kvm_s390_get_base_disp_s(vcpu); if (operand2 & 0xfff) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); switch (fc) { case 1: /* same handling for 1 and 2 */ case 2: mem = get_zeroed_page(GFP_KERNEL); if (!mem) goto out_no_data; if (stsi((void *) mem, fc, sel1, sel2)) goto out_no_data; break; case 3: if (sel1 != 2 || sel2 != 2) goto out_no_data; mem = get_zeroed_page(GFP_KERNEL); if (!mem) goto out_no_data; handle_stsi_3_2_2(vcpu, (void *) mem); break; } rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); if (rc) { rc = kvm_s390_inject_prog_cond(vcpu, rc); goto out; } trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); free_page(mem); kvm_s390_set_psw_cc(vcpu, 0); vcpu->run->s.regs.gprs[0] = 0; return 0; out_no_data: kvm_s390_set_psw_cc(vcpu, 3); out: free_page(mem); return rc; } static const intercept_handler_t b2_handlers[256] = { [0x02] = handle_stidp, [0x04] = handle_set_clock, [0x10] = handle_set_prefix, [0x11] = handle_store_prefix, [0x12] = handle_store_cpu_address, [0x21] = handle_ipte_interlock, [0x29] = handle_skey, [0x2a] = handle_skey, [0x2b] = handle_skey, [0x2c] = handle_test_block, [0x30] = handle_io_inst, [0x31] = handle_io_inst, [0x32] = handle_io_inst, [0x33] = handle_io_inst, [0x34] = handle_io_inst, [0x35] = handle_io_inst, [0x36] = handle_io_inst, [0x37] = handle_io_inst, [0x38] = handle_io_inst, [0x39] = handle_io_inst, [0x3a] = handle_io_inst, [0x3b] = handle_io_inst, [0x3c] = handle_io_inst, [0x50] = handle_ipte_interlock, [0x5f] = handle_io_inst, [0x74] = handle_io_inst, [0x76] = handle_io_inst, [0x7d] 
= handle_stsi, [0xb1] = handle_stfl, [0xb2] = handle_lpswe, }; int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) { intercept_handler_t handler; /* * A lot of B2 instructions are priviledged. Here we check for * the privileged ones, that we can handle in the kernel. * Anything else goes to userspace. */ handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; if (handler) return handler(vcpu); return -EOPNOTSUPP; } static int handle_epsw(struct kvm_vcpu *vcpu) { int reg1, reg2; kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); /* This basically extracts the mask half of the psw. */ vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL; vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; if (reg2) { vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL; vcpu->run->s.regs.gprs[reg2] |= vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL; } return 0; } #define PFMF_RESERVED 0xfffc0101UL #define PFMF_SK 0x00020000UL #define PFMF_CF 0x00010000UL #define PFMF_UI 0x00008000UL #define PFMF_FSC 0x00007000UL #define PFMF_NQ 0x00000800UL #define PFMF_MR 0x00000400UL #define PFMF_MC 0x00000200UL #define PFMF_KEY 0x000000feUL static int handle_pfmf(struct kvm_vcpu *vcpu) { int reg1, reg2; unsigned long start, end; vcpu->stat.instruction_pfmf++; kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); if (!MACHINE_HAS_PFMF) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* Only provide non-quiescing support if the host supports it */ if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* No support for conditional-SSKE */ if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); start = vcpu->run->s.regs.gprs[reg2] & 
PAGE_MASK; if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { if (kvm_s390_check_low_addr_protection(vcpu, start)) return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); } switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { case 0x00000000: end = (start + (1UL << 12)) & ~((1UL << 12) - 1); break; case 0x00001000: end = (start + (1UL << 20)) & ~((1UL << 20) - 1); break; /* We dont support EDAT2 case 0x00002000: end = (start + (1UL << 31)) & ~((1UL << 31) - 1); break;*/ default: return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); } while (start < end) { unsigned long useraddr, abs_addr; /* Translate guest address to host address */ if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0) abs_addr = kvm_s390_real_to_abs(vcpu, start); else abs_addr = start; useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr)); if (kvm_is_error_hva(useraddr)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { if (clear_user((void __user *)useraddr, PAGE_SIZE)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { __skey_check_enable(vcpu); if (set_guest_storage_key(current->mm, useraddr, vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } start += PAGE_SIZE; } if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) vcpu->run->s.regs.gprs[reg2] = end; return 0; } static int handle_essa(struct kvm_vcpu *vcpu) { /* entries expected to be 1FF */ int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3; unsigned long *cbrlo, cbrle; struct gmap *gmap; int i; VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries); gmap = vcpu->arch.gmap; vcpu->stat.instruction_essa++; if (!kvm_s390_cmma_enabled(vcpu->kvm)) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (((vcpu->arch.sie_block->ipb 
& 0xf0000000) >> 28) > 6) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* Rewind PSW to repeat the ESSA instruction */ vcpu->arch.sie_block->gpsw.addr = __rewind_psw(vcpu->arch.sie_block->gpsw, 4); vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */ cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo); down_read(&gmap->mm->mmap_sem); for (i = 0; i < entries; ++i) { cbrle = cbrlo[i]; if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE)) /* invalid entry */ break; /* try to free backing */ __gmap_zap(gmap, cbrle); } up_read(&gmap->mm->mmap_sem); if (i < entries) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); return 0; } static const intercept_handler_t b9_handlers[256] = { [0x8a] = handle_ipte_interlock, [0x8d] = handle_epsw, [0x8e] = handle_ipte_interlock, [0x8f] = handle_ipte_interlock, [0xab] = handle_essa, [0xaf] = handle_pfmf, }; int kvm_s390_handle_b9(struct kvm_vcpu *vcpu) { intercept_handler_t handler; /* This is handled just as for the B2 instructions. 
*/ handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; if (handler) return handler(vcpu); return -EOPNOTSUPP; } int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; u32 val = 0; int reg, rc; u64 ga; vcpu->stat.instruction_lctl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); ga = kvm_s390_get_base_disp_rs(vcpu); if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); reg = reg1; do { rc = read_guest(vcpu, ga, &val, sizeof(val)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; vcpu->arch.sie_block->gcr[reg] |= val; ga += 4; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); return 0; } int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; u64 ga; u32 val; int reg, rc; vcpu->stat.instruction_stctl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); ga = kvm_s390_get_base_disp_rs(vcpu); if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga); reg = reg1; do { val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful; rc = write_guest(vcpu, ga, &val, sizeof(val)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); ga += 4; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); return 0; } static int handle_lctlg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; u64 ga, val; int reg, rc; 
vcpu->stat.instruction_lctlg++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); ga = kvm_s390_get_base_disp_rsy(vcpu); if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); reg = reg1; VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); do { rc = read_guest(vcpu, ga, &val, sizeof(val)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); vcpu->arch.sie_block->gcr[reg] = val; ga += 8; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); return 0; } static int handle_stctg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; u64 ga, val; int reg, rc; vcpu->stat.instruction_stctg++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); ga = kvm_s390_get_base_disp_rsy(vcpu); if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); reg = reg1; VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga); trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga); do { val = vcpu->arch.sie_block->gcr[reg]; rc = write_guest(vcpu, ga, &val, sizeof(val)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); ga += 8; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); return 0; } static const intercept_handler_t eb_handlers[256] = { [0x2f] = handle_lctlg, [0x25] = handle_stctg, }; int kvm_s390_handle_eb(struct kvm_vcpu *vcpu) { intercept_handler_t handler; handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; if (handler) return handler(vcpu); return -EOPNOTSUPP; } static int handle_tprot(struct kvm_vcpu *vcpu) { u64 address1, address2; unsigned long hva, gpa; int ret = 0, cc = 0; bool writable; vcpu->stat.instruction_tprot++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 
kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); /* we only handle the Linux memory detection case: * access key == 0 * everything else goes to userspace. */ if (address2 & 0xf0) return -EOPNOTSUPP; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) ipte_lock(vcpu); ret = guest_translate_address(vcpu, address1, &gpa, 1); if (ret == PGM_PROTECTION) { /* Write protected? Try again with read-only... */ cc = 1; ret = guest_translate_address(vcpu, address1, &gpa, 0); } if (ret) { if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { ret = kvm_s390_inject_program_int(vcpu, ret); } else if (ret > 0) { /* Translation not available */ kvm_s390_set_psw_cc(vcpu, 3); ret = 0; } goto out_unlock; } hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable); if (kvm_is_error_hva(hva)) { ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } else { if (!writable) cc = 1; /* Write not permitted ==> read-only */ kvm_s390_set_psw_cc(vcpu, cc); /* Note: CC2 only occurs for storage keys (not supported yet) */ } out_unlock: if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) ipte_unlock(vcpu); return ret; } int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) { /* For e5xx... instructions we only handle TPROT */ if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) return handle_tprot(vcpu); return -EOPNOTSUPP; } static int handle_sckpf(struct kvm_vcpu *vcpu) { u32 value; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff; vcpu->arch.sie_block->todpr = value; return 0; } static const intercept_handler_t x01_handlers[256] = { [0x07] = handle_sckpf, }; int kvm_s390_handle_01(struct kvm_vcpu *vcpu) { intercept_handler_t handler; handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff]; if (handler) return handler(vcpu); return -EOPNOTSUPP; }
gpl-2.0
xingrz/android_kernel_nubia_msm8996
drivers/input/touchscreen/synaptics_i2c_rmi4.c
90
119119
/* * Synaptics RMI4 touchscreen driver * * Copyright (C) 2012 Synaptics Incorporated * * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com> * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com> * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/input/synaptics_dsx.h> #include <linux/of_gpio.h> #if defined(CONFIG_SECURE_TOUCH) #include <linux/pm_runtime.h> #include <linux/errno.h> #endif #include "synaptics_i2c_rmi4.h" #include <linux/input/mt.h> #define DRIVER_NAME "synaptics_rmi4_i2c" #define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0" #define DEBUGFS_DIR_NAME "ts_debug" #define RESET_DELAY 100 #define TYPE_B_PROTOCOL #define NO_0D_WHILE_2D /* #define REPORT_2D_Z */ #define REPORT_2D_W #define RPT_TYPE (1 << 0) #define RPT_X_LSB (1 << 1) #define RPT_X_MSB (1 << 2) #define RPT_Y_LSB (1 << 3) #define RPT_Y_MSB (1 << 4) #define RPT_Z (1 << 5) #define RPT_WX (1 << 6) #define RPT_WY (1 << 7) #define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB) #define EXP_FN_DET_INTERVAL 1000 /* ms */ #define POLLING_PERIOD 1 /* ms */ #define SYN_I2C_RETRY_TIMES 10 #define MAX_ABS_MT_TOUCH_MAJOR 15 #define F01_STD_QUERY_LEN 21 #define 
F01_PACKAGE_ID_OFFSET 17 #define F01_BUID_ID_OFFSET 18 #define F11_STD_QUERY_LEN 9 #define F11_STD_CTRL_LEN 10 #define F11_STD_DATA_LEN 12 #define NORMAL_OPERATION 0 #define SENSOR_SLEEP 1 #define NO_SLEEP_OFF 0 #define NO_SLEEP_ON 1 enum device_status { STATUS_NO_ERROR = 0x00, STATUS_RESET_OCCURRED = 0x01, STATUS_INVALID_CONFIG = 0x02, STATUS_DEVICE_FAILURE = 0x03, STATUS_CONFIG_CRC_FAILURE = 0x04, STATUS_FIRMWARE_CRC_FAILURE = 0x05, STATUS_CRC_IN_PROGRESS = 0x06, STATUS_UNCONFIGURED = 0x80 }; #define DEVICE_CONFIGURED 0x1 #define RMI4_VTG_MIN_UV 2700000 #define RMI4_VTG_MAX_UV 3300000 #define RMI4_ACTIVE_LOAD_UA 15000 #define RMI4_LPM_LOAD_UA 10 #define RMI4_I2C_VTG_MIN_UV 1800000 #define RMI4_I2C_VTG_MAX_UV 1800000 #define RMI4_I2C_LOAD_UA 10000 #define RMI4_I2C_LPM_LOAD_UA 10 #define RMI4_GPIO_SLEEP_LOW_US 10000 #define F12_FINGERS_TO_SUPPORT 10 #define MAX_F11_TOUCH_WIDTH 15 #define RMI4_COORDS_ARR_SIZE 4 #define F11_MAX_X 4096 #define F11_MAX_Y 4096 #define F12_MAX_X 65536 #define F12_MAX_Y 65536 static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length); static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length); static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data); static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data); static void __maybe_unused synaptics_rmi4_sensor_sleep( struct synaptics_rmi4_data *rmi4_data); static int __maybe_unused synaptics_rmi4_regulator_lpm( struct synaptics_rmi4_data *rmi4_data, bool on); static void __maybe_unused synaptics_rmi4_release_all( struct synaptics_rmi4_data *rmi4_data); static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data *rmi4_data); static int synaptics_rmi4_suspend(struct device *dev); static int synaptics_rmi4_resume(struct device *dev); static ssize_t 
synaptics_rmi4_full_pm_cycle_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); #if defined(CONFIG_FB) static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data); #elif defined(CONFIG_HAS_EARLYSUSPEND) static void synaptics_rmi4_early_suspend(struct early_suspend *h); static void synaptics_rmi4_late_resume(struct early_suspend *h); #endif static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_flipx_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_flipx_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_rmi4_flipy_show(struct device *dev, struct device_attribute *attr, char *buf); static ssize_t synaptics_rmi4_flipy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static int synaptics_rmi4_capacitance_button_map( struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler); static irqreturn_t synaptics_rmi4_irq(int irq, void *data); #if defined(CONFIG_SECURE_TOUCH) static ssize_t synaptics_secure_touch_enable_show(struct device *dev, struct device_attribute 
*attr, char *buf); static ssize_t synaptics_secure_touch_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); static ssize_t synaptics_secure_touch_show(struct device *dev, struct device_attribute *attr, char *buf); #endif struct synaptics_rmi4_f01_device_status { union { struct { unsigned char status_code:4; unsigned char reserved:2; unsigned char flash_prog:1; unsigned char unconfigured:1; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f01_device_control_0 { union { struct { unsigned char sleep_mode:2; unsigned char nosleep:1; unsigned char reserved:2; unsigned char charger_input:1; unsigned char report_rate:1; unsigned char configured:1; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f12_query_5 { union { struct { unsigned char size_of_query6; struct { unsigned char ctrl0_is_present:1; unsigned char ctrl1_is_present:1; unsigned char ctrl2_is_present:1; unsigned char ctrl3_is_present:1; unsigned char ctrl4_is_present:1; unsigned char ctrl5_is_present:1; unsigned char ctrl6_is_present:1; unsigned char ctrl7_is_present:1; } __packed; struct { unsigned char ctrl8_is_present:1; unsigned char ctrl9_is_present:1; unsigned char ctrl10_is_present:1; unsigned char ctrl11_is_present:1; unsigned char ctrl12_is_present:1; unsigned char ctrl13_is_present:1; unsigned char ctrl14_is_present:1; unsigned char ctrl15_is_present:1; } __packed; struct { unsigned char ctrl16_is_present:1; unsigned char ctrl17_is_present:1; unsigned char ctrl18_is_present:1; unsigned char ctrl19_is_present:1; unsigned char ctrl20_is_present:1; unsigned char ctrl21_is_present:1; unsigned char ctrl22_is_present:1; unsigned char ctrl23_is_present:1; } __packed; struct { unsigned char ctrl24_is_present:1; unsigned char ctrl25_is_present:1; unsigned char ctrl26_is_present:1; unsigned char ctrl27_is_present:1; unsigned char ctrl28_is_present:1; unsigned char ctrl29_is_present:1; unsigned char ctrl30_is_present:1; unsigned char 
ctrl31_is_present:1; } __packed; }; unsigned char data[5]; }; }; struct synaptics_rmi4_f12_query_8 { union { struct { unsigned char size_of_query9; struct { unsigned char data0_is_present:1; unsigned char data1_is_present:1; unsigned char data2_is_present:1; unsigned char data3_is_present:1; unsigned char data4_is_present:1; unsigned char data5_is_present:1; unsigned char data6_is_present:1; unsigned char data7_is_present:1; } __packed; struct { unsigned char data8_is_present:1; unsigned char data9_is_present:1; unsigned char data10_is_present:1; unsigned char data11_is_present:1; unsigned char data12_is_present:1; unsigned char data13_is_present:1; unsigned char data14_is_present:1; unsigned char data15_is_present:1; } __packed; }; unsigned char data[3]; }; }; struct synaptics_rmi4_f12_ctrl_8 { union { struct { unsigned char max_x_coord_lsb; unsigned char max_x_coord_msb; unsigned char max_y_coord_lsb; unsigned char max_y_coord_msb; unsigned char rx_pitch_lsb; unsigned char rx_pitch_msb; unsigned char tx_pitch_lsb; unsigned char tx_pitch_msb; unsigned char low_rx_clip; unsigned char high_rx_clip; unsigned char low_tx_clip; unsigned char high_tx_clip; unsigned char num_of_rx; unsigned char num_of_tx; }; unsigned char data[14]; }; }; struct synaptics_rmi4_f12_ctrl_23 { union { struct { unsigned char obj_type_enable; unsigned char max_reported_objects; }; unsigned char data[2]; }; }; struct synaptics_rmi4_f12_finger_data { unsigned char object_type_and_status; unsigned char x_lsb; unsigned char x_msb; unsigned char y_lsb; unsigned char y_msb; #ifdef REPORT_2D_Z unsigned char z; #endif #ifdef REPORT_2D_W unsigned char wx; unsigned char wy; #endif }; struct synaptics_rmi4_f1a_query { union { struct { unsigned char max_button_count:3; unsigned char reserved:5; unsigned char has_general_control:1; unsigned char has_interrupt_enable:1; unsigned char has_multibutton_select:1; unsigned char has_tx_rx_map:1; unsigned char has_perbutton_threshold:1; unsigned char 
has_release_threshold:1; unsigned char has_strongestbtn_hysteresis:1; unsigned char has_filter_strength:1; } __packed; unsigned char data[2]; }; }; struct synaptics_rmi4_f1a_control_0 { union { struct { unsigned char multibutton_report:2; unsigned char filter_mode:2; unsigned char reserved:4; } __packed; unsigned char data[1]; }; }; struct synaptics_rmi4_f1a_control_3_4 { unsigned char transmitterbutton; unsigned char receiverbutton; }; struct synaptics_rmi4_f1a_control { struct synaptics_rmi4_f1a_control_0 general_control; unsigned char *button_int_enable; unsigned char *multi_button; struct synaptics_rmi4_f1a_control_3_4 *electrode_map; unsigned char *button_threshold; unsigned char button_release_threshold; unsigned char strongest_button_hysteresis; unsigned char filter_strength; }; struct synaptics_rmi4_f1a_handle { int button_bitmask_size; unsigned char button_count; unsigned char valid_button_count; unsigned char *button_data_buffer; unsigned char *button_map; struct synaptics_rmi4_f1a_query button_query; struct synaptics_rmi4_f1a_control button_control; }; struct synaptics_rmi4_f12_extra_data { unsigned char data1_offset; unsigned char data15_offset; unsigned char data15_size; unsigned char data15_data[(F12_FINGERS_TO_SUPPORT + 7) / 8]; }; struct synaptics_rmi4_exp_fn { enum exp_fn fn_type; bool inserted; int (*func_init)(struct synaptics_rmi4_data *rmi4_data); void (*func_remove)(struct synaptics_rmi4_data *rmi4_data); void (*func_attn)(struct synaptics_rmi4_data *rmi4_data, unsigned char intr_mask); struct list_head link; }; static struct device_attribute attrs[] = { __ATTR(full_pm_cycle, (S_IRUGO | S_IWUSR | S_IWGRP), synaptics_rmi4_full_pm_cycle_show, synaptics_rmi4_full_pm_cycle_store), __ATTR(reset, S_IWUSR | S_IWGRP, NULL, synaptics_rmi4_f01_reset_store), __ATTR(productinfo, S_IRUGO, synaptics_rmi4_f01_productinfo_show, synaptics_rmi4_store_error), __ATTR(buildid, S_IRUGO, synaptics_rmi4_f01_buildid_show, synaptics_rmi4_store_error), __ATTR(flashprog, 
S_IRUGO, synaptics_rmi4_f01_flashprog_show, synaptics_rmi4_store_error), __ATTR(0dbutton, (S_IRUGO | S_IWUSR | S_IWGRP), synaptics_rmi4_0dbutton_show, synaptics_rmi4_0dbutton_store), __ATTR(flipx, (S_IRUGO | S_IWUSR | S_IWGRP), synaptics_rmi4_flipx_show, synaptics_rmi4_flipx_store), __ATTR(flipy, (S_IRUGO | S_IWUSR | S_IWGRP), synaptics_rmi4_flipy_show, synaptics_rmi4_flipy_store), #if defined(CONFIG_SECURE_TOUCH) __ATTR(secure_touch_enable, (S_IRUGO | S_IWUSR | S_IWGRP), synaptics_secure_touch_enable_show, synaptics_secure_touch_enable_store), __ATTR(secure_touch, S_IRUGO , synaptics_secure_touch_show, NULL), #endif }; static bool exp_fn_inited; static struct mutex exp_fn_list_mutex; static struct list_head exp_fn_list; #if defined(CONFIG_SECURE_TOUCH) static int synaptics_secure_touch_clk_prepare_enable( struct synaptics_rmi4_data *rmi4_data) { int ret; ret = clk_prepare_enable(rmi4_data->iface_clk); if (ret) { dev_err(&rmi4_data->i2c_client->dev, "error on clk_prepare_enable(iface_clk):%d\n", ret); return ret; } ret = clk_prepare_enable(rmi4_data->core_clk); if (ret) { clk_disable_unprepare(rmi4_data->iface_clk); dev_err(&rmi4_data->i2c_client->dev, "error clk_prepare_enable(core_clk):%d\n", ret); } return ret; } static void synaptics_secure_touch_clk_disable_unprepare( struct synaptics_rmi4_data *rmi4_data) { clk_disable_unprepare(rmi4_data->core_clk); clk_disable_unprepare(rmi4_data->iface_clk); } static void synaptics_secure_touch_init(struct synaptics_rmi4_data *data) { int ret = 0; data->st_initialized = 0; init_completion(&data->st_powerdown); init_completion(&data->st_irq_processed); /* Get clocks */ data->core_clk = clk_get(&data->i2c_client->dev, "core_clk"); if (IS_ERR(data->core_clk)) { ret = PTR_ERR(data->core_clk); dev_err(&data->i2c_client->dev, "%s: error on clk_get(core_clk):%d\n", __func__, ret); return; } data->iface_clk = clk_get(&data->i2c_client->dev, "iface_clk"); if (IS_ERR(data->iface_clk)) { ret = PTR_ERR(data->iface_clk); 
dev_err(&data->i2c_client->dev, "%s: error on clk_get(iface_clk)\n", __func__); goto err_iface_clk; } data->st_initialized = 1; return; err_iface_clk: clk_put(data->core_clk); data->core_clk = NULL; } static void synaptics_secure_touch_notify(struct synaptics_rmi4_data *data) { sysfs_notify(&data->i2c_client->dev.kobj, NULL, "secure_touch"); } static irqreturn_t synaptics_filter_interrupt(struct synaptics_rmi4_data *data) { if (atomic_read(&data->st_enabled)) { if (atomic_cmpxchg(&data->st_pending_irqs, 0, 1) == 0) { synaptics_secure_touch_notify(data); wait_for_completion_interruptible( &data->st_irq_processed); } return IRQ_HANDLED; } return IRQ_NONE; } static void synaptics_secure_touch_stop( struct synaptics_rmi4_data *data, int blocking) { if (atomic_read(&data->st_enabled)) { atomic_set(&data->st_pending_irqs, -1); synaptics_secure_touch_notify(data); if (blocking) wait_for_completion_interruptible(&data->st_powerdown); } } #else static void synaptics_secure_touch_init(struct synaptics_rmi4_data *data) { } static irqreturn_t synaptics_filter_interrupt(struct synaptics_rmi4_data *data) { return IRQ_NONE; } static void synaptics_secure_touch_stop( struct synaptics_rmi4_data *data, int blocking) { } #endif #if defined(CONFIG_SECURE_TOUCH) static ssize_t synaptics_secure_touch_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *data = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%d", atomic_read(&data->st_enabled)); } /* * Accept only "0" and "1" valid values. * "0" will reset the st_enabled flag, then wake up the reading process and * the interrupt handler. * The bus driver is notified via pm_runtime that it is not required to stay * awake anymore. * It will also make sure the queue of events is emptied in the controller, * in case a touch happened in between the secure touch being disabled and * the local ISR being ungated. * "1" will set the st_enabled flag and clear the st_pending_irqs flag. 
* The bus driver is requested via pm_runtime to stay awake. */ static ssize_t synaptics_secure_touch_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct synaptics_rmi4_data *data = dev_get_drvdata(dev); struct device *adapter = data->i2c_client->adapter->dev.parent; unsigned long value; int err = 0; if (count > 2) return -EINVAL; err = kstrtoul(buf, 10, &value); if (err != 0) return err; if (!data->st_initialized) return -EIO; err = count; switch (value) { case 0: if (atomic_read(&data->st_enabled) == 0) break; synaptics_secure_touch_clk_disable_unprepare(data); pm_runtime_put_sync(adapter); atomic_set(&data->st_enabled, 0); synaptics_secure_touch_notify(data); complete(&data->st_irq_processed); synaptics_rmi4_irq(data->irq, data); complete(&data->st_powerdown); break; case 1: if (atomic_read(&data->st_enabled)) { err = -EBUSY; break; } synchronize_irq(data->irq); if (pm_runtime_get_sync(adapter) < 0) { dev_err(&data->i2c_client->dev, "pm_runtime_get_sync failed\n"); err = -EIO; break; } if (synaptics_secure_touch_clk_prepare_enable(data) < 0) { pm_runtime_put_sync(adapter); err = -EIO; break; } reinit_completion(&data->st_powerdown); reinit_completion(&data->st_irq_processed); atomic_set(&data->st_enabled, 1); atomic_set(&data->st_pending_irqs, 0); break; default: dev_err(&data->i2c_client->dev, "unsupported value: %lu\n", value); err = -EINVAL; break; } return err; } /* * This function returns whether there are pending interrupts, or * other error conditions that need to be signaled to the userspace library, * according tot he following logic: * - st_enabled is 0 if secure touch is not enabled, returning -EBADF * - st_pending_irqs is -1 to signal that secure touch is in being stopped, * returning -EINVAL * - st_pending_irqs is 1 to signal that there is a pending irq, returning * the value "1" to the sysfs read operation * - st_pending_irqs is 0 (only remaining case left) if the pending interrupt * has been 
processed, so the interrupt handler can be allowed to continue. */ static ssize_t synaptics_secure_touch_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *data = dev_get_drvdata(dev); int val = 0; if (atomic_read(&data->st_enabled) == 0) return -EBADF; if (atomic_cmpxchg(&data->st_pending_irqs, -1, 0) == -1) return -EINVAL; if (atomic_cmpxchg(&data->st_pending_irqs, 1, 0) == 1) val = 1; else complete(&data->st_irq_processed); return scnprintf(buf, PAGE_SIZE, "%u", val); } #endif static int synaptics_rmi4_debug_suspend_set(void *_data, u64 val) { struct synaptics_rmi4_data *rmi4_data = _data; if (val) synaptics_rmi4_suspend(&rmi4_data->input_dev->dev); else synaptics_rmi4_resume(&rmi4_data->input_dev->dev); return 0; } static int synaptics_rmi4_debug_suspend_get(void *_data, u64 *val) { struct synaptics_rmi4_data *rmi4_data = _data; *val = rmi4_data->suspended; return 0; } DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, synaptics_rmi4_debug_suspend_get, synaptics_rmi4_debug_suspend_set, "%lld\n"); static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->full_pm_cycle); } static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int input, retval; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); retval = kstrtouint(buf, 10, &input); if (retval) return retval; rmi4_data->full_pm_cycle = input > 0 ? 
1 : 0; return count; } #ifdef CONFIG_FB static void configure_sleep(struct synaptics_rmi4_data *rmi4_data) { int retval = 0; rmi4_data->fb_notif.notifier_call = fb_notifier_callback; retval = fb_register_client(&rmi4_data->fb_notif); if (retval) dev_err(&rmi4_data->i2c_client->dev, "Unable to register fb_notifier: %d\n", retval); } #elif defined CONFIG_HAS_EARLYSUSPEND static void configure_sleep(struct synaptics_rmi4_data *rmi4_data) { rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1; rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend; rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume; register_early_suspend(&rmi4_data->early_suspend); } #else static void configure_sleep(struct synaptics_rmi4_data *rmi4_data) { } #endif static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int retval; unsigned int reset; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); retval = kstrtouint(buf, 10, &reset); if (retval) return retval; if (reset != 1) return -EINVAL; retval = synaptics_rmi4_reset_device(rmi4_data); if (retval < 0) { dev_err(dev, "%s: Failed to issue reset command, error = %d\n", __func__, retval); return retval; } return count; } static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n", (rmi4_data->rmi4_mod_info.product_info[0]), (rmi4_data->rmi4_mod_info.product_info[1])); } static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int build_id; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); build_id = (unsigned int)rmi->build_id[0] + (unsigned int)rmi->build_id[1] * 0x100 + (unsigned int)rmi->build_id[2] * 0x10000; 
return snprintf(buf, PAGE_SIZE, "%u\n", build_id); } static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev, struct device_attribute *attr, char *buf) { int retval; struct synaptics_rmi4_f01_device_status device_status; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_data_base_addr, device_status.data, sizeof(device_status.data)); if (retval < 0) { dev_err(dev, "%s: Failed to read device status, error = %d\n", __func__, retval); return retval; } return snprintf(buf, PAGE_SIZE, "%u\n", device_status.flash_prog); } static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->button_0d_enabled); } static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int retval; unsigned int input; unsigned char ii; unsigned char intr_enable; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); retval = kstrtouint(buf, 10, &input); if (retval) return retval; input = input > 0 ? 
1 : 0; if (rmi4_data->button_0d_enabled == input) return count; mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) { ii = fhandler->intr_reg_num; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_ctrl_base_addr + 1 + ii, &intr_enable, sizeof(intr_enable)); if (retval < 0) goto exit; if (input == 1) intr_enable |= fhandler->intr_mask; else intr_enable &= ~fhandler->intr_mask; retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_ctrl_base_addr + 1 + ii, &intr_enable, sizeof(intr_enable)); if (retval < 0) goto exit; } } } mutex_unlock(&rmi->support_fn_list_mutex); rmi4_data->button_0d_enabled = input; return count; exit: mutex_unlock(&rmi->support_fn_list_mutex); return retval; } static ssize_t synaptics_rmi4_flipx_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->flip_x); } static ssize_t synaptics_rmi4_flipx_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int input; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); if (sscanf(buf, "%u", &input) != 1) return -EINVAL; rmi4_data->flip_x = input > 0 ? 1 : 0; return count; } static ssize_t synaptics_rmi4_flipy_show(struct device *dev, struct device_attribute *attr, char *buf) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", rmi4_data->flip_y); } static ssize_t synaptics_rmi4_flipy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned int input; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); if (sscanf(buf, "%u", &input) != 1) return -EINVAL; rmi4_data->flip_y = input > 0 ? 
1 : 0; return count; } /** * synaptics_rmi4_set_page() * * Called by synaptics_rmi4_i2c_read() and synaptics_rmi4_i2c_write(). * * This function writes to the page select register to switch to the * assigned page. */ static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *rmi4_data, unsigned int address) { int retval = 0; unsigned char retry; unsigned char buf[PAGE_SELECT_LEN]; unsigned char page; struct i2c_client *i2c = rmi4_data->i2c_client; page = ((address >> 8) & MASK_8BIT); if (page != rmi4_data->current_page) { buf[0] = MASK_8BIT; buf[1] = page; for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { retval = i2c_master_send(i2c, buf, PAGE_SELECT_LEN); if (retval != PAGE_SELECT_LEN) { dev_err(&i2c->dev, "%s: I2C retry %d\n", __func__, retry + 1); msleep(20); } else { rmi4_data->current_page = page; break; } } } else return PAGE_SELECT_LEN; return (retval == PAGE_SELECT_LEN) ? retval : -EIO; } /** * synaptics_rmi4_i2c_read() * * Called by various functions in this driver, and also exported to * other expansion Function modules such as rmi_dev. * * This function reads data of an arbitrary length from the sensor, * starting from an assigned register address of the sensor, via I2C * with a retry mechanism. 
*/ static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length) { int retval; unsigned char retry; unsigned char buf; struct i2c_msg msg[] = { { .addr = rmi4_data->i2c_client->addr, .flags = 0, .len = 1, .buf = &buf, }, { .addr = rmi4_data->i2c_client->addr, .flags = I2C_M_RD, .len = length, .buf = data, }, }; buf = addr & MASK_8BIT; mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex)); retval = synaptics_rmi4_set_page(rmi4_data, addr); if (retval != PAGE_SELECT_LEN) goto exit; for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 2) == 2) { retval = length; break; } dev_err(&rmi4_data->i2c_client->dev, "%s: I2C retry %d\n", __func__, retry + 1); msleep(20); } if (retry == SYN_I2C_RETRY_TIMES) { dev_err(&rmi4_data->i2c_client->dev, "%s: I2C read over retry limit\n", __func__); retval = -EIO; } exit: mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex)); return retval; } /** * synaptics_rmi4_i2c_write() * * Called by various functions in this driver, and also exported to * other expansion Function modules such as rmi_dev. * * This function writes data of an arbitrary length to the sensor, * starting from an assigned register address of the sensor, via I2C with * a retry mechanism. 
*/ static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data, unsigned short addr, unsigned char *data, unsigned short length) { int retval; unsigned char retry; unsigned char buf[length + 1]; struct i2c_msg msg[] = { { .addr = rmi4_data->i2c_client->addr, .flags = 0, .len = length + 1, .buf = buf, } }; mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex)); retval = synaptics_rmi4_set_page(rmi4_data, addr); if (retval != PAGE_SELECT_LEN) goto exit; buf[0] = addr & MASK_8BIT; memcpy(&buf[1], &data[0], length); for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) { if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 1) == 1) { retval = length; break; } dev_err(&rmi4_data->i2c_client->dev, "%s: I2C retry %d\n", __func__, retry + 1); msleep(20); } if (retry == SYN_I2C_RETRY_TIMES) { dev_err(&rmi4_data->i2c_client->dev, "%s: I2C write over retry limit\n", __func__); retval = -EIO; } exit: mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex)); return retval; } /** * synaptics_rmi4_release_all() * * Called by synaptics_rmi4_suspend() * * Release all touch data during the touch device switch to suspend state. */ static void synaptics_rmi4_release_all(struct synaptics_rmi4_data *rmi4_data) { int finger; int max_num_fingers = rmi4_data->num_of_fingers; for (finger = 0; finger < max_num_fingers; finger++) { input_mt_slot(rmi4_data->input_dev, finger); input_mt_report_slot_state(rmi4_data->input_dev, MT_TOOL_FINGER, 0); } input_report_key(rmi4_data->input_dev, BTN_TOUCH, 0); input_report_key(rmi4_data->input_dev, BTN_TOOL_FINGER, 0); input_sync(rmi4_data->input_dev); } /** * synaptics_rmi4_f11_abs_report() * * Called by synaptics_rmi4_report_touch() when valid Function $11 * finger data has been detected. * * This function reads the Function $11 data registers, determines the * status of each finger supported by the Function, processes any * necessary coordinate manipulation, reports the finger data to * the input subsystem, and returns the number of fingers detected. 
 */
static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char touch_count = 0; /* number of touch points */
	unsigned char reg_index;
	unsigned char finger;
	unsigned char fingers_supported;
	unsigned char num_of_finger_status_regs;
	unsigned char finger_shift;
	unsigned char finger_status;
	unsigned char data_reg_blk_size;
	unsigned char finger_status_reg[3];
	unsigned char data[F11_STD_DATA_LEN];
	unsigned short data_addr;
	unsigned short data_offset;
	int x;
	int y;
	int wx;
	int wy;
	int z;

	/*
	 * The number of finger status registers is determined by the
	 * maximum number of fingers supported - 2 bits per finger. So
	 * the number of finger status registers to read is:
	 * register_count = ceil(max_num_of_fingers / 4)
	 */
	fingers_supported = fhandler->num_of_data_points;
	num_of_finger_status_regs = (fingers_supported + 3) / 4;
	data_addr = fhandler->full_addr.data_base;
	data_reg_blk_size = fhandler->size_of_data_register_block;

	/* One bulk read covers all finger status bit-pairs. */
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr,
			finger_status_reg,
			num_of_finger_status_regs);
	if (retval < 0)
		return 0; /* report "no fingers" on bus error */

	for (finger = 0; finger < fingers_supported; finger++) {
		reg_index = finger / 4;
		finger_shift = (finger % 4) * 2;
		finger_status = (finger_status_reg[reg_index] >> finger_shift)
				& MASK_2BIT;

		/*
		 * Each 2-bit finger status field represents the following:
		 * 00 = finger not present
		 * 01 = finger present and data accurate
		 * 10 = finger present but data may be inaccurate
		 * 11 = reserved
		 */
#ifdef TYPE_B_PROTOCOL
		input_mt_slot(rmi4_data->input_dev, finger);
		input_mt_report_slot_state(rmi4_data->input_dev,
				MT_TOOL_FINGER, finger_status != 0);
#endif

		if (finger_status) {
			/* Per-finger data block follows the status registers. */
			data_offset = data_addr +
					num_of_finger_status_regs +
					(finger * data_reg_blk_size);
			retval = synaptics_rmi4_i2c_read(rmi4_data,
					data_offset,
					data,
					data_reg_blk_size);
			if (retval < 0)
				return 0;

			/* 12-bit coordinates: 8 MSBs + 4 LSBs packed in data[2]. */
			x = (data[0] << 4) | (data[2] & MASK_4BIT);
			y = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT);
			wx = (data[3] & MASK_4BIT);
			wy = (data[3] >> 4) & MASK_4BIT;
			/*
			 * NOTE(review): data[4] is read even when the block
			 * size computed in f11_init is only 3 bytes - looks
			 * like z may then be stale; confirm against the F11
			 * register map.
			 */
			z = data[4];

			if (rmi4_data->flip_x)
				x = rmi4_data->sensor_max_x - x;
			if (rmi4_data->flip_y)
				y = rmi4_data->sensor_max_y - y;

			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: Finger %d:\n"
					"status = 0x%02x\n"
					"x = %d\n"
					"y = %d\n"
					"wx = %d\n"
					"wy = %d\n",
					__func__, finger,
					finger_status,
					x, y, wx, wy);

			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_X, x);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_Y, y);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_PRESSURE, z);
#ifdef REPORT_2D_W
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MAJOR, max(wx, wy));
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MINOR, min(wx, wy));
#endif
#ifndef TYPE_B_PROTOCOL
			input_mt_sync(rmi4_data->input_dev);
#endif
			touch_count++;
		}
	}

	input_report_key(rmi4_data->input_dev, BTN_TOUCH, touch_count > 0);
	input_report_key(rmi4_data->input_dev,
			BTN_TOOL_FINGER, touch_count > 0);

#ifndef TYPE_B_PROTOCOL
	/* Type A: an empty sync frame signals "all fingers lifted". */
	if (!touch_count)
		input_mt_sync(rmi4_data->input_dev);
#else
	input_mt_report_pointer_emulation(rmi4_data->input_dev, false);
#endif

	input_sync(rmi4_data->input_dev);

	return touch_count;
}

/**
 * synaptics_rmi4_f12_abs_report()
 *
 * Called by synaptics_rmi4_report_touch() when valid Function $12
 * finger data has been detected.
 *
 * This function reads the Function $12 data registers, determines the
 * status of each finger supported by the Function, processes any
 * necessary coordinate manipulation, reports the finger data to
 * the input subsystem, and returns the number of fingers detected.
 */
static int synaptics_rmi4_f12_abs_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char touch_count = 0; /* number of touch points */
	unsigned char finger;
	unsigned char fingers_to_process;
	unsigned char finger_status;
	unsigned char size_of_2d_data;
	unsigned short data_addr;
	int x;
	int y;
	int wx;
	int wy;
	struct synaptics_rmi4_f12_extra_data *extra_data;
	struct synaptics_rmi4_f12_finger_data *data;
	struct synaptics_rmi4_f12_finger_data *finger_data;

	fingers_to_process = fhandler->num_of_data_points;
	data_addr = fhandler->full_addr.data_base;
	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;
	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);

	/* One bulk read fetches every finger's data record into fhandler->data. */
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr + extra_data->data1_offset,
			(unsigned char *)fhandler->data,
			fingers_to_process * size_of_2d_data);
	if (retval < 0)
		return 0; /* report "no fingers" on bus error */

	data = (struct synaptics_rmi4_f12_finger_data *)fhandler->data;

	for (finger = 0; finger < fingers_to_process; finger++) {
		finger_data = data + finger;
		finger_status = finger_data->object_type_and_status & MASK_2BIT;

		/*
		 * Each 2-bit finger status field represents the following:
		 * 00 = finger not present
		 * 01 = finger present and data accurate
		 * 10 = finger present but data may be inaccurate
		 * 11 = reserved
		 */
#ifdef TYPE_B_PROTOCOL
		input_mt_slot(rmi4_data->input_dev, finger);
		input_mt_report_slot_state(rmi4_data->input_dev,
				MT_TOOL_FINGER, finger_status != 0);
#endif

		if (finger_status) {
			x = (finger_data->x_msb << 8) | (finger_data->x_lsb);
			y = (finger_data->y_msb << 8) | (finger_data->y_lsb);
#ifdef REPORT_2D_W
			wx = finger_data->wx;
			wy = finger_data->wy;
#endif
			/*
			 * NOTE(review): with REPORT_2D_W undefined, wx/wy are
			 * never assigned yet still passed to dev_dbg below -
			 * confirm this build always defines REPORT_2D_W.
			 */

			if (rmi4_data->flip_x)
				x = rmi4_data->sensor_max_x - x;
			if (rmi4_data->flip_y)
				y = rmi4_data->sensor_max_y - y;

			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: Finger %d:\n"
					"status = 0x%02x\n"
					"x = %d\n"
					"y = %d\n"
					"wx = %d\n"
					"wy = %d\n",
					__func__, finger,
					finger_status,
					x, y, wx, wy);

			input_report_key(rmi4_data->input_dev,
					BTN_TOUCH, 1);
			input_report_key(rmi4_data->input_dev,
					BTN_TOOL_FINGER, 1);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_X, x);
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_POSITION_Y, y);
#ifdef REPORT_2D_W
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MAJOR, max(wx, wy));
			input_report_abs(rmi4_data->input_dev,
					ABS_MT_TOUCH_MINOR, min(wx, wy));
#endif
#ifndef TYPE_B_PROTOCOL
			input_mt_sync(rmi4_data->input_dev);
#endif
			touch_count++;
		}
	}

	input_report_key(rmi4_data->input_dev, BTN_TOUCH, touch_count > 0);
	input_report_key(rmi4_data->input_dev,
			BTN_TOOL_FINGER, touch_count > 0);
#ifndef TYPE_B_PROTOCOL
	/* Type A: an empty sync frame signals "all fingers lifted". */
	if (!touch_count)
		input_mt_sync(rmi4_data->input_dev);
#endif
	input_mt_report_pointer_emulation(rmi4_data->input_dev, false);
	input_sync(rmi4_data->input_dev);

	return touch_count;
}

/*
 * Report 0-D capacitive button (F1A) state transitions to the input
 * subsystem. Only buttons whose state changed since the previous pass
 * are reported; per-button state is kept in function-local statics.
 */
static void synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	unsigned char button;
	unsigned char index;
	unsigned char shift;
	unsigned char status;
	unsigned char *data;
	unsigned short data_addr = fhandler->full_addr.data_base;
	struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
	/*
	 * NOTE(review): static state assumes a single device instance -
	 * a second F1A sensor would share these arrays; confirm.
	 */
	static unsigned char do_once = 1;
	static bool current_status[MAX_NUMBER_OF_BUTTONS];
#ifdef NO_0D_WHILE_2D
	static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
	static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
#endif

	/* Lazy one-time initialization of the per-button state tables. */
	if (do_once) {
		memset(current_status, 0, sizeof(current_status));
#ifdef NO_0D_WHILE_2D
		memset(before_2d_status, 0, sizeof(before_2d_status));
		memset(while_2d_status, 0, sizeof(while_2d_status));
#endif
		do_once = 0;
	}

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			data_addr,
			f1a->button_data_buffer,
			f1a->button_bitmask_size);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to read button data registers\n",
				__func__);
		return;
	}

	data = f1a->button_data_buffer;

	for (button = 0; button < f1a->valid_button_count; button++) {
		/* One status bit per button, packed 8 to a byte. */
		index = button / 8;
		shift = button % 8;
		status = ((data[index] >> shift) & MASK_1BIT);

		/* Skip buttons whose state did not change. */
		if (current_status[button] == status)
			continue;
		else
			current_status[button] = status;

		dev_dbg(&rmi4_data->i2c_client->dev,
				"%s: Button %d (code %d) ->%d\n",
				__func__, button,
				f1a->button_map[button],
				status);
#ifdef NO_0D_WHILE_2D
		/* Suppress button events that occur while fingers are on 2D. */
		if (rmi4_data->fingers_on_2d == false) {
			if (status == 1) {
				before_2d_status[button] = 1;
			} else {
				if (while_2d_status[button] == 1) {
					while_2d_status[button] = 0;
					continue;
				} else {
					before_2d_status[button] = 0;
				}
			}
			input_report_key(rmi4_data->input_dev,
					f1a->button_map[button],
					status);
		} else {
			if (before_2d_status[button] == 1) {
				before_2d_status[button] = 0;
				input_report_key(rmi4_data->input_dev,
						f1a->button_map[button],
						status);
			} else {
				if (status == 1)
					while_2d_status[button] = 1;
				else
					while_2d_status[button] = 0;
			}
		}
#else
		input_report_key(rmi4_data->input_dev,
				f1a->button_map[button],
				status);
#endif
	}

	input_sync(rmi4_data->input_dev);
}

/**
 * synaptics_rmi4_report_touch()
 *
 * Called by synaptics_rmi4_sensor_report().
 *
 * This function calls the appropriate finger data reporting function
 * based on the function handler it receives and returns the number of
 * fingers detected.
 */
static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		unsigned char *touch_count)
{
	unsigned char touch_count_2d;

	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Function %02x reporting\n",
			__func__, fhandler->fn_number);

	switch (fhandler->fn_number) {
	case SYNAPTICS_RMI4_F11:
		touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
				fhandler);

		*touch_count += touch_count_2d;

		if (touch_count_2d)
			rmi4_data->fingers_on_2d = true;
		else
			rmi4_data->fingers_on_2d = false;
		break;
	case SYNAPTICS_RMI4_F12:
		/*
		 * NOTE(review): unlike the F11 branch, touch_count_2d is not
		 * accumulated into *touch_count here - confirm whether the
		 * caller relies on the total for F12 devices.
		 */
		touch_count_2d = synaptics_rmi4_f12_abs_report(rmi4_data,
				fhandler);

		if (touch_count_2d)
			rmi4_data->fingers_on_2d = true;
		else
			rmi4_data->fingers_on_2d = false;
		break;
	case SYNAPTICS_RMI4_F1A:
		synaptics_rmi4_f1a_report(rmi4_data, fhandler);
		break;
	default:
		break;
	}
}

/**
 * synaptics_rmi4_sensor_report()
 *
 * Called by synaptics_rmi4_irq().
 *
 * This function determines the interrupt source(s) from the sensor
 * and calls synaptics_rmi4_report_touch() with the appropriate
 * function handler for each function with valid data inputs.
 */
static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	unsigned char touch_count = 0;
	unsigned char intr[MAX_INTR_REGISTERS];
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_exp_fn *exp_fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	/*
	 * Get interrupt status information from F01 Data1 register to
	 * determine the source(s) that are flagging the interrupt.
	 */
	retval = synaptics_rmi4_i2c_read(rmi4_data,
			rmi4_data->f01_data_base_addr + 1,
			intr,
			rmi4_data->num_of_intr_regs);
	if (retval < 0)
		return retval;

	/*
	 * Traverse the function handler list and service the source(s)
	 * of the interrupt accordingly.
	 */
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
			if (fhandler->num_of_data_sources) {
				if (fhandler->intr_mask &
						intr[fhandler->intr_reg_num]) {
					synaptics_rmi4_report_touch(rmi4_data,
							fhandler, &touch_count);
				}
			}
		}
	}
	mutex_unlock(&rmi->support_fn_list_mutex);

	/* Give expansion modules (e.g. rmi_dev) a chance to see the IRQ. */
	mutex_lock(&exp_fn_list_mutex);
	if (!list_empty(&exp_fn_list)) {
		list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
			if (exp_fhandler->inserted &&
					(exp_fhandler->func_attn != NULL))
				exp_fhandler->func_attn(rmi4_data, intr[0]);
		}
	}
	mutex_unlock(&exp_fn_list_mutex);

	return touch_count;
}

/**
 * synaptics_rmi4_irq()
 *
 * Called by the kernel when an interrupt occurs (when the sensor
 * asserts the attention irq).
 *
 * This function is the ISR thread and handles the acquisition
 * and the reporting of finger data when the presence of fingers
 * is detected.
 */
static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
{
	struct synaptics_rmi4_data *rmi4_data = data;

	/* Allow a filter hook to consume the interrupt entirely. */
	if (IRQ_HANDLED == synaptics_filter_interrupt(data))
		return IRQ_HANDLED;

	synaptics_rmi4_sensor_report(rmi4_data);

	return IRQ_HANDLED;
}

#ifdef CONFIG_OF
/*
 * Parse the optional "synaptics,button-map" DT property into
 * rmi4_pdata->capacitance_button_map. Returns 0 when the property is
 * absent; -ENOMEM / -EINVAL / read errors otherwise. Memory is
 * devm-managed, so no explicit cleanup is needed on failure.
 */
static int synaptics_rmi4_get_button_map(struct device *dev, char *name,
		struct synaptics_rmi4_platform_data *rmi4_pdata,
		struct device_node *np)
{
	struct property *prop;
	int rc, i;
	u32 temp_val, num_buttons;
	u32 button_map[MAX_NUMBER_OF_BUTTONS];

	prop = of_find_property(np, "synaptics,button-map", NULL);
	if (prop) {
		num_buttons = prop->length / sizeof(temp_val);
		rmi4_pdata->capacitance_button_map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map),
			GFP_KERNEL);
		if (!rmi4_pdata->capacitance_button_map)
			return -ENOMEM;
		rmi4_pdata->capacitance_button_map->map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map->map) *
			MAX_NUMBER_OF_BUTTONS, GFP_KERNEL);
		if (!rmi4_pdata->capacitance_button_map->map)
			return -ENOMEM;
		if (num_buttons <= MAX_NUMBER_OF_BUTTONS) {
			rc = of_property_read_u32_array(np,
				"synaptics,button-map", button_map,
				num_buttons);
			if (rc) {
				dev_err(dev, "Unable to read key codes\n");
				return rc;
			}
			for (i = 0; i < num_buttons; i++)
				rmi4_pdata->capacitance_button_map->map[i] =
					button_map[i];
			rmi4_pdata->capacitance_button_map->nbuttons =
				num_buttons;
		} else {
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Read a 4-entry coordinate property (min-x, min-y, max-x, max-y) from
 * DT into pdata. 'name' selects between panel and display coordinates;
 * panel coordinates are additionally validated (min must be 0, max
 * non-zero). When 'node' is NULL the device's own of_node is used.
 */
static int synaptics_rmi4_get_dt_coords(struct device *dev, char *name,
		struct synaptics_rmi4_platform_data *pdata,
		struct device_node *node)
{
	u32 coords[RMI4_COORDS_ARR_SIZE];
	struct property *prop;
	struct device_node *np = (node == NULL) ? (dev->of_node) : (node);
	int coords_size, rc;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -EINVAL;
	if (!prop->value)
		return -ENODATA;

	coords_size = prop->length / sizeof(u32);
	if (coords_size != RMI4_COORDS_ARR_SIZE) {
		dev_err(dev, "invalid %s\n", name);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(np, name, coords, coords_size);
	if (rc && (rc != -EINVAL)) {
		dev_err(dev, "Unable to read %s\n", name);
		return rc;
	}

	if (strcmp(name, "synaptics,panel-coords") == 0) {
		pdata->panel_minx = coords[0];
		pdata->panel_miny = coords[1];
		pdata->panel_maxx = coords[2];
		pdata->panel_maxy = coords[3];

		/* Panel range must start at 0 and have a non-zero maximum. */
		if (pdata->panel_maxx == 0 || pdata->panel_minx > 0)
			rc = -EINVAL;
		else if (pdata->panel_maxy == 0 || pdata->panel_miny > 0)
			rc = -EINVAL;

		if (rc) {
			dev_err(dev, "Invalid panel resolution %d\n", rc);
			return rc;
		}
	} else if (strcmp(name, "synaptics,display-coords") == 0) {
		pdata->disp_minx = coords[0];
		pdata->disp_miny = coords[1];
		pdata->disp_maxx = coords[2];
		pdata->disp_maxy = coords[3];
	} else {
		dev_err(dev, "unsupported property %s\n", name);
		return -EINVAL;
	}

	return 0;
}

/*
 * Walk the child nodes of the controller's DT node and apply the first
 * child whose package id (and, optionally, panel coordinates) matches
 * the values read from the controller itself.
 */
static int synaptics_rmi4_parse_dt_children(struct device *dev,
		struct synaptics_rmi4_platform_data *rmi4_pdata,
		struct synaptics_rmi4_data *rmi4_data)
{
	struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info);
	struct device_node *node = dev->of_node, *child;
	int rc = 0;
	struct synaptics_rmi4_fn *fhandler = NULL;

	for_each_child_of_node(node, child) {
		rc = of_property_read_u32(child, "synaptics,package-id",
				&rmi4_pdata->package_id);
		if (rc && (rc != -EINVAL)) {
			dev_err(dev, "Unable to read package_id\n");
			return rc;
		} else if (rc == -EINVAL) {
			/* Property absent: treat as wildcard package id. */
			rmi4_pdata->package_id = 0x00;
		}

		if (rmi4_pdata->package_id) {
			if (rmi4_pdata->package_id != rmi->package_id) {
				dev_err(dev,
					"%s: Synaptics package id don't match %d %d\n",
					__func__,
					rmi4_pdata->package_id,
					rmi->package_id);
				continue;
			} else {
				/*
				 * If package id read from DT matches the
				 * package id value read from touch controller,
				 * also check if sensor dimensions read from DT
				 * match those read from controller, before
				 * moving further. For this first check if touch
				 * panel coordinates are defined in DT or not.
				 */
				if (of_find_property(child,
					"synaptics,panel-coords", NULL)) {
					synaptics_rmi4_get_dt_coords(dev,
						"synaptics,panel-coords",
						rmi4_pdata, child);
					dev_info(dev, "Pmax_x Pmax_y = %d:%d\n",
						rmi4_pdata->panel_maxx,
						rmi4_pdata->panel_maxy);
					dev_info(dev, "Smax_x Smax_y = %d:%d\n",
						rmi4_data->sensor_max_x,
						rmi4_data->sensor_max_y);
					if ((rmi4_pdata->panel_maxx !=
						rmi4_data->sensor_max_x) ||
						(rmi4_pdata->panel_maxy !=
						rmi4_data->sensor_max_y))
						continue;
				}
			}
		}

		rc = synaptics_rmi4_get_dt_coords(dev,
				"synaptics,display-coords",
				rmi4_pdata, child);
		if (rc && (rc != -EINVAL))
			return rc;

		rc = synaptics_rmi4_get_button_map(dev,
				"synaptics,button-map",
				rmi4_pdata, child);
		if (rc < 0) {
			dev_err(dev, "Unable to read key codes\n");
			return rc;
		}

		mutex_lock(&rmi->support_fn_list_mutex);
		if (!list_empty(&rmi->support_fn_list)) {
			list_for_each_entry(fhandler,
					&rmi->support_fn_list, link) {
				if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
					break;
			}
		}
		mutex_unlock(&rmi->support_fn_list_mutex);

		/*
		 * NOTE(review): if the list is non-empty but contains no F1A
		 * entry, list_for_each_entry leaves fhandler pointing at the
		 * list head's container, so the fn_number dereference below
		 * reads memory that is not a real handler - confirm an F1A
		 * entry is guaranteed, or compare against the head instead.
		 */
		if (fhandler != NULL &&
				fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
			rc = synaptics_rmi4_capacitance_button_map(rmi4_data,
					fhandler);
			if (rc < 0) {
				dev_err(dev, "Fail to register F1A %d\n", rc);
				return rc;
			}
		}
		/* Only the first matching child is applied. */
		break;
	}

	return 0;
}

static int
synaptics_rmi4_parse_dt(struct device *dev,
		struct synaptics_rmi4_platform_data *rmi4_pdata)
{
	struct device_node *np = dev->of_node;
	struct property *prop;
	u32 temp_val, num_buttons;
	u32 button_map[MAX_NUMBER_OF_BUTTONS];
	int rc, i;

	/* Boolean feature flags: absent property means false. */
	rmi4_pdata->i2c_pull_up = of_property_read_bool(np,
			"synaptics,i2c-pull-up");
	rmi4_pdata->power_down_enable = of_property_read_bool(np,
			"synaptics,power-down");
	rmi4_pdata->disable_gpios = of_property_read_bool(np,
			"synaptics,disable-gpios");
	rmi4_pdata->modify_reso = of_property_read_bool(np,
			"synaptics,modify-reso");
	rmi4_pdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
	rmi4_pdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
	rmi4_pdata->do_lockdown = of_property_read_bool(np,
			"synaptics,do-lockdown");

	/* -EINVAL (property missing) is tolerated for both coordinate sets. */
	rc = synaptics_rmi4_get_dt_coords(dev, "synaptics,display-coords",
			rmi4_pdata, NULL);
	if (rc && (rc != -EINVAL))
		return rc;

	rc = synaptics_rmi4_get_dt_coords(dev, "synaptics,panel-coords",
			rmi4_pdata, NULL);
	if (rc && (rc != -EINVAL))
		return rc;

	/* Optional reset delay override; defaults to RESET_DELAY. */
	rmi4_pdata->reset_delay = RESET_DELAY;
	rc = of_property_read_u32(np, "synaptics,reset-delay", &temp_val);
	if (!rc)
		rmi4_pdata->reset_delay = temp_val;
	else if (rc != -EINVAL) {
		dev_err(dev, "Unable to read reset delay\n");
		return rc;
	}

	rc = of_property_read_string(np, "synaptics,fw-image-name",
		&rmi4_pdata->fw_image_name);
	if (rc && (rc != -EINVAL)) {
		dev_err(dev, "Unable to read fw image name\n");
		return rc;
	}

	/* reset, irq gpio info */
	rmi4_pdata->reset_gpio = of_get_named_gpio_flags(np,
			"synaptics,reset-gpio", 0, &rmi4_pdata->reset_flags);
	rmi4_pdata->irq_gpio = of_get_named_gpio_flags(np,
			"synaptics,irq-gpio", 0, &rmi4_pdata->irq_flags);

	rmi4_pdata->detect_device = of_property_read_bool(np,
					"synaptics,detect-device");

	/* In detect-device mode, per-child parsing happens later. */
	if (rmi4_pdata->detect_device)
		return 0;

	prop = of_find_property(np, "synaptics,button-map", NULL);
	if (prop) {
		num_buttons = prop->length / sizeof(temp_val);
		rmi4_pdata->capacitance_button_map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map),
			GFP_KERNEL);
		if (!rmi4_pdata->capacitance_button_map)
			return -ENOMEM;
		rmi4_pdata->capacitance_button_map->map = devm_kzalloc(dev,
			sizeof(*rmi4_pdata->capacitance_button_map->map) *
			MAX_NUMBER_OF_BUTTONS, GFP_KERNEL);
		if (!rmi4_pdata->capacitance_button_map->map)
			return -ENOMEM;
		if (num_buttons <= MAX_NUMBER_OF_BUTTONS) {
			rc = of_property_read_u32_array(np,
				"synaptics,button-map", button_map,
				num_buttons);
			if (rc) {
				dev_err(dev, "Unable to read key codes\n");
				return rc;
			}
			for (i = 0; i < num_buttons; i++)
				rmi4_pdata->capacitance_button_map->map[i] =
					button_map[i];
			rmi4_pdata->capacitance_button_map->nbuttons =
				num_buttons;
		} else {
			return -EINVAL;
		}
	}
	return 0;
}
#else
/* Non-DT builds: platform data must come from board files. */
static inline int synaptics_rmi4_parse_dt(struct device *dev,
				struct synaptics_rmi4_platform_data *rmi4_pdata)
{
	return 0;
}
#endif

/**
 * synaptics_rmi4_irq_enable()
 *
 * Called by synaptics_rmi4_probe() and the power management functions
 * in this driver and also exported to other expansion Function modules
 * such as rmi_dev.
 *
 * This function handles the enabling and disabling of the attention
 * irq including the setting up of the ISR thread.
 */
static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
		bool enable)
{
	int retval = 0;
	unsigned char *intr_status;

	if (enable) {
		/* Idempotent: already enabled is a successful no-op. */
		if (rmi4_data->irq_enabled)
			return retval;

		intr_status = kzalloc(rmi4_data->num_of_intr_regs, GFP_KERNEL);
		if (!intr_status)
			return -ENOMEM;
		/* Clear interrupts first */
		retval = synaptics_rmi4_i2c_read(rmi4_data,
				rmi4_data->f01_data_base_addr + 1,
				intr_status,
				rmi4_data->num_of_intr_regs);
		kfree(intr_status);
		if (retval < 0)
			return retval;

		enable_irq(rmi4_data->irq);

		rmi4_data->irq_enabled = true;
	} else {
		if (rmi4_data->irq_enabled) {
			disable_irq(rmi4_data->irq);
			rmi4_data->irq_enabled = false;
		}
	}

	return retval;
}

/**
 * synaptics_rmi4_f11_init()
 *
 * Called by synaptics_rmi4_query_device().
 *
 * This function parses information from the Function 11 registers
 * and determines the number of fingers supported, x and y data ranges,
 * offset to the associated interrupt status register, interrupt bit
 * mask, and gathers finger data acquisition capabilities from the query
 * registers.
 */
static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	unsigned char ii;
	unsigned char intr_offset;
	unsigned char abs_data_size;
	unsigned char abs_data_blk_size;
	unsigned char query[F11_STD_QUERY_LEN];
	unsigned char control[F11_STD_CTRL_LEN];

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base,
			query,
			sizeof(query));
	if (retval < 0)
		return retval;

	/* Maximum number of fingers supported */
	/* Encoded value 0-4 means count+1; 5 means ten fingers. */
	if ((query[1] & MASK_3BIT) <= 4)
		fhandler->num_of_data_points = (query[1] & MASK_3BIT) + 1;
	else if ((query[1] & MASK_3BIT) == 5)
		fhandler->num_of_data_points = 10;

	rmi4_data->num_of_fingers = fhandler->num_of_data_points;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.ctrl_base,
			control,
			sizeof(control));
	if (retval < 0)
		return retval;

	/* Maximum x */
	/* 12-bit maximum: 8 low bits in control[6], 4 high bits in control[7]. */
	rmi4_data->sensor_max_x = ((control[6] & MASK_8BIT) << 0) |
			((control[7] & MASK_4BIT) << 8);
	if (rmi4_data->board->modify_reso) {
		if (rmi4_data->board->panel_maxx) {
			if (rmi4_data->board->panel_maxx >= F11_MAX_X) {
				dev_err(&rmi4_data->i2c_client->dev,
						"F11 max_x value out of bound.");
				return -EINVAL;
			}
			/* Push the DT panel maximum back into the controller. */
			if (rmi4_data->sensor_max_x !=
					rmi4_data->board->panel_maxx) {
				rmi4_data->sensor_max_x =
						rmi4_data->board->panel_maxx;
				control[6] = rmi4_data->board->panel_maxx &
						MASK_8BIT;
				control[7] = (rmi4_data->board->panel_maxx >>
						8) & MASK_4BIT;
				retval = synaptics_rmi4_i2c_write(rmi4_data,
						fhandler->full_addr.ctrl_base,
						control,
						sizeof(control));
				if (retval < 0)
					return retval;
			}
		}
	}

	/* Maximum y */
	rmi4_data->sensor_max_y = ((control[8] & MASK_8BIT) << 0) |
			((control[9] & MASK_4BIT) << 8);
	if (rmi4_data->board->modify_reso) {
		if (rmi4_data->board->panel_maxy) {
			if (rmi4_data->board->panel_maxy >= F11_MAX_Y) {
				dev_err(&rmi4_data->i2c_client->dev,
						"F11 max_y value out of bound.");
				return -EINVAL;
			}
			if (rmi4_data->sensor_max_y !=
					rmi4_data->board->panel_maxy) {
				rmi4_data->sensor_max_y =
						rmi4_data->board->panel_maxy;
				control[8] = rmi4_data->board->panel_maxy &
						MASK_8BIT;
				control[9] = (rmi4_data->board->panel_maxy >>
						8) & MASK_4BIT;
				retval = synaptics_rmi4_i2c_write(rmi4_data,
						fhandler->full_addr.ctrl_base,
						control,
						sizeof(control));
				if (retval < 0)
					return retval;
			}
		}
	}

	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__,
			fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	rmi4_data->max_touch_width = MAX_F11_TOUCH_WIDTH;

	/* Index of the interrupt status register holding this function's bits. */
	fhandler->intr_reg_num = (intr_count + 7) / 8;
	if (fhandler->intr_reg_num != 0)
		fhandler->intr_reg_num -= 1;

	/* Set an enable bit for each data source */
	intr_offset = intr_count % 8;
	fhandler->intr_mask = 0;
	for (ii = intr_offset;
			ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset);
			ii++)
		fhandler->intr_mask |= 1 << ii;

	/* Per-finger data block is 5 bytes with absolute data, 3 without. */
	abs_data_size = query[5] & MASK_2BIT;
	abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
	fhandler->size_of_data_register_block = abs_data_blk_size;

	return retval;
}

/*
 * Write the report-enable bits to F12 control register 28. The register
 * address is latched in a function-local static on the first non-zero
 * call so later callers may pass 0 to reuse it.
 */
static int synaptics_rmi4_f12_set_enables(struct synaptics_rmi4_data *rmi4_data,
		unsigned short ctrl28)
{
	int retval;
	static unsigned short ctrl_28_address;

	if (ctrl28)
		ctrl_28_address = ctrl28;

	retval = synaptics_rmi4_i2c_write(rmi4_data,
			ctrl_28_address,
			&rmi4_data->report_enable,
			sizeof(rmi4_data->report_enable));
	if (retval < 0)
		return retval;

	return retval;
}

/**
 * synaptics_rmi4_f12_init()
 *
 * Called by synaptics_rmi4_query_device().
 *
 * This function parses information from the Function 12 registers and
 * determines the number of fingers supported, offset to the data1
 * register, x and y data ranges, offset to the associated interrupt
 * status register, interrupt bit mask, and allocates memory resources
 * for finger data acquisition.
 */
static int synaptics_rmi4_f12_init(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler,
		struct synaptics_rmi4_fn_desc *fd,
		unsigned int intr_count)
{
	int retval;
	unsigned char ii;
	unsigned char intr_offset;
	unsigned char size_of_2d_data;
	unsigned char size_of_query8;
	unsigned char ctrl_8_offset;
	unsigned char ctrl_23_offset;
	unsigned char ctrl_28_offset;
	unsigned char num_of_fingers;
	struct synaptics_rmi4_f12_extra_data *extra_data;
	struct synaptics_rmi4_f12_query_5 query_5;
	struct synaptics_rmi4_f12_query_8 query_8;
	struct synaptics_rmi4_f12_ctrl_8 ctrl_8;
	struct synaptics_rmi4_f12_ctrl_23 ctrl_23;

	fhandler->fn_number = fd->fn_number;
	fhandler->num_of_data_sources = fd->intr_src_count;

	size_of_2d_data = sizeof(struct synaptics_rmi4_f12_finger_data);

	fhandler->extra = kmalloc(sizeof(*extra_data), GFP_KERNEL);
	if (!fhandler->extra)
		return -ENOMEM;
	extra_data = (struct synaptics_rmi4_f12_extra_data *)fhandler->extra;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base + 5,
			query_5.data,
			sizeof(query_5.data));
	if (retval < 0)
		goto free_function_handler_mem;

	/*
	 * F12 control registers are packed: each register's offset is the
	 * sum of the presence bits of all lower-numbered registers.
	 */
	ctrl_8_offset = query_5.ctrl0_is_present +
			query_5.ctrl1_is_present +
			query_5.ctrl2_is_present +
			query_5.ctrl3_is_present +
			query_5.ctrl4_is_present +
			query_5.ctrl5_is_present +
			query_5.ctrl6_is_present +
			query_5.ctrl7_is_present;

	ctrl_23_offset = ctrl_8_offset +
			query_5.ctrl8_is_present +
			query_5.ctrl9_is_present +
			query_5.ctrl10_is_present +
			query_5.ctrl11_is_present +
			query_5.ctrl12_is_present +
			query_5.ctrl13_is_present +
			query_5.ctrl14_is_present +
			query_5.ctrl15_is_present +
			query_5.ctrl16_is_present +
			query_5.ctrl17_is_present +
			query_5.ctrl18_is_present +
			query_5.ctrl19_is_present +
			query_5.ctrl20_is_present +
			query_5.ctrl21_is_present +
			query_5.ctrl22_is_present;

	ctrl_28_offset = ctrl_23_offset +
			query_5.ctrl23_is_present +
			query_5.ctrl24_is_present +
			query_5.ctrl25_is_present +
			query_5.ctrl26_is_present +
			query_5.ctrl27_is_present;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.ctrl_base + ctrl_23_offset,
			ctrl_23.data,
			sizeof(ctrl_23.data));
	if (retval < 0)
		goto free_function_handler_mem;

	/* Maximum number of fingers supported */
	fhandler->num_of_data_points = min(ctrl_23.max_reported_objects,
			(unsigned char)F12_FINGERS_TO_SUPPORT);

	num_of_fingers = fhandler->num_of_data_points;
	rmi4_data->num_of_fingers = num_of_fingers;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base + 7,
			&size_of_query8,
			sizeof(size_of_query8));
	if (retval < 0)
		goto free_function_handler_mem;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base + 8,
			query_8.data,
			size_of_query8);
	if (retval < 0)
		goto free_function_handler_mem;

	/* Determine the presence of the Data0 register */
	extra_data->data1_offset = query_8.data0_is_present;

	/* Data15 (object attention bitmap) offset uses the same packing rule. */
	if ((size_of_query8 >= 3) && (query_8.data15_is_present)) {
		extra_data->data15_offset = query_8.data0_is_present +
				query_8.data1_is_present +
				query_8.data2_is_present +
				query_8.data3_is_present +
				query_8.data4_is_present +
				query_8.data5_is_present +
				query_8.data6_is_present +
				query_8.data7_is_present +
				query_8.data8_is_present +
				query_8.data9_is_present +
				query_8.data10_is_present +
				query_8.data11_is_present +
				query_8.data12_is_present +
				query_8.data13_is_present +
				query_8.data14_is_present;
		extra_data->data15_size = (num_of_fingers + 7) / 8;
	} else {
		extra_data->data15_size = 0;
	}

	rmi4_data->report_enable = RPT_DEFAULT;
#ifdef REPORT_2D_Z
	rmi4_data->report_enable |= RPT_Z;
#endif
#ifdef REPORT_2D_W
	rmi4_data->report_enable |= (RPT_WX | RPT_WY);
#endif

	retval = synaptics_rmi4_f12_set_enables(rmi4_data,
			fhandler->full_addr.ctrl_base + ctrl_28_offset);
	if (retval < 0)
		goto free_function_handler_mem;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.ctrl_base + ctrl_8_offset,
			ctrl_8.data,
			sizeof(ctrl_8.data));
	if (retval < 0)
		goto free_function_handler_mem;

	/* Maximum x */
	rmi4_data->sensor_max_x =
			((unsigned short)ctrl_8.max_x_coord_lsb << 0) |
			((unsigned short)ctrl_8.max_x_coord_msb << 8);
	if (rmi4_data->board->modify_reso) {
		if (rmi4_data->board->panel_maxx) {
			if (rmi4_data->board->panel_maxx >= F12_MAX_X) {
				dev_err(&rmi4_data->i2c_client->dev,
						"F12 max_x value out of bound.");
				retval = -EINVAL;
				goto free_function_handler_mem;
			}
			/* Push the DT panel maximum back into the controller. */
			if (rmi4_data->sensor_max_x !=
					rmi4_data->board->panel_maxx) {
				rmi4_data->sensor_max_x =
						rmi4_data->board->panel_maxx;
				ctrl_8.max_x_coord_lsb = (unsigned char)
						(rmi4_data->board->panel_maxx &
						MASK_8BIT);
				ctrl_8.max_x_coord_msb = (unsigned char)
						((rmi4_data->board->panel_maxx >>
						8) & MASK_8BIT);
				retval = synaptics_rmi4_i2c_write(rmi4_data,
						fhandler->full_addr.ctrl_base +
						ctrl_8_offset,
						ctrl_8.data,
						sizeof(ctrl_8.data));
				if (retval < 0)
					goto free_function_handler_mem;
			}
		}
	}

	/* Maximum y */
	rmi4_data->sensor_max_y =
			((unsigned short)ctrl_8.max_y_coord_lsb << 0) |
			((unsigned short)ctrl_8.max_y_coord_msb << 8);
	if (rmi4_data->board->modify_reso) {
		if (rmi4_data->board->panel_maxy) {
			if (rmi4_data->board->panel_maxy >= F12_MAX_Y) {
				dev_err(&rmi4_data->i2c_client->dev,
						"F12 max_y value out of bound.");
				retval = -EINVAL;
				goto free_function_handler_mem;
			}
			if (rmi4_data->sensor_max_y !=
					rmi4_data->board->panel_maxy) {
				rmi4_data->sensor_max_y =
						rmi4_data->board->panel_maxy;
				ctrl_8.max_y_coord_lsb = (unsigned char)
						(rmi4_data->board->panel_maxy &
						MASK_8BIT);
				ctrl_8.max_y_coord_msb = (unsigned char)
						((rmi4_data->board->panel_maxy >>
						8) & MASK_8BIT);
				retval = synaptics_rmi4_i2c_write(rmi4_data,
						fhandler->full_addr.ctrl_base +
						ctrl_8_offset,
						ctrl_8.data,
						sizeof(ctrl_8.data));
				if (retval < 0)
					goto free_function_handler_mem;
			}
		}
	}

	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Function %02x max x = %d max y = %d\n",
			__func__,
			fhandler->fn_number,
			rmi4_data->sensor_max_x,
			rmi4_data->sensor_max_y);

	rmi4_data->num_of_rx = ctrl_8.num_of_rx;
	rmi4_data->num_of_tx = ctrl_8.num_of_tx;
	rmi4_data->max_touch_width = max(rmi4_data->num_of_rx,
			rmi4_data->num_of_tx);

	/* Index of the interrupt status register holding this function's bits. */
	fhandler->intr_reg_num = (intr_count + 7) / 8;
	if (fhandler->intr_reg_num != 0)
		fhandler->intr_reg_num -= 1;

	/* Set an enable bit for each data source */
	intr_offset = intr_count % 8;
	fhandler->intr_mask = 0;
	for (ii = intr_offset;
			ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset);
			ii++)
		fhandler->intr_mask |= 1 << ii;

	/* Allocate memory for finger data storage space */
	fhandler->data_size = num_of_fingers * size_of_2d_data;
	fhandler->data = kmalloc(fhandler->data_size, GFP_KERNEL);
	if (!fhandler->data) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to alloc mem for function handler data\n",
				__func__);
		retval = -ENOMEM;
		goto free_function_handler_mem;
	}

	return retval;

free_function_handler_mem:
	/*
	 * NOTE(review): fhandler->extra is left dangling after this kfree -
	 * presumably the caller discards fhandler on error; confirm it does
	 * not free extra again.
	 */
	kfree(fhandler->extra);
	return retval;
}

/*
 * Allocate and populate the F1A (0-D button) handle: reads the button
 * query registers and sizes the data buffer and key-code map from the
 * reported button count. Buffers are owned by fhandler->data.
 */
static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
		struct synaptics_rmi4_fn *fhandler)
{
	int retval;
	struct synaptics_rmi4_f1a_handle *f1a;

	f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
	if (!f1a)
		return -ENOMEM;

	fhandler->data = (void *)f1a;

	retval = synaptics_rmi4_i2c_read(rmi4_data,
			fhandler->full_addr.query_base,
			f1a->button_query.data,
			sizeof(f1a->button_query.data));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to read query registers\n",
				__func__);
		return retval;
	}

	f1a->button_count = f1a->button_query.max_button_count + 1;
	f1a->button_bitmask_size = (f1a->button_count + 7) / 8;

	f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
			sizeof(*(f1a->button_data_buffer)),
			GFP_KERNEL);
	if (!f1a->button_data_buffer)
		return -ENOMEM;

	f1a->button_map = kcalloc(f1a->button_count,
			sizeof(*(f1a->button_map)),
			GFP_KERNEL);
	if (!f1a->button_map)
		return -ENOMEM;

	return 0;
}

static int synaptics_rmi4_capacitance_button_map(
		struct
synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler) { unsigned char ii; struct synaptics_rmi4_f1a_handle *f1a = fhandler->data; const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board; if (!pdata->capacitance_button_map) { dev_info(&rmi4_data->i2c_client->dev, "%s: capacitance_button_map not in use\n", __func__); return 0; } else if (!pdata->capacitance_button_map->map) { dev_err(&rmi4_data->i2c_client->dev, "%s: Button map is missing in board file\n", __func__); return -ENODEV; } else { if (pdata->capacitance_button_map->nbuttons != f1a->button_count) { f1a->valid_button_count = min(f1a->button_count, pdata->capacitance_button_map->nbuttons); } else { f1a->valid_button_count = f1a->button_count; } for (ii = 0; ii < f1a->valid_button_count; ii++) f1a->button_map[ii] = pdata->capacitance_button_map->map[ii]; } return 0; } static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler) { struct synaptics_rmi4_f1a_handle *f1a = fhandler->data; if (f1a) { kfree(f1a->button_data_buffer); kfree(f1a->button_map); kfree(f1a); fhandler->data = NULL; } } static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler, struct synaptics_rmi4_fn_desc *fd, unsigned int intr_count) { int retval; unsigned char ii; unsigned short intr_offset; fhandler->fn_number = fd->fn_number; fhandler->num_of_data_sources = fd->intr_src_count; fhandler->intr_reg_num = (intr_count + 7) / 8; if (fhandler->intr_reg_num != 0) fhandler->intr_reg_num -= 1; /* Set an enable bit for each data source */ intr_offset = intr_count % 8; fhandler->intr_mask = 0; for (ii = intr_offset; ii < ((fd->intr_src_count & MASK_3BIT) + intr_offset); ii++) fhandler->intr_mask |= 1 << ii; retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler); if (retval < 0) goto error_exit; retval = synaptics_rmi4_capacitance_button_map(rmi4_data, fhandler); if (retval < 0) goto error_exit; rmi4_data->button_0d_enabled = 1; return 0; error_exit: 
synaptics_rmi4_f1a_kfree(fhandler); return retval; } static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler, struct synaptics_rmi4_fn_desc *rmi_fd, int page_number) { *fhandler = kzalloc(sizeof(**fhandler), GFP_KERNEL); if (!(*fhandler)) return -ENOMEM; (*fhandler)->full_addr.data_base = (rmi_fd->data_base_addr | (page_number << 8)); (*fhandler)->full_addr.ctrl_base = (rmi_fd->ctrl_base_addr | (page_number << 8)); (*fhandler)->full_addr.cmd_base = (rmi_fd->cmd_base_addr | (page_number << 8)); (*fhandler)->full_addr.query_base = (rmi_fd->query_base_addr | (page_number << 8)); (*fhandler)->fn_number = rmi_fd->fn_number; return 0; } /** * synaptics_rmi4_query_device_info() * * Called by synaptics_rmi4_query_device(). * */ static int synaptics_rmi4_query_device_info( struct synaptics_rmi4_data *rmi4_data) { int retval; unsigned char f01_query[F01_STD_QUERY_LEN]; struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info); unsigned char pkg_id[4]; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_query_base_addr, f01_query, sizeof(f01_query)); if (retval < 0) return retval; /* RMI Version 4.0 currently supported */ rmi->version_major = 4; rmi->version_minor = 0; rmi->manufacturer_id = f01_query[0]; rmi->product_props = f01_query[1]; rmi->product_info[0] = f01_query[2] & MASK_7BIT; rmi->product_info[1] = f01_query[3] & MASK_7BIT; rmi->date_code[0] = f01_query[4] & MASK_5BIT; rmi->date_code[1] = f01_query[5] & MASK_4BIT; rmi->date_code[2] = f01_query[6] & MASK_5BIT; rmi->tester_id = ((f01_query[7] & MASK_7BIT) << 8) | (f01_query[8] & MASK_7BIT); rmi->serial_number = ((f01_query[9] & MASK_7BIT) << 8) | (f01_query[10] & MASK_7BIT); memcpy(rmi->product_id_string, &f01_query[11], 10); if (rmi->manufacturer_id != 1) { dev_err(&rmi4_data->i2c_client->dev, "%s: Non-Synaptics device found, manufacturer ID = %d\n", __func__, rmi->manufacturer_id); } retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_query_base_addr + F01_PACKAGE_ID_OFFSET, 
pkg_id, sizeof(pkg_id)); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to read device package id (code %d)\n", __func__, retval); return retval; } rmi->package_id = (pkg_id[1] << 8) | pkg_id[0]; rmi->package_id_rev = (pkg_id[3] << 8) | pkg_id[2]; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET, rmi->build_id, sizeof(rmi->build_id)); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "%s: Failed to read firmware build id (code %d)\n", __func__, retval); return retval; } return 0; } /* * This function checks whether the fhandler already existis in the * support_fn_list or not. * If it exists then return 1 as found or return 0 as not found. * * Called by synaptics_rmi4_query_device(). */ static int synaptics_rmi4_check_fn_list(struct synaptics_rmi4_data *rmi4_data, struct synaptics_rmi4_fn *fhandler) { int found = 0; struct synaptics_rmi4_fn *new_fhandler; struct synaptics_rmi4_device_info *rmi; rmi = &(rmi4_data->rmi4_mod_info); mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) list_for_each_entry(new_fhandler, &rmi->support_fn_list, link) if (new_fhandler->fn_number == fhandler->fn_number) found = 1; mutex_unlock(&rmi->support_fn_list_mutex); return found; } /** * synaptics_rmi4_query_device() * * Called by synaptics_rmi4_probe(). * * This function scans the page description table, records the offsets * to the register types of Function $01, sets up the function handlers * for Function $11 and Function $12, determines the number of interrupt * sources from the sensor, adds valid Functions with data inputs to the * Function linked list, parses information from the query registers of * Function $01, and enables the interrupt sources from the valid Functions * with data inputs. 
 */
static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
{
	int retval, found;
	unsigned char ii;
	unsigned char page_number;
	unsigned char intr_count = 0;
	unsigned char data_sources = 0;
	unsigned short pdt_entry_addr;
	unsigned short intr_addr;
	struct synaptics_rmi4_f01_device_status status;
	struct synaptics_rmi4_fn_desc rmi_fd;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	/* Scan the page description tables of the pages to service */
	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
				pdt_entry_addr -= PDT_ENTRY_SIZE) {
			/*
			 * The page bits are OR-ed into the loop variable
			 * itself; this relies on the page number being
			 * constant for the whole inner loop.
			 */
			pdt_entry_addr |= (page_number << 8);

			retval = synaptics_rmi4_i2c_read(rmi4_data,
					pdt_entry_addr,
					(unsigned char *)&rmi_fd,
					sizeof(rmi_fd));
			if (retval < 0)
				return retval;

			fhandler = NULL;
			found = 0;

			if (rmi_fd.fn_number == 0) {
				dev_dbg(&rmi4_data->i2c_client->dev,
						"%s: Reached end of PDT\n",
						__func__);
				break;
			}

			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: F%02x found (page %d)\n",
					__func__, rmi_fd.fn_number,
					page_number);

			switch (rmi_fd.fn_number) {
			case SYNAPTICS_RMI4_F01:
				rmi4_data->f01_query_base_addr =
						rmi_fd.query_base_addr;
				rmi4_data->f01_ctrl_base_addr =
						rmi_fd.ctrl_base_addr;
				rmi4_data->f01_data_base_addr =
						rmi_fd.data_base_addr;
				rmi4_data->f01_cmd_base_addr =
						rmi_fd.cmd_base_addr;

				retval =
				synaptics_rmi4_query_device_info(rmi4_data);
				if (retval < 0)
					return retval;

				retval = synaptics_rmi4_i2c_read(rmi4_data,
						rmi4_data->f01_data_base_addr,
						status.data,
						sizeof(status.data));
				if (retval < 0)
					return retval;

				/* Wait out a firmware CRC check in progress. */
				while (status.status_code ==
						STATUS_CRC_IN_PROGRESS) {
					usleep_range(1000, 1001);
					retval = synaptics_rmi4_i2c_read(
						rmi4_data,
						rmi4_data->f01_data_base_addr,
						status.data,
						sizeof(status.data));
					if (retval < 0)
						return retval;
				}

				if (status.flash_prog == 1) {
					pr_notice("%s: In flash prog mode, status = 0x%02x\n",
							__func__,
							status.status_code);
					goto flash_prog_mode;
				}
				break;
			case SYNAPTICS_RMI4_F11:
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
						"%s: Failed to alloc for F%d\n",
						__func__,
						rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f11_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;
			case SYNAPTICS_RMI4_F12:
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
						"%s: Failed to alloc for F%d\n",
						__func__,
						rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f12_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;
			case SYNAPTICS_RMI4_F1A:
				if (rmi_fd.intr_src_count == 0)
					break;

				retval = synaptics_rmi4_alloc_fh(&fhandler,
						&rmi_fd, page_number);
				if (retval < 0) {
					dev_err(&rmi4_data->i2c_client->dev,
						"%s: Failed to alloc for F%d\n",
						__func__,
						rmi_fd.fn_number);
					return retval;
				}

				retval = synaptics_rmi4_f1a_init(rmi4_data,
						fhandler, &rmi_fd, intr_count);
				if (retval < 0)
					return retval;
				break;
			}

			/* Accumulate the interrupt count */
			intr_count += (rmi_fd.intr_src_count & MASK_3BIT);

			if (fhandler && rmi_fd.intr_src_count) {
				/* Want to check whether the fhandler already
				exists in the support_fn_list or not.
				If not found then add it to the list, otherwise
				free the memory allocated to it.
				*/
				found = synaptics_rmi4_check_fn_list(rmi4_data,
						fhandler);

				if (!found) {
					mutex_lock(
						&rmi->support_fn_list_mutex);
					list_add_tail(&fhandler->link,
						&rmi->support_fn_list);
					mutex_unlock(
						&rmi->support_fn_list_mutex);
				} else {
					if (fhandler->fn_number ==
							SYNAPTICS_RMI4_F1A) {
						synaptics_rmi4_f1a_kfree(
							fhandler);
					} else {
						kfree(fhandler->data);
						kfree(fhandler->extra);
					}
					kfree(fhandler);
				}
			}
		}
	}

flash_prog_mode:
	rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
	dev_dbg(&rmi4_data->i2c_client->dev,
			"%s: Number of interrupt registers = %d\n",
			__func__, rmi4_data->num_of_intr_regs);

	memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));

	/*
	 * Map out the interrupt bit masks for the interrupt sources
	 * from the registered function handlers.
	 */
	mutex_lock(&rmi->support_fn_list_mutex);
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry(fhandler, &rmi->support_fn_list, link)
			data_sources += fhandler->num_of_data_sources;
	}
	mutex_unlock(&rmi->support_fn_list_mutex);
	if (data_sources) {
		mutex_lock(&rmi->support_fn_list_mutex);
		if (!list_empty(&rmi->support_fn_list)) {
			list_for_each_entry(fhandler,
					&rmi->support_fn_list, link) {
				if (fhandler->num_of_data_sources) {
					rmi4_data->intr_mask[fhandler->
						intr_reg_num] |=
						fhandler->intr_mask;
				}
			}
		}
		mutex_unlock(&rmi->support_fn_list_mutex);
	}

	/* Enable the interrupt sources */
	for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
		if (rmi4_data->intr_mask[ii] != 0x00) {
			dev_dbg(&rmi4_data->i2c_client->dev,
					"%s: Interrupt enable mask %d = 0x%02x\n",
					__func__, ii,
					rmi4_data->intr_mask[ii]);
			intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
			retval = synaptics_rmi4_i2c_write(rmi4_data,
					intr_addr,
					&(rmi4_data->intr_mask[ii]),
					sizeof(rmi4_data->intr_mask[ii]));
			if (retval < 0)
				return retval;
		}
	}

	return 0;
}

/*
 * Locate F01 in the page description table and issue a software reset
 * command to it, then wait the board-specified reset delay.
 */
static int synaptics_rmi4_reset_command(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	int page_number;
	unsigned char command = 0x01;
	unsigned short pdt_entry_addr;
	struct synaptics_rmi4_fn_desc rmi_fd;
	bool done = false;

	/* Scan the page description tables of the pages to service */
	for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
		for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
				pdt_entry_addr -= PDT_ENTRY_SIZE) {
			retval = synaptics_rmi4_i2c_read(rmi4_data,
					pdt_entry_addr,
					(unsigned char *)&rmi_fd,
					sizeof(rmi_fd));
			if (retval < 0)
				return retval;

			if (rmi_fd.fn_number == 0)
				break;

			switch (rmi_fd.fn_number) {
			case SYNAPTICS_RMI4_F01:
				rmi4_data->f01_cmd_base_addr =
						rmi_fd.cmd_base_addr;
				done = true;
				break;
			}
		}
		if (done) {
			dev_info(&rmi4_data->i2c_client->dev,
				"%s: Find F01 in page description table 0x%x\n",
				__func__, rmi4_data->f01_cmd_base_addr);
			break;
		}
	}

	if (!done) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Cannot find F01 in page description table\n",
			__func__);
		return -EINVAL;
	}

	retval = synaptics_rmi4_i2c_write(rmi4_data,
			rmi4_data->f01_cmd_base_addr,
			&command,
			sizeof(command));
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"%s: Failed to issue reset command, error = %d\n",
			__func__, retval);
		return retval;
	}

	msleep(rmi4_data->board->reset_delay);

	return retval;
};

/*
 * Reset the controller, tear down every registered function handler,
 * and rebuild the handler list by re-running device query.
 */
static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;
	struct synaptics_rmi4_fn *fhandler;
	struct synaptics_rmi4_fn *next_fhandler;
	struct synaptics_rmi4_device_info *rmi;

	rmi = &(rmi4_data->rmi4_mod_info);

	retval = synaptics_rmi4_reset_command(rmi4_data);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to send command reset\n",
				__func__);
		return retval;
	}

	/*
	 * NOTE(review): unlike the other call sites, the list is walked here
	 * without taking support_fn_list_mutex - confirm callers serialize
	 * against the interrupt/detection paths.
	 */
	if (!list_empty(&rmi->support_fn_list)) {
		list_for_each_entry_safe(fhandler, next_fhandler,
				&rmi->support_fn_list, link) {
			if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
				synaptics_rmi4_f1a_kfree(fhandler);
			else {
				kfree(fhandler->data);
				kfree(fhandler->extra);
			}
			kfree(fhandler);
		}
	}

	INIT_LIST_HEAD(&rmi->support_fn_list);

	retval = synaptics_rmi4_query_device(rmi4_data);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to query device\n",
				__func__);
		return retval;
	}

	return 0;
}

/**
 * synaptics_rmi4_detection_work()
 *
 * Called by the kernel at the scheduled time.
 *
 * This function is a self-rearming work thread that checks for the
 * insertion and removal of other expansion Function modules such as
 * rmi_dev and calls their initialization and removal callback functions
 * accordingly.
 */
static void synaptics_rmi4_detection_work(struct work_struct *work)
{
	struct synaptics_rmi4_exp_fn *exp_fhandler, *next_list_entry;
	struct synaptics_rmi4_data *rmi4_data =
			container_of(work, struct synaptics_rmi4_data,
			det_work.work);

	mutex_lock(&exp_fn_list_mutex);
	if (!list_empty(&exp_fn_list)) {
		list_for_each_entry_safe(exp_fhandler,
				next_list_entry,
				&exp_fn_list,
				link) {
			/* Entry registered but not yet initialized. */
			if ((exp_fhandler->func_init != NULL) &&
					(exp_fhandler->inserted == false)) {
				if (exp_fhandler->func_init(rmi4_data) < 0) {
					list_del(&exp_fhandler->link);
					kfree(exp_fhandler);
				} else {
					exp_fhandler->inserted = true;
				}
			/* Entry flagged for removal (func_init cleared). */
			} else if ((exp_fhandler->func_init == NULL) &&
					(exp_fhandler->inserted == true)) {
				exp_fhandler->func_remove(rmi4_data);
				list_del(&exp_fhandler->link);
				kfree(exp_fhandler);
			}
		}
	}
	mutex_unlock(&exp_fn_list_mutex);
}

/**
 * synaptics_rmi4_new_function()
 *
 * Called by other expansion Function modules in their module init and
 * module exit functions.
 *
 * This function is used by other expansion Function modules such as
 * rmi_dev to register themselves with the driver by providing their
 * initialization and removal callback function pointers so that they
 * can be inserted or removed dynamically at module init and exit times,
 * respectively.
 */
void synaptics_rmi4_new_function(enum exp_fn fn_type, bool insert,
		int (*func_init)(struct synaptics_rmi4_data *rmi4_data),
		void (*func_remove)(struct synaptics_rmi4_data *rmi4_data),
		void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
				unsigned char intr_mask))
{
	struct synaptics_rmi4_exp_fn *exp_fhandler;

	/* Lazily set up the shared expansion-function list on first use. */
	if (!exp_fn_inited) {
		mutex_init(&exp_fn_list_mutex);
		INIT_LIST_HEAD(&exp_fn_list);
		exp_fn_inited = 1;
	}

	mutex_lock(&exp_fn_list_mutex);
	if (insert) {
		exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
		if (!exp_fhandler) {
			pr_err("%s: Failed to alloc mem for expansion function\n",
					__func__);
			goto exit;
		}
		exp_fhandler->fn_type = fn_type;
		exp_fhandler->func_init = func_init;
		exp_fhandler->func_attn = func_attn;
		exp_fhandler->func_remove = func_remove;
		exp_fhandler->inserted = false;
		list_add_tail(&exp_fhandler->link, &exp_fn_list);
	} else {
		/*
		 * Removal is deferred: the entry is only flagged here and
		 * actually unlinked by synaptics_rmi4_detection_work().
		 */
		if (!list_empty(&exp_fn_list)) {
			list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
				if (exp_fhandler->func_init == func_init) {
					exp_fhandler->inserted = false;
					exp_fhandler->func_init = NULL;
					exp_fhandler->func_attn = NULL;
					goto exit;
				}
			}
		}
	}

exit:
	mutex_unlock(&exp_fn_list_mutex);
}
EXPORT_SYMBOL(synaptics_rmi4_new_function);

/* Set the regulator load only when the regulator supports voltage ops. */
static int reg_set_optimum_mode_check(struct regulator *reg, int load_uA)
{
	return (regulator_count_voltages(reg) > 0) ?
		regulator_set_optimum_mode(reg, load_uA) : 0;
}

/*
 * Acquire (on == true) or release (on == false) the vdd and optional
 * i2c pull-up regulators and program their voltage ranges.
 */
static int synaptics_rmi4_regulator_configure(struct synaptics_rmi4_data
						*rmi4_data, bool on)
{
	int retval;

	if (on == false)
		goto hw_shutdown;

	rmi4_data->vdd = regulator_get(&rmi4_data->i2c_client->dev,
					"vdd");
	if (IS_ERR(rmi4_data->vdd)) {
		dev_err(&rmi4_data->i2c_client->dev,
				"%s: Failed to get vdd regulator\n",
				__func__);
		return PTR_ERR(rmi4_data->vdd);
	}

	if (regulator_count_voltages(rmi4_data->vdd) > 0) {
		retval = regulator_set_voltage(rmi4_data->vdd,
			RMI4_VTG_MIN_UV, RMI4_VTG_MAX_UV);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"regulator set_vtg failed retval =%d\n",
				retval);
			goto err_set_vtg_vdd;
		}
	}

	if (rmi4_data->board->i2c_pull_up) {
		rmi4_data->vcc_i2c = regulator_get(
				&rmi4_data->i2c_client->dev,
				"vcc_i2c");
		if (IS_ERR(rmi4_data->vcc_i2c)) {
			dev_err(&rmi4_data->i2c_client->dev,
					"%s: Failed to get i2c regulator\n",
					__func__);
			retval = PTR_ERR(rmi4_data->vcc_i2c);
			goto err_get_vtg_i2c;
		}

		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0) {
			retval = regulator_set_voltage(rmi4_data->vcc_i2c,
				RMI4_I2C_VTG_MIN_UV, RMI4_I2C_VTG_MAX_UV);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"reg set i2c vtg failed retval =%d\n",
					retval);
				goto err_set_vtg_i2c;
			}
		}
	}
	return 0;

err_set_vtg_i2c:
	if (rmi4_data->board->i2c_pull_up)
		regulator_put(rmi4_data->vcc_i2c);
err_get_vtg_i2c:
	if (regulator_count_voltages(rmi4_data->vdd) > 0)
		regulator_set_voltage(rmi4_data->vdd, 0, RMI4_VTG_MAX_UV);
err_set_vtg_vdd:
	regulator_put(rmi4_data->vdd);
	return retval;

hw_shutdown:
	if (regulator_count_voltages(rmi4_data->vdd) > 0)
		regulator_set_voltage(rmi4_data->vdd, 0, RMI4_VTG_MAX_UV);
	regulator_put(rmi4_data->vdd);
	if (rmi4_data->board->i2c_pull_up) {
		if (regulator_count_voltages(rmi4_data->vcc_i2c) > 0)
			regulator_set_voltage(rmi4_data->vcc_i2c, 0,
					RMI4_I2C_VTG_MAX_UV);
		regulator_put(rmi4_data->vcc_i2c);
	}
	return 0;
};

/*
 * Enable (on == true) or disable (on == false) power to the sensor via
 * the vdd and optional i2c pull-up regulators.
 */
static int synaptics_rmi4_power_on(struct synaptics_rmi4_data *rmi4_data,
					bool on)
{
	int retval;

	if (on == false)
		goto power_off;

	retval = reg_set_optimum_mode_check(rmi4_data->vdd,
		RMI4_ACTIVE_LOAD_UA);
	if (retval < 0) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd set_opt failed rc=%d\n",
			retval);
		return retval;
	}

	retval = regulator_enable(rmi4_data->vdd);
	if (retval) {
		dev_err(&rmi4_data->i2c_client->dev,
			"Regulator vdd enable failed rc=%d\n",
			retval);
		goto error_reg_en_vdd;
	}

	if (rmi4_data->board->i2c_pull_up) {
		retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c,
			RMI4_I2C_LOAD_UA);
		if (retval < 0) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c set_opt failed rc=%d\n",
				retval);
			goto error_reg_opt_i2c;
		}

		retval = regulator_enable(rmi4_data->vcc_i2c);
		if (retval) {
			dev_err(&rmi4_data->i2c_client->dev,
				"Regulator vcc_i2c enable failed rc=%d\n",
				retval);
			goto error_reg_en_vcc_i2c;
		}
	}
	return 0;

error_reg_en_vcc_i2c:
	if (rmi4_data->board->i2c_pull_up)
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
error_reg_opt_i2c:
	regulator_disable(rmi4_data->vdd);
error_reg_en_vdd:
	reg_set_optimum_mode_check(rmi4_data->vdd, 0);
	return retval;

power_off:
	reg_set_optimum_mode_check(rmi4_data->vdd, 0);
	regulator_disable(rmi4_data->vdd);
	if (rmi4_data->board->i2c_pull_up) {
		reg_set_optimum_mode_check(rmi4_data->vcc_i2c, 0);
		regulator_disable(rmi4_data->vcc_i2c);
	}
	return 0;
}

/*
 * Look up the active/suspend/release pinctrl states.  The release state
 * is optional; failure to find it is only logged.
 */
static int synaptics_rmi4_pinctrl_init(struct synaptics_rmi4_data *rmi4_data)
{
	int retval;

	/* Get pinctrl if target uses pinctrl */
	rmi4_data->ts_pinctrl = devm_pinctrl_get(
			&(rmi4_data->i2c_client->dev));
	if (IS_ERR_OR_NULL(rmi4_data->ts_pinctrl)) {
		retval = PTR_ERR(rmi4_data->ts_pinctrl);
		dev_dbg(&rmi4_data->i2c_client->dev,
			"Target does not use pinctrl %d\n", retval);
		goto err_pinctrl_get;
	}

	rmi4_data->pinctrl_state_active
		= pinctrl_lookup_state(rmi4_data->ts_pinctrl,
				PINCTRL_STATE_ACTIVE);
	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_active)) {
		retval = PTR_ERR(rmi4_data->pinctrl_state_active);
		dev_err(&rmi4_data->i2c_client->dev,
			"Can not lookup %s pinstate %d\n",
			PINCTRL_STATE_ACTIVE, retval);
		goto err_pinctrl_lookup;
	}

	rmi4_data->pinctrl_state_suspend
		= pinctrl_lookup_state(rmi4_data->ts_pinctrl,
			PINCTRL_STATE_SUSPEND);
	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_suspend)) {
		retval = PTR_ERR(rmi4_data->pinctrl_state_suspend);
		dev_err(&rmi4_data->i2c_client->dev,
			"Can not lookup %s pinstate %d\n",
			PINCTRL_STATE_SUSPEND, retval);
		goto err_pinctrl_lookup;
	}

	/* Release state is optional; only log the lookup failure. */
	rmi4_data->pinctrl_state_release
		= pinctrl_lookup_state(rmi4_data->ts_pinctrl,
			PINCTRL_STATE_RELEASE);
	if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) {
		retval = PTR_ERR(rmi4_data->pinctrl_state_release);
		dev_dbg(&rmi4_data->i2c_client->dev,
			"Can not lookup %s pinstate %d\n",
			PINCTRL_STATE_RELEASE, retval);
	}

	return 0;

err_pinctrl_lookup:
	devm_pinctrl_put(rmi4_data->ts_pinctrl);
err_pinctrl_get:
	rmi4_data->ts_pinctrl = NULL;
	return retval;
}

/*
 * Request and configure (on == true) or free (on == false) the irq and
 * reset GPIOs.  Without a valid reset GPIO a soft reset command is
 * issued instead of toggling the line.
 */
static int synaptics_rmi4_gpio_configure(struct synaptics_rmi4_data *rmi4_data,
					bool on)
{
	int retval = 0;

	if (on) {
		if (gpio_is_valid(rmi4_data->board->irq_gpio)) {
			/* configure touchscreen irq gpio */
			retval = gpio_request(rmi4_data->board->irq_gpio,
				"rmi4_irq_gpio");
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to request gpio [%d]\n",
					rmi4_data->board->irq_gpio);
				goto err_irq_gpio_req;
			}
			retval = gpio_direction_input(rmi4_data->board->
				irq_gpio);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio [%d]\n",
					rmi4_data->board->irq_gpio);
				goto err_irq_gpio_dir;
			}
		} else {
			dev_err(&rmi4_data->i2c_client->dev,
				"irq gpio not provided\n");
			goto err_irq_gpio_req;
		}

		if (gpio_is_valid(rmi4_data->board->reset_gpio)) {
			/* configure touchscreen reset out gpio */
			retval = gpio_request(rmi4_data->board->reset_gpio,
					"rmi4_reset_gpio");
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to request gpio [%d]\n",
					rmi4_data->board->reset_gpio);
				goto err_irq_gpio_dir;
			}

			retval = gpio_direction_output(rmi4_data->board->
				reset_gpio, 1);
			if (retval) {
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio [%d]\n",
					rmi4_data->board->reset_gpio);
				goto err_reset_gpio_dir;
			}

			gpio_set_value(rmi4_data->board->reset_gpio, 1);
			msleep(rmi4_data->board->reset_delay);
		} else
			synaptics_rmi4_reset_command(rmi4_data);

		return 0;
	}

	if (rmi4_data->board->disable_gpios) {
		if (gpio_is_valid(rmi4_data->board->irq_gpio))
			gpio_free(rmi4_data->board->irq_gpio);
		if (gpio_is_valid(rmi4_data->board->reset_gpio)) {
			/*
			 * This is intended to save leakage current
			 * only. Even if the call(gpio_direction_input)
			 * fails, only leakage current will be more but
			 * functionality will not be affected.
			 */
			retval = gpio_direction_input(rmi4_data->
					board->reset_gpio);
			if (retval) {
				/*
				 * NOTE(review): message prints irq_gpio but
				 * refers to the reset gpio - confirm intent.
				 */
				dev_err(&rmi4_data->i2c_client->dev,
					"unable to set direction for gpio [%d]\n",
					rmi4_data->board->irq_gpio);
			}
			gpio_free(rmi4_data->board->reset_gpio);
		}
	}

	return 0;

err_reset_gpio_dir:
	if (gpio_is_valid(rmi4_data->board->reset_gpio))
		gpio_free(rmi4_data->board->reset_gpio);
err_irq_gpio_dir:
	if (gpio_is_valid(rmi4_data->board->irq_gpio))
		gpio_free(rmi4_data->board->irq_gpio);
err_irq_gpio_req:
	return retval;
}

/**
 * synaptics_rmi4_probe()
 *
 * Called by the kernel when an association with an I2C device of the
 * same name is made (after doing i2c_add_driver).
 *
 * This function allocates and initializes the resources for the driver
 * as an input driver, turns on the power to the sensor, queries the
 * sensor for its supported Functions and characteristics, registers
 * the driver to the input subsystem, sets up the interrupt, handles
 * the registration of the early_suspend and late_resume functions,
 * and creates a work queue for detection of other expansion Function
 * modules.
*/ static int synaptics_rmi4_probe(struct i2c_client *client, const struct i2c_device_id *dev_id) { int retval = 0; unsigned char ii; unsigned char attr_count; struct synaptics_rmi4_f1a_handle *f1a; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_fn *next_fhandler; struct synaptics_rmi4_data *rmi4_data; struct synaptics_rmi4_device_info *rmi; struct synaptics_rmi4_platform_data *platform_data = client->dev.platform_data; struct dentry *temp; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "%s: SMBus byte data not supported\n", __func__); return -EIO; } if (client->dev.of_node) { platform_data = devm_kzalloc(&client->dev, sizeof(*platform_data), GFP_KERNEL); if (!platform_data) { dev_err(&client->dev, "Failed to allocate memory\n"); return -ENOMEM; } retval = synaptics_rmi4_parse_dt(&client->dev, platform_data); if (retval) return retval; } else { platform_data = client->dev.platform_data; } if (!platform_data) { dev_err(&client->dev, "%s: No platform data found\n", __func__); return -EINVAL; } rmi4_data = kzalloc(sizeof(*rmi4_data) * 2, GFP_KERNEL); if (!rmi4_data) return -ENOMEM; rmi = &(rmi4_data->rmi4_mod_info); rmi4_data->input_dev = input_allocate_device(); if (rmi4_data->input_dev == NULL) { retval = -ENOMEM; goto err_input_device; } rmi4_data->i2c_client = client; rmi4_data->current_page = MASK_8BIT; rmi4_data->board = platform_data; rmi4_data->touch_stopped = false; rmi4_data->sensor_sleep = false; rmi4_data->irq_enabled = false; rmi4_data->fw_updating = false; rmi4_data->suspended = false; rmi4_data->i2c_read = synaptics_rmi4_i2c_read; rmi4_data->i2c_write = synaptics_rmi4_i2c_write; rmi4_data->irq_enable = synaptics_rmi4_irq_enable; rmi4_data->reset_device = synaptics_rmi4_reset_device; rmi4_data->flip_x = rmi4_data->board->x_flip; rmi4_data->flip_y = rmi4_data->board->y_flip; if (rmi4_data->board->fw_image_name) snprintf(rmi4_data->fw_image_name, NAME_BUFFER_SIZE, "%s", 
rmi4_data->board->fw_image_name); rmi4_data->input_dev->name = DRIVER_NAME; rmi4_data->input_dev->phys = INPUT_PHYS_NAME; rmi4_data->input_dev->id.bustype = BUS_I2C; rmi4_data->input_dev->id.product = SYNAPTICS_DSX_DRIVER_PRODUCT; rmi4_data->input_dev->id.version = SYNAPTICS_DSX_DRIVER_VERSION; rmi4_data->input_dev->dev.parent = &client->dev; input_set_drvdata(rmi4_data->input_dev, rmi4_data); set_bit(EV_SYN, rmi4_data->input_dev->evbit); set_bit(EV_KEY, rmi4_data->input_dev->evbit); set_bit(EV_ABS, rmi4_data->input_dev->evbit); set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit); set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit); #ifdef INPUT_PROP_DIRECT set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit); #endif retval = synaptics_rmi4_regulator_configure(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to configure regulators\n"); goto err_reg_configure; } retval = synaptics_rmi4_power_on(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to power on\n"); goto err_power_device; } retval = synaptics_rmi4_pinctrl_init(rmi4_data); if (!retval && rmi4_data->ts_pinctrl) { /* * Pinctrl handle is optional. If pinctrl handle is found * let pins to be configured in active state. 
If not found * continue further without error */ if (pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_active)) dev_err(&rmi4_data->i2c_client->dev, "Can not select %s pinstate\n", PINCTRL_STATE_ACTIVE); } retval = synaptics_rmi4_gpio_configure(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "Failed to configure gpios\n"); goto err_gpio_config; } init_waitqueue_head(&rmi4_data->wait); mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex)); INIT_LIST_HEAD(&rmi->support_fn_list); mutex_init(&rmi->support_fn_list_mutex); retval = synaptics_rmi4_query_device(rmi4_data); if (retval < 0) { dev_err(&client->dev, "%s: Failed to query device\n", __func__); goto err_free_gpios; } if (platform_data->detect_device) { retval = synaptics_rmi4_parse_dt_children(&client->dev, platform_data, rmi4_data); if (retval < 0) dev_err(&client->dev, "%s: Failed to parse device tree property\n", __func__); } if (rmi4_data->board->disp_maxx) rmi4_data->disp_maxx = rmi4_data->board->disp_maxx; else rmi4_data->disp_maxx = rmi4_data->sensor_max_x; if (rmi4_data->board->disp_maxy) rmi4_data->disp_maxy = rmi4_data->board->disp_maxy; else rmi4_data->disp_maxy = rmi4_data->sensor_max_y; if (rmi4_data->board->disp_minx) rmi4_data->disp_minx = rmi4_data->board->disp_minx; else rmi4_data->disp_minx = 0; if (rmi4_data->board->disp_miny) rmi4_data->disp_miny = rmi4_data->board->disp_miny; else rmi4_data->disp_miny = 0; input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, rmi4_data->disp_minx, rmi4_data->disp_maxx, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, rmi4_data->disp_miny, rmi4_data->disp_maxy, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_PRESSURE, 0, 255, 0, 0); #ifdef REPORT_2D_W input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0, rmi4_data->max_touch_width, 0, 0); input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MINOR, 0, rmi4_data->max_touch_width, 0, 0); #endif #ifdef TYPE_B_PROTOCOL 
input_mt_init_slots(rmi4_data->input_dev, rmi4_data->num_of_fingers, 0); #endif i2c_set_clientdata(client, rmi4_data); f1a = NULL; mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry(fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) f1a = fhandler->data; } } mutex_unlock(&rmi->support_fn_list_mutex); if (f1a) { for (ii = 0; ii < f1a->valid_button_count; ii++) { set_bit(f1a->button_map[ii], rmi4_data->input_dev->keybit); input_set_capability(rmi4_data->input_dev, EV_KEY, f1a->button_map[ii]); } } retval = input_register_device(rmi4_data->input_dev); if (retval) { dev_err(&client->dev, "%s: Failed to register input device\n", __func__); goto err_register_input; } configure_sleep(rmi4_data); if (!exp_fn_inited) { mutex_init(&exp_fn_list_mutex); INIT_LIST_HEAD(&exp_fn_list); exp_fn_inited = 1; } rmi4_data->det_workqueue = create_singlethread_workqueue("rmi_det_workqueue"); INIT_DELAYED_WORK(&rmi4_data->det_work, synaptics_rmi4_detection_work); queue_delayed_work(rmi4_data->det_workqueue, &rmi4_data->det_work, msecs_to_jiffies(EXP_FN_DET_INTERVAL)); rmi4_data->irq = gpio_to_irq(platform_data->irq_gpio); retval = request_threaded_irq(rmi4_data->irq, NULL, synaptics_rmi4_irq, platform_data->irq_flags, DRIVER_NAME, rmi4_data); rmi4_data->irq_enabled = true; if (retval < 0) { dev_err(&client->dev, "%s: Failed to create irq thread\n", __func__); goto err_enable_irq; } rmi4_data->dir = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); if (rmi4_data->dir == NULL || IS_ERR(rmi4_data->dir)) { dev_err(&client->dev, "%s: Failed to create debugfs directory, rc = %ld\n", __func__, PTR_ERR(rmi4_data->dir)); retval = PTR_ERR(rmi4_data->dir); goto err_create_debugfs_dir; } temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, rmi4_data->dir, rmi4_data, &debug_suspend_fops); if (temp == NULL || IS_ERR(temp)) { dev_err(&client->dev, "%s: Failed to create suspend debugfs file, rc = %ld\n", __func__, 
PTR_ERR(temp)); retval = PTR_ERR(temp); goto err_create_debugfs_file; } for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { retval = sysfs_create_file(&client->dev.kobj, &attrs[attr_count].attr); if (retval < 0) { dev_err(&client->dev, "%s: Failed to create sysfs attributes\n", __func__); goto err_sysfs; } } synaptics_rmi4_sensor_wake(rmi4_data); retval = synaptics_rmi4_irq_enable(rmi4_data, true); if (retval < 0) { dev_err(&client->dev, "%s: Failed to enable attention interrupt\n", __func__); goto err_sysfs; } synaptics_secure_touch_init(rmi4_data); synaptics_secure_touch_stop(rmi4_data, 1); retval = synaptics_rmi4_check_configuration(rmi4_data); if (retval < 0) { dev_err(&client->dev, "Failed to check configuration\n"); return retval; } return retval; err_sysfs: for (attr_count--; attr_count >= 0; attr_count--) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } err_create_debugfs_file: debugfs_remove_recursive(rmi4_data->dir); err_create_debugfs_dir: free_irq(rmi4_data->irq, rmi4_data); err_enable_irq: cancel_delayed_work_sync(&rmi4_data->det_work); flush_workqueue(rmi4_data->det_workqueue); destroy_workqueue(rmi4_data->det_workqueue); input_unregister_device(rmi4_data->input_dev); err_register_input: mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry_safe(fhandler, next_fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) synaptics_rmi4_f1a_kfree(fhandler); else { kfree(fhandler->data); kfree(fhandler->extra); } kfree(fhandler); } } mutex_unlock(&rmi->support_fn_list_mutex); err_free_gpios: if (gpio_is_valid(rmi4_data->board->reset_gpio)) gpio_free(rmi4_data->board->reset_gpio); if (gpio_is_valid(rmi4_data->board->irq_gpio)) gpio_free(rmi4_data->board->irq_gpio); err_gpio_config: if (rmi4_data->ts_pinctrl) { if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) { devm_pinctrl_put(rmi4_data->ts_pinctrl); rmi4_data->ts_pinctrl = 
NULL; } else { retval = pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_release); if (retval) pr_err("failed to select release pinctrl state\n"); } } synaptics_rmi4_power_on(rmi4_data, false); err_power_device: synaptics_rmi4_regulator_configure(rmi4_data, false); err_reg_configure: input_free_device(rmi4_data->input_dev); rmi4_data->input_dev = NULL; err_input_device: kfree(rmi4_data); return retval; } /** * synaptics_rmi4_remove() * * Called by the kernel when the association with an I2C device of the * same name is broken (when the driver is unloaded). * * This function terminates the work queue, stops sensor data acquisition, * frees the interrupt, unregisters the driver from the input subsystem, * turns off the power to the sensor, and frees other allocated resources. */ static int synaptics_rmi4_remove(struct i2c_client *client) { unsigned char attr_count; struct synaptics_rmi4_fn *fhandler; struct synaptics_rmi4_fn *next_fhandler; struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client); struct synaptics_rmi4_device_info *rmi; int retval; rmi = &(rmi4_data->rmi4_mod_info); debugfs_remove_recursive(rmi4_data->dir); cancel_delayed_work_sync(&rmi4_data->det_work); flush_workqueue(rmi4_data->det_workqueue); destroy_workqueue(rmi4_data->det_workqueue); rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); free_irq(rmi4_data->irq, rmi4_data); for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) { sysfs_remove_file(&rmi4_data->input_dev->dev.kobj, &attrs[attr_count].attr); } input_unregister_device(rmi4_data->input_dev); mutex_lock(&rmi->support_fn_list_mutex); if (!list_empty(&rmi->support_fn_list)) { list_for_each_entry_safe(fhandler, next_fhandler, &rmi->support_fn_list, link) { if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) synaptics_rmi4_f1a_kfree(fhandler); else { kfree(fhandler->data); kfree(fhandler->extra); } kfree(fhandler); } } mutex_unlock(&rmi->support_fn_list_mutex); if 
(gpio_is_valid(rmi4_data->board->reset_gpio)) gpio_free(rmi4_data->board->reset_gpio); if (gpio_is_valid(rmi4_data->board->irq_gpio)) gpio_free(rmi4_data->board->irq_gpio); if (rmi4_data->ts_pinctrl) { if (IS_ERR_OR_NULL(rmi4_data->pinctrl_state_release)) { devm_pinctrl_put(rmi4_data->ts_pinctrl); rmi4_data->ts_pinctrl = NULL; } else { retval = pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_release); if (retval < 0) pr_err("failed to select release pinctrl state\n"); } } synaptics_rmi4_power_on(rmi4_data, false); synaptics_rmi4_regulator_configure(rmi4_data, false); kfree(rmi4_data); return 0; } /** * synaptics_rmi4_sensor_sleep() * * Called by synaptics_rmi4_early_suspend() and synaptics_rmi4_suspend(). * * This function stops finger data acquisition and puts the sensor to sleep. */ static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data) { int retval; struct synaptics_rmi4_f01_device_control_0 device_ctrl; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_ctrl_base_addr, device_ctrl.data, sizeof(device_ctrl.data)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to enter sleep mode\n", __func__); rmi4_data->sensor_sleep = false; return; } device_ctrl.sleep_mode = SENSOR_SLEEP; device_ctrl.nosleep = NO_SLEEP_OFF; retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_ctrl_base_addr, device_ctrl.data, sizeof(device_ctrl.data)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to enter sleep mode\n", __func__); rmi4_data->sensor_sleep = false; return; } rmi4_data->sensor_sleep = true; } /** * synaptics_rmi4_sensor_wake() * * Called by synaptics_rmi4_resume() and synaptics_rmi4_late_resume(). * * This function wakes the sensor from sleep. 
*/ static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data) { int retval; struct synaptics_rmi4_f01_device_control_0 device_ctrl; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_ctrl_base_addr, device_ctrl.data, sizeof(device_ctrl.data)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to wake from sleep mode\n", __func__); rmi4_data->sensor_sleep = true; return; } if (device_ctrl.nosleep == NO_SLEEP_OFF && device_ctrl.sleep_mode == NORMAL_OPERATION) { rmi4_data->sensor_sleep = false; return; } device_ctrl.sleep_mode = NORMAL_OPERATION; device_ctrl.nosleep = NO_SLEEP_OFF; retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_ctrl_base_addr, device_ctrl.data, sizeof(device_ctrl.data)); if (retval < 0) { dev_err(&(rmi4_data->input_dev->dev), "%s: Failed to wake from sleep mode\n", __func__); rmi4_data->sensor_sleep = true; return; } rmi4_data->sensor_sleep = false; } #if defined(CONFIG_FB) static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) { struct fb_event *evdata = data; int *blank; struct synaptics_rmi4_data *rmi4_data = container_of(self, struct synaptics_rmi4_data, fb_notif); if (evdata && evdata->data && rmi4_data && rmi4_data->i2c_client) { if (event == FB_EARLY_EVENT_BLANK) synaptics_secure_touch_stop(rmi4_data, 0); else if (event == FB_EVENT_BLANK) { blank = evdata->data; if (*blank == FB_BLANK_UNBLANK) synaptics_rmi4_resume( &(rmi4_data->input_dev->dev)); else if (*blank == FB_BLANK_POWERDOWN) synaptics_rmi4_suspend( &(rmi4_data->input_dev->dev)); } } return 0; } #elif defined(CONFIG_HAS_EARLYSUSPEND) /** * synaptics_rmi4_early_suspend() * * Called by the kernel during the early suspend phase when the system * enters suspend. * * This function calls synaptics_rmi4_sensor_sleep() to stop finger * data acquisition and put the sensor to sleep. 
*/ static void synaptics_rmi4_early_suspend(struct early_suspend *h) { struct synaptics_rmi4_data *rmi4_data = container_of(h, struct synaptics_rmi4_data, early_suspend); if (rmi4_data->stay_awake) rmi4_data->staying_awake = true; else rmi4_data->staying_awake = false; synaptics_secure_touch_stop(rmi4_data, 0); rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); synaptics_rmi4_irq_enable(rmi4_data, false); synaptics_rmi4_sensor_sleep(rmi4_data); if (rmi4_data->full_pm_cycle) synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev)); } /** * synaptics_rmi4_late_resume() * * Called by the kernel during the late resume phase when the system * wakes up from suspend. * * This function goes through the sensor wake process if the system wakes * up from early suspend (without going into suspend). */ static void synaptics_rmi4_late_resume(struct early_suspend *h) { struct synaptics_rmi4_data *rmi4_data = container_of(h, struct synaptics_rmi4_data, early_suspend); if (rmi4_data->staying_awake) return; synaptics_secure_touch_stop(rmi4_data, 0); if (rmi4_data->full_pm_cycle) synaptics_rmi4_resume(&(rmi4_data->input_dev->dev)); if (rmi4_data->sensor_sleep == true) { synaptics_rmi4_sensor_wake(rmi4_data); rmi4_data->touch_stopped = false; synaptics_rmi4_irq_enable(rmi4_data, true); } } #endif static int synaptics_rmi4_regulator_lpm(struct synaptics_rmi4_data *rmi4_data, bool on) { int retval; int load_ua; if (on == false) goto regulator_hpm; if (rmi4_data->board->i2c_pull_up) { load_ua = rmi4_data->board->power_down_enable ? 
0 : RMI4_I2C_LPM_LOAD_UA; retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c, load_ua); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c set_opt failed rc=%d\n", retval); goto fail_regulator_lpm; } if (rmi4_data->board->power_down_enable) { retval = regulator_disable(rmi4_data->vcc_i2c); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c disable failed rc=%d\n", retval); goto fail_regulator_lpm; } } } load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_LPM_LOAD_UA; retval = reg_set_optimum_mode_check(rmi4_data->vdd, load_ua); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vdd_ana set_opt failed rc=%d\n", retval); goto fail_regulator_lpm; } if (rmi4_data->board->power_down_enable) { retval = regulator_disable(rmi4_data->vdd); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vdd disable failed rc=%d\n", retval); goto fail_regulator_lpm; } } return 0; regulator_hpm: retval = reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_ACTIVE_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_ana set_opt failed rc=%d\n", retval); goto fail_regulator_hpm; } if (rmi4_data->board->power_down_enable) { retval = regulator_enable(rmi4_data->vdd); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vdd enable failed rc=%d\n", retval); goto fail_regulator_hpm; } } if (rmi4_data->board->i2c_pull_up) { retval = reg_set_optimum_mode_check(rmi4_data->vcc_i2c, RMI4_I2C_LOAD_UA); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c set_opt failed rc=%d\n", retval); goto fail_regulator_hpm; } if (rmi4_data->board->power_down_enable) { retval = regulator_enable(rmi4_data->vcc_i2c); if (retval) { dev_err(&rmi4_data->i2c_client->dev, "Regulator vcc_i2c enable failed rc=%d\n", retval); goto fail_regulator_hpm; } } } return 0; fail_regulator_lpm: reg_set_optimum_mode_check(rmi4_data->vdd, RMI4_ACTIVE_LOAD_UA); if (rmi4_data->board->i2c_pull_up) 
reg_set_optimum_mode_check(rmi4_data->vcc_i2c, RMI4_I2C_LOAD_UA); return retval; fail_regulator_hpm: load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_LPM_LOAD_UA; reg_set_optimum_mode_check(rmi4_data->vdd, load_ua); if (rmi4_data->board->i2c_pull_up) { load_ua = rmi4_data->board->power_down_enable ? 0 : RMI4_I2C_LPM_LOAD_UA; reg_set_optimum_mode_check(rmi4_data->vcc_i2c, load_ua); } return retval; } static int synaptics_rmi4_check_configuration(struct synaptics_rmi4_data *rmi4_data) { int retval; struct synaptics_rmi4_f01_device_control_0 device_control; struct synaptics_rmi4_f01_device_status device_status; retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_data_base_addr, device_status.data, sizeof(device_status.data)); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Failed to read device status, rc=%d\n", retval); return retval; } if (device_status.unconfigured) { retval = synaptics_rmi4_query_device(rmi4_data); if (retval < 0) { dev_err(&rmi4_data->i2c_client->dev, "Failed to query device, rc=%d\n", retval); return retval; } retval = synaptics_rmi4_i2c_read(rmi4_data, rmi4_data->f01_ctrl_base_addr, device_control.data, sizeof(device_control.data)); if (retval < 0) return retval; device_control.configured = DEVICE_CONFIGURED; retval = synaptics_rmi4_i2c_write(rmi4_data, rmi4_data->f01_ctrl_base_addr, device_control.data, sizeof(device_control.data)); if (retval < 0) return retval; } return 0; } /** * synaptics_rmi4_suspend() * * Called by the kernel during the suspend phase when the system * enters suspend. * * This function stops finger data acquisition and puts the sensor to * sleep (if not already done so during the early suspend phase), * disables the interrupt, and turns off the power to the sensor. 
*/ #ifdef CONFIG_PM static int synaptics_rmi4_suspend(struct device *dev) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); int retval; if (rmi4_data->stay_awake) { rmi4_data->staying_awake = true; return 0; } else rmi4_data->staying_awake = false; if (rmi4_data->suspended) { dev_info(dev, "Already in suspend state\n"); return 0; } synaptics_secure_touch_stop(rmi4_data, 1); if (!rmi4_data->fw_updating) { if (!rmi4_data->sensor_sleep) { rmi4_data->touch_stopped = true; wake_up(&rmi4_data->wait); synaptics_rmi4_irq_enable(rmi4_data, false); synaptics_rmi4_sensor_sleep(rmi4_data); } synaptics_rmi4_release_all(rmi4_data); retval = synaptics_rmi4_regulator_lpm(rmi4_data, true); if (retval < 0) { dev_err(dev, "failed to enter low power mode\n"); goto err_lpm_regulator; } } else { dev_err(dev, "Firmware updating, cannot go into suspend mode\n"); return 0; } if (rmi4_data->board->disable_gpios) { if (rmi4_data->ts_pinctrl) { retval = pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_suspend); if (retval < 0) dev_err(dev, "failed to select idle pinctrl state\n"); } retval = synaptics_rmi4_gpio_configure(rmi4_data, false); if (retval < 0) { dev_err(dev, "failed to put gpios in suspend state\n"); goto err_gpio_configure; } } rmi4_data->suspended = true; return 0; err_gpio_configure: if (rmi4_data->ts_pinctrl) { retval = pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_active); if (retval < 0) dev_err(dev, "failed to select get default pinctrl state\n"); } synaptics_rmi4_regulator_lpm(rmi4_data, false); err_lpm_regulator: if (rmi4_data->sensor_sleep) { synaptics_rmi4_sensor_wake(rmi4_data); synaptics_rmi4_irq_enable(rmi4_data, true); rmi4_data->touch_stopped = false; } return retval; } /** * synaptics_rmi4_resume() * * Called by the kernel during the resume phase when the system * wakes up from suspend. 
* * This function turns on the power to the sensor, wakes the sensor * from sleep, enables the interrupt, and starts finger data * acquisition. */ static int synaptics_rmi4_resume(struct device *dev) { struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); int retval; if (rmi4_data->staying_awake) return 0; if (!rmi4_data->suspended) { dev_info(dev, "Already in awake state\n"); return 0; } synaptics_secure_touch_stop(rmi4_data, 1); retval = synaptics_rmi4_regulator_lpm(rmi4_data, false); if (retval < 0) { dev_err(dev, "Failed to enter active power mode\n"); return retval; } if (rmi4_data->board->disable_gpios) { if (rmi4_data->ts_pinctrl) { retval = pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_active); if (retval < 0) dev_err(dev, "failed to select default pinctrl state\n"); } retval = synaptics_rmi4_gpio_configure(rmi4_data, true); if (retval < 0) { dev_err(dev, "Failed to put gpios in active state\n"); goto err_gpio_configure; } } synaptics_rmi4_sensor_wake(rmi4_data); rmi4_data->touch_stopped = false; synaptics_rmi4_irq_enable(rmi4_data, true); retval = synaptics_rmi4_check_configuration(rmi4_data); if (retval < 0) { dev_err(dev, "Failed to check configuration\n"); goto err_check_configuration; } rmi4_data->suspended = false; return 0; err_check_configuration: synaptics_rmi4_irq_enable(rmi4_data, false); rmi4_data->touch_stopped = true; synaptics_rmi4_sensor_sleep(rmi4_data); if (rmi4_data->board->disable_gpios) { if (rmi4_data->ts_pinctrl) { retval = pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_suspend); if (retval < 0) dev_err(dev, "failed to select idle pinctrl state\n"); } synaptics_rmi4_gpio_configure(rmi4_data, false); } synaptics_rmi4_regulator_lpm(rmi4_data, true); wake_up(&rmi4_data->wait); return retval; err_gpio_configure: if (rmi4_data->ts_pinctrl) { retval = pinctrl_select_state(rmi4_data->ts_pinctrl, rmi4_data->pinctrl_state_suspend); if (retval < 0) pr_err("failed to select idle pinctrl 
state\n"); } synaptics_rmi4_regulator_lpm(rmi4_data, true); wake_up(&rmi4_data->wait); return retval; } #if (!defined(CONFIG_FB) && !defined(CONFIG_HAS_EARLYSUSPEND)) static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = { .suspend = synaptics_rmi4_suspend, .resume = synaptics_rmi4_resume, }; #else static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = { }; #endif #else static int synaptics_rmi4_suspend(struct device *dev) { return 0; } static int synaptics_rmi4_resume(struct device *dev) { return 0; } #endif static const struct i2c_device_id synaptics_rmi4_id_table[] = { {DRIVER_NAME, 0}, {}, }; MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table); #ifdef CONFIG_OF static struct of_device_id rmi4_match_table[] = { { .compatible = "synaptics,rmi4",}, { }, }; #else #define rmi4_match_table NULL #endif static struct i2c_driver synaptics_rmi4_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = rmi4_match_table, #ifdef CONFIG_PM .pm = &synaptics_rmi4_dev_pm_ops, #endif }, .probe = synaptics_rmi4_probe, .remove = synaptics_rmi4_remove, .id_table = synaptics_rmi4_id_table, }; /** * synaptics_rmi4_init() * * Called by the kernel during do_initcalls (if built-in) * or when the driver is loaded (if a module). * * This function registers the driver to the I2C subsystem. * */ static int __init synaptics_rmi4_init(void) { return i2c_add_driver(&synaptics_rmi4_driver); } /** * synaptics_rmi4_exit() * * Called by the kernel when the driver is unloaded. * * This function unregisters the driver from the I2C subsystem. * */ static void __exit synaptics_rmi4_exit(void) { i2c_del_driver(&synaptics_rmi4_driver); } module_init(synaptics_rmi4_init); module_exit(synaptics_rmi4_exit); MODULE_AUTHOR("Synaptics, Inc."); MODULE_DESCRIPTION("Synaptics RMI4 I2C Touch Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
focuslau/kernel-moto-photon-ics
drivers/macintosh/windfarm_smu_sensors.c
858
11562
/* * Windfarm PowerMac thermal control. SMU based sensors * * (c) Copyright 2005 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * Released under the term of the GNU GPL v2. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/completion.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sections.h> #include <asm/smu.h> #include "windfarm.h" #define VERSION "0.2" #undef DEBUG #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) do { } while(0) #endif /* * Various SMU "partitions" calibration objects for which we * keep pointers here for use by bits & pieces of the driver */ static struct smu_sdbp_cpuvcp *cpuvcp; static int cpuvcp_version; static struct smu_sdbp_cpudiode *cpudiode; static struct smu_sdbp_slotspow *slotspow; static u8 *debugswitches; /* * SMU basic sensors objects */ static LIST_HEAD(smu_ads); struct smu_ad_sensor { struct list_head link; u32 reg; /* index in SMU */ struct wf_sensor sens; }; #define to_smu_ads(c) container_of(c, struct smu_ad_sensor, sens) static void smu_ads_release(struct wf_sensor *sr) { struct smu_ad_sensor *ads = to_smu_ads(sr); kfree(ads); } static int smu_read_adc(u8 id, s32 *value) { struct smu_simple_cmd cmd; DECLARE_COMPLETION_ONSTACK(comp); int rc; rc = smu_queue_simple(&cmd, SMU_CMD_READ_ADC, 1, smu_done_complete, &comp, id); if (rc) return rc; wait_for_completion(&comp); if (cmd.cmd.status != 0) return cmd.cmd.status; if (cmd.cmd.reply_len != 2) { printk(KERN_ERR "winfarm: read ADC 0x%x returned %d bytes !\n", id, cmd.cmd.reply_len); return -EIO; } *value = *((u16 *)cmd.buffer); return 0; } static int smu_cputemp_get(struct wf_sensor *sr, s32 *value) { struct smu_ad_sensor *ads = to_smu_ads(sr); int rc; s32 val; s64 scaled; rc = smu_read_adc(ads->reg, &val); if (rc) { printk(KERN_ERR 
"windfarm: read CPU temp failed, err %d\n", rc); return rc; } /* Ok, we have to scale & adjust, taking units into account */ scaled = (s64)(((u64)val) * (u64)cpudiode->m_value); scaled >>= 3; scaled += ((s64)cpudiode->b_value) << 9; *value = (s32)(scaled << 1); return 0; } static int smu_cpuamp_get(struct wf_sensor *sr, s32 *value) { struct smu_ad_sensor *ads = to_smu_ads(sr); s32 val, scaled; int rc; rc = smu_read_adc(ads->reg, &val); if (rc) { printk(KERN_ERR "windfarm: read CPU current failed, err %d\n", rc); return rc; } /* Ok, we have to scale & adjust, taking units into account */ scaled = (s32)(val * (u32)cpuvcp->curr_scale); scaled += (s32)cpuvcp->curr_offset; *value = scaled << 4; return 0; } static int smu_cpuvolt_get(struct wf_sensor *sr, s32 *value) { struct smu_ad_sensor *ads = to_smu_ads(sr); s32 val, scaled; int rc; rc = smu_read_adc(ads->reg, &val); if (rc) { printk(KERN_ERR "windfarm: read CPU voltage failed, err %d\n", rc); return rc; } /* Ok, we have to scale & adjust, taking units into account */ scaled = (s32)(val * (u32)cpuvcp->volt_scale); scaled += (s32)cpuvcp->volt_offset; *value = scaled << 4; return 0; } static int smu_slotspow_get(struct wf_sensor *sr, s32 *value) { struct smu_ad_sensor *ads = to_smu_ads(sr); s32 val, scaled; int rc; rc = smu_read_adc(ads->reg, &val); if (rc) { printk(KERN_ERR "windfarm: read slots power failed, err %d\n", rc); return rc; } /* Ok, we have to scale & adjust, taking units into account */ scaled = (s32)(val * (u32)slotspow->pow_scale); scaled += (s32)slotspow->pow_offset; *value = scaled << 4; return 0; } static struct wf_sensor_ops smu_cputemp_ops = { .get_value = smu_cputemp_get, .release = smu_ads_release, .owner = THIS_MODULE, }; static struct wf_sensor_ops smu_cpuamp_ops = { .get_value = smu_cpuamp_get, .release = smu_ads_release, .owner = THIS_MODULE, }; static struct wf_sensor_ops smu_cpuvolt_ops = { .get_value = smu_cpuvolt_get, .release = smu_ads_release, .owner = THIS_MODULE, }; static struct 
wf_sensor_ops smu_slotspow_ops = { .get_value = smu_slotspow_get, .release = smu_ads_release, .owner = THIS_MODULE, }; static struct smu_ad_sensor *smu_ads_create(struct device_node *node) { struct smu_ad_sensor *ads; const char *c, *l; const u32 *v; ads = kmalloc(sizeof(struct smu_ad_sensor), GFP_KERNEL); if (ads == NULL) return NULL; c = of_get_property(node, "device_type", NULL); l = of_get_property(node, "location", NULL); if (c == NULL || l == NULL) goto fail; /* We currently pick the sensors based on the OF name and location * properties, while Darwin uses the sensor-id's. * The problem with the IDs is that they are model specific while it * looks like apple has been doing a reasonably good job at keeping * the names and locations consistents so I'll stick with the names * and locations for now. */ if (!strcmp(c, "temp-sensor") && !strcmp(l, "CPU T-Diode")) { ads->sens.ops = &smu_cputemp_ops; ads->sens.name = "cpu-temp"; if (cpudiode == NULL) { DBG("wf: cpudiode partition (%02x) not found\n", SMU_SDB_CPUDIODE_ID); goto fail; } } else if (!strcmp(c, "current-sensor") && !strcmp(l, "CPU Current")) { ads->sens.ops = &smu_cpuamp_ops; ads->sens.name = "cpu-current"; if (cpuvcp == NULL) { DBG("wf: cpuvcp partition (%02x) not found\n", SMU_SDB_CPUVCP_ID); goto fail; } } else if (!strcmp(c, "voltage-sensor") && !strcmp(l, "CPU Voltage")) { ads->sens.ops = &smu_cpuvolt_ops; ads->sens.name = "cpu-voltage"; if (cpuvcp == NULL) { DBG("wf: cpuvcp partition (%02x) not found\n", SMU_SDB_CPUVCP_ID); goto fail; } } else if (!strcmp(c, "power-sensor") && !strcmp(l, "Slots Power")) { ads->sens.ops = &smu_slotspow_ops; ads->sens.name = "slots-power"; if (slotspow == NULL) { DBG("wf: slotspow partition (%02x) not found\n", SMU_SDB_SLOTSPOW_ID); goto fail; } } else goto fail; v = of_get_property(node, "reg", NULL); if (v == NULL) goto fail; ads->reg = *v; if (wf_register_sensor(&ads->sens)) goto fail; return ads; fail: kfree(ads); return NULL; } /* * SMU Power combo sensor object 
*/ struct smu_cpu_power_sensor { struct list_head link; struct wf_sensor *volts; struct wf_sensor *amps; int fake_volts : 1; int quadratic : 1; struct wf_sensor sens; }; #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens) static struct smu_cpu_power_sensor *smu_cpu_power; static void smu_cpu_power_release(struct wf_sensor *sr) { struct smu_cpu_power_sensor *pow = to_smu_cpu_power(sr); if (pow->volts) wf_put_sensor(pow->volts); if (pow->amps) wf_put_sensor(pow->amps); kfree(pow); } static int smu_cpu_power_get(struct wf_sensor *sr, s32 *value) { struct smu_cpu_power_sensor *pow = to_smu_cpu_power(sr); s32 volts, amps, power; u64 tmps, tmpa, tmpb; int rc; rc = pow->amps->ops->get_value(pow->amps, &amps); if (rc) return rc; if (pow->fake_volts) { *value = amps * 12 - 0x30000; return 0; } rc = pow->volts->ops->get_value(pow->volts, &volts); if (rc) return rc; power = (s32)((((u64)volts) * ((u64)amps)) >> 16); if (!pow->quadratic) { *value = power; return 0; } tmps = (((u64)power) * ((u64)power)) >> 16; tmpa = ((u64)cpuvcp->power_quads[0]) * tmps; tmpb = ((u64)cpuvcp->power_quads[1]) * ((u64)power); *value = (tmpa >> 28) + (tmpb >> 28) + (cpuvcp->power_quads[2] >> 12); return 0; } static struct wf_sensor_ops smu_cpu_power_ops = { .get_value = smu_cpu_power_get, .release = smu_cpu_power_release, .owner = THIS_MODULE, }; static struct smu_cpu_power_sensor * smu_cpu_power_create(struct wf_sensor *volts, struct wf_sensor *amps) { struct smu_cpu_power_sensor *pow; pow = kmalloc(sizeof(struct smu_cpu_power_sensor), GFP_KERNEL); if (pow == NULL) return NULL; pow->sens.ops = &smu_cpu_power_ops; pow->sens.name = "cpu-power"; wf_get_sensor(volts); pow->volts = volts; wf_get_sensor(amps); pow->amps = amps; /* Some early machines need a faked voltage */ if (debugswitches && ((*debugswitches) & 0x80)) { printk(KERN_INFO "windfarm: CPU Power sensor using faked" " voltage !\n"); pow->fake_volts = 1; } else pow->fake_volts = 0; /* Try to use quadratic 
transforms on PowerMac8,1 and 9,1 for now, * I yet have to figure out what's up with 8,2 and will have to * adjust for later, unless we can 100% trust the SDB partition... */ if ((machine_is_compatible("PowerMac8,1") || machine_is_compatible("PowerMac8,2") || machine_is_compatible("PowerMac9,1")) && cpuvcp_version >= 2) { pow->quadratic = 1; DBG("windfarm: CPU Power using quadratic transform\n"); } else pow->quadratic = 0; if (wf_register_sensor(&pow->sens)) goto fail; return pow; fail: kfree(pow); return NULL; } static void smu_fetch_param_partitions(void) { const struct smu_sdbp_header *hdr; /* Get CPU voltage/current/power calibration data */ hdr = smu_get_sdb_partition(SMU_SDB_CPUVCP_ID, NULL); if (hdr != NULL) { cpuvcp = (struct smu_sdbp_cpuvcp *)&hdr[1]; /* Keep version around */ cpuvcp_version = hdr->version; } /* Get CPU diode calibration data */ hdr = smu_get_sdb_partition(SMU_SDB_CPUDIODE_ID, NULL); if (hdr != NULL) cpudiode = (struct smu_sdbp_cpudiode *)&hdr[1]; /* Get slots power calibration data if any */ hdr = smu_get_sdb_partition(SMU_SDB_SLOTSPOW_ID, NULL); if (hdr != NULL) slotspow = (struct smu_sdbp_slotspow *)&hdr[1]; /* Get debug switches if any */ hdr = smu_get_sdb_partition(SMU_SDB_DEBUG_SWITCHES_ID, NULL); if (hdr != NULL) debugswitches = (u8 *)&hdr[1]; } static int __init smu_sensors_init(void) { struct device_node *smu, *sensors, *s; struct smu_ad_sensor *volt_sensor = NULL, *curr_sensor = NULL; if (!smu_present()) return -ENODEV; /* Get parameters partitions */ smu_fetch_param_partitions(); smu = of_find_node_by_type(NULL, "smu"); if (smu == NULL) return -ENODEV; /* Look for sensors subdir */ for (sensors = NULL; (sensors = of_get_next_child(smu, sensors)) != NULL;) if (!strcmp(sensors->name, "sensors")) break; of_node_put(smu); /* Create basic sensors */ for (s = NULL; sensors && (s = of_get_next_child(sensors, s)) != NULL;) { struct smu_ad_sensor *ads; ads = smu_ads_create(s); if (ads == NULL) continue; list_add(&ads->link, &smu_ads); /* 
keep track of cpu voltage & current */ if (!strcmp(ads->sens.name, "cpu-voltage")) volt_sensor = ads; else if (!strcmp(ads->sens.name, "cpu-current")) curr_sensor = ads; } of_node_put(sensors); /* Create CPU power sensor if possible */ if (volt_sensor && curr_sensor) smu_cpu_power = smu_cpu_power_create(&volt_sensor->sens, &curr_sensor->sens); return 0; } static void __exit smu_sensors_exit(void) { struct smu_ad_sensor *ads; /* dispose of power sensor */ if (smu_cpu_power) wf_unregister_sensor(&smu_cpu_power->sens); /* dispose of basic sensors */ while (!list_empty(&smu_ads)) { ads = list_entry(smu_ads.next, struct smu_ad_sensor, link); list_del(&ads->link); wf_unregister_sensor(&ads->sens); } } module_init(smu_sensors_init); module_exit(smu_sensors_exit); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("SMU sensor objects for PowerMacs thermal control"); MODULE_LICENSE("GPL");
gpl-2.0
slz/arco-samsung-kernel-msm7x30
arch/arm/mach-msm/msm_memory_dump.c
858
2446
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <mach/msm_iomap.h>
#include <mach/msm_memory_dump.h>


/*TODO: Needs to be set to correct value */
#define DUMP_TABLE_OFFSET	0x14
#define MSM_DUMP_TABLE_VERSION	MK_TABLE(1, 0)

static struct msm_memory_dump mem_dump_data;

/*
 * Panic notifier callback: clear the dump table pointer published in
 * IMEM so a downstream dump agent does not use a possibly-stale table.
 */
static int msm_memory_dump_panic(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	writel_relaxed(0, MSM_IMEM_BASE + DUMP_TABLE_OFFSET);
	return 0;
}

static struct notifier_block msm_memory_dump_blk = {
	.notifier_call  = msm_memory_dump_panic,
};

/*
 * Append a client's dump region to the shared dump table and flush the
 * table from the data cache so it is visible in memory.
 *
 * Returns 0 on success, -EINVAL if the table was never allocated or is
 * already full.
 */
int msm_dump_table_register(struct msm_client_dump *client_entry)
{
	struct msm_client_dump *entry;
	struct msm_dump_table *table = mem_dump_data.dump_table_ptr;

	if (!table || table->num_entries >= MAX_NUM_CLIENTS)
		return -EINVAL;
	entry = &table->client_entries[table->num_entries];
	entry->id = client_entry->id;
	entry->start_addr = client_entry->start_addr;
	entry->end_addr = client_entry->end_addr;
	table->num_entries++;
	/*
	 * Flush the table to memory. Note: the end address must be one
	 * byte past the table. The previous code used
	 * "table + sizeof(struct msm_dump_table)", which scales by the
	 * element size (pointer arithmetic on a struct pointer) and so
	 * flushed sizeof(struct msm_dump_table) times too many bytes;
	 * "table + 1" is the correct one-past-the-end address.
	 */
	dmac_flush_range(table, table + 1);
	return 0;
}
EXPORT_SYMBOL(msm_dump_table_register);

/*
 * Allocate the dump table at early boot, stamp its version, and publish
 * its physical address in IMEM so dump clients can register and an
 * external dump agent can locate the table after a reset.
 */
static int __init init_memory_dump(void)
{
	struct msm_dump_table *table;

	mem_dump_data.dump_table_ptr = kzalloc(sizeof(struct msm_dump_table),
					       GFP_KERNEL);
	if (!mem_dump_data.dump_table_ptr) {
		printk(KERN_ERR "unable to allocate memory for dump table\n");
		return -ENOMEM;
	}
	table = mem_dump_data.dump_table_ptr;
	table->version = MSM_DUMP_TABLE_VERSION;
	mem_dump_data.dump_table_phys = virt_to_phys(table);
	writel_relaxed(mem_dump_data.dump_table_phys,
		       MSM_IMEM_BASE + DUMP_TABLE_OFFSET);
	atomic_notifier_chain_register(&panic_notifier_list,
				       &msm_memory_dump_blk);
	printk(KERN_INFO "MSM Memory Dump table set up\n");
	return 0;
}
early_initcall(init_memory_dump);
gpl-2.0
bachtk/linux
drivers/pci/hotplug/pciehp_pci.c
1626
3658
/* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include "../pci.h" #include "pciehp.h" int pciehp_configure_device(struct slot *p_slot) { struct pci_dev *dev; struct pci_dev *bridge = p_slot->ctrl->pcie->port; struct pci_bus *parent = bridge->subordinate; int num, ret = 0; struct controller *ctrl = p_slot->ctrl; pci_lock_rescan_remove(); dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); if (dev) { ctrl_err(ctrl, "Device %s already exists at %04x:%02x:00, cannot hot-add\n", pci_name(dev), pci_domain_nr(parent), parent->number); pci_dev_put(dev); ret = -EEXIST; goto out; } num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); if (num == 0) { ctrl_err(ctrl, "No new device found\n"); ret = -ENODEV; goto out; } list_for_each_entry(dev, &parent->devices, bus_list) if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); pci_assign_unassigned_bridge_resources(bridge); pcie_bus_configure_settings(parent); 
pci_bus_add_devices(parent); out: pci_unlock_rescan_remove(); return ret; } int pciehp_unconfigure_device(struct slot *p_slot) { int rc = 0; u8 bctl = 0; u8 presence = 0; struct pci_dev *dev, *temp; struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; u16 command; struct controller *ctrl = p_slot->ctrl; ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", __func__, pci_domain_nr(parent), parent->number); pciehp_get_adapter_status(p_slot, &presence); pci_lock_rescan_remove(); /* * Stopping an SR-IOV PF device removes all the associated VFs, * which will update the bus->devices list and confuse the * iterator. Therefore, iterate in reverse so we remove the VFs * first, then the PF. We do the same in pci_stop_bus_device(). */ list_for_each_entry_safe_reverse(dev, temp, &parent->devices, bus_list) { pci_dev_get(dev); if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl); if (bctl & PCI_BRIDGE_CTL_VGA) { ctrl_err(ctrl, "Cannot remove display device %s\n", pci_name(dev)); pci_dev_put(dev); rc = -EINVAL; break; } } pci_stop_and_remove_bus_device(dev); /* * Ensure that no new Requests will be generated from * the device. */ if (presence) { pci_read_config_word(dev, PCI_COMMAND, &command); command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR); command |= PCI_COMMAND_INTX_DISABLE; pci_write_config_word(dev, PCI_COMMAND, command); } pci_dev_put(dev); } pci_unlock_rescan_remove(); return rc; }
gpl-2.0
stedman420/android_kernel_lge_lgl55c
arch/parisc/kernel/pdc_cons.c
1626
4808
/* * PDC Console support - ie use firmware to dump text via boot console * * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net> * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org> * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org> * Copyright (C) 2000 Michael Ang <mang with subcarrier.org> * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org> * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> * Copyright (C) 2001 Helge Deller <deller at parisc-linux.org> * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org> * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * The PDC console is a simple console, which can be used for debugging * boot related problems on HP PA-RISC machines. * * This code uses the ROM (=PDC) based functions to read and write characters * from and to PDC's boot path. * Since all character read from that path must be polled, this code never * can or will be a fully functional linux console. */ /* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems. * On production kernels EARLY_BOOTUP_DEBUG should be undefined. 
*/ #define EARLY_BOOTUP_DEBUG #include <linux/kernel.h> #include <linux/console.h> #include <linux/string.h> #include <linux/init.h> #include <linux/major.h> #include <linux/tty.h> #include <asm/pdc.h> /* for iodc_call() proto and friends */ static DEFINE_SPINLOCK(pdc_console_lock); static void pdc_console_write(struct console *co, const char *s, unsigned count) { int i = 0; unsigned long flags; spin_lock_irqsave(&pdc_console_lock, flags); do { i += pdc_iodc_print(s + i, count - i); } while (i < count); spin_unlock_irqrestore(&pdc_console_lock, flags); } int pdc_console_poll_key(struct console *co) { int c; unsigned long flags; spin_lock_irqsave(&pdc_console_lock, flags); c = pdc_iodc_getc(); spin_unlock_irqrestore(&pdc_console_lock, flags); return c; } static int pdc_console_setup(struct console *co, char *options) { return 0; } #if defined(CONFIG_PDC_CONSOLE) #include <linux/vt_kern.h> static struct tty_driver * pdc_console_device (struct console *c, int *index) { extern struct tty_driver console_driver; *index = c->index ? c->index-1 : fg_console; return &console_driver; } #else #define pdc_console_device NULL #endif static struct console pdc_cons = { .name = "ttyB", .write = pdc_console_write, .device = pdc_console_device, .setup = pdc_console_setup, .flags = CON_BOOT | CON_PRINTBUFFER | CON_ENABLED, .index = -1, }; static int pdc_console_initialized; static void pdc_console_init_force(void) { if (pdc_console_initialized) return; ++pdc_console_initialized; /* If the console is duplex then copy the COUT parameters to CIN. */ if (PAGE0->mem_cons.cl_class == CL_DUPLEX) memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons)); /* register the pdc console */ register_console(&pdc_cons); } void __init pdc_console_init(void) { #if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE) pdc_console_init_force(); #endif #ifdef EARLY_BOOTUP_DEBUG printk(KERN_INFO "Initialized PDC Console for debugging.\n"); #endif } /* * Used for emergencies. 
Currently only used if an HPMC occurs. If an * HPMC occurs, it is possible that the current console may not be * properly initialised after the PDC IO reset. This routine unregisters * all of the current consoles, reinitializes the pdc console and * registers it. */ void pdc_console_restart(void) { struct console *console; if (pdc_console_initialized) return; /* If we've already seen the output, don't bother to print it again */ if (console_drivers != NULL) pdc_cons.flags &= ~CON_PRINTBUFFER; while ((console = console_drivers) != NULL) unregister_console(console_drivers); /* force registering the pdc console */ pdc_console_init_force(); }
gpl-2.0
steev/luna-kernel
drivers/net/wireless/mwifiex/11ac.c
1882
9443
/* * Marvell Wireless LAN device driver: 802.11ac * * Copyright (C) 2013, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "fw.h" #include "main.h" #include "11ac.h" /* This function converts the 2-bit MCS map to the highest long GI * VHT data rate. 
*/ static u16 mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv, u8 bands, u16 mcs_map) { u8 i, nss, max_mcs; u16 max_rate = 0; u32 usr_vht_cap_info = 0; struct mwifiex_adapter *adapter = priv->adapter; /* tables of the MCS map to the highest data rate (in Mbps) * supported for long GI */ u16 max_rate_lgi_80MHZ[8][3] = { {0x124, 0x15F, 0x186}, /* NSS = 1 */ {0x249, 0x2BE, 0x30C}, /* NSS = 2 */ {0x36D, 0x41D, 0x492}, /* NSS = 3 */ {0x492, 0x57C, 0x618}, /* NSS = 4 */ {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */ {0x6DB, 0x83A, 0x0}, /* NSS = 6 */ {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */ {0x924, 0xAF8, 0xC30} /* NSS = 8 */ }; u16 max_rate_lgi_160MHZ[8][3] = { {0x249, 0x2BE, 0x30C}, /* NSS = 1 */ {0x492, 0x57C, 0x618}, /* NSS = 2 */ {0x6DB, 0x83A, 0x0}, /* NSS = 3 */ {0x924, 0xAF8, 0xC30}, /* NSS = 4 */ {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */ {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */ {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */ {0x1248, 0x15F0, 0x1860} /* NSS = 8 */ }; if (bands & BAND_AAC) usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a; else usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg; /* find the max NSS supported */ nss = 0; for (i = 0; i < 8; i++) { max_mcs = (mcs_map >> (2 * i)) & 0x3; if (max_mcs < 3) nss = i; } max_mcs = (mcs_map >> (2 * nss)) & 0x3; /* if max_mcs is 3, nss must be 0 (SS = 1). 
Thus, max mcs is MCS 9 */ if (max_mcs >= 3) max_mcs = 2; if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) { /* support 160 MHz */ max_rate = max_rate_lgi_160MHZ[nss][max_mcs]; if (!max_rate) /* MCS9 is not supported in NSS6 */ max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1]; } else { max_rate = max_rate_lgi_80MHZ[nss][max_mcs]; if (!max_rate) /* MCS9 is not supported in NSS3 */ max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1]; } return max_rate; } static void mwifiex_fill_vht_cap_info(struct mwifiex_private *priv, struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands) { struct mwifiex_adapter *adapter = priv->adapter; if (bands & BAND_A) vht_cap->vht_cap.vht_cap_info = cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a); else vht_cap->vht_cap.vht_cap_info = cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg); } static void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv, struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands) { struct mwifiex_adapter *adapter = priv->adapter; u16 mcs_map_user, mcs_map_resp, mcs_map_result; u16 mcs_user, mcs_resp, nss, tmp; /* Fill VHT cap info */ mwifiex_fill_vht_cap_info(priv, vht_cap, bands); /* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */ mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support); mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map); mcs_map_result = 0; for (nss = 1; nss <= 8; nss++) { mcs_user = GET_VHTNSSMCS(mcs_map_user, nss); mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss); if ((mcs_user == NO_NSS_SUPPORT) || (mcs_resp == NO_NSS_SUPPORT)) SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT); else SET_VHTNSSMCS(mcs_map_result, nss, min(mcs_user, mcs_resp)); } vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result); tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result); vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp); /* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */ mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support); mcs_map_resp = 
le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map); mcs_map_result = 0; for (nss = 1; nss <= 8; nss++) { mcs_user = GET_VHTNSSMCS(mcs_map_user, nss); mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss); if ((mcs_user == NO_NSS_SUPPORT) || (mcs_resp == NO_NSS_SUPPORT)) SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT); else SET_VHTNSSMCS(mcs_map_result, nss, min(mcs_user, mcs_resp)); } vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result); tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result); vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp); return; } int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc, u8 **buffer) { struct mwifiex_ie_types_vhtcap *vht_cap; struct mwifiex_ie_types_oper_mode_ntf *oper_ntf; struct ieee_types_oper_mode_ntf *ieee_oper_ntf; struct mwifiex_ie_types_vht_oper *vht_op; struct mwifiex_adapter *adapter = priv->adapter; u8 supp_chwd_set; u32 usr_vht_cap_info; int ret_len = 0; if (bss_desc->bss_band & BAND_A) usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a; else usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg; /* VHT Capabilities IE */ if (bss_desc->bcn_vht_cap) { vht_cap = (struct mwifiex_ie_types_vhtcap *)*buffer; memset(vht_cap, 0, sizeof(*vht_cap)); vht_cap->header.type = cpu_to_le16(WLAN_EID_VHT_CAPABILITY); vht_cap->header.len = cpu_to_le16(sizeof(struct ieee80211_vht_cap)); memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), (u8 *)bss_desc->bcn_vht_cap, le16_to_cpu(vht_cap->header.len)); mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); *buffer += sizeof(*vht_cap); ret_len += sizeof(*vht_cap); } /* VHT Operation IE */ if (bss_desc->bcn_vht_oper) { if (priv->bss_mode == NL80211_IFTYPE_STATION) { vht_op = (struct mwifiex_ie_types_vht_oper *)*buffer; memset(vht_op, 0, sizeof(*vht_op)); vht_op->header.type = cpu_to_le16(WLAN_EID_VHT_OPERATION); vht_op->header.len = cpu_to_le16(sizeof(*vht_op) - sizeof(struct mwifiex_ie_types_header)); 
memcpy((u8 *)vht_op + sizeof(struct mwifiex_ie_types_header), (u8 *)bss_desc->bcn_vht_oper + sizeof(struct ieee_types_header), le16_to_cpu(vht_op->header.len)); /* negotiate the channel width and central freq * and keep the central freq as the peer suggests */ supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info); switch (supp_chwd_set) { case 0: vht_op->chan_width = min_t(u8, IEEE80211_VHT_CHANWIDTH_80MHZ, bss_desc->bcn_vht_oper->chan_width); break; case 1: vht_op->chan_width = min_t(u8, IEEE80211_VHT_CHANWIDTH_160MHZ, bss_desc->bcn_vht_oper->chan_width); break; case 2: vht_op->chan_width = min_t(u8, IEEE80211_VHT_CHANWIDTH_80P80MHZ, bss_desc->bcn_vht_oper->chan_width); break; default: vht_op->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT; break; } *buffer += sizeof(*vht_op); ret_len += sizeof(*vht_op); } } /* Operating Mode Notification IE */ if (bss_desc->oper_mode) { ieee_oper_ntf = bss_desc->oper_mode; oper_ntf = (void *)*buffer; memset(oper_ntf, 0, sizeof(*oper_ntf)); oper_ntf->header.type = cpu_to_le16(WLAN_EID_OPMODE_NOTIF); oper_ntf->header.len = cpu_to_le16(sizeof(u8)); oper_ntf->oper_mode = ieee_oper_ntf->oper_mode; *buffer += sizeof(*oper_ntf); ret_len += sizeof(*oper_ntf); } return ret_len; } int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv, struct host_cmd_ds_command *cmd, u16 cmd_action, struct mwifiex_11ac_vht_cfg *cfg) { struct host_cmd_11ac_vht_cfg *vhtcfg = &cmd->params.vht_cfg; cmd->command = cpu_to_le16(HostCmd_CMD_11AC_CFG); cmd->size = cpu_to_le16(sizeof(struct host_cmd_11ac_vht_cfg) + S_DS_GEN); vhtcfg->action = cpu_to_le16(cmd_action); vhtcfg->band_config = cfg->band_config; vhtcfg->misc_config = cfg->misc_config; vhtcfg->cap_info = cpu_to_le32(cfg->cap_info); vhtcfg->mcs_tx_set = cpu_to_le32(cfg->mcs_tx_set); vhtcfg->mcs_rx_set = cpu_to_le32(cfg->mcs_rx_set); return 0; } /* This function initializes the BlockACK setup information for given * mwifiex_private structure for 11ac enabled networks. 
*/ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv) { priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT; if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { priv->add_ba_param.tx_win_size = MWIFIEX_11AC_UAP_AMPDU_DEF_TXWINSIZE; priv->add_ba_param.rx_win_size = MWIFIEX_11AC_UAP_AMPDU_DEF_RXWINSIZE; } else { priv->add_ba_param.tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE; priv->add_ba_param.rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE; } return; }
gpl-2.0
VRToxin-AOSP/android_kernel_moto_shamu
drivers/usb/host/ehci-xilinx-of.c
2138
6926
/* * EHCI HCD (Host Controller Driver) for USB. * * Bus Glue for Xilinx EHCI core on the of_platform bus * * Copyright (c) 2009 Xilinx, Inc. * * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com> * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de> * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/err.h> #include <linux/signal.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_address.h> /** * ehci_xilinx_port_handed_over - hand the port out if failed to enable it * @hcd: Pointer to the usb_hcd device to which the host controller bound * @portnum:Port number to which the device is attached. * * This function is used as a place to tell the user that the Xilinx USB host * controller does support LS devices. And in an HS only configuration, it * does not support FS devices either. It is hoped that this can help a * confused user. * * There are cases when the host controller fails to enable the port due to, * for example, insufficient power that can be supplied to the device from * the USB bus. In those cases, the messages printed here are not helpful. 
*/ static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum) { dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum); if (hcd->has_tt) { dev_warn(hcd->self.controller, "Maybe you have connected a low speed device?\n"); dev_warn(hcd->self.controller, "We do not support low speed devices\n"); } else { dev_warn(hcd->self.controller, "Maybe your device is not a high speed device?\n"); dev_warn(hcd->self.controller, "The USB host controller does not support full speed " "nor low speed devices\n"); dev_warn(hcd->self.controller, "You can reconfigure the host controller to have " "full speed support\n"); } return 0; } static const struct hc_driver ehci_xilinx_of_hc_driver = { .description = hcd_name, .product_desc = "OF EHCI", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_MEMORY | HCD_USB2, /* * basic lifecycle operations */ .reset = ehci_setup, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, .endpoint_reset = ehci_endpoint_reset, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, #ifdef CONFIG_PM .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, #endif .relinquish_port = NULL, .port_handed_over = ehci_xilinx_port_handed_over, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, }; /** * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller * @op: pointer to the platform_device bound to the host controller * * This function requests resources and sets up appropriate properties for the * host controller. 
Because the Xilinx USB host controller can be configured * as HS only or HS/FS only, it checks the configuration in the device tree * entry, and sets an appropriate value for hcd->has_tt. */ static int ehci_hcd_xilinx_of_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct ehci_hcd *ehci; struct resource res; int irq; int rv; int *value; if (usb_disabled()) return -ENODEV; dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n"); rv = of_address_to_resource(dn, 0, &res); if (rv) return rv; hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev, "XILINX-OF USB"); if (!hcd) return -ENOMEM; hcd->rsrc_start = res.start; hcd->rsrc_len = resource_size(&res); irq = irq_of_parse_and_map(dn, 0); if (!irq) { printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = devm_ioremap_resource(&op->dev, &res); if (IS_ERR(hcd->regs)) { rv = PTR_ERR(hcd->regs); goto err_irq; } ehci = hcd_to_ehci(hcd); /* This core always has big-endian register interface and uses * big-endian memory descriptors. */ ehci->big_endian_mmio = 1; ehci->big_endian_desc = 1; /* Check whether the FS support option is selected in the hardware. */ value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL); if (value && (*value == 1)) { ehci_dbg(ehci, "USB host controller supports FS devices\n"); hcd->has_tt = 1; } else { ehci_dbg(ehci, "USB host controller is HS only\n"); hcd->has_tt = 0; } /* Debug registers are at the first 0x100 region */ ehci->caps = hcd->regs + 0x100; rv = usb_add_hcd(hcd, irq, 0); if (rv == 0) return 0; err_irq: usb_put_hcd(hcd); return rv; } /** * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources * @op: pointer to platform_device structure that is to be removed * * Remove the hcd structure, and release resources that has been requested * during probe. 
*/ static int ehci_hcd_xilinx_of_remove(struct platform_device *op) { struct usb_hcd *hcd = dev_get_drvdata(&op->dev); dev_set_drvdata(&op->dev, NULL); dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n"); usb_remove_hcd(hcd); usb_put_hcd(hcd); return 0; } /** * ehci_hcd_xilinx_of_shutdown - shutdown the hcd * @op: pointer to platform_device structure that is to be removed * * Properly shutdown the hcd, call driver's shutdown routine. */ static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op) { struct usb_hcd *hcd = dev_get_drvdata(&op->dev); if (hcd->driver->shutdown) hcd->driver->shutdown(hcd); } static const struct of_device_id ehci_hcd_xilinx_of_match[] = { {.compatible = "xlnx,xps-usb-host-1.00.a",}, {}, }; MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match); static struct platform_driver ehci_hcd_xilinx_of_driver = { .probe = ehci_hcd_xilinx_of_probe, .remove = ehci_hcd_xilinx_of_remove, .shutdown = ehci_hcd_xilinx_of_shutdown, .driver = { .name = "xilinx-of-ehci", .owner = THIS_MODULE, .of_match_table = ehci_hcd_xilinx_of_match, }, };
gpl-2.0
pombredanne/bcm11351
drivers/video/fb_defio.c
2906
6515
/* * linux/drivers/video/fb_defio.c * * Copyright (C) 2006 Jaya Kumar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/list.h> /* to support deferred IO */ #include <linux/rmap.h> #include <linux/pagemap.h> static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) { void *screen_base = (void __force *) info->screen_base; struct page *page; if (is_vmalloc_addr(screen_base + offs)) page = vmalloc_to_page(screen_base + offs); else page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT); return page; } /* this is to find and return the vmalloc-ed fb pages */ static int fb_deferred_io_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { unsigned long offset; struct page *page; struct fb_info *info = vma->vm_private_data; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= info->fix.smem_len) return VM_FAULT_SIGBUS; page = fb_deferred_io_page(info, offset); if (!page) return VM_FAULT_SIGBUS; get_page(page); if (vma->vm_file) page->mapping = vma->vm_file->f_mapping; else printk(KERN_ERR "no mapping available\n"); BUG_ON(!page->mapping); page->index = vmf->pgoff; vmf->page = page; return 0; } int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct fb_info *info = file->private_data; struct inode *inode = file_inode(file); int err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (err) return err; /* Skip if deferred io is compiled-in but disabled on this fbdev */ if (!info->fbdefio) return 0; mutex_lock(&inode->i_mutex); /* Kill off the delayed work */ cancel_delayed_work_sync(&info->deferred_work); /* Run it immediately */ err 
= schedule_delayed_work(&info->deferred_work, 0); mutex_unlock(&inode->i_mutex); return err; } EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); /* vm_ops->page_mkwrite handler */ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct fb_info *info = vma->vm_private_data; struct fb_deferred_io *fbdefio = info->fbdefio; struct page *cur; /* this is a callback we get when userspace first tries to write to the page. we schedule a workqueue. that workqueue will eventually mkclean the touched pages and execute the deferred framebuffer IO. then if userspace touches a page again, we repeat the same scheme */ file_update_time(vma->vm_file); /* protect against the workqueue changing the page list */ mutex_lock(&fbdefio->lock); /* first write in this cycle, notify the driver */ if (fbdefio->first_io && list_empty(&fbdefio->pagelist)) fbdefio->first_io(info); /* * We want the page to remain locked from ->page_mkwrite until * the PTE is marked dirty to avoid page_mkclean() being called * before the PTE is updated, which would leave the page ignored * by defio. * Do this by locking the page here and informing the caller * about it with VM_FAULT_LOCKED. */ lock_page(page); /* we loop through the pagelist before adding in order to keep the pagelist sorted */ list_for_each_entry(cur, &fbdefio->pagelist, lru) { /* this check is to catch the case where a new process could start writing to the same page through a new pte. 
this new access can cause the mkwrite even when the original ps's pte is marked writable */ if (unlikely(cur == page)) goto page_already_added; else if (cur->index > page->index) break; } list_add_tail(&page->lru, &cur->lru); page_already_added: mutex_unlock(&fbdefio->lock); /* come back after delay to process the deferred IO */ schedule_delayed_work(&info->deferred_work, fbdefio->delay); return VM_FAULT_LOCKED; } static const struct vm_operations_struct fb_deferred_io_vm_ops = { .fault = fb_deferred_io_fault, .page_mkwrite = fb_deferred_io_mkwrite, }; static int fb_deferred_io_set_page_dirty(struct page *page) { if (!PageDirty(page)) SetPageDirty(page); return 0; } static const struct address_space_operations fb_deferred_io_aops = { .set_page_dirty = fb_deferred_io_set_page_dirty, }; static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) { vma->vm_ops = &fb_deferred_io_vm_ops; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; if (!(info->flags & FBINFO_VIRTFB)) vma->vm_flags |= VM_IO; vma->vm_private_data = info; return 0; } /* workqueue callback */ static void fb_deferred_io_work(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, deferred_work.work); struct list_head *node, *next; struct page *cur; struct fb_deferred_io *fbdefio = info->fbdefio; /* here we mkclean the pages, then do all deferred IO */ mutex_lock(&fbdefio->lock); list_for_each_entry(cur, &fbdefio->pagelist, lru) { lock_page(cur); page_mkclean(cur); unlock_page(cur); } /* driver's callback with pagelist */ fbdefio->deferred_io(info, &fbdefio->pagelist); /* clear the list */ list_for_each_safe(node, next, &fbdefio->pagelist) { list_del(node); } mutex_unlock(&fbdefio->lock); } void fb_deferred_io_init(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; BUG_ON(!fbdefio); mutex_init(&fbdefio->lock); info->fbops->fb_mmap = fb_deferred_io_mmap; INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work); 
INIT_LIST_HEAD(&fbdefio->pagelist); if (fbdefio->delay == 0) /* set a default of 1 s */ fbdefio->delay = HZ; } EXPORT_SYMBOL_GPL(fb_deferred_io_init); void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file) { file->f_mapping->a_ops = &fb_deferred_io_aops; } EXPORT_SYMBOL_GPL(fb_deferred_io_open); void fb_deferred_io_cleanup(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; struct page *page; int i; BUG_ON(!fbdefio); cancel_delayed_work_sync(&info->deferred_work); /* clear out the mapping that we setup */ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { page = fb_deferred_io_page(info, i); page->mapping = NULL; } info->fbops->fb_mmap = NULL; mutex_destroy(&fbdefio->lock); } EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
a1d3s/linux-bpi
fs/cifs/cache.c
4442
8154
/*
 *   fs/cifs/cache.c - CIFS filesystem cache index structure definitions
 *
 *   Copyright (c) 2010 Novell, Inc.
 *   Author(s): Suresh Jayaraman <sjayaraman@suse.de>
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or  FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "fscache.h"
#include "cifs_debug.h"

/*
 * CIFS filesystem definition for FS-Cache
 */
struct fscache_netfs cifs_fscache_netfs = {
	.name = "cifs",
	.version = 0,
};

/*
 * Register CIFS for caching with FS-Cache
 */
int cifs_fscache_register(void)
{
	return fscache_register_netfs(&cifs_fscache_netfs);
}

/*
 * Unregister CIFS for caching
 */
void cifs_fscache_unregister(void)
{
	fscache_unregister_netfs(&cifs_fscache_netfs);
}

/*
 * Key layout of CIFS server cache index object
 */
struct cifs_server_key {
	uint16_t	family;		/* address family */
	__be16		port;		/* IP port */
	union {
		struct in_addr	ipv4_addr;
		struct in6_addr	ipv6_addr;
	} addr[0];			/* variable part, sized by family */
};

/*
 * Server object keyed by {IPaddress,port,family} tuple
 *
 * Fills @buffer (at most @maxbuf bytes) and returns the number of key
 * bytes written, or 0 for an unsupported address family.
 */
static uint16_t cifs_server_get_key(const void *cookie_netfs_data,
				    void *buffer, uint16_t maxbuf)
{
	const struct TCP_Server_Info *server = cookie_netfs_data;
	const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
	const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
	const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
	struct cifs_server_key *key = buffer;
	uint16_t key_len = sizeof(struct cifs_server_key);

	memset(key, 0, key_len);

	/*
	 * Should not be a problem as sin_family/sin6_family overlays
	 * sa_family field
	 */
	switch (sa->sa_family) {
	case AF_INET:
		key->family = sa->sa_family;
		key->port = addr->sin_port;
		key->addr[0].ipv4_addr = addr->sin_addr;
		key_len += sizeof(key->addr[0].ipv4_addr);
		break;

	case AF_INET6:
		key->family = sa->sa_family;
		key->port = addr6->sin6_port;
		key->addr[0].ipv6_addr = addr6->sin6_addr;
		key_len += sizeof(key->addr[0].ipv6_addr);
		break;

	default:
		cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
		key_len = 0;
		break;
	}

	return key_len;
}

/*
 * Server object for FS-Cache
 */
const struct fscache_cookie_def cifs_fscache_server_index_def = {
	.name = "CIFS.server",
	.type = FSCACHE_COOKIE_TYPE_INDEX,
	.get_key = cifs_server_get_key,
};

/*
 * Auxiliary data attached to CIFS superblock within the cache
 */
struct cifs_fscache_super_auxdata {
	u64	resource_id;		/* unique server resource id */
};

/*
 * Extract the share name from a UNC tree name of the form
 * "\\server\share".  Returns a newly allocated copy of "share" that the
 * caller must kfree(), or an ERR_PTR() on failure.
 */
static char *extract_sharename(const char *treename)
{
	const char *src;
	char *delim, *dst;
	int len;

	/* skip double chars at the beginning */
	src = treename + 2;

	/* share name is always preceded by '\\' now */
	delim = strchr(src, '\\');
	if (!delim)
		return ERR_PTR(-EINVAL);
	delim++;
	len = strlen(delim);

	/* caller has to free the memory */
	dst = kstrndup(delim, len, GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	return dst;
}

/*
 * Superblock object currently keyed by share name
 */
static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
				   uint16_t maxbuf)
{
	const struct cifs_tcon *tcon = cookie_netfs_data;
	char *sharename;
	uint16_t len;

	sharename = extract_sharename(tcon->treeName);
	if (IS_ERR(sharename)) {
		cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
		return 0;
	}

	len = strlen(sharename);
	if (len > maxbuf) {
		/* fix: free the duplicated name instead of leaking it */
		kfree(sharename);
		return 0;
	}

	memcpy(buffer, sharename, len);

	kfree(sharename);

	return len;
}

static uint16_t
cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer,
			   uint16_t maxbuf)
{
	struct cifs_fscache_super_auxdata auxdata;
	const struct cifs_tcon *tcon = cookie_netfs_data;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.resource_id = tcon->resource_id;

	if (maxbuf > sizeof(auxdata))
		maxbuf = sizeof(auxdata);

	memcpy(buffer, &auxdata, maxbuf);

	return maxbuf;
}

static enum
fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
					      const void *data,
					      uint16_t datalen)
{
	struct cifs_fscache_super_auxdata auxdata;
	const struct cifs_tcon *tcon = cookie_netfs_data;

	if (datalen != sizeof(auxdata))
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.resource_id = tcon->resource_id;

	if (memcmp(data, &auxdata, datalen) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

/*
 * Superblock object for FS-Cache
 */
const struct fscache_cookie_def cifs_fscache_super_index_def = {
	.name = "CIFS.super",
	.type = FSCACHE_COOKIE_TYPE_INDEX,
	.get_key = cifs_super_get_key,
	.get_aux = cifs_fscache_super_get_aux,
	.check_aux = cifs_fscache_super_check_aux,
};

/*
 * Auxiliary data attached to CIFS inode within the cache
 */
struct cifs_fscache_inode_auxdata {
	struct timespec	last_write_time;
	struct timespec	last_change_time;
	u64		eof;
};

static uint16_t cifs_fscache_inode_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t maxbuf)
{
	const struct cifsInodeInfo *cifsi = cookie_netfs_data;
	uint16_t keylen;

	/* use the UniqueId as the key */
	keylen = sizeof(cifsi->uniqueid);
	if (keylen > maxbuf)
		keylen = 0;
	else
		memcpy(buffer, &cifsi->uniqueid, keylen);

	return keylen;
}

static void
cifs_fscache_inode_get_attr(const void *cookie_netfs_data, uint64_t *size)
{
	const struct cifsInodeInfo *cifsi = cookie_netfs_data;

	*size = cifsi->vfs_inode.i_size;
}

static uint16_t
cifs_fscache_inode_get_aux(const void *cookie_netfs_data, void *buffer,
			   uint16_t maxbuf)
{
	struct cifs_fscache_inode_auxdata auxdata;
	const struct cifsInodeInfo *cifsi = cookie_netfs_data;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.eof = cifsi->server_eof;
	auxdata.last_write_time = cifsi->vfs_inode.i_mtime;
	auxdata.last_change_time = cifsi->vfs_inode.i_ctime;

	if (maxbuf > sizeof(auxdata))
		maxbuf = sizeof(auxdata);

	memcpy(buffer, &auxdata, maxbuf);

	return maxbuf;
}

static enum
fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
					      const void *data,
					      uint16_t datalen)
{
	struct cifs_fscache_inode_auxdata auxdata;
	struct cifsInodeInfo *cifsi = cookie_netfs_data;

	if (datalen != sizeof(auxdata))
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.eof = cifsi->server_eof;
	auxdata.last_write_time = cifsi->vfs_inode.i_mtime;
	auxdata.last_change_time = cifsi->vfs_inode.i_ctime;

	if (memcmp(data, &auxdata, datalen) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

/*
 * Clear the PG_fscache flag on every page of the inode's mapping once
 * the backing cache object goes away.
 */
static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
{
	struct cifsInodeInfo *cifsi = cookie_netfs_data;
	struct pagevec pvec;
	pgoff_t first;
	int loop, nr_pages;

	pagevec_init(&pvec, 0);
	first = 0;

	cifs_dbg(FYI, "%s: cifs inode 0x%p now uncached\n", __func__, cifsi);

	for (;;) {
		nr_pages = pagevec_lookup(&pvec,
					  cifsi->vfs_inode.i_mapping, first,
					  PAGEVEC_SIZE - pagevec_count(&pvec));
		if (!nr_pages)
			break;

		for (loop = 0; loop < nr_pages; loop++)
			ClearPageFsCache(pvec.pages[loop]);

		first = pvec.pages[nr_pages - 1]->index + 1;

		pvec.nr = nr_pages;
		pagevec_release(&pvec);
		cond_resched();
	}
}

const struct fscache_cookie_def cifs_fscache_inode_object_def = {
	.name		= "CIFS.uniqueid",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.get_key	= cifs_fscache_inode_get_key,
	.get_attr	= cifs_fscache_inode_get_attr,
	.get_aux	= cifs_fscache_inode_get_aux,
	.check_aux	= cifs_fscache_inode_check_aux,
	.now_uncached	= cifs_fscache_inode_now_uncached,
};
gpl-2.0
MoKee/android_kernel_zte_x9180
drivers/hsi/clients/hsi_char.c
4954
19854
/* * HSI character device driver, implements the character device * interface. * * Copyright (C) 2010 Nokia Corporation. All rights reserved. * * Contact: Andras Domokos <andras.domokos@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/errno.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/kmemleak.h> #include <linux/ioctl.h> #include <linux/wait.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/scatterlist.h> #include <linux/stat.h> #include <linux/hsi/hsi.h> #include <linux/hsi/hsi_char.h> #define HSC_DEVS 16 /* Num of channels */ #define HSC_MSGS 4 #define HSC_RXBREAK 0 #define HSC_ID_BITS 6 #define HSC_PORT_ID_BITS 4 #define HSC_ID_MASK 3 #define HSC_PORT_ID_MASK 3 #define HSC_CH_MASK 0xf /* * We support up to 4 controllers that can have up to 4 * ports, which should currently be more than enough. 
*/ #define HSC_BASEMINOR(id, port_id) \ ((((id) & HSC_ID_MASK) << HSC_ID_BITS) | \ (((port_id) & HSC_PORT_ID_MASK) << HSC_PORT_ID_BITS)) enum { HSC_CH_OPEN, HSC_CH_READ, HSC_CH_WRITE, HSC_CH_WLINE, }; enum { HSC_RX, HSC_TX, }; struct hsc_client_data; /** * struct hsc_channel - hsi_char internal channel data * @ch: channel number * @flags: Keeps state of the channel (open/close, reading, writing) * @free_msgs_list: List of free HSI messages/requests * @rx_msgs_queue: List of pending RX requests * @tx_msgs_queue: List of pending TX requests * @lock: Serialize access to the lists * @cl: reference to the associated hsi_client * @cl_data: reference to the client data that this channels belongs to * @rx_wait: RX requests wait queue * @tx_wait: TX requests wait queue */ struct hsc_channel { unsigned int ch; unsigned long flags; struct list_head free_msgs_list; struct list_head rx_msgs_queue; struct list_head tx_msgs_queue; spinlock_t lock; struct hsi_client *cl; struct hsc_client_data *cl_data; wait_queue_head_t rx_wait; wait_queue_head_t tx_wait; }; /** * struct hsc_client_data - hsi_char internal client data * @cdev: Characther device associated to the hsi_client * @lock: Lock to serialize open/close access * @flags: Keeps track of port state (rx hwbreak armed) * @usecnt: Use count for claiming the HSI port (mutex protected) * @cl: Referece to the HSI client * @channels: Array of channels accessible by the client */ struct hsc_client_data { struct cdev cdev; struct mutex lock; unsigned long flags; unsigned int usecnt; struct hsi_client *cl; struct hsc_channel channels[HSC_DEVS]; }; /* Stores the major number dynamically allocated for hsi_char */ static unsigned int hsc_major; /* Maximum buffer size that hsi_char will accept from userspace */ static unsigned int max_data_size = 0x1000; module_param(max_data_size, uint, 0); MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg 
*msg, struct list_head *queue) { unsigned long flags; spin_lock_irqsave(&channel->lock, flags); list_add_tail(&msg->link, queue); spin_unlock_irqrestore(&channel->lock, flags); } static struct hsi_msg *hsc_get_first_msg(struct hsc_channel *channel, struct list_head *queue) { struct hsi_msg *msg = NULL; unsigned long flags; spin_lock_irqsave(&channel->lock, flags); if (list_empty(queue)) goto out; msg = list_first_entry(queue, struct hsi_msg, link); list_del(&msg->link); out: spin_unlock_irqrestore(&channel->lock, flags); return msg; } static inline void hsc_msg_free(struct hsi_msg *msg) { kfree(sg_virt(msg->sgt.sgl)); hsi_free_msg(msg); } static void hsc_free_list(struct list_head *list) { struct hsi_msg *msg, *tmp; list_for_each_entry_safe(msg, tmp, list, link) { list_del(&msg->link); hsc_msg_free(msg); } } static void hsc_reset_list(struct hsc_channel *channel, struct list_head *l) { unsigned long flags; LIST_HEAD(list); spin_lock_irqsave(&channel->lock, flags); list_splice_init(l, &list); spin_unlock_irqrestore(&channel->lock, flags); hsc_free_list(&list); } static inline struct hsi_msg *hsc_msg_alloc(unsigned int alloc_size) { struct hsi_msg *msg; void *buf; msg = hsi_alloc_msg(1, GFP_KERNEL); if (!msg) goto out; buf = kmalloc(alloc_size, GFP_KERNEL); if (!buf) { hsi_free_msg(msg); goto out; } sg_init_one(msg->sgt.sgl, buf, alloc_size); /* Ignore false positive, due to sg pointer handling */ kmemleak_ignore(buf); return msg; out: return NULL; } static inline int hsc_msgs_alloc(struct hsc_channel *channel) { struct hsi_msg *msg; int i; for (i = 0; i < HSC_MSGS; i++) { msg = hsc_msg_alloc(max_data_size); if (!msg) goto out; msg->channel = channel->ch; list_add_tail(&msg->link, &channel->free_msgs_list); } return 0; out: hsc_free_list(&channel->free_msgs_list); return -ENOMEM; } static inline unsigned int hsc_msg_len_get(struct hsi_msg *msg) { return msg->sgt.sgl->length; } static inline void hsc_msg_len_set(struct hsi_msg *msg, unsigned int len) { 
msg->sgt.sgl->length = len; } static void hsc_rx_completed(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels + msg->channel; if (test_bit(HSC_CH_READ, &channel->flags)) { hsc_add_tail(channel, msg, &channel->rx_msgs_queue); wake_up(&channel->rx_wait); } else { hsc_add_tail(channel, msg, &channel->free_msgs_list); } } static void hsc_rx_msg_destructor(struct hsi_msg *msg) { msg->status = HSI_STATUS_ERROR; hsc_msg_len_set(msg, 0); hsc_rx_completed(msg); } static void hsc_tx_completed(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels + msg->channel; if (test_bit(HSC_CH_WRITE, &channel->flags)) { hsc_add_tail(channel, msg, &channel->tx_msgs_queue); wake_up(&channel->tx_wait); } else { hsc_add_tail(channel, msg, &channel->free_msgs_list); } } static void hsc_tx_msg_destructor(struct hsi_msg *msg) { msg->status = HSI_STATUS_ERROR; hsc_msg_len_set(msg, 0); hsc_tx_completed(msg); } static void hsc_break_req_destructor(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); hsi_free_msg(msg); clear_bit(HSC_RXBREAK, &cl_data->flags); } static void hsc_break_received(struct hsi_msg *msg) { struct hsc_client_data *cl_data = hsi_client_drvdata(msg->cl); struct hsc_channel *channel = cl_data->channels; int i, ret; /* Broadcast HWBREAK on all channels */ for (i = 0; i < HSC_DEVS; i++, channel++) { struct hsi_msg *msg2; if (!test_bit(HSC_CH_READ, &channel->flags)) continue; msg2 = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg2) continue; clear_bit(HSC_CH_READ, &channel->flags); hsc_msg_len_set(msg2, 0); msg2->status = HSI_STATUS_COMPLETED; hsc_add_tail(channel, msg2, &channel->rx_msgs_queue); wake_up(&channel->rx_wait); } hsi_flush(msg->cl); ret = hsi_async_read(msg->cl, msg); if (ret < 0) hsc_break_req_destructor(msg); } static int hsc_break_request(struct hsi_client 
*cl) { struct hsc_client_data *cl_data = hsi_client_drvdata(cl); struct hsi_msg *msg; int ret; if (test_and_set_bit(HSC_RXBREAK, &cl_data->flags)) return -EBUSY; msg = hsi_alloc_msg(0, GFP_KERNEL); if (!msg) { clear_bit(HSC_RXBREAK, &cl_data->flags); return -ENOMEM; } msg->break_frame = 1; msg->complete = hsc_break_received; msg->destructor = hsc_break_req_destructor; ret = hsi_async_read(cl, msg); if (ret < 0) hsc_break_req_destructor(msg); return ret; } static int hsc_break_send(struct hsi_client *cl) { struct hsi_msg *msg; int ret; msg = hsi_alloc_msg(0, GFP_ATOMIC); if (!msg) return -ENOMEM; msg->break_frame = 1; msg->complete = hsi_free_msg; msg->destructor = hsi_free_msg; ret = hsi_async_write(cl, msg); if (ret < 0) hsi_free_msg(msg); return ret; } static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) { struct hsi_config tmp; int ret; if ((rxc->mode != HSI_MODE_STREAM) && (rxc->mode != HSI_MODE_FRAME)) return -EINVAL; if ((rxc->channels == 0) || (rxc->channels > HSC_DEVS)) return -EINVAL; if (rxc->channels & (rxc->channels - 1)) return -EINVAL; if ((rxc->flow != HSI_FLOW_SYNC) && (rxc->flow != HSI_FLOW_PIPE)) return -EINVAL; tmp = cl->rx_cfg; cl->rx_cfg.mode = rxc->mode; cl->rx_cfg.channels = rxc->channels; cl->rx_cfg.flow = rxc->flow; ret = hsi_setup(cl); if (ret < 0) { cl->rx_cfg = tmp; return ret; } if (rxc->mode == HSI_MODE_FRAME) hsc_break_request(cl); return ret; } static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc) { rxc->mode = cl->rx_cfg.mode; rxc->channels = cl->rx_cfg.channels; rxc->flow = cl->rx_cfg.flow; } static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) { struct hsi_config tmp; int ret; if ((txc->mode != HSI_MODE_STREAM) && (txc->mode != HSI_MODE_FRAME)) return -EINVAL; if ((txc->channels == 0) || (txc->channels > HSC_DEVS)) return -EINVAL; if (txc->channels & (txc->channels - 1)) return -EINVAL; if ((txc->arb_mode != HSI_ARB_RR) && (txc->arb_mode != HSI_ARB_PRIO)) return 
-EINVAL; tmp = cl->tx_cfg; cl->tx_cfg.mode = txc->mode; cl->tx_cfg.channels = txc->channels; cl->tx_cfg.speed = txc->speed; cl->tx_cfg.arb_mode = txc->arb_mode; ret = hsi_setup(cl); if (ret < 0) { cl->tx_cfg = tmp; return ret; } return ret; } static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc) { txc->mode = cl->tx_cfg.mode; txc->channels = cl->tx_cfg.channels; txc->speed = cl->tx_cfg.speed; txc->arb_mode = cl->tx_cfg.arb_mode; } static ssize_t hsc_read(struct file *file, char __user *buf, size_t len, loff_t *ppos __maybe_unused) { struct hsc_channel *channel = file->private_data; struct hsi_msg *msg; ssize_t ret; if (len == 0) return 0; if (!IS_ALIGNED(len, sizeof(u32))) return -EINVAL; if (len > max_data_size) len = max_data_size; if (channel->ch >= channel->cl->rx_cfg.channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_READ, &channel->flags)) return -EBUSY; msg = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg) { ret = -ENOSPC; goto out; } hsc_msg_len_set(msg, len); msg->complete = hsc_rx_completed; msg->destructor = hsc_rx_msg_destructor; ret = hsi_async_read(channel->cl, msg); if (ret < 0) { hsc_add_tail(channel, msg, &channel->free_msgs_list); goto out; } ret = wait_event_interruptible(channel->rx_wait, !list_empty(&channel->rx_msgs_queue)); if (ret < 0) { clear_bit(HSC_CH_READ, &channel->flags); hsi_flush(channel->cl); return -EINTR; } msg = hsc_get_first_msg(channel, &channel->rx_msgs_queue); if (msg) { if (msg->status != HSI_STATUS_ERROR) { ret = copy_to_user((void __user *)buf, sg_virt(msg->sgt.sgl), hsc_msg_len_get(msg)); if (ret) ret = -EFAULT; else ret = hsc_msg_len_get(msg); } else { ret = -EIO; } hsc_add_tail(channel, msg, &channel->free_msgs_list); } out: clear_bit(HSC_CH_READ, &channel->flags); return ret; } static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos __maybe_unused) { struct hsc_channel *channel = file->private_data; struct hsi_msg *msg; ssize_t ret; if 
((len == 0) || !IS_ALIGNED(len, sizeof(u32))) return -EINVAL; if (len > max_data_size) len = max_data_size; if (channel->ch >= channel->cl->tx_cfg.channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_WRITE, &channel->flags)) return -EBUSY; msg = hsc_get_first_msg(channel, &channel->free_msgs_list); if (!msg) { clear_bit(HSC_CH_WRITE, &channel->flags); return -ENOSPC; } if (copy_from_user(sg_virt(msg->sgt.sgl), (void __user *)buf, len)) { ret = -EFAULT; goto out; } hsc_msg_len_set(msg, len); msg->complete = hsc_tx_completed; msg->destructor = hsc_tx_msg_destructor; ret = hsi_async_write(channel->cl, msg); if (ret < 0) goto out; ret = wait_event_interruptible(channel->tx_wait, !list_empty(&channel->tx_msgs_queue)); if (ret < 0) { clear_bit(HSC_CH_WRITE, &channel->flags); hsi_flush(channel->cl); return -EINTR; } msg = hsc_get_first_msg(channel, &channel->tx_msgs_queue); if (msg) { if (msg->status == HSI_STATUS_ERROR) ret = -EIO; else ret = hsc_msg_len_get(msg); hsc_add_tail(channel, msg, &channel->free_msgs_list); } out: clear_bit(HSC_CH_WRITE, &channel->flags); return ret; } static long hsc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct hsc_channel *channel = file->private_data; unsigned int state; struct hsc_rx_config rxc; struct hsc_tx_config txc; long ret = 0; switch (cmd) { case HSC_RESET: hsi_flush(channel->cl); break; case HSC_SET_PM: if (copy_from_user(&state, (void __user *)arg, sizeof(state))) return -EFAULT; if (state == HSC_PM_DISABLE) { if (test_and_set_bit(HSC_CH_WLINE, &channel->flags)) return -EINVAL; ret = hsi_start_tx(channel->cl); } else if (state == HSC_PM_ENABLE) { if (!test_and_clear_bit(HSC_CH_WLINE, &channel->flags)) return -EINVAL; ret = hsi_stop_tx(channel->cl); } else { ret = -EINVAL; } break; case HSC_SEND_BREAK: return hsc_break_send(channel->cl); case HSC_SET_RX: if (copy_from_user(&rxc, (void __user *)arg, sizeof(rxc))) return -EFAULT; return hsc_rx_set(channel->cl, &rxc); case HSC_GET_RX: hsc_rx_get(channel->cl, 
&rxc); if (copy_to_user((void __user *)arg, &rxc, sizeof(rxc))) return -EFAULT; break; case HSC_SET_TX: if (copy_from_user(&txc, (void __user *)arg, sizeof(txc))) return -EFAULT; return hsc_tx_set(channel->cl, &txc); case HSC_GET_TX: hsc_tx_get(channel->cl, &txc); if (copy_to_user((void __user *)arg, &txc, sizeof(txc))) return -EFAULT; break; default: return -ENOIOCTLCMD; } return ret; } static inline void __hsc_port_release(struct hsc_client_data *cl_data) { BUG_ON(cl_data->usecnt == 0); if (--cl_data->usecnt == 0) { hsi_flush(cl_data->cl); hsi_release_port(cl_data->cl); } } static int hsc_open(struct inode *inode, struct file *file) { struct hsc_client_data *cl_data; struct hsc_channel *channel; int ret = 0; pr_debug("open, minor = %d\n", iminor(inode)); cl_data = container_of(inode->i_cdev, struct hsc_client_data, cdev); mutex_lock(&cl_data->lock); channel = cl_data->channels + (iminor(inode) & HSC_CH_MASK); if (test_and_set_bit(HSC_CH_OPEN, &channel->flags)) { ret = -EBUSY; goto out; } /* * Check if we have already claimed the port associated to the HSI * client. 
If not then try to claim it, else increase its refcount */ if (cl_data->usecnt == 0) { ret = hsi_claim_port(cl_data->cl, 0); if (ret < 0) goto out; hsi_setup(cl_data->cl); } cl_data->usecnt++; ret = hsc_msgs_alloc(channel); if (ret < 0) { __hsc_port_release(cl_data); goto out; } file->private_data = channel; mutex_unlock(&cl_data->lock); return ret; out: mutex_unlock(&cl_data->lock); return ret; } static int hsc_release(struct inode *inode __maybe_unused, struct file *file) { struct hsc_channel *channel = file->private_data; struct hsc_client_data *cl_data = channel->cl_data; mutex_lock(&cl_data->lock); file->private_data = NULL; if (test_and_clear_bit(HSC_CH_WLINE, &channel->flags)) hsi_stop_tx(channel->cl); __hsc_port_release(cl_data); hsc_reset_list(channel, &channel->rx_msgs_queue); hsc_reset_list(channel, &channel->tx_msgs_queue); hsc_reset_list(channel, &channel->free_msgs_list); clear_bit(HSC_CH_READ, &channel->flags); clear_bit(HSC_CH_WRITE, &channel->flags); clear_bit(HSC_CH_OPEN, &channel->flags); wake_up(&channel->rx_wait); wake_up(&channel->tx_wait); mutex_unlock(&cl_data->lock); return 0; } static const struct file_operations hsc_fops = { .owner = THIS_MODULE, .read = hsc_read, .write = hsc_write, .unlocked_ioctl = hsc_ioctl, .open = hsc_open, .release = hsc_release, }; static void __devinit hsc_channel_init(struct hsc_channel *channel) { init_waitqueue_head(&channel->rx_wait); init_waitqueue_head(&channel->tx_wait); spin_lock_init(&channel->lock); INIT_LIST_HEAD(&channel->free_msgs_list); INIT_LIST_HEAD(&channel->rx_msgs_queue); INIT_LIST_HEAD(&channel->tx_msgs_queue); } static int __devinit hsc_probe(struct device *dev) { const char devname[] = "hsi_char"; struct hsc_client_data *cl_data; struct hsc_channel *channel; struct hsi_client *cl = to_hsi_client(dev); unsigned int hsc_baseminor; dev_t hsc_dev; int ret; int i; cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL); if (!cl_data) { dev_err(dev, "Could not allocate hsc_client_data\n"); return -ENOMEM; 
} hsc_baseminor = HSC_BASEMINOR(hsi_id(cl), hsi_port_id(cl)); if (!hsc_major) { ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor, HSC_DEVS, devname); if (ret > 0) hsc_major = MAJOR(hsc_dev); } else { hsc_dev = MKDEV(hsc_major, hsc_baseminor); ret = register_chrdev_region(hsc_dev, HSC_DEVS, devname); } if (ret < 0) { dev_err(dev, "Device %s allocation failed %d\n", hsc_major ? "minor" : "major", ret); goto out1; } mutex_init(&cl_data->lock); hsi_client_set_drvdata(cl, cl_data); cdev_init(&cl_data->cdev, &hsc_fops); cl_data->cdev.owner = THIS_MODULE; cl_data->cl = cl; for (i = 0, channel = cl_data->channels; i < HSC_DEVS; i++, channel++) { hsc_channel_init(channel); channel->ch = i; channel->cl = cl; channel->cl_data = cl_data; } /* 1 hsi client -> N char devices (one for each channel) */ ret = cdev_add(&cl_data->cdev, hsc_dev, HSC_DEVS); if (ret) { dev_err(dev, "Could not add char device %d\n", ret); goto out2; } return 0; out2: unregister_chrdev_region(hsc_dev, HSC_DEVS); out1: kfree(cl_data); return ret; } static int __devexit hsc_remove(struct device *dev) { struct hsi_client *cl = to_hsi_client(dev); struct hsc_client_data *cl_data = hsi_client_drvdata(cl); dev_t hsc_dev = cl_data->cdev.dev; cdev_del(&cl_data->cdev); unregister_chrdev_region(hsc_dev, HSC_DEVS); hsi_client_set_drvdata(cl, NULL); kfree(cl_data); return 0; } static struct hsi_client_driver hsc_driver = { .driver = { .name = "hsi_char", .owner = THIS_MODULE, .probe = hsc_probe, .remove = __devexit_p(hsc_remove), }, }; static int __init hsc_init(void) { int ret; if ((max_data_size < 4) || (max_data_size > 0x10000) || (max_data_size & (max_data_size - 1))) { pr_err("Invalid max read/write data size"); return -EINVAL; } ret = hsi_register_client_driver(&hsc_driver); if (ret) { pr_err("Error while registering HSI/SSI driver %d", ret); return ret; } pr_info("HSI/SSI char device loaded\n"); return 0; } module_init(hsc_init); static void __exit hsc_exit(void) { hsi_unregister_client_driver(&hsc_driver); 
pr_info("HSI char device removed\n"); } module_exit(hsc_exit); MODULE_AUTHOR("Andras Domokos <andras.domokos@nokia.com>"); MODULE_ALIAS("hsi:hsi_char"); MODULE_DESCRIPTION("HSI character device"); MODULE_LICENSE("GPL v2");
gpl-2.0
oppo-source/Neo5-kernel-source
drivers/char/ps3flash.c
5210
11226
/* * PS3 FLASH ROM Storage Driver * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/module.h> #include <asm/lv1call.h> #include <asm/ps3stor.h> #define DEVICE_NAME "ps3flash" #define FLASH_BLOCK_SIZE (256*1024) struct ps3flash_private { struct mutex mutex; /* Bounce buffer mutex */ u64 chunk_sectors; int tag; /* Start sector of buffer, -1 if invalid */ bool dirty; }; static struct ps3_storage_device *ps3flash_dev; static int ps3flash_read_write_sectors(struct ps3_storage_device *dev, u64 start_sector, int write) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, start_sector, priv->chunk_sectors, write); if (res) { dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, __LINE__, write ? 
"write" : "read", res); return -EIO; } return 0; } static int ps3flash_writeback(struct ps3_storage_device *dev) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); int res; if (!priv->dirty || priv->tag < 0) return 0; res = ps3flash_read_write_sectors(dev, priv->tag, 1); if (res) return res; priv->dirty = false; return 0; } static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); int res; if (start_sector == priv->tag) return 0; res = ps3flash_writeback(dev); if (res) return res; priv->tag = -1; res = ps3flash_read_write_sectors(dev, start_sector, 0); if (res) return res; priv->tag = start_sector; return 0; } static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin) { struct ps3_storage_device *dev = ps3flash_dev; loff_t res; mutex_lock(&file->f_mapping->host->i_mutex); switch (origin) { case 0: break; case 1: offset += file->f_pos; break; case 2: offset += dev->regions[dev->region_idx].size*dev->blk_size; break; default: offset = -1; } if (offset < 0) { res = -EINVAL; goto out; } file->f_pos = offset; res = file->f_pos; out: mutex_unlock(&file->f_mapping->host->i_mutex); return res; } static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf, size_t count, loff_t *pos) { struct ps3_storage_device *dev = ps3flash_dev; struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 size, sector, offset; int res; size_t remaining, n; const void *src; dev_dbg(&dev->sbd.core, "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n", __func__, __LINE__, count, *pos, userbuf, kernelbuf); size = dev->regions[dev->region_idx].size*dev->blk_size; if (*pos >= size || !count) return 0; if (*pos + count > size) { dev_dbg(&dev->sbd.core, "%s:%u Truncating count from %zu to %llu\n", __func__, __LINE__, count, size - *pos); count = size - *pos; } sector = *pos / dev->bounce_size * priv->chunk_sectors; offset = *pos % 
dev->bounce_size; remaining = count; do { n = min_t(u64, remaining, dev->bounce_size - offset); src = dev->bounce_buf + offset; mutex_lock(&priv->mutex); res = ps3flash_fetch(dev, sector); if (res) goto fail; dev_dbg(&dev->sbd.core, "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n", __func__, __LINE__, n, src, userbuf, kernelbuf); if (userbuf) { if (copy_to_user(userbuf, src, n)) { res = -EFAULT; goto fail; } userbuf += n; } if (kernelbuf) { memcpy(kernelbuf, src, n); kernelbuf += n; } mutex_unlock(&priv->mutex); *pos += n; remaining -= n; sector += priv->chunk_sectors; offset = 0; } while (remaining > 0); return count; fail: mutex_unlock(&priv->mutex); return res; } static ssize_t ps3flash_write(const char __user *userbuf, const void *kernelbuf, size_t count, loff_t *pos) { struct ps3_storage_device *dev = ps3flash_dev; struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 size, sector, offset; int res = 0; size_t remaining, n; void *dst; dev_dbg(&dev->sbd.core, "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n", __func__, __LINE__, count, *pos, userbuf, kernelbuf); size = dev->regions[dev->region_idx].size*dev->blk_size; if (*pos >= size || !count) return 0; if (*pos + count > size) { dev_dbg(&dev->sbd.core, "%s:%u Truncating count from %zu to %llu\n", __func__, __LINE__, count, size - *pos); count = size - *pos; } sector = *pos / dev->bounce_size * priv->chunk_sectors; offset = *pos % dev->bounce_size; remaining = count; do { n = min_t(u64, remaining, dev->bounce_size - offset); dst = dev->bounce_buf + offset; mutex_lock(&priv->mutex); if (n != dev->bounce_size) res = ps3flash_fetch(dev, sector); else if (sector != priv->tag) res = ps3flash_writeback(dev); if (res) goto fail; dev_dbg(&dev->sbd.core, "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n", __func__, __LINE__, n, userbuf, kernelbuf, dst); if (userbuf) { if (copy_from_user(dst, userbuf, n)) { res = -EFAULT; goto fail; } userbuf += n; } if (kernelbuf) { memcpy(dst, 
kernelbuf, n); kernelbuf += n; } priv->tag = sector; priv->dirty = true; mutex_unlock(&priv->mutex); *pos += n; remaining -= n; sector += priv->chunk_sectors; offset = 0; } while (remaining > 0); return count; fail: mutex_unlock(&priv->mutex); return res; } static ssize_t ps3flash_user_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { return ps3flash_read(buf, NULL, count, pos); } static ssize_t ps3flash_user_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { return ps3flash_write(buf, NULL, count, pos); } static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos) { return ps3flash_read(NULL, buf, count, &pos); } static ssize_t ps3flash_kernel_write(const void *buf, size_t count, loff_t pos) { ssize_t res; int wb; res = ps3flash_write(NULL, buf, count, &pos); if (res < 0) return res; /* Make kernel writes synchronous */ wb = ps3flash_writeback(ps3flash_dev); if (wb) return wb; return res; } static int ps3flash_flush(struct file *file, fl_owner_t id) { return ps3flash_writeback(ps3flash_dev); } static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_path.dentry->d_inode; int err; mutex_lock(&inode->i_mutex); err = ps3flash_writeback(ps3flash_dev); mutex_unlock(&inode->i_mutex); return err; } static irqreturn_t ps3flash_interrupt(int irq, void *data) { struct ps3_storage_device *dev = data; int res; u64 tag, status; res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); if (tag != dev->tag) dev_err(&dev->sbd.core, "%s:%u: tag mismatch, got %llx, expected %llx\n", __func__, __LINE__, tag, dev->tag); if (res) { dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", __func__, __LINE__, res, status); } else { dev->lv1_status = status; complete(&dev->done); } return IRQ_HANDLED; } static const struct file_operations ps3flash_fops = { .owner = THIS_MODULE, .llseek = ps3flash_llseek, .read = ps3flash_user_read, .write = 
ps3flash_user_write, .flush = ps3flash_flush, .fsync = ps3flash_fsync, }; static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = { .read = ps3flash_kernel_read, .write = ps3flash_kernel_write, }; static struct miscdevice ps3flash_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DEVICE_NAME, .fops = &ps3flash_fops, }; static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); struct ps3flash_private *priv; int error; unsigned long tmp; tmp = dev->regions[dev->region_idx].start*dev->blk_size; if (tmp % FLASH_BLOCK_SIZE) { dev_err(&dev->sbd.core, "%s:%u region start %lu is not aligned\n", __func__, __LINE__, tmp); return -EINVAL; } tmp = dev->regions[dev->region_idx].size*dev->blk_size; if (tmp % FLASH_BLOCK_SIZE) { dev_err(&dev->sbd.core, "%s:%u region size %lu is not aligned\n", __func__, __LINE__, tmp); return -EINVAL; } /* use static buffer, kmalloc cannot allocate 256 KiB */ if (!ps3flash_bounce_buffer.address) return -ENODEV; if (ps3flash_dev) { dev_err(&dev->sbd.core, "Only one FLASH device is supported\n"); return -EBUSY; } ps3flash_dev = dev; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { error = -ENOMEM; goto fail; } ps3_system_bus_set_drvdata(&dev->sbd, priv); mutex_init(&priv->mutex); priv->tag = -1; dev->bounce_size = ps3flash_bounce_buffer.size; dev->bounce_buf = ps3flash_bounce_buffer.address; priv->chunk_sectors = dev->bounce_size / dev->blk_size; error = ps3stor_setup(dev, ps3flash_interrupt); if (error) goto fail_free_priv; ps3flash_misc.parent = &dev->sbd.core; error = misc_register(&ps3flash_misc); if (error) { dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n", __func__, __LINE__, error); goto fail_teardown; } dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n", __func__, __LINE__, ps3flash_misc.minor); ps3_os_area_flash_register(&ps3flash_kernel_ops); return 0; fail_teardown: ps3stor_teardown(dev); fail_free_priv: kfree(priv); 
ps3_system_bus_set_drvdata(&dev->sbd, NULL); fail: ps3flash_dev = NULL; return error; } static int ps3flash_remove(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); ps3_os_area_flash_register(NULL); misc_deregister(&ps3flash_misc); ps3stor_teardown(dev); kfree(ps3_system_bus_get_drvdata(&dev->sbd)); ps3_system_bus_set_drvdata(&dev->sbd, NULL); ps3flash_dev = NULL; return 0; } static struct ps3_system_bus_driver ps3flash = { .match_id = PS3_MATCH_ID_STOR_FLASH, .core.name = DEVICE_NAME, .core.owner = THIS_MODULE, .probe = ps3flash_probe, .remove = ps3flash_remove, .shutdown = ps3flash_remove, }; static int __init ps3flash_init(void) { return ps3_system_bus_driver_register(&ps3flash); } static void __exit ps3flash_exit(void) { ps3_system_bus_driver_unregister(&ps3flash); } module_init(ps3flash_init); module_exit(ps3flash_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver"); MODULE_AUTHOR("Sony Corporation"); MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
gpl-2.0
aloksinha2001/rk3x_kernel_3.0.36
drivers/staging/comedi/drivers/pcl725.c
8282
2677
/*
 * comedi/drivers/pcl725.c
 * Driver for PCL725 and clones
 * David A. Schleef
 */
/*
Driver: pcl725
Description: Advantech PCL-725 (& compatibles)
Author: ds
Status: unknown
Devices: [Advantech] PCL-725 (pcl725)
*/

#include "../comedidev.h"

#include <linux/ioport.h>

/* Size of the I/O region and register offsets within it. */
#define PCL725_SIZE 2
#define PCL725_DO 0	/* digital output register */
#define PCL725_DI 1	/* digital input register */

static int pcl725_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int pcl725_detach(struct comedi_device *dev);
static struct comedi_driver driver_pcl725 = {
	.driver_name = "pcl725",
	.module = THIS_MODULE,
	.attach = pcl725_attach,
	.detach = pcl725_detach,
};

static int __init driver_pcl725_init_module(void)
{
	return comedi_driver_register(&driver_pcl725);
}

static void __exit driver_pcl725_cleanup_module(void)
{
	comedi_driver_unregister(&driver_pcl725);
}

module_init(driver_pcl725_init_module);
module_exit(driver_pcl725_cleanup_module);

/*
 * insn_bits handler for the digital output subdevice.
 * data[0] is a mask of channels to update, data[1] carries the new bit
 * values; the cached output state lives in s->state and is returned in
 * data[1].
 */
static int pcl725_do_insn(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	if (insn->n != 2)
		return -EINVAL;

	if (data[0]) {
		/* Update only the masked bits, then write the whole byte. */
		s->state &= ~data[0];
		s->state |= (data[0] & data[1]);
		outb(s->state, dev->iobase + PCL725_DO);
	}
	data[1] = s->state;

	return 2;
}

/*
 * insn_bits handler for the digital input subdevice.
 * All 8 input channels are read with a single port access.
 */
static int pcl725_di_insn(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	if (insn->n != 2)
		return -EINVAL;

	data[1] = inb(dev->iobase + PCL725_DI);

	return 2;
}

static int pcl725_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	unsigned long iobase;

	iobase = it->options[0];
	printk(KERN_INFO "comedi%d: pcl725: 0x%04lx ", dev->minor, iobase);
	if (!request_region(iobase, PCL725_SIZE, "pcl725")) {
		/*
		 * FIX: this continues the banner printed just above; without
		 * KERN_CONT it was emitted as a separate default-level line.
		 */
		printk(KERN_CONT "I/O port conflict\n");
		return -EIO;
	}
	dev->board_name = "pcl725";
	dev->iobase = iobase;
	dev->irq = 0;

	if (alloc_subdevices(dev, 2) < 0)
		return -ENOMEM;

	s = dev->subdevices + 0;
	/* do */
	s->type = COMEDI_SUBD_DO;
	s->subdev_flags = SDF_WRITABLE;
	s->maxdata = 1;
	s->n_chan = 8;
	s->insn_bits = pcl725_do_insn;
	s->range_table = &range_digital;

	s = dev->subdevices + 1;
	/* di */
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE;
	s->maxdata = 1;
	s->n_chan = 8;
	s->insn_bits = pcl725_di_insn;
	s->range_table = &range_digital;

	/* FIX: terminate the banner line started above (KERN_CONT). */
	printk(KERN_CONT "\n");

	return 0;
}

static int pcl725_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: pcl725: remove\n", dev->minor);

	if (dev->iobase)
		release_region(dev->iobase, PCL725_SIZE);

	return 0;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
TeamVee/SKernel_Vee
arch/mips/txx9/generic/smsc_fdc37m81x.c
8794
4978
/*
 * Interface for smsc fdc48m81x Super IO chip
 *
 * Author: MontaVista Software, Inc. source@mvista.com
 *
 * 2001-2003 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 * Copyright 2004 (c) MontaVista Software, Inc.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/txx9/smsc_fdc37m81x.h>

/* Common Registers */
#define SMSC_FDC37M81X_CONFIG_INDEX  0x00
#define SMSC_FDC37M81X_CONFIG_DATA   0x01
#define SMSC_FDC37M81X_CONF          0x02
#define SMSC_FDC37M81X_INDEX         0x03
#define SMSC_FDC37M81X_DNUM          0x07
#define SMSC_FDC37M81X_DID           0x20
#define SMSC_FDC37M81X_DREV          0x21
#define SMSC_FDC37M81X_PCNT          0x22
#define SMSC_FDC37M81X_PMGT          0x23
#define SMSC_FDC37M81X_OSC           0x24
#define SMSC_FDC37M81X_CONFPA0       0x26
#define SMSC_FDC37M81X_CONFPA1       0x27
#define SMSC_FDC37M81X_TEST4         0x2B
#define SMSC_FDC37M81X_TEST5         0x2C
#define SMSC_FDC37M81X_TEST1         0x2D
#define SMSC_FDC37M81X_TEST2         0x2E
#define SMSC_FDC37M81X_TEST3         0x2F

/* Logical device numbers */
#define SMSC_FDC37M81X_FDD           0x00
#define SMSC_FDC37M81X_SERIAL1       0x04
#define SMSC_FDC37M81X_SERIAL2       0x05
#define SMSC_FDC37M81X_KBD           0x07

/* Logical device Config Registers */
#define SMSC_FDC37M81X_ACTIVE        0x30
#define SMSC_FDC37M81X_BASEADDR0     0x60
#define SMSC_FDC37M81X_BASEADDR1     0x61
#define SMSC_FDC37M81X_INT           0x70
#define SMSC_FDC37M81X_INT2          0x72
#define SMSC_FDC37M81X_MODE          0xF0

/* Chip Config Values */
#define SMSC_FDC37M81X_CONFIG_ENTER  0x55
#define SMSC_FDC37M81X_CONFIG_EXIT   0xaa
#define SMSC_FDC37M81X_CHIP_ID       0x4d

/* I/O base of the chip's config port pair; 0 means "not probed/absent". */
static unsigned long g_smsc_fdc37m81x_base;

/* Read one config register via the index/data port pair. */
static inline unsigned char smsc_fdc37m81x_rd(unsigned char index)
{
	outb(index, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);

	return inb(g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA);
}

/*
 * Write one config register via the index/data port pair.
 * FIX: renamed from the misspelled smsc_dc37m81x_wr (missing "f") to
 * match every other smsc_fdc37m81x_* identifier in this file; the
 * symbol is static, so no external interface changes.
 */
static inline void smsc_fdc37m81x_wr(unsigned char index, unsigned char data)
{
	outb(index, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
	outb(data, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA);
}

/* Enter configuration mode (no-op if the chip was never found). */
void smsc_fdc37m81x_config_beg(void)
{
	if (g_smsc_fdc37m81x_base) {
		outb(SMSC_FDC37M81X_CONFIG_ENTER,
		     g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
	}
}

/* Leave configuration mode (no-op if the chip was never found). */
void smsc_fdc37m81x_config_end(void)
{
	if (g_smsc_fdc37m81x_base)
		outb(SMSC_FDC37M81X_CONFIG_EXIT,
		     g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX);
}

/* Read a config register; returns 0 if the chip was never found. */
u8 smsc_fdc37m81x_config_get(u8 reg)
{
	u8 val = 0;

	if (g_smsc_fdc37m81x_base)
		val = smsc_fdc37m81x_rd(reg);

	return val;
}

/* Write a config register; silently ignored if the chip was never found. */
void smsc_fdc37m81x_config_set(u8 reg, u8 val)
{
	if (g_smsc_fdc37m81x_base)
		smsc_fdc37m81x_wr(reg, val);
}

/*
 * Probe for the chip at @port. Returns the accepted base address, or 0
 * if the device ID does not match (in which case the cached base is
 * cleared again).
 */
unsigned long __init smsc_fdc37m81x_init(unsigned long port)
{
	const int field = sizeof(unsigned long) * 2;
	u8 chip_id;

	if (g_smsc_fdc37m81x_base)
		printk(KERN_WARNING "%s: stepping on old base=0x%0*lx\n",
		       __func__, field, g_smsc_fdc37m81x_base);

	g_smsc_fdc37m81x_base = port;

	smsc_fdc37m81x_config_beg();

	chip_id = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DID);
	if (chip_id == SMSC_FDC37M81X_CHIP_ID)
		smsc_fdc37m81x_config_end();
	else {
		printk(KERN_WARNING "%s: unknown chip id 0x%02x\n", __func__,
		       chip_id);
		g_smsc_fdc37m81x_base = 0;
	}

	return g_smsc_fdc37m81x_base;
}

#ifdef DEBUG
/* Dump a single register; @key tags the log lines, @dev is only logged. */
static void smsc_fdc37m81x_config_dump_one(const char *key, u8 dev, u8 reg)
{
	printk(KERN_INFO "%s: dev=0x%02x reg=0x%02x val=0x%02x\n", key, dev,
	       reg, smsc_fdc37m81x_rd(reg));
}

/* Dump the common registers and the keyboard logical device's config. */
void smsc_fdc37m81x_config_dump(void)
{
	u8 orig;
	const char *fname = __func__;

	smsc_fdc37m81x_config_beg();

	/* Remember the selected logical device so it can be restored. */
	orig = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DNUM);

	printk(KERN_INFO "%s: common\n", fname);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_DNUM);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_DID);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_DREV);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_PCNT);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE,
				       SMSC_FDC37M81X_PMGT);

	printk(KERN_INFO "%s: keyboard\n", fname);
	smsc_fdc37m81x_wr(SMSC_FDC37M81X_DNUM, SMSC_FDC37M81X_KBD);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_ACTIVE);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_INT);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_INT2);
	smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD,
				       SMSC_FDC37M81X_LDCR_F0);

	smsc_fdc37m81x_wr(SMSC_FDC37M81X_DNUM, orig);

	smsc_fdc37m81x_config_end();
}
#endif
gpl-2.0
12thmantec/linux-3.5
arch/powerpc/platforms/52xx/lite5200_pm.c
9050
6385
#include <linux/init.h>
#include <linux/suspend.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
#include <asm/switch_to.h>

/* defined in lite5200_sleep.S and only used here */
extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);

static struct mpc52xx_cdm __iomem *cdm;
static struct mpc52xx_intr __iomem *pic;
static struct mpc52xx_sdma __iomem *bes;
static struct mpc52xx_xlb __iomem *xlb;
static struct mpc52xx_gpio __iomem *gps;
static struct mpc52xx_gpio_wkup __iomem *gpw;
static void __iomem *pci;
static void __iomem *sram;
static const int sram_size = 0x4000;	/* 16 kBytes */
static void __iomem *mbar;

static suspend_state_t lite5200_pm_target_state;

static int lite5200_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		return 1;
	default:
		return 0;
	}
}

static int lite5200_pm_begin(suspend_state_t state)
{
	if (lite5200_pm_valid(state)) {
		lite5200_pm_target_state = state;
		return 0;
	}
	return -EINVAL;
}

static int lite5200_pm_prepare(void)
{
	struct device_node *np;
	const struct of_device_id immr_ids[] = {
		{ .compatible = "fsl,mpc5200-immr", },
		{ .compatible = "fsl,mpc5200b-immr", },
		{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
		{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
		{}
	};
	u64 regaddr64 = 0;
	const u32 *regaddr_p;

	/* deep sleep? let mpc52xx code handle that */
	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
		return mpc52xx_pm_prepare();

	if (lite5200_pm_target_state != PM_SUSPEND_MEM)
		return -EINVAL;

	/* map registers */
	np = of_find_matching_node(NULL, immr_ids);
	regaddr_p = of_get_address(np, 0, NULL, NULL);
	if (regaddr_p)
		regaddr64 = of_translate_address(np, regaddr_p);
	of_node_put(np);

	mbar = ioremap((u32) regaddr64, 0xC000);
	if (!mbar) {
		printk(KERN_ERR "%s:%i Error mapping registers\n",
		       __func__, __LINE__);
		return -ENOSYS;
	}

	cdm = mbar + 0x200;
	pic = mbar + 0x500;
	gps = mbar + 0xb00;
	gpw = mbar + 0xc00;
	pci = mbar + 0xd00;
	bes = mbar + 0x1200;
	xlb = mbar + 0x1f00;
	sram = mbar + 0x8000;

	return 0;
}

/* save and restore registers not bound to any real devices */
static struct mpc52xx_cdm scdm;
static struct mpc52xx_intr spic;
static struct mpc52xx_sdma sbes;
static struct mpc52xx_xlb sxlb;
static struct mpc52xx_gpio sgps;
static struct mpc52xx_gpio_wkup sgpw;
static char spci[0x200];
/*
 * FIX: saved_sram was used by lite5200_save_regs()/lite5200_restore_regs()
 * but never declared in this file, which is a compile error.  Provide a
 * backing buffer sized to match sram_size (0x4000 bytes).
 */
static char saved_sram[0x4000];

/* Snapshot SoC registers and on-chip SRAM before entering low power. */
static void lite5200_save_regs(void)
{
	_memcpy_fromio(&spic, pic, sizeof(*pic));
	_memcpy_fromio(&sbes, bes, sizeof(*bes));
	_memcpy_fromio(&scdm, cdm, sizeof(*cdm));
	_memcpy_fromio(&sxlb, xlb, sizeof(*xlb));
	_memcpy_fromio(&sgps, gps, sizeof(*gps));
	_memcpy_fromio(&sgpw, gpw, sizeof(*gpw));
	_memcpy_fromio(spci, pci, 0x200);

	_memcpy_fromio(saved_sram, sram, sram_size);
}

/* Restore the snapshot taken by lite5200_save_regs() after resume. */
static void lite5200_restore_regs(void)
{
	int i;
	_memcpy_toio(sram, saved_sram, sram_size);

	/* PCI Configuration */
	_memcpy_toio(pci, spci, 0x200);

	/*
	 * GPIOs. Interrupt Master Enable has higher address then other
	 * registers, so just memcpy is ok.
	 */
	_memcpy_toio(gpw, &sgpw, sizeof(*gpw));
	_memcpy_toio(gps, &sgps, sizeof(*gps));

	/* XLB Arbitrer */
	out_be32(&xlb->snoop_window, sxlb.snoop_window);
	out_be32(&xlb->master_priority, sxlb.master_priority);
	out_be32(&xlb->master_pri_enable, sxlb.master_pri_enable);

	/* enable */
	out_be32(&xlb->int_enable, sxlb.int_enable);
	out_be32(&xlb->config, sxlb.config);

	/* CDM - Clock Distribution Module */
	out_8(&cdm->ipb_clk_sel, scdm.ipb_clk_sel);
	out_8(&cdm->pci_clk_sel, scdm.pci_clk_sel);
	out_8(&cdm->ext_48mhz_en, scdm.ext_48mhz_en);
	out_8(&cdm->fd_enable, scdm.fd_enable);
	out_be16(&cdm->fd_counters, scdm.fd_counters);
	out_be32(&cdm->clk_enables, scdm.clk_enables);
	out_8(&cdm->osc_disable, scdm.osc_disable);
	out_be16(&cdm->mclken_div_psc1, scdm.mclken_div_psc1);
	out_be16(&cdm->mclken_div_psc2, scdm.mclken_div_psc2);
	out_be16(&cdm->mclken_div_psc3, scdm.mclken_div_psc3);
	out_be16(&cdm->mclken_div_psc6, scdm.mclken_div_psc6);

	/* BESTCOMM */
	out_be32(&bes->taskBar, sbes.taskBar);
	out_be32(&bes->currentPointer, sbes.currentPointer);
	out_be32(&bes->endPointer, sbes.endPointer);
	out_be32(&bes->variablePointer, sbes.variablePointer);
	out_8(&bes->IntVect1, sbes.IntVect1);
	out_8(&bes->IntVect2, sbes.IntVect2);
	out_be16(&bes->PtdCntrl, sbes.PtdCntrl);
	for (i = 0; i < 32; i++)
		out_8(&bes->ipr[i], sbes.ipr[i]);
	out_be32(&bes->cReqSelect, sbes.cReqSelect);
	out_be32(&bes->task_size0, sbes.task_size0);
	out_be32(&bes->task_size1, sbes.task_size1);
	out_be32(&bes->MDEDebug, sbes.MDEDebug);
	out_be32(&bes->ADSDebug, sbes.ADSDebug);
	out_be32(&bes->Value1, sbes.Value1);
	out_be32(&bes->Value2, sbes.Value2);
	out_be32(&bes->Control, sbes.Control);
	out_be32(&bes->Status, sbes.Status);
	out_be32(&bes->PTDDebug, sbes.PTDDebug);
	/* restore tasks */
	for (i = 0; i < 16; i++)
		out_be16(&bes->tcr[i], sbes.tcr[i]);
	/* enable interrupts */
	out_be32(&bes->IntPend, sbes.IntPend);
	out_be32(&bes->IntMask, sbes.IntMask);

	/* PIC */
	out_be32(&pic->per_pri1, spic.per_pri1);
	out_be32(&pic->per_pri2, spic.per_pri2);
	out_be32(&pic->per_pri3, spic.per_pri3);
	out_be32(&pic->main_pri1, spic.main_pri1);
	out_be32(&pic->main_pri2, spic.main_pri2);
	out_be32(&pic->enc_status, spic.enc_status);
	/* unmask and enable interrupts */
	out_be32(&pic->per_mask, spic.per_mask);
	out_be32(&pic->main_mask, spic.main_mask);
	out_be32(&pic->ctrl, spic.ctrl);
}

static int lite5200_pm_enter(suspend_state_t state)
{
	/* deep sleep? let mpc52xx code handle that */
	if (state == PM_SUSPEND_STANDBY) {
		return mpc52xx_pm_enter(state);
	}

	lite5200_save_regs();

	/* effectively save FP regs */
	enable_kernel_fp();

	lite5200_low_power(sram, mbar);

	lite5200_restore_regs();

	iounmap(mbar);
	return 0;
}

static void lite5200_pm_finish(void)
{
	/* deep sleep? let mpc52xx code handle that */
	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
		mpc52xx_pm_finish();
}

static void lite5200_pm_end(void)
{
	lite5200_pm_target_state = PM_SUSPEND_ON;
}

static const struct platform_suspend_ops lite5200_pm_ops = {
	.valid		= lite5200_pm_valid,
	.begin		= lite5200_pm_begin,
	.prepare	= lite5200_pm_prepare,
	.enter		= lite5200_pm_enter,
	.finish		= lite5200_pm_finish,
	.end		= lite5200_pm_end,
};

int __init lite5200_pm_init(void)
{
	suspend_set_ops(&lite5200_pm_ops);
	return 0;
}
gpl-2.0
TeamTwisted/kernel_lge_hammerhead
arch/powerpc/platforms/52xx/lite5200_pm.c
9050
6385
#include <linux/init.h>
#include <linux/suspend.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
#include <asm/switch_to.h>

/* defined in lite5200_sleep.S and only used here */
extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);

static struct mpc52xx_cdm __iomem *cdm;
static struct mpc52xx_intr __iomem *pic;
static struct mpc52xx_sdma __iomem *bes;
static struct mpc52xx_xlb __iomem *xlb;
static struct mpc52xx_gpio __iomem *gps;
static struct mpc52xx_gpio_wkup __iomem *gpw;
static void __iomem *pci;
static void __iomem *sram;
static const int sram_size = 0x4000;	/* 16 kBytes */
static void __iomem *mbar;

static suspend_state_t lite5200_pm_target_state;

static int lite5200_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		return 1;
	default:
		return 0;
	}
}

static int lite5200_pm_begin(suspend_state_t state)
{
	if (lite5200_pm_valid(state)) {
		lite5200_pm_target_state = state;
		return 0;
	}
	return -EINVAL;
}

static int lite5200_pm_prepare(void)
{
	struct device_node *np;
	const struct of_device_id immr_ids[] = {
		{ .compatible = "fsl,mpc5200-immr", },
		{ .compatible = "fsl,mpc5200b-immr", },
		{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
		{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
		{}
	};
	u64 regaddr64 = 0;
	const u32 *regaddr_p;

	/* deep sleep? let mpc52xx code handle that */
	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
		return mpc52xx_pm_prepare();

	if (lite5200_pm_target_state != PM_SUSPEND_MEM)
		return -EINVAL;

	/* map registers */
	np = of_find_matching_node(NULL, immr_ids);
	regaddr_p = of_get_address(np, 0, NULL, NULL);
	if (regaddr_p)
		regaddr64 = of_translate_address(np, regaddr_p);
	of_node_put(np);

	mbar = ioremap((u32) regaddr64, 0xC000);
	if (!mbar) {
		printk(KERN_ERR "%s:%i Error mapping registers\n",
		       __func__, __LINE__);
		return -ENOSYS;
	}

	cdm = mbar + 0x200;
	pic = mbar + 0x500;
	gps = mbar + 0xb00;
	gpw = mbar + 0xc00;
	pci = mbar + 0xd00;
	bes = mbar + 0x1200;
	xlb = mbar + 0x1f00;
	sram = mbar + 0x8000;

	return 0;
}

/* save and restore registers not bound to any real devices */
static struct mpc52xx_cdm scdm;
static struct mpc52xx_intr spic;
static struct mpc52xx_sdma sbes;
static struct mpc52xx_xlb sxlb;
static struct mpc52xx_gpio sgps;
static struct mpc52xx_gpio_wkup sgpw;
static char spci[0x200];
/*
 * FIX: saved_sram was used by lite5200_save_regs()/lite5200_restore_regs()
 * but never declared in this file, which is a compile error.  Provide a
 * backing buffer sized to match sram_size (0x4000 bytes).
 */
static char saved_sram[0x4000];

/* Snapshot SoC registers and on-chip SRAM before entering low power. */
static void lite5200_save_regs(void)
{
	_memcpy_fromio(&spic, pic, sizeof(*pic));
	_memcpy_fromio(&sbes, bes, sizeof(*bes));
	_memcpy_fromio(&scdm, cdm, sizeof(*cdm));
	_memcpy_fromio(&sxlb, xlb, sizeof(*xlb));
	_memcpy_fromio(&sgps, gps, sizeof(*gps));
	_memcpy_fromio(&sgpw, gpw, sizeof(*gpw));
	_memcpy_fromio(spci, pci, 0x200);

	_memcpy_fromio(saved_sram, sram, sram_size);
}

/* Restore the snapshot taken by lite5200_save_regs() after resume. */
static void lite5200_restore_regs(void)
{
	int i;
	_memcpy_toio(sram, saved_sram, sram_size);

	/* PCI Configuration */
	_memcpy_toio(pci, spci, 0x200);

	/*
	 * GPIOs. Interrupt Master Enable has higher address then other
	 * registers, so just memcpy is ok.
	 */
	_memcpy_toio(gpw, &sgpw, sizeof(*gpw));
	_memcpy_toio(gps, &sgps, sizeof(*gps));

	/* XLB Arbitrer */
	out_be32(&xlb->snoop_window, sxlb.snoop_window);
	out_be32(&xlb->master_priority, sxlb.master_priority);
	out_be32(&xlb->master_pri_enable, sxlb.master_pri_enable);

	/* enable */
	out_be32(&xlb->int_enable, sxlb.int_enable);
	out_be32(&xlb->config, sxlb.config);

	/* CDM - Clock Distribution Module */
	out_8(&cdm->ipb_clk_sel, scdm.ipb_clk_sel);
	out_8(&cdm->pci_clk_sel, scdm.pci_clk_sel);
	out_8(&cdm->ext_48mhz_en, scdm.ext_48mhz_en);
	out_8(&cdm->fd_enable, scdm.fd_enable);
	out_be16(&cdm->fd_counters, scdm.fd_counters);
	out_be32(&cdm->clk_enables, scdm.clk_enables);
	out_8(&cdm->osc_disable, scdm.osc_disable);
	out_be16(&cdm->mclken_div_psc1, scdm.mclken_div_psc1);
	out_be16(&cdm->mclken_div_psc2, scdm.mclken_div_psc2);
	out_be16(&cdm->mclken_div_psc3, scdm.mclken_div_psc3);
	out_be16(&cdm->mclken_div_psc6, scdm.mclken_div_psc6);

	/* BESTCOMM */
	out_be32(&bes->taskBar, sbes.taskBar);
	out_be32(&bes->currentPointer, sbes.currentPointer);
	out_be32(&bes->endPointer, sbes.endPointer);
	out_be32(&bes->variablePointer, sbes.variablePointer);
	out_8(&bes->IntVect1, sbes.IntVect1);
	out_8(&bes->IntVect2, sbes.IntVect2);
	out_be16(&bes->PtdCntrl, sbes.PtdCntrl);
	for (i = 0; i < 32; i++)
		out_8(&bes->ipr[i], sbes.ipr[i]);
	out_be32(&bes->cReqSelect, sbes.cReqSelect);
	out_be32(&bes->task_size0, sbes.task_size0);
	out_be32(&bes->task_size1, sbes.task_size1);
	out_be32(&bes->MDEDebug, sbes.MDEDebug);
	out_be32(&bes->ADSDebug, sbes.ADSDebug);
	out_be32(&bes->Value1, sbes.Value1);
	out_be32(&bes->Value2, sbes.Value2);
	out_be32(&bes->Control, sbes.Control);
	out_be32(&bes->Status, sbes.Status);
	out_be32(&bes->PTDDebug, sbes.PTDDebug);
	/* restore tasks */
	for (i = 0; i < 16; i++)
		out_be16(&bes->tcr[i], sbes.tcr[i]);
	/* enable interrupts */
	out_be32(&bes->IntPend, sbes.IntPend);
	out_be32(&bes->IntMask, sbes.IntMask);

	/* PIC */
	out_be32(&pic->per_pri1, spic.per_pri1);
	out_be32(&pic->per_pri2, spic.per_pri2);
	out_be32(&pic->per_pri3, spic.per_pri3);
	out_be32(&pic->main_pri1, spic.main_pri1);
	out_be32(&pic->main_pri2, spic.main_pri2);
	out_be32(&pic->enc_status, spic.enc_status);
	/* unmask and enable interrupts */
	out_be32(&pic->per_mask, spic.per_mask);
	out_be32(&pic->main_mask, spic.main_mask);
	out_be32(&pic->ctrl, spic.ctrl);
}

static int lite5200_pm_enter(suspend_state_t state)
{
	/* deep sleep? let mpc52xx code handle that */
	if (state == PM_SUSPEND_STANDBY) {
		return mpc52xx_pm_enter(state);
	}

	lite5200_save_regs();

	/* effectively save FP regs */
	enable_kernel_fp();

	lite5200_low_power(sram, mbar);

	lite5200_restore_regs();

	iounmap(mbar);
	return 0;
}

static void lite5200_pm_finish(void)
{
	/* deep sleep? let mpc52xx code handle that */
	if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
		mpc52xx_pm_finish();
}

static void lite5200_pm_end(void)
{
	lite5200_pm_target_state = PM_SUSPEND_ON;
}

static const struct platform_suspend_ops lite5200_pm_ops = {
	.valid		= lite5200_pm_valid,
	.begin		= lite5200_pm_begin,
	.prepare	= lite5200_pm_prepare,
	.enter		= lite5200_pm_enter,
	.finish		= lite5200_pm_finish,
	.end		= lite5200_pm_end,
};

int __init lite5200_pm_init(void)
{
	suspend_set_ops(&lite5200_pm_ops);
	return 0;
}
gpl-2.0
VentureROM-Legacy/android_kernel_lge_d85x
net/bridge/netfilter/ebt_stp.c
9050
5003
/*
 *  ebt_stp
 *
 *	Authors:
 *	Bart De Schuymer <bdschuym@pandora.be>
 *	Stephen Hemminger <shemminger@osdl.org>
 *
 *  July, 2003
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_stp.h>

#define BPDU_TYPE_CONFIG 0
#define BPDU_TYPE_TCN 0x80

/* LLC header + the first bytes of an 802.1D BPDU, as seen at skb offset 0. */
struct stp_header {
	uint8_t dsap;
	uint8_t ssap;
	uint8_t ctrl;
	uint8_t pid;
	uint8_t vers;
	uint8_t type;
};

/* Body of a configuration BPDU; multi-byte fields are big-endian on the wire. */
struct stp_config_pdu {
	uint8_t flags;
	uint8_t root[8];	/* 2-byte priority + 6-byte bridge address */
	uint8_t root_cost[4];
	uint8_t sender[8];	/* 2-byte priority + 6-byte bridge address */
	uint8_t port[2];
	uint8_t msg_age[2];
	uint8_t max_age[2];
	uint8_t hello_time[2];
	uint8_t forward_delay[2];
};

/* Assemble host-order values from unaligned big-endian byte arrays. */
#define NR16(p) (p[0] << 8 | p[1])
#define NR32(p) ((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])

/*
 * Match a configuration BPDU against the user-supplied ranges/masks.
 * Each test is only applied when its bit is set in info->bitmask, and
 * FWINV() inverts the outcome when the corresponding invert flag is set.
 * Returns false as soon as any enabled test fails.
 */
static bool ebt_filter_config(const struct ebt_stp_info *info,
			      const struct stp_config_pdu *stpc)
{
	const struct ebt_stp_config_info *c;
	uint16_t v16;
	uint32_t v32;
	int verdict, i;

	c = &info->config;
	if ((info->bitmask & EBT_STP_FLAGS) &&
	    FWINV(c->flags != stpc->flags, EBT_STP_FLAGS))
		return false;
	if (info->bitmask & EBT_STP_ROOTPRIO) {
		/* first two bytes of "root" are the bridge priority */
		v16 = NR16(stpc->root);
		if (FWINV(v16 < c->root_priol ||
		    v16 > c->root_priou, EBT_STP_ROOTPRIO))
			return false;
	}
	if (info->bitmask & EBT_STP_ROOTADDR) {
		/* bytes 2..7 of "root" are the bridge MAC; masked compare */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (stpc->root[2+i] ^ c->root_addr[i]) &
				   c->root_addrmsk[i];
		if (FWINV(verdict != 0, EBT_STP_ROOTADDR))
			return false;
	}
	if (info->bitmask & EBT_STP_ROOTCOST) {
		v32 = NR32(stpc->root_cost);
		if (FWINV(v32 < c->root_costl ||
		    v32 > c->root_costu, EBT_STP_ROOTCOST))
			return false;
	}
	if (info->bitmask & EBT_STP_SENDERPRIO) {
		v16 = NR16(stpc->sender);
		if (FWINV(v16 < c->sender_priol ||
		    v16 > c->sender_priou, EBT_STP_SENDERPRIO))
			return false;
	}
	if (info->bitmask & EBT_STP_SENDERADDR) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (stpc->sender[2+i] ^ c->sender_addr[i]) &
				   c->sender_addrmsk[i];
		if (FWINV(verdict != 0, EBT_STP_SENDERADDR))
			return false;
	}
	if (info->bitmask & EBT_STP_PORT) {
		v16 = NR16(stpc->port);
		if (FWINV(v16 < c->portl ||
		    v16 > c->portu, EBT_STP_PORT))
			return false;
	}
	if (info->bitmask & EBT_STP_MSGAGE) {
		v16 = NR16(stpc->msg_age);
		if (FWINV(v16 < c->msg_agel ||
		    v16 > c->msg_ageu, EBT_STP_MSGAGE))
			return false;
	}
	if (info->bitmask & EBT_STP_MAXAGE) {
		v16 = NR16(stpc->max_age);
		if (FWINV(v16 < c->max_agel ||
		    v16 > c->max_ageu, EBT_STP_MAXAGE))
			return false;
	}
	if (info->bitmask & EBT_STP_HELLOTIME) {
		v16 = NR16(stpc->hello_time);
		if (FWINV(v16 < c->hello_timel ||
		    v16 > c->hello_timeu, EBT_STP_HELLOTIME))
			return false;
	}
	if (info->bitmask & EBT_STP_FWDD) {
		v16 = NR16(stpc->forward_delay);
		if (FWINV(v16 < c->forward_delayl ||
		    v16 > c->forward_delayu, EBT_STP_FWDD))
			return false;
	}
	return true;
}

/*
 * Main match routine: verify the frame carries an STP BPDU (fixed LLC
 * header 42 42 03, protocol id 0, version 0), optionally match the BPDU
 * type, and for configuration BPDUs delegate the field tests to
 * ebt_filter_config().
 */
static bool
ebt_stp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct ebt_stp_info *info = par->matchinfo;
	const struct stp_header *sp;
	struct stp_header _stph;
	const uint8_t header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};

	sp = skb_header_pointer(skb, 0, sizeof(_stph), &_stph);
	if (sp == NULL)
		return false;

	/* The stp code only considers these */
	if (memcmp(sp, header, sizeof(header)))
		return false;

	if (info->bitmask & EBT_STP_TYPE &&
	    FWINV(info->type != sp->type, EBT_STP_TYPE))
		return false;

	if (sp->type == BPDU_TYPE_CONFIG &&
	    info->bitmask & EBT_STP_CONFIG_MASK) {
		const struct stp_config_pdu *st;
		struct stp_config_pdu _stpc;

		st = skb_header_pointer(skb, sizeof(_stph),
					sizeof(_stpc), &_stpc);
		if (st == NULL)
			return false;
		return ebt_filter_config(info, st);
	}
	return true;
}

/*
 * checkentry: reject invalid flag combinations and require that the rule
 * can only see STP frames, i.e. it must match exactly on the bridge
 * group address 01:80:c2:00:00:00 as destination.
 */
static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
{
	const struct ebt_stp_info *info = par->matchinfo;
	const uint8_t bridge_ula[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
	const uint8_t msk[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	const struct ebt_entry *e = par->entryinfo;

	if (info->bitmask & ~EBT_STP_MASK || info->invflags & ~EBT_STP_MASK ||
	    !(info->bitmask & EBT_STP_MASK))
		return -EINVAL;
	/* Make sure the match only receives stp frames */
	if (compare_ether_addr(e->destmac, bridge_ula) ||
	    compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
		return -EINVAL;

	return 0;
}

static struct xt_match ebt_stp_mt_reg __read_mostly = {
	.name		= "stp",
	.revision	= 0,
	.family		= NFPROTO_BRIDGE,
	.match		= ebt_stp_mt,
	.checkentry	= ebt_stp_mt_check,
	.matchsize	= sizeof(struct ebt_stp_info),
	.me		= THIS_MODULE,
};

static int __init ebt_stp_init(void)
{
	return xt_register_match(&ebt_stp_mt_reg);
}

static void __exit ebt_stp_fini(void)
{
	xt_unregister_match(&ebt_stp_mt_reg);
}

module_init(ebt_stp_init);
module_exit(ebt_stp_fini);
MODULE_DESCRIPTION("Ebtables: Spanning Tree Protocol packet match");
MODULE_LICENSE("GPL");
gpl-2.0
davidmueller13/android_kernel_samsung_lt03lte
arch/mips/powertv/reset.c
10842
1425
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 * Portions copyright (C) 2009 Cisco Systems, Inc.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/reboot.h>			/* Not included by linux/reboot.h */

#ifdef CONFIG_BOOTLOADER_DRIVER
#include <asm/mach-powertv/kbldr.h>
#endif

#include <asm/mach-powertv/asic_regs.h>
#include "reset.h"

/*
 * Machine-restart hook for PowerTV boards.
 *
 * @command: restart command string from userspace; unused here.
 *
 * With a bootloader driver configured, the bootloader performs the
 * reset; otherwise the board is rebooted by poking the ASIC watchdog
 * register, which triggers an immediate hardware reset.
 */
static void mips_machine_restart(char *command)
{
#ifdef CONFIG_BOOTLOADER_DRIVER
	/*
	 * Call the bootloader's reset function to ensure
	 * that persistent data is flushed before hard reset
	 */
	kbldr_SetCauseAndReset();
#else
	writel(0x1, asic_reg_addr(watchdog));
#endif
}

/* Install the restart hook; called once during platform setup. */
void mips_reboot_setup(void)
{
	_machine_restart = mips_machine_restart;
}
gpl-2.0
AICP/kernel_samsung_exynos5410
arch/mips/sibyte/common/cfe_console.c
13658
1737
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/console.h>

#include <asm/sibyte/board.h>

#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>

extern int cfe_cons_handle;

/*
 * Write @count bytes of @str to the CFE firmware console.  '\n' is
 * translated to "\n\r" for the firmware terminal, and short writes are
 * retried until the chunk is fully flushed.
 */
static void cfe_console_write(struct console *cons, const char *str,
		       unsigned int count)
{
	int i, last, written;

	for (i = 0, last = 0; i < count; i++) {
		if (!str[i])
			/* XXXKW can/should this ever happen? */
			return;
		if (str[i] == '\n') {
			do {
				written = cfe_write(cfe_cons_handle,
						    &str[last], i - last);
				/*
				 * FIX: a negative return is a firmware
				 * error.  The old code fell through and
				 * added it to 'last', corrupting the
				 * index and risking an endless retry
				 * loop; bail out instead.
				 */
				if (written < 0)
					return;
				last += written;
			} while (last < i);
			while (cfe_write(cfe_cons_handle, "\r", 1) <= 0)
				;
		}
	}
	if (last != count) {
		do {
			written = cfe_write(cfe_cons_handle, &str[last],
					    count - last);
			if (written < 0)
				return;	/* firmware error: drop the rest */
			last += written;
		} while (last < count);
	}
}

/*
 * Verify that CFE's BOOT_CONSOLE points at a device we can drive and
 * set the board LEDs to show which console was picked.  Returns
 * -ENODEV for unknown console devices.
 */
static int cfe_console_setup(struct console *cons, char *str)
{
	char consdev[32];
	/* XXXKW think about interaction with 'console=' cmdline arg */
	/* If none of the console options are configured, the build will break. */
	if (cfe_getenv("BOOT_CONSOLE", consdev, 32) >= 0) {
#ifdef CONFIG_SERIAL_SB1250_DUART
		if (!strcmp(consdev, "uart0")) {
			setleds("u0cn");
		} else if (!strcmp(consdev, "uart1")) {
			setleds("u1cn");
		} else
#endif
#ifdef CONFIG_VGA_CONSOLE
		if (!strcmp(consdev, "pcconsole0")) {
			setleds("pccn");
		} else
#endif
			return -ENODEV;
	}
	return 0;
}

static struct console sb1250_cfe_cons = {
	.name		= "cfe",
	.write		= cfe_console_write,
	.setup		= cfe_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

static int __init sb1250_cfe_console_init(void)
{
	register_console(&sb1250_cfe_cons);
	return 0;
}

console_initcall(sb1250_cfe_console_init);
gpl-2.0
rensuiyi/boot2013RC1
drivers/gpio/spear_gpio.c
91
2221
/*
 * Copyright (C) 2012 Stefan Roese <sr@denx.de>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/*
 * Driver for SPEAr600 GPIO controller
 */

#include <common.h>
#include <asm/arch/hardware.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <errno.h>

/* Program the direction register bit for @gpio. */
static int gpio_direction(unsigned gpio,
			  enum gpio_direction direction)
{
	struct gpio_regs *regs = (struct gpio_regs *)CONFIG_GPIO_BASE;
	u32 val;

	val = readl(&regs->gpiodir);

	if (direction == GPIO_DIRECTION_OUT)
		val |= 1 << gpio;
	else
		val &= ~(1 << gpio);

	writel(val, &regs->gpiodir);

	return 0;
}

/*
 * Drive @gpio to @value.  The data register is bit-addressed: the
 * gpiodata[] index derived from the pin selects which bit the write
 * affects, so writing 0 clears only that pin.
 */
int gpio_set_value(unsigned gpio, int value)
{
	struct gpio_regs *regs = (struct gpio_regs *)CONFIG_GPIO_BASE;

	/*
	 * FIX: the previous code unconditionally wrote (1 << gpio), so
	 * the pin could never be driven low.  Honour @value.
	 */
	if (value)
		writel(1 << gpio, &regs->gpiodata[DATA_REG_ADDR(gpio)]);
	else
		writel(0, &regs->gpiodata[DATA_REG_ADDR(gpio)]);

	return 0;
}

/* Read the current level of @gpio (0 or 1). */
int gpio_get_value(unsigned gpio)
{
	struct gpio_regs *regs = (struct gpio_regs *)CONFIG_GPIO_BASE;
	u32 val;

	val = readl(&regs->gpiodata[DATA_REG_ADDR(gpio)]);

	return !!val;
}

/* Validate the pin number; no physical claiming is needed. */
int gpio_request(unsigned gpio, const char *label)
{
	if (gpio >= SPEAR_GPIO_COUNT)
		return -EINVAL;

	return 0;
}

int gpio_free(unsigned gpio)
{
	return 0;
}

void gpio_toggle_value(unsigned gpio)
{
	gpio_set_value(gpio, !gpio_get_value(gpio));
}

int gpio_direction_input(unsigned gpio)
{
	return gpio_direction(gpio, GPIO_DIRECTION_IN);
}

int gpio_direction_output(unsigned gpio, int value)
{
	int ret = gpio_direction(gpio, GPIO_DIRECTION_OUT);

	if (ret < 0)
		return ret;

	gpio_set_value(gpio, value);
	return 0;
}
gpl-2.0
TeamLGOG/android_kernel_lge_f320k
arch/arm/mach-msm/board-8930-gpiomux.c
347
17539
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/gpiomux.h> #include <mach/socinfo.h> #include "devices.h" #include "board-8930.h" /* The SPI configurations apply to GSBI 1*/ static struct gpiomux_setting spi_active = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_12MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting spi_suspended_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting gsbi3_suspended_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }; static struct gpiomux_setting gsbi3_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gsbi5 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting gsbi9 = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting gsbi10 = { .func = GPIOMUX_FUNC_2, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting gsbi12 = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting cdc_mclk = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting audio_auxpcm[] = { /* Suspended state */ { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, /* Active 
state */ { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, }; static struct gpiomux_setting audio_mbhc = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting audio_spkr_boost = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) static struct gpiomux_setting gpio_eth_suspend_1_cfg = { .pull = GPIOMUX_PULL_DOWN, .drv = GPIOMUX_DRV_2MA, .func = GPIOMUX_FUNC_GPIO, }; static struct gpiomux_setting gpio_eth_suspend_2_cfg = { .pull = GPIOMUX_PULL_NONE, .drv = GPIOMUX_DRV_2MA, .func = GPIOMUX_FUNC_GPIO, }; #endif static struct gpiomux_setting slimbus = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_KEEPER, }; static struct gpiomux_setting wcnss_5wire_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting wcnss_5wire_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting atmel_resout_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting atmel_resout_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting atmel_ldo_en_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting atmel_ldo_en_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting atmel_int_act_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting atmel_int_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #ifdef MSM8930_PHASE_2 static struct gpiomux_setting hsusb_sus_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = 
GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }; static struct msm_gpiomux_config msm8930_hsusb_configs[] = { { .gpio = 63, /* HSUSB_EXTERNAL_5V_LDO_EN */ .settings = { [GPIOMUX_SUSPENDED] = &hsusb_sus_cfg, }, }, { .gpio = 97, /* HSUSB_5V_EN */ .settings = { [GPIOMUX_SUSPENDED] = &hsusb_sus_cfg, }, }, }; #endif static struct gpiomux_setting hap_lvl_shft_suspended_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting hap_lvl_shft_active_config = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting ap2mdm_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting mdm2ap_status_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting mdm2ap_errfatal_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_16MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting ap2mdm_kpdpwr_n_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }; static struct gpiomux_setting mdp_vsync_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting mdp_vsync_active_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL static struct gpiomux_setting hdmi_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting hdmi_active_1_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting hdmi_active_2_cfg = { .func = GPIOMUX_FUNC_1, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }; static struct gpiomux_setting hdmi_active_3_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_IN, }; 
static struct gpiomux_setting hdmi_active_4_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_OUT_HIGH, }; static struct gpiomux_setting hdmi_active_5_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, .dir = GPIOMUX_OUT_HIGH, }; #endif static struct gpiomux_setting sitar_reset = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_6MA, .pull = GPIOMUX_PULL_NONE, .dir = GPIOMUX_OUT_LOW, }; #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) static struct msm_gpiomux_config msm8960_ethernet_configs[] = { { .gpio = 89, .settings = { [GPIOMUX_SUSPENDED] = &gpio_eth_suspend_1_cfg, } }, { .gpio = 90, .settings = { [GPIOMUX_SUSPENDED] = &gpio_eth_suspend_2_cfg, } }, }; #endif static struct msm_gpiomux_config msm8960_gsbi_configs[] __initdata = { { .gpio = 6, /* GSBI1 QUP SPI_DATA_MOSI */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 7, /* GSBI1 QUP SPI_DATA_MISO */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 8, /* GSBI1 QUP SPI_CS_N */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 9, /* GSBI1 QUP SPI_CLK */ .settings = { [GPIOMUX_SUSPENDED] = &spi_suspended_config, [GPIOMUX_ACTIVE] = &spi_active, }, }, { .gpio = 16, /* GSBI3 I2C QUP SDA */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi3_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi3_active_cfg, }, }, { .gpio = 17, /* GSBI3 I2C QUP SCL */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi3_suspended_cfg, [GPIOMUX_ACTIVE] = &gsbi3_active_cfg, }, }, { .gpio = 22, /* GSBI5 UART2 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi5, }, }, { .gpio = 23, /* GSBI5 UART2 */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi5, }, }, { .gpio = 44, /* GSBI12 I2C QUP SDA */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi12, }, }, { .gpio = 95, /* GSBI9 I2C QUP SDA */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi9, 
}, }, { .gpio = 96, /* GSBI12 I2C QUP SCL */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi9, }, }, { .gpio = 45, /* GSBI12 I2C QUP SCL */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi12, }, }, { .gpio = 73, /* GSBI10 I2C QUP SDA */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi10, }, }, { .gpio = 74, /* GSBI10 I2C QUP SCL */ .settings = { [GPIOMUX_SUSPENDED] = &gsbi10, }, }, }; static struct msm_gpiomux_config msm8960_slimbus_config[] __initdata = { { .gpio = 60, /* slimbus data */ .settings = { [GPIOMUX_SUSPENDED] = &slimbus, }, }, { .gpio = 61, /* slimbus clk */ .settings = { [GPIOMUX_SUSPENDED] = &slimbus, }, }, }; static struct msm_gpiomux_config msm8960_audio_codec_configs[] __initdata = { { .gpio = 59, .settings = { [GPIOMUX_SUSPENDED] = &cdc_mclk, }, }, }; static struct msm_gpiomux_config msm8960_audio_mbhc_configs[] __initdata = { { .gpio = 37, .settings = { [GPIOMUX_SUSPENDED] = &audio_mbhc, }, }, }; static struct msm_gpiomux_config msm8960_audio_spkr_configs[] __initdata = { { .gpio = 15, .settings = { [GPIOMUX_SUSPENDED] = &audio_spkr_boost, }, }, }; static struct msm_gpiomux_config msm8960_audio_auxpcm_configs[] __initdata = { { .gpio = 63, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, { .gpio = 64, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, { .gpio = 65, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, { .gpio = 66, .settings = { [GPIOMUX_SUSPENDED] = &audio_auxpcm[0], [GPIOMUX_ACTIVE] = &audio_auxpcm[1], }, }, }; static struct msm_gpiomux_config wcnss_5wire_interface[] = { { .gpio = 84, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 85, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 86, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = 
&wcnss_5wire_suspend_cfg, }, }, { .gpio = 87, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, { .gpio = 88, .settings = { [GPIOMUX_ACTIVE] = &wcnss_5wire_active_cfg, [GPIOMUX_SUSPENDED] = &wcnss_5wire_suspend_cfg, }, }, }; static struct msm_gpiomux_config msm8960_atmel_configs[] __initdata = { { /* TS INTERRUPT */ .gpio = 11, .settings = { [GPIOMUX_ACTIVE] = &atmel_int_act_cfg, [GPIOMUX_SUSPENDED] = &atmel_int_sus_cfg, }, }, { /* TS LDO ENABLE */ .gpio = 50, .settings = { [GPIOMUX_ACTIVE] = &atmel_ldo_en_act_cfg, [GPIOMUX_SUSPENDED] = &atmel_ldo_en_sus_cfg, }, }, { /* TS RESOUT */ .gpio = 52, .settings = { [GPIOMUX_ACTIVE] = &atmel_resout_act_cfg, [GPIOMUX_SUSPENDED] = &atmel_resout_sus_cfg, }, }, }; static struct msm_gpiomux_config hap_lvl_shft_config[] __initdata = { { .gpio = 47, .settings = { [GPIOMUX_SUSPENDED] = &hap_lvl_shft_suspended_config, [GPIOMUX_ACTIVE] = &hap_lvl_shft_active_config, }, }, }; static struct msm_gpiomux_config mdm_configs[] __initdata = { /* AP2MDM_STATUS */ { .gpio = 94, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } }, /* MDM2AP_STATUS */ { .gpio = 69, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_status_cfg, } }, /* MDM2AP_ERRFATAL */ { .gpio = 70, .settings = { [GPIOMUX_SUSPENDED] = &mdm2ap_errfatal_cfg, } }, /* AP2MDM_ERRFATAL */ { .gpio = 95, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_cfg, } }, /* AP2MDM_KPDPWR_N */ { .gpio = 81, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg, } }, /* AP2MDM_PMIC_RESET_N */ { .gpio = 80, .settings = { [GPIOMUX_SUSPENDED] = &ap2mdm_kpdpwr_n_cfg, } } }; static struct msm_gpiomux_config msm8960_mdp_vsync_configs[] __initdata = { { .gpio = 0, .settings = { [GPIOMUX_ACTIVE] = &mdp_vsync_active_cfg, [GPIOMUX_SUSPENDED] = &mdp_vsync_suspend_cfg, }, } }; #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL static struct msm_gpiomux_config msm8960_hdmi_configs[] __initdata = { { .gpio = 99, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, 
[GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 100, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 101, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_1_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 102, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_2_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, }; static struct msm_gpiomux_config msm8930_mhl_configs[] __initdata = { { .gpio = 72, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_3_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 71, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_4_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, { .gpio = 73, .settings = { [GPIOMUX_ACTIVE] = &hdmi_active_5_cfg, [GPIOMUX_SUSPENDED] = &hdmi_suspend_cfg, }, }, }; #endif static struct gpiomux_setting haptics_active_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }; static struct gpiomux_setting haptics_suspend_cfg = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_DOWN, }; static struct msm_gpiomux_config msm8930_haptics_configs[] __initdata = { { .gpio = 77, .settings = { [GPIOMUX_ACTIVE] = &haptics_active_cfg, [GPIOMUX_SUSPENDED] = &haptics_suspend_cfg, }, }, { .gpio = 78, .settings = { [GPIOMUX_ACTIVE] = &haptics_active_cfg, [GPIOMUX_SUSPENDED] = &haptics_suspend_cfg, }, }, }; static struct gpiomux_setting sd_det_line = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct msm_gpiomux_config msm8930_sd_det_config[] __initdata = { { .gpio = 94, /* SD Card Detect Line */ .settings = { [GPIOMUX_SUSPENDED] = &sd_det_line, [GPIOMUX_ACTIVE] = &sd_det_line, }, }, }; static struct gpiomux_setting gyro_int_line = { .func = GPIOMUX_FUNC_GPIO, .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }; static struct msm_gpiomux_config msm8930_gyro_int_config[] __initdata = { { .gpio = 69, /* Gyro Interrupt Line */ .settings = { 
[GPIOMUX_SUSPENDED] = &gyro_int_line, [GPIOMUX_ACTIVE] = &gyro_int_line, }, }, }; static struct msm_gpiomux_config msm_sitar_config[] __initdata = { { .gpio = 42, /* SYS_RST_N */ .settings = { [GPIOMUX_SUSPENDED] = &sitar_reset, }, } }; int __init msm8930_init_gpiomux(void) { int rc = msm_gpiomux_init(NR_GPIO_IRQS); if (rc) { pr_err(KERN_ERR "msm_gpiomux_init failed %d\n", rc); return rc; } #if defined(CONFIG_KS8851) || defined(CONFIG_KS8851_MODULE) msm_gpiomux_install(msm8960_ethernet_configs, ARRAY_SIZE(msm8960_ethernet_configs)); #endif msm_gpiomux_install(msm8960_gsbi_configs, ARRAY_SIZE(msm8960_gsbi_configs)); msm_gpiomux_install(msm8960_atmel_configs, ARRAY_SIZE(msm8960_atmel_configs)); msm_gpiomux_install(msm8960_slimbus_config, ARRAY_SIZE(msm8960_slimbus_config)); msm_gpiomux_install(msm8960_audio_codec_configs, ARRAY_SIZE(msm8960_audio_codec_configs)); msm_gpiomux_install(msm8960_audio_mbhc_configs, ARRAY_SIZE(msm8960_audio_mbhc_configs)); msm_gpiomux_install(msm8960_audio_spkr_configs, ARRAY_SIZE(msm8960_audio_spkr_configs)); msm_gpiomux_install(msm8960_audio_auxpcm_configs, ARRAY_SIZE(msm8960_audio_auxpcm_configs)); msm_gpiomux_install(wcnss_5wire_interface, ARRAY_SIZE(wcnss_5wire_interface)); if (machine_is_msm8930_mtp() || machine_is_msm8930_fluid() || machine_is_msm8930_cdp()) { msm_gpiomux_install(hap_lvl_shft_config, ARRAY_SIZE(hap_lvl_shft_config)); #ifdef MSM8930_PHASE_2 msm_gpiomux_install(msm8930_hsusb_configs, ARRAY_SIZE(msm8930_hsusb_configs)); #endif } if (PLATFORM_IS_CHARM25()) msm_gpiomux_install(mdm_configs, ARRAY_SIZE(mdm_configs)); if (machine_is_msm8930_cdp() || machine_is_msm8930_mtp() || machine_is_msm8930_fluid()) msm_gpiomux_install(msm8930_haptics_configs, ARRAY_SIZE(msm8930_haptics_configs)); #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL msm_gpiomux_install(msm8960_hdmi_configs, ARRAY_SIZE(msm8960_hdmi_configs)); if (msm8930_mhl_display_enabled()) msm_gpiomux_install(msm8930_mhl_configs, ARRAY_SIZE(msm8930_mhl_configs)); #endif 
msm_gpiomux_install(msm8960_mdp_vsync_configs, ARRAY_SIZE(msm8960_mdp_vsync_configs)); msm_gpiomux_install(msm8930_sd_det_config, ARRAY_SIZE(msm8930_sd_det_config)); if (machine_is_msm8930_fluid() || machine_is_msm8930_mtp()) msm_gpiomux_install(msm8930_gyro_int_config, ARRAY_SIZE(msm8930_gyro_int_config)); msm_gpiomux_install(msm_sitar_config, ARRAY_SIZE(msm_sitar_config)); return 0; }
gpl-2.0
justsoso8/linux-2.6.32.9
fs/nfsd/nfssvc.c
347
15381
/* * linux/fs/nfsd/nfssvc.c * * Central processing for nfsd. * * Authors: Olaf Kirch (okir@monad.swb.de) * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/errno.h> #include <linux/nfs.h> #include <linux/in.h> #include <linux/uio.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/freezer.h> #include <linux/fs_struct.h> #include <linux/kthread.h> #include <linux/swap.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/cache.h> #include <linux/nfsd/nfsd.h> #include <linux/nfsd/stats.h> #include <linux/nfsd/cache.h> #include <linux/nfsd/syscall.h> #include <linux/lockd/bind.h> #include <linux/nfsacl.h> #include <linux/seq_file.h> #define NFSDDBG_FACILITY NFSDDBG_SVC extern struct svc_program nfsd_program; static int nfsd(void *vrqstp); struct timeval nfssvc_boot; /* * nfsd_mutex protects nfsd_serv -- both the pointer itself and the members * of the svc_serv struct. In particular, ->sv_nrthreads but also to some * extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt * * If (out side the lock) nfsd_serv is non-NULL, then it must point to a * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number * of nfsd threads must exist and each must listed in ->sp_all_threads in each * entry of ->sv_pools[]. * * Transitions of the thread count between zero and non-zero are of particular * interest since the svc_serv needs to be created and initialized at that * point, or freed. * * Finally, the nfsd_mutex also protects some of the global variables that are * accessed when nfsd starts and that are settable via the write_* routines in * nfsctl.c. 
In particular: * * user_recovery_dirname * user_lease_time * nfsd_versions */ DEFINE_MUTEX(nfsd_mutex); struct svc_serv *nfsd_serv; /* * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used. * nfsd_drc_max_pages limits the total amount of memory available for * version 4.1 DRC caches. * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage. */ spinlock_t nfsd_drc_lock; unsigned int nfsd_drc_max_mem; unsigned int nfsd_drc_mem_used; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) static struct svc_stat nfsd_acl_svcstats; static struct svc_version * nfsd_acl_version[] = { [2] = &nfsd_acl_version2, [3] = &nfsd_acl_version3, }; #define NFSD_ACL_MINVERS 2 #define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version) static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS]; static struct svc_program nfsd_acl_program = { .pg_prog = NFS_ACL_PROGRAM, .pg_nvers = NFSD_ACL_NRVERS, .pg_vers = nfsd_acl_versions, .pg_name = "nfsacl", .pg_class = "nfsd", .pg_stats = &nfsd_acl_svcstats, .pg_authenticate = &svc_set_client, }; static struct svc_stat nfsd_acl_svcstats = { .program = &nfsd_acl_program, }; #endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */ static struct svc_version * nfsd_version[] = { [2] = &nfsd_version2, #if defined(CONFIG_NFSD_V3) [3] = &nfsd_version3, #endif #if defined(CONFIG_NFSD_V4) [4] = &nfsd_version4, #endif }; #define NFSD_MINVERS 2 #define NFSD_NRVERS ARRAY_SIZE(nfsd_version) static struct svc_version *nfsd_versions[NFSD_NRVERS]; struct svc_program nfsd_program = { #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) .pg_next = &nfsd_acl_program, #endif .pg_prog = NFS_PROGRAM, /* program number */ .pg_nvers = NFSD_NRVERS, /* nr of entries in nfsd_version */ .pg_vers = nfsd_versions, /* version table */ .pg_name = "nfsd", /* program name */ .pg_class = "nfsd", /* authentication class */ .pg_stats = &nfsd_svcstats, /* version table */ .pg_authenticate = &svc_set_client, /* export 
authentication */ }; u32 nfsd_supported_minorversion; int nfsd_vers(int vers, enum vers_op change) { if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS) return -1; switch(change) { case NFSD_SET: nfsd_versions[vers] = nfsd_version[vers]; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) if (vers < NFSD_ACL_NRVERS) nfsd_acl_versions[vers] = nfsd_acl_version[vers]; #endif break; case NFSD_CLEAR: nfsd_versions[vers] = NULL; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) if (vers < NFSD_ACL_NRVERS) nfsd_acl_versions[vers] = NULL; #endif break; case NFSD_TEST: return nfsd_versions[vers] != NULL; case NFSD_AVAIL: return nfsd_version[vers] != NULL; } return 0; } int nfsd_minorversion(u32 minorversion, enum vers_op change) { if (minorversion > NFSD_SUPPORTED_MINOR_VERSION) return -1; switch(change) { case NFSD_SET: nfsd_supported_minorversion = minorversion; break; case NFSD_CLEAR: if (minorversion == 0) return -1; nfsd_supported_minorversion = minorversion - 1; break; case NFSD_TEST: return minorversion <= nfsd_supported_minorversion; case NFSD_AVAIL: return minorversion <= NFSD_SUPPORTED_MINOR_VERSION; } return 0; } /* * Maximum number of nfsd processes */ #define NFSD_MAXSERVS 8192 int nfsd_nrthreads(void) { int rv = 0; mutex_lock(&nfsd_mutex); if (nfsd_serv) rv = nfsd_serv->sv_nrthreads; mutex_unlock(&nfsd_mutex); return rv; } static void nfsd_last_thread(struct svc_serv *serv) { /* When last nfsd thread exits we need to do some clean-up */ struct svc_xprt *xprt; list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) lockd_down(); nfsd_serv = NULL; nfsd_racache_shutdown(); nfs4_state_shutdown(); printk(KERN_WARNING "nfsd: last server has exited, flushing export " "cache\n"); nfsd_export_flush(); } void nfsd_reset_versions(void) { int found_one = 0; int i; for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) { if (nfsd_program.pg_vers[i]) found_one = 1; } if (!found_one) { for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) nfsd_program.pg_vers[i] = 
nfsd_version[i]; #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++) nfsd_acl_program.pg_vers[i] = nfsd_acl_version[i]; #endif } } /* * Each session guarantees a negotiated per slot memory cache for replies * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated * NFSv4.1 server might want to use more memory for a DRC than a machine * with mutiple services. * * Impose a hard limit on the number of pages for the DRC which varies * according to the machines free pages. This is of course only a default. * * For now this is a #defined shift which could be under admin control * in the future. */ static void set_max_drc(void) { #define NFSD_DRC_SIZE_SHIFT 10 nfsd_drc_max_mem = (nr_free_buffer_pages() >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; nfsd_drc_mem_used = 0; spin_lock_init(&nfsd_drc_lock); dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem); } int nfsd_create_serv(void) { int err = 0; WARN_ON(!mutex_is_locked(&nfsd_mutex)); if (nfsd_serv) { svc_get(nfsd_serv); return 0; } if (nfsd_max_blksize == 0) { /* choose a suitable default */ struct sysinfo i; si_meminfo(&i); /* Aim for 1/4096 of memory per thread * This gives 1MB on 4Gig machines * But only uses 32K on 128M machines. * Bottom out at 8K on 32M and smaller. * Of course, this is only a default. 
*/ nfsd_max_blksize = NFSSVC_MAXBLKSIZE; i.totalram <<= PAGE_SHIFT - 12; while (nfsd_max_blksize > i.totalram && nfsd_max_blksize >= 8*1024*2) nfsd_max_blksize /= 2; } nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd_last_thread, nfsd, THIS_MODULE); if (nfsd_serv == NULL) err = -ENOMEM; else set_max_drc(); do_gettimeofday(&nfssvc_boot); /* record boot time */ return err; } static int nfsd_init_socks(int port) { int error; if (!list_empty(&nfsd_serv->sv_permsocks)) return 0; error = svc_create_xprt(nfsd_serv, "udp", PF_INET, port, SVC_SOCK_DEFAULTS); if (error < 0) return error; error = lockd_up(); if (error < 0) return error; error = svc_create_xprt(nfsd_serv, "tcp", PF_INET, port, SVC_SOCK_DEFAULTS); if (error < 0) return error; error = lockd_up(); if (error < 0) return error; return 0; } int nfsd_nrpools(void) { if (nfsd_serv == NULL) return 0; else return nfsd_serv->sv_nrpools; } int nfsd_get_nrthreads(int n, int *nthreads) { int i = 0; if (nfsd_serv != NULL) { for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++) nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads; } return 0; } int nfsd_set_nrthreads(int n, int *nthreads) { int i = 0; int tot = 0; int err = 0; WARN_ON(!mutex_is_locked(&nfsd_mutex)); if (nfsd_serv == NULL || n <= 0) return 0; if (n > nfsd_serv->sv_nrpools) n = nfsd_serv->sv_nrpools; /* enforce a global maximum number of threads */ tot = 0; for (i = 0; i < n; i++) { if (nthreads[i] > NFSD_MAXSERVS) nthreads[i] = NFSD_MAXSERVS; tot += nthreads[i]; } if (tot > NFSD_MAXSERVS) { /* total too large: scale down requested numbers */ for (i = 0; i < n && tot > 0; i++) { int new = nthreads[i] * NFSD_MAXSERVS / tot; tot -= (nthreads[i] - new); nthreads[i] = new; } for (i = 0; i < n && tot > 0; i++) { nthreads[i]--; tot--; } } /* * There must always be a thread in pool 0; the admin * can't shut down NFS completely using pool_threads. 
*/ if (nthreads[0] == 0) nthreads[0] = 1; /* apply the new numbers */ svc_get(nfsd_serv); for (i = 0; i < n; i++) { err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i], nthreads[i]); if (err) break; } svc_destroy(nfsd_serv); return err; } int nfsd_svc(unsigned short port, int nrservs) { int error; mutex_lock(&nfsd_mutex); dprintk("nfsd: creating service\n"); if (nrservs <= 0) nrservs = 0; if (nrservs > NFSD_MAXSERVS) nrservs = NFSD_MAXSERVS; error = 0; if (nrservs == 0 && nfsd_serv == NULL) goto out; /* Readahead param cache - will no-op if it already exists */ error = nfsd_racache_init(2*nrservs); if (error<0) goto out; error = nfs4_state_start(); if (error) goto out; nfsd_reset_versions(); error = nfsd_create_serv(); if (error) goto out; error = nfsd_init_socks(port); if (error) goto failure; error = svc_set_num_threads(nfsd_serv, NULL, nrservs); if (error == 0) /* We are holding a reference to nfsd_serv which * we don't want to count in the return value, * so subtract 1 */ error = nfsd_serv->sv_nrthreads - 1; failure: svc_destroy(nfsd_serv); /* Release server */ out: mutex_unlock(&nfsd_mutex); return error; } /* * This is the NFS server kernel thread */ static int nfsd(void *vrqstp) { struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp; int err, preverr = 0; /* Lock module and set up kernel thread */ mutex_lock(&nfsd_mutex); /* At this point, the thread shares current->fs * with the init process. We need to create files with a * umask of 0 instead of init's umask. 
*/ if (unshare_fs_struct() < 0) { printk("Unable to start nfsd thread: out of memory\n"); goto out; } current->fs->umask = 0; /* * thread is spawned with all signals set to SIG_IGN, re-enable * the ones that will bring down the thread */ allow_signal(SIGKILL); allow_signal(SIGHUP); allow_signal(SIGINT); allow_signal(SIGQUIT); nfsdstats.th_cnt++; mutex_unlock(&nfsd_mutex); /* * We want less throttling in balance_dirty_pages() so that nfs to * localhost doesn't cause nfsd to lock up due to all the client's * dirty pages. */ current->flags |= PF_LESS_THROTTLE; set_freezable(); /* * The main request loop */ for (;;) { /* * Find a socket with data available and call its * recvfrom routine. */ while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN) ; if (err == -EINTR) break; else if (err < 0) { if (err != preverr) { printk(KERN_WARNING "%s: unexpected error " "from svc_recv (%d)\n", __func__, -err); preverr = err; } schedule_timeout_uninterruptible(HZ); continue; } /* Lock the export hash tables for reading. */ exp_readlock(); validate_process_creds(); svc_process(rqstp); validate_process_creds(); /* Unlock export hash tables */ exp_readunlock(); } /* Clear signals before calling svc_exit_thread() */ flush_signals(current); mutex_lock(&nfsd_mutex); nfsdstats.th_cnt --; out: /* Release the thread */ svc_exit_thread(rqstp); /* Release module */ mutex_unlock(&nfsd_mutex); module_put_and_exit(0); return 0; } static __be32 map_new_errors(u32 vers, __be32 nfserr) { if (nfserr == nfserr_jukebox && vers == 2) return nfserr_dropit; if (nfserr == nfserr_wrongsec && vers < 4) return nfserr_acces; return nfserr; } int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) { struct svc_procedure *proc; kxdrproc_t xdr; __be32 nfserr; __be32 *nfserrp; dprintk("nfsd_dispatch: vers %d proc %d\n", rqstp->rq_vers, rqstp->rq_proc); proc = rqstp->rq_procinfo; /* Check whether we have this call in the cache. 
*/ switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) { case RC_INTR: case RC_DROPIT: return 0; case RC_REPLY: return 1; case RC_DOIT:; /* do it */ } /* Decode arguments */ xdr = proc->pc_decode; if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base, rqstp->rq_argp)) { dprintk("nfsd: failed to decode arguments!\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); *statp = rpc_garbage_args; return 1; } /* need to grab the location to store the status, as * nfsv4 does some encoding while processing */ nfserrp = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len; rqstp->rq_res.head[0].iov_len += sizeof(__be32); /* Now call the procedure handler, and encode NFS status. */ nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); nfserr = map_new_errors(rqstp->rq_vers, nfserr); if (nfserr == nfserr_dropit) { dprintk("nfsd: Dropping request; may be revisited later\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); return 0; } if (rqstp->rq_proc != 0) *nfserrp++ = nfserr; /* Encode result. * For NFSv2, additional info is never returned in case of an error. */ if (!(nfserr && rqstp->rq_vers == 2)) { xdr = proc->pc_encode; if (xdr && !xdr(rqstp, nfserrp, rqstp->rq_resp)) { /* Failed to encode result. Release cache entry */ dprintk("nfsd: failed to encode result!\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); *statp = rpc_system_err; return 1; } } /* Store reply in cache. 
*/ nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1); return 1; } int nfsd_pool_stats_open(struct inode *inode, struct file *file) { int ret; mutex_lock(&nfsd_mutex); if (nfsd_serv == NULL) { mutex_unlock(&nfsd_mutex); return -ENODEV; } /* bump up the psudo refcount while traversing */ svc_get(nfsd_serv); ret = svc_pool_stats_open(nfsd_serv, file); mutex_unlock(&nfsd_mutex); return ret; } int nfsd_pool_stats_release(struct inode *inode, struct file *file) { int ret = seq_release(inode, file); mutex_lock(&nfsd_mutex); /* this function really, really should have been called svc_put() */ svc_destroy(nfsd_serv); mutex_unlock(&nfsd_mutex); return ret; }
gpl-2.0
lacvapps/linux
drivers/media/i2c/bt819.c
347
13498
/* * bt819 - BT819A VideoStream Decoder (Rockwell Part) * * Copyright (C) 1999 Mike Bernson <mike@mlb.org> * Copyright (C) 1998 Dave Perks <dperks@ibm.net> * * Modifications for LML33/DC10plus unified driver * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx> * * Changes by Ronald Bultje <rbultje@ronald.bitfreak.net> * - moved over to linux>=2.4.x i2c protocol (9/9/2002) * * This code was modify/ported from the saa7111 driver written * by Dave Perks. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/ioctl.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/bt819.h> MODULE_DESCRIPTION("Brooktree-819 video decoder driver"); MODULE_AUTHOR("Mike Bernson & Dave Perks"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /* ----------------------------------------------------------------------- */ struct bt819 { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; unsigned char reg[32]; v4l2_std_id norm; int input; int enable; }; static inline struct bt819 *to_bt819(struct v4l2_subdev *sd) { return container_of(sd, struct bt819, sd); } static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct bt819, hdl)->sd; } struct timing { int hactive; int hdelay; int vactive; int vdelay; int hscale; int vscale; }; /* for values, see the bt819 datasheet */ static struct timing timing_data[] = { {864 - 24, 20, 625 - 2, 1, 0x0504, 0x0000}, {858 - 24, 20, 525 - 2, 1, 0x00f8, 0x0000}, }; /* ----------------------------------------------------------------------- */ static inline int bt819_write(struct bt819 *decoder, u8 reg, u8 value) { struct i2c_client *client = v4l2_get_subdevdata(&decoder->sd); decoder->reg[reg] = value; return i2c_smbus_write_byte_data(client, reg, value); } static inline int bt819_setbit(struct bt819 *decoder, u8 reg, u8 bit, u8 value) { return bt819_write(decoder, reg, (decoder->reg[reg] & ~(1 << bit)) | (value ? 
(1 << bit) : 0)); } static int bt819_write_block(struct bt819 *decoder, const u8 *data, unsigned int len) { struct i2c_client *client = v4l2_get_subdevdata(&decoder->sd); int ret = -1; u8 reg; /* the bt819 has an autoincrement function, use it if * the adapter understands raw I2C */ if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { /* do raw I2C, not smbus compatible */ u8 block_data[32]; int block_len; while (len >= 2) { block_len = 0; block_data[block_len++] = reg = data[0]; do { block_data[block_len++] = decoder->reg[reg++] = data[1]; len -= 2; data += 2; } while (len >= 2 && data[0] == reg && block_len < 32); ret = i2c_master_send(client, block_data, block_len); if (ret < 0) break; } } else { /* do some slow I2C emulation kind of thing */ while (len >= 2) { reg = *data++; ret = bt819_write(decoder, reg, *data++); if (ret < 0) break; len -= 2; } } return ret; } static inline int bt819_read(struct bt819 *decoder, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(&decoder->sd); return i2c_smbus_read_byte_data(client, reg); } static int bt819_init(struct v4l2_subdev *sd) { static unsigned char init[] = { /*0x1f, 0x00,*/ /* Reset */ 0x01, 0x59, /* 0x01 input format */ 0x02, 0x00, /* 0x02 temporal decimation */ 0x03, 0x12, /* 0x03 Cropping msb */ 0x04, 0x16, /* 0x04 Vertical Delay, lsb */ 0x05, 0xe0, /* 0x05 Vertical Active lsb */ 0x06, 0x80, /* 0x06 Horizontal Delay lsb */ 0x07, 0xd0, /* 0x07 Horizontal Active lsb */ 0x08, 0x00, /* 0x08 Horizontal Scaling msb */ 0x09, 0xf8, /* 0x09 Horizontal Scaling lsb */ 0x0a, 0x00, /* 0x0a Brightness control */ 0x0b, 0x30, /* 0x0b Miscellaneous control */ 0x0c, 0xd8, /* 0x0c Luma Gain lsb */ 0x0d, 0xfe, /* 0x0d Chroma Gain (U) lsb */ 0x0e, 0xb4, /* 0x0e Chroma Gain (V) msb */ 0x0f, 0x00, /* 0x0f Hue control */ 0x12, 0x04, /* 0x12 Output Format */ 0x13, 0x20, /* 0x13 Vertial Scaling msb 0x00 chroma comb OFF, line drop scaling, interlace scaling BUG? Why does turning the chroma comb on fuck up color? 
Bug in the bt819 stepping on my board? */ 0x14, 0x00, /* 0x14 Vertial Scaling lsb */ 0x16, 0x07, /* 0x16 Video Timing Polarity ACTIVE=active low FIELD: high=odd, vreset=active high, hreset=active high */ 0x18, 0x68, /* 0x18 AGC Delay */ 0x19, 0x5d, /* 0x19 Burst Gate Delay */ 0x1a, 0x80, /* 0x1a ADC Interface */ }; struct bt819 *decoder = to_bt819(sd); struct timing *timing = &timing_data[(decoder->norm & V4L2_STD_525_60) ? 1 : 0]; init[0x03 * 2 - 1] = (((timing->vdelay >> 8) & 0x03) << 6) | (((timing->vactive >> 8) & 0x03) << 4) | (((timing->hdelay >> 8) & 0x03) << 2) | ((timing->hactive >> 8) & 0x03); init[0x04 * 2 - 1] = timing->vdelay & 0xff; init[0x05 * 2 - 1] = timing->vactive & 0xff; init[0x06 * 2 - 1] = timing->hdelay & 0xff; init[0x07 * 2 - 1] = timing->hactive & 0xff; init[0x08 * 2 - 1] = timing->hscale >> 8; init[0x09 * 2 - 1] = timing->hscale & 0xff; /* 0x15 in array is address 0x19 */ init[0x15 * 2 - 1] = (decoder->norm & V4L2_STD_625_50) ? 115 : 93; /* Chroma burst delay */ /* reset */ bt819_write(decoder, 0x1f, 0x00); mdelay(1); /* init */ return bt819_write_block(decoder, init, sizeof(init)); } /* ----------------------------------------------------------------------- */ static int bt819_status(struct v4l2_subdev *sd, u32 *pstatus, v4l2_std_id *pstd) { struct bt819 *decoder = to_bt819(sd); int status = bt819_read(decoder, 0x00); int res = V4L2_IN_ST_NO_SIGNAL; v4l2_std_id std = pstd ? 
*pstd : V4L2_STD_ALL; if ((status & 0x80)) res = 0; else std = V4L2_STD_UNKNOWN; if ((status & 0x10)) std &= V4L2_STD_PAL; else std &= V4L2_STD_NTSC; if (pstd) *pstd = std; if (pstatus) *pstatus = res; v4l2_dbg(1, debug, sd, "get status %x\n", status); return 0; } static int bt819_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { return bt819_status(sd, NULL, std); } static int bt819_g_input_status(struct v4l2_subdev *sd, u32 *status) { return bt819_status(sd, status, NULL); } static int bt819_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct bt819 *decoder = to_bt819(sd); struct timing *timing = NULL; v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std); if (sd->v4l2_dev == NULL || sd->v4l2_dev->notify == NULL) v4l2_err(sd, "no notify found!\n"); if (std & V4L2_STD_NTSC) { v4l2_subdev_notify(sd, BT819_FIFO_RESET_LOW, NULL); bt819_setbit(decoder, 0x01, 0, 1); bt819_setbit(decoder, 0x01, 1, 0); bt819_setbit(decoder, 0x01, 5, 0); bt819_write(decoder, 0x18, 0x68); bt819_write(decoder, 0x19, 0x5d); /* bt819_setbit(decoder, 0x1a, 5, 1); */ timing = &timing_data[1]; } else if (std & V4L2_STD_PAL) { v4l2_subdev_notify(sd, BT819_FIFO_RESET_LOW, NULL); bt819_setbit(decoder, 0x01, 0, 1); bt819_setbit(decoder, 0x01, 1, 1); bt819_setbit(decoder, 0x01, 5, 1); bt819_write(decoder, 0x18, 0x7f); bt819_write(decoder, 0x19, 0x72); /* bt819_setbit(decoder, 0x1a, 5, 0); */ timing = &timing_data[0]; } else { v4l2_dbg(1, debug, sd, "unsupported norm %llx\n", (unsigned long long)std); return -EINVAL; } bt819_write(decoder, 0x03, (((timing->vdelay >> 8) & 0x03) << 6) | (((timing->vactive >> 8) & 0x03) << 4) | (((timing->hdelay >> 8) & 0x03) << 2) | ((timing->hactive >> 8) & 0x03)); bt819_write(decoder, 0x04, timing->vdelay & 0xff); bt819_write(decoder, 0x05, timing->vactive & 0xff); bt819_write(decoder, 0x06, timing->hdelay & 0xff); bt819_write(decoder, 0x07, timing->hactive & 0xff); bt819_write(decoder, 0x08, (timing->hscale >> 8) & 0xff); bt819_write(decoder, 
0x09, timing->hscale & 0xff); decoder->norm = std; v4l2_subdev_notify(sd, BT819_FIFO_RESET_HIGH, NULL); return 0; } static int bt819_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct bt819 *decoder = to_bt819(sd); v4l2_dbg(1, debug, sd, "set input %x\n", input); if (input > 7) return -EINVAL; if (sd->v4l2_dev == NULL || sd->v4l2_dev->notify == NULL) v4l2_err(sd, "no notify found!\n"); if (decoder->input != input) { v4l2_subdev_notify(sd, BT819_FIFO_RESET_LOW, NULL); decoder->input = input; /* select mode */ if (decoder->input == 0) { bt819_setbit(decoder, 0x0b, 6, 0); bt819_setbit(decoder, 0x1a, 1, 1); } else { bt819_setbit(decoder, 0x0b, 6, 1); bt819_setbit(decoder, 0x1a, 1, 0); } v4l2_subdev_notify(sd, BT819_FIFO_RESET_HIGH, NULL); } return 0; } static int bt819_s_stream(struct v4l2_subdev *sd, int enable) { struct bt819 *decoder = to_bt819(sd); v4l2_dbg(1, debug, sd, "enable output %x\n", enable); if (decoder->enable != enable) { decoder->enable = enable; bt819_setbit(decoder, 0x16, 7, !enable); } return 0; } static int bt819_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct bt819 *decoder = to_bt819(sd); int temp; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: bt819_write(decoder, 0x0a, ctrl->val); break; case V4L2_CID_CONTRAST: bt819_write(decoder, 0x0c, ctrl->val & 0xff); bt819_setbit(decoder, 0x0b, 2, ((ctrl->val >> 8) & 0x01)); break; case V4L2_CID_SATURATION: bt819_write(decoder, 0x0d, (ctrl->val >> 7) & 0xff); bt819_setbit(decoder, 0x0b, 1, ((ctrl->val >> 15) & 0x01)); /* Ratio between U gain and V gain must stay the same as the ratio between the default U and V gain values. 
*/ temp = (ctrl->val * 180) / 254; bt819_write(decoder, 0x0e, (temp >> 7) & 0xff); bt819_setbit(decoder, 0x0b, 0, (temp >> 15) & 0x01); break; case V4L2_CID_HUE: bt819_write(decoder, 0x0f, ctrl->val); break; default: return -EINVAL; } return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops bt819_ctrl_ops = { .s_ctrl = bt819_s_ctrl, }; static const struct v4l2_subdev_video_ops bt819_video_ops = { .s_std = bt819_s_std, .s_routing = bt819_s_routing, .s_stream = bt819_s_stream, .querystd = bt819_querystd, .g_input_status = bt819_g_input_status, }; static const struct v4l2_subdev_ops bt819_ops = { .video = &bt819_video_ops, }; /* ----------------------------------------------------------------------- */ static int bt819_probe(struct i2c_client *client, const struct i2c_device_id *id) { int i, ver; struct bt819 *decoder; struct v4l2_subdev *sd; const char *name; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; decoder = devm_kzalloc(&client->dev, sizeof(*decoder), GFP_KERNEL); if (decoder == NULL) return -ENOMEM; sd = &decoder->sd; v4l2_i2c_subdev_init(sd, client, &bt819_ops); ver = bt819_read(decoder, 0x17); switch (ver & 0xf0) { case 0x70: name = "bt819a"; break; case 0x60: name = "bt817a"; break; case 0x20: name = "bt815a"; break; default: v4l2_dbg(1, debug, sd, "unknown chip version 0x%02x\n", ver); return -ENODEV; } v4l_info(client, "%s found @ 0x%x (%s)\n", name, client->addr << 1, client->adapter->name); decoder->norm = V4L2_STD_NTSC; decoder->input = 0; decoder->enable = 1; i = bt819_init(sd); if (i < 0) v4l2_dbg(1, debug, sd, "init status %d\n", i); v4l2_ctrl_handler_init(&decoder->hdl, 4); v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops, V4L2_CID_BRIGHTNESS, -128, 127, 1, 0); v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops, V4L2_CID_CONTRAST, 0, 511, 1, 0xd8); v4l2_ctrl_new_std(&decoder->hdl, 
&bt819_ctrl_ops, V4L2_CID_SATURATION, 0, 511, 1, 0xfe); v4l2_ctrl_new_std(&decoder->hdl, &bt819_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); sd->ctrl_handler = &decoder->hdl; if (decoder->hdl.error) { int err = decoder->hdl.error; v4l2_ctrl_handler_free(&decoder->hdl); return err; } v4l2_ctrl_handler_setup(&decoder->hdl); return 0; } static int bt819_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct bt819 *decoder = to_bt819(sd); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(&decoder->hdl); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id bt819_id[] = { { "bt819a", 0 }, { "bt817a", 0 }, { "bt815a", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, bt819_id); static struct i2c_driver bt819_driver = { .driver = { .name = "bt819", }, .probe = bt819_probe, .remove = bt819_remove, .id_table = bt819_id, }; module_i2c_driver(bt819_driver);
gpl-2.0
cmenard/OverStock_I897
drivers/watchdog/iTCO_vendor_support.c
603
11549
/* * intel TCO vendor specific watchdog driver support * * (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Wim Van Sebroeck nor Iguana vzw. admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. */ /* * Includes, defines, variables, module parameters, ... */ /* Module and version information */ #define DRV_NAME "iTCO_vendor_support" #define DRV_VERSION "1.04" #define PFX DRV_NAME ": " /* Includes */ #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/ioport.h> /* For io-port access */ #include <linux/io.h> /* For inb/outb/... */ #include "iTCO_vendor.h" /* iTCO defines */ #define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */ #define TCOBASE (acpibase + 0x60) /* TCO base address */ #define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */ /* List of vendor support modes */ /* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */ #define SUPERMICRO_OLD_BOARD 1 /* SuperMicro Pentium 4 / Xeon 4 / EMT64T Era Systems */ #define SUPERMICRO_NEW_BOARD 2 /* Broken BIOS */ #define BROKEN_BIOS 911 static int vendorsupport; module_param(vendorsupport, int, 0); MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=" "0 (none), 1=SuperMicro Pent3, 2=SuperMicro Pent4+, " "911=Broken SMI BIOS"); /* * Vendor Specific Support */ /* * Vendor Support: 1 * Board: Super Micro Computer Inc. 
370SSE+-OEM1/P3TSSE * iTCO chipset: ICH2 * * Code contributed by: R. Seretny <lkpatches@paypc.com> * Documentation obtained by R. Seretny from SuperMicro Technical Support * * To enable Watchdog function: * BIOS setup -> Power -> TCO Logic SMI Enable -> Within5Minutes * This setting enables SMI to clear the watchdog expired flag. * If BIOS or CPU fail which may cause SMI hang, then system will * reboot. When application starts to use watchdog function, * application has to take over the control from SMI. * * For P3TSSE, J36 jumper needs to be removed to enable the Watchdog * function. * * Note: The system will reboot when Expire Flag is set TWICE. * So, if the watchdog timer is 20 seconds, then the maximum hang * time is about 40 seconds, and the minimum hang time is about * 20.6 seconds. */ static void supermicro_old_pre_start(unsigned long acpibase) { unsigned long val32; /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ val32 = inl(SMI_EN); val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ outl(val32, SMI_EN); /* Needed to activate watchdog */ } static void supermicro_old_pre_stop(unsigned long acpibase) { unsigned long val32; /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */ val32 = inl(SMI_EN); val32 |= 0x00002000; /* Turn on SMI clearing watchdog */ outl(val32, SMI_EN); /* Needed to deactivate watchdog */ } static void supermicro_old_pre_keepalive(unsigned long acpibase) { /* Reload TCO Timer (done in iTCO_wdt_keepalive) + */ /* Clear "Expire Flag" (Bit 3 of TC01_STS register) */ outb(0x08, TCO1_STS); } /* * Vendor Support: 2 * Board: Super Micro Computer Inc. P4SBx, P4DPx * iTCO chipset: ICH4 * * Code contributed by: R. Seretny <lkpatches@paypc.com> * Documentation obtained by R. Seretny from SuperMicro Technical Support * * To enable Watchdog function: * 1. 
BIOS * For P4SBx: * BIOS setup -> Advanced -> Integrated Peripherals -> Watch Dog Feature * For P4DPx: * BIOS setup -> Advanced -> I/O Device Configuration -> Watch Dog * This setting enables or disables Watchdog function. When enabled, the * default watchdog timer is set to be 5 minutes (about 4m35s). It is * enough to load and run the OS. The application (service or driver) has * to take over the control once OS is running up and before watchdog * expires. * * 2. JUMPER * For P4SBx: JP39 * For P4DPx: JP37 * This jumper is used for safety. Closed is enabled. This jumper * prevents user enables watchdog in BIOS by accident. * * To enable Watch Dog function, both BIOS and JUMPER must be enabled. * * The documentation lists motherboards P4SBx and P4DPx series as of * 20-March-2002. However, this code works flawlessly with much newer * motherboards, such as my X6DHR-8G2 (SuperServer 6014H-82). * * The original iTCO driver as written does not actually reset the * watchdog timer on these machines, as a result they reboot after five * minutes. * * NOTE: You may leave the Watchdog function disabled in the SuperMicro * BIOS to avoid a "boot-race"... This driver will enable watchdog * functionality even if it's disabled in the BIOS once the /dev/watchdog * file is opened. 
*/ /* I/O Port's */ #define SM_REGINDEX 0x2e /* SuperMicro ICH4+ Register Index */ #define SM_DATAIO 0x2f /* SuperMicro ICH4+ Register Data I/O */ /* Control Register's */ #define SM_CTLPAGESW 0x07 /* SuperMicro ICH4+ Control Page Switch */ #define SM_CTLPAGE 0x08 /* SuperMicro ICH4+ Control Page Num */ #define SM_WATCHENABLE 0x30 /* Watchdog enable: Bit 0: 0=off, 1=on */ #define SM_WATCHPAGE 0x87 /* Watchdog unlock control page */ #define SM_ENDWATCH 0xAA /* Watchdog lock control page */ #define SM_COUNTMODE 0xf5 /* Watchdog count mode select */ /* (Bit 3: 0 = seconds, 1 = minutes */ #define SM_WATCHTIMER 0xf6 /* 8-bits, Watchdog timer counter (RW) */ #define SM_RESETCONTROL 0xf7 /* Watchdog reset control */ /* Bit 6: timer is reset by kbd interrupt */ /* Bit 7: timer is reset by mouse interrupt */ static void supermicro_new_unlock_watchdog(void) { /* Write 0x87 to port 0x2e twice */ outb(SM_WATCHPAGE, SM_REGINDEX); outb(SM_WATCHPAGE, SM_REGINDEX); /* Switch to watchdog control page */ outb(SM_CTLPAGESW, SM_REGINDEX); outb(SM_CTLPAGE, SM_DATAIO); } static void supermicro_new_lock_watchdog(void) { outb(SM_ENDWATCH, SM_REGINDEX); } static void supermicro_new_pre_start(unsigned int heartbeat) { unsigned int val; supermicro_new_unlock_watchdog(); /* Watchdog timer setting needs to be in seconds*/ outb(SM_COUNTMODE, SM_REGINDEX); val = inb(SM_DATAIO); val &= 0xF7; outb(val, SM_DATAIO); /* Write heartbeat interval to WDOG */ outb(SM_WATCHTIMER, SM_REGINDEX); outb((heartbeat & 255), SM_DATAIO); /* Make sure keyboard/mouse interrupts don't interfere */ outb(SM_RESETCONTROL, SM_REGINDEX); val = inb(SM_DATAIO); val &= 0x3f; outb(val, SM_DATAIO); /* enable watchdog by setting bit 0 of Watchdog Enable to 1 */ outb(SM_WATCHENABLE, SM_REGINDEX); val = inb(SM_DATAIO); val |= 0x01; outb(val, SM_DATAIO); supermicro_new_lock_watchdog(); } static void supermicro_new_pre_stop(void) { unsigned int val; supermicro_new_unlock_watchdog(); /* disable watchdog by setting bit 0 of Watchdog 
Enable to 0 */ outb(SM_WATCHENABLE, SM_REGINDEX); val = inb(SM_DATAIO); val &= 0xFE; outb(val, SM_DATAIO); supermicro_new_lock_watchdog(); } static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat) { supermicro_new_unlock_watchdog(); /* reset watchdog timeout to heartveat value */ outb(SM_WATCHTIMER, SM_REGINDEX); outb((heartbeat & 255), SM_DATAIO); supermicro_new_lock_watchdog(); } /* * Vendor Support: 911 * Board: Some Intel ICHx based motherboards * iTCO chipset: ICH7+ * * Some Intel motherboards have a broken BIOS implementation: i.e. * the SMI handler clear's the TIMEOUT bit in the TC01_STS register * and does not reload the time. Thus the TCO watchdog does not reboot * the system. * * These are the conclusions of Andriy Gapon <avg@icyb.net.ua> after * debugging: the SMI handler is quite simple - it tests value in * TCO1_CNT against 0x800, i.e. checks TCO_TMR_HLT. If the bit is set * the handler goes into an infinite loop, apparently to allow the * second timeout and reboot. Otherwise it simply clears TIMEOUT bit * in TCO1_STS and that's it. * So the logic seems to be reversed, because it is hard to see how * TIMEOUT can get set to 1 and SMI generated when TCO_TMR_HLT is set * (other than a transitional effect). * * The only fix found to get the motherboard(s) to reboot is to put * the glb_smi_en bit to 0. This is a dirty hack that bypasses the * broken code by disabling Global SMI. * * WARNING: globally disabling SMI could possibly lead to dramatic * problems, especially on laptops! I.e. various ACPI things where * SMI is used for communication between OS and firmware. * * Don't use this fix if you don't need to!!! */ static void broken_bios_start(unsigned long acpibase) { unsigned long val32; val32 = inl(SMI_EN); /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. 
*/ val32 &= 0xffffdffe; outl(val32, SMI_EN); } static void broken_bios_stop(unsigned long acpibase) { unsigned long val32; val32 = inl(SMI_EN); /* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI# Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */ val32 |= 0x00002001; outl(val32, SMI_EN); } /* * Generic Support Functions */ void iTCO_vendor_pre_start(unsigned long acpibase, unsigned int heartbeat) { switch (vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_start(acpibase); break; case SUPERMICRO_NEW_BOARD: supermicro_new_pre_start(heartbeat); break; case BROKEN_BIOS: broken_bios_start(acpibase); break; } } EXPORT_SYMBOL(iTCO_vendor_pre_start); void iTCO_vendor_pre_stop(unsigned long acpibase) { switch (vendorsupport) { case SUPERMICRO_OLD_BOARD: supermicro_old_pre_stop(acpibase); break; case SUPERMICRO_NEW_BOARD: supermicro_new_pre_stop(); break; case BROKEN_BIOS: broken_bios_stop(acpibase); break; } } EXPORT_SYMBOL(iTCO_vendor_pre_stop); void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat) { if (vendorsupport == SUPERMICRO_OLD_BOARD) supermicro_old_pre_keepalive(acpibase); else if (vendorsupport == SUPERMICRO_NEW_BOARD) supermicro_new_pre_set_heartbeat(heartbeat); } EXPORT_SYMBOL(iTCO_vendor_pre_keepalive); void iTCO_vendor_pre_set_heartbeat(unsigned int heartbeat) { if (vendorsupport == SUPERMICRO_NEW_BOARD) supermicro_new_pre_set_heartbeat(heartbeat); } EXPORT_SYMBOL(iTCO_vendor_pre_set_heartbeat); int iTCO_vendor_check_noreboot_on(void) { switch (vendorsupport) { case SUPERMICRO_OLD_BOARD: return 0; default: return 1; } } EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on); static int __init iTCO_vendor_init_module(void) { printk(KERN_INFO PFX "vendor-support=%d\n", vendorsupport); return 0; } static void __exit iTCO_vendor_exit_module(void) { printk(KERN_INFO PFX "Module Unloaded\n"); } module_init(iTCO_vendor_init_module); module_exit(iTCO_vendor_exit_module); MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>, " "R. 
Seretny <lkpatches@paypc.com>"); MODULE_DESCRIPTION("Intel TCO Vendor Specific WatchDog Timer Driver Support"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
Evervolv/android_kernel_htc_leo
mm/failslab.c
603
1304
#include <linux/fault-inject.h> #include <linux/gfp.h> static struct { struct fault_attr attr; u32 ignore_gfp_wait; #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS struct dentry *ignore_gfp_wait_file; #endif } failslab = { .attr = FAULT_ATTR_INITIALIZER, .ignore_gfp_wait = 1, }; bool should_failslab(size_t size, gfp_t gfpflags) { if (gfpflags & __GFP_NOFAIL) return false; if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) return false; return should_fail(&failslab.attr, size); } static int __init setup_failslab(char *str) { return setup_fault_attr(&failslab.attr, str); } __setup("failslab=", setup_failslab); #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int __init failslab_debugfs_init(void) { mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; struct dentry *dir; int err; err = init_fault_attr_dentries(&failslab.attr, "failslab"); if (err) return err; dir = failslab.attr.dentries.dir; failslab.ignore_gfp_wait_file = debugfs_create_bool("ignore-gfp-wait", mode, dir, &failslab.ignore_gfp_wait); if (!failslab.ignore_gfp_wait_file) { err = -ENOMEM; debugfs_remove(failslab.ignore_gfp_wait_file); cleanup_fault_attr_dentries(&failslab.attr); } return err; } late_initcall(failslab_debugfs_init); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
gpl-2.0
carz2/cm-kernel
drivers/staging/comedi/drivers/adl_pci7296.c
859
4851
/* comedi/drivers/adl_pci7296.c COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: adl_pci7296 Description: Driver for the Adlink PCI-7296 96 ch. digital io board Devices: [ADLink] PCI-7296 (adl_pci7296) Author: Jon Grierson <jd@renko.co.uk> Updated: Mon, 14 Apr 2008 15:05:56 +0100 Status: testing Configuration Options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. 
*/ #include "../comedidev.h" #include <linux/kernel.h> #include "comedi_pci.h" #include "8255.h" /* #include "8253.h" */ #define PORT1A 0 #define PORT2A 4 #define PORT3A 8 #define PORT4A 12 #define PCI_DEVICE_ID_PCI7296 0x7296 static DEFINE_PCI_DEVICE_TABLE(adl_pci7296_pci_table) = { { PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7296, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, adl_pci7296_pci_table); struct adl_pci7296_private { int data; struct pci_dev *pci_dev; }; #define devpriv ((struct adl_pci7296_private *)dev->private) static int adl_pci7296_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int adl_pci7296_detach(struct comedi_device *dev); static struct comedi_driver driver_adl_pci7296 = { .driver_name = "adl_pci7296", .module = THIS_MODULE, .attach = adl_pci7296_attach, .detach = adl_pci7296_detach, }; static int adl_pci7296_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pcidev; struct comedi_subdevice *s; int bus, slot; int ret; printk(KERN_INFO "comedi%d: attach adl_pci7432\n", dev->minor); dev->board_name = "pci7432"; bus = it->options[0]; slot = it->options[1]; if (alloc_private(dev, sizeof(struct adl_pci7296_private)) < 0) return -ENOMEM; if (alloc_subdevices(dev, 4) < 0) return -ENOMEM; for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL); pcidev != NULL; pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) { if (pcidev->vendor == PCI_VENDOR_ID_ADLINK && pcidev->device == PCI_DEVICE_ID_PCI7296) { if (bus || slot) { /* requested particular bus/slot */ if (pcidev->bus->number != bus || PCI_SLOT(pcidev->devfn) != slot) { continue; } } devpriv->pci_dev = pcidev; if (comedi_pci_enable(pcidev, "adl_pci7296") < 0) { printk(KERN_ERR "comedi%d: Failed to enable PCI device and request regions\n", dev->minor); return -EIO; } dev->iobase = pci_resource_start(pcidev, 2); printk(KERN_INFO "comedi: base addr %4lx\n", dev->iobase); /* four 8255 digital io subdevices */ s = 
dev->subdevices + 0; subdev_8255_init(dev, s, NULL, (unsigned long)(dev->iobase)); s = dev->subdevices + 1; ret = subdev_8255_init(dev, s, NULL, (unsigned long)(dev->iobase + PORT2A)); if (ret < 0) return ret; s = dev->subdevices + 2; ret = subdev_8255_init(dev, s, NULL, (unsigned long)(dev->iobase + PORT3A)); if (ret < 0) return ret; s = dev->subdevices + 3; ret = subdev_8255_init(dev, s, NULL, (unsigned long)(dev->iobase + PORT4A)); if (ret < 0) return ret; printk(KERN_DEBUG "comedi%d: adl_pci7432 attached\n", dev->minor); return 1; } } printk(KERN_ERR "comedi%d: no supported board found! (req. bus/slot : %d/%d)\n", dev->minor, bus, slot); return -EIO; } static int adl_pci7296_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: pci7432: remove\n", dev->minor); if (devpriv && devpriv->pci_dev) { if (dev->iobase) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } /* detach four 8255 digital io subdevices */ if (dev->subdevices) { subdev_8255_cleanup(dev, dev->subdevices + 0); subdev_8255_cleanup(dev, dev->subdevices + 1); subdev_8255_cleanup(dev, dev->subdevices + 2); subdev_8255_cleanup(dev, dev->subdevices + 3); } return 0; } COMEDI_PCI_INITCLEANUP(driver_adl_pci7296, adl_pci7296_pci_table);
gpl-2.0
epic4g/samsung-kernel-c1spr-EK02
arch/arm/mach-kirkwood/guruplug-setup.c
859
3002
/* * arch/arm/mach-kirkwood/guruplug-setup.c * * Marvell GuruPlug Reference Board Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/partitions.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/gpio.h> #include <linux/leds.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/kirkwood.h> #include <plat/mvsdio.h> #include "common.h" #include "mpp.h" static struct mtd_partition guruplug_nand_parts[] = { { .name = "u-boot", .offset = 0, .size = SZ_1M }, { .name = "uImage", .offset = MTDPART_OFS_NXTBLK, .size = SZ_4M }, { .name = "root", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL }, }; static struct mv643xx_eth_platform_data guruplug_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(0), }; static struct mv643xx_eth_platform_data guruplug_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(1), }; static struct mv_sata_platform_data guruplug_sata_data = { .n_ports = 1, }; static struct mvsdio_platform_data guruplug_mvsdio_data = { /* unfortunately the CD signal has not been connected */ }; static struct gpio_led guruplug_led_pins[] = { { .name = "guruplug:red:health", .gpio = 46, .active_low = 1, }, { .name = "guruplug:green:health", .gpio = 47, .active_low = 1, }, { .name = "guruplug:red:wmode", .gpio = 48, .active_low = 1, }, { .name = "guruplug:green:wmode", .gpio = 49, .active_low = 1, }, }; static struct gpio_led_platform_data guruplug_led_data = { .leds = guruplug_led_pins, .num_leds = ARRAY_SIZE(guruplug_led_pins), }; static struct platform_device guruplug_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &guruplug_led_data, } }; static unsigned int guruplug_mpp_config[] __initdata = { MPP46_GPIO, /* M_RLED */ MPP47_GPIO, /* M_GLED */ MPP48_GPIO, /* 
B_RLED */ MPP49_GPIO, /* B_GLED */ 0 }; static void __init guruplug_init(void) { /* * Basic setup. Needs to be called early. */ kirkwood_init(); kirkwood_mpp_conf(guruplug_mpp_config); kirkwood_uart0_init(); kirkwood_nand_init(ARRAY_AND_SIZE(guruplug_nand_parts), 25); kirkwood_ehci_init(); kirkwood_ge00_init(&guruplug_ge00_data); kirkwood_ge01_init(&guruplug_ge01_data); kirkwood_sata_init(&guruplug_sata_data); kirkwood_sdio_init(&guruplug_mvsdio_data); platform_device_register(&guruplug_leds); } MACHINE_START(GURUPLUG, "Marvell GuruPlug Reference Board") /* Maintainer: Siddarth Gore <gores@marvell.com> */ .phys_io = KIRKWOOD_REGS_PHYS_BASE, .io_pg_offst = ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc, .boot_params = 0x00000100, .init_machine = guruplug_init, .map_io = kirkwood_map_io, .init_irq = kirkwood_init_irq, .timer = &kirkwood_timer, MACHINE_END
gpl-2.0
zyrgit/linux-yocto-3.10-work
arch/mips/mm/c-r4k.c
1115
38974
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o doesn't disable interrupts on the local CPU
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
	preempt_disable();

	/*
	 * On MT kernels the cross-call is skipped: sibling TCs share the
	 * primary caches, so running func() locally is sufficient.
	 */
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, 1);
#endif
	func(info);
	preempt_enable();
}

/* Index cacheops are unsafe under CMP (coherence manager) configurations. */
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without boardcaches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

/* Workaround sequence issued before Hit cacheops on buggy R4600 parts. */
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache64_page(addr);
}

/* Pick the page-sized dcache blaster matching the probed line size. */
static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}

void (* r4k_blast_dcache)(void);
EXPORT_SYMBOL(r4k_blast_dcache);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

/* R4600 V1 needs the icache blast done with interrupts off. */
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

/*
 * TX49 workaround: invalidate the icache in two interleaved passes so the
 * currently-executing 1kB chunk is never invalidated under our feet.
 */
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.
blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

/* Pick the page-sized icache blaster matching the probed line size. */
static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		/* 32-byte lines need per-erratum variants on some cores. */
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

void (* r4k_blast_icache)(void);
EXPORT_SYMBOL(r4k_blast_icache);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

/*
 * S-cache variants key off scache_size (not line size) for the noop case:
 * a CPU may report a line size yet have no L2 present.
 */
static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	/* Loongson2's scache flush is sufficient; primaries are inclusive. */
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
}

/*
 * Returns non-zero if the mm has (or, on MT, any online CPU has) a live
 * ASID, i.e. the address space may have lines in the caches.
 */
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	/* Only needed when aliases are possible or icache can go stale. */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sane ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	/* Without dcache aliases there is nothing per-mm to flush. */
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If it owns no valid ASID yet, cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			/* Dropping the context invalidates the VTAG icache. */
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	/* In atomic context a cross-CPU call is not allowed; flush locally. */
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page,
				(void *) addr);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(unsigned long start,
	unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		__sync();
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
	__sync();
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors and
			 * some processors, among them the RM5200 and RM7000
			 * QED processors will throw an address error for cache
			 * hit ops with insufficient alignment.	 Solved by
			 * aligning the address to cache line size.
			 */
			blast_inv_scache_range(addr, addr + size);
		}
		__sync();
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
	__sync();
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	/* Write back the single line holding the trampoline, then kill it
	 * in the icache so the new instructions are fetched. */
	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla $at,1f\n\t"
#endif
			"cache %0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
}

static void r4k_flush_icache_all(void)
{
	/* Only VTAG icaches need a full blast here. */
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
	unsigned long	vaddr;
	int		size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup.
	 */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
	/*
	 * Early versions of the 74K do not update the cache tags on a
	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
	 * aliases.  In this case it is better to treat the cache as always
	 * having aliases.
	 */
	if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
		c->dcache.flags |= MIPS_CACHE_VTAG;
	if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
	if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
	    ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
		c->dcache.flags |= MIPS_CACHE_VTAG;
		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
	}
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

/*
 * Probe primary I/D cache geometry (size, line size, ways) into
 * current_cpu_data and the icache_size/dcache_size globals.
 */
static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches?
 */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit= 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz= lsize;
		c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.	With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed so normally would suffer from aliases.  So
	 * normally they'd suffer from aliases but magic in the hardware deals
	 * with that for us so we don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_XLR:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if (c->cputype == CPU_74K)
			alias_74k_erratum(c);
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips doesn't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard...
		 */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	/*
	 * NOTE(review): this writes c->dcache.waybit, not c->scache.waybit,
	 * right after filling in the scache fields — looks like it may be a
	 * copy/paste slip inherited from upstream; confirm before touching.
	 */
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Loongson2 L2 geometry is fixed: 512kB, 4-way, 32-byte lines. */
	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size /
		(c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/* probe_scache() must run uncached — see its warning above. */
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit= 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif
	case CPU_XLP:
		/* don't need to worry about L2, fully coherent */
		return;

	default:
		if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
				    MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init ()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * Au1100 errata actually keeps silence about this bit, so we set it
	 * just in case for those revisions that require it to be set according
	 * to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 0;
}

early_param("cca", cca_setup);

static void __cpuinit coherency_setup(void)
{
	/* Fall back to the hardware's current CCA if none was given. */
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit and; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
*/ switch (current_cpu_type()) { case CPU_R4000PC: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400PC: case CPU_R4400SC: case CPU_R4400MC: clear_c0_config(CONF_CU); break; /* * We need to catch the early Alchemy SOCs with * the write-only co_config.od bit and set it back to one on: * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB */ case CPU_ALCHEMY: au1x00_fixup_config_od(); break; case PRID_IMP_PR4450: nxp_pr4450_fixup_config(); break; } } static void __cpuinit r4k_cache_error_setup(void) { extern char __weak except_vec2_generic; extern char __weak except_vec2_sb1; struct cpuinfo_mips *c = &current_cpu_data; switch (c->cputype) { case CPU_SB1: case CPU_SB1A: set_uncached_handler(0x100, &except_vec2_sb1, 0x80); break; default: set_uncached_handler(0x100, &except_vec2_generic, 0x80); break; } } void __cpuinit r4k_cache_init(void) { extern void build_clear_page(void); extern void build_copy_page(void); struct cpuinfo_mips *c = &current_cpu_data; probe_pcache(); setup_scache(); r4k_blast_dcache_page_setup(); r4k_blast_dcache_page_indexed_setup(); r4k_blast_dcache_setup(); r4k_blast_icache_page_setup(); r4k_blast_icache_page_indexed_setup(); r4k_blast_icache_setup(); r4k_blast_scache_page_setup(); r4k_blast_scache_page_indexed_setup(); r4k_blast_scache_setup(); /* * Some MIPS32 and MIPS64 processors have physically indexed caches. * This code supports virtually indexed processors and will be * unnecessarily inefficient on physically indexed processors. 
*/ if (c->dcache.linesz) shm_align_mask = max_t( unsigned long, c->dcache.sets * c->dcache.linesz - 1, PAGE_SIZE - 1); else shm_align_mask = PAGE_SIZE-1; __flush_cache_vmap = r4k__flush_cache_vmap; __flush_cache_vunmap = r4k__flush_cache_vunmap; flush_cache_all = cache_noop; __flush_cache_all = r4k___flush_cache_all; flush_cache_mm = r4k_flush_cache_mm; flush_cache_page = r4k_flush_cache_page; flush_cache_range = r4k_flush_cache_range; __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; flush_cache_sigtramp = r4k_flush_cache_sigtramp; flush_icache_all = r4k_flush_icache_all; local_flush_data_cache_page = local_r4k_flush_data_cache_page; flush_data_cache_page = r4k_flush_data_cache_page; flush_icache_range = r4k_flush_icache_range; local_flush_icache_range = local_r4k_flush_icache_range; #if defined(CONFIG_DMA_NONCOHERENT) if (coherentio) { _dma_cache_wback_inv = (void *)cache_noop; _dma_cache_wback = (void *)cache_noop; _dma_cache_inv = (void *)cache_noop; } else { _dma_cache_wback_inv = r4k_dma_cache_wback_inv; _dma_cache_wback = r4k_dma_cache_wback_inv; _dma_cache_inv = r4k_dma_cache_inv; } #endif build_clear_page(); build_copy_page(); /* * We want to run CMP kernels on core with and without coherent * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether * or not to flush caches. */ local_r4k___flush_cache_all(NULL); coherency_setup(); board_cache_error_setup = r4k_cache_error_setup; }
gpl-2.0
lab11/bluetooth-next
drivers/phy/phy-pxa-28nm-hsic.c
1115
5898
/* * Copyright (C) 2015 Linaro, Ltd. * Rob Herring <robh@kernel.org> * * Based on vendor driver: * Copyright (C) 2013 Marvell Inc. * Author: Chao Xie <xiechao.mail@gmail.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/io.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> #define PHY_28NM_HSIC_CTRL 0x08 #define PHY_28NM_HSIC_IMPCAL_CAL 0x18 #define PHY_28NM_HSIC_PLL_CTRL01 0x1c #define PHY_28NM_HSIC_PLL_CTRL2 0x20 #define PHY_28NM_HSIC_INT 0x28 #define PHY_28NM_HSIC_PLL_SELLPFR_SHIFT 26 #define PHY_28NM_HSIC_PLL_FBDIV_SHIFT 0 #define PHY_28NM_HSIC_PLL_REFDIV_SHIFT 9 #define PHY_28NM_HSIC_S2H_PU_PLL BIT(10) #define PHY_28NM_HSIC_H2S_PLL_LOCK BIT(15) #define PHY_28NM_HSIC_S2H_HSIC_EN BIT(7) #define S2H_DRV_SE0_4RESUME BIT(14) #define PHY_28NM_HSIC_H2S_IMPCAL_DONE BIT(27) #define PHY_28NM_HSIC_CONNECT_INT BIT(1) #define PHY_28NM_HSIC_HS_READY_INT BIT(2) struct mv_hsic_phy { struct phy *phy; struct platform_device *pdev; void __iomem *base; struct clk *clk; }; static bool wait_for_reg(void __iomem *reg, u32 mask, unsigned long timeout) { timeout += jiffies; while (time_is_after_eq_jiffies(timeout)) { if ((readl(reg) & mask) == mask) return true; msleep(1); } return false; } static int mv_hsic_phy_init(struct phy *phy) { struct mv_hsic_phy *mv_phy = phy_get_drvdata(phy); struct platform_device *pdev = mv_phy->pdev; void __iomem *base = mv_phy->base; clk_prepare_enable(mv_phy->clk); /* Set reference 
clock */ writel(0x1 << PHY_28NM_HSIC_PLL_SELLPFR_SHIFT | 0xf0 << PHY_28NM_HSIC_PLL_FBDIV_SHIFT | 0xd << PHY_28NM_HSIC_PLL_REFDIV_SHIFT, base + PHY_28NM_HSIC_PLL_CTRL01); /* Turn on PLL */ writel(readl(base + PHY_28NM_HSIC_PLL_CTRL2) | PHY_28NM_HSIC_S2H_PU_PLL, base + PHY_28NM_HSIC_PLL_CTRL2); /* Make sure PHY PLL is locked */ if (!wait_for_reg(base + PHY_28NM_HSIC_PLL_CTRL2, PHY_28NM_HSIC_H2S_PLL_LOCK, HZ / 10)) { dev_err(&pdev->dev, "HSIC PHY PLL not locked after 100mS."); clk_disable_unprepare(mv_phy->clk); return -ETIMEDOUT; } return 0; } static int mv_hsic_phy_power_on(struct phy *phy) { struct mv_hsic_phy *mv_phy = phy_get_drvdata(phy); struct platform_device *pdev = mv_phy->pdev; void __iomem *base = mv_phy->base; u32 reg; reg = readl(base + PHY_28NM_HSIC_CTRL); /* Avoid SE0 state when resume for some device will take it as reset */ reg &= ~S2H_DRV_SE0_4RESUME; reg |= PHY_28NM_HSIC_S2H_HSIC_EN; /* Enable HSIC PHY */ writel(reg, base + PHY_28NM_HSIC_CTRL); /* * Calibration Timing * ____________________________ * CAL START ___| * ____________________ * CAL_DONE ___________| * | 400us | */ /* Make sure PHY Calibration is ready */ if (!wait_for_reg(base + PHY_28NM_HSIC_IMPCAL_CAL, PHY_28NM_HSIC_H2S_IMPCAL_DONE, HZ / 10)) { dev_warn(&pdev->dev, "HSIC PHY READY not set after 100mS."); return -ETIMEDOUT; } /* Waiting for HSIC connect int*/ if (!wait_for_reg(base + PHY_28NM_HSIC_INT, PHY_28NM_HSIC_CONNECT_INT, HZ / 5)) { dev_warn(&pdev->dev, "HSIC wait for connect interrupt timeout."); return -ETIMEDOUT; } return 0; } static int mv_hsic_phy_power_off(struct phy *phy) { struct mv_hsic_phy *mv_phy = phy_get_drvdata(phy); void __iomem *base = mv_phy->base; writel(readl(base + PHY_28NM_HSIC_CTRL) & ~PHY_28NM_HSIC_S2H_HSIC_EN, base + PHY_28NM_HSIC_CTRL); return 0; } static int mv_hsic_phy_exit(struct phy *phy) { struct mv_hsic_phy *mv_phy = phy_get_drvdata(phy); void __iomem *base = mv_phy->base; /* Turn off PLL */ writel(readl(base + PHY_28NM_HSIC_PLL_CTRL2) & 
~PHY_28NM_HSIC_S2H_PU_PLL, base + PHY_28NM_HSIC_PLL_CTRL2); clk_disable_unprepare(mv_phy->clk); return 0; } static const struct phy_ops hsic_ops = { .init = mv_hsic_phy_init, .power_on = mv_hsic_phy_power_on, .power_off = mv_hsic_phy_power_off, .exit = mv_hsic_phy_exit, .owner = THIS_MODULE, }; static int mv_hsic_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct mv_hsic_phy *mv_phy; struct resource *r; mv_phy = devm_kzalloc(&pdev->dev, sizeof(*mv_phy), GFP_KERNEL); if (!mv_phy) return -ENOMEM; mv_phy->pdev = pdev; mv_phy->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mv_phy->clk)) { dev_err(&pdev->dev, "failed to get clock.\n"); return PTR_ERR(mv_phy->clk); } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); mv_phy->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(mv_phy->base)) return PTR_ERR(mv_phy->base); mv_phy->phy = devm_phy_create(&pdev->dev, pdev->dev.of_node, &hsic_ops); if (IS_ERR(mv_phy->phy)) return PTR_ERR(mv_phy->phy); phy_set_drvdata(mv_phy->phy, mv_phy); phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); return PTR_ERR_OR_ZERO(phy_provider); } static const struct of_device_id mv_hsic_phy_dt_match[] = { { .compatible = "marvell,pxa1928-hsic-phy", }, {}, }; MODULE_DEVICE_TABLE(of, mv_hsic_phy_dt_match); static struct platform_driver mv_hsic_phy_driver = { .probe = mv_hsic_phy_probe, .driver = { .name = "mv-hsic-phy", .of_match_table = of_match_ptr(mv_hsic_phy_dt_match), }, }; module_platform_driver(mv_hsic_phy_driver); MODULE_AUTHOR("Rob Herring <robh@kernel.org>"); MODULE_DESCRIPTION("Marvell HSIC phy driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
lamassu/kernel
arch/arm/plat-s3c24xx/dma.c
1115
33271
/* linux/arch/arm/plat-s3c24xx/dma.c * * Copyright 2003-2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2410 DMA core * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #ifdef CONFIG_S3C2410_DMA_DEBUG #define DEBUG #endif #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/syscore_ops.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/io.h> #include <asm/system.h> #include <asm/irq.h> #include <mach/hardware.h> #include <mach/dma.h> #include <mach/map.h> #include <plat/dma-s3c24xx.h> #include <plat/regs-dma.h> /* io map for dma */ static void __iomem *dma_base; static struct kmem_cache *dma_kmem; static int dma_channels; static struct s3c24xx_dma_selection dma_sel; /* debugging functions */ #define BUF_MAGIC (0xcafebabe) #define dmawarn(fmt...) 
printk(KERN_DEBUG fmt) #define dma_regaddr(chan, reg) ((chan)->regs + (reg)) #if 1 #define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg)) #else static inline void dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val) { pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg); writel(val, dma_regaddr(chan, reg)); } #endif #define dma_rdreg(chan, reg) readl((chan)->regs + (reg)) /* captured register state for debug */ struct s3c2410_dma_regstate { unsigned long dcsrc; unsigned long disrc; unsigned long dstat; unsigned long dcon; unsigned long dmsktrig; }; #ifdef CONFIG_S3C2410_DMA_DEBUG /* dmadbg_showregs * * simple debug routine to print the current state of the dma registers */ static void dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs) { regs->dcsrc = dma_rdreg(chan, S3C2410_DMA_DCSRC); regs->disrc = dma_rdreg(chan, S3C2410_DMA_DISRC); regs->dstat = dma_rdreg(chan, S3C2410_DMA_DSTAT); regs->dcon = dma_rdreg(chan, S3C2410_DMA_DCON); regs->dmsktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); } static void dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs) { printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n", chan->number, fname, line, regs->dcsrc, regs->disrc, regs->dstat, regs->dmsktrig, regs->dcon); } static void dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan) { struct s3c2410_dma_regstate state; dmadbg_capture(chan, &state); printk(KERN_DEBUG "dma%d: %s:%d: ls=%d, cur=%p, %p %p\n", chan->number, fname, line, chan->load_state, chan->curr, chan->next, chan->end); dmadbg_dumpregs(fname, line, chan, &state); } static void dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan) { struct s3c2410_dma_regstate state; dmadbg_capture(chan, &state); dmadbg_dumpregs(fname, line, chan, &state); } #define dbg_showregs(chan) dmadbg_showregs(__func__, 
__LINE__, (chan)) #define dbg_showchan(chan) dmadbg_showchan(__func__, __LINE__, (chan)) #else #define dbg_showregs(chan) do { } while(0) #define dbg_showchan(chan) do { } while(0) #endif /* CONFIG_S3C2410_DMA_DEBUG */ /* s3c2410_dma_stats_timeout * * Update DMA stats from timeout info */ static void s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val) { if (stats == NULL) return; if (val > stats->timeout_longest) stats->timeout_longest = val; if (val < stats->timeout_shortest) stats->timeout_shortest = val; stats->timeout_avg += val; } /* s3c2410_dma_waitforload * * wait for the DMA engine to load a buffer, and update the state accordingly */ static int s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line) { int timeout = chan->load_timeout; int took; if (chan->load_state != S3C2410_DMALOAD_1LOADED) { printk(KERN_ERR "dma%d: s3c2410_dma_waitforload() called in loadstate %d from line %d\n", chan->number, chan->load_state, line); return 0; } if (chan->stats != NULL) chan->stats->loads++; while (--timeout > 0) { if ((dma_rdreg(chan, S3C2410_DMA_DSTAT) << (32-20)) != 0) { took = chan->load_timeout - timeout; s3c2410_dma_stats_timeout(chan->stats, took); switch (chan->load_state) { case S3C2410_DMALOAD_1LOADED: chan->load_state = S3C2410_DMALOAD_1RUNNING; break; default: printk(KERN_ERR "dma%d: unknown load_state in s3c2410_dma_waitforload() %d\n", chan->number, chan->load_state); } return 1; } } if (chan->stats != NULL) { chan->stats->timeout_failed++; } return 0; } /* s3c2410_dma_loadbuffer * * load a buffer, and update the channel state */ static inline int s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf) { unsigned long reload; if (buf == NULL) { dmawarn("buffer is NULL\n"); return -EINVAL; } pr_debug("s3c2410_chan_loadbuffer: loading buff %p (0x%08lx,0x%06x)\n", buf, (unsigned long)buf->data, buf->size); /* check the state of the channel before we do anything */ if (chan->load_state == 
S3C2410_DMALOAD_1LOADED) { dmawarn("load_state is S3C2410_DMALOAD_1LOADED\n"); } if (chan->load_state == S3C2410_DMALOAD_1LOADED_1RUNNING) { dmawarn("state is S3C2410_DMALOAD_1LOADED_1RUNNING\n"); } /* it would seem sensible if we are the last buffer to not bother * with the auto-reload bit, so that the DMA engine will not try * and load another transfer after this one has finished... */ if (chan->load_state == S3C2410_DMALOAD_NONE) { pr_debug("load_state is none, checking for noreload (next=%p)\n", buf->next); reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0; } else { //pr_debug("load_state is %d => autoreload\n", chan->load_state); reload = S3C2410_DCON_AUTORELOAD; } if ((buf->data & 0xf0000000) != 0x30000000) { dmawarn("dmaload: buffer is %p\n", (void *)buf->data); } writel(buf->data, chan->addr_reg); dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | reload | (buf->size/chan->xfer_unit)); chan->next = buf->next; /* update the state of the channel */ switch (chan->load_state) { case S3C2410_DMALOAD_NONE: chan->load_state = S3C2410_DMALOAD_1LOADED; break; case S3C2410_DMALOAD_1RUNNING: chan->load_state = S3C2410_DMALOAD_1LOADED_1RUNNING; break; default: dmawarn("dmaload: unknown state %d in loadbuffer\n", chan->load_state); break; } return 0; } /* s3c2410_dma_call_op * * small routine to call the op routine with the given op if it has been * registered */ static void s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op) { if (chan->op_fn != NULL) { (chan->op_fn)(chan, op); } } /* s3c2410_dma_buffdone * * small wrapper to check if callback routine needs to be called, and * if so, call it */ static inline void s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf, enum s3c2410_dma_buffresult result) { #if 0 pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n", chan->callback_fn, buf, buf->id, buf->size, result); #endif if (chan->callback_fn != NULL) { (chan->callback_fn)(chan, buf->id, buf->size, 
result); } } /* s3c2410_dma_start * * start a dma channel going */ static int s3c2410_dma_start(struct s3c2410_dma_chan *chan) { unsigned long tmp; unsigned long flags; pr_debug("s3c2410_start_dma: channel=%d\n", chan->number); local_irq_save(flags); if (chan->state == S3C2410_DMA_RUNNING) { pr_debug("s3c2410_start_dma: already running (%d)\n", chan->state); local_irq_restore(flags); return 0; } chan->state = S3C2410_DMA_RUNNING; /* check wether there is anything to load, and if not, see * if we can find anything to load */ if (chan->load_state == S3C2410_DMALOAD_NONE) { if (chan->next == NULL) { printk(KERN_ERR "dma%d: channel has nothing loaded\n", chan->number); chan->state = S3C2410_DMA_IDLE; local_irq_restore(flags); return -EINVAL; } s3c2410_dma_loadbuffer(chan, chan->next); } dbg_showchan(chan); /* enable the channel */ if (!chan->irq_enabled) { enable_irq(chan->irq); chan->irq_enabled = 1; } /* start the channel going */ tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); tmp &= ~S3C2410_DMASKTRIG_STOP; tmp |= S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp); #if 0 /* the dma buffer loads should take care of clearing the AUTO * reloading feature */ tmp = dma_rdreg(chan, S3C2410_DMA_DCON); tmp &= ~S3C2410_DCON_NORELOAD; dma_wrreg(chan, S3C2410_DMA_DCON, tmp); #endif s3c2410_dma_call_op(chan, S3C2410_DMAOP_START); dbg_showchan(chan); /* if we've only loaded one buffer onto the channel, then chec * to see if we have another, and if so, try and load it so when * the first buffer is finished, the new one will be loaded onto * the channel */ if (chan->next != NULL) { if (chan->load_state == S3C2410_DMALOAD_1LOADED) { if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { pr_debug("%s: buff not yet loaded, no more todo\n", __func__); } else { chan->load_state = S3C2410_DMALOAD_1RUNNING; s3c2410_dma_loadbuffer(chan, chan->next); } } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { 
s3c2410_dma_loadbuffer(chan, chan->next); } } local_irq_restore(flags); return 0; } /* s3c2410_dma_canload * * work out if we can queue another buffer into the DMA engine */ static int s3c2410_dma_canload(struct s3c2410_dma_chan *chan) { if (chan->load_state == S3C2410_DMALOAD_NONE || chan->load_state == S3C2410_DMALOAD_1RUNNING) return 1; return 0; } /* s3c2410_dma_enqueue * * queue an given buffer for dma transfer. * * id the device driver's id information for this buffer * data the physical address of the buffer data * size the size of the buffer in bytes * * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART * is checked, and if set, the channel is started. If this flag isn't set, * then an error will be returned. * * It is possible to queue more than one DMA buffer onto a channel at * once, and the code will deal with the re-loading of the next buffer * when necessary. */ int s3c2410_dma_enqueue(unsigned int channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); struct s3c2410_dma_buf *buf; unsigned long flags; if (chan == NULL) return -EINVAL; pr_debug("%s: id=%p, data=%08x, size=%d\n", __func__, id, (unsigned int)data, size); buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC); if (buf == NULL) { pr_debug("%s: out of memory (%ld alloc)\n", __func__, (long)sizeof(*buf)); return -ENOMEM; } //pr_debug("%s: new buffer %p\n", __func__, buf); //dbg_showchan(chan); buf->next = NULL; buf->data = buf->ptr = data; buf->size = size; buf->id = id; buf->magic = BUF_MAGIC; local_irq_save(flags); if (chan->curr == NULL) { /* we've got nothing loaded... 
*/ pr_debug("%s: buffer %p queued onto empty channel\n", __func__, buf); chan->curr = buf; chan->end = buf; chan->next = NULL; } else { pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n", chan->number, __func__, buf); if (chan->end == NULL) pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n", chan->number, __func__, chan); chan->end->next = buf; chan->end = buf; } /* if necessary, update the next buffer field */ if (chan->next == NULL) chan->next = buf; /* check to see if we can load a buffer */ if (chan->state == S3C2410_DMA_RUNNING) { if (chan->load_state == S3C2410_DMALOAD_1LOADED && 1) { if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { printk(KERN_ERR "dma%d: loadbuffer:" "timeout loading buffer\n", chan->number); dbg_showchan(chan); local_irq_restore(flags); return -EINVAL; } } while (s3c2410_dma_canload(chan) && chan->next != NULL) { s3c2410_dma_loadbuffer(chan, chan->next); } } else if (chan->state == S3C2410_DMA_IDLE) { if (chan->flags & S3C2410_DMAF_AUTOSTART) { s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL, S3C2410_DMAOP_START); } } local_irq_restore(flags); return 0; } EXPORT_SYMBOL(s3c2410_dma_enqueue); static inline void s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf) { int magicok = (buf->magic == BUF_MAGIC); buf->magic = -1; if (magicok) { kmem_cache_free(dma_kmem, buf); } else { printk("s3c2410_dma_freebuf: buff %p with bad magic\n", buf); } } /* s3c2410_dma_lastxfer * * called when the system is out of buffers, to ensure that the channel * is prepared for shutdown. */ static inline void s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan) { #if 0 pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n", chan->number, chan->load_state); #endif switch (chan->load_state) { case S3C2410_DMALOAD_NONE: break; case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? 
*/ printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", chan->number, __func__); return; } break; case S3C2410_DMALOAD_1LOADED_1RUNNING: /* I believe in this case we do not have anything to do * until the next buffer comes along, and we turn off the * reload */ return; default: pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n", chan->number, chan->load_state); return; } /* hopefully this'll shut the damned thing up after the transfer... */ dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | S3C2410_DCON_NORELOAD); } #define dmadbg2(x...) static irqreturn_t s3c2410_dma_irq(int irq, void *devpw) { struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw; struct s3c2410_dma_buf *buf; buf = chan->curr; dbg_showchan(chan); /* modify the channel state */ switch (chan->load_state) { case S3C2410_DMALOAD_1RUNNING: /* TODO - if we are running only one buffer, we probably * want to reload here, and then worry about the buffer * callback */ chan->load_state = S3C2410_DMALOAD_NONE; break; case S3C2410_DMALOAD_1LOADED: /* iirc, we should go back to NONE loaded here, we * had a buffer, and it was never verified as being * loaded. */ chan->load_state = S3C2410_DMALOAD_NONE; break; case S3C2410_DMALOAD_1LOADED_1RUNNING: /* we'll worry about checking to see if another buffer is * ready after we've called back the owner. 
This should * ensure we do not wait around too long for the DMA * engine to start the next transfer */ chan->load_state = S3C2410_DMALOAD_1LOADED; break; case S3C2410_DMALOAD_NONE: printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n", chan->number); break; default: printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n", chan->number, chan->load_state); break; } if (buf != NULL) { /* update the chain to make sure that if we load any more * buffers when we call the callback function, things should * work properly */ chan->curr = buf->next; buf->next = NULL; if (buf->magic != BUF_MAGIC) { printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n", chan->number, __func__, buf); return IRQ_HANDLED; } s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK); /* free resouces */ s3c2410_dma_freebuf(buf); } else { } /* only reload if the channel is still running... our buffer done * routine may have altered the state by requesting the dma channel * to stop or shutdown... */ /* todo: check that when the channel is shut-down from inside this * function, we cope with unsetting reload, etc */ if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) { unsigned long flags; switch (chan->load_state) { case S3C2410_DMALOAD_1RUNNING: /* don't need to do anything for this state */ break; case S3C2410_DMALOAD_NONE: /* can load buffer immediately */ break; case S3C2410_DMALOAD_1LOADED: if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { /* flag error? */ printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n", chan->number, __func__); return IRQ_HANDLED; } break; case S3C2410_DMALOAD_1LOADED_1RUNNING: goto no_load; default: printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n", chan->number, chan->load_state); return IRQ_HANDLED; } local_irq_save(flags); s3c2410_dma_loadbuffer(chan, chan->next); local_irq_restore(flags); } else { s3c2410_dma_lastxfer(chan); /* see if we can stop this channel.. 
*/ if (chan->load_state == S3C2410_DMALOAD_NONE) { pr_debug("dma%d: end of transfer, stopping channel (%ld)\n", chan->number, jiffies); s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL, S3C2410_DMAOP_STOP); } } no_load: return IRQ_HANDLED; } static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel); /* s3c2410_request_dma * * get control of an dma channel */ int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *client, void *dev) { struct s3c2410_dma_chan *chan; unsigned long flags; int err; pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n", channel, client->name, dev); local_irq_save(flags); chan = s3c2410_dma_map_channel(channel); if (chan == NULL) { local_irq_restore(flags); return -EBUSY; } dbg_showchan(chan); chan->client = client; chan->in_use = 1; if (!chan->irq_claimed) { pr_debug("dma%d: %s : requesting irq %d\n", channel, __func__, chan->irq); chan->irq_claimed = 1; local_irq_restore(flags); err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED, client->name, (void *)chan); local_irq_save(flags); if (err) { chan->in_use = 0; chan->irq_claimed = 0; local_irq_restore(flags); printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n", client->name, chan->irq, chan->number); return err; } chan->irq_enabled = 1; } local_irq_restore(flags); /* need to setup */ pr_debug("%s: channel initialised, %p\n", __func__, chan); return chan->number | DMACH_LOW_LEVEL; } EXPORT_SYMBOL(s3c2410_dma_request); /* s3c2410_dma_free * * release the given channel back to the system, will stop and flush * any outstanding transfers, and ensure the channel is ready for the * next claimant. * * Note, although a warning is currently printed if the freeing client * info is not the same as the registrant's client info, the free is still * allowed to go through. 
*/ int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; if (chan == NULL) return -EINVAL; local_irq_save(flags); if (chan->client != client) { printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n", channel, chan->client, client); } /* sort out stopping and freeing the channel */ if (chan->state != S3C2410_DMA_IDLE) { pr_debug("%s: need to stop dma channel %p\n", __func__, chan); /* possibly flush the channel */ s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP); } chan->client = NULL; chan->in_use = 0; if (chan->irq_claimed) free_irq(chan->irq, (void *)chan); chan->irq_claimed = 0; if (!(channel & DMACH_LOW_LEVEL)) s3c_dma_chan_map[channel] = NULL; local_irq_restore(flags); return 0; } EXPORT_SYMBOL(s3c2410_dma_free); static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan) { unsigned long flags; unsigned long tmp; pr_debug("%s:\n", __func__); dbg_showchan(chan); local_irq_save(flags); s3c2410_dma_call_op(chan, S3C2410_DMAOP_STOP); tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); tmp |= S3C2410_DMASKTRIG_STOP; //tmp &= ~S3C2410_DMASKTRIG_ON; dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp); #if 0 /* should also clear interrupts, according to WinCE BSP */ tmp = dma_rdreg(chan, S3C2410_DMA_DCON); tmp |= S3C2410_DCON_NORELOAD; dma_wrreg(chan, S3C2410_DMA_DCON, tmp); #endif /* should stop do this, or should we wait for flush? 
*/ chan->state = S3C2410_DMA_IDLE; chan->load_state = S3C2410_DMALOAD_NONE; local_irq_restore(flags); return 0; } static void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan) { unsigned long tmp; unsigned int timeout = 0x10000; while (timeout-- > 0) { tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG); if (!(tmp & S3C2410_DMASKTRIG_ON)) return; } pr_debug("dma%d: failed to stop?\n", chan->number); } /* s3c2410_dma_flush * * stop the channel, and remove all current and pending transfers */ static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan) { struct s3c2410_dma_buf *buf, *next; unsigned long flags; pr_debug("%s: chan %p (%d)\n", __func__, chan, chan->number); dbg_showchan(chan); local_irq_save(flags); if (chan->state != S3C2410_DMA_IDLE) { pr_debug("%s: stopping channel...\n", __func__ ); s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP); } buf = chan->curr; if (buf == NULL) buf = chan->next; chan->curr = chan->next = chan->end = NULL; if (buf != NULL) { for ( ; buf != NULL; buf = next) { next = buf->next; pr_debug("%s: free buffer %p, next %p\n", __func__, buf, buf->next); s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT); s3c2410_dma_freebuf(buf); } } dbg_showregs(chan); s3c2410_dma_waitforstop(chan); #if 0 /* should also clear interrupts, according to WinCE BSP */ { unsigned long tmp; tmp = dma_rdreg(chan, S3C2410_DMA_DCON); tmp |= S3C2410_DCON_NORELOAD; dma_wrreg(chan, S3C2410_DMA_DCON, tmp); } #endif dbg_showregs(chan); local_irq_restore(flags); return 0; } static int s3c2410_dma_started(struct s3c2410_dma_chan *chan) { unsigned long flags; local_irq_save(flags); dbg_showchan(chan); /* if we've only loaded one buffer onto the channel, then chec * to see if we have another, and if so, try and load it so when * the first buffer is finished, the new one will be loaded onto * the channel */ if (chan->next != NULL) { if (chan->load_state == S3C2410_DMALOAD_1LOADED) { if (s3c2410_dma_waitforload(chan, __LINE__) == 0) { pr_debug("%s: buff not yet loaded, no 
more todo\n", __func__); } else { chan->load_state = S3C2410_DMALOAD_1RUNNING; s3c2410_dma_loadbuffer(chan, chan->next); } } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) { s3c2410_dma_loadbuffer(chan, chan->next); } } local_irq_restore(flags); return 0; } int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); if (chan == NULL) return -EINVAL; switch (op) { case S3C2410_DMAOP_START: return s3c2410_dma_start(chan); case S3C2410_DMAOP_STOP: return s3c2410_dma_dostop(chan); case S3C2410_DMAOP_PAUSE: case S3C2410_DMAOP_RESUME: return -ENOENT; case S3C2410_DMAOP_FLUSH: return s3c2410_dma_flush(chan); case S3C2410_DMAOP_STARTED: return s3c2410_dma_started(chan); case S3C2410_DMAOP_TIMEOUT: return 0; } return -ENOENT; /* unknown, don't bother */ } EXPORT_SYMBOL(s3c2410_dma_ctrl); /* DMA configuration for each channel * * DISRCC -> source of the DMA (AHB,APB) * DISRC -> source address of the DMA * DIDSTC -> destination of the DMA (AHB,APD) * DIDST -> destination address of the DMA */ /* s3c2410_dma_config * * xfersize: size of unit in bytes (1,2,4) */ int s3c2410_dma_config(enum dma_ch channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned int dcon; pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit); if (chan == NULL) return -EINVAL; dcon = chan->dcon & dma_sel.dcon_mask; pr_debug("%s: dcon is %08x\n", __func__, dcon); switch (chan->req_ch) { case DMACH_I2S_IN: case DMACH_I2S_OUT: case DMACH_PCM_IN: case DMACH_PCM_OUT: case DMACH_MIC_IN: default: dcon |= S3C2410_DCON_HANDSHAKE; dcon |= S3C2410_DCON_SYNC_PCLK; break; case DMACH_SDI: /* note, ensure if need HANDSHAKE or not */ dcon |= S3C2410_DCON_SYNC_PCLK; break; case DMACH_XD0: case DMACH_XD1: dcon |= S3C2410_DCON_HANDSHAKE; dcon |= S3C2410_DCON_SYNC_HCLK; break; } switch (xferunit) { case 1: dcon |= S3C2410_DCON_BYTE; break; case 2: dcon |= S3C2410_DCON_HALFWORD; break; 
case 4: dcon |= S3C2410_DCON_WORD; break; default: pr_debug("%s: bad transfer size %d\n", __func__, xferunit); return -EINVAL; } dcon |= S3C2410_DCON_HWTRIG; dcon |= S3C2410_DCON_INTREQ; pr_debug("%s: dcon now %08x\n", __func__, dcon); chan->dcon = dcon; chan->xfer_unit = xferunit; return 0; } EXPORT_SYMBOL(s3c2410_dma_config); /* s3c2410_dma_devconfig * * configure the dma source/destination hardware type and address * * source: S3C2410_DMASRC_HW: source is hardware * S3C2410_DMASRC_MEM: source is memory * * devaddr: physical address of the source */ int s3c2410_dma_devconfig(enum dma_ch channel, enum s3c2410_dmasrc source, unsigned long devaddr) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned int hwcfg; if (chan == NULL) return -EINVAL; pr_debug("%s: source=%d, devaddr=%08lx\n", __func__, (int)source, devaddr); chan->source = source; chan->dev_addr = devaddr; switch (chan->req_ch) { case DMACH_XD0: case DMACH_XD1: hwcfg = 0; /* AHB */ break; default: hwcfg = S3C2410_DISRCC_APB; } /* always assume our peripheral desintation is a fixed * address in memory. 
*/ hwcfg |= S3C2410_DISRCC_INC; switch (source) { case S3C2410_DMASRC_HW: /* source is hardware */ pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n", __func__, devaddr, hwcfg); dma_wrreg(chan, S3C2410_DMA_DISRCC, hwcfg & 3); dma_wrreg(chan, S3C2410_DMA_DISRC, devaddr); dma_wrreg(chan, S3C2410_DMA_DIDSTC, (0<<1) | (0<<0)); chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST); break; case S3C2410_DMASRC_MEM: /* source is memory */ pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n", __func__, devaddr, hwcfg); dma_wrreg(chan, S3C2410_DMA_DISRCC, (0<<1) | (0<<0)); dma_wrreg(chan, S3C2410_DMA_DIDST, devaddr); dma_wrreg(chan, S3C2410_DMA_DIDSTC, hwcfg & 3); chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DISRC); break; default: printk(KERN_ERR "dma%d: invalid source type (%d)\n", channel, source); return -EINVAL; } if (dma_sel.direction != NULL) (dma_sel.direction)(chan, chan->map, source); return 0; } EXPORT_SYMBOL(s3c2410_dma_devconfig); /* s3c2410_dma_getposition * * returns the current transfer points for the dma source and destination */ int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); if (chan == NULL) return -EINVAL; if (src != NULL) *src = dma_rdreg(chan, S3C2410_DMA_DCSRC); if (dst != NULL) *dst = dma_rdreg(chan, S3C2410_DMA_DCDST); return 0; } EXPORT_SYMBOL(s3c2410_dma_getposition); /* system core operations */ #ifdef CONFIG_PM static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp) { printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) { /* the dma channel is still working, which is probably * a bad thing to do over suspend/resume. We stop the * channel and assume that the client is either going to * retry after resume, or that it is broken. 
*/ printk(KERN_INFO "dma: stopping channel %d due to suspend\n", cp->number); s3c2410_dma_dostop(cp); } } static int s3c2410_dma_suspend(void) { struct s3c2410_dma_chan *cp = s3c2410_chans; int channel; for (channel = 0; channel < dma_channels; cp++, channel++) s3c2410_dma_suspend_chan(cp); return 0; } static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) { unsigned int no = cp->number | DMACH_LOW_LEVEL; /* restore channel's hardware configuration */ if (!cp->in_use) return; printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); s3c2410_dma_config(no, cp->xfer_unit); s3c2410_dma_devconfig(no, cp->source, cp->dev_addr); /* re-select the dma source for this channel */ if (cp->map != NULL) dma_sel.select(cp, cp->map); } static void s3c2410_dma_resume(void) { struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1; int channel; for (channel = dma_channels - 1; channel >= 0; cp++, channel--) s3c2410_dma_resume_chan(cp); } #else #define s3c2410_dma_suspend NULL #define s3c2410_dma_resume NULL #endif /* CONFIG_PM */ struct syscore_ops dma_syscore_ops = { .suspend = s3c2410_dma_suspend, .resume = s3c2410_dma_resume, }; /* kmem cache implementation */ static void s3c2410_dma_cache_ctor(void *p) { memset(p, 0, sizeof(struct s3c2410_dma_buf)); } /* initialisation code */ static int __init s3c24xx_dma_syscore_init(void) { register_syscore_ops(&dma_syscore_ops); return 0; } late_initcall(s3c24xx_dma_syscore_init); int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq, unsigned int stride) { struct s3c2410_dma_chan *cp; int channel; int ret; printk("S3C24XX DMA Driver, Copyright 2003-2006 Simtec Electronics\n"); dma_channels = channels; dma_base = ioremap(S3C24XX_PA_DMA, stride * channels); if (dma_base == NULL) { printk(KERN_ERR "dma failed to remap register block\n"); return -ENOMEM; } dma_kmem = kmem_cache_create("dma_desc", sizeof(struct s3c2410_dma_buf), 0, SLAB_HWCACHE_ALIGN, s3c2410_dma_cache_ctor); if (dma_kmem == NULL) { 
printk(KERN_ERR "dma failed to make kmem cache\n"); ret = -ENOMEM; goto err; } for (channel = 0; channel < channels; channel++) { cp = &s3c2410_chans[channel]; memset(cp, 0, sizeof(struct s3c2410_dma_chan)); /* dma channel irqs are in order.. */ cp->number = channel; cp->irq = channel + irq; cp->regs = dma_base + (channel * stride); /* point current stats somewhere */ cp->stats = &cp->stats_store; cp->stats_store.timeout_shortest = LONG_MAX; /* basic channel configuration */ cp->load_timeout = 1<<18; printk("DMA channel %d at %p, irq %d\n", cp->number, cp->regs, cp->irq); } return 0; err: kmem_cache_destroy(dma_kmem); iounmap(dma_base); dma_base = NULL; return ret; } int __init s3c2410_dma_init(void) { return s3c24xx_dma_init(4, IRQ_DMA0, 0x40); } static inline int is_channel_valid(unsigned int channel) { return (channel & DMA_CH_VALID); } static struct s3c24xx_dma_order *dma_order; /* s3c2410_dma_map_channel() * * turn the virtual channel number into a real, and un-used hardware * channel. * * first, try the dma ordering given to us by either the relevant * dma code, or the board. 
Then just find the first usable free * channel */ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel) { struct s3c24xx_dma_order_ch *ord = NULL; struct s3c24xx_dma_map *ch_map; struct s3c2410_dma_chan *dmach; int ch; if (dma_sel.map == NULL || channel > dma_sel.map_size) return NULL; ch_map = dma_sel.map + channel; /* first, try the board mapping */ if (dma_order) { ord = &dma_order->channels[channel]; for (ch = 0; ch < dma_channels; ch++) { int tmp; if (!is_channel_valid(ord->list[ch])) continue; tmp = ord->list[ch] & ~DMA_CH_VALID; if (s3c2410_chans[tmp].in_use == 0) { ch = tmp; goto found; } } if (ord->flags & DMA_CH_NEVER) return NULL; } /* second, search the channel map for first free */ for (ch = 0; ch < dma_channels; ch++) { if (!is_channel_valid(ch_map->channels[ch])) continue; if (s3c2410_chans[ch].in_use == 0) { printk("mapped channel %d to %d\n", channel, ch); break; } } if (ch >= dma_channels) return NULL; /* update our channel mapping */ found: dmach = &s3c2410_chans[ch]; dmach->map = ch_map; dmach->req_ch = channel; s3c_dma_chan_map[channel] = dmach; /* select the channel */ (dma_sel.select)(dmach, ch_map); return dmach; } static int s3c24xx_dma_check_entry(struct s3c24xx_dma_map *map, int ch) { return 0; } int __init s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel) { struct s3c24xx_dma_map *nmap; size_t map_sz = sizeof(*nmap) * sel->map_size; int ptr; nmap = kmalloc(map_sz, GFP_KERNEL); if (nmap == NULL) return -ENOMEM; memcpy(nmap, sel->map, map_sz); memcpy(&dma_sel, sel, sizeof(*sel)); dma_sel.map = nmap; for (ptr = 0; ptr < sel->map_size; ptr++) s3c24xx_dma_check_entry(nmap+ptr, ptr); return 0; } int __init s3c24xx_dma_order_set(struct s3c24xx_dma_order *ord) { struct s3c24xx_dma_order *nord = dma_order; if (nord == NULL) nord = kmalloc(sizeof(struct s3c24xx_dma_order), GFP_KERNEL); if (nord == NULL) { printk(KERN_ERR "no memory to store dma channel order\n"); return -ENOMEM; } dma_order = nord; memcpy(nord, ord, 
sizeof(struct s3c24xx_dma_order)); return 0; }
gpl-2.0
maniacx/android_kernel_htcleo-3.0_old
arch/arm/kernel/signal.c
1627
22227
/* * linux/arch/arm/kernel/signal.c * * Copyright (C) 1995-2009 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/signal.h> #include <linux/personality.h> #include <linux/freezer.h> #include <linux/uaccess.h> #include <linux/tracehook.h> #include <asm/elf.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/unistd.h> #include <asm/vfp.h> #include "signal.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) /* * For ARM syscalls, we encode the syscall number into the instruction. */ #define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) #define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) #define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE) /* * With EABI, the syscall number has to be loaded into r7. */ #define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE)) #define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) /* * For Thumb syscalls, we pass the syscall number via r7. We therefore * need two 16-bit instructions. */ #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) const unsigned long sigreturn_codes[7] = { MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, }; /* * Either we support OABI only, or we have EABI with the OABI * compat layer enabled. In the later case we don't know if * user space is EABI or not, and if not we must not clobber r7. * Always using the OABI syscall solves that issue and works for * all those cases. 
*/ const unsigned long syscall_restart_code[2] = { SWI_SYS_RESTART, /* swi __NR_restart_syscall */ 0xe49df004, /* ldr pc, [sp], #4 */ }; /* * atomically swap in the new signal mask, and wait for a signal. */ asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_restore_sigmask(); return -ERESTARTNOHAND; } asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) return -EFAULT; __get_user(new_ka.sa.sa_flags, &act->sa_flags); __get_user(mask, &act->sa_mask); siginitset(&new_ka.sa.sa_mask, mask); } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) return -EFAULT; __put_user(old_ka.sa.sa_flags, &oact->sa_flags); __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); } return ret; } #ifdef CONFIG_CRUNCH static int preserve_crunch_context(struct crunch_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct crunch_sigframe *kframe; /* the crunch context must be 64 bit aligned */ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); kframe->magic = CRUNCH_MAGIC; kframe->size = CRUNCH_STORAGE_SIZE; crunch_task_copy(current_thread_info(), &kframe->storage); return __copy_to_user(frame, kframe, sizeof(*frame)); } static int restore_crunch_context(struct crunch_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct crunch_sigframe *kframe; /* the crunch context must be 64 bit aligned */ kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); if (__copy_from_user(kframe, frame, sizeof(*frame))) return -1; if (kframe->magic != CRUNCH_MAGIC || kframe->size != CRUNCH_STORAGE_SIZE) return -1; crunch_task_restore(current_thread_info(), &kframe->storage); return 0; } #endif #ifdef CONFIG_IWMMXT static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame) { char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; /* the iWMMXt context must be 64 bit aligned */ kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); kframe->magic = IWMMXT_MAGIC; kframe->size = IWMMXT_STORAGE_SIZE; iwmmxt_task_copy(current_thread_info(), &kframe->storage); return __copy_to_user(frame, kframe, sizeof(*frame)); } static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) { char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; /* the iWMMXt context must be 64 bit aligned */ kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); if 
(__copy_from_user(kframe, frame, sizeof(*frame))) return -1; if (kframe->magic != IWMMXT_MAGIC || kframe->size != IWMMXT_STORAGE_SIZE) return -1; iwmmxt_task_restore(current_thread_info(), &kframe->storage); return 0; } #endif #ifdef CONFIG_VFP static int preserve_vfp_context(struct vfp_sigframe __user *frame) { struct thread_info *thread = current_thread_info(); struct vfp_hard_struct *h = &thread->vfpstate.hard; const unsigned long magic = VFP_MAGIC; const unsigned long size = VFP_STORAGE_SIZE; int err = 0; vfp_sync_hwstate(thread); __put_user_error(magic, &frame->magic, err); __put_user_error(size, &frame->size, err); /* * Copy the floating point registers. There can be unused * registers see asm/hwcap.h for details. */ err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, sizeof(h->fpregs)); /* * Copy the status and control register. */ __put_user_error(h->fpscr, &frame->ufp.fpscr, err); /* * Copy the exception registers. */ __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); return err ? -EFAULT : 0; } static int restore_vfp_context(struct vfp_sigframe __user *frame) { struct thread_info *thread = current_thread_info(); struct vfp_hard_struct *h = &thread->vfpstate.hard; unsigned long magic; unsigned long size; unsigned long fpexc; int err = 0; __get_user_error(magic, &frame->magic, err); __get_user_error(size, &frame->size, err); if (err) return -EFAULT; if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; vfp_flush_hwstate(thread); /* * Copy the floating point registers. There can be unused * registers see asm/hwcap.h for details. */ err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, sizeof(h->fpregs)); /* * Copy the status and control register. */ __get_user_error(h->fpscr, &frame->ufp.fpscr, err); /* * Sanitise and restore the exception registers. 
*/ __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); /* Ensure the VFP is enabled. */ fpexc |= FPEXC_EN; /* Ensure FPINST2 is invalid and the exception flag is cleared. */ fpexc &= ~(FPEXC_EX | FPEXC_FP2V); h->fpexc = fpexc; __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); return err ? -EFAULT : 0; } #endif /* * Do a signal return; undo the signal stack. These are aligned to 64-bit. */ struct sigframe { struct ucontext uc; unsigned long retcode[2]; }; struct rt_sigframe { struct siginfo info; struct sigframe sig; }; static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) { struct aux_sigframe __user *aux; sigset_t set; int err; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); if (err == 0) { sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); __get_user_error(regs->ARM_pc, 
&sf->uc.uc_mcontext.arm_pc, err); __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); err |= !valid_user_regs(regs); aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; #ifdef CONFIG_CRUNCH if (err == 0) err |= restore_crunch_context(&aux->crunch); #endif #ifdef CONFIG_IWMMXT if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) err |= restore_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP if (err == 0) err |= restore_vfp_context(&aux->vfp); #endif return err; } asmlinkage int sys_sigreturn(struct pt_regs *regs) { struct sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. */ if (regs->ARM_sp & 7) goto badframe; frame = (struct sigframe __user *)regs->ARM_sp; if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, frame)) goto badframe; return regs->ARM_r0; badframe: force_sig(SIGSEGV, current); return 0; } asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. 
*/ if (regs->ARM_sp & 7) goto badframe; frame = (struct rt_sigframe __user *)regs->ARM_sp; if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, &frame->sig)) goto badframe; if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT) goto badframe; return regs->ARM_r0; badframe: force_sig(SIGSEGV, current); return 0; } static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { struct aux_sigframe __user *aux; int err = 0; __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err); __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err); __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err); __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err); __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err); __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err); __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err); __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err); __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err); __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err); __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err); __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err); __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err); __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err); __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err); __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err); __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err); __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err); __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); 
aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; #ifdef CONFIG_CRUNCH if (err == 0) err |= preserve_crunch_context(&aux->crunch); #endif #ifdef CONFIG_IWMMXT if (err == 0 && test_thread_flag(TIF_USING_IWMMXT)) err |= preserve_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP if (err == 0) err |= preserve_vfp_context(&aux->vfp); #endif __put_user_error(0, &aux->end_magic, err); return err; } static inline void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, int framesize) { unsigned long sp = regs->ARM_sp; void __user *frame; /* * This is the X/Open sanctioned signal stack switching. */ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; /* * ATPCS B01 mandates 8-byte alignment */ frame = (void __user *)((sp - framesize) & ~7); /* * Check that we can actually write to the signal frame. */ if (!access_ok(VERIFY_WRITE, frame, framesize)) frame = NULL; return frame; } static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, unsigned long __user *rc, void __user *frame, int usig) { unsigned long handler = (unsigned long)ka->sa.sa_handler; unsigned long retcode; int thumb = 0; unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); cpsr |= PSR_ENDSTATE; /* * Maybe we need to deliver a 32-bit signal to a 26-bit task. */ if (ka->sa.sa_flags & SA_THIRTYTWO) cpsr = (cpsr & ~MODE_MASK) | USR_MODE; #ifdef CONFIG_ARM_THUMB if (elf_hwcap & HWCAP_THUMB) { /* * The LSB of the handler determines if we're going to * be using THUMB or ARM mode for this signal handler. 
*/ thumb = handler & 1; if (thumb) { cpsr |= PSR_T_BIT; #if __LINUX_ARM_ARCH__ >= 7 /* clear the If-Then Thumb-2 execution state */ cpsr &= ~PSR_IT_MASK; #endif } else cpsr &= ~PSR_T_BIT; } #endif if (ka->sa.sa_flags & SA_RESTORER) { retcode = (unsigned long)ka->sa.sa_restorer; } else { unsigned int idx = thumb << 1; if (ka->sa.sa_flags & SA_SIGINFO) idx += 3; if (__put_user(sigreturn_codes[idx], rc) || __put_user(sigreturn_codes[idx+1], rc+1)) return 1; if (cpsr & MODE32_BIT) { /* * 32-bit code can use the new high-page * signal return code support. */ retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; } else { /* * Ensure that the instruction cache sees * the return code written onto the stack. */ flush_icache_range((unsigned long)rc, (unsigned long)(rc + 2)); retcode = ((unsigned long)rc) + thumb; } } regs->ARM_r0 = usig; regs->ARM_sp = (unsigned long)frame; regs->ARM_lr = retcode; regs->ARM_pc = handler; regs->ARM_cpsr = cpsr; return 0; } static int setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); int err = 0; if (!frame) return 1; /* * Set uc.uc_flags to a value which sc.trap_no would never have. 
*/ __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); err |= setup_sigframe(frame, regs, set); if (err == 0) err = setup_return(regs, ka, frame->retcode, frame, usig); return err; } static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame)); stack_t stack; int err = 0; if (!frame) return 1; err |= copy_siginfo_to_user(&frame->info, info); __put_user_error(0, &frame->sig.uc.uc_flags, err); __put_user_error(NULL, &frame->sig.uc.uc_link, err); memset(&stack, 0, sizeof(stack)); stack.ss_sp = (void __user *)current->sas_ss_sp; stack.ss_flags = sas_ss_flags(regs->ARM_sp); stack.ss_size = current->sas_ss_size; err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); err |= setup_sigframe(&frame->sig, regs, set); if (err == 0) err = setup_return(regs, ka, frame->sig.retcode, frame, usig); if (err == 0) { /* * For realtime signals we must also set the second and third * arguments for the signal handler. * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06 */ regs->ARM_r1 = (unsigned long)&frame->info; regs->ARM_r2 = (unsigned long)&frame->sig.uc; } return err; } /* * OK, we're invoking a handler */ static int handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs * regs) { struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; int usig = sig; int ret; /* * translate the signal */ if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) usig = thread->exec_domain->signal_invmap[usig]; /* * Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(usig, ka, info, oldset, regs); else ret = setup_frame(usig, ka, oldset, regs); /* * Check that the resulting registers are actually sane. 
*/ ret |= !valid_user_regs(regs); if (ret != 0) { force_sigsegv(sig, tsk); return ret; } /* * Block the signal if we were successful. */ spin_lock_irq(&tsk->sighand->siglock); sigorsets(&tsk->blocked, &tsk->blocked, &ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&tsk->blocked, sig); recalc_sigpending(); spin_unlock_irq(&tsk->sighand->siglock); return 0; } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ static void do_signal(struct pt_regs *regs, int syscall) { unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct k_sigaction ka; siginfo_t info; int signr; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. Just return without doing anything * if so. */ if (!user_mode(regs)) return; /* * If we were from a system call, check for system call restarting... */ if (syscall) { continue_addr = regs->ARM_pc; restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); retval = regs->ARM_r0; /* * Prepare for system call restart. We do this here so that a * debugger will see the already changed PSW. */ switch (retval) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc = restart_addr; break; case -ERESTART_RESTARTBLOCK: regs->ARM_r0 = -EINTR; break; } } if (try_to_freeze()) goto no_signal; /* * Get the signal to deliver. When running under ptrace, at this * point the debugger may change all our registers ... */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { sigset_t *oldset; /* * Depending on the signal settings we may need to revert the * decision to restart the system call. 
But skip this if a * debugger has chosen to restart at a different PC. */ if (regs->ARM_pc == restart_addr) { if (retval == -ERESTARTNOHAND || (retval == -ERESTARTSYS && !(ka.sa.sa_flags & SA_RESTART))) { regs->ARM_r0 = -EINTR; regs->ARM_pc = continue_addr; } } if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { /* * A signal was successfully delivered; the saved * sigmask will have been stored in the signal frame, * and will be restored by sigreturn, so we can simply * clear the TIF_RESTORE_SIGMASK flag. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } return; } no_signal: if (syscall) { /* * Handle restarting a different system call. As above, * if a debugger has chosen to restart at a different PC, * ignore the restart. */ if (retval == -ERESTART_RESTARTBLOCK && regs->ARM_pc == continue_addr) { if (thumb_mode(regs)) { regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE; regs->ARM_pc -= 2; } else { #if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT) regs->ARM_r7 = __NR_restart_syscall; regs->ARM_pc -= 4; #else u32 __user *usp; regs->ARM_sp -= 4; usp = (u32 __user *)regs->ARM_sp; if (put_user(regs->ARM_pc, usp) == 0) { regs->ARM_pc = KERN_RESTART_CODE; } else { regs->ARM_sp += 4; force_sigsegv(0, current); } #endif } } /* If there's no signal to deliver, we just put the saved sigmask * back. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } } asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall) { if (thread_flags & _TIF_SIGPENDING) do_signal(regs, syscall); if (thread_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
Orange-OpenSource/linux
drivers/video/pmagb-b-fb.c
2395
10392
/* * linux/drivers/video/pmagb-b-fb.c * * PMAGB-B TURBOchannel Smart Frame Buffer (SFB) card support, * derived from: * "HP300 Topcat framebuffer support (derived from macfb of all things) * Phil Blundell <philb@gnu.org> 1998", the original code can be * found in the file hpfb.c in the same directory. * * DECstation related code Copyright (C) 1999, 2000, 2001 by * Michael Engel <engel@unix-ag.org>, * Karsten Merker <merker@linuxtag.org> and * Harald Koerfgen. * Copyright (c) 2005, 2006 Maciej W. Rozycki * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. */ #include <linux/compiler.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/tc.h> #include <linux/types.h> #include <asm/io.h> #include <video/pmagb-b-fb.h> struct pmagbbfb_par { volatile void __iomem *mmio; volatile void __iomem *smem; volatile u32 __iomem *sfb; volatile u32 __iomem *dac; unsigned int osc0; unsigned int osc1; int slot; }; static struct fb_var_screeninfo pmagbbfb_defined = { .bits_per_pixel = 8, .red.length = 8, .green.length = 8, .blue.length = 8, .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .accel_flags = FB_ACCEL_NONE, .sync = FB_SYNC_ON_GREEN, .vmode = FB_VMODE_NONINTERLACED, }; static struct fb_fix_screeninfo pmagbbfb_fix = { .id = "PMAGB-BA", .smem_len = (2048 * 1024), .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .mmio_len = PMAGB_B_FBMEM, }; static inline void sfb_write(struct pmagbbfb_par *par, unsigned int reg, u32 v) { writel(v, par->sfb + reg / 4); } static inline u32 sfb_read(struct pmagbbfb_par *par, unsigned int reg) { return readl(par->sfb + reg / 4); } static inline void dac_write(struct pmagbbfb_par *par, unsigned int reg, u8 v) { writeb(v, par->dac + reg / 4); } static inline u8 dac_read(struct pmagbbfb_par *par, 
unsigned int reg) { return readb(par->dac + reg / 4); } static inline void gp0_write(struct pmagbbfb_par *par, u32 v) { writel(v, par->mmio + PMAGB_B_GP0); } /* * Set the palette. */ static int pmagbbfb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int transp, struct fb_info *info) { struct pmagbbfb_par *par = info->par; if (regno >= info->cmap.len) return 1; red >>= 8; /* The cmap fields are 16 bits */ green >>= 8; /* wide, but the hardware colormap */ blue >>= 8; /* registers are only 8 bits wide */ mb(); dac_write(par, BT459_ADDR_LO, regno); dac_write(par, BT459_ADDR_HI, 0x00); wmb(); dac_write(par, BT459_CMAP, red); wmb(); dac_write(par, BT459_CMAP, green); wmb(); dac_write(par, BT459_CMAP, blue); return 0; } static struct fb_ops pmagbbfb_ops = { .owner = THIS_MODULE, .fb_setcolreg = pmagbbfb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; /* * Turn the hardware cursor off. */ static void __init pmagbbfb_erase_cursor(struct fb_info *info) { struct pmagbbfb_par *par = info->par; mb(); dac_write(par, BT459_ADDR_LO, 0x00); dac_write(par, BT459_ADDR_HI, 0x03); wmb(); dac_write(par, BT459_DATA, 0x00); } /* * Set up screen parameters. 
*/ static void pmagbbfb_screen_setup(struct fb_info *info) { struct pmagbbfb_par *par = info->par; info->var.xres = ((sfb_read(par, SFB_REG_VID_HOR) >> SFB_VID_HOR_PIX_SHIFT) & SFB_VID_HOR_PIX_MASK) * 4; info->var.xres_virtual = info->var.xres; info->var.yres = (sfb_read(par, SFB_REG_VID_VER) >> SFB_VID_VER_SL_SHIFT) & SFB_VID_VER_SL_MASK; info->var.yres_virtual = info->var.yres; info->var.left_margin = ((sfb_read(par, SFB_REG_VID_HOR) >> SFB_VID_HOR_BP_SHIFT) & SFB_VID_HOR_BP_MASK) * 4; info->var.right_margin = ((sfb_read(par, SFB_REG_VID_HOR) >> SFB_VID_HOR_FP_SHIFT) & SFB_VID_HOR_FP_MASK) * 4; info->var.upper_margin = (sfb_read(par, SFB_REG_VID_VER) >> SFB_VID_VER_BP_SHIFT) & SFB_VID_VER_BP_MASK; info->var.lower_margin = (sfb_read(par, SFB_REG_VID_VER) >> SFB_VID_VER_FP_SHIFT) & SFB_VID_VER_FP_MASK; info->var.hsync_len = ((sfb_read(par, SFB_REG_VID_HOR) >> SFB_VID_HOR_SYN_SHIFT) & SFB_VID_HOR_SYN_MASK) * 4; info->var.vsync_len = (sfb_read(par, SFB_REG_VID_VER) >> SFB_VID_VER_SYN_SHIFT) & SFB_VID_VER_SYN_MASK; info->fix.line_length = info->var.xres; }; /* * Determine oscillator configuration. */ static void pmagbbfb_osc_setup(struct fb_info *info) { static unsigned int pmagbbfb_freqs[] = { 130808, 119843, 104000, 92980, 74370, 72800, 69197, 66000, 65000, 50350, 36000, 32000, 25175 }; struct pmagbbfb_par *par = info->par; struct tc_bus *tbus = to_tc_dev(info->device)->bus; u32 count0 = 8, count1 = 8, counttc = 16 * 256 + 8; u32 freq0, freq1, freqtc = tc_get_speed(tbus) / 250; int i, j; gp0_write(par, 0); /* select Osc0 */ for (j = 0; j < 16; j++) { mb(); sfb_write(par, SFB_REG_TCCLK_COUNT, 0); mb(); for (i = 0; i < 100; i++) { /* nominally max. 20.5us */ if (sfb_read(par, SFB_REG_TCCLK_COUNT) == 0) break; udelay(1); } count0 += sfb_read(par, SFB_REG_VIDCLK_COUNT); } gp0_write(par, 1); /* select Osc1 */ for (j = 0; j < 16; j++) { mb(); sfb_write(par, SFB_REG_TCCLK_COUNT, 0); for (i = 0; i < 100; i++) { /* nominally max. 
20.5us */ if (sfb_read(par, SFB_REG_TCCLK_COUNT) == 0) break; udelay(1); } count1 += sfb_read(par, SFB_REG_VIDCLK_COUNT); } freq0 = (freqtc * count0 + counttc / 2) / counttc; par->osc0 = freq0; if (freq0 >= pmagbbfb_freqs[0] - (pmagbbfb_freqs[0] + 32) / 64 && freq0 <= pmagbbfb_freqs[0] + (pmagbbfb_freqs[0] + 32) / 64) par->osc0 = pmagbbfb_freqs[0]; freq1 = (par->osc0 * count1 + count0 / 2) / count0; par->osc1 = freq1; for (i = 0; i < ARRAY_SIZE(pmagbbfb_freqs); i++) if (freq1 >= pmagbbfb_freqs[i] - (pmagbbfb_freqs[i] + 128) / 256 && freq1 <= pmagbbfb_freqs[i] + (pmagbbfb_freqs[i] + 128) / 256) { par->osc1 = pmagbbfb_freqs[i]; break; } if (par->osc0 - par->osc1 <= (par->osc0 + par->osc1 + 256) / 512 || par->osc1 - par->osc0 <= (par->osc0 + par->osc1 + 256) / 512) par->osc1 = 0; gp0_write(par, par->osc1 != 0); /* reselect OscX */ info->var.pixclock = par->osc1 ? (1000000000 + par->osc1 / 2) / par->osc1 : (1000000000 + par->osc0 / 2) / par->osc0; }; static int pmagbbfb_probe(struct device *dev) { struct tc_dev *tdev = to_tc_dev(dev); resource_size_t start, len; struct fb_info *info; struct pmagbbfb_par *par; char freq0[12], freq1[12]; u32 vid_base; int err; info = framebuffer_alloc(sizeof(struct pmagbbfb_par), dev); if (!info) { printk(KERN_ERR "%s: Cannot allocate memory\n", dev_name(dev)); return -ENOMEM; } par = info->par; dev_set_drvdata(dev, info); if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { printk(KERN_ERR "%s: Cannot allocate color map\n", dev_name(dev)); err = -ENOMEM; goto err_alloc; } info->fbops = &pmagbbfb_ops; info->fix = pmagbbfb_fix; info->var = pmagbbfb_defined; info->flags = FBINFO_DEFAULT; /* Request the I/O MEM resource. */ start = tdev->resource.start; len = tdev->resource.end - start + 1; if (!request_mem_region(start, len, dev_name(dev))) { printk(KERN_ERR "%s: Cannot reserve FB region\n", dev_name(dev)); err = -EBUSY; goto err_cmap; } /* MMIO mapping setup. 
*/ info->fix.mmio_start = start; par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); if (!par->mmio) { printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev)); err = -ENOMEM; goto err_resource; } par->sfb = par->mmio + PMAGB_B_SFB; par->dac = par->mmio + PMAGB_B_BT459; /* Frame buffer mapping setup. */ info->fix.smem_start = start + PMAGB_B_FBMEM; par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len); if (!par->smem) { printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev)); err = -ENOMEM; goto err_mmio_map; } vid_base = sfb_read(par, SFB_REG_VID_BASE); info->screen_base = (void __iomem *)par->smem + vid_base * 0x1000; info->screen_size = info->fix.smem_len - 2 * vid_base * 0x1000; pmagbbfb_erase_cursor(info); pmagbbfb_screen_setup(info); pmagbbfb_osc_setup(info); err = register_framebuffer(info); if (err < 0) { printk(KERN_ERR "%s: Cannot register framebuffer\n", dev_name(dev)); goto err_smem_map; } get_device(dev); snprintf(freq0, sizeof(freq0), "%u.%03uMHz", par->osc0 / 1000, par->osc0 % 1000); snprintf(freq1, sizeof(freq1), "%u.%03uMHz", par->osc1 / 1000, par->osc1 % 1000); pr_info("fb%d: %s frame buffer device at %s\n", info->node, info->fix.id, dev_name(dev)); pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n", info->node, freq0, par->osc1 ? 
freq1 : "disabled", par->osc1 != 0); return 0; err_smem_map: iounmap(par->smem); err_mmio_map: iounmap(par->mmio); err_resource: release_mem_region(start, len); err_cmap: fb_dealloc_cmap(&info->cmap); err_alloc: framebuffer_release(info); return err; } static int __exit pmagbbfb_remove(struct device *dev) { struct tc_dev *tdev = to_tc_dev(dev); struct fb_info *info = dev_get_drvdata(dev); struct pmagbbfb_par *par = info->par; resource_size_t start, len; put_device(dev); unregister_framebuffer(info); iounmap(par->smem); iounmap(par->mmio); start = tdev->resource.start; len = tdev->resource.end - start + 1; release_mem_region(start, len); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); return 0; } /* * Initialize the framebuffer. */ static const struct tc_device_id pmagbbfb_tc_table[] = { { "DEC ", "PMAGB-BA" }, { } }; MODULE_DEVICE_TABLE(tc, pmagbbfb_tc_table); static struct tc_driver pmagbbfb_driver = { .id_table = pmagbbfb_tc_table, .driver = { .name = "pmagbbfb", .bus = &tc_bus_type, .probe = pmagbbfb_probe, .remove = __exit_p(pmagbbfb_remove), }, }; static int __init pmagbbfb_init(void) { #ifndef MODULE if (fb_get_options("pmagbbfb", NULL)) return -ENXIO; #endif return tc_register_driver(&pmagbbfb_driver); } static void __exit pmagbbfb_exit(void) { tc_unregister_driver(&pmagbbfb_driver); } module_init(pmagbbfb_init); module_exit(pmagbbfb_exit); MODULE_LICENSE("GPL");
gpl-2.0
svimes/android_kernel_motorola_msm8960-common
drivers/gpio/ml_ioh_gpio.c
2395
8426
/* * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/gpio.h> #define PCI_VENDOR_ID_ROHM 0x10DB struct ioh_reg_comn { u32 ien; u32 istatus; u32 idisp; u32 iclr; u32 imask; u32 imaskclr; u32 po; u32 pi; u32 pm; u32 im_0; u32 im_1; u32 reserved; }; struct ioh_regs { struct ioh_reg_comn regs[8]; u32 reserve1[16]; u32 ioh_sel_reg[4]; u32 reserve2[11]; u32 srst; }; /** * struct ioh_gpio_reg_data - The register store data. * @po_reg: To store contents of PO register. * @pm_reg: To store contents of PM register. */ struct ioh_gpio_reg_data { u32 po_reg; u32 pm_reg; }; /** * struct ioh_gpio - GPIO private data structure. * @base: PCI base address of Memory mapped I/O register. * @reg: Memory mapped IOH GPIO register list. * @dev: Pointer to device structure. * @gpio: Data for GPIO infrastructure. * @ioh_gpio_reg: Memory mapped Register data is saved here * when suspend. 
* @ch: Indicate GPIO channel */ struct ioh_gpio { void __iomem *base; struct ioh_regs __iomem *reg; struct device *dev; struct gpio_chip gpio; struct ioh_gpio_reg_data ioh_gpio_reg; struct mutex lock; int ch; }; static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12}; static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val) { u32 reg_val; struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); mutex_lock(&chip->lock); reg_val = ioread32(&chip->reg->regs[chip->ch].po); if (val) reg_val |= (1 << nr); else reg_val &= ~(1 << nr); iowrite32(reg_val, &chip->reg->regs[chip->ch].po); mutex_unlock(&chip->lock); } static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr) { struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); return ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr); } static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, int val) { struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); u32 pm; u32 reg_val; mutex_lock(&chip->lock); pm = ioread32(&chip->reg->regs[chip->ch].pm) & ((1 << num_ports[chip->ch]) - 1); pm |= (1 << nr); iowrite32(pm, &chip->reg->regs[chip->ch].pm); reg_val = ioread32(&chip->reg->regs[chip->ch].po); if (val) reg_val |= (1 << nr); else reg_val &= ~(1 << nr); iowrite32(reg_val, &chip->reg->regs[chip->ch].po); mutex_unlock(&chip->lock); return 0; } static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) { struct ioh_gpio *chip = container_of(gpio, struct ioh_gpio, gpio); u32 pm; mutex_lock(&chip->lock); pm = ioread32(&chip->reg->regs[chip->ch].pm) & ((1 << num_ports[chip->ch]) - 1); pm &= ~(1 << nr); iowrite32(pm, &chip->reg->regs[chip->ch].pm); mutex_unlock(&chip->lock); return 0; } #ifdef CONFIG_PM /* * Save register configuration and disable interrupts. 
*/ static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip) { chip->ioh_gpio_reg.po_reg = ioread32(&chip->reg->regs[chip->ch].po); chip->ioh_gpio_reg.pm_reg = ioread32(&chip->reg->regs[chip->ch].pm); } /* * This function restores the register configuration of the GPIO device. */ static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip) { /* to store contents of PO register */ iowrite32(chip->ioh_gpio_reg.po_reg, &chip->reg->regs[chip->ch].po); /* to store contents of PM register */ iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm); } #endif static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port) { struct gpio_chip *gpio = &chip->gpio; gpio->label = dev_name(chip->dev); gpio->owner = THIS_MODULE; gpio->direction_input = ioh_gpio_direction_input; gpio->get = ioh_gpio_get; gpio->direction_output = ioh_gpio_direction_output; gpio->set = ioh_gpio_set; gpio->dbg_show = NULL; gpio->base = -1; gpio->ngpio = num_port; gpio->can_sleep = 0; } static int __devinit ioh_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; int i; struct ioh_gpio *chip; void __iomem *base; void __iomem *chip_save; ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s : pci_enable_device failed", __func__); goto err_pci_enable; } ret = pci_request_regions(pdev, KBUILD_MODNAME); if (ret) { dev_err(&pdev->dev, "pci_request_regions failed-%d", ret); goto err_request_regions; } base = pci_iomap(pdev, 1, 0); if (base == 0) { dev_err(&pdev->dev, "%s : pci_iomap failed", __func__); ret = -ENOMEM; goto err_iomap; } chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL); if (chip_save == NULL) { dev_err(&pdev->dev, "%s : kzalloc failed", __func__); ret = -ENOMEM; goto err_kzalloc; } chip = chip_save; for (i = 0; i < 8; i++, chip++) { chip->dev = &pdev->dev; chip->base = base; chip->reg = chip->base; chip->ch = i; mutex_init(&chip->lock); ioh_gpio_setup(chip, num_ports[i]); ret = gpiochip_add(&chip->gpio); if (ret) { dev_err(&pdev->dev, "IOH gpio: 
Failed to register GPIO\n"); goto err_gpiochip_add; } } chip = chip_save; pci_set_drvdata(pdev, chip); return 0; err_gpiochip_add: for (; i != 0; i--) { chip--; ret = gpiochip_remove(&chip->gpio); if (ret) dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i); } kfree(chip_save); err_kzalloc: pci_iounmap(pdev, base); err_iomap: pci_release_regions(pdev); err_request_regions: pci_disable_device(pdev); err_pci_enable: dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret); return ret; } static void __devexit ioh_gpio_remove(struct pci_dev *pdev) { int err; int i; struct ioh_gpio *chip = pci_get_drvdata(pdev); void __iomem *chip_save; chip_save = chip; for (i = 0; i < 8; i++, chip++) { err = gpiochip_remove(&chip->gpio); if (err) dev_err(&pdev->dev, "Failed gpiochip_remove\n"); } chip = chip_save; pci_iounmap(pdev, chip->base); pci_release_regions(pdev); pci_disable_device(pdev); kfree(chip); } #ifdef CONFIG_PM static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state) { s32 ret; struct ioh_gpio *chip = pci_get_drvdata(pdev); ioh_gpio_save_reg_conf(chip); ioh_gpio_restore_reg_conf(chip); ret = pci_save_state(pdev); if (ret) { dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret); return ret; } pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D0); ret = pci_enable_wake(pdev, PCI_D0, 1); if (ret) dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret); return 0; } static int ioh_gpio_resume(struct pci_dev *pdev) { s32 ret; struct ioh_gpio *chip = pci_get_drvdata(pdev); ret = pci_enable_wake(pdev, PCI_D0, 0); pci_set_power_state(pdev, PCI_D0); ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret); return ret; } pci_restore_state(pdev); iowrite32(0x01, &chip->reg->srst); iowrite32(0x00, &chip->reg->srst); ioh_gpio_restore_reg_conf(chip); return 0; } #else #define ioh_gpio_suspend NULL #define ioh_gpio_resume NULL #endif static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = { { 
PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) }, { 0, } }; MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id); static struct pci_driver ioh_gpio_driver = { .name = "ml_ioh_gpio", .id_table = ioh_gpio_pcidev_id, .probe = ioh_gpio_probe, .remove = __devexit_p(ioh_gpio_remove), .suspend = ioh_gpio_suspend, .resume = ioh_gpio_resume }; static int __init ioh_gpio_pci_init(void) { return pci_register_driver(&ioh_gpio_driver); } module_init(ioh_gpio_pci_init); static void __exit ioh_gpio_pci_exit(void) { pci_unregister_driver(&ioh_gpio_driver); } module_exit(ioh_gpio_pci_exit); MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver"); MODULE_LICENSE("GPL");
gpl-2.0
AOSPA/android_kernel_samsung_tuna
arch/arm/mach-exynos4/setup-sdhci.c
2395
1797
/* linux/arch/arm/mach-exynos4/setup-sdhci.c * * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS4 - Helper functions for settign up SDHCI device(s) (HSMMC) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <plat/regs-sdhci.h> /* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */ char *exynos4_hsmmc_clksrcs[4] = { [0] = NULL, [1] = NULL, [2] = "sclk_mmc", /* mmc_bus */ [3] = NULL, }; void exynos4_setup_sdhci_cfg_card(struct platform_device *dev, void __iomem *r, struct mmc_ios *ios, struct mmc_card *card) { u32 ctrl2, ctrl3; /* don't need to alter anything according to card-type */ ctrl2 = readl(r + S3C_SDHCI_CONTROL2); /* select base clock source to HCLK */ ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK; /* * clear async mode, enable conflict mask, rx feedback ctrl, SD * clk hold and no use debounce count */ ctrl2 |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR | S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK | S3C_SDHCI_CTRL2_ENFBCLKRX | S3C_SDHCI_CTRL2_DFCNT_NONE | S3C_SDHCI_CTRL2_ENCLKOUTHOLD); /* Tx and Rx feedback clock delay control */ if (ios->clock < 25 * 1000000) ctrl3 = (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2 | S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); else ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); writel(ctrl2, r + S3C_SDHCI_CONTROL2); writel(ctrl3, r + S3C_SDHCI_CONTROL3); }
gpl-2.0
Evervolv/android_kernel_samsung_jf
drivers/input/touchscreen/mcs5000_ts.c
4955
8396
/* * mcs5000_ts.c - Touchscreen driver for MELFAS MCS-5000 controller * * Copyright (C) 2009 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * Based on wm97xx-core.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/i2c/mcs.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/irq.h> #include <linux/slab.h> /* Registers */ #define MCS5000_TS_STATUS 0x00 #define STATUS_OFFSET 0 #define STATUS_NO (0 << STATUS_OFFSET) #define STATUS_INIT (1 << STATUS_OFFSET) #define STATUS_SENSING (2 << STATUS_OFFSET) #define STATUS_COORD (3 << STATUS_OFFSET) #define STATUS_GESTURE (4 << STATUS_OFFSET) #define ERROR_OFFSET 4 #define ERROR_NO (0 << ERROR_OFFSET) #define ERROR_POWER_ON_RESET (1 << ERROR_OFFSET) #define ERROR_INT_RESET (2 << ERROR_OFFSET) #define ERROR_EXT_RESET (3 << ERROR_OFFSET) #define ERROR_INVALID_REG_ADDRESS (8 << ERROR_OFFSET) #define ERROR_INVALID_REG_VALUE (9 << ERROR_OFFSET) #define MCS5000_TS_OP_MODE 0x01 #define RESET_OFFSET 0 #define RESET_NO (0 << RESET_OFFSET) #define RESET_EXT_SOFT (1 << RESET_OFFSET) #define OP_MODE_OFFSET 1 #define OP_MODE_SLEEP (0 << OP_MODE_OFFSET) #define OP_MODE_ACTIVE (1 << OP_MODE_OFFSET) #define GESTURE_OFFSET 4 #define GESTURE_DISABLE (0 << GESTURE_OFFSET) #define GESTURE_ENABLE (1 << GESTURE_OFFSET) #define PROXIMITY_OFFSET 5 #define PROXIMITY_DISABLE (0 << PROXIMITY_OFFSET) #define PROXIMITY_ENABLE (1 << PROXIMITY_OFFSET) #define SCAN_MODE_OFFSET 6 #define SCAN_MODE_INTERRUPT (0 << SCAN_MODE_OFFSET) #define SCAN_MODE_POLLING (1 << SCAN_MODE_OFFSET) #define REPORT_RATE_OFFSET 7 #define REPORT_RATE_40 (0 << REPORT_RATE_OFFSET) #define REPORT_RATE_80 (1 << REPORT_RATE_OFFSET) #define 
MCS5000_TS_SENS_CTL 0x02 #define MCS5000_TS_FILTER_CTL 0x03 #define PRI_FILTER_OFFSET 0 #define SEC_FILTER_OFFSET 4 #define MCS5000_TS_X_SIZE_UPPER 0x08 #define MCS5000_TS_X_SIZE_LOWER 0x09 #define MCS5000_TS_Y_SIZE_UPPER 0x0A #define MCS5000_TS_Y_SIZE_LOWER 0x0B #define MCS5000_TS_INPUT_INFO 0x10 #define INPUT_TYPE_OFFSET 0 #define INPUT_TYPE_NONTOUCH (0 << INPUT_TYPE_OFFSET) #define INPUT_TYPE_SINGLE (1 << INPUT_TYPE_OFFSET) #define INPUT_TYPE_DUAL (2 << INPUT_TYPE_OFFSET) #define INPUT_TYPE_PALM (3 << INPUT_TYPE_OFFSET) #define INPUT_TYPE_PROXIMITY (7 << INPUT_TYPE_OFFSET) #define GESTURE_CODE_OFFSET 3 #define GESTURE_CODE_NO (0 << GESTURE_CODE_OFFSET) #define MCS5000_TS_X_POS_UPPER 0x11 #define MCS5000_TS_X_POS_LOWER 0x12 #define MCS5000_TS_Y_POS_UPPER 0x13 #define MCS5000_TS_Y_POS_LOWER 0x14 #define MCS5000_TS_Z_POS 0x15 #define MCS5000_TS_WIDTH 0x16 #define MCS5000_TS_GESTURE_VAL 0x17 #define MCS5000_TS_MODULE_REV 0x20 #define MCS5000_TS_FIRMWARE_VER 0x21 /* Touchscreen absolute values */ #define MCS5000_MAX_XC 0x3ff #define MCS5000_MAX_YC 0x3ff enum mcs5000_ts_read_offset { READ_INPUT_INFO, READ_X_POS_UPPER, READ_X_POS_LOWER, READ_Y_POS_UPPER, READ_Y_POS_LOWER, READ_BLOCK_SIZE, }; /* Each client has this additional data */ struct mcs5000_ts_data { struct i2c_client *client; struct input_dev *input_dev; const struct mcs_platform_data *platform_data; }; static irqreturn_t mcs5000_ts_interrupt(int irq, void *dev_id) { struct mcs5000_ts_data *data = dev_id; struct i2c_client *client = data->client; u8 buffer[READ_BLOCK_SIZE]; int err; int x; int y; err = i2c_smbus_read_i2c_block_data(client, MCS5000_TS_INPUT_INFO, READ_BLOCK_SIZE, buffer); if (err < 0) { dev_err(&client->dev, "%s, err[%d]\n", __func__, err); goto out; } switch (buffer[READ_INPUT_INFO]) { case INPUT_TYPE_NONTOUCH: input_report_key(data->input_dev, BTN_TOUCH, 0); input_sync(data->input_dev); break; case INPUT_TYPE_SINGLE: x = (buffer[READ_X_POS_UPPER] << 8) | buffer[READ_X_POS_LOWER]; y = 
(buffer[READ_Y_POS_UPPER] << 8) | buffer[READ_Y_POS_LOWER]; input_report_key(data->input_dev, BTN_TOUCH, 1); input_report_abs(data->input_dev, ABS_X, x); input_report_abs(data->input_dev, ABS_Y, y); input_sync(data->input_dev); break; case INPUT_TYPE_DUAL: /* TODO */ break; case INPUT_TYPE_PALM: /* TODO */ break; case INPUT_TYPE_PROXIMITY: /* TODO */ break; default: dev_err(&client->dev, "Unknown ts input type %d\n", buffer[READ_INPUT_INFO]); break; } out: return IRQ_HANDLED; } static void mcs5000_ts_phys_init(struct mcs5000_ts_data *data) { const struct mcs_platform_data *platform_data = data->platform_data; struct i2c_client *client = data->client; /* Touch reset & sleep mode */ i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, RESET_EXT_SOFT | OP_MODE_SLEEP); /* Touch size */ i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_UPPER, platform_data->x_size >> 8); i2c_smbus_write_byte_data(client, MCS5000_TS_X_SIZE_LOWER, platform_data->x_size & 0xff); i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_UPPER, platform_data->y_size >> 8); i2c_smbus_write_byte_data(client, MCS5000_TS_Y_SIZE_LOWER, platform_data->y_size & 0xff); /* Touch active mode & 80 report rate */ i2c_smbus_write_byte_data(data->client, MCS5000_TS_OP_MODE, OP_MODE_ACTIVE | REPORT_RATE_80); } static int __devinit mcs5000_ts_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct mcs5000_ts_data *data; struct input_dev *input_dev; int ret; if (!client->dev.platform_data) return -EINVAL; data = kzalloc(sizeof(struct mcs5000_ts_data), GFP_KERNEL); input_dev = input_allocate_device(); if (!data || !input_dev) { dev_err(&client->dev, "Failed to allocate memory\n"); ret = -ENOMEM; goto err_free_mem; } data->client = client; data->input_dev = input_dev; data->platform_data = client->dev.platform_data; input_dev->name = "MELPAS MCS-5000 Touchscreen"; input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, 
input_dev->evbit); __set_bit(BTN_TOUCH, input_dev->keybit); input_set_abs_params(input_dev, ABS_X, 0, MCS5000_MAX_XC, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, MCS5000_MAX_YC, 0, 0); input_set_drvdata(input_dev, data); if (data->platform_data->cfg_pin) data->platform_data->cfg_pin(); ret = request_threaded_irq(client->irq, NULL, mcs5000_ts_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "mcs5000_ts", data); if (ret < 0) { dev_err(&client->dev, "Failed to register interrupt\n"); goto err_free_mem; } ret = input_register_device(data->input_dev); if (ret < 0) goto err_free_irq; mcs5000_ts_phys_init(data); i2c_set_clientdata(client, data); return 0; err_free_irq: free_irq(client->irq, data); err_free_mem: input_free_device(input_dev); kfree(data); return ret; } static int __devexit mcs5000_ts_remove(struct i2c_client *client) { struct mcs5000_ts_data *data = i2c_get_clientdata(client); free_irq(client->irq, data); input_unregister_device(data->input_dev); kfree(data); return 0; } #ifdef CONFIG_PM static int mcs5000_ts_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); /* Touch sleep mode */ i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP); return 0; } static int mcs5000_ts_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mcs5000_ts_data *data = i2c_get_clientdata(client); mcs5000_ts_phys_init(data); return 0; } static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume); #endif static const struct i2c_device_id mcs5000_ts_id[] = { { "mcs5000_ts", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mcs5000_ts_id); static struct i2c_driver mcs5000_ts_driver = { .probe = mcs5000_ts_probe, .remove = __devexit_p(mcs5000_ts_remove), .driver = { .name = "mcs5000_ts", #ifdef CONFIG_PM .pm = &mcs5000_ts_pm, #endif }, .id_table = mcs5000_ts_id, }; module_i2c_driver(mcs5000_ts_driver); /* Module information */ MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); 
MODULE_DESCRIPTION("Touchscreen driver for MELFAS MCS-5000 controller"); MODULE_LICENSE("GPL");
gpl-2.0
varunchitre15/thunderzap_sprout
arch/powerpc/platforms/44x/warp.c
7003
6627
/* * PIKA Warp(tm) board specific routines * * Copyright (c) 2008-2009 PIKA Technologies * Sean MacLennan <smaclennan@pikatech.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/of_platform.h> #include <linux/kthread.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/of_gpio.h> #include <linux/of_i2c.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/time.h> #include <asm/uic.h> #include <asm/ppc4xx.h> static __initdata struct of_device_id warp_of_bus[] = { { .compatible = "ibm,plb4", }, { .compatible = "ibm,opb", }, { .compatible = "ibm,ebc", }, {}, }; static int __init warp_device_probe(void) { of_platform_bus_probe(NULL, warp_of_bus, NULL); return 0; } machine_device_initcall(warp, warp_device_probe); static int __init warp_probe(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "pika,warp")) return 0; /* For __dma_alloc_coherent */ ISA_DMA_THRESHOLD = ~0L; return 1; } define_machine(warp) { .name = "Warp", .probe = warp_probe, .progress = udbg_progress, .init_IRQ = uic_init_tree, .get_irq = uic_get_irq, .restart = ppc4xx_reset_system, .calibrate_decr = generic_calibrate_decr, }; static int __init warp_post_info(void) { struct device_node *np; void __iomem *fpga; u32 post1, post2; /* Sighhhh... POST information is in the sd area. 
*/ np = of_find_compatible_node(NULL, NULL, "pika,fpga-sd"); if (np == NULL) return -ENOENT; fpga = of_iomap(np, 0); of_node_put(np); if (fpga == NULL) return -ENOENT; post1 = in_be32(fpga + 0x40); post2 = in_be32(fpga + 0x44); iounmap(fpga); if (post1 || post2) printk(KERN_INFO "Warp POST %08x %08x\n", post1, post2); else printk(KERN_INFO "Warp POST OK\n"); return 0; } #ifdef CONFIG_SENSORS_AD7414 static LIST_HEAD(dtm_shutdown_list); static void __iomem *dtm_fpga; static unsigned green_led, red_led; struct dtm_shutdown { struct list_head list; void (*func)(void *arg); void *arg; }; int pika_dtm_register_shutdown(void (*func)(void *arg), void *arg) { struct dtm_shutdown *shutdown; shutdown = kmalloc(sizeof(struct dtm_shutdown), GFP_KERNEL); if (shutdown == NULL) return -ENOMEM; shutdown->func = func; shutdown->arg = arg; list_add(&shutdown->list, &dtm_shutdown_list); return 0; } int pika_dtm_unregister_shutdown(void (*func)(void *arg), void *arg) { struct dtm_shutdown *shutdown; list_for_each_entry(shutdown, &dtm_shutdown_list, list) if (shutdown->func == func && shutdown->arg == arg) { list_del(&shutdown->list); kfree(shutdown); return 0; } return -EINVAL; } static irqreturn_t temp_isr(int irq, void *context) { struct dtm_shutdown *shutdown; int value = 1; local_irq_disable(); gpio_set_value(green_led, 0); /* Run through the shutdown list. 
*/ list_for_each_entry(shutdown, &dtm_shutdown_list, list) shutdown->func(shutdown->arg); printk(KERN_EMERG "\n\nCritical Temperature Shutdown\n\n"); while (1) { if (dtm_fpga) { unsigned reset = in_be32(dtm_fpga + 0x14); out_be32(dtm_fpga + 0x14, reset); } gpio_set_value(red_led, value); value ^= 1; mdelay(500); } /* Not reached */ return IRQ_HANDLED; } static int pika_setup_leds(void) { struct device_node *np, *child; np = of_find_compatible_node(NULL, NULL, "gpio-leds"); if (!np) { printk(KERN_ERR __FILE__ ": Unable to find leds\n"); return -ENOENT; } for_each_child_of_node(np, child) if (strcmp(child->name, "green") == 0) green_led = of_get_gpio(child, 0); else if (strcmp(child->name, "red") == 0) red_led = of_get_gpio(child, 0); of_node_put(np); return 0; } static void pika_setup_critical_temp(struct device_node *np, struct i2c_client *client) { int irq, rc; /* Do this before enabling critical temp interrupt since we * may immediately interrupt. */ pika_setup_leds(); /* These registers are in 1 degree increments. */ i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */ i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */ irq = irq_of_parse_and_map(np, 0); if (irq == NO_IRQ) { printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n"); return; } rc = request_irq(irq, temp_isr, 0, "ad7414", NULL); if (rc) { printk(KERN_ERR __FILE__ ": Unable to request ad7414 irq %d = %d\n", irq, rc); return; } } static inline void pika_dtm_check_fan(void __iomem *fpga) { static int fan_state; u32 fan = in_be32(fpga + 0x34) & (1 << 14); if (fan_state != fan) { fan_state = fan; if (fan) printk(KERN_WARNING "Fan rotation error detected." 
" Please check hardware.\n"); } } static int pika_dtm_thread(void __iomem *fpga) { struct device_node *np; struct i2c_client *client; np = of_find_compatible_node(NULL, NULL, "adi,ad7414"); if (np == NULL) return -ENOENT; client = of_find_i2c_device_by_node(np); if (client == NULL) { of_node_put(np); return -ENOENT; } pika_setup_critical_temp(np, client); of_node_put(np); printk(KERN_INFO "Warp DTM thread running.\n"); while (!kthread_should_stop()) { int val; val = i2c_smbus_read_word_data(client, 0); if (val < 0) dev_dbg(&client->dev, "DTM read temp failed.\n"); else { s16 temp = swab16(val); out_be32(fpga + 0x20, temp); } pika_dtm_check_fan(fpga); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ); } return 0; } static int __init pika_dtm_start(void) { struct task_struct *dtm_thread; struct device_node *np; np = of_find_compatible_node(NULL, NULL, "pika,fpga"); if (np == NULL) return -ENOENT; dtm_fpga = of_iomap(np, 0); of_node_put(np); if (dtm_fpga == NULL) return -ENOENT; /* Must get post info before thread starts. */ warp_post_info(); dtm_thread = kthread_run(pika_dtm_thread, dtm_fpga, "pika-dtm"); if (IS_ERR(dtm_thread)) { iounmap(dtm_fpga); return PTR_ERR(dtm_thread); } return 0; } machine_late_initcall(warp, pika_dtm_start); #else /* !CONFIG_SENSORS_AD7414 */ int pika_dtm_register_shutdown(void (*func)(void *arg), void *arg) { return 0; } int pika_dtm_unregister_shutdown(void (*func)(void *arg), void *arg) { return 0; } machine_late_initcall(warp, warp_post_info); #endif EXPORT_SYMBOL(pika_dtm_register_shutdown); EXPORT_SYMBOL(pika_dtm_unregister_shutdown);
gpl-2.0
zhaolin1230/kernel_sony_msm8x60
drivers/net/arcnet/arcnet.c
7771
30420
/* * Linux ARCnet driver - device-independent routines * * Written 1997 by David Woodhouse. * Written 1994-1999 by Avery Pennarun. * Written 1999-2000 by Martin Mares <mj@ucw.cz>. * Derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * The change log is now in a file called ChangeLog in this directory. * * Sources: * - Crynwr arcnet.com/arcether.com packet drivers. * - arcnet.c v0.00 dated 1/1/94 and apparently by * Donald Becker - it didn't work :) * - skeleton.c v0.05 dated 11/16/93 by Donald Becker * (from Linux Kernel 1.1.45) * - RFC's 1201 and 1051 - re: TCP/IP over ARCnet * - The official ARCnet COM9026 data sheets (!) thanks to * Ken Cornetet <kcornete@nyx10.cs.du.edu> * - The official ARCnet COM20020 data sheets. * - Information on some more obscure ARCnet controller chips, thanks * to the nice people at SMSC. * - net/inet/eth.c (from kernel 1.1.50) for header-building info. 
* - Alternate Linux ARCnet source by V.Shergin <vsher@sao.stavropol.su> * - Textual information and more alternate source from Joachim Koenig * <jojo@repas.de> */ #define VERSION "arcnet: v3.94 BETA 2007/02/08 - by Avery Pennarun et al.\n" #include <linux/module.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <net/arp.h> #include <linux/init.h> #include <linux/arcdevice.h> #include <linux/jiffies.h> /* "do nothing" functions for protocol drivers */ static void null_rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length); static int null_build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr); static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, int length, int bufnum); static void arcnet_rx(struct net_device *dev, int bufnum); /* * one ArcProto per possible proto ID. None of the elements of * arc_proto_map are allowed to be NULL; they will get set to * arc_proto_default instead. It also must not be NULL; if you would like * to set it to NULL, set it to &arc_proto_null instead. 
*/ struct ArcProto *arc_proto_map[256], *arc_proto_default, *arc_bcast_proto, *arc_raw_proto; static struct ArcProto arc_proto_null = { .suffix = '?', .mtu = XMTU, .is_ip = 0, .rx = null_rx, .build_header = null_build_header, .prepare_tx = null_prepare_tx, .continue_tx = NULL, .ack_tx = NULL }; /* Exported function prototypes */ int arcnet_debug = ARCNET_DEBUG; EXPORT_SYMBOL(arc_proto_map); EXPORT_SYMBOL(arc_proto_default); EXPORT_SYMBOL(arc_bcast_proto); EXPORT_SYMBOL(arc_raw_proto); EXPORT_SYMBOL(arcnet_unregister_proto); EXPORT_SYMBOL(arcnet_debug); EXPORT_SYMBOL(alloc_arcdev); EXPORT_SYMBOL(arcnet_interrupt); EXPORT_SYMBOL(arcnet_open); EXPORT_SYMBOL(arcnet_close); EXPORT_SYMBOL(arcnet_send_packet); EXPORT_SYMBOL(arcnet_timeout); /* Internal function prototypes */ static int arcnet_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); static int arcnet_rebuild_header(struct sk_buff *skb); static int go_tx(struct net_device *dev); static int debug = ARCNET_DEBUG; module_param(debug, int, 0); MODULE_LICENSE("GPL"); static int __init arcnet_init(void) { int count; arcnet_debug = debug; printk("arcnet loaded.\n"); #ifdef ALPHA_WARNING BUGLVL(D_EXTRA) { printk("arcnet: ***\n" "arcnet: * Read arcnet.txt for important release notes!\n" "arcnet: *\n" "arcnet: * This is an ALPHA version! 
(Last stable release: v3.02) E-mail\n" "arcnet: * me if you have any questions, comments, or bug reports.\n" "arcnet: ***\n"); } #endif /* initialize the protocol map */ arc_raw_proto = arc_proto_default = arc_bcast_proto = &arc_proto_null; for (count = 0; count < 256; count++) arc_proto_map[count] = arc_proto_default; BUGLVL(D_DURING) printk("arcnet: struct sizes: %Zd %Zd %Zd %Zd %Zd\n", sizeof(struct arc_hardware), sizeof(struct arc_rfc1201), sizeof(struct arc_rfc1051), sizeof(struct arc_eth_encap), sizeof(struct archdr)); return 0; } static void __exit arcnet_exit(void) { } module_init(arcnet_init); module_exit(arcnet_exit); /* * Dump the contents of an sk_buff */ #if ARCNET_DEBUG_MAX & D_SKB void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc) { char hdr[32]; /* dump the packet */ snprintf(hdr, sizeof(hdr), "%6s:%s skb->data:", dev->name, desc); print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, true); } EXPORT_SYMBOL(arcnet_dump_skb); #endif /* * Dump the contents of an ARCnet buffer */ #if (ARCNET_DEBUG_MAX & (D_RX | D_TX)) static void arcnet_dump_packet(struct net_device *dev, int bufnum, char *desc, int take_arcnet_lock) { struct arcnet_local *lp = netdev_priv(dev); int i, length; unsigned long flags = 0; static uint8_t buf[512]; char hdr[32]; /* hw.copy_from_card expects IRQ context so take the IRQ lock to keep it single threaded */ if(take_arcnet_lock) spin_lock_irqsave(&lp->lock, flags); lp->hw.copy_from_card(dev, bufnum, 0, buf, 512); if(take_arcnet_lock) spin_unlock_irqrestore(&lp->lock, flags); /* if the offset[0] byte is nonzero, this is a 256-byte packet */ length = (buf[2] ? 
256 : 512); /* dump the packet */ snprintf(hdr, sizeof(hdr), "%6s:%s packet dump:", dev->name, desc); print_hex_dump(KERN_DEBUG, hdr, DUMP_PREFIX_OFFSET, 16, 1, buf, length, true); } #else #define arcnet_dump_packet(dev, bufnum, desc,take_arcnet_lock) do { } while (0) #endif /* * Unregister a protocol driver from the arc_proto_map. Protocol drivers * are responsible for registering themselves, but the unregister routine * is pretty generic so we'll do it here. */ void arcnet_unregister_proto(struct ArcProto *proto) { int count; if (arc_proto_default == proto) arc_proto_default = &arc_proto_null; if (arc_bcast_proto == proto) arc_bcast_proto = arc_proto_default; if (arc_raw_proto == proto) arc_raw_proto = arc_proto_default; for (count = 0; count < 256; count++) { if (arc_proto_map[count] == proto) arc_proto_map[count] = arc_proto_default; } } /* * Add a buffer to the queue. Only the interrupt handler is allowed to do * this, unless interrupts are disabled. * * Note: we don't check for a full queue, since there aren't enough buffers * to more than fill it. */ static void release_arcbuf(struct net_device *dev, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); int i; lp->buf_queue[lp->first_free_buf++] = bufnum; lp->first_free_buf %= 5; BUGLVL(D_DURING) { BUGMSG(D_DURING, "release_arcbuf: freed #%d; buffer queue is now: ", bufnum); for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5) BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]); BUGMSG2(D_DURING, "\n"); } } /* * Get a buffer from the queue. If this returns -1, there are no buffers * available. 
*/ static int get_arcbuf(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); int buf = -1, i; if (!atomic_dec_and_test(&lp->buf_lock)) { /* already in this function */ BUGMSG(D_NORMAL, "get_arcbuf: overlap (%d)!\n", lp->buf_lock.counter); } else { /* we can continue */ if (lp->next_buf >= 5) lp->next_buf -= 5; if (lp->next_buf == lp->first_free_buf) BUGMSG(D_NORMAL, "get_arcbuf: BUG: no buffers are available??\n"); else { buf = lp->buf_queue[lp->next_buf++]; lp->next_buf %= 5; } } BUGLVL(D_DURING) { BUGMSG(D_DURING, "get_arcbuf: got #%d; buffer queue is now: ", buf); for (i = lp->next_buf; i != lp->first_free_buf; i = (i+1) % 5) BUGMSG2(D_DURING, "#%d ", lp->buf_queue[i]); BUGMSG2(D_DURING, "\n"); } atomic_inc(&lp->buf_lock); return buf; } static int choose_mtu(void) { int count, mtu = 65535; /* choose the smallest MTU of all available encaps */ for (count = 0; count < 256; count++) { if (arc_proto_map[count] != &arc_proto_null && arc_proto_map[count]->mtu < mtu) { mtu = arc_proto_map[count]->mtu; } } return mtu == 65535 ? XMTU : mtu; } static const struct header_ops arcnet_header_ops = { .create = arcnet_header, .rebuild = arcnet_rebuild_header, }; static const struct net_device_ops arcnet_netdev_ops = { .ndo_open = arcnet_open, .ndo_stop = arcnet_close, .ndo_start_xmit = arcnet_send_packet, .ndo_tx_timeout = arcnet_timeout, }; /* Setup a struct device for ARCnet. */ static void arcdev_setup(struct net_device *dev) { dev->type = ARPHRD_ARCNET; dev->netdev_ops = &arcnet_netdev_ops; dev->header_ops = &arcnet_header_ops; dev->hard_header_len = sizeof(struct archdr); dev->mtu = choose_mtu(); dev->addr_len = ARCNET_ALEN; dev->tx_queue_len = 100; dev->broadcast[0] = 0x00; /* for us, broadcasts are address 0 */ dev->watchdog_timeo = TX_TIMEOUT; /* New-style flags. */ dev->flags = IFF_BROADCAST; } struct net_device *alloc_arcdev(const char *name) { struct net_device *dev; dev = alloc_netdev(sizeof(struct arcnet_local), name && *name ? 
name : "arc%d", arcdev_setup); if(dev) { struct arcnet_local *lp = netdev_priv(dev); spin_lock_init(&lp->lock); } return dev; } /* * Open/initialize the board. This is called sometime after booting when * the 'ifconfig' program is run. * * This routine should set everything up anew at each open, even registers * that "should" only need to be set once at boot, so that there is * non-reboot way to recover if something goes wrong. */ int arcnet_open(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); int count, newmtu, error; BUGMSG(D_INIT,"opened."); if (!try_module_get(lp->hw.owner)) return -ENODEV; BUGLVL(D_PROTO) { BUGMSG(D_PROTO, "protocol map (default is '%c'): ", arc_proto_default->suffix); for (count = 0; count < 256; count++) BUGMSG2(D_PROTO, "%c", arc_proto_map[count]->suffix); BUGMSG2(D_PROTO, "\n"); } BUGMSG(D_INIT, "arcnet_open: resetting card.\n"); /* try to put the card in a defined state - if it fails the first * time, actually reset it. */ error = -ENODEV; if (ARCRESET(0) && ARCRESET(1)) goto out_module_put; newmtu = choose_mtu(); if (newmtu < dev->mtu) dev->mtu = newmtu; BUGMSG(D_INIT, "arcnet_open: mtu: %d.\n", dev->mtu); /* autodetect the encapsulation for each host. */ memset(lp->default_proto, 0, sizeof(lp->default_proto)); /* the broadcast address is special - use the 'bcast' protocol */ for (count = 0; count < 256; count++) { if (arc_proto_map[count] == arc_bcast_proto) { lp->default_proto[0] = count; break; } } /* initialize buffers */ atomic_set(&lp->buf_lock, 1); lp->next_buf = lp->first_free_buf = 0; release_arcbuf(dev, 0); release_arcbuf(dev, 1); release_arcbuf(dev, 2); release_arcbuf(dev, 3); lp->cur_tx = lp->next_tx = -1; lp->cur_rx = -1; lp->rfc1201.sequence = 1; /* bring up the hardware driver */ if (lp->hw.open) lp->hw.open(dev); if (dev->dev_addr[0] == 0) BUGMSG(D_NORMAL, "WARNING! Station address 00 is reserved " "for broadcasts!\n"); else if (dev->dev_addr[0] == 255) BUGMSG(D_NORMAL, "WARNING! 
Station address FF may confuse " "DOS networking programs!\n"); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); if (ASTATUS() & RESETflag) { BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); ACOMMAND(CFLAGScmd | RESETclear); } BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); /* make sure we're ready to receive IRQ's. */ AINTMASK(0); udelay(1); /* give it time to set the mask before * we reset it again. (may not even be * necessary) */ BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); lp->intmask = NORXflag | RECONflag; AINTMASK(lp->intmask); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); netif_start_queue(dev); return 0; out_module_put: module_put(lp->hw.owner); return error; } /* The inverse routine to arcnet_open - shuts down the card. */ int arcnet_close(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); netif_stop_queue(dev); /* flush TX and disable RX */ AINTMASK(0); ACOMMAND(NOTXcmd); /* stop transmit */ ACOMMAND(NORXcmd); /* disable receive */ mdelay(1); /* shut down the card */ lp->hw.close(dev); module_put(lp->hw.owner); return 0; } static int arcnet_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { const struct arcnet_local *lp = netdev_priv(dev); uint8_t _daddr, proto_num; struct ArcProto *proto; BUGMSG(D_DURING, "create header from %d to %d; protocol %d (%Xh); size %u.\n", saddr ? *(uint8_t *) saddr : -1, daddr ? *(uint8_t *) daddr : -1, type, type, len); if (skb->len!=0 && len != skb->len) BUGMSG(D_NORMAL, "arcnet_header: Yikes! skb->len(%d) != len(%d)!\n", skb->len, len); /* Type is host order - ? */ if(type == ETH_P_ARCNET) { proto = arc_raw_proto; BUGMSG(D_DEBUG, "arc_raw_proto used. proto='%c'\n",proto->suffix); _daddr = daddr ? *(uint8_t *) daddr : 0; } else if (!daddr) { /* * if the dest addr isn't provided, we can't choose an encapsulation! * Store the packet type (eg. 
ETH_P_IP) for now, and we'll push on a * real header when we do rebuild_header. */ *(uint16_t *) skb_push(skb, 2) = type; /* * XXX: Why not use skb->mac_len? */ if (skb->network_header - skb->mac_header != 2) BUGMSG(D_NORMAL, "arcnet_header: Yikes! diff (%d) is not 2!\n", (int)(skb->network_header - skb->mac_header)); return -2; /* return error -- can't transmit yet! */ } else { /* otherwise, we can just add the header as usual. */ _daddr = *(uint8_t *) daddr; proto_num = lp->default_proto[_daddr]; proto = arc_proto_map[proto_num]; BUGMSG(D_DURING, "building header for %02Xh using protocol '%c'\n", proto_num, proto->suffix); if (proto == &arc_proto_null && arc_bcast_proto != proto) { BUGMSG(D_DURING, "actually, let's use '%c' instead.\n", arc_bcast_proto->suffix); proto = arc_bcast_proto; } } return proto->build_header(skb, dev, type, _daddr); } /* * Rebuild the ARCnet hard header. This is called after an ARP (or in the * future other address resolution) has completed on this sk_buff. We now * let ARP fill in the destination field. */ static int arcnet_rebuild_header(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct arcnet_local *lp = netdev_priv(dev); int status = 0; /* default is failure */ unsigned short type; uint8_t daddr=0; struct ArcProto *proto; /* * XXX: Why not use skb->mac_len? */ if (skb->network_header - skb->mac_header != 2) { BUGMSG(D_NORMAL, "rebuild_header: shouldn't be here! (hdrsize=%d)\n", (int)(skb->network_header - skb->mac_header)); return 0; } type = *(uint16_t *) skb_pull(skb, 2); BUGMSG(D_DURING, "rebuild header for protocol %Xh\n", type); if (type == ETH_P_IP) { #ifdef CONFIG_INET BUGMSG(D_DURING, "rebuild header for ethernet protocol %Xh\n", type); status = arp_find(&daddr, skb) ? 
1 : 0; BUGMSG(D_DURING, " rebuilt: dest is %d; protocol %Xh\n", daddr, type); #endif } else { BUGMSG(D_NORMAL, "I don't understand ethernet protocol %Xh addresses!\n", type); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; } /* if we couldn't resolve the address... give up. */ if (!status) return 0; /* add the _real_ header this time! */ proto = arc_proto_map[lp->default_proto[daddr]]; proto->build_header(skb, dev, type, daddr); return 1; /* success */ } /* Called by the kernel in order to transmit a packet. */ netdev_tx_t arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); struct archdr *pkt; struct arc_rfc1201 *soft; struct ArcProto *proto; int txbuf; unsigned long flags; int freeskb, retval; BUGMSG(D_DURING, "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n", ASTATUS(), lp->cur_tx, lp->next_tx, skb->len,skb->protocol); pkt = (struct archdr *) skb->data; soft = &pkt->soft.rfc1201; proto = arc_proto_map[soft->proto]; BUGMSG(D_SKB_SIZE, "skb: transmitting %d bytes to %02X\n", skb->len, pkt->hard.dest); BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "tx"); /* fits in one packet? */ if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) { BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n"); dev_kfree_skb(skb); return NETDEV_TX_OK; /* don't try again */ } /* We're busy transmitting a packet... */ netif_stop_queue(dev); spin_lock_irqsave(&lp->lock, flags); AINTMASK(0); if(lp->next_tx == -1) txbuf = get_arcbuf(dev); else { txbuf = -1; } if (txbuf != -1) { if (proto->prepare_tx(dev, pkt, skb->len, txbuf) && !proto->ack_tx) { /* done right away and we don't want to acknowledge the package later - forget about it now */ dev->stats.tx_bytes += skb->len; freeskb = 1; } else { /* do it the 'split' way */ lp->outgoing.proto = proto; lp->outgoing.skb = skb; lp->outgoing.pkt = pkt; freeskb = 0; if (proto->continue_tx && proto->continue_tx(dev, txbuf)) { BUGMSG(D_NORMAL, "bug! 
continue_tx finished the first time! " "(proto='%c')\n", proto->suffix); } } retval = NETDEV_TX_OK; lp->next_tx = txbuf; } else { retval = NETDEV_TX_BUSY; freeskb = 0; } BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS()); /* make sure we didn't ignore a TX IRQ while we were in here */ AINTMASK(0); BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); lp->intmask |= TXFREEflag|EXCNAKflag; AINTMASK(lp->intmask); BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS()); spin_unlock_irqrestore(&lp->lock, flags); if (freeskb) { dev_kfree_skb(skb); } return retval; /* no need to try again */ } /* * Actually start transmitting a packet that was loaded into a buffer * by prepare_tx. This should _only_ be called by the interrupt handler. */ static int go_tx(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n", ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx); if (lp->cur_tx != -1 || lp->next_tx == -1) return 0; BUGLVL(D_TX) arcnet_dump_packet(dev, lp->next_tx, "go_tx", 0); lp->cur_tx = lp->next_tx; lp->next_tx = -1; /* start sending */ ACOMMAND(TXcmd | (lp->cur_tx << 3)); dev->stats.tx_packets++; lp->lasttrans_dest = lp->lastload_dest; lp->lastload_dest = 0; lp->excnak_pending = 0; lp->intmask |= TXFREEflag|EXCNAKflag; return 1; } /* Called by the kernel when transmit times out */ void arcnet_timeout(struct net_device *dev) { unsigned long flags; struct arcnet_local *lp = netdev_priv(dev); int status = ASTATUS(); char *msg; spin_lock_irqsave(&lp->lock, flags); if (status & TXFREEflag) { /* transmit _DID_ finish */ msg = " - missed IRQ?"; } else { msg = ""; dev->stats.tx_aborted_errors++; lp->timed_out = 1; ACOMMAND(NOTXcmd | (lp->cur_tx << 3)); } dev->stats.tx_errors++; /* make sure we didn't miss a TX or a EXC NAK IRQ */ AINTMASK(0); lp->intmask |= TXFREEflag|EXCNAKflag; AINTMASK(lp->intmask); 
spin_unlock_irqrestore(&lp->lock, flags); if (time_after(jiffies, lp->last_timeout + 10*HZ)) { BUGMSG(D_EXTRA, "tx timed out%s (status=%Xh, intmask=%Xh, dest=%02Xh)\n", msg, status, lp->intmask, lp->lasttrans_dest); lp->last_timeout = jiffies; } if (lp->cur_tx == -1) netif_wake_queue(dev); } /* * The typical workload of the driver: Handle the network interface * interrupts. Establish which device needs attention, and call the correct * chipset interrupt handler. */ irqreturn_t arcnet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct arcnet_local *lp; int recbuf, status, diagstatus, didsomething, boguscount; int retval = IRQ_NONE; BUGMSG(D_DURING, "\n"); BUGMSG(D_DURING, "in arcnet_interrupt\n"); lp = netdev_priv(dev); BUG_ON(!lp); spin_lock(&lp->lock); /* * RESET flag was enabled - if device is not running, we must clear it right * away (but nothing else). */ if (!netif_running(dev)) { if (ASTATUS() & RESETflag) ACOMMAND(CFLAGScmd | RESETclear); AINTMASK(0); spin_unlock(&lp->lock); return IRQ_HANDLED; } BUGMSG(D_DURING, "in arcnet_inthandler (status=%Xh, intmask=%Xh)\n", ASTATUS(), lp->intmask); boguscount = 5; do { status = ASTATUS(); diagstatus = (status >> 8) & 0xFF; BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n", __FILE__,__LINE__,__func__,status); didsomething = 0; /* * RESET flag was enabled - card is resetting and if RX is * disabled, it's NOT because we just got a packet. * * The card is in an undefined state. Clear it out and start over. */ if (status & RESETflag) { BUGMSG(D_NORMAL, "spurious reset (status=%Xh)\n", status); arcnet_close(dev); arcnet_open(dev); /* get out of the interrupt handler! */ break; } /* * RX is inhibited - we must have received something. Prepare to * receive into the next buffer. * * We don't actually copy the received packet from the card until * after the transmit handler runs (and possibly launches the next * tx); this should improve latency slightly if we get both types * of interrupts at once. 
*/ recbuf = -1; if (status & lp->intmask & NORXflag) { recbuf = lp->cur_rx; BUGMSG(D_DURING, "Buffer #%d: receive irq (status=%Xh)\n", recbuf, status); lp->cur_rx = get_arcbuf(dev); if (lp->cur_rx != -1) { BUGMSG(D_DURING, "enabling receive to buffer #%d\n", lp->cur_rx); ACOMMAND(RXcmd | (lp->cur_rx << 3) | RXbcasts); } didsomething++; } if((diagstatus & EXCNAKflag)) { BUGMSG(D_DURING, "EXCNAK IRQ (diagstat=%Xh)\n", diagstatus); ACOMMAND(NOTXcmd); /* disable transmit */ lp->excnak_pending = 1; ACOMMAND(EXCNAKclear); lp->intmask &= ~(EXCNAKflag); didsomething++; } /* a transmit finished, and we're interested in it. */ if ((status & lp->intmask & TXFREEflag) || lp->timed_out) { lp->intmask &= ~(TXFREEflag|EXCNAKflag); BUGMSG(D_DURING, "TX IRQ (stat=%Xh)\n", status); if (lp->cur_tx != -1 && !lp->timed_out) { if(!(status & TXACKflag)) { if (lp->lasttrans_dest != 0) { BUGMSG(D_EXTRA, "transmit was not acknowledged! " "(status=%Xh, dest=%02Xh)\n", status, lp->lasttrans_dest); dev->stats.tx_errors++; dev->stats.tx_carrier_errors++; } else { BUGMSG(D_DURING, "broadcast was not acknowledged; that's normal " "(status=%Xh, dest=%02Xh)\n", status, lp->lasttrans_dest); } } if (lp->outgoing.proto && lp->outgoing.proto->ack_tx) { int ackstatus; if(status & TXACKflag) ackstatus=2; else if(lp->excnak_pending) ackstatus=1; else ackstatus=0; lp->outgoing.proto ->ack_tx(dev, ackstatus); } } if (lp->cur_tx != -1) release_arcbuf(dev, lp->cur_tx); lp->cur_tx = -1; lp->timed_out = 0; didsomething++; /* send another packet if there is one */ go_tx(dev); /* continue a split packet, if any */ if (lp->outgoing.proto && lp->outgoing.proto->continue_tx) { int txbuf = get_arcbuf(dev); if (txbuf != -1) { if (lp->outgoing.proto->continue_tx(dev, txbuf)) { /* that was the last segment */ dev->stats.tx_bytes += lp->outgoing.skb->len; if(!lp->outgoing.proto->ack_tx) { dev_kfree_skb_irq(lp->outgoing.skb); lp->outgoing.proto = NULL; } } lp->next_tx = txbuf; } } /* inform upper layers of idleness, if 
necessary */ if (lp->cur_tx == -1) netif_wake_queue(dev); } /* now process the received packet, if any */ if (recbuf != -1) { BUGLVL(D_RX) arcnet_dump_packet(dev, recbuf, "rx irq", 0); arcnet_rx(dev, recbuf); release_arcbuf(dev, recbuf); didsomething++; } if (status & lp->intmask & RECONflag) { ACOMMAND(CFLAGScmd | CONFIGclear); dev->stats.tx_carrier_errors++; BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", status); /* MYRECON bit is at bit 7 of diagstatus */ if(diagstatus & 0x80) BUGMSG(D_RECON,"Put out that recon myself\n"); /* is the RECON info empty or old? */ if (!lp->first_recon || !lp->last_recon || time_after(jiffies, lp->last_recon + HZ * 10)) { if (lp->network_down) BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n"); lp->first_recon = lp->last_recon = jiffies; lp->num_recons = lp->network_down = 0; BUGMSG(D_DURING, "recon: clearing counters.\n"); } else { /* add to current RECON counter */ lp->last_recon = jiffies; lp->num_recons++; BUGMSG(D_DURING, "recon: counter=%d, time=%lds, net=%d\n", lp->num_recons, (lp->last_recon - lp->first_recon) / HZ, lp->network_down); /* if network is marked up; * and first_recon and last_recon are 60+ apart; * and the average no. of recons counted is * > RECON_THRESHOLD/min; * then print a warning message. */ if (!lp->network_down && (lp->last_recon - lp->first_recon) <= HZ * 60 && lp->num_recons >= RECON_THRESHOLD) { lp->network_down = 1; BUGMSG(D_NORMAL, "many reconfigurations detected: cabling problem?\n"); } else if (!lp->network_down && lp->last_recon - lp->first_recon > HZ * 60) { /* reset counters if we've gone for over a minute. 
*/ lp->first_recon = lp->last_recon; lp->num_recons = 1; } } } else if (lp->network_down && time_after(jiffies, lp->last_recon + HZ * 10)) { if (lp->network_down) BUGMSG(D_NORMAL, "cabling restored?\n"); lp->first_recon = lp->last_recon = 0; lp->num_recons = lp->network_down = 0; BUGMSG(D_DURING, "not recon: clearing counters anyway.\n"); } if(didsomething) { retval |= IRQ_HANDLED; } } while (--boguscount && didsomething); BUGMSG(D_DURING, "arcnet_interrupt complete (status=%Xh, count=%d)\n", ASTATUS(), boguscount); BUGMSG(D_DURING, "\n"); AINTMASK(0); udelay(1); AINTMASK(lp->intmask); spin_unlock(&lp->lock); return retval; } /* * This is a generic packet receiver that calls arcnet??_rx depending on the * protocol ID found. */ static void arcnet_rx(struct net_device *dev, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); struct archdr pkt; struct arc_rfc1201 *soft; int length, ofs; soft = &pkt.soft.rfc1201; lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE)); if (pkt.hard.offset[0]) { ofs = pkt.hard.offset[0]; length = 256 - ofs; } else { ofs = pkt.hard.offset[1]; length = 512 - ofs; } /* get the full header, if possible */ if (sizeof(pkt.soft) <= length) lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft)); else { memset(&pkt.soft, 0, sizeof(pkt.soft)); lp->hw.copy_from_card(dev, bufnum, ofs, soft, length); } BUGMSG(D_DURING, "Buffer #%d: received packet from %02Xh to %02Xh " "(%d+4 bytes)\n", bufnum, pkt.hard.source, pkt.hard.dest, length); dev->stats.rx_packets++; dev->stats.rx_bytes += length + ARC_HDR_SIZE; /* call the right receiver for the protocol */ if (arc_proto_map[soft->proto]->is_ip) { BUGLVL(D_PROTO) { struct ArcProto *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]], *newp = arc_proto_map[soft->proto]; if (oldp != newp) { BUGMSG(D_PROTO, "got protocol %02Xh; encap for host %02Xh is now '%c'" " (was '%c')\n", soft->proto, pkt.hard.source, newp->suffix, oldp->suffix); } } /* broadcasts will always be done with 
the last-used encap. */ lp->default_proto[0] = soft->proto; /* in striking contrast, the following isn't a hack. */ lp->default_proto[pkt.hard.source] = soft->proto; } /* call the protocol-specific receiver. */ arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length); } static void null_rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) { BUGMSG(D_PROTO, "rx: don't know how to deal with proto %02Xh from host %02Xh.\n", pkthdr->soft.rfc1201.proto, pkthdr->hard.source); } static int null_build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr) { struct arcnet_local *lp = netdev_priv(dev); BUGMSG(D_PROTO, "tx: can't build header for encap %02Xh; load a protocol driver.\n", lp->default_proto[daddr]); /* always fails */ return 0; } /* the "do nothing" prepare_tx function warns that there's nothing to do. */ static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, int length, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); struct arc_hardware newpkt; BUGMSG(D_PROTO, "tx: no encap for this host; load a protocol driver.\n"); /* send a packet to myself -- will never get received, of course */ newpkt.source = newpkt.dest = dev->dev_addr[0]; /* only one byte of actual data (and it's random) */ newpkt.offset[0] = 0xFF; lp->hw.copy_to_card(dev, bufnum, 0, &newpkt, ARC_HDR_SIZE); return 1; /* done */ }
gpl-2.0
ibanezchen/linux-8173
arch/powerpc/platforms/cell/spider-pci.c
11867
4985
/* * IO workarounds for PCI on Celleb/Cell platform * * (C) Copyright 2006-2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #undef DEBUG #include <linux/kernel.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/io.h> #include <asm/ppc-pci.h> #include <asm/pci-bridge.h> #include <asm/io-workarounds.h> #define SPIDER_PCI_DISABLE_PREFETCH struct spiderpci_iowa_private { void __iomem *regs; }; static void spiderpci_io_flush(struct iowa_bus *bus) { struct spiderpci_iowa_private *priv; u32 val; priv = bus->private; val = in_be32(priv->regs + SPIDER_PCI_DUMMY_READ); iosync(); } #define SPIDER_PCI_MMIO_READ(name, ret) \ static ret spiderpci_##name(const PCI_IO_ADDR addr) \ { \ ret val = __do_##name(addr); \ spiderpci_io_flush(iowa_mem_find_bus(addr)); \ return val; \ } #define SPIDER_PCI_MMIO_READ_STR(name) \ static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf, \ unsigned long count) \ { \ __do_##name(addr, buf, count); \ spiderpci_io_flush(iowa_mem_find_bus(addr)); \ } SPIDER_PCI_MMIO_READ(readb, u8) SPIDER_PCI_MMIO_READ(readw, u16) SPIDER_PCI_MMIO_READ(readl, u32) SPIDER_PCI_MMIO_READ(readq, u64) SPIDER_PCI_MMIO_READ(readw_be, u16) SPIDER_PCI_MMIO_READ(readl_be, u32) SPIDER_PCI_MMIO_READ(readq_be, u64) SPIDER_PCI_MMIO_READ_STR(readsb) SPIDER_PCI_MMIO_READ_STR(readsw) 
SPIDER_PCI_MMIO_READ_STR(readsl) static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src, unsigned long n) { __do_memcpy_fromio(dest, src, n); spiderpci_io_flush(iowa_mem_find_bus(src)); } static int __init spiderpci_pci_setup_chip(struct pci_controller *phb, void __iomem *regs) { void *dummy_page_va; dma_addr_t dummy_page_da; #ifdef SPIDER_PCI_DISABLE_PREFETCH u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT); pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val); out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8); #endif /* SPIDER_PCI_DISABLE_PREFETCH */ /* setup dummy read */ /* * On CellBlade, we can't know that which XDR memory is used by * kmalloc() to allocate dummy_page_va. * In order to imporve the performance, the XDR which is used to * allocate dummy_page_va is the nearest the spider-pci. * We have to select the CBE which is the nearest the spider-pci * to allocate memory from the best XDR, but I don't know that * how to do. * * Celleb does not have this problem, because it has only one XDR. 
*/ dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!dummy_page_va) { pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n"); return -1; } dummy_page_da = dma_map_single(phb->parent, dummy_page_va, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(phb->parent, dummy_page_da)) { pr_err("SPIDER-IOWA:Map dummy page filed.\n"); kfree(dummy_page_va); return -1; } out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da); return 0; } int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data) { void __iomem *regs = NULL; struct spiderpci_iowa_private *priv; struct device_node *np = bus->phb->dn; struct resource r; unsigned long offset = (unsigned long)data; pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%s)\n", np->full_name); priv = kzalloc(sizeof(struct spiderpci_iowa_private), GFP_KERNEL); if (!priv) { pr_err("SPIDERPCI-IOWA:" "Can't allocate struct spiderpci_iowa_private"); return -1; } if (of_address_to_resource(np, 0, &r)) { pr_err("SPIDERPCI-IOWA:Can't get resource.\n"); goto error; } regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE); if (!regs) { pr_err("SPIDERPCI-IOWA:ioremap failed.\n"); goto error; } priv->regs = regs; bus->private = priv; if (spiderpci_pci_setup_chip(bus->phb, regs)) goto error; return 0; error: kfree(priv); bus->private = NULL; if (regs) iounmap(regs); return -1; } struct ppc_pci_io spiderpci_ops = { .readb = spiderpci_readb, .readw = spiderpci_readw, .readl = spiderpci_readl, .readq = spiderpci_readq, .readw_be = spiderpci_readw_be, .readl_be = spiderpci_readl_be, .readq_be = spiderpci_readq_be, .readsb = spiderpci_readsb, .readsw = spiderpci_readsw, .readsl = spiderpci_readsl, .memcpy_fromio = spiderpci_memcpy_fromio, };
gpl-2.0
antmicro/linux-tk1
kernel/backtracetest.c
12891
2135
/*
 * Simple stack backtrace regression test module
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/*
 * Print the per-sub-test announcement followed by the standard
 * "this is a self test, not a bug" disclaimer.
 */
static void backtrace_test_banner(const char *what)
{
	printk("%s", what);
	printk("The following trace is a kernel self test and not a bug!\n");
}

/* Sub-test 1: dump a backtrace while running in plain process context. */
static void backtrace_test_normal(void)
{
	backtrace_test_banner("Testing a backtrace from process context.\n");
	dump_stack();
}

static DECLARE_COMPLETION(backtrace_work);

/* Tasklet body: produce the trace, then release the waiting caller. */
static void backtrace_test_irq_callback(unsigned long data)
{
	dump_stack();
	complete(&backtrace_work);
}

static DECLARE_TASKLET(backtrace_tasklet, &backtrace_test_irq_callback, 0);

/* Sub-test 2: dump a backtrace from softirq (tasklet) context. */
static void backtrace_test_irq(void)
{
	backtrace_test_banner("Testing a backtrace from irq context.\n");

	init_completion(&backtrace_work);
	tasklet_schedule(&backtrace_tasklet);
	wait_for_completion(&backtrace_work);
}

#ifdef CONFIG_STACKTRACE
/* Sub-test 3: capture a trace into a buffer, then print the saved copy. */
static void backtrace_test_saved(void)
{
	unsigned long backing[8];
	struct stack_trace st = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(backing),
		.entries	= backing,
		.skip		= 0,
	};

	backtrace_test_banner("Testing a saved backtrace.\n");

	save_stack_trace(&st);
	print_stack_trace(&st, 0);
}
#else
/* CONFIG_STACKTRACE is off: nothing to capture, just say so. */
static void backtrace_test_saved(void)
{
	printk("Saved backtrace test skipped.\n");
}
#endif

/* Module entry point: run all three sub-tests once at load time. */
static int backtrace_regression_test(void)
{
	printk("====[ backtrace testing ]===========\n");

	backtrace_test_normal();
	backtrace_test_irq();
	backtrace_test_saved();

	printk("====[ end of backtrace testing ]====\n");
	return 0;
}

/* Nothing to tear down; the module only prints at load time. */
static void exitf(void)
{
}

module_init(backtrace_regression_test);
module_exit(exitf);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
gpl-2.0
jrior001/android_kernel_samsung_d2
drivers/ide/ide-iops.c
13659
13870
/*
 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/nmi.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* Ask the host port driver (if it has a maskproc hook) to mask/unmask
 * the drive's IRQ. */
void SELECT_MASK(ide_drive_t *drive, int mask)
{
	const struct ide_port_ops *port_ops = drive->hwif->port_ops;

	if (port_ops && port_ops->maskproc)
		port_ops->maskproc(drive, mask);
}

/* Read the error register through the taskfile transport ops. */
u8 ide_read_error(ide_drive_t *drive)
{
	struct ide_taskfile tf;

	drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR);

	return tf.error;
}
EXPORT_SYMBOL_GPL(ide_read_error);

/* Byte-swap a raw IDENTIFY buffer to host order on big-endian machines;
 * a no-op on little-endian. */
void ide_fix_driveid(u16 *id)
{
#ifndef __LITTLE_ENDIAN
# ifdef __BIG_ENDIAN
	int i;

	for (i = 0; i < 256; i++)
		id[i] = __le16_to_cpu(id[i]);
# else
# error "Please fix <asm/byteorder.h>"
# endif
#endif
}

/*
 * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
 * removing leading/trailing blanks and compressing internal blanks.
 * It is primarily used to tidy up the model name/number fields as
 * returned by the ATA_CMD_ID_ATA[PI] commands.
 */
void ide_fixstring(u8 *s, const int bytecount, const int byteswap)
{
	u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */

	if (byteswap) {
		/* convert from big-endian to host byte order */
		for (p = s ; p != end ; p += 2)
			be16_to_cpus((u16 *) p);
	}

	/* strip leading blanks */
	p = s;
	while (s != end && *s == ' ')
		++s;
	/* compress internal blanks and strip trailing blanks */
	while (s != end && *s) {
		if (*s++ != ' ' || (s != end && *s && *s != ' '))
			*p++ = *(s-1);
	}
	/* wipe out trailing garbage */
	while (p != end)
		*p++ = '\0';
}
EXPORT_SYMBOL(ide_fixstring);

/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0.  All other
 * cases return error -- caller may then invoke ide_error().
 *
 * This routine should get fixed to not hog the cpu during extra long waits..
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half second intervals thereafter,
 * until timeout is achieved, before timing out.
 */
int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
		    unsigned long timeout, u8 *rstat)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	unsigned long flags;
	int i;
	u8 stat;

	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */
	stat = tp_ops->read_status(hwif);

	if (stat & ATA_BUSY) {
		/* Poll with interrupts enabled until BUSY drops or we
		 * run out of time; IRQ flags restored on every exit path. */
		local_save_flags(flags);
		local_irq_enable_in_hardirq();
		timeout += jiffies;
		while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
			if (time_after(jiffies, timeout)) {
				/*
				 * One last read after the timeout in case
				 * heavy interrupt load made us not make any
				 * progress during the timeout..
				 */
				stat = tp_ops->read_status(hwif);
				if ((stat & ATA_BUSY) == 0)
					break;

				local_irq_restore(flags);
				*rstat = stat;
				return -EBUSY;
			}
		}
		local_irq_restore(flags);
	}
	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		stat = tp_ops->read_status(hwif);

		if (OK_STAT(stat, good, bad)) {
			*rstat = stat;
			return 0;
		}
	}
	*rstat = stat;
	return -EFAULT;	/* status never settled to a "good" value */
}

/*
 * In case of error returns error value after doing "*startstop = ide_error()".
 * The caller should return the updated value of "startstop" in this case,
 * "startstop" is unchanged when the function returns 0.
 */
int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good,
		  u8 bad, unsigned long timeout)
{
	int err;
	u8 stat;

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		*startstop = ide_stopped;
		return 1;
	}

	err = __ide_wait_stat(drive, good, bad, timeout, &stat);

	if (err) {
		char *s = (err == -EBUSY) ? "status timeout" : "status error";
		*startstop = ide_error(drive, s, stat);
	}

	return err;
}
EXPORT_SYMBOL(ide_wait_stat);

/**
 *	ide_in_drive_list	-	look for drive in black/white list
 *	@id: drive identifier
 *	@table: list to inspect
 *
 *	Look for a drive in the blacklist and the whitelist tables
 *	Returns 1 if the drive is found in the table.
 */
int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
{
	for ( ; table->id_model; table++)
		if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) &&
		    (!table->id_firmware ||
		     strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware)))
			return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(ide_in_drive_list);

/*
 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
 * Some optical devices with the buggy firmwares have the same problem.
 */
static const struct drive_list_entry ivb_list[] = {
	{ "QUANTUM FIREBALLlct10 05"	, "A03.0900"	},
	{ "QUANTUM FIREBALLlct20 30"	, "APL.0900"	},
	{ "TSSTcorp CDDVDW SH-S202J"	, "SB00"	},
	{ "TSSTcorp CDDVDW SH-S202J"	, "SB01"	},
	{ "TSSTcorp CDDVDW SH-S202N"	, "SB00"	},
	{ "TSSTcorp CDDVDW SH-S202N"	, "SB01"	},
	{ "TSSTcorp CDDVDW SH-S202H"	, "SB00"	},
	{ "TSSTcorp CDDVDW SH-S202H"	, "SB01"	},
	{ "SAMSUNG SP0822N"		, "WA100-10"	},
	{ NULL				, NULL		}
};

/*
 * All hosts that use the 80c ribbon must use!
 * The name is derived from upper byte of word 93 and the 80c ribbon.
 * Returns 1 if an 80-wire cable (or equivalent, e.g. SATA) was detected.
 */
u8 eighty_ninty_three(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;
	int ivb = ide_in_drive_list(id, ivb_list);

	if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
		return 1;

	if (ivb)
		printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
				  drive->name);

	if (ata_id_is_sata(id) && !ivb)
		return 1;

	if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
		goto no_80w;

	/*
	 * FIXME:
	 * - change master/slave IDENTIFY order
	 * - force bit13 (80c cable present) check also for !ivb devices
	 *   (unless the slave device is pre-ATA3)
	 */
	if (id[ATA_ID_HW_CONFIG] & 0x4000)
		return 1;

	if (ivb) {
		const char *model = (char *)&id[ATA_ID_PROD];

		if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
			/*
			 * These ATAPI devices always report 80c cable
			 * so we have to depend on the host in this case.
			 */
			if (hwif->cbl == ATA_CBL_PATA80)
				return 1;
		} else {
			/* Depend on the device side cable detection. */
			if (id[ATA_ID_HW_CONFIG] & 0x2000)
				return 1;
		}
	}

no_80w:
	/* Warn once per drive, then remember that we warned. */
	if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
		return 0;

	printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
			    "limiting max speed to UDMA33\n",
			    drive->name,
			    hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");

	drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;

	return 0;
}

/* Drives that need the nIEN quirk (see IDE_DFLAG_NIEN_QUIRK users). */
static const char *nien_quirk_list[] = {
	"QUANTUM FIREBALLlct08 08",
	"QUANTUM FIREBALLP KA6.4",
	"QUANTUM FIREBALLP KA9.1",
	"QUANTUM FIREBALLP KX13.6",
	"QUANTUM FIREBALLP KX20.5",
	"QUANTUM FIREBALLP KX27.3",
	"QUANTUM FIREBALLP LM20.4",
	"QUANTUM FIREBALLP LM20.5",
	"FUJITSU MHZ2160BH G2",
	NULL
};

/* Set IDE_DFLAG_NIEN_QUIRK if the drive's model string matches the list. */
void ide_check_nien_quirk_list(ide_drive_t *drive)
{
	const char **list, *m = (char *)&drive->id[ATA_ID_PROD];

	for (list = nien_quirk_list; *list != NULL; list++)
		if (strstr(m, *list) != NULL) {
			drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
			return;
		}
}

/*
 * Re-read IDENTIFY data and refresh the cached transfer-mode words.
 * Returns 1 on success, 0 on failure (allocation or command error).
 */
int ide_driveid_update(ide_drive_t *drive)
{
	u16 *id;
	int rc;

	id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
	if (id == NULL)
		return 0;

	SELECT_MASK(drive, 1);
	rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id, 1);
	SELECT_MASK(drive, 0);

	if (rc)
		goto out_err;

	drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES];
	drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
	drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
	drive->id[ATA_ID_CFA_MODES] = id[ATA_ID_CFA_MODES];
	/* anything more ? */

	kfree(id);

	return 1;
out_err:
	if (rc == 2)
		printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
	kfree(id);
	return 0;
}

/*
 * Program a new transfer mode (PIO/SWDMA/MWDMA/UDMA) into the drive via
 * SET FEATURES - XFER, then update the cached IDENTIFY mode words to
 * reflect the newly selected mode.  Returns 0 on success or the
 * __ide_wait_stat() error code on failure.
 */
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	struct ide_taskfile tf;
	u16 *id = drive->id, i;
	int error = 0;
	u8 stat;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->dma_ops)	/* check if host supports DMA */
		hwif->dma_ops->dma_host_set(drive, 0);
#endif

	/* Skip setting PIO flow-control modes on pre-EIDE drives */
	if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
		goto skip;

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
	udelay(1);
	tp_ops->dev_select(drive);
	SELECT_MASK(drive, 1);
	udelay(1);
	tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);

	memset(&tf, 0, sizeof(tf));
	tf.feature = SETFEATURES_XFER;
	tf.nsect = speed;

	tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);

	tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);

	if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

	error = __ide_wait_stat(drive, drive->ready_stat,
				ATA_BUSY | ATA_DRQ | ATA_ERR,
				WAIT_CMD, &stat);

	SELECT_MASK(drive, 0);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	/* Clear the old mode bits before setting the new ones below. */
	if (speed >= XFER_SW_DMA_0) {
		id[ATA_ID_UDMA_MODES] &= ~0xFF00;
		id[ATA_ID_MWDMA_MODES] &= ~0x0700;
		id[ATA_ID_SWDMA_MODES] &= ~0x0700;
		if (ata_id_is_cfa(id))
			id[ATA_ID_CFA_MODES] &= ~0x0E00;
	} else if (ata_id_is_cfa(id))
		id[ATA_ID_CFA_MODES] &= ~0x01C0;

skip:
#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
		hwif->dma_ops->dma_host_set(drive, 1);
	else if (hwif->dma_ops)	/* check if host supports DMA */
		ide_dma_off_quietly(drive);
#endif

	/* Mark the newly selected mode in the cached IDENTIFY words
	 * (low byte = supported, high byte = selected). */
	if (speed >= XFER_UDMA_0) {
		i = 1 << (speed - XFER_UDMA_0);
		id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_MW_DMA_3) {
		i = speed - XFER_MW_DMA_2;
		id[ATA_ID_CFA_MODES] |= i << 9;
	} else if (speed >= XFER_MW_DMA_0) {
		i = 1 << (speed - XFER_MW_DMA_0);
		id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
	} else if (speed >= XFER_SW_DMA_0) {
		i = 1 << (speed - XFER_SW_DMA_0);
		id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_PIO_5) {
		i = speed - XFER_PIO_4;
		id[ATA_ID_CFA_MODES] |= i << 6;
	}

	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}

/*
 * This should get invoked any time we exit the driver to
 * wait for an interrupt response from a drive.  handler() points
 * at the appropriate code to handle the next interrupt, and a
 * timer is started to prevent us from waiting forever in case
 * something goes wrong (see the ide_timer_expiry() handler later on).
 *
 * See also ide_execute_command
 */
void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
		       unsigned int timeout)
{
	ide_hwif_t *hwif = drive->hwif;

	BUG_ON(hwif->handler);	/* only one outstanding handler allowed */
	hwif->handler = handler;
	hwif->timer.expires = jiffies + timeout;
	hwif->req_gen_timer = hwif->req_gen;
	add_timer(&hwif->timer);
}

/* Locked wrapper around __ide_set_handler(). */
void ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
		     unsigned int timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned long flags;

	spin_lock_irqsave(&hwif->lock, flags);
	__ide_set_handler(drive, handler, timeout);
	spin_unlock_irqrestore(&hwif->lock, flags);
}
EXPORT_SYMBOL(ide_set_handler);

/**
 *	ide_execute_command	-	execute an IDE command
 *	@drive: IDE drive to issue the command against
 *	@cmd: command
 *	@handler: handler for next phase
 *	@timeout: timeout for command
 *
 *	Helper function to issue an IDE command. This handles the
 *	atomicity requirements, command timing and ensures that the
 *	handler and IRQ setup do not race. All IDE command kick off
 *	should go via this function or do equivalent locking.
 */
void ide_execute_command(ide_drive_t *drive, struct ide_cmd *cmd,
			 ide_handler_t *handler, unsigned timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned long flags;

	spin_lock_irqsave(&hwif->lock, flags);
	if ((cmd->protocol != ATAPI_PROT_DMA &&
	     cmd->protocol != ATAPI_PROT_PIO) ||
	    (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT))
		__ide_set_handler(drive, handler, timeout);
	hwif->tp_ops->exec_command(hwif, cmd->tf.command);
	/*
	 * Drive takes 400nS to respond, we must avoid the IRQ being
	 * serviced before that.
	 *
	 * FIXME: we could skip this delay with care on non shared devices
	 */
	ndelay(400);
	spin_unlock_irqrestore(&hwif->lock, flags);
}

/*
 * ide_wait_not_busy() waits for the currently selected device on the hwif
 * to report a non-busy status, see comments in ide_probe_port().
 */
int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
{
	u8 stat = 0;

	while (timeout--) {
		/*
		 * Turn this into a schedule() sleep once I'm sure
		 * about locking issues (2.5 work ?).
		 */
		mdelay(1);
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0)
			return 0;
		/*
		 * Assume a value of 0xff means nothing is connected to
		 * the interface and it doesn't implement the pull-down
		 * resistor on D7.
		 */
		if (stat == 0xff)
			return -ENODEV;
		touch_softlockup_watchdog();
		touch_nmi_watchdog();
	}
	return -EBUSY;
}
gpl-2.0
embecosm/epiphany-gcc
gcc/testsuite/gcc.dg/vect/costmodel/ppc/costmodel-vect-76b.c
92
1388
/* { dg-require-effective-target vect_int } */ #include <stdarg.h> #include "../../tree-vect.h" #define N 16 #define OFF 4 /* Check handling of accesses for which the "initial condition" - the expression that represents the first location accessed - is more involved than just an ssa_name. */ int ib[N+OFF] __attribute__ ((__aligned__(16))) = {0, 1, 3, 5, 7, 11, 13, 17, 0, 2, 6, 10}; int main1 (int *pib) { int i; int ia[N+OFF]; int ic[N+OFF] = {0, 1, 3, 5, 7, 11, 13, 17, 0, 2, 6, 10}; for (i = OFF; i < N; i++) { pib[i - OFF] = ic[i]; } /* check results: */ for (i = OFF; i < N; i++) { if (pib[i - OFF] != ic[i]) abort (); } return 0; } int main (void) { check_vect (); main1 (&ib[OFF]); return 0; } /* Peeling to align the store is used. Overhead of peeling is too high. */ /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 0 "vect" { target vector_alignment_reachable } } } */ /* { dg-final { scan-tree-dump-times "vectorization not profitable" 1 "vect" { target { vector_alignment_reachable && {! vect_no_align} } } } } */ /* Versioning to align the store is used. Overhead of versioning is not too high. */ /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target { vect_no_align || {! vector_alignment_reachable} } } } } */ /* { dg-final { cleanup-tree-dump "vect" } } */
gpl-2.0
mcmenaminadrian/vmufat
drivers/gpu/drm/exynos/exynos_drm_encoder.c
92
11649
/* exynos_drm_encoder.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Authors:
 *	Inki Dae <inki.dae@samsung.com>
 *	Joonyoung Shim <jy0922.shim@samsung.com>
 *	Seung-Woo Kim <sw0312.kim@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm_crtc_helper.h"

#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"

#define to_exynos_encoder(x)	container_of(x, struct exynos_drm_encoder,\
				drm_encoder)

/*
 * exynos specific encoder structure.
 *
 * @drm_encoder: encoder object.
 * @manager: specific encoder has its own manager to control a hardware
 *	appropriately and we can access a hardware drawing on this manager.
 * @dpms: store the encoder dpms value.
 */
struct exynos_drm_encoder {
	struct drm_encoder		drm_encoder;
	struct exynos_drm_manager	*manager;
	int				dpms;
};

/* Call the manager's display power_on hook for every connector that is
 * attached to this encoder. */
static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_connector *connector;
	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			struct exynos_drm_display_ops *display_ops =
							manager->display_ops;

			DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
					connector->base.id, mode);
			if (display_ops && display_ops->power_on)
				display_ops->power_on(manager->dev, mode);
		}
	}
}

/* drm_encoder_helper_funcs.dpms: apply hardware settings on power-up and
 * switch display power; caches the mode to skip redundant transitions. */
static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
	struct exynos_drm_manager_ops *manager_ops = manager->ops;
	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);

	DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);

	if (exynos_encoder->dpms == mode) {
		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
		return;
	}

	mutex_lock(&dev->struct_mutex);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		/* re-apply hardware state before powering the display on */
		if (manager_ops && manager_ops->apply)
			manager_ops->apply(manager->dev);
		exynos_drm_display_power(encoder, mode);
		exynos_encoder->dpms = mode;
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		exynos_drm_display_power(encoder, mode);
		exynos_encoder->dpms = mode;
		break;
	default:
		DRM_ERROR("unspecified mode %d\n", mode);
		break;
	}

	mutex_unlock(&dev->struct_mutex);
}

/* drm_encoder_helper_funcs.mode_fixup: no adjustment needed, accept
 * every mode as-is. */
static bool
exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* drm framework doesn't check NULL. */

	return true;
}

/* drm_encoder_helper_funcs.mode_set: push the adjusted mode and the
 * current overlay setup to the manager for attached connectors. */
static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
					 struct drm_display_mode *mode,
					 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_connector *connector;
	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
	struct exynos_drm_manager_ops *manager_ops = manager->ops;
	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
	struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev,
						encoder->crtc);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mode = adjusted_mode;	/* hardware is programmed with the adjusted mode */

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			if (manager_ops && manager_ops->mode_set)
				manager_ops->mode_set(manager->dev, mode);

			if (overlay_ops && overlay_ops->mode_set)
				overlay_ops->mode_set(manager->dev, overlay);
		}
	}
}

/* drm_encoder_helper_funcs.prepare: intentionally empty. */
static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* drm framework doesn't check NULL. */
}

/* drm_encoder_helper_funcs.commit: hand commit off to the manager. */
static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
	struct exynos_drm_manager_ops *manager_ops = manager->ops;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (manager_ops && manager_ops->commit)
		manager_ops->commit(manager->dev);
}

static struct drm_crtc *
exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
{
	return encoder->crtc;
}

static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
	.dpms		= exynos_drm_encoder_dpms,
	.mode_fixup	= exynos_drm_encoder_mode_fixup,
	.mode_set	= exynos_drm_encoder_mode_set,
	.prepare	= exynos_drm_encoder_prepare,
	.commit		= exynos_drm_encoder_commit,
	.get_crtc	= exynos_drm_encoder_get_crtc,
};

/* drm_encoder_funcs.destroy: detach from the manager pipe and free. */
static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
{
	struct exynos_drm_encoder *exynos_encoder =
		to_exynos_encoder(encoder);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_encoder->manager->pipe = -1;

	drm_encoder_cleanup(encoder);
	kfree(exynos_encoder);
}

static struct drm_encoder_funcs exynos_encoder_funcs = {
	.destroy = exynos_drm_encoder_destroy,
};

/*
 * Allocate and register an exynos encoder bound to @manager.
 * Returns the embedded drm_encoder, or NULL on bad arguments or OOM.
 */
struct drm_encoder *
exynos_drm_encoder_create(struct drm_device *dev,
			   struct exynos_drm_manager *manager,
			   unsigned int possible_crtcs)
{
	struct drm_encoder *encoder;
	struct exynos_drm_encoder *exynos_encoder;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!manager || !possible_crtcs)
		return NULL;

	if (!manager->dev)
		return NULL;

	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
	if (!exynos_encoder) {
		DRM_ERROR("failed to allocate encoder\n");
		return NULL;
	}

	exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
	exynos_encoder->manager = manager;
	encoder = &exynos_encoder->drm_encoder;
	encoder->possible_crtcs = possible_crtcs;

	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);

	drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
			DRM_MODE_ENCODER_TMDS);

	drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);

	DRM_DEBUG_KMS("encoder has been created\n");

	return encoder;
}

struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder)
{
	return to_exynos_encoder(encoder)->manager;
}

/* Invoke fn() on every encoder currently associated with @crtc, whether
 * attached directly or matched through the manager's cached pipe. */
void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
			    void (*fn)(struct drm_encoder *, void *))
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct exynos_drm_private *private = dev->dev_private;
	struct exynos_drm_manager *manager;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		/*
		 * if crtc is detached from encoder, check pipe,
		 * otherwise check crtc attached to encoder
		 */
		if (!encoder->crtc) {
			manager = to_exynos_encoder(encoder)->manager;
			if (manager->pipe < 0 ||
					private->crtc[manager->pipe] != crtc)
				continue;
		} else {
			if (encoder->crtc != crtc)
				continue;
		}

		fn(encoder, data);
	}
}

/* exynos_drm_fn_encoder callback: enable vblank on the manager;
 * data carries the crtc index used to (re)bind the manager pipe. */
void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
{
	struct exynos_drm_manager *manager =
		to_exynos_encoder(encoder)->manager;
	struct exynos_drm_manager_ops *manager_ops = manager->ops;
	int crtc = *(int *)data;

	if (manager->pipe == -1)
		manager->pipe = crtc;

	if (manager_ops->enable_vblank)
		manager_ops->enable_vblank(manager->dev);
}

/* exynos_drm_fn_encoder callback: disable vblank on the manager. */
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
{
	struct exynos_drm_manager *manager =
		to_exynos_encoder(encoder)->manager;
	struct exynos_drm_manager_ops *manager_ops = manager->ops;
	int crtc = *(int *)data;

	if (manager->pipe == -1)
		manager->pipe = crtc;

	if (manager_ops->disable_vblank)
		manager_ops->disable_vblank(manager->dev);
}

/* exynos_drm_fn_encoder callback: commit the overlay at zpos
 * (*(int *)data if given, otherwise DEFAULT_ZPOS). */
void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
					  void *data)
{
	struct exynos_drm_manager *manager =
		to_exynos_encoder(encoder)->manager;
	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
	int zpos = DEFAULT_ZPOS;

	if (data)
		zpos = *(int *)data;

	if (overlay_ops && overlay_ops->commit)
		overlay_ops->commit(manager->dev, zpos);
}

/* exynos_drm_fn_encoder callback: bind the manager pipe to the crtc
 * index in data, then commit the default overlay. */
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
{
	struct exynos_drm_manager *manager =
		to_exynos_encoder(encoder)->manager;
	int crtc = *(int *)data;
	int zpos = DEFAULT_ZPOS;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * when crtc is detached from encoder, this pipe is used
	 * to select manager operation
	 */
	manager->pipe = crtc;

	exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
}

/* exynos_drm_fn_encoder callback: run the full dpms path with the mode
 * in data, then mirror it into the cached dpms value. */
void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
{
	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
	int mode = *(int *)data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_drm_encoder_dpms(encoder, mode);

	exynos_encoder->dpms = mode;
}

/* exynos_drm_fn_encoder callback: forward dpms to the manager, update
 * attached connectors, and release the pipe on power-down if detached. */
void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
{
	struct drm_device *dev = encoder->dev;
	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
	struct exynos_drm_manager *manager = exynos_encoder->manager;
	struct exynos_drm_manager_ops *manager_ops = manager->ops;
	struct drm_connector *connector;
	int mode = *(int *)data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (manager_ops && manager_ops->dpms)
		manager_ops->dpms(manager->dev, mode);

	/*
	 * set current dpms mode to the connector connected to
	 * current encoder. connector->dpms would be checked
	 * at drm_helper_connector_dpms()
	 */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		if (connector->encoder == encoder)
			connector->dpms = mode;

	/*
	 * if this condition is ok then it means that the crtc is already
	 * detached from encoder and last function for detaching is properly
	 * done, so clear pipe from manager to prevent repeated call.
	 */
	if (mode > DRM_MODE_DPMS_ON) {
		if (!encoder->crtc)
			manager->pipe = -1;
	}
}

/* exynos_drm_fn_encoder callback: program overlay settings (data is the
 * overlay) into the manager. */
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
{
	struct exynos_drm_manager *manager =
		to_exynos_encoder(encoder)->manager;
	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
	struct exynos_drm_overlay *overlay = data;

	if (overlay_ops && overlay_ops->mode_set)
		overlay_ops->mode_set(manager->dev, overlay);
}

/* exynos_drm_fn_encoder callback: disable the overlay at zpos
 * (*(int *)data if given, otherwise DEFAULT_ZPOS). */
void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
{
	struct exynos_drm_manager *manager =
		to_exynos_encoder(encoder)->manager;
	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
	int zpos = DEFAULT_ZPOS;

	DRM_DEBUG_KMS("\n");

	if (data)
		zpos = *(int *)data;

	if (overlay_ops && overlay_ops->disable)
		overlay_ops->disable(manager->dev, zpos);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM Encoder Driver");
MODULE_LICENSE("GPL");
gpl-2.0
LG-V10/LGV10__pplus_msm8992_kernel
fs/ext4/mballoc.c
92
146431
/* * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com * Written by Alex Tomas <alex@clusterfs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public Licens * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- */ /* * mballoc.c contains the multiblocks allocation routines */ #include "ext4_jbd2.h" #include "mballoc.h" #include <linux/log2.h> #include <linux/module.h> #include <linux/slab.h> #include <trace/events/ext4.h> #ifdef CONFIG_EXT4_DEBUG ushort ext4_mballoc_debug __read_mostly; module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644); MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc"); #endif /* * MUSTDO: * - test ext4_ext_search_left() and ext4_ext_search_right() * - search for metadata in few groups * * TODO v4: * - normalization should take into account whether file is still open * - discard preallocations if no free space left (policy?) * - don't normalize tails * - quota * - reservation for superuser * * TODO v3: * - bitmap read-ahead (proposed by Oleg Drokin aka green) * - track min/max extents in each group for better group selection * - mb_mark_used() may allocate chunk right after splitting buddy * - tree of groups sorted by number of free blocks * - error handling */ /* * The allocation request involve request for multiple number of blocks * near to the goal(block) value specified. 
* * During initialization phase of the allocator we decide to use the * group preallocation or inode preallocation depending on the size of * the file. The size of the file could be the resulting file size we * would have after allocation, or the current file size, which ever * is larger. If the size is less than sbi->s_mb_stream_request we * select to use the group preallocation. The default value of * s_mb_stream_request is 16 blocks. This can also be tuned via * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in * terms of number of blocks. * * The main motivation for having small file use group preallocation is to * ensure that we have small files closer together on the disk. * * First stage the allocator looks at the inode prealloc list, * ext4_inode_info->i_prealloc_list, which contains list of prealloc * spaces for this particular inode. The inode prealloc space is * represented as: * * pa_lstart -> the logical start block for this prealloc space * pa_pstart -> the physical start block for this prealloc space * pa_len -> length for this prealloc space (in clusters) * pa_free -> free space available in this prealloc space (in clusters) * * The inode preallocation space is used looking at the _logical_ start * block. If only the logical file block falls within the range of prealloc * space we will consume the particular prealloc space. This makes sure that * we have contiguous physical blocks representing the file blocks * * The important thing to be noted in case of inode prealloc space is that * we don't modify the values associated to inode prealloc space except * pa_free. * * If we are not able to find blocks in the inode prealloc space and if we * have the group allocation flag set then we look at the locality group * prealloc space. These are per CPU prealloc list represented as * * ext4_sb_info.s_locality_groups[smp_processor_id()] * * The reason for having a per cpu locality group is to reduce the contention * between CPUs. 
It is possible to get scheduled at this point. * * The locality group prealloc space is used looking at whether we have * enough free space (pa_free) within the prealloc space. * * If we can't allocate blocks via inode prealloc or/and locality group * prealloc then we look at the buddy cache. The buddy cache is represented * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets * mapped to the buddy and bitmap information regarding different * groups. The buddy information is attached to buddy cache inode so that * we can access them through the page cache. The information regarding * each group is loaded via ext4_mb_load_buddy. The information involve * block bitmap and buddy information. The information are stored in the * inode as: * * { page } * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... * * * one block each for bitmap and buddy information. So for each group we * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / * blocksize) blocks. So it can have information regarding groups_per_page * which is blocks_per_page/2 * * The buddy cache inode is not stored on disk. The inode is thrown * away when the filesystem is unmounted. * * We look for count number of blocks in the buddy cache. If we were able * to locate that many free blocks we return with additional information * regarding rest of the contiguous physical block available * * Before allocating blocks via buddy cache we normalize the request * blocks. This ensure we ask for more blocks that we needed. The extra * blocks that we get after allocation is added to the respective prealloc * list. In case of inode preallocation we follow a list of heuristics * based on file size. This can be found in ext4_mb_normalize_request. If * we are doing a group prealloc we try to normalize the request to * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is * dependent on the cluster size; for non-bigalloc file systems, it is * 512 blocks. 
This can be tuned via * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in * terms of number of blocks. If we have mounted the file system with -O * stripe=<value> option the group prealloc request is normalized to the * the smallest multiple of the stripe value (sbi->s_stripe) which is * greater than the default mb_group_prealloc. * * The regular allocator (using the buddy cache) supports a few tunables. * * /sys/fs/ext4/<partition>/mb_min_to_scan * /sys/fs/ext4/<partition>/mb_max_to_scan * /sys/fs/ext4/<partition>/mb_order2_req * * The regular allocator uses buddy scan only if the request len is power of * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The * value of s_mb_order2_reqs can be tuned via * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to * stripe size (sbi->s_stripe), we try to search for contiguous block in * stripe size. This should result in better allocation on RAID setups. If * not, we search in the specific group using bitmap for best extents. The * tunable min_to_scan and max_to_scan control the behaviour here. * min_to_scan indicate how long the mballoc __must__ look for a best * extent and max_to_scan indicates how long the mballoc __can__ look for a * best extent in the found extents. Searching for the blocks starts with * the group specified as the goal value in allocation context via * ac_g_ex. Each group is first checked based on the criteria whether it * can be used for allocation. ext4_mb_good_group explains how the groups are * checked. * * Both the prealloc space are getting populated as above. So for the first * request we will hit the buddy cache which will result in this prealloc * space getting filled. The prealloc space is then later used for the * subsequent request. 
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocated can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
* * all operations can be expressed as: * - init buddy: buddy = on-disk + PAs * - new PA: buddy += N; PA = N * - use inode PA: on-disk += N; PA -= N * - discard inode PA buddy -= on-disk - PA; PA = 0 * - use locality group PA on-disk += N; PA -= N * - discard locality group PA buddy -= PA; PA = 0 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap * is used in real operation because we can't know actual used * bits from PA, only from on-disk bitmap * * if we follow this strict logic, then all operations above should be atomic. * given some of them can block, we'd have to use something like semaphores * killing performance on high-end SMP hardware. let's try to relax it using * the following knowledge: * 1) if buddy is referenced, it's already initialized * 2) while block is used in buddy and the buddy is referenced, * nobody can re-allocate that block * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has * bit set and PA claims same block, it's OK. IOW, one can set bit in * on-disk bitmap if buddy has same bit set or/and PA covers corresponded * block * * so, now we're building a concurrency table: * - init buddy vs. * - new PA * blocks for PA are allocated in the buddy, buddy must be referenced * until PA is linked to allocation group to avoid concurrent buddy init * - use inode PA * we need to make sure that either on-disk bitmap or PA has uptodate data * given (3) we care that PA-=N operation doesn't interfere with init * - discard inode PA * the simplest way would be to have buddy initialized by the discard * - use locality group PA * again PA-=N must be serialized with init * - discard locality group PA * the simplest way would be to have buddy initialized by the discard * - new PA vs. 
 *  - use inode PA
 *    i_data_sem serializes them
 *  - discard inode PA
 *    discard process must wait until PA isn't used by another process
 *  - use locality group PA
 *    some mutex should serialize them
 *  - discard locality group PA
 *    discard process must wait until PA isn't used by another process
 *  - use inode PA
 *  - use inode PA
 *    i_data_sem or another mutex should serialize them
 *  - discard inode PA
 *    discard process must wait until PA isn't used by another process
 *  - use locality group PA
 *    nothing wrong here -- they're different PAs covering different blocks
 *  - discard locality group PA
 *    discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 * - PA is referenced and while it is no discard is possible
 * - PA is referenced until block isn't marked in on-disk bitmap
 * - PA changes only after on-disk bitmap
 * - discard must not compete with init. either init is done before
 *   any discard or they're serialized somehow
 * - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case when we've used PA to emptiness.
no need to modify buddy * in this case, but we should care about concurrent init * */ /* * Logic in few words: * * - allocation: * load group * find blocks * mark bits in on-disk bitmap * release group * * - use preallocation: * find proper PA (per-inode or group) * load group * mark bits in on-disk bitmap * release group * release PA * * - free: * load group * mark bits in on-disk bitmap * release group * * - discard preallocations in group: * mark PAs deleted * move them onto local list * load on-disk bitmap * load group * remove PA from object (inode or locality group) * mark free blocks in-core * * - discard inode's preallocations: */ /* * Locking rules * * Locks: * - bitlock on a group (group) * - object (inode/locality) (object) * - per-pa lock (pa) * * Paths: * - new pa * object * group * * - find and use pa: * pa * * - release consumed pa: * pa * group * object * * - generate in-core bitmap: * group * pa * * - discard all for given object (inode, locality group): * object * pa * group * * - discard all for given group: * group * pa * group * object * */ static struct kmem_cache *ext4_pspace_cachep; static struct kmem_cache *ext4_ac_cachep; static struct kmem_cache *ext4_free_data_cachep; /* We create slab caches for groupinfo data structures based on the * superblock block size. 
There will be one per mounted filesystem for * each unique s_blocksize_bits */ #define NR_GRPINFO_CACHES 8 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", "ext4_groupinfo_64k", "ext4_groupinfo_128k" }; static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group); static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ext4_group_t group); static void ext4_free_data_callback(struct super_block *sb, struct ext4_journal_cb_entry *jce, int rc); static inline void *mb_correct_addr_and_bit(int *bit, void *addr) { #if BITS_PER_LONG == 64 *bit += ((unsigned long) addr & 7UL) << 3; addr = (void *) ((unsigned long) addr & ~7UL); #elif BITS_PER_LONG == 32 *bit += ((unsigned long) addr & 3UL) << 3; addr = (void *) ((unsigned long) addr & ~3UL); #else #error "how many bits you are?!" 
#endif return addr; } static inline int mb_test_bit(int bit, void *addr) { /* * ext4_test_bit on architecture like powerpc * needs unsigned long aligned address */ addr = mb_correct_addr_and_bit(&bit, addr); return ext4_test_bit(bit, addr); } static inline void mb_set_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); ext4_set_bit(bit, addr); } static inline void mb_clear_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); ext4_clear_bit(bit, addr); } static inline int mb_test_and_clear_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); return ext4_test_and_clear_bit(bit, addr); } static inline int mb_find_next_zero_bit(void *addr, int max, int start) { int fix = 0, ret, tmpmax; addr = mb_correct_addr_and_bit(&fix, addr); tmpmax = max + fix; start += fix; ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; if (ret > max) return max; return ret; } static inline int mb_find_next_bit(void *addr, int max, int start) { int fix = 0, ret, tmpmax; addr = mb_correct_addr_and_bit(&fix, addr); tmpmax = max + fix; start += fix; ret = ext4_find_next_bit(addr, tmpmax, start) - fix; if (ret > max) return max; return ret; } static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) { char *bb; BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); BUG_ON(max == NULL); if (order > e4b->bd_blkbits + 1) { *max = 0; return NULL; } /* at order 0 we see each particular block */ if (order == 0) { *max = 1 << (e4b->bd_blkbits + 3); return e4b->bd_bitmap; } bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; return bb; } #ifdef DOUBLE_CHECK static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { int i; struct super_block *sb = e4b->bd_sb; if (unlikely(e4b->bd_info->bb_bitmap == NULL)) return; assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); for (i = 0; i < count; i++) { if (!mb_test_bit(first + i, 
e4b->bd_info->bb_bitmap)) { ext4_fsblk_t blocknr; blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += EXT4_C2B(EXT4_SB(sb), first + i); ext4_grp_locked_error(sb, e4b->bd_group, inode ? inode->i_ino : 0, blocknr, "freeing block already freed " "(bit %u)", first + i); } mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); } } static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) { int i; if (unlikely(e4b->bd_info->bb_bitmap == NULL)) return; assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); for (i = 0; i < count; i++) { BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); mb_set_bit(first + i, e4b->bd_info->bb_bitmap); } } static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { unsigned char *b1, *b2; int i; b1 = (unsigned char *) e4b->bd_info->bb_bitmap; b2 = (unsigned char *) bitmap; for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { if (b1[i] != b2[i]) { ext4_msg(e4b->bd_sb, KERN_ERR, "corruption in group %u " "at byte %u(%u): %x in copy != %x " "on disk/prealloc", e4b->bd_group, i, i * 8, b1[i], b2[i]); BUG(); } } } } #else static inline void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { return; } static inline void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) { return; } static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { return; } #endif #ifdef AGGRESSIVE_CHECK #define MB_CHECK_ASSERT(assert) \ do { \ if (!(assert)) { \ printk(KERN_EMERG \ "Assertion failure in %s() at %s:%d: \"%s\"\n", \ function, file, line, # assert); \ BUG(); \ } \ } while (0) static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, const char *function, int line) { struct super_block *sb = e4b->bd_sb; int order = e4b->bd_blkbits + 1; int max; int max2; int i; int j; int k; int count; struct ext4_group_info *grp; int fragments = 0; int fstart; struct list_head 
*cur; void *buddy; void *buddy2; { static int mb_check_counter; if (mb_check_counter++ % 100 != 0) return 0; } while (order > 1) { buddy = mb_find_buddy(e4b, order, &max); MB_CHECK_ASSERT(buddy); buddy2 = mb_find_buddy(e4b, order - 1, &max2); MB_CHECK_ASSERT(buddy2); MB_CHECK_ASSERT(buddy != buddy2); MB_CHECK_ASSERT(max * 2 == max2); count = 0; for (i = 0; i < max; i++) { if (mb_test_bit(i, buddy)) { /* only single bit in buddy2 may be 1 */ if (!mb_test_bit(i << 1, buddy2)) { MB_CHECK_ASSERT( mb_test_bit((i<<1)+1, buddy2)); } else if (!mb_test_bit((i << 1) + 1, buddy2)) { MB_CHECK_ASSERT( mb_test_bit(i << 1, buddy2)); } continue; } /* both bits in buddy2 must be 1 */ MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); for (j = 0; j < (1 << order); j++) { k = (i * (1 << order)) + j; MB_CHECK_ASSERT( !mb_test_bit(k, e4b->bd_bitmap)); } count++; } MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); order--; } fstart = -1; buddy = mb_find_buddy(e4b, 0, &max); for (i = 0; i < max; i++) { if (!mb_test_bit(i, buddy)) { MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free); if (fstart == -1) { fragments++; fstart = i; } continue; } fstart = -1; /* check used bits only */ for (j = 0; j < e4b->bd_blkbits + 1; j++) { buddy2 = mb_find_buddy(e4b, j, &max2); k = i >> j; MB_CHECK_ASSERT(k < max2); MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); } } MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); grp = ext4_get_group_info(sb, e4b->bd_group); list_for_each(cur, &grp->bb_prealloc_list) { ext4_group_t groupnr; struct ext4_prealloc_space *pa; pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); MB_CHECK_ASSERT(groupnr == e4b->bd_group); for (i = 0; i < pa->pa_len; i++) MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); } return 0; } #undef MB_CHECK_ASSERT #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ 
__FILE__, __func__, __LINE__) #else #define mb_check_buddy(e4b) #endif /* * Divide blocks started from @first with length @len into * smaller chunks with power of 2 blocks. * Clear the bits in bitmap which the blocks of the chunk(s) covered, * then increase bb_counters[] for corresponded chunk size. */ static void ext4_mb_mark_free_simple(struct super_block *sb, void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t min; ext4_grpblk_t max; ext4_grpblk_t chunk; unsigned short border; BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); border = 2 << sb->s_blocksize_bits; while (len > 0) { /* find how many blocks can be covered since this position */ max = ffs(first | border) - 1; /* find how many blocks of power 2 we need to mark */ min = fls(len) - 1; if (max < min) min = max; chunk = 1 << min; /* mark multiblock chunks only */ grp->bb_counters[min]++; if (min > 0) mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]); len -= chunk; first += chunk; } } /* * Cache the order of the largest free extent we have available in this block * group. 
*/ static void mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) { int i; int bits; grp->bb_largest_free_order = -1; /* uninit */ bits = sb->s_blocksize_bits + 1; for (i = bits; i >= 0; i--) { if (grp->bb_counters[i] > 0) { grp->bb_largest_free_order = i; break; } } } static noinline_for_stack void ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); ext4_grpblk_t i = 0; ext4_grpblk_t first; ext4_grpblk_t len; unsigned free = 0; unsigned fragments = 0; unsigned long long period = get_cycles(); /* initialize buddy from bitmap which is aggregation * of on-disk bitmap and preallocations */ i = mb_find_next_zero_bit(bitmap, max, 0); grp->bb_first_free = i; while (i < max) { fragments++; first = i; i = mb_find_next_bit(bitmap, max, i); len = i - first; free += len; if (len > 1) ext4_mb_mark_free_simple(sb, buddy, first, len, grp); else grp->bb_counters[0]++; if (i < max) i = mb_find_next_zero_bit(bitmap, max, i); } grp->bb_fragments = fragments; if (free != grp->bb_free) { ext4_grp_locked_error(sb, group, 0, 0, "%u clusters in bitmap, %u in gd", free, grp->bb_free); /* * If we intent to continue, we consider group descritor * corrupt and update bb_free using bitmap value */ grp->bb_free = free; } mb_set_largest_free_order(sb, grp); clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); period = get_cycles() - period; spin_lock(&EXT4_SB(sb)->s_bal_lock); EXT4_SB(sb)->s_mb_buddies_generated++; EXT4_SB(sb)->s_mb_generation_time += period; spin_unlock(&EXT4_SB(sb)->s_bal_lock); } static void mb_regenerate_buddy(struct ext4_buddy *e4b) { int count; int order = 1; void *buddy; while ((buddy = mb_find_buddy(e4b, order++, &count))) { ext4_set_bits(buddy, 0, count); } e4b->bd_info->bb_fragments = 0; memset(e4b->bd_info->bb_counters, 0, sizeof(*e4b->bd_info->bb_counters) * 
(e4b->bd_sb->s_blocksize_bits + 2)); ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, e4b->bd_bitmap, e4b->bd_group); } /* The buddy information is attached the buddy cache inode * for convenience. The information regarding each group * is loaded via ext4_mb_load_buddy. The information involve * block bitmap and buddy information. The information are * stored in the inode as * * { page } * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... * * * one block each for bitmap and buddy information. * So for each group we take up 2 blocks. A page can * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. * So it can have information regarding groups_per_page which * is blocks_per_page/2 * * Locking note: This routine takes the block group lock of all groups * for this page; do not hold this lock when calling this routine! */ static int ext4_mb_init_cache(struct page *page, char *incore) { ext4_group_t ngroups; int blocksize; int blocks_per_page; int groups_per_page; int err = 0; int i; ext4_group_t first_group, group; int first_block; struct super_block *sb; struct buffer_head *bhs; struct buffer_head **bh = NULL; struct inode *inode; char *data; char *bitmap; struct ext4_group_info *grinfo; mb_debug(1, "init page %lu\n", page->index); inode = page->mapping->host; sb = inode->i_sb; ngroups = ext4_get_groups_count(sb); blocksize = 1 << inode->i_blkbits; blocks_per_page = PAGE_CACHE_SIZE / blocksize; groups_per_page = blocks_per_page >> 1; if (groups_per_page == 0) groups_per_page = 1; /* allocate buffer_heads to read bitmaps */ if (groups_per_page > 1) { i = sizeof(struct buffer_head *) * groups_per_page; bh = kzalloc(i, GFP_NOFS); if (bh == NULL) { err = -ENOMEM; goto out; } } else bh = &bhs; first_group = page->index * blocks_per_page / 2; /* read all groups the page covers into the cache */ for (i = 0, group = first_group; i < groups_per_page; i++, group++) { if (group >= ngroups) break; grinfo = ext4_get_group_info(sb, group); /* * If page is uptodate 
then we came here after online resize * which added some new uninitialized group info structs, so * we must skip all initialized uptodate buddies on the page, * which may be currently in use by an allocating task. */ if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { bh[i] = NULL; continue; } if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) { err = -ENOMEM; goto out; } mb_debug(1, "read bitmap for group %u\n", group); } /* wait for I/O completion */ for (i = 0, group = first_group; i < groups_per_page; i++, group++) { if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) { err = -EIO; goto out; } } first_block = page->index * blocks_per_page; for (i = 0; i < blocks_per_page; i++) { group = (first_block + i) >> 1; if (group >= ngroups) break; if (!bh[group - first_group]) /* skip initialized uptodate buddy */ continue; /* * data carry information regarding this * particular group in the format specified * above * */ data = page_address(page) + (i * blocksize); bitmap = bh[group - first_group]->b_data; /* * We place the buddy block and bitmap block * close together */ if ((first_block + i) & 1) { /* this is block of buddy */ BUG_ON(incore == NULL); mb_debug(1, "put buddy for group %u in page %lu/%x\n", group, page->index, i * blocksize); trace_ext4_mb_buddy_bitmap_load(sb, group); grinfo = ext4_get_group_info(sb, group); grinfo->bb_fragments = 0; memset(grinfo->bb_counters, 0, sizeof(*grinfo->bb_counters) * (sb->s_blocksize_bits+2)); /* * incore got set to the group block bitmap below */ ext4_lock_group(sb, group); /* init the buddy */ memset(data, 0xff, blocksize); ext4_mb_generate_buddy(sb, data, incore, group); ext4_unlock_group(sb, group); incore = NULL; } else { /* this is block of bitmap */ BUG_ON(incore != NULL); mb_debug(1, "put bitmap for group %u in page %lu/%x\n", group, page->index, i * blocksize); trace_ext4_mb_bitmap_load(sb, group); /* see comments in ext4_mb_put_pa() */ ext4_lock_group(sb, group); memcpy(data, bitmap, blocksize); /* 
mark all preallocated blks used in in-core bitmap */ ext4_mb_generate_from_pa(sb, data, group); ext4_mb_generate_from_freelist(sb, data, group); ext4_unlock_group(sb, group); /* set incore so that the buddy information can be * generated using this */ incore = data; } } SetPageUptodate(page); out: if (bh) { for (i = 0; i < groups_per_page; i++) brelse(bh[i]); if (bh != &bhs) kfree(bh); } return err; } /* * Lock the buddy and bitmap pages. This make sure other parallel init_group * on the same buddy page doesn't happen whild holding the buddy page lock. * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap * are on the same page e4b->bd_buddy_page is NULL and return value is 0. */ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b) { struct inode *inode = EXT4_SB(sb)->s_buddy_cache; int block, pnum, poff; int blocks_per_page; struct page *page; e4b->bd_buddy_page = NULL; e4b->bd_bitmap_page = NULL; blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. * So for each group we need two blocks. 
*/ block = group * 2; pnum = block / blocks_per_page; poff = block % blocks_per_page; page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (!page) return -EIO; BUG_ON(page->mapping != inode->i_mapping); e4b->bd_bitmap_page = page; e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); if (blocks_per_page >= 2) { /* buddy and bitmap are on the same page */ return 0; } block++; pnum = block / blocks_per_page; page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (!page) return -EIO; BUG_ON(page->mapping != inode->i_mapping); e4b->bd_buddy_page = page; return 0; } static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) { unlock_page(e4b->bd_bitmap_page); page_cache_release(e4b->bd_bitmap_page); } if (e4b->bd_buddy_page) { unlock_page(e4b->bd_buddy_page); page_cache_release(e4b->bd_buddy_page); } } /* * Locking note: This routine calls ext4_mb_init_cache(), which takes the * block group lock of all groups for this page; do not hold the BG lock when * calling this routine! */ static noinline_for_stack int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) { struct ext4_group_info *this_grp; struct ext4_buddy e4b; struct page *page; int ret = 0; might_sleep(); mb_debug(1, "init group %u\n", group); this_grp = ext4_get_group_info(sb, group); /* * This ensures that we don't reinit the buddy cache * page which map to the group from which we are already * allocating. If we are looking at the buddy cache we would * have taken a reference using ext4_mb_load_buddy and that * would have pinned buddy page to page cache. 
*/ ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { /* * somebody initialized the group * return without doing anything */ goto err; } page = e4b.bd_bitmap_page; ret = ext4_mb_init_cache(page, NULL); if (ret) goto err; if (!PageUptodate(page)) { ret = -EIO; goto err; } mark_page_accessed(page); if (e4b.bd_buddy_page == NULL) { /* * If both the bitmap and buddy are in * the same page we don't need to force * init the buddy */ ret = 0; goto err; } /* init buddy cache */ page = e4b.bd_buddy_page; ret = ext4_mb_init_cache(page, e4b.bd_bitmap); if (ret) goto err; if (!PageUptodate(page)) { ret = -EIO; goto err; } mark_page_accessed(page); err: ext4_mb_put_buddy_page_lock(&e4b); return ret; } /* * Locking note: This routine calls ext4_mb_init_cache(), which takes the * block group lock of all groups for this page; do not hold the BG lock when * calling this routine! */ static noinline_for_stack int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b) { int blocks_per_page; int block; int pnum; int poff; struct page *page; int ret; struct ext4_group_info *grp; struct ext4_sb_info *sbi = EXT4_SB(sb); struct inode *inode = sbi->s_buddy_cache; might_sleep(); mb_debug(1, "load group %u\n", group); blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); e4b->bd_blkbits = sb->s_blocksize_bits; e4b->bd_info = grp; e4b->bd_sb = sb; e4b->bd_group = group; e4b->bd_buddy_page = NULL; e4b->bd_bitmap_page = NULL; if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { /* * we need full data about the group * to make a good selection */ ret = ext4_mb_init_group(sb, group); if (ret) return ret; } /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. * So for each group we need two blocks. 
*/ block = group * 2; pnum = block / blocks_per_page; poff = block % blocks_per_page; /* we could use find_or_create_page(), but it locks page * what we'd like to avoid in fast path ... */ page = find_get_page(inode->i_mapping, pnum); if (page == NULL || !PageUptodate(page)) { if (page) /* * drop the page reference and try * to get the page with lock. If we * are not uptodate that implies * somebody just created the page but * is yet to initialize the same. So * wait for it to initialize. */ page_cache_release(page); page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (page) { BUG_ON(page->mapping != inode->i_mapping); if (!PageUptodate(page)) { ret = ext4_mb_init_cache(page, NULL); if (ret) { unlock_page(page); goto err; } mb_cmp_bitmaps(e4b, page_address(page) + (poff * sb->s_blocksize)); } unlock_page(page); } } if (page == NULL || !PageUptodate(page)) { ret = -EIO; goto err; } e4b->bd_bitmap_page = page; e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); mark_page_accessed(page); block++; pnum = block / blocks_per_page; poff = block % blocks_per_page; page = find_get_page(inode->i_mapping, pnum); if (page == NULL || !PageUptodate(page)) { if (page) page_cache_release(page); page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); if (page) { BUG_ON(page->mapping != inode->i_mapping); if (!PageUptodate(page)) { ret = ext4_mb_init_cache(page, e4b->bd_bitmap); if (ret) { unlock_page(page); goto err; } } unlock_page(page); } } if (page == NULL || !PageUptodate(page)) { ret = -EIO; goto err; } e4b->bd_buddy_page = page; e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); mark_page_accessed(page); BUG_ON(e4b->bd_bitmap_page == NULL); BUG_ON(e4b->bd_buddy_page == NULL); return 0; err: if (page) page_cache_release(page); if (e4b->bd_bitmap_page) page_cache_release(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) page_cache_release(e4b->bd_buddy_page); e4b->bd_buddy = NULL; e4b->bd_bitmap = NULL; return ret; } static void 
ext4_mb_unload_buddy(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) page_cache_release(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) page_cache_release(e4b->bd_buddy_page); } static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) { int order = 1; void *bb; BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); bb = e4b->bd_buddy; while (order <= e4b->bd_blkbits + 1) { block = block >> 1; if (!mb_test_bit(block, bb)) { /* this block is part of buddy of order 'order' */ return order; } bb += 1 << (e4b->bd_blkbits - order); order++; } return 0; } static void mb_clear_bits(void *bm, int cur, int len) { __u32 *addr; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: clear whole word at once */ addr = bm + (cur >> 3); *addr = 0; cur += 32; continue; } mb_clear_bit(cur, bm); cur++; } } /* clear bits in given range * will return first found zero bit if any, -1 otherwise */ static int mb_test_and_clear_bits(void *bm, int cur, int len) { __u32 *addr; int zero_bit = -1; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: clear whole word at once */ addr = bm + (cur >> 3); if (*addr != (__u32)(-1) && zero_bit == -1) zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); *addr = 0; cur += 32; continue; } if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) zero_bit = cur; cur++; } return zero_bit; } void ext4_set_bits(void *bm, int cur, int len) { __u32 *addr; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: set whole word at once */ addr = bm + (cur >> 3); *addr = 0xffffffff; cur += 32; continue; } mb_set_bit(cur, bm); cur++; } } /* * _________________________________________________________________ */ static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) { if (mb_test_bit(*bit + side, bitmap)) { mb_clear_bit(*bit, bitmap); (*bit) -= side; return 1; } else { (*bit) += side; 
mb_set_bit(*bit, bitmap); return -1; } } static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) { int max; int order = 1; void *buddy = mb_find_buddy(e4b, order, &max); while (buddy) { void *buddy2; /* Bits in range [first; last] are known to be set since * corresponding blocks were allocated. Bits in range * (first; last) will stay set because they form buddies on * upper layer. We just deal with borders if they don't * align with upper layer and then go up. * Releasing entire group is all about clearing * single bit of highest order buddy. */ /* Example: * --------------------------------- * | 1 | 1 | 1 | 1 | * --------------------------------- * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | * --------------------------------- * 0 1 2 3 4 5 6 7 * \_____________________/ * * Neither [1] nor [6] is aligned to above layer. * Left neighbour [0] is free, so mark it busy, * decrease bb_counters and extend range to * [0; 6] * Right neighbour [7] is busy. It can't be coaleasced with [6], so * mark [6] free, increase bb_counters and shrink range to * [0; 5]. * Then shift range to [0; 2], go up and do the same. 
*/ if (first & 1) e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); if (!(last & 1)) e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); if (first > last) break; order++; if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) { mb_clear_bits(buddy, first, last - first + 1); e4b->bd_info->bb_counters[order - 1] += last - first + 1; break; } first >>= 1; last >>= 1; buddy = buddy2; } } static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { int left_is_free = 0; int right_is_free = 0; int block; int last = first + count - 1; struct super_block *sb = e4b->bd_sb; if (WARN_ON(count == 0)) return; BUG_ON(last >= (sb->s_blocksize << 3)); assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); mb_check_buddy(e4b); mb_free_blocks_double(inode, e4b, first, count); e4b->bd_info->bb_free += count; if (first < e4b->bd_info->bb_first_free) e4b->bd_info->bb_first_free = first; /* access memory sequentially: check left neighbour, * clear range and then check right neighbour */ if (first != 0) left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); if (unlikely(block != -1)) { ext4_fsblk_t blocknr; blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += EXT4_C2B(EXT4_SB(sb), block); ext4_grp_locked_error(sb, e4b->bd_group, inode ? inode->i_ino : 0, blocknr, "freeing already freed block " "(bit %u)", block); mb_regenerate_buddy(e4b); goto done; } /* let's maintain fragments counter */ if (left_is_free && right_is_free) e4b->bd_info->bb_fragments--; else if (!left_is_free && !right_is_free) e4b->bd_info->bb_fragments++; /* buddy[0] == bd_bitmap is a special case, so handle * it right away and let mb_buddy_mark_free stay free of * zero order checks. 
	 * Check if neighbours are to be coalesced,
	 * adjust bitmap bb_counters and borders appropriately.
	 */
	if (first & 1) {
		first += !left_is_free;
		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
	}
	if (!(last & 1)) {
		last -= !right_is_free;
		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
	}

	if (first <= last)
		mb_buddy_mark_free(e4b, first >> 1, last >> 1);

done:
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}

/*
 * Find the free extent that contains @block, extend it rightwards up to
 * @needed clusters using the buddy bitmaps, and describe it in *@ex.
 * Returns the extent length (0 if @block is in use).  Caller holds the
 * group lock.
 */
static int mb_find_extent(struct ext4_buddy *e4b, int block,
			  int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max, order;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, 0, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		/* block is allocated: report an empty extent */
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* find actual order */
	order = mb_find_order_for_block(e4b, block);
	block = block >> order;

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	/* keep appending the free chunk to the right of the current one
	 * until we have @needed clusters or hit an allocated cluster */
	while (needed > ex->fe_len &&
	       mb_find_buddy(e4b, order, &max)) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

/*
 * Mark the extent *@ex as allocated: set the range in the group bitmap,
 * propagate the allocation through the buddy bitmaps (splitting larger
 * buddies where needed) and update bb_free/bb_fragments/bb_counters.
 * Returns a packed history value: low 16 bits = remaining length when
 * the first buddy split happened, high bits = its order (0 if no split).
 * Caller holds the group lock.
 */
static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, e4b->bd_bitmap);
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		/* mark both halves free one order below; the loop will
		 * then re-examine @start at the smaller order */
		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}
	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);

	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* never hand out more than the goal length */
	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/*
	 * take the page reference. We want the page to be pinned
	 * so that we don't get a ext4_mb_init_cache_call for this
	 * group until we update the bitmap. That would mean we
	 * double allocate blocks. The reference is dropped
	 * in ext4_mb_release_context
	 */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);
	/* store last allocated for subsequent stream allocation */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

/*
 * Decide whether scanning should stop.  Breaks the search after
 * s_mb_max_to_scan extents, and commits to the best-so-far extent once
 * s_mb_min_to_scan extents have been seen (or @finish_group is set),
 * provided the candidate is still available in this group.
 */
static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;
	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

/*
 * The routine checks whether found extent is good enough. If it is,
 * then the extent gets marked used and flag is set to the context
 * to stop scanning. Otherwise, the extent is compared with the
 * previous found extent and if new one is better, then it's stored
 * in the context. Later, the best found extent will be used, if
 * mballoc can't find good enough extent.
 *
 * FIXME: real allocation policy is to be designed yet!
 */
static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
					struct ext4_free_extent *ex,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;

	BUG_ON(ex->fe_len <= 0);
	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);

	ac->ac_found++;

	/*
	 * The special case - take what you catch first
	 */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * Let's check whether the chunk is good enough
	 */
	if (ex->fe_len == gex->fe_len) {
		*bex = *ex;
		ext4_mb_use_best_found(ac, e4b);
		return;
	}

	/*
	 * If this is first found extent, just store it in the context
	 */
	if (bex->fe_len == 0) {
		*bex = *ex;
		return;
	}

	/*
	 * If new found extent is better, store it in the context
	 */
	if (bex->fe_len < gex->fe_len) {
		/* if the request isn't satisfied, any found extent
		 * larger than previous best one is better */
		if (ex->fe_len > bex->fe_len)
			*bex = *ex;
	} else if (ex->fe_len > gex->fe_len) {
		/* if the request is satisfied, then we try to find
		 * an extent that still satisfy the request, but is
		 * smaller than previous one */
		if (ex->fe_len < bex->fe_len)
			*bex = *ex;
	}
	ext4_mb_check_limits(ac, e4b, 0);
}

/*
 * Re-validate the best-so-far extent (ac->ac_b_ex) against its group
 * under the group lock and, if still available, commit to it.
 */
static noinline_for_stack
int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex = ac->ac_b_ex;
	ext4_group_t group = ex.fe_group;
	int max;
	int err;

	BUG_ON(ex.fe_len <= 0);
	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);

	if (max > 0) {
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}

	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * Try to allocate exactly at the goal position (ac->ac_g_ex), honouring
 * the TRY_GOAL and MERGE hints and preferring stripe-aligned results when
 * the request length equals s_stripe.  Returns 0 (result, if any, is
 * recorded in the context) or a buddy-load error.
 */
static noinline_for_stack
int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
				struct ext4_buddy *e4b)
{
	ext4_group_t group = ac->ac_g_ex.fe_group;
	int max;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
	struct ext4_free_extent ex;

	if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
		return 0;
	if (grp->bb_free == 0)
		return 0;

	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
	if (err)
		return err;

	ext4_lock_group(ac->ac_sb, group);
	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
			     ac->ac_g_ex.fe_len, &ex);

	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
		ext4_fsblk_t start;

		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
			ex.fe_start;
		/* use do_div to get remainder (would be 64-bit modulo) */
		if (do_div(start, sbi->s_stripe) == 0) {
			ac->ac_found++;
			ac->ac_b_ex = ex;
			ext4_mb_use_best_found(ac, e4b);
		}
	} else if (max >= ac->ac_g_ex.fe_len) {
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
		/* Sometimes, caller may want to merge even small
		 * number of blocks to an existing extent */
		BUG_ON(ex.fe_len <= 0);
		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
		ac->ac_found++;
		ac->ac_b_ex = ex;
		ext4_mb_use_best_found(ac, e4b);
	}
	ext4_unlock_group(ac->ac_sb, group);
	ext4_mb_unload_buddy(e4b);

	return 0;
}

/*
 * The routine scans buddy structures (not bitmap!) from given order
 * to max order and tries to find big enough chunk to satisfy the req
 */
static noinline_for_stack
void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_group_info *grp = e4b->bd_info;
	void *buddy;
	int i;
	int k;
	int max;

	BUG_ON(ac->ac_2order <= 0);
	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
		if (grp->bb_counters[i] == 0)
			continue;

		buddy = mb_find_buddy(e4b, i, &max);
		BUG_ON(buddy == NULL);

		/* bb_counters[i] != 0 guarantees a zero bit exists */
		k = mb_find_next_zero_bit(buddy, max, 0);
		BUG_ON(k >= max);

		ac->ac_found++;

		ac->ac_b_ex.fe_len = 1 << i;
		ac->ac_b_ex.fe_start = k << i;
		ac->ac_b_ex.fe_group = e4b->bd_group;

		ext4_mb_use_best_found(ac, e4b);

		BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);

		if (EXT4_SB(sb)->s_mb_stats)
			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);

		break;
	}
}

/*
 * The routine scans the group and measures all found extents.
 * In order to optimize scanning, caller must pass number of
 * free blocks in the group, so the routine can know upper limit.
 */
static noinline_for_stack
void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	int i;
	int free;

	free = e4b->bd_info->bb_free;
	BUG_ON(free <= 0);

	i = e4b->bd_info->bb_first_free;

	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
		i = mb_find_next_zero_bit(bitmap,
						EXT4_CLUSTERS_PER_GROUP(sb), i);
		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
			/*
			 * IF we have corrupt bitmap, we won't find any
			 * free blocks even though group info says we
			 * we have free blocks
			 */
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But bitmap says 0",
					free);
			break;
		}

		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
		BUG_ON(ex.fe_len <= 0);
		if (free < ex.fe_len) {
			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
					"%d free clusters as per "
					"group info. But got %d blocks",
					free, ex.fe_len);
			/*
			 * The number of free blocks differs. This mostly
			 * indicate that the bitmap is corrupt. So exit
			 * without claiming the space.
			 */
			break;
		}

		ext4_mb_measure_extent(ac, &ex, e4b);

		i += ex.fe_len;
		free -= ex.fe_len;
	}

	ext4_mb_check_limits(ac, e4b, 1);
}

/*
 * This is a special case for storages like raid5
 * we try to find stripe-aligned chunks for stripe-size-multiple requests
 */
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
				 struct ext4_buddy *e4b)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	void *bitmap = e4b->bd_bitmap;
	struct ext4_free_extent ex;
	ext4_fsblk_t first_group_block;
	ext4_fsblk_t a;
	ext4_grpblk_t i;
	int max;

	BUG_ON(sbi->s_stripe == 0);

	/* find first stripe-aligned block in group */
	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);

	a = first_group_block + sbi->s_stripe - 1;
	do_div(a, sbi->s_stripe);
	i = (a * sbi->s_stripe) - first_group_block;

	/* walk the group stripe by stripe, taking the first free
	 * stripe-sized extent found */
	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
		if (!mb_test_bit(i, bitmap)) {
			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
			if (max >= sbi->s_stripe) {
				ac->ac_found++;
				ac->ac_b_ex = ex;
				ext4_mb_use_best_found(ac, e4b);
				break;
			}
		}
		i += sbi->s_stripe;
	}
}

/* This is now called BEFORE we load the buddy bitmap.
 */
static int ext4_mb_good_group(struct ext4_allocation_context *ac,
				ext4_group_t group, int cr)
{
	unsigned free, fragments;
	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);

	BUG_ON(cr < 0 || cr >= 4);

	free = grp->bb_free;
	if (free == 0)
		return 0;
	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
		return 0;

	/* We only do this if the grp has never been initialized */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		int ret = ext4_mb_init_group(ac->ac_sb, group);
		if (ret)
			return 0;
	}

	fragments = grp->bb_fragments;
	if (fragments == 0)
		return 0;

	/* criteria get progressively less picky as cr grows */
	switch (cr) {
	case 0:
		BUG_ON(ac->ac_2order == 0);

		/* Avoid using the first bg of a flexgroup for data files */
		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
		    ((group % flex_size) == 0))
			return 0;

		if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
		    (free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;

		if (grp->bb_largest_free_order < ac->ac_2order)
			return 0;

		return 1;
	case 1:
		if ((free / fragments) >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 2:
		if (free >= ac->ac_g_ex.fe_len)
			return 1;
		break;
	case 3:
		return 1;
	default:
		BUG();
	}

	return 0;
}

/*
 * Main group-scanning loop: try the goal first, then sweep all groups
 * at increasingly relaxed criteria (cr 0..3), picking a scan strategy
 * per group (power-of-two buddy scan, stripe-aligned scan, or full
 * bitmap scan).  The result, if any, is committed into @ac.
 */
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
	ext4_group_t ngroups, group, i;
	int cr;
	int err = 0;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	struct ext4_buddy e4b;

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);
	ngroups = ext4_get_groups_count(sb);
	/* non-extent files are limited to low blocks/groups */
	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
		ngroups = sbi->s_blockfile_groups;

	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	/* first, try the goal */
	err = ext4_mb_find_by_goal(ac, &e4b);
	if (err || ac->ac_status == AC_STATUS_FOUND)
		goto out;

	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		goto out;

	/*
	 * ac->ac2_order is set only if the fe_len is a power of 2
	 * if ac2_order is set we also set criteria to 0 so that we
	 * try exact allocation using buddy.
	 */
	i = fls(ac->ac_g_ex.fe_len);
	ac->ac_2order = 0;
	/*
	 * We search using buddy data only if the order of the request
	 * is greater than equal to the sbi_s_mb_order2_reqs
	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
	 */
	if (i >= sbi->s_mb_order2_reqs) {
		/*
		 * This should tell if fe_len is exactly power of 2
		 */
		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
			ac->ac_2order = i - 1;
	}

	/* if stream allocation is enabled, use global goal */
	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
		/* TBD: may be hot point */
		spin_lock(&sbi->s_md_lock);
		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
		spin_unlock(&sbi->s_md_lock);
	}

	/* Let's just scan groups to find more-less suitable blocks */
	cr = ac->ac_2order ? 0 : 1;
	/*
	 * cr == 0 try to get exact allocation,
	 * cr == 3 try to get anything
	 */
repeat:
	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
		ac->ac_criteria = cr;
		/*
		 * searching for the right group start
		 * from the goal value specified
		 */
		group = ac->ac_g_ex.fe_group;

		for (i = 0; i < ngroups; group++, i++) {
			/*
			 * Artificially restricted ngroups for non-extent
			 * files makes group > ngroups possible on first loop.
			 */
			if (group >= ngroups)
				group = 0;

			/* This now checks without needing the buddy page */
			if (!ext4_mb_good_group(ac, group, cr))
				continue;

			err = ext4_mb_load_buddy(sb, group, &e4b);
			if (err)
				goto out;

			ext4_lock_group(sb, group);

			/*
			 * We need to check again after locking the
			 * block group
			 */
			if (!ext4_mb_good_group(ac, group, cr)) {
				ext4_unlock_group(sb, group);
				ext4_mb_unload_buddy(&e4b);
				continue;
			}

			ac->ac_groups_scanned++;
			if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
				ext4_mb_simple_scan_group(ac, &e4b);
			else if (cr == 1 && sbi->s_stripe &&
					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
				ext4_mb_scan_aligned(ac, &e4b);
			else
				ext4_mb_complex_scan_group(ac, &e4b);

			ext4_unlock_group(sb, group);
			ext4_mb_unload_buddy(&e4b);

			if (ac->ac_status != AC_STATUS_CONTINUE)
				break;
		}
	}

	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		/*
		 * We've been searching too long. Let's try to allocate
		 * the best chunk we've found so far
		 */

		ext4_mb_try_best_found(ac, &e4b);
		if (ac->ac_status != AC_STATUS_FOUND) {
			/*
			 * Someone more lucky has already allocated it.
			 * The only thing we can do is just take first
			 * found block(s)
			printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
			 */
			ac->ac_b_ex.fe_group = 0;
			ac->ac_b_ex.fe_start = 0;
			ac->ac_b_ex.fe_len = 0;
			ac->ac_status = AC_STATUS_CONTINUE;
			ac->ac_flags |= EXT4_MB_HINT_FIRST;
			cr = 3;
			atomic_inc(&sbi->s_mb_lost_chunks);
			goto repeat;
		}
	}
out:
	return err;
}

/*
 * seq_file iterator for /proc/fs/ext4/<dev>/mb_groups: positions are
 * group numbers, carried as (group + 1) so that group 0 is non-NULL.
 */
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct super_block *sb = seq->private;
	ext4_group_t group;

	++*pos;
	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
		return NULL;
	group = *pos + 1;
	return (void *) ((unsigned long) group);
}

/*
 * Print one mb_groups line: free/frags/first plus the per-order
 * bb_counters of the group, loading the buddy data on demand.
 */
static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
	struct super_block *sb = seq->private;
	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
	int i;
	int err, buddy_loaded = 0;
	struct ext4_buddy e4b;
	struct ext4_group_info *grinfo;
	struct sg {
		struct ext4_group_info info;
		ext4_grpblk_t counters[16];
	} sg;

	group--;
	if (group == 0)
		seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
				"[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
				  "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
			   "group", "free", "frags", "first",
			   "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
			   "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");

	/* snapshot size: group info plus the counters actually used */
	i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
		sizeof(struct ext4_group_info);
	grinfo = ext4_get_group_info(sb, group);
	/* Load the group info in memory only if not already loaded.
	 */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
		err = ext4_mb_load_buddy(sb, group, &e4b);
		if (err) {
			seq_printf(seq, "#%-5u: I/O error\n", group);
			return 0;
		}
		buddy_loaded = 1;
	}

	memcpy(&sg, ext4_get_group_info(sb, group), i);

	if (buddy_loaded)
		ext4_mb_unload_buddy(&e4b);

	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
			sg.info.bb_fragments, sg.info.bb_first_free);
	for (i = 0; i <= 13; i++)
		seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
				sg.info.bb_counters[i] : 0);
	seq_printf(seq, " ]\n");

	return 0;
}

static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations ext4_mb_seq_groups_ops = {
	.start  = ext4_mb_seq_groups_start,
	.next   = ext4_mb_seq_groups_next,
	.stop   = ext4_mb_seq_groups_stop,
	.show   = ext4_mb_seq_groups_show,
};

static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
{
	struct super_block *sb = PDE_DATA(inode);
	int rc;

	rc = seq_open(file, &ext4_mb_seq_groups_ops);
	if (rc == 0) {
		struct seq_file *m = file->private_data;
		m->private = sb;
	}
	return rc;

}

static const struct file_operations ext4_mb_seq_groups_fops = {
	.owner		= THIS_MODULE,
	.open		= ext4_mb_seq_groups_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Map a block size (log2) to its pre-created group-info slab cache. */
static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
{
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];

	BUG_ON(!cachep);
	return cachep;
}

/*
 * Allocate the top-level s_group_info array for the specified number
 * of groups
 */
int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned size;
	struct ext4_group_info ***new_groupinfo;

	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
		EXT4_DESC_PER_BLOCK_BITS(sb);
	if (size <= sbi->s_group_info_size)
		return 0;

	/* grow in powers of two and copy the old table across */
	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
	new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
	if (!new_groupinfo) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
		return -ENOMEM;
	}
	if (sbi->s_group_info) {
		memcpy(new_groupinfo, sbi->s_group_info,
		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
		ext4_kvfree(sbi->s_group_info);
	}
	sbi->s_group_info = new_groupinfo;
	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
		   sbi->s_group_info_size);
	return 0;
}

/* Create and initialize ext4_group_info data for the given group. */
int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
			  struct ext4_group_desc *desc)
{
	int i;
	int metalen = 0;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_info **meta_group_info;
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	/*
	 * First check if this group is the first of a reserved block.
	 * If it's true, we have to allocate a new table of pointers
	 * to ext4_group_info structures
	 */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		metalen = sizeof(*meta_group_info) <<
			EXT4_DESC_PER_BLOCK_BITS(sb);
		meta_group_info = kmalloc(metalen, GFP_KERNEL);
		if (meta_group_info == NULL) {
			ext4_msg(sb, KERN_ERR, "can't allocate mem "
				 "for a buddy group");
			goto exit_meta_group_info;
		}
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
			meta_group_info;
	}

	meta_group_info =
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);

	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL);
	if (meta_group_info[i] == NULL) {
		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
		goto exit_group_info;
	}
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
		&(meta_group_info[i]->bb_state));

	/*
	 * initialize bb_free to be able to skip
	 * empty groups without initialization
	 */
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		meta_group_info[i]->bb_free =
			ext4_free_clusters_after_init(sb, group, desc);
	} else {
		meta_group_info[i]->bb_free =
			ext4_free_group_clusters(sb, desc);
	}

	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
	init_rwsem(&meta_group_info[i]->alloc_sem);
	meta_group_info[i]->bb_free_root = RB_ROOT;
	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */

#ifdef DOUBLE_CHECK
	{
		struct buffer_head *bh;
		meta_group_info[i]->bb_bitmap =
			kmalloc(sb->s_blocksize, GFP_KERNEL);
		BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
		bh = ext4_read_block_bitmap(sb, group);
		BUG_ON(bh == NULL);
		memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
			sb->s_blocksize);
		put_bh(bh);
	}
#endif

	return 0;

exit_group_info:
	/* If a meta_group_info table has been allocated, release it now */
	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
	}
exit_meta_group_info:
	return -ENOMEM;
} /* ext4_mb_add_groupinfo */

/*
 * Build the per-group mballoc metadata at mount time: the s_group_info
 * table, the buddy-cache inode, and one ext4_group_info per group.
 */
static int ext4_mb_init_backend(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int err;
	struct ext4_group_desc *desc;
	struct kmem_cache *cachep;

	err = ext4_mb_alloc_groupinfo(sb, ngroups);
	if (err)
		return err;

	sbi->s_buddy_cache = new_inode(sb);
	if (sbi->s_buddy_cache == NULL) {
		ext4_msg(sb, KERN_ERR, "can't get new inode");
		goto err_freesgi;
	}
	/* To avoid potentially colliding with an valid on-disk inode number,
	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
	 * not in the inode hash, so it should never be found by iget(), but
	 * this will avoid confusion if it ever shows up during debugging.
	 */
	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
	for (i = 0; i < ngroups; i++) {
		desc = ext4_get_group_desc(sb, i, NULL);
		if (desc == NULL) {
			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
			goto err_freebuddy;
		}
		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
			goto err_freebuddy;
	}

	return 0;

err_freebuddy:
	/* unwind: free the group infos created so far, then the
	 * pointer tables and the buddy-cache inode */
	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
	while (i-- > 0)
		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
	i = sbi->s_group_info_size;
	while (i-- > 0)
		kfree(sbi->s_group_info[i]);
	iput(sbi->s_buddy_cache);
err_freesgi:
	ext4_kvfree(sbi->s_group_info);
	return -ENOMEM;
}

/* Destroy every per-blocksize group-info slab cache. */
static void ext4_groupinfo_destroy_slabs(void)
{
	int i;

	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
		if (ext4_groupinfo_caches[i])
			kmem_cache_destroy(ext4_groupinfo_caches[i]);
		ext4_groupinfo_caches[i] = NULL;
	}
}

/*
 * Lazily create the group-info slab cache for block size @size,
 * serialised by a local mutex so concurrent mounts race safely.
 */
static int ext4_groupinfo_create_slab(size_t size)
{
	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
	int slab_size;
	int blocksize_bits = order_base_2(size);
	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
	struct kmem_cache *cachep;

	if (cache_index >= NR_GRPINFO_CACHES)
		return -EINVAL;

	if (unlikely(cache_index < 0))
		cache_index = 0;

	mutex_lock(&ext4_grpinfo_slab_create_mutex);
	if (ext4_groupinfo_caches[cache_index]) {
		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
		return 0;	/* Already created */
	}

	/* object must hold bb_counters[] for all orders of this blocksize */
	slab_size = offsetof(struct ext4_group_info,
				bb_counters[blocksize_bits + 2]);

	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
					NULL);

	ext4_groupinfo_caches[cache_index] = cachep;

	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
	if (!cachep) {
		printk(KERN_EMERG
		       "EXT4-fs: no memory for groupinfo slab cache\n");
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mount-time initialisation of the multi-block allocator: buddy offset
 * tables, tunables, per-cpu locality groups, backend metadata and the
 * mb_groups proc file.  Returns 0 or -ENOMEM/-errno with everything
 * allocated so far released.
 */
int ext4_mb_init(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned i, j;
	unsigned offset;
	unsigned max;
	int ret;

	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);

	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_offsets == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
	if (sbi->s_mb_maxs == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
	if (ret < 0)
		goto out;

	/* order 0 is regular bitmap */
	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
	sbi->s_mb_offsets[0] = 0;

	/* precompute offset/size of each order's buddy bitmap */
	i = 1;
	offset = 0;
	max = sb->s_blocksize << 2;
	do {
		sbi->s_mb_offsets[i] = offset;
		sbi->s_mb_maxs[i] = max;
		offset += 1 << (sb->s_blocksize_bits - i);
		max = max >> 1;
		i++;
	} while (i <= sb->s_blocksize_bits + 1);

	spin_lock_init(&sbi->s_md_lock);
	spin_lock_init(&sbi->s_bal_lock);

	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
	sbi->s_mb_stats = MB_DEFAULT_STATS;
	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
	/*
	 * The default group preallocation is 512, which for 4k block
	 * sizes translates to 2 megabytes.  However for bigalloc file
	 * systems, this is probably too big (i.e, if the cluster size
	 * is 1 megabyte, then group preallocation size becomes half a
	 * gigabyte!).  As a default, we will keep a two megabyte
	 * group pralloc size for cluster sizes up to 64k, and after
	 * that, we will force a minimum group preallocation size of
	 * 32 clusters.  This translates to 8 megs when the cluster
	 * size is 256k, and 32 megs when the cluster size is 1 meg,
	 * which seems reasonable as a default.
	 */
	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
				       sbi->s_cluster_bits, 32);
	/*
	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
	 * to the lowest multiple of s_stripe which is bigger than
	 * the s_mb_group_prealloc as determined above. We want
	 * the preallocation size to be an exact multiple of the
	 * RAID stripe size so that preallocations don't fragment
	 * the stripes.
	 */
	if (sbi->s_stripe > 1) {
		sbi->s_mb_group_prealloc = roundup(
			sbi->s_mb_group_prealloc, sbi->s_stripe);
	}

	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
	if (sbi->s_locality_groups == NULL) {
		ret = -ENOMEM;
		goto out_free_groupinfo_slab;
	}
	for_each_possible_cpu(i) {
		struct ext4_locality_group *lg;
		lg = per_cpu_ptr(sbi->s_locality_groups, i);
		mutex_init(&lg->lg_mutex);
		for (j = 0; j < PREALLOC_TB_SIZE; j++)
			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
		spin_lock_init(&lg->lg_prealloc_lock);
	}

	/* init file for buddy data */
	ret = ext4_mb_init_backend(sb);
	if (ret != 0)
		goto out_free_locality_groups;

	if (sbi->s_proc)
		proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
				 &ext4_mb_seq_groups_fops, sb);

	return 0;

out_free_locality_groups:
	free_percpu(sbi->s_locality_groups);
	sbi->s_locality_groups = NULL;
out_free_groupinfo_slab:
	ext4_groupinfo_destroy_slabs();
out:
	kfree(sbi->s_mb_offsets);
	sbi->s_mb_offsets = NULL;
	kfree(sbi->s_mb_maxs);
	sbi->s_mb_maxs = NULL;
	return ret;
}

/* need to called with the ext4 group lock held */
static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
{
	struct ext4_prealloc_space *pa;
	struct list_head *cur, *tmp;
	int count = 0;

	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		list_del(&pa->pa_group_list);
		count++;
		kmem_cache_free(ext4_pspace_cachep, pa);
	}
	if (count)
		mb_debug(1, "mballoc: %u PAs left\n", count);

}

/*
 * Unmount-time teardown: release per-group info, the s_group_info
 * tables, the buddy-cache inode and the locality groups, and print the
 * allocator statistics if enabled.
 */
int ext4_mb_release(struct super_block *sb)
{
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	ext4_group_t i;
	int num_meta_group_infos;
	struct ext4_group_info *grinfo;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);

	if (sbi->s_proc)
		remove_proc_entry("mb_groups", sbi->s_proc);

	if (sbi->s_group_info) {
		for (i = 0; i < ngroups; i++) {
			grinfo = ext4_get_group_info(sb, i);
#ifdef DOUBLE_CHECK
			kfree(grinfo->bb_bitmap);
#endif
			ext4_lock_group(sb, i);
			ext4_mb_cleanup_pa(grinfo);
			ext4_unlock_group(sb, i);
			kmem_cache_free(cachep, grinfo);
		}
		num_meta_group_infos = (ngroups +
				EXT4_DESC_PER_BLOCK(sb) - 1) >>
			EXT4_DESC_PER_BLOCK_BITS(sb);
		for (i = 0; i < num_meta_group_infos; i++)
			kfree(sbi->s_group_info[i]);
		ext4_kvfree(sbi->s_group_info);
	}
	kfree(sbi->s_mb_offsets);
	kfree(sbi->s_mb_maxs);
	if (sbi->s_buddy_cache)
		iput(sbi->s_buddy_cache);
	if (sbi->s_mb_stats) {
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %u blocks %u reqs (%u success)",
				atomic_read(&sbi->s_bal_allocated),
				atomic_read(&sbi->s_bal_reqs),
				atomic_read(&sbi->s_bal_success));
		ext4_msg(sb, KERN_INFO,
		      "mballoc: %u extents scanned, %u goal hits, "
				"%u 2^N hits, %u breaks, %u lost",
				atomic_read(&sbi->s_bal_ex_scanned),
				atomic_read(&sbi->s_bal_goals),
				atomic_read(&sbi->s_bal_2orders),
				atomic_read(&sbi->s_bal_breaks),
				atomic_read(&sbi->s_mb_lost_chunks));
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %lu generated and it took %Lu",
				sbi->s_mb_buddies_generated,
				sbi->s_mb_generation_time);
		ext4_msg(sb, KERN_INFO,
		       "mballoc: %u preallocated, %u discarded",
				atomic_read(&sbi->s_mb_preallocated),
				atomic_read(&sbi->s_mb_discarded));
	}

	free_percpu(sbi->s_locality_groups);

	return 0;
}

/* Convert the group-relative cluster range to filesystem blocks and
 * issue a discard (TRIM) request for it. */
static inline int ext4_issue_discard(struct super_block *sb,
		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
		unsigned long flags)
{
	ext4_fsblk_t discard_block;

	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
			 ext4_group_first_block_no(sb, block_group));
	count = EXT4_C2B(EXT4_SB(sb), count);
	trace_ext4_discard_blocks(sb,
			(unsigned long long) discard_block, count);
	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
}

/*
 * This function is called by the jbd2 layer once the commit has finished,
 * so we know we can free the blocks that were released with that commit.
 */
 */
static void ext4_free_data_callback(struct super_block *sb,
				    struct ext4_journal_cb_entry *jce,
				    int rc)
{
	/* jce is embedded at the start of the ext4_free_data entry */
	struct ext4_free_data *entry = (struct ext4_free_data *)jce;
	struct ext4_buddy e4b;
	struct ext4_group_info *db;
	int err, count = 0, count2 = 0;
	/* NOTE(review): rc (the commit status from jbd2) is unused here */

	mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
		 entry->efd_count, entry->efd_group, entry);

	if (test_opt(sb, DISCARD)) {
		/* -o discard: tell the device these blocks are now unused */
		err = ext4_issue_discard(sb, entry->efd_group,
					 entry->efd_start_cluster,
					 entry->efd_count, 0);
		if (err && err != -EOPNOTSUPP)
			ext4_msg(sb, KERN_WARNING, "discard request in"
				 " group:%d block:%d count:%d failed"
				 " with %d", entry->efd_group,
				 entry->efd_start_cluster,
				 entry->efd_count, err);
	}

	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
	/* we expect to find existing buddy because it's pinned */
	BUG_ON(err != 0);

	db = e4b.bd_info;
	/* there are blocks to put in buddy to make them really free */
	count += entry->efd_count;
	count2++;
	ext4_lock_group(sb, entry->efd_group);
	/* Take it out of per group rb tree */
	rb_erase(&entry->efd_node, &(db->bb_free_root));
	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);

	/*
	 * Clear the trimmed flag for the group so that the next
	 * ext4_trim_fs can trim it.
	 * If the volume is mounted with -o discard, online discard
	 * is supported and the free blocks will be trimmed online.
	 */
	if (!test_opt(sb, DISCARD))
		EXT4_MB_GRP_CLEAR_TRIMMED(db);

	if (!db->bb_free_root.rb_node) {
		/* No more items in the per group rb tree
		 * balance refcounts from ext4_mb_free_metadata()
		 */
		page_cache_release(e4b.bd_buddy_page);
		page_cache_release(e4b.bd_bitmap_page);
	}
	ext4_unlock_group(sb, entry->efd_group);
	kmem_cache_free(ext4_free_data_cachep, entry);
	ext4_mb_unload_buddy(&e4b);

	mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
}

/*
 * Create the three slab caches mballoc needs (prealloc space descriptors,
 * allocation contexts, freed-extent records).  On failure every cache
 * created so far is destroyed and -ENOMEM is returned.
 */
int __init ext4_init_mballoc(void)
{
	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
					SLAB_RECLAIM_ACCOUNT);
	if (ext4_pspace_cachep == NULL)
		return -ENOMEM;

	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
				    SLAB_RECLAIM_ACCOUNT);
	if (ext4_ac_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		return -ENOMEM;
	}

	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
					   SLAB_RECLAIM_ACCOUNT);
	if (ext4_free_data_cachep == NULL) {
		kmem_cache_destroy(ext4_pspace_cachep);
		kmem_cache_destroy(ext4_ac_cachep);
		return -ENOMEM;
	}
	return 0;
}

/* Tear down the slab caches created by ext4_init_mballoc(). */
void ext4_exit_mballoc(void)
{
	/*
	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
	 * before destroying the slab cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext4_pspace_cachep);
	kmem_cache_destroy(ext4_ac_cachep);
	kmem_cache_destroy(ext4_free_data_cachep);
	ext4_groupinfo_destroy_slabs();
}

/*
 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
 * Returns 0 if success or error code
 */
static noinline_for_stack int
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
				handle_t *handle, unsigned int reserv_clstrs)
{
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_group_desc *gdp;
	struct buffer_head *gdp_bh;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block;
	int err, len;

	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);

	sb = ac->ac_sb;
	sbi = EXT4_SB(sb);

	err = -EIO;
	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
	if (!bitmap_bh)
		goto out_err;

	/* both the bitmap and the group descriptor are journaled metadata */
	err = ext4_journal_get_write_access(handle, bitmap_bh);
	if (err)
		goto out_err;

	err = -EIO;
	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
	if (!gdp)
		goto out_err;

	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
			ext4_free_group_clusters(sb, gdp));

	err = ext4_journal_get_write_access(handle, gdp_bh);
	if (err)
		goto out_err;

	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);

	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
	if (!ext4_data_block_valid(sbi, block, len)) {
		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
			   "fs metadata", block, block+len);
		/* File system mounted not to panic on error
		 * Fix the bitmap and repeat the block allocation
		 * We leak some of the blocks here.
		 */
		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
			      ac->ac_b_ex.fe_len);
		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!err)
			/* tell the caller to retry the allocation elsewhere */
			err = -EAGAIN;
		goto out_err;
	}

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
#ifdef AGGRESSIVE_CHECK
	{
		/* every bit we are about to set must currently be clear */
		int i;
		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
						bitmap_bh->b_data));
		}
	}
#endif
	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
		      ac->ac_b_ex.fe_len);
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		/* first allocation in this group: materialize its free count */
		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
		ext4_free_group_clusters_set(sb, gdp,
					     ext4_free_clusters_after_init(sb,
						ac->ac_b_ex.fe_group, gdp));
	}
	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
	ext4_free_group_clusters_set(sb, gdp, len);
	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);

	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
	/*
	 * Now reduce the dirty block count also. Should not go negative
	 */
	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
		/* release all the reserved blocks if non delalloc */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   reserv_clstrs);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi,
							  ac->ac_b_ex.fe_group);
		atomic64_sub(ac->ac_b_ex.fe_len,
			     &sbi->s_flex_groups[flex_group].free_clusters);
	}

	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (err)
		goto out_err;
	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);

out_err:
	brelse(bitmap_bh);
	return err;
}

/*
 * here we normalize request for locality group
 * Group request are normalized to s_mb_group_prealloc, which goes to
 * s_strip if we set the same via mount option.
 * s_mb_group_prealloc can be configured via
 * /sys/fs/ext4/<partition>/mb_group_prealloc
 *
 * XXX: should we try to preallocate more than the group has now?
 */
static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;

	BUG_ON(lg == NULL);
	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
	mb_debug(1, "#%u: goal %u blocks for locality group\n",
		current->pid, ac->ac_g_ex.fe_len);
}

/*
 * Normalization means making request better in terms of
 * size and alignment
 */
static noinline_for_stack void
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
				struct ext4_allocation_request *ar)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits, max;
	ext4_lblk_t end;
	loff_t size, start_off;
	loff_t orig_size __maybe_unused;
	ext4_lblk_t start;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_prealloc_space *pa;

	/* do normalize only data requests, metadata requests
	   do not need preallocation */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return;

	/* sometime caller may want exact blocks */
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	/* caller may indicate that preallocation isn't
	 * required (it's a tail, for example) */
	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
		return;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
		ext4_mb_normalize_group_request(ac);
		return ;
	}

	bsbits = ac->ac_sb->s_blocksize_bits;

	/* first, let's learn actual file size
	 * given current request is allocated */
	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	size = size << bsbits;
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
	orig_size = size;

	/* max size of free chunks */
	max = 2 << bsbits;

/* true when the request fits under (size) or free chunks can't exceed
 * (chunk_size) anyway */
#define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
		(req <= (size) || max <= (chunk_size))

	/* first, try to predict filesize */
	/* XXX: should this table be tunable?
	 */
	start_off = 0;
	if (size <= 16 * 1024) {
		size = 16 * 1024;
	} else if (size <= 32 * 1024) {
		size = 32 * 1024;
	} else if (size <= 64 * 1024) {
		size = 64 * 1024;
	} else if (size <= 128 * 1024) {
		size = 128 * 1024;
	} else if (size <= 256 * 1024) {
		size = 256 * 1024;
	} else if (size <= 512 * 1024) {
		size = 512 * 1024;
	} else if (size <= 1024 * 1024) {
		size = 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(21 - bsbits)) << 21;
		size = 2 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(22 - bsbits)) << 22;
		size = 4 * 1024 * 1024;
	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
					(8<<20)>>bsbits, max, 8 * 1024)) {
		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
						(23 - bsbits)) << 23;
		size = 8 * 1024 * 1024;
	} else {
		/* very large file: just take the request as-is */
		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
		size = ac->ac_o_ex.fe_len << bsbits;
	}
	/* back from bytes to blocks */
	size = size >> bsbits;
	start = start_off >> bsbits;

	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
		start = ar->lleft + 1;
	}
	if (ar->pright && start + size - 1 >= ar->lright)
		size -= start + size - ar->lright;

	end = start + size;

	/* check we don't cross already preallocated blocks */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		if (pa->pa_deleted)
			continue;
		spin_lock(&pa->pa_lock);
		/* re-check under the lock: deletion may have raced us */
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
						  pa->pa_len);

		/* PA must not overlap original request */
		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
			ac->ac_o_ex.fe_logical < pa->pa_lstart));

		/* skip PAs this normalized request doesn't overlap with */
		if (pa->pa_lstart >= end || pa_end <= start) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		BUG_ON(pa->pa_lstart <= start && pa_end >= end);

		/* adjust start or end to be adjacent to this pa */
		if (pa_end <= ac->ac_o_ex.fe_logical) {
			BUG_ON(pa_end < start);
			start = pa_end;
		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
			BUG_ON(pa->pa_lstart > end);
			end = pa->pa_lstart;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();
	size = end - start;

	/* XXX: extra loop to check we really don't overlap preallocations */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;

		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0) {
			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
							  pa->pa_len);
			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();

	if (start + size <= ac->ac_o_ex.fe_logical &&
			start > ac->ac_o_ex.fe_logical) {
		ext4_msg(ac->ac_sb, KERN_ERR,
			 "start %lu, size %lu, fe_logical %lu",
			 (unsigned long) start, (unsigned long) size,
			 (unsigned long) ac->ac_o_ex.fe_logical);
	}
	/* the normalized range must still cover the original request */
	BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
			start > ac->ac_o_ex.fe_logical);
	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));

	/* now prepare goal request */

	/* XXX: is it better to align blocks WRT to logical
	 * placement or satisfy big request as is */
	ac->ac_g_ex.fe_logical = start;
	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);

	/* define goal start in order to merge */
	if (ar->pright && (ar->lright == (start + size))) {
		/* merge to the right */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}
	if (ar->pleft && (ar->lleft + 1 == start)) {
		/* merge to the left */
		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
						&ac->ac_f_ex.fe_group,
						&ac->ac_f_ex.fe_start);
		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
	}

	mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
		(unsigned) orig_size, (unsigned) start);
}

/* Fold the outcome of one allocation into the per-sb mballoc statistics
 * and emit the matching tracepoint. */
static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);

	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
atomic_inc(&sbi->s_bal_reqs);
		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
		/* "success" means we got at least as much as we asked for */
		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
			atomic_inc(&sbi->s_bal_success);
		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
			atomic_inc(&sbi->s_bal_goals);
		if (ac->ac_found > sbi->s_mb_max_to_scan)
			atomic_inc(&sbi->s_bal_breaks);
	}

	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
		trace_ext4_mballoc_alloc(ac);
	else
		trace_ext4_mballoc_prealloc(ac);
}

/*
 * Called on failure; free up any blocks from the inode PA for this
 * context. We don't need this for MB_GROUP_PA because we only change
 * pa_free in ext4_mb_release_context(), but on failure, we've already
 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
 */
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
{
	struct ext4_prealloc_space *pa = ac->ac_pa;
	struct ext4_buddy e4b;
	int err;

	if (pa == NULL) {
		/* blocks came straight from the buddy; give them back */
		if (ac->ac_f_ex.fe_len == 0)
			return;
		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
		if (err) {
			/*
			 * This should never happen since we pin the
			 * pages in the ext4_allocation_context so
			 * ext4_mb_load_buddy() should never fail.
			 */
			WARN(1, "mb_load_buddy failed (%d)", err);
			return;
		}
		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
			       ac->ac_f_ex.fe_len);
		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
		ext4_mb_unload_buddy(&e4b);
		return;
	}
	if (pa->pa_type == MB_INODE_PA)
		/* blocks came from an inode PA; return them to the PA */
		pa->pa_free += ac->ac_b_ex.fe_len;
}

/*
 * use blocks preallocated to inode
 */
static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	ext4_fsblk_t start;
	ext4_fsblk_t end;
	int len;

	/* found preallocated blocks, use them */
	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
	len = EXT4_NUM_B2C(sbi, end - start);
	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	/* the carved-out extent must stay inside the PA */
	BUG_ON(start < pa->pa_pstart);
	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
	BUG_ON(pa->pa_free < len);
	pa->pa_free -= len;

	mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
}

/*
 * use blocks preallocated to locality group
 */
static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
				struct ext4_prealloc_space *pa)
{
	unsigned int len = ac->ac_o_ex.fe_len;

	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
					&ac->ac_b_ex.fe_group,
					&ac->ac_b_ex.fe_start);
	ac->ac_b_ex.fe_len = len;
	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_pa = pa;

	/* we don't correct pa_pstart or pa_plen here to avoid
	 * possible race when the group is being loaded concurrently
	 * instead we correct pa later, after blocks are marked
	 * in on-disk bitmap -- see ext4_mb_release_context()
	 * Other CPUs are prevented from allocating from this pa by lg_mutex
	 */
	mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
}

/*
 * Return the prealloc space that have minimal distance
 * from the
goal block. @cpa is the prealloc
 * space that is having currently known minimal distance
 * from the goal block.
 */
static struct ext4_prealloc_space *
ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
			struct ext4_prealloc_space *pa,
			struct ext4_prealloc_space *cpa)
{
	ext4_fsblk_t cur_distance, new_distance;

	if (cpa == NULL) {
		atomic_inc(&pa->pa_count);
		return pa;
	}
	cur_distance = abs(goal_block - cpa->pa_pstart);
	new_distance = abs(goal_block - pa->pa_pstart);

	if (cur_distance <= new_distance)
		return cpa;

	/* drop the previous reference */
	atomic_dec(&cpa->pa_count);
	atomic_inc(&pa->pa_count);
	return pa;
}

/*
 * search goal blocks in preallocated space
 */
static noinline_for_stack int
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int order, i;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa, *cpa = NULL;
	ext4_fsblk_t goal_block;

	/* only data can be preallocated */
	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
		return 0;

	/* first, try per-file preallocation */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {

		/* all fields in this condition don't change,
		 * so we can skip locking for them */
		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
					       EXT4_C2B(sbi, pa->pa_len)))
			continue;

		/* non-extent files can't have physical blocks past 2^32 */
		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
		     EXT4_MAX_BLOCK_FILE_PHYS))
			continue;

		/* found preallocated blocks, use them */
		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0 && pa->pa_free) {
			atomic_inc(&pa->pa_count);
			ext4_mb_use_inode_pa(ac, pa);
			spin_unlock(&pa->pa_lock);
			/* NOTE(review): 10/20 seem to tag which PA path
			 * satisfied the request, for stats -- confirm */
			ac->ac_criteria = 10;
			rcu_read_unlock();
			return 1;
		}
		spin_unlock(&pa->pa_lock);
	}
	rcu_read_unlock();

	/* can we use group allocation?
	 */
	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
		return 0;

	/* inode may have no locality group for some reason */
	lg = ac->ac_lg;
	if (lg == NULL)
		return 0;
	order = fls(ac->ac_o_ex.fe_len) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;

	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
	/*
	 * search for the prealloc space that is having
	 * minimal distance from the goal block.
	 */
	for (i = order; i < PREALLOC_TB_SIZE; i++) {
		rcu_read_lock();
		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
					pa_inode_list) {
			spin_lock(&pa->pa_lock);
			if (pa->pa_deleted == 0 &&
					pa->pa_free >= ac->ac_o_ex.fe_len) {

				cpa = ext4_mb_check_group_pa(goal_block,
								pa, cpa);
			}
			spin_unlock(&pa->pa_lock);
		}
		rcu_read_unlock();
	}
	if (cpa) {
		ext4_mb_use_group_pa(ac, cpa);
		ac->ac_criteria = 20;
		return 1;
	}
	return 0;
}

/*
 * the function goes through all block freed in the group
 * but not yet committed and marks them used in in-core bitmap.
 * buddy must be generated from this bitmap
 * Need to be called with the ext4 group lock held
 */
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group)
{
	struct rb_node *n;
	struct ext4_group_info *grp;
	struct ext4_free_data *entry;

	grp = ext4_get_group_info(sb, group);
	n = rb_first(&(grp->bb_free_root));

	while (n) {
		entry = rb_entry(n, struct ext4_free_data, efd_node);
		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
		n = rb_next(n);
	}
	return;
}

/*
 * the function goes through all preallocation in this group and marks them
 * used in in-core bitmap.
buddy must be generated from this bitmap
 * Need to be called with ext4 group lock held
 */
static noinline_for_stack
void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_prealloc_space *pa;
	struct list_head *cur;
	ext4_group_t groupnr;
	ext4_grpblk_t start;
	int preallocated = 0;
	int len;

	/* all form of preallocation discards first load group,
	 * so the only competing code is preallocation use.
	 * we don't need any locking here
	 * notice we do NOT ignore preallocations with pa_deleted
	 * otherwise we could leave used blocks available for
	 * allocation in buddy when concurrent ext4_mb_put_pa()
	 * is dropping preallocation
	 */
	list_for_each(cur, &grp->bb_prealloc_list) {
		pa = list_entry(cur, struct ext4_prealloc_space,
				pa_group_list);
		spin_lock(&pa->pa_lock);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
					     &groupnr, &start);
		len = pa->pa_len;
		spin_unlock(&pa->pa_lock);
		if (unlikely(len == 0))
			continue;
		BUG_ON(groupnr != group);
		ext4_set_bits(bitmap, start, len);
		preallocated += len;
	}
	mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
}

/* RCU callback: final free of a prealloc-space descriptor once all
 * readers are done with it. */
static void ext4_mb_pa_callback(struct rcu_head *head)
{
	struct ext4_prealloc_space *pa;
	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);

	BUG_ON(atomic_read(&pa->pa_count));
	BUG_ON(pa->pa_deleted == 0);
	kmem_cache_free(ext4_pspace_cachep, pa);
}

/*
 * drops a reference to preallocated space descriptor
 * if this was the last reference and the space is consumed
 */
static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
			struct super_block *sb, struct ext4_prealloc_space *pa)
{
	ext4_group_t grp;
	ext4_fsblk_t grp_blk;

	/* in this short window concurrent discard can set pa_deleted */
	spin_lock(&pa->pa_lock);
	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	if (pa->pa_deleted == 1) {
		spin_unlock(&pa->pa_lock);
		return;
	}

	pa->pa_deleted = 1;
	spin_unlock(&pa->pa_lock);

	grp_blk = pa->pa_pstart;
	/*
	 * If doing group-based preallocation, pa_pstart may be in the
	 * next group when pa is used up
	 */
	if (pa->pa_type == MB_GROUP_PA)
		grp_blk--;

	grp = ext4_get_group_number(sb, grp_blk);

	/*
	 * possible race:
	 *
	 *  P1 (buddy init)			P2 (regular allocation)
	 *					find block B in PA
	 *  copy on-disk bitmap to buddy
	 *					mark B in on-disk bitmap
	 *					drop PA from group
	 *  mark all PAs in buddy
	 *
	 * thus, P1 initializes buddy with B available. to prevent this
	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
	 * against that pair
	 */
	ext4_lock_group(sb, grp);
	list_del(&pa->pa_group_list);
	ext4_unlock_group(sb, grp);

	spin_lock(pa->pa_obj_lock);
	list_del_rcu(&pa->pa_inode_list);
	spin_unlock(pa->pa_obj_lock);

	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}

/*
 * creates new preallocated space for given inode
 */
static noinline_for_stack int
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;
	struct ext4_inode_info *ei;

	/* preallocate only when found space is larger then requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));

	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
	if (pa == NULL)
		return -ENOMEM;

	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
		int winl;
		int wins;
		int win;
		int offs;

		/* we can't allocate as much as normalizer wants.
		 * so, found space must get proper lstart
		 * to cover original request */
		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);

		/* we're limited by original request in that
		 * logical block must be covered any way
		 * winl is window we can move our chunk within */
		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;

		/* also, we should cover whole original request */
		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);

		/* the smallest one defines real window */
		win = min(winl, wins);

		offs = ac->ac_o_ex.fe_logical %
			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
		if (offs && offs < win)
			win = offs;

		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
			EXT4_NUM_B2C(sbi, win);
		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
	}

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	pa->pa_lstart = ac->ac_b_ex.fe_logical;
	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	atomic_set(&pa->pa_count, 1);
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_inode_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_INODE_PA;

	mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_inode_pa(ac, pa);

	ext4_mb_use_inode_pa(ac, pa);
	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);

	ei = EXT4_I(ac->ac_inode);
	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);

	pa->pa_obj_lock = &ei->i_prealloc_lock;
	pa->pa_inode = ac->ac_inode;

	/* link the new PA into both the per-group and per-inode lists */
	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);

	spin_lock(pa->pa_obj_lock);
	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
	spin_unlock(pa->pa_obj_lock);

	return 0;
}

/*
 * creates new preallocated space for locality group inodes belongs to
 */
static noinline_for_stack int
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg;
	struct ext4_prealloc_space *pa;
	struct ext4_group_info *grp;

	/* preallocate only when found space is larger then requested */
	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));

	BUG_ON(ext4_pspace_cachep == NULL);
	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
	if (pa == NULL)
		return -ENOMEM;

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	/* group PAs are not tied to a file, so lstart simply mirrors pstart */
	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
	pa->pa_lstart = pa->pa_pstart;
	pa->pa_len = ac->ac_b_ex.fe_len;
	pa->pa_free = pa->pa_len;
	atomic_set(&pa->pa_count, 1);
	spin_lock_init(&pa->pa_lock);
	INIT_LIST_HEAD(&pa->pa_inode_list);
	INIT_LIST_HEAD(&pa->pa_group_list);
	pa->pa_deleted = 0;
	pa->pa_type = MB_GROUP_PA;

	mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
			pa->pa_pstart, pa->pa_len, pa->pa_lstart);
	trace_ext4_mb_new_group_pa(ac, pa);

	ext4_mb_use_group_pa(ac, pa);
	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);

	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
	lg = ac->ac_lg;
	BUG_ON(lg == NULL);

	pa->pa_obj_lock = &lg->lg_prealloc_lock;
	pa->pa_inode = NULL;

	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);

	/*
	 * We will later add the new pa to the right bucket
	 * after updating the pa_free in ext4_mb_release_context
	 */
	return 0;
}

/* Dispatch to group-based or inode-based preallocation depending on the
 * allocation hint. Returns 0 or -ENOMEM. */
static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
{
	int err;

	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		err = ext4_mb_new_group_pa(ac);
	else
		err = ext4_mb_new_inode_pa(ac);
	return err;
}

/*
 * finds all unused blocks in on-disk bitmap, frees them in
 * in-core bitmap and buddy.
* @pa must be unlinked from inode and group lists, so that
 * nobody else can find/use it.
 * the caller MUST hold group/inode locks.
 * TODO: optimize the case when there are no in-core structures yet
 */
static noinline_for_stack int
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
			struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned int end;
	unsigned int next;
	ext4_group_t group;
	ext4_grpblk_t bit;
	unsigned long long grp_blk_start;
	int err = 0;
	int free = 0;

	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	end = bit + pa->pa_len;

	/* walk runs of zero bits (never handed out) inside the PA and
	 * return them to the buddy allocator */
	while (bit < end) {
		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
		if (bit >= end)
			break;
		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
		mb_debug(1, " free preallocated %u/%u in group %u\n",
			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
			 (unsigned) next - bit, (unsigned) group);
		free += next - bit;

		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
						    EXT4_C2B(sbi, bit)),
					       next - bit);
		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
		bit = next + 1;
	}
	if (free != pa->pa_free) {
		/* on-disk bitmap disagrees with the PA's bookkeeping */
		ext4_msg(e4b->bd_sb, KERN_CRIT,
			 "pa %p: logic %lu, phys. %lu, len %lu",
			 pa, (unsigned long) pa->pa_lstart,
			 (unsigned long) pa->pa_pstart,
			 (unsigned long) pa->pa_len);
		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
					free, pa->pa_free);
		/*
		 * pa is already deleted so we use the value obtained
		 * from the bitmap and continue.
		 */
	}
	atomic_add(free, &sbi->s_mb_discarded);

	return err;
}

/* Release a locality-group PA: the whole pa_len range goes back to the
 * buddy in one shot.  Caller conventions match ext4_mb_release_inode_pa(). */
static noinline_for_stack int
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
				struct ext4_prealloc_space *pa)
{
	struct super_block *sb = e4b->bd_sb;
	ext4_group_t group;
	ext4_grpblk_t bit;

	trace_ext4_mb_release_group_pa(sb, pa);
	BUG_ON(pa->pa_deleted == 0);
	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);

	return 0;
}

/*
 * releases all preallocations in given group
 *
 * first, we need to decide discard policy:
 * - when do we discard
 *   1) ENOSPC
 * - how many do we discard
 *   1) how many requested
 */
static noinline_for_stack int
ext4_mb_discard_group_preallocations(struct super_block *sb,
					ext4_group_t group, int needed)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	struct list_head list;
	struct ext4_buddy e4b;
	int err;
	int busy = 0;
	int free = 0;

	mb_debug(1, "discard preallocation for group %u\n", group);

	if (list_empty(&grp->bb_prealloc_list))
		return 0;

	bitmap_bh = ext4_read_block_bitmap(sb, group);
	if (bitmap_bh == NULL) {
		ext4_error(sb, "Error reading block bitmap for %u", group);
		return 0;
	}

	err = ext4_mb_load_buddy(sb, group, &e4b);
	if (err) {
		ext4_error(sb, "Error loading buddy information for %u", group);
		put_bh(bitmap_bh);
		return 0;
	}

	if (needed == 0)
		/* 0 means "discard everything in the group" */
		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;

	INIT_LIST_HEAD(&list);
repeat:
	ext4_lock_group(sb, group);
	list_for_each_entry_safe(pa, tmp,
				&grp->bb_prealloc_list, pa_group_list) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/* in use right now; remember so we can retry */
			spin_unlock(&pa->pa_lock);
			busy = 1;
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}

		/* seems this one can be freed ... */
		pa->pa_deleted = 1;

		/* we can trust pa_free ... */
		free += pa->pa_free;

		spin_unlock(&pa->pa_lock);

		/* move it to a private list for the actual release below */
		list_del(&pa->pa_group_list);
		list_add(&pa->u.pa_tmp_list, &list);
	}

	/* if we still need more blocks and some PAs were used, try again */
	if (free < needed && busy) {
		busy = 0;
		ext4_unlock_group(sb, group);
		cond_resched();
		goto repeat;
	}

	/* found anything to free? */
	if (list_empty(&list)) {
		BUG_ON(free != 0);
		goto out;
	}

	/* now free all selected PAs */
	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {

		/* remove from object (inode or locality group) */
		spin_lock(pa->pa_obj_lock);
		list_del_rcu(&pa->pa_inode_list);
		spin_unlock(pa->pa_obj_lock);

		if (pa->pa_type == MB_GROUP_PA)
			ext4_mb_release_group_pa(&e4b, pa);
		else
			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);

		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}

out:
	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);
	put_bh(bitmap_bh);
	return free;
}

/*
 * releases all non-used preallocated blocks for given inode
 *
 * It's important to discard preallocations under i_data_sem
 * We don't want another block to be served from the prealloc
 * space when we are discarding the inode prealloc space.
 *
 * FIXME!!
Make sure it is valid at all the call sites
 */
void ext4_discard_preallocations(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bitmap_bh = NULL;
	struct ext4_prealloc_space *pa, *tmp;
	ext4_group_t group = 0;
	struct list_head list;
	struct ext4_buddy e4b;
	int err;

	if (!S_ISREG(inode->i_mode)) {
		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
		return;
	}

	mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
	trace_ext4_discard_preallocations(inode);

	INIT_LIST_HEAD(&list);

repeat:
	/* first, collect all pa's in the inode */
	spin_lock(&ei->i_prealloc_lock);
	while (!list_empty(&ei->i_prealloc_list)) {
		pa = list_entry(ei->i_prealloc_list.next,
				struct ext4_prealloc_space, pa_inode_list);
		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/* this shouldn't happen often - nobody should
			 * use preallocation while we're discarding it */
			spin_unlock(&pa->pa_lock);
			spin_unlock(&ei->i_prealloc_lock);
			ext4_msg(sb, KERN_ERR,
				 "uh-oh! used pa while discarding");
			WARN_ON(1);
			schedule_timeout_uninterruptible(HZ);
			goto repeat;

		}
		if (pa->pa_deleted == 0) {
			pa->pa_deleted = 1;
			spin_unlock(&pa->pa_lock);
			list_del_rcu(&pa->pa_inode_list);
			list_add(&pa->u.pa_tmp_list, &list);
			continue;
		}

		/* someone is deleting pa right now */
		spin_unlock(&pa->pa_lock);
		spin_unlock(&ei->i_prealloc_lock);

		/* we have to wait here because pa_deleted
		 * doesn't mean pa is already unlinked from
		 * the list. as we might be called from
		 * ->clear_inode() the inode will get freed
		 * and concurrent thread which is unlinking
		 * pa from inode's list may access already
		 * freed memory, bad-bad-bad */

		/* XXX: if this happens too often, we can
		 * add a flag to force wait only in case
		 * of ->clear_inode(), but not in case of
		 * regular truncate */
		schedule_timeout_uninterruptible(HZ);
		goto repeat;
	}
	spin_unlock(&ei->i_prealloc_lock);

	/* second pass: actually return the collected PAs to the buddy */
	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
		BUG_ON(pa->pa_type != MB_INODE_PA);
		group = ext4_get_group_number(sb, pa->pa_pstart);

		err = ext4_mb_load_buddy(sb, group, &e4b);
		if (err) {
			ext4_error(sb, "Error loading buddy information for %u",
					group);
			continue;
		}

		bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (bitmap_bh == NULL) {
			ext4_error(sb, "Error reading block bitmap for %u",
					group);
			ext4_mb_unload_buddy(&e4b);
			continue;
		}

		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		put_bh(bitmap_bh);

		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}

#ifdef CONFIG_EXT4_DEBUG
/* Debug dump of an allocation context plus every group's preallocations;
 * compiled only with CONFIG_EXT4_DEBUG. */
static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	struct super_block *sb = ac->ac_sb;
	ext4_group_t ngroups, i;

	if (!ext4_mballoc_debug ||
	    (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
		return;

	ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
			" Allocation context details:");
	ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
			ac->ac_status, ac->ac_flags);
	ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
			"goal %lu/%lu/%lu@%lu, "
			"best %lu/%lu/%lu@%lu cr %d",
			(unsigned long)ac->ac_o_ex.fe_group,
			(unsigned long)ac->ac_o_ex.fe_start,
			(unsigned long)ac->ac_o_ex.fe_len,
			(unsigned long)ac->ac_o_ex.fe_logical,
			(unsigned long)ac->ac_g_ex.fe_group,
			(unsigned long)ac->ac_g_ex.fe_start,
			(unsigned long)ac->ac_g_ex.fe_len,
			(unsigned long)ac->ac_g_ex.fe_logical,
			(unsigned long)ac->ac_b_ex.fe_group,
			(unsigned long)ac->ac_b_ex.fe_start,
			(unsigned long)ac->ac_b_ex.fe_len,
			(unsigned long)ac->ac_b_ex.fe_logical,
			(int)ac->ac_criteria);
	ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
		 ac->ac_ex_scanned, ac->ac_found);
	ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
	ngroups = ext4_get_groups_count(sb);
	for (i = 0; i < ngroups; i++) {
		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
		struct ext4_prealloc_space *pa;
		ext4_grpblk_t start;
		struct list_head *cur;
		ext4_lock_group(sb, i);
		list_for_each(cur, &grp->bb_prealloc_list) {
			pa = list_entry(cur, struct ext4_prealloc_space,
					pa_group_list);
			spin_lock(&pa->pa_lock);
			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
						     NULL, &start);
			spin_unlock(&pa->pa_lock);
			printk(KERN_ERR "PA:%u:%d:%u \n", i,
			       start, pa->pa_len);
		}
		ext4_unlock_group(sb, i);

		if (grp->bb_free == 0)
			continue;
		printk(KERN_ERR "%u: %d/%d \n", i,
		       grp->bb_free, grp->bb_fragments);
	}
	printk(KERN_ERR "\n");
}
#else
/* No-op stub when CONFIG_EXT4_DEBUG is disabled. */
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
	return;
}
#endif

/*
 * We use locality group preallocation for small size file.
The size of the * file is determined by the current size or the resulting size after * allocation which ever is larger * * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req */ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); int bsbits = ac->ac_sb->s_blocksize_bits; loff_t size, isize; if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) return; if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) return; size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) >> bsbits; if ((size == isize) && !ext4_fs_is_busy(sbi) && (atomic_read(&ac->ac_inode->i_writecount) == 0)) { ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; return; } if (sbi->s_mb_group_prealloc <= 0) { ac->ac_flags |= EXT4_MB_STREAM_ALLOC; return; } /* don't use group allocation for large files */ size = max(size, isize); if (size > sbi->s_mb_stream_request) { ac->ac_flags |= EXT4_MB_STREAM_ALLOC; return; } BUG_ON(ac->ac_lg != NULL); /* * locality group prealloc space are per cpu. The reason for having * per cpu locality group is to reduce the contention between block * request from multiple CPUs. 
*/ ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups); /* we're going to use group allocation */ ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; /* serialize all allocations in the group */ mutex_lock(&ac->ac_lg->lg_mutex); } static noinline_for_stack int ext4_mb_initialize_context(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) { struct super_block *sb = ar->inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_group_t group; unsigned int len; ext4_fsblk_t goal; ext4_grpblk_t block; /* we can't allocate > group size */ len = ar->len; /* just a dirty hack to filter too big requests */ if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) len = EXT4_CLUSTERS_PER_GROUP(sb); /* start searching from the goal */ goal = ar->goal; if (goal < le32_to_cpu(es->s_first_data_block) || goal >= ext4_blocks_count(es)) goal = le32_to_cpu(es->s_first_data_block); ext4_get_group_no_and_offset(sb, goal, &group, &block); /* set up allocation goals */ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); ac->ac_status = AC_STATUS_CONTINUE; ac->ac_sb = sb; ac->ac_inode = ar->inode; ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; ac->ac_o_ex.fe_group = group; ac->ac_o_ex.fe_start = block; ac->ac_o_ex.fe_len = len; ac->ac_g_ex = ac->ac_o_ex; ac->ac_flags = ar->flags; /* we have to define context: we'll we work with a file or * locality group. this is a policy, actually */ ext4_mb_group_or_file(ac); mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " "left: %u/%u, right %u/%u to %swritable\n", (unsigned) ar->len, (unsigned) ar->logical, (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, (unsigned) ar->lleft, (unsigned) ar->pleft, (unsigned) ar->lright, (unsigned) ar->pright, atomic_read(&ar->inode->i_writecount) ? 
"" : "non-"); return 0; } static noinline_for_stack void ext4_mb_discard_lg_preallocations(struct super_block *sb, struct ext4_locality_group *lg, int order, int total_entries) { ext4_group_t group = 0; struct ext4_buddy e4b; struct list_head discard_list; struct ext4_prealloc_space *pa, *tmp; mb_debug(1, "discard locality group preallocation\n"); INIT_LIST_HEAD(&discard_list); spin_lock(&lg->lg_prealloc_lock); list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], pa_inode_list) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { /* * This is the pa that we just used * for block allocation. So don't * free that */ spin_unlock(&pa->pa_lock); continue; } if (pa->pa_deleted) { spin_unlock(&pa->pa_lock); continue; } /* only lg prealloc space */ BUG_ON(pa->pa_type != MB_GROUP_PA); /* seems this one can be freed ... */ pa->pa_deleted = 1; spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); list_add(&pa->u.pa_tmp_list, &discard_list); total_entries--; if (total_entries <= 5) { /* * we want to keep only 5 entries * allowing it to grow to 8. This * mak sure we don't call discard * soon for this list. */ break; } } spin_unlock(&lg->lg_prealloc_lock); list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { group = ext4_get_group_number(sb, pa->pa_pstart); if (ext4_mb_load_buddy(sb, group, &e4b)) { ext4_error(sb, "Error loading buddy information for %u", group); continue; } ext4_lock_group(sb, group); list_del(&pa->pa_group_list); ext4_mb_release_group_pa(&e4b, pa); ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); list_del(&pa->u.pa_tmp_list); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } } /* * We have incremented pa_count. So it cannot be freed at this * point. Also we hold lg_mutex. So no parallel allocation is * possible from this lg. That means pa_free cannot be updated. * * A parallel ext4_mb_discard_group_preallocations is possible. * which can cause the lg_prealloc_list to be updated. 
*/ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) { int order, added = 0, lg_prealloc_count = 1; struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg = ac->ac_lg; struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; order = fls(pa->pa_free) - 1; if (order > PREALLOC_TB_SIZE - 1) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; /* Add the prealloc space to lg */ spin_lock(&lg->lg_prealloc_lock); list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], pa_inode_list) { spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted) { spin_unlock(&tmp_pa->pa_lock); continue; } if (!added && pa->pa_free < tmp_pa->pa_free) { /* Add to the tail of the previous entry */ list_add_tail_rcu(&pa->pa_inode_list, &tmp_pa->pa_inode_list); added = 1; /* * we want to count the total * number of entries in the list */ } spin_unlock(&tmp_pa->pa_lock); lg_prealloc_count++; } if (!added) list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list[order]); spin_unlock(&lg->lg_prealloc_lock); /* Now trim the list to be not more than 8 elements */ if (lg_prealloc_count > 8) { ext4_mb_discard_lg_preallocations(sb, lg, order, lg_prealloc_count); return; } return ; } /* * release all resource we used in allocation */ static int ext4_mb_release_context(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_prealloc_space *pa = ac->ac_pa; if (pa) { if (pa->pa_type == MB_GROUP_PA) { /* see comment in ext4_mb_use_group_pa() */ spin_lock(&pa->pa_lock); pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); pa->pa_free -= ac->ac_b_ex.fe_len; pa->pa_len -= ac->ac_b_ex.fe_len; spin_unlock(&pa->pa_lock); } } if (pa) { /* * We want to add the pa to the right bucket. * Remove it from the list and while adding * make sure the list to which we are adding * doesn't grow big. 
*/ if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { spin_lock(pa->pa_obj_lock); list_del_rcu(&pa->pa_inode_list); spin_unlock(pa->pa_obj_lock); ext4_mb_add_n_trim(ac); } ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) page_cache_release(ac->ac_bitmap_page); if (ac->ac_buddy_page) page_cache_release(ac->ac_buddy_page); if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); return 0; } static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); int ret; int freed = 0; trace_ext4_mb_discard_preallocations(sb, needed); for (i = 0; i < ngroups && needed > 0; i++) { ret = ext4_mb_discard_group_preallocations(sb, i, needed); freed += ret; needed -= ret; } return freed; } /* * Main entry point into mballoc to allocate blocks * it tries to use preallocation first, then falls back * to usual allocation */ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) { int freed; struct ext4_allocation_context *ac = NULL; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_clstrs = 0; might_sleep(); sb = ar->inode->i_sb; sbi = EXT4_SB(sb); trace_ext4_request_blocks(ar); /* Allow to use superuser reservation for quota file */ if (IS_NOQUOTA(ar->inode)) ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; /* * For delayed allocation, we could skip the ENOSPC and * EDQUOT check, as blocks and quotas have been already * reserved when data being copied into pagecache. */ if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) ar->flags |= EXT4_MB_DELALLOC_RESERVED; else { /* Without delayed allocation we need to verify * there is enough free blocks to do block allocation * and verify allocation doesn't exceed the quota limits. 
*/ while (ar->len && ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { /* let others to free the space */ cond_resched(); ar->len = ar->len >> 1; } if (!ar->len) { *errp = -ENOSPC; return 0; } reserv_clstrs = ar->len; if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { dquot_alloc_block_nofail(ar->inode, EXT4_C2B(sbi, ar->len)); } else { while (ar->len && dquot_alloc_block(ar->inode, EXT4_C2B(sbi, ar->len))) { ar->flags |= EXT4_MB_HINT_NOPREALLOC; ar->len--; } } inquota = ar->len; if (ar->len == 0) { *errp = -EDQUOT; goto out; } } ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); if (!ac) { ar->len = 0; *errp = -ENOMEM; goto out; } *errp = ext4_mb_initialize_context(ac, ar); if (*errp) { ar->len = 0; goto out; } ac->ac_op = EXT4_MB_HISTORY_PREALLOC; if (!ext4_mb_use_preallocated(ac)) { ac->ac_op = EXT4_MB_HISTORY_ALLOC; ext4_mb_normalize_request(ac, ar); repeat: /* allocate space in core */ *errp = ext4_mb_regular_allocator(ac); if (*errp) { ext4_discard_allocated_blocks(ac); goto errout; } /* as we've just preallocated more space than * user requested orinally, we store allocated * space in a special descriptor */ if (ac->ac_status == AC_STATUS_FOUND && ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) ext4_mb_new_preallocation(ac); } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); if (*errp == -EAGAIN) { /* * drop the reference that we took * in ext4_mb_use_best_found */ ext4_mb_release_context(ac); ac->ac_b_ex.fe_group = 0; ac->ac_b_ex.fe_start = 0; ac->ac_b_ex.fe_len = 0; ac->ac_status = AC_STATUS_CONTINUE; goto repeat; } else if (*errp) { ext4_discard_allocated_blocks(ac); goto errout; } else { block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); ar->len = ac->ac_b_ex.fe_len; } } else { freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); if (freed) goto repeat; *errp = -ENOSPC; } errout: if (*errp) { ac->ac_b_ex.fe_len = 0; ar->len = 0; ext4_mb_show_ac(ac); } ext4_mb_release_context(ac); out: if (ac) 
kmem_cache_free(ext4_ac_cachep, ac); if (inquota && ar->len < inquota) dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); if (!ar->len) { if (!ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyclusters_counter, reserv_clstrs); } trace_ext4_allocate_blocks(ar, (unsigned long long)block); return block; } /* * We can merge two free data extents only if the physical blocks * are contiguous, AND the extents were freed by the same transaction, * AND the blocks are associated with the same group. */ static int can_merge(struct ext4_free_data *entry1, struct ext4_free_data *entry2) { if ((entry1->efd_tid == entry2->efd_tid) && (entry1->efd_group == entry2->efd_group) && ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster)) return 1; return 0; } static noinline_for_stack int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, struct ext4_free_data *new_entry) { ext4_group_t group = e4b->bd_group; ext4_grpblk_t cluster; struct ext4_free_data *entry; struct ext4_group_info *db = e4b->bd_info; struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct rb_node **n = &db->bb_free_root.rb_node, *node; struct rb_node *parent = NULL, *new_node; BUG_ON(!ext4_handle_valid(handle)); BUG_ON(e4b->bd_bitmap_page == NULL); BUG_ON(e4b->bd_buddy_page == NULL); new_node = &new_entry->efd_node; cluster = new_entry->efd_start_cluster; if (!*n) { /* first free block exent. 
We need to protect buddy cache from being freed, * otherwise we'll refresh it from * on-disk bitmap and lose not-yet-available * blocks */ page_cache_get(e4b->bd_buddy_page); page_cache_get(e4b->bd_bitmap_page); } while (*n) { parent = *n; entry = rb_entry(parent, struct ext4_free_data, efd_node); if (cluster < entry->efd_start_cluster) n = &(*n)->rb_left; else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) n = &(*n)->rb_right; else { ext4_grp_locked_error(sb, group, 0, ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, cluster), "Block already on to-be-freed list"); return 0; } } rb_link_node(new_node, parent, n); rb_insert_color(new_node, &db->bb_free_root); /* Now try to see the extent can be merged to left and right */ node = rb_prev(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); if (can_merge(entry, new_entry) && ext4_journal_callback_try_del(handle, &entry->efd_jce)) { new_entry->efd_start_cluster = entry->efd_start_cluster; new_entry->efd_count += entry->efd_count; rb_erase(node, &(db->bb_free_root)); kmem_cache_free(ext4_free_data_cachep, entry); } } node = rb_next(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); if (can_merge(new_entry, entry) && ext4_journal_callback_try_del(handle, &entry->efd_jce)) { new_entry->efd_count += entry->efd_count; rb_erase(node, &(db->bb_free_root)); kmem_cache_free(ext4_free_data_cachep, entry); } } /* Add the extent to transaction's private list */ ext4_journal_callback_add(handle, ext4_free_data_callback, &new_entry->efd_jce); return 0; } /** * ext4_free_blocks() -- Free given blocks and update quota * @handle: handle for this transaction * @inode: inode * @block: start physical block to free * @count: number of blocks to count * @flags: flags used by ext4_free_blocks */ void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags) { struct buffer_head *bitmap_bh = 
NULL; struct super_block *sb = inode->i_sb; struct ext4_group_desc *gdp; unsigned int overflow; ext4_grpblk_t bit; struct buffer_head *gd_bh; ext4_group_t block_group; struct ext4_sb_info *sbi; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_buddy e4b; unsigned int count_clusters; int err = 0; int ret; might_sleep(); if (bh) { if (block) BUG_ON(block != bh->b_blocknr); else block = bh->b_blocknr; } sbi = EXT4_SB(sb); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && !ext4_data_block_valid(sbi, block, count)) { ext4_error(sb, "Freeing blocks not in datazone - " "block = %llu, count = %lu", block, count); goto error_return; } ext4_debug("freeing block %llu\n", block); trace_ext4_free_blocks(inode, block, count, flags); if (flags & EXT4_FREE_BLOCKS_FORGET) { struct buffer_head *tbh = bh; int i; BUG_ON(bh && (count > 1)); for (i = 0; i < count; i++) { if (!bh) tbh = sb_find_get_block(inode->i_sb, block + i); if (unlikely(!tbh)) continue; ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, inode, tbh, block + i); } } /* * We need to make sure we don't reuse the freed block until * after the transaction is committed, which we can do by * treating the block as metadata, below. We make an * exception if the inode is to be written in writeback mode * since writeback mode has weak data consistency guarantees. */ if (!ext4_should_writeback_data(inode)) flags |= EXT4_FREE_BLOCKS_METADATA; /* * If the extent to be freed does not begin on a cluster * boundary, we need to deal with partial clusters at the * beginning and end of the extent. Normally we will free * blocks at the beginning or the end unless we are explicitly * requested to avoid doing so. 
*/ overflow = EXT4_PBLK_COFF(sbi, block); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { overflow = sbi->s_cluster_ratio - overflow; block += overflow; if (count > overflow) count -= overflow; else return; } else { block -= overflow; count += overflow; } } overflow = EXT4_LBLK_COFF(sbi, count); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { if (count > overflow) count -= overflow; else return; } else count += sbi->s_cluster_ratio - overflow; } do_more: overflow = 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); /* * Check to see if we are freeing blocks across a group * boundary. */ if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { overflow = EXT4_C2B(sbi, bit) + count - EXT4_BLOCKS_PER_GROUP(sb); count -= overflow; } count_clusters = EXT4_NUM_B2C(sbi, count); bitmap_bh = ext4_read_block_bitmap(sb, block_group); if (!bitmap_bh) { err = -EIO; goto error_return; } gdp = ext4_get_group_desc(sb, block_group, &gd_bh); if (!gdp) { err = -EIO; goto error_return; } if (in_range(ext4_block_bitmap(sb, gdp), block, count) || in_range(ext4_inode_bitmap(sb, gdp), block, count) || in_range(block, ext4_inode_table(sb, gdp), EXT4_SB(sb)->s_itb_per_group) || in_range(block + count - 1, ext4_inode_table(sb, gdp), EXT4_SB(sb)->s_itb_per_group)) { ext4_error(sb, "Freeing blocks in system zone - " "Block = %llu, count = %lu", block, count); /* err = 0. ext4_std_error should be a no op */ goto error_return; } BUFFER_TRACE(bitmap_bh, "getting write access"); err = ext4_journal_get_write_access(handle, bitmap_bh); if (err) goto error_return; /* * We are about to modify some metadata. 
Call the journal APIs * to unshare ->b_data if a currently-committing transaction is * using it */ BUFFER_TRACE(gd_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, gd_bh); if (err) goto error_return; #ifdef AGGRESSIVE_CHECK { int i; for (i = 0; i < count_clusters; i++) BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); } #endif trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); err = ext4_mb_load_buddy(sb, block_group, &e4b); if (err) goto error_return; if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) { struct ext4_free_data *new_entry; /* * blocks being freed are metadata. these blocks shouldn't * be used until this transaction is committed */ retry: new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); if (!new_entry) { /* * We use a retry loop because * ext4_free_blocks() is not allowed to fail. */ cond_resched(); congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } new_entry->efd_start_cluster = bit; new_entry->efd_group = block_group; new_entry->efd_count = count_clusters; new_entry->efd_tid = handle->h_transaction->t_tid; ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); ext4_mb_free_metadata(handle, &e4b, new_entry); } else { /* need to update group_info->bb_free and bitmap * with group lock held. 
generate_buddy look at * them with group lock_held */ if (test_opt(sb, DISCARD)) { err = ext4_issue_discard(sb, block_group, bit, count, 0); if (err && err != -EOPNOTSUPP) ext4_msg(sb, KERN_WARNING, "discard request in" " group:%d block:%d count:%lu failed" " with %d", block_group, bit, count, err); } else EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); mb_free_blocks(inode, &e4b, bit, count_clusters); } ret = ext4_free_group_clusters(sb, gdp) + count_clusters; ext4_free_group_clusters_set(sb, gdp, ret); ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); ext4_group_desc_csum_set(sb, block_group, gdp); ext4_unlock_group(sb, block_group); if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); atomic64_add(count_clusters, &sbi->s_flex_groups[flex_group].free_clusters); } if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) { percpu_counter_add(&sbi->s_dirtyclusters_counter, count_clusters); spin_lock(&ei->i_block_reservation_lock); if (flags & EXT4_FREE_BLOCKS_METADATA) ei->i_reserved_meta_blocks += count_clusters; else ei->i_reserved_data_blocks += count_clusters; spin_unlock(&ei->i_block_reservation_lock); if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) dquot_reclaim_block(inode, EXT4_C2B(sbi, count_clusters)); } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); ext4_mb_unload_buddy(&e4b); /* We dirtied the bitmap block */ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); /* And the group descriptor block */ BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); if (!err) err = ret; if (overflow && !err) { block += count; count = overflow; put_bh(bitmap_bh); goto do_more; } error_return: 
brelse(bitmap_bh); ext4_std_error(sb, err); return; } /** * ext4_group_add_blocks() -- Add given blocks to an existing group * @handle: handle to this transaction * @sb: super block * @block: start physical block to add to the block group * @count: number of blocks to free * * This marks the blocks as free in the bitmap and buddy. */ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, ext4_fsblk_t block, unsigned long count) { struct buffer_head *bitmap_bh = NULL; struct buffer_head *gd_bh; ext4_group_t block_group; ext4_grpblk_t bit; unsigned int i; struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_buddy e4b; int err = 0, ret, blk_free_count; ext4_grpblk_t blocks_freed; ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); if (count == 0) return 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); /* * Check to see if we are freeing blocks across a group * boundary. */ if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { ext4_warning(sb, "too much blocks added to group %u\n", block_group); err = -EINVAL; goto error_return; } bitmap_bh = ext4_read_block_bitmap(sb, block_group); if (!bitmap_bh) { err = -EIO; goto error_return; } desc = ext4_get_group_desc(sb, block_group, &gd_bh); if (!desc) { err = -EIO; goto error_return; } if (in_range(ext4_block_bitmap(sb, desc), block, count) || in_range(ext4_inode_bitmap(sb, desc), block, count) || in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || in_range(block + count - 1, ext4_inode_table(sb, desc), sbi->s_itb_per_group)) { ext4_error(sb, "Adding blocks in system zones - " "Block = %llu, count = %lu", block, count); err = -EINVAL; goto error_return; } BUFFER_TRACE(bitmap_bh, "getting write access"); err = ext4_journal_get_write_access(handle, bitmap_bh); if (err) goto error_return; /* * We are about to modify some metadata. 
Call the journal APIs * to unshare ->b_data if a currently-committing transaction is * using it */ BUFFER_TRACE(gd_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, gd_bh); if (err) goto error_return; for (i = 0, blocks_freed = 0; i < count; i++) { BUFFER_TRACE(bitmap_bh, "clear bit"); if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { ext4_error(sb, "bit already cleared for block %llu", (ext4_fsblk_t)(block + i)); BUFFER_TRACE(bitmap_bh, "bit already cleared"); } else { blocks_freed++; } } err = ext4_mb_load_buddy(sb, block_group, &e4b); if (err) goto error_return; /* * need to update group_info->bb_free and bitmap * with group lock held. generate_buddy look at * them with group lock_held */ ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count); mb_free_blocks(NULL, &e4b, bit, count); blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); ext4_free_group_clusters_set(sb, desc, blk_free_count); ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); ext4_group_desc_csum_set(sb, block_group, desc); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeclusters_counter, EXT4_NUM_B2C(sbi, blocks_freed)); if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, block_group); atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), &sbi->s_flex_groups[flex_group].free_clusters); } ext4_mb_unload_buddy(&e4b); /* We dirtied the bitmap block */ BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); /* And the group descriptor block */ BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); if (!err) err = ret; error_return: brelse(bitmap_bh); ext4_std_error(sb, err); return err; } /** * ext4_trim_extent -- function to TRIM one single free extent in the group * @sb: super block for the file system * @start: starting block of the free extent in the alloc. 
group * @count: number of blocks to TRIM * @group: alloc. group we are working with * @e4b: ext4 buddy for the group * @blkdev_flags: flags for the block device * * Trim "count" blocks starting at "start" in the "group". To assure that no * one will allocate those blocks, mark it as used in buddy bitmap. This must * be called with under the group lock. */ static int ext4_trim_extent(struct super_block *sb, int start, int count, ext4_group_t group, struct ext4_buddy *e4b, unsigned long blkdev_flags) { struct ext4_free_extent ex; int ret = 0; trace_ext4_trim_extent(sb, group, start, count); assert_spin_locked(ext4_group_lock_ptr(sb, group)); ex.fe_start = start; ex.fe_group = group; ex.fe_len = count; /* * Mark blocks used, so no one can reuse them while * being trimmed. */ mb_mark_used(e4b, &ex); ext4_unlock_group(sb, group); ret = ext4_issue_discard(sb, group, start, count, blkdev_flags); ext4_lock_group(sb, group); mb_free_blocks(NULL, e4b, start, ex.fe_len); return ret; } /** * ext4_trim_all_free -- function to trim all free space in alloc. group * @sb: super block for file system * @group: group to be trimmed * @start: first group block to examine * @max: last group block to examine * @minblocks: minimum extent block count * @blkdev_flags: flags for the block device * * ext4_trim_all_free walks through group's buddy bitmap searching for free * extents. When the free block is found, ext4_trim_extent is called to TRIM * the extent. * * * ext4_trim_all_free walks through group's block bitmap searching for free * extents. When the free extent is found, mark it as used in group buddy * bitmap. Then issue a TRIM command on this extent and free the extent in * the group buddy bitmap. This is done until whole group is scanned. 
*/ static ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks, unsigned long blkdev_flags) { void *bitmap; ext4_grpblk_t next, count = 0, free_count = 0; struct ext4_buddy e4b; int ret = 0; trace_ext4_trim_all_free(sb, group, start, max); ret = ext4_mb_load_buddy(sb, group, &e4b); if (ret) { ext4_error(sb, "Error in loading buddy " "information for %u", group); return ret; } bitmap = e4b.bd_bitmap; ext4_lock_group(sb, group); if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) && minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks)) goto out; start = (e4b.bd_info->bb_first_free > start) ? e4b.bd_info->bb_first_free : start; while (start <= max) { start = mb_find_next_zero_bit(bitmap, max + 1, start); if (start > max) break; next = mb_find_next_bit(bitmap, max + 1, start); if ((next - start) >= minblocks) { ret = ext4_trim_extent(sb, start, next - start, group, &e4b, blkdev_flags); if (ret && ret != -EOPNOTSUPP) break; ret = 0; count += next - start; } free_count += next - start; start = next + 1; if (fatal_signal_pending(current)) { count = -ERESTARTSYS; break; } if (need_resched()) { ext4_unlock_group(sb, group); cond_resched(); ext4_lock_group(sb, group); } if ((e4b.bd_info->bb_free - free_count) < minblocks) break; } if (!ret) { ret = count; EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); } out: ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); ext4_debug("trimmed %d blocks in the group %d\n", count, group); return ret; } /** * ext4_trim_fs() -- trim ioctl handle function * @sb: superblock for filesystem * @range: fstrim_range structure * @blkdev_flags: flags for the block device * * start: First Byte to trim * len: number of Bytes to trim from start * minlen: minimum extent length in Bytes * ext4_trim_fs goes through all allocation groups containing Bytes from * start to start+len. For each such a group ext4_trim_all_free function * is invoked to trim all free space. 
*/ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range, unsigned long blkdev_flags) { struct ext4_group_info *grp; ext4_group_t group, first_group, last_group; ext4_grpblk_t cnt = 0, first_cluster, last_cluster; uint64_t start, end, minlen, trimmed = 0; ext4_fsblk_t first_data_blk = le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); int ret = 0; start = range->start >> sb->s_blocksize_bits; end = start + (range->len >> sb->s_blocksize_bits) - 1; minlen = EXT4_NUM_B2C(EXT4_SB(sb), range->minlen >> sb->s_blocksize_bits); if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || start >= max_blks || range->len < sb->s_blocksize) return -EINVAL; if (end >= max_blks) end = max_blks - 1; if (end <= first_data_blk) goto out; if (start < first_data_blk) start = first_data_blk; /* Determine first and last group to examine based on start and end */ ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, &first_group, &first_cluster); ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, &last_group, &last_cluster); /* end now represents the last cluster to discard in this group */ end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; for (group = first_group; group <= last_group; group++) { grp = ext4_get_group_info(sb, group); /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { ret = ext4_mb_init_group(sb, group); if (ret) break; } /* * For all the groups except the last one, last cluster will * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to * change it for the last group, note that last_cluster is * already computed earlier by ext4_get_group_no_and_offset() */ if (group == last_group) end = last_cluster; if (grp->bb_free >= minlen) { cnt = ext4_trim_all_free(sb, group, first_cluster, end, minlen, blkdev_flags); if (cnt < 0) { ret = cnt; break; } trimmed += cnt; } /* * For every group except the first one, we are sure * that the first cluster to discard will be 
cluster #0. */ first_cluster = 0; } if (!ret) atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen); out: range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; return ret; }
gpl-2.0
fireshots/linux
arch/arm/mach-pxa/trizeps4.c
604
14590
/* * linux/arch/arm/mach-pxa/trizeps4.c * * Support for the Keith und Koep Trizeps4 Module Platform. * * Author: Jürgen Schindele * Created: 20 02, 2006 * Copyright: Jürgen Schindele * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/bitops.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/dm9000.h> #include <linux/mtd/physmap.h> #include <linux/mtd/partitions.h> #include <linux/regulator/machine.h> #include <linux/i2c/pxa-i2c.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/memory.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/flash.h> #include <mach/pxa27x.h> #include <mach/trizeps4.h> #include <mach/audio.h> #include <linux/platform_data/video-pxafb.h> #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/irda-pxaficp.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <mach/smemc.h> #include "generic.h" #include "devices.h" /* comment out the following line if you want to use the * Standard UART from PXA for serial / irda transmission * and acivate it if you have status leds connected */ #define STATUS_LEDS_ON_STUART_PINS 1 /***************************************************************************** * MultiFunctionPins of CPU *****************************************************************************/ static unsigned long trizeps4_pin_config[] __initdata = { /* Chip Selects */ GPIO15_nCS_1, /* DiskOnChip CS */ GPIO93_GPIO, /* TRIZEPS4_DOC_IRQ */ GPIO94_GPIO, /* DOC lock */ GPIO78_nCS_2, /* DM9000 CS */ 
GPIO101_GPIO, /* TRIZEPS4_ETH_IRQ */ GPIO79_nCS_3, /* Logic CS */ GPIO0_GPIO | WAKEUP_ON_EDGE_RISE, /* Logic irq */ /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, /* LCD - 16bpp Active TFT */ GPIOxx_LCD_TFT_16BPP, /* UART */ GPIO9_FFUART_CTS, GPIO10_FFUART_DCD, GPIO16_FFUART_TXD, GPIO33_FFUART_DSR, GPIO38_FFUART_RI, GPIO82_FFUART_DTR, GPIO83_FFUART_RTS, GPIO96_FFUART_RXD, GPIO42_BTUART_RXD, GPIO43_BTUART_TXD, GPIO44_BTUART_CTS, GPIO45_BTUART_RTS, #ifdef STATUS_LEDS_ON_STUART_PINS GPIO46_GPIO, GPIO47_GPIO, #else GPIO46_STUART_RXD, GPIO47_STUART_TXD, #endif /* PCMCIA */ GPIO11_GPIO, /* TRIZEPS4_CD_IRQ */ GPIO13_GPIO, /* TRIZEPS4_READY_NINT */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO54_nPCE_2, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, GPIO102_nPCE_1, GPIO104_PSKTSEL, /* MultiMediaCard */ GPIO32_MMC_CLK, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, GPIO112_MMC_CMD, GPIO12_GPIO, /* TRIZEPS4_MMC_IRQ */ /* USB OHCI */ GPIO88_USBH1_PWR, /* USBHPWR1 */ GPIO89_USBH1_PEN, /* USBHPEN1 */ /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, }; static unsigned long trizeps4wl_pin_config[] __initdata = { /* SSP 2 */ GPIO14_SSP2_SFRM, GPIO19_SSP2_SCLK, GPIO53_GPIO, /* TRIZEPS4_SPI_IRQ */ GPIO86_SSP2_RXD, GPIO87_SSP2_TXD, }; /**************************************************************************** * ONBOARD FLASH ****************************************************************************/ static struct mtd_partition trizeps4_partitions[] = { { .name = "Bootloader", .offset = 0x00000000, .size = 0x00040000, .mask_flags = MTD_WRITEABLE /* force read-only */ }, { .name = "Backup", .offset = 0x00040000, .size = 0x00040000, }, { .name = "Image", .offset = 0x00080000, .size = 0x01080000, }, { .name = "IPSM", .offset = 0x01100000, .size = 0x00e00000, }, { .name = "Registry", .offset = 0x01f00000, .size = MTDPART_SIZ_FULL, } }; static struct physmap_flash_data trizeps4_flash_data[] = { { .width 
= 4, /* bankwidth in bytes */ .parts = trizeps4_partitions, .nr_parts = ARRAY_SIZE(trizeps4_partitions) } }; static struct resource flash_resource = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = trizeps4_flash_data, }, .resource = &flash_resource, .num_resources = 1, }; /**************************************************************************** * DAVICOM DM9000 Ethernet ****************************************************************************/ static struct resource dm9000_resources[] = { [0] = { .start = TRIZEPS4_ETH_PHYS+0x300, .end = TRIZEPS4_ETH_PHYS+0x400-1, .flags = IORESOURCE_MEM, }, [1] = { .start = TRIZEPS4_ETH_PHYS+0x8300, .end = TRIZEPS4_ETH_PHYS+0x8400-1, .flags = IORESOURCE_MEM, }, [2] = { .start = TRIZEPS4_ETH_IRQ, .end = TRIZEPS4_ETH_IRQ, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; static struct dm9000_plat_data tri_dm9000_platdata = { .flags = DM9000_PLATF_32BITONLY, }; static struct platform_device dm9000_device = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(dm9000_resources), .resource = dm9000_resources, .dev = { .platform_data = &tri_dm9000_platdata, } }; /**************************************************************************** * LED's on GPIO pins of PXA ****************************************************************************/ static struct gpio_led trizeps4_led[] = { #ifdef STATUS_LEDS_ON_STUART_PINS { .name = "led0:orange:heartbeat", /* */ .default_trigger = "heartbeat", .gpio = GPIO_HEARTBEAT_LED, .active_low = 1, }, { .name = "led1:yellow:cpubusy", /* */ .default_trigger = "cpu-busy", .gpio = GPIO_SYS_BUSY_LED, .active_low = 1, }, #endif }; static struct gpio_led_platform_data trizeps4_led_data = { .leds = trizeps4_led, .num_leds = ARRAY_SIZE(trizeps4_led), }; static struct platform_device leds_devices = { .name = "leds-gpio", .id = -1, .dev = { 
.platform_data = &trizeps4_led_data, }, }; static struct platform_device *trizeps4_devices[] __initdata = { &flash_device, &dm9000_device, &leds_devices, }; static struct platform_device *trizeps4wl_devices[] __initdata = { &flash_device, &leds_devices, }; static short trizeps_conxs_bcr; /* PCCARD power switching supports only 3,3V */ void board_pcmcia_power(int power) { if (power) { /* switch power on, put in reset and enable buffers */ trizeps_conxs_bcr |= power; trizeps_conxs_bcr |= ConXS_BCR_CF_RESET; trizeps_conxs_bcr &= ~ConXS_BCR_CF_BUF_EN; BCR_writew(trizeps_conxs_bcr); /* wait a little */ udelay(2000); /* take reset away */ trizeps_conxs_bcr &= ~ConXS_BCR_CF_RESET; BCR_writew(trizeps_conxs_bcr); udelay(2000); } else { /* put in reset */ trizeps_conxs_bcr |= ConXS_BCR_CF_RESET; BCR_writew(trizeps_conxs_bcr); udelay(1000); /* switch power off */ trizeps_conxs_bcr &= ~0xf; BCR_writew(trizeps_conxs_bcr); } pr_debug("%s: o%s 0x%x\n", __func__, power ? "n" : "ff", trizeps_conxs_bcr); } EXPORT_SYMBOL(board_pcmcia_power); /* backlight power switching for LCD panel */ static void board_backlight_power(int on) { if (on) trizeps_conxs_bcr |= ConXS_BCR_L_DISP; else trizeps_conxs_bcr &= ~ConXS_BCR_L_DISP; pr_debug("%s: o%s 0x%x\n", __func__, on ? 
"n" : "ff", trizeps_conxs_bcr); BCR_writew(trizeps_conxs_bcr); } /* a I2C based RTC is known on CONXS board */ static struct i2c_board_info trizeps4_i2c_devices[] __initdata = { { I2C_BOARD_INFO("rtc-pcf8593", 0x51) } }; /**************************************************************************** * MMC card slot external to module ****************************************************************************/ static int trizeps4_mci_init(struct device *dev, irq_handler_t mci_detect_int, void *data) { int err; err = request_irq(TRIZEPS4_MMC_IRQ, mci_detect_int, IRQF_TRIGGER_RISING, "MMC card detect", data); if (err) { printk(KERN_ERR "trizeps4_mci_init: MMC/SD: can't request" "MMC card detect IRQ\n"); return -1; } return 0; } static void trizeps4_mci_exit(struct device *dev, void *data) { free_irq(TRIZEPS4_MMC_IRQ, data); } static struct pxamci_platform_data trizeps4_mci_platform_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .detect_delay_ms= 10, .init = trizeps4_mci_init, .exit = trizeps4_mci_exit, .get_ro = NULL, /* write-protection not supported */ .setpower = NULL, /* power-switching not supported */ .gpio_card_detect = -1, .gpio_card_ro = -1, .gpio_power = -1, }; /**************************************************************************** * IRDA mode switching on stuart ****************************************************************************/ #ifndef STATUS_LEDS_ON_STUART_PINS static short trizeps_conxs_ircr; static int trizeps4_irda_startup(struct device *dev) { trizeps_conxs_ircr &= ~ConXS_IRCR_SD; IRCR_writew(trizeps_conxs_ircr); return 0; } static void trizeps4_irda_shutdown(struct device *dev) { trizeps_conxs_ircr |= ConXS_IRCR_SD; IRCR_writew(trizeps_conxs_ircr); } static void trizeps4_irda_transceiver_mode(struct device *dev, int mode) { unsigned long flags; local_irq_save(flags); /* Switch mode */ if (mode & IR_SIRMODE) trizeps_conxs_ircr &= ~ConXS_IRCR_MODE; /* Slow mode */ else if (mode & IR_FIRMODE) trizeps_conxs_ircr |= ConXS_IRCR_MODE; /* 
Fast mode */ /* Switch power */ if (mode & IR_OFF) trizeps_conxs_ircr |= ConXS_IRCR_SD; else trizeps_conxs_ircr &= ~ConXS_IRCR_SD; IRCR_writew(trizeps_conxs_ircr); local_irq_restore(flags); pxa2xx_transceiver_mode(dev, mode); } static struct pxaficp_platform_data trizeps4_ficp_platform_data = { .gpio_pwdown = -1, .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF, .transceiver_mode = trizeps4_irda_transceiver_mode, .startup = trizeps4_irda_startup, .shutdown = trizeps4_irda_shutdown, }; #endif /**************************************************************************** * OHCI USB port ****************************************************************************/ static struct pxaohci_platform_data trizeps4_ohci_platform_data = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW, }; static struct map_desc trizeps4_io_desc[] __initdata = { { /* ConXS CFSR */ .virtual = TRIZEPS4_CFSR_VIRT, .pfn = __phys_to_pfn(TRIZEPS4_CFSR_PHYS), .length = 0x00001000, .type = MT_DEVICE }, { /* ConXS BCR */ .virtual = TRIZEPS4_BOCR_VIRT, .pfn = __phys_to_pfn(TRIZEPS4_BOCR_PHYS), .length = 0x00001000, .type = MT_DEVICE }, { /* ConXS IRCR */ .virtual = TRIZEPS4_IRCR_VIRT, .pfn = __phys_to_pfn(TRIZEPS4_IRCR_PHYS), .length = 0x00001000, .type = MT_DEVICE }, { /* ConXS DCR */ .virtual = TRIZEPS4_DICR_VIRT, .pfn = __phys_to_pfn(TRIZEPS4_DICR_PHYS), .length = 0x00001000, .type = MT_DEVICE }, { /* ConXS UPSR */ .virtual = TRIZEPS4_UPSR_VIRT, .pfn = __phys_to_pfn(TRIZEPS4_UPSR_PHYS), .length = 0x00001000, .type = MT_DEVICE } }; static struct pxafb_mode_info sharp_lcd_mode = { .pixclock = 78000, .xres = 640, .yres = 480, .bpp = 8, .hsync_len = 4, .left_margin = 4, .right_margin = 4, .vsync_len = 2, .upper_margin = 0, .lower_margin = 0, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, .cmap_greyscale = 0, }; static struct pxafb_mach_info sharp_lcd = { .modes = &sharp_lcd_mode, .num_modes = 1, .lcd_conn = LCD_COLOR_DSTN_16BPP | LCD_PCLK_EDGE_FALL, 
.cmap_inverse = 0, .cmap_static = 0, .pxafb_backlight_power = board_backlight_power, }; static struct pxafb_mode_info toshiba_lcd_mode = { .pixclock = 39720, .xres = 640, .yres = 480, .bpp = 8, .hsync_len = 63, .left_margin = 12, .right_margin = 12, .vsync_len = 4, .upper_margin = 32, .lower_margin = 10, .sync = 0, .cmap_greyscale = 0, }; static struct pxafb_mach_info toshiba_lcd = { .modes = &toshiba_lcd_mode, .num_modes = 1, .lcd_conn = (LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL), .cmap_inverse = 0, .cmap_static = 0, .pxafb_backlight_power = board_backlight_power, }; static void __init trizeps4_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(trizeps4_pin_config)); if (machine_is_trizeps4wl()) { pxa2xx_mfp_config(ARRAY_AND_SIZE(trizeps4wl_pin_config)); platform_add_devices(trizeps4wl_devices, ARRAY_SIZE(trizeps4wl_devices)); } else { platform_add_devices(trizeps4_devices, ARRAY_SIZE(trizeps4_devices)); } pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); if (0) /* dont know how to determine LCD */ pxa_set_fb_info(NULL, &sharp_lcd); else pxa_set_fb_info(NULL, &toshiba_lcd); pxa_set_mci_info(&trizeps4_mci_platform_data); #ifndef STATUS_LEDS_ON_STUART_PINS pxa_set_ficp_info(&trizeps4_ficp_platform_data); #endif pxa_set_ohci_info(&trizeps4_ohci_platform_data); pxa_set_ac97_info(NULL); pxa_set_i2c_info(NULL); i2c_register_board_info(0, trizeps4_i2c_devices, ARRAY_SIZE(trizeps4_i2c_devices)); /* this is the reset value */ trizeps_conxs_bcr = 0x00A0; BCR_writew(trizeps_conxs_bcr); board_backlight_power(1); regulator_has_full_constraints(); } static void __init trizeps4_map_io(void) { pxa27x_map_io(); iotable_init(trizeps4_io_desc, ARRAY_SIZE(trizeps4_io_desc)); if ((__raw_readl(MSC0) & 0x8) && (__raw_readl(BOOT_DEF) & 0x1)) { /* if flash is 16 bit wide its a Trizeps4 WL */ __machine_arch_type = MACH_TYPE_TRIZEPS4WL; trizeps4_flash_data[0].width = 2; } else { /* if flash is 32 bit wide its a Trizeps4 */ __machine_arch_type = MACH_TYPE_TRIZEPS4; 
trizeps4_flash_data[0].width = 4; } } MACHINE_START(TRIZEPS4, "Keith und Koep Trizeps IV module") /* MAINTAINER("Jürgen Schindele") */ .atag_offset = 0x100, .init_machine = trizeps4_init, .map_io = trizeps4_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, .init_time = pxa_timer_init, .restart = pxa_restart, MACHINE_END MACHINE_START(TRIZEPS4WL, "Keith und Koep Trizeps IV-WL module") /* MAINTAINER("Jürgen Schindele") */ .atag_offset = 0x100, .init_machine = trizeps4_init, .map_io = trizeps4_map_io, .nr_irqs = PXA_NR_IRQS, .init_irq = pxa27x_init_irq, .handle_irq = pxa27x_handle_irq, .init_time = pxa_timer_init, .restart = pxa_restart, MACHINE_END
gpl-2.0
TeamApexQ/android_kernel_samsung_d2
drivers/net/wireless/bcmdhd/dhd_linux_wq.c
604
8687
/* * Broadcom Dongle Host Driver (DHD), Generic work queue framework * Generic interface to handle dhd deferred work events * * Copyright (C) 1999-2014, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: dhd_linux_wq.c 411851 2013-07-10 20:48:00Z $ */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/fcntl.h> #include <linux/fs.h> #include <linux/ip.h> #include <linux/kfifo.h> #include <linuxver.h> #include <osl.h> #include <bcmutils.h> #include <bcmendian.h> #include <bcmdevs.h> #include <dngl_stats.h> #include <dhd.h> #include <dhd_dbg.h> #include <dhd_linux_wq.h> struct dhd_deferred_event_t { u8 event; /* holds the event */ void *event_data; /* Holds event specific data */ event_handler_t event_handler; }; #define DEFRD_EVT_SIZE sizeof(struct dhd_deferred_event_t) struct dhd_deferred_wq { struct work_struct deferred_work; /* should be the first member */ /* * work events may occur simultaneously. 
* Can hold upto 64 low priority events and 4 high priority events */ #define DHD_PRIO_WORK_FIFO_SIZE (4 * sizeof(struct dhd_deferred_event_t)) #define DHD_WORK_FIFO_SIZE (64 * sizeof(struct dhd_deferred_event_t)) struct kfifo *prio_fifo; struct kfifo *work_fifo; u8 *prio_fifo_buf; u8 *work_fifo_buf; spinlock_t work_lock; void *dhd_info; /* review: does it require */ }; struct dhd_deferred_wq *deferred_wq = NULL; static inline struct kfifo* dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock) { struct kfifo *fifo; gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) fifo = kfifo_init(buf, size, flags, lock); #else fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags); if (!fifo) { return NULL; } kfifo_init(fifo, buf, size); #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ return fifo; } static inline void dhd_kfifo_free(struct kfifo *fifo) { kfifo_free(fifo); } /* deferred work functions */ static void dhd_deferred_work_handler(struct work_struct *data); void* dhd_deferred_work_init(void *dhd_info) { struct dhd_deferred_wq *work = NULL; u8* buf; unsigned long fifo_size = 0; gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; if (!dhd_info) { DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__)); goto return_null; } work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), flags); if (!work) { DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__)); goto return_null; } INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler); /* initialize event fifo */ spin_lock_init(&work->work_lock); /* allocate buffer to hold prio events */ fifo_size = DHD_PRIO_WORK_FIFO_SIZE; fifo_size = is_power_of_2(fifo_size)? 
fifo_size : roundup_pow_of_two(fifo_size); buf = (u8*)kzalloc(fifo_size, flags); if (!buf) { DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__)); goto return_null; } /* Initialize prio event fifo */ work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); if (!work->prio_fifo) { kfree(buf); goto return_null; } /* allocate buffer to hold work events */ fifo_size = DHD_WORK_FIFO_SIZE; fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); buf = (u8*)kzalloc(fifo_size, flags); if (!buf) { DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__)); goto return_null; } /* Initialize event fifo */ work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock); if (!work->work_fifo) { kfree(buf); goto return_null; } work->dhd_info = dhd_info; deferred_wq = work; DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__)); return work; return_null: if (work) dhd_deferred_work_deinit(work); return NULL; } void dhd_deferred_work_deinit(void *work) { struct dhd_deferred_wq *deferred_work = work; if (!deferred_work) { DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__)); return; } /* cancel the deferred work handling */ cancel_work_sync((struct work_struct *)deferred_work); /* * free work event fifo. 
* kfifo_free frees locally allocated fifo buffer */ if (deferred_work->prio_fifo) dhd_kfifo_free(deferred_work->prio_fifo); if (deferred_work->work_fifo) dhd_kfifo_free(deferred_work->work_fifo); kfree(deferred_work); /* deinit internal reference pointer */ deferred_wq = NULL; } /* * Prepares event to be queued * Schedules the event */ int dhd_deferred_schedule_work(void *event_data, u8 event, event_handler_t event_handler, u8 priority) { struct dhd_deferred_event_t deferred_event; int status; if (!deferred_wq) { DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); ASSERT(0); return DHD_WQ_STS_UNINITIALIZED; } if (!event || (event >= DHD_MAX_WQ_EVENTS)) { DHD_ERROR(("%s: Unknown event \n", __FUNCTION__)); return DHD_WQ_STS_UNKNOWN_EVENT; } /* * default element size is 1, which can be changed * using kfifo_esize(). Older kernel(FC11) doesn't support * changing element size. For compatibility changing * element size is not prefered */ ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); deferred_event.event = event; deferred_event.event_data = event_data; deferred_event.event_handler = event_handler; if (priority == DHD_WORK_PRIORITY_HIGH) { status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); } else { status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); } if (!status) { return DHD_WQ_STS_SCHED_FAILED; } schedule_work((struct work_struct *)deferred_wq); return DHD_WQ_STS_OK; } static int dhd_get_scheduled_work(struct dhd_deferred_event_t *event) { int status = 0; if (!deferred_wq) { DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); return DHD_WQ_STS_UNINITIALIZED; } /* * default element size is 1 byte, which can be changed * using kfifo_esize(). Older kernel(FC11) doesn't support * changing element size. 
For compatibility changing * element size is not prefered */ ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); /* first read priorit event fifo */ status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); if (!status) { /* priority fifo is empty. Now read low prio work fifo */ status = kfifo_out_spinlocked(deferred_wq->work_fifo, event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); } return status; } /* * Called when work is scheduled */ static void dhd_deferred_work_handler(struct work_struct *work) { struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; struct dhd_deferred_event_t work_event; int status; if (!deferred_work) { DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); return; } do { status = dhd_get_scheduled_work(&work_event); DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status)); if (!status) { DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status)); break; } if (work_event.event > DHD_MAX_WQ_EVENTS) { DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event)); break; } if (work_event.event_handler) { work_event.event_handler(deferred_work->dhd_info, work_event.event_data, work_event.event); } else { DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event)); } } while (1); return; }
gpl-2.0
Oleg-k/S8500_Kernel_2.6.32
drivers/isdn/hisax/hfc4s8s_l1.c
860
43668
/*************************************************************************/ /* $Id: hfc4s8s_l1.c,v 1.10 2005/02/09 16:31:09 martinb1 Exp $ */ /* HFC-4S/8S low layer interface for Cologne Chip HFC-4S/8S isdn chips */ /* The low layer (L1) is implemented as a loadable module for usage with */ /* the HiSax isdn driver for passive cards. */ /* */ /* Author: Werner Cornelius */ /* (C) 2003 Cornelius Consult (werner@cornelius-consult.de) */ /* */ /* Driver maintained by Cologne Chip */ /* - Martin Bachem, support@colognechip.com */ /* */ /* This driver only works with chip revisions >= 1, older revision 0 */ /* engineering samples (only first manufacturer sample cards) will not */ /* work and are rejected by the driver. */ /* */ /* This file distributed under the GNU GPL. */ /* */ /* See Version History at the end of this file */ /* */ /*************************************************************************/ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/skbuff.h> #include <linux/wait.h> #include <asm/io.h> #include "hisax_if.h" #include "hfc4s8s_l1.h" static const char hfc4s8s_rev[] = "Revision: 1.10"; /***************************************************************/ /* adjustable transparent mode fifo threshold */ /* The value defines the used fifo threshold with the equation */ /* */ /* notify number of bytes = 2 * 2 ^ TRANS_FIFO_THRES */ /* */ /* The default value is 5 which results in a buffer size of 64 */ /* and an interrupt rate of 8ms. */ /* The maximum value is 7 due to fifo size restrictions. */ /* Values below 3-4 are not recommended due to high interrupt */ /* load of the processor. 
For non critical applications the */ /* value should be raised to 7 to reduce any interrupt overhead*/ /***************************************************************/ #define TRANS_FIFO_THRES 5 /*************/ /* constants */ /*************/ #define CLOCKMODE_0 0 /* ext. 24.576 MhZ clk freq, int. single clock mode */ #define CLOCKMODE_1 1 /* ext. 49.576 MhZ clk freq, int. single clock mode */ #define CHIP_ID_SHIFT 4 #define HFC_MAX_ST 8 #define MAX_D_FRAME_SIZE 270 #define MAX_B_FRAME_SIZE 1536 #define TRANS_TIMER_MODE (TRANS_FIFO_THRES & 0xf) #define TRANS_FIFO_BYTES (2 << TRANS_FIFO_THRES) #define MAX_F_CNT 0x0f #define CLKDEL_NT 0x6c #define CLKDEL_TE 0xf #define CTRL0_NT 4 #define CTRL0_TE 0 #define L1_TIMER_T4 2 /* minimum in jiffies */ #define L1_TIMER_T3 (7 * HZ) /* activation timeout */ #define L1_TIMER_T1 ((120 * HZ) / 1000) /* NT mode deactivation timeout */ /******************/ /* types and vars */ /******************/ static int card_cnt; /* private driver_data */ typedef struct { int chip_id; int clock_mode; int max_st_ports; char *device_name; } hfc4s8s_param; static struct pci_device_id hfc4s8s_ids[] = { {.vendor = PCI_VENDOR_ID_CCD, .device = PCI_DEVICE_ID_4S, .subvendor = 0x1397, .subdevice = 0x08b4, .driver_data = (unsigned long) &((hfc4s8s_param) {CHIP_ID_4S, CLOCKMODE_0, 4, "HFC-4S Evaluation Board"}), }, {.vendor = PCI_VENDOR_ID_CCD, .device = PCI_DEVICE_ID_8S, .subvendor = 0x1397, .subdevice = 0x16b8, .driver_data = (unsigned long) &((hfc4s8s_param) {CHIP_ID_8S, CLOCKMODE_0, 8, "HFC-8S Evaluation Board"}), }, {.vendor = PCI_VENDOR_ID_CCD, .device = PCI_DEVICE_ID_4S, .subvendor = 0x1397, .subdevice = 0xb520, .driver_data = (unsigned long) &((hfc4s8s_param) {CHIP_ID_4S, CLOCKMODE_1, 4, "IOB4ST"}), }, {.vendor = PCI_VENDOR_ID_CCD, .device = PCI_DEVICE_ID_8S, .subvendor = 0x1397, .subdevice = 0xb522, .driver_data = (unsigned long) &((hfc4s8s_param) {CHIP_ID_8S, CLOCKMODE_1, 8, "IOB8ST"}), }, {} }; MODULE_DEVICE_TABLE(pci, hfc4s8s_ids); 
MODULE_AUTHOR("Werner Cornelius, werner@cornelius-consult.de"); MODULE_DESCRIPTION("ISDN layer 1 for Cologne Chip HFC-4S/8S chips"); MODULE_LICENSE("GPL"); /***********/ /* layer 1 */ /***********/ struct hfc4s8s_btype { spinlock_t lock; struct hisax_b_if b_if; struct hfc4s8s_l1 *l1p; struct sk_buff_head tx_queue; struct sk_buff *tx_skb; struct sk_buff *rx_skb; __u8 *rx_ptr; int tx_cnt; int bchan; int mode; }; struct _hfc4s8s_hw; struct hfc4s8s_l1 { spinlock_t lock; struct _hfc4s8s_hw *hw; /* pointer to hardware area */ int l1_state; /* actual l1 state */ struct timer_list l1_timer; /* layer 1 timer structure */ int nt_mode; /* set to nt mode */ int st_num; /* own index */ int enabled; /* interface is enabled */ struct sk_buff_head d_tx_queue; /* send queue */ int tx_cnt; /* bytes to send */ struct hisax_d_if d_if; /* D-channel interface */ struct hfc4s8s_btype b_ch[2]; /* B-channel data */ struct hisax_b_if *b_table[2]; }; /**********************/ /* hardware structure */ /**********************/ typedef struct _hfc4s8s_hw { spinlock_t lock; int cardnum; int ifnum; int iobase; int nt_mode; u_char *membase; u_char *hw_membase; void *pdev; int max_fifo; hfc4s8s_param driver_data; int irq; int fifo_sched_cnt; struct work_struct tqueue; struct hfc4s8s_l1 l1[HFC_MAX_ST]; char card_name[60]; struct { u_char r_irq_ctrl; u_char r_ctrl0; volatile u_char r_irq_statech; /* active isdn l1 status */ u_char r_irqmsk_statchg; /* enabled isdn status ints */ u_char r_irq_fifo_blx[8]; /* fifo status registers */ u_char fifo_rx_trans_enables[8]; /* mask for enabled transparent rx fifos */ u_char fifo_slow_timer_service[8]; /* mask for fifos needing slower timer service */ volatile u_char r_irq_oview; /* contents of overview register */ volatile u_char timer_irq; int timer_usg_cnt; /* number of channels using timer */ } mr; } hfc4s8s_hw; /***************************/ /* inline function defines */ /***************************/ #ifdef HISAX_HFC4S8S_PCIMEM /* inline functions memory 
mapped */ /* memory write and dummy IO read to avoid PCI byte merge problems */ #define Write_hfc8(a,b,c) {(*((volatile u_char *)(a->membase+b)) = c); inb(a->iobase+4);} /* memory write without dummy IO access for fifo data access */ #define fWrite_hfc8(a,b,c) (*((volatile u_char *)(a->membase+b)) = c) #define Read_hfc8(a,b) (*((volatile u_char *)(a->membase+b))) #define Write_hfc16(a,b,c) (*((volatile unsigned short *)(a->membase+b)) = c) #define Read_hfc16(a,b) (*((volatile unsigned short *)(a->membase+b))) #define Write_hfc32(a,b,c) (*((volatile unsigned long *)(a->membase+b)) = c) #define Read_hfc32(a,b) (*((volatile unsigned long *)(a->membase+b))) #define wait_busy(a) {while ((Read_hfc8(a, R_STATUS) & M_BUSY));} #define PCI_ENA_MEMIO 0x03 #else /* inline functions io mapped */ static inline void SetRegAddr(hfc4s8s_hw * a, u_char b) { outb(b, (a->iobase) + 4); } static inline u_char GetRegAddr(hfc4s8s_hw * a) { return (inb((volatile u_int) (a->iobase + 4))); } static inline void Write_hfc8(hfc4s8s_hw * a, u_char b, u_char c) { SetRegAddr(a, b); outb(c, a->iobase); } static inline void fWrite_hfc8(hfc4s8s_hw * a, u_char c) { outb(c, a->iobase); } static inline void Write_hfc16(hfc4s8s_hw * a, u_char b, u_short c) { SetRegAddr(a, b); outw(c, a->iobase); } static inline void Write_hfc32(hfc4s8s_hw * a, u_char b, u_long c) { SetRegAddr(a, b); outl(c, a->iobase); } static inline void fWrite_hfc32(hfc4s8s_hw * a, u_long c) { outl(c, a->iobase); } static inline u_char Read_hfc8(hfc4s8s_hw * a, u_char b) { SetRegAddr(a, b); return (inb((volatile u_int) a->iobase)); } static inline u_char fRead_hfc8(hfc4s8s_hw * a) { return (inb((volatile u_int) a->iobase)); } static inline u_short Read_hfc16(hfc4s8s_hw * a, u_char b) { SetRegAddr(a, b); return (inw((volatile u_int) a->iobase)); } static inline u_long Read_hfc32(hfc4s8s_hw * a, u_char b) { SetRegAddr(a, b); return (inl((volatile u_int) a->iobase)); } static inline u_long fRead_hfc32(hfc4s8s_hw * a) { return 
(inl((volatile u_int) a->iobase)); } static inline void wait_busy(hfc4s8s_hw * a) { SetRegAddr(a, R_STATUS); while (inb((volatile u_int) a->iobase) & M_BUSY); } #define PCI_ENA_REGIO 0x01 #endif /* HISAX_HFC4S8S_PCIMEM */ /******************************************************/ /* function to read critical counter registers that */ /* may be udpated by the chip during read */ /******************************************************/ static u_char Read_hfc8_stable(hfc4s8s_hw * hw, int reg) { u_char ref8; u_char in8; ref8 = Read_hfc8(hw, reg); while (((in8 = Read_hfc8(hw, reg)) != ref8)) { ref8 = in8; } return in8; } static int Read_hfc16_stable(hfc4s8s_hw * hw, int reg) { int ref16; int in16; ref16 = Read_hfc16(hw, reg); while (((in16 = Read_hfc16(hw, reg)) != ref16)) { ref16 = in16; } return in16; } /*****************************/ /* D-channel call from HiSax */ /*****************************/ static void dch_l2l1(struct hisax_d_if *iface, int pr, void *arg) { struct hfc4s8s_l1 *l1 = iface->ifc.priv; struct sk_buff *skb = (struct sk_buff *) arg; u_long flags; switch (pr) { case (PH_DATA | REQUEST): if (!l1->enabled) { dev_kfree_skb(skb); break; } spin_lock_irqsave(&l1->lock, flags); skb_queue_tail(&l1->d_tx_queue, skb); if ((skb_queue_len(&l1->d_tx_queue) == 1) && (l1->tx_cnt <= 0)) { l1->hw->mr.r_irq_fifo_blx[l1->st_num] |= 0x10; spin_unlock_irqrestore(&l1->lock, flags); schedule_work(&l1->hw->tqueue); } else spin_unlock_irqrestore(&l1->lock, flags); break; case (PH_ACTIVATE | REQUEST): if (!l1->enabled) break; if (!l1->nt_mode) { if (l1->l1_state < 6) { spin_lock_irqsave(&l1->lock, flags); Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); Write_hfc8(l1->hw, A_ST_WR_STA, 0x60); mod_timer(&l1->l1_timer, jiffies + L1_TIMER_T3); spin_unlock_irqrestore(&l1->lock, flags); } else if (l1->l1_state == 7) l1->d_if.ifc.l1l2(&l1->d_if.ifc, PH_ACTIVATE | INDICATION, NULL); } else { if (l1->l1_state != 3) { spin_lock_irqsave(&l1->lock, flags); Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); 
Write_hfc8(l1->hw, A_ST_WR_STA, 0x60); spin_unlock_irqrestore(&l1->lock, flags); } else if (l1->l1_state == 3) l1->d_if.ifc.l1l2(&l1->d_if.ifc, PH_ACTIVATE | INDICATION, NULL); } break; default: printk(KERN_INFO "HFC-4S/8S: Unknown D-chan cmd 0x%x received, ignored\n", pr); break; } if (!l1->enabled) l1->d_if.ifc.l1l2(&l1->d_if.ifc, PH_DEACTIVATE | INDICATION, NULL); } /* dch_l2l1 */ /*****************************/ /* B-channel call from HiSax */ /*****************************/ static void bch_l2l1(struct hisax_if *ifc, int pr, void *arg) { struct hfc4s8s_btype *bch = ifc->priv; struct hfc4s8s_l1 *l1 = bch->l1p; struct sk_buff *skb = (struct sk_buff *) arg; long mode = (long) arg; u_long flags; switch (pr) { case (PH_DATA | REQUEST): if (!l1->enabled || (bch->mode == L1_MODE_NULL)) { dev_kfree_skb(skb); break; } spin_lock_irqsave(&l1->lock, flags); skb_queue_tail(&bch->tx_queue, skb); if (!bch->tx_skb && (bch->tx_cnt <= 0)) { l1->hw->mr.r_irq_fifo_blx[l1->st_num] |= ((bch->bchan == 1) ? 1 : 4); spin_unlock_irqrestore(&l1->lock, flags); schedule_work(&l1->hw->tqueue); } else spin_unlock_irqrestore(&l1->lock, flags); break; case (PH_ACTIVATE | REQUEST): case (PH_DEACTIVATE | REQUEST): if (!l1->enabled) break; if (pr == (PH_DEACTIVATE | REQUEST)) mode = L1_MODE_NULL; switch (mode) { case L1_MODE_HDLC: spin_lock_irqsave(&l1->lock, flags); l1->hw->mr.timer_usg_cnt++; l1->hw->mr. fifo_slow_timer_service[l1-> st_num] |= ((bch->bchan == 1) ? 0x2 : 0x8); Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2))); wait_busy(l1->hw); Write_hfc8(l1->hw, A_CON_HDLC, 0xc); /* HDLC mode, flag fill, connect ST */ Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */ Write_hfc8(l1->hw, A_IRQ_MSK, 1); /* enable TX interrupts for hdlc */ Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */ wait_busy(l1->hw); Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 
1 : 3))); wait_busy(l1->hw); Write_hfc8(l1->hw, A_CON_HDLC, 0xc); /* HDLC mode, flag fill, connect ST */ Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */ Write_hfc8(l1->hw, A_IRQ_MSK, 1); /* enable RX interrupts for hdlc */ Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */ Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); l1->hw->mr.r_ctrl0 |= (bch->bchan & 3); Write_hfc8(l1->hw, A_ST_CTRL0, l1->hw->mr.r_ctrl0); bch->mode = L1_MODE_HDLC; spin_unlock_irqrestore(&l1->lock, flags); bch->b_if.ifc.l1l2(&bch->b_if.ifc, PH_ACTIVATE | INDICATION, NULL); break; case L1_MODE_TRANS: spin_lock_irqsave(&l1->lock, flags); l1->hw->mr. fifo_rx_trans_enables[l1-> st_num] |= ((bch->bchan == 1) ? 0x2 : 0x8); l1->hw->mr.timer_usg_cnt++; Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2))); wait_busy(l1->hw); Write_hfc8(l1->hw, A_CON_HDLC, 0xf); /* Transparent mode, 1 fill, connect ST */ Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */ Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable TX interrupts */ Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */ wait_busy(l1->hw); Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3))); wait_busy(l1->hw); Write_hfc8(l1->hw, A_CON_HDLC, 0xf); /* Transparent mode, 1 fill, connect ST */ Write_hfc8(l1->hw, A_SUBCH_CFG, 0); /* 8 bits */ Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable RX interrupts */ Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */ Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); l1->hw->mr.r_ctrl0 |= (bch->bchan & 3); Write_hfc8(l1->hw, A_ST_CTRL0, l1->hw->mr.r_ctrl0); bch->mode = L1_MODE_TRANS; spin_unlock_irqrestore(&l1->lock, flags); bch->b_if.ifc.l1l2(&bch->b_if.ifc, PH_ACTIVATE | INDICATION, NULL); break; default: if (bch->mode == L1_MODE_NULL) break; spin_lock_irqsave(&l1->lock, flags); l1->hw->mr. fifo_slow_timer_service[l1-> st_num] &= ~((bch->bchan == 1) ? 0x3 : 0xc); l1->hw->mr. fifo_rx_trans_enables[l1-> st_num] &= ~((bch->bchan == 1) ? 
0x3 : 0xc); l1->hw->mr.timer_usg_cnt--; Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2))); wait_busy(l1->hw); Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable TX interrupts */ wait_busy(l1->hw); Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3))); wait_busy(l1->hw); Write_hfc8(l1->hw, A_IRQ_MSK, 0); /* disable RX interrupts */ Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); l1->hw->mr.r_ctrl0 &= ~(bch->bchan & 3); Write_hfc8(l1->hw, A_ST_CTRL0, l1->hw->mr.r_ctrl0); spin_unlock_irqrestore(&l1->lock, flags); bch->mode = L1_MODE_NULL; bch->b_if.ifc.l1l2(&bch->b_if.ifc, PH_DEACTIVATE | INDICATION, NULL); if (bch->tx_skb) { dev_kfree_skb(bch->tx_skb); bch->tx_skb = NULL; } if (bch->rx_skb) { dev_kfree_skb(bch->rx_skb); bch->rx_skb = NULL; } skb_queue_purge(&bch->tx_queue); bch->tx_cnt = 0; bch->rx_ptr = NULL; break; } /* timer is only used when at least one b channel */ /* is set up to transparent mode */ if (l1->hw->mr.timer_usg_cnt) { Write_hfc8(l1->hw, R_IRQMSK_MISC, M_TI_IRQMSK); } else { Write_hfc8(l1->hw, R_IRQMSK_MISC, 0); } break; default: printk(KERN_INFO "HFC-4S/8S: Unknown B-chan cmd 0x%x received, ignored\n", pr); break; } if (!l1->enabled) bch->b_if.ifc.l1l2(&bch->b_if.ifc, PH_DEACTIVATE | INDICATION, NULL); } /* bch_l2l1 */ /**************************/ /* layer 1 timer function */ /**************************/ static void hfc_l1_timer(struct hfc4s8s_l1 *l1) { u_long flags; if (!l1->enabled) return; spin_lock_irqsave(&l1->lock, flags); if (l1->nt_mode) { l1->l1_state = 1; Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); Write_hfc8(l1->hw, A_ST_WR_STA, 0x11); spin_unlock_irqrestore(&l1->lock, flags); l1->d_if.ifc.l1l2(&l1->d_if.ifc, PH_DEACTIVATE | INDICATION, NULL); spin_lock_irqsave(&l1->lock, flags); l1->l1_state = 1; Write_hfc8(l1->hw, A_ST_WR_STA, 0x1); spin_unlock_irqrestore(&l1->lock, flags); } else { /* activation timed out */ Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); Write_hfc8(l1->hw, A_ST_WR_STA, 0x13); 
spin_unlock_irqrestore(&l1->lock, flags); l1->d_if.ifc.l1l2(&l1->d_if.ifc, PH_DEACTIVATE | INDICATION, NULL); spin_lock_irqsave(&l1->lock, flags); Write_hfc8(l1->hw, R_ST_SEL, l1->st_num); Write_hfc8(l1->hw, A_ST_WR_STA, 0x3); spin_unlock_irqrestore(&l1->lock, flags); } } /* hfc_l1_timer */ /****************************************/ /* a complete D-frame has been received */ /****************************************/ static void rx_d_frame(struct hfc4s8s_l1 *l1p, int ech) { int z1, z2; u_char f1, f2, df; struct sk_buff *skb; u_char *cp; if (!l1p->enabled) return; do { /* E/D RX fifo */ Write_hfc8(l1p->hw, R_FIFO, (l1p->st_num * 8 + ((ech) ? 7 : 5))); wait_busy(l1p->hw); f1 = Read_hfc8_stable(l1p->hw, A_F1); f2 = Read_hfc8(l1p->hw, A_F2); df = f1 - f2; if ((f1 - f2) < 0) df = f1 - f2 + MAX_F_CNT + 1; if (!df) { return; /* no complete frame in fifo */ } z1 = Read_hfc16_stable(l1p->hw, A_Z1); z2 = Read_hfc16(l1p->hw, A_Z2); z1 = z1 - z2 + 1; if (z1 < 0) z1 += 384; if (!(skb = dev_alloc_skb(MAX_D_FRAME_SIZE))) { printk(KERN_INFO "HFC-4S/8S: Could not allocate D/E " "channel receive buffer"); Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2); wait_busy(l1p->hw); return; } if (((z1 < 4) || (z1 > MAX_D_FRAME_SIZE))) { if (skb) dev_kfree_skb(skb); /* remove errornous D frame */ if (df == 1) { /* reset fifo */ Write_hfc8(l1p->hw, A_INC_RES_FIFO, 2); wait_busy(l1p->hw); return; } else { /* read errornous D frame */ #ifndef HISAX_HFC4S8S_PCIMEM SetRegAddr(l1p->hw, A_FIFO_DATA0); #endif while (z1 >= 4) { #ifdef HISAX_HFC4S8S_PCIMEM Read_hfc32(l1p->hw, A_FIFO_DATA0); #else fRead_hfc32(l1p->hw); #endif z1 -= 4; } while (z1--) #ifdef HISAX_HFC4S8S_PCIMEM Read_hfc8(l1p->hw, A_FIFO_DATA0); #else fRead_hfc8(l1p->hw); #endif Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); wait_busy(l1p->hw); return; } } cp = skb->data; #ifndef HISAX_HFC4S8S_PCIMEM SetRegAddr(l1p->hw, A_FIFO_DATA0); #endif while (z1 >= 4) { #ifdef HISAX_HFC4S8S_PCIMEM *((unsigned long *) cp) = Read_hfc32(l1p->hw, A_FIFO_DATA0); #else 
*((unsigned long *) cp) = fRead_hfc32(l1p->hw); #endif cp += 4; z1 -= 4; } while (z1--) #ifdef HISAX_HFC4S8S_PCIMEM *cp++ = Read_hfc8(l1p->hw, A_FIFO_DATA0); #else *cp++ = fRead_hfc8(l1p->hw); #endif Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */ wait_busy(l1p->hw); if (*(--cp)) { dev_kfree_skb(skb); } else { skb->len = (cp - skb->data) - 2; if (ech) l1p->d_if.ifc.l1l2(&l1p->d_if.ifc, PH_DATA_E | INDICATION, skb); else l1p->d_if.ifc.l1l2(&l1p->d_if.ifc, PH_DATA | INDICATION, skb); } } while (1); } /* rx_d_frame */ /*************************************************************/ /* a B-frame has been received (perhaps not fully completed) */ /*************************************************************/ static void rx_b_frame(struct hfc4s8s_btype *bch) { int z1, z2, hdlc_complete; u_char f1, f2; struct hfc4s8s_l1 *l1 = bch->l1p; struct sk_buff *skb; if (!l1->enabled || (bch->mode == L1_MODE_NULL)) return; do { /* RX Fifo */ Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3))); wait_busy(l1->hw); if (bch->mode == L1_MODE_HDLC) { f1 = Read_hfc8_stable(l1->hw, A_F1); f2 = Read_hfc8(l1->hw, A_F2); hdlc_complete = ((f1 ^ f2) & MAX_F_CNT); } else hdlc_complete = 0; z1 = Read_hfc16_stable(l1->hw, A_Z1); z2 = Read_hfc16(l1->hw, A_Z2); z1 = (z1 - z2); if (hdlc_complete) z1++; if (z1 < 0) z1 += 384; if (!z1) break; if (!(skb = bch->rx_skb)) { if (! (skb = dev_alloc_skb((bch->mode == L1_MODE_TRANS) ? 
z1 : (MAX_B_FRAME_SIZE + 3)))) { printk(KERN_ERR "HFC-4S/8S: Could not allocate B " "channel receive buffer"); return; } bch->rx_ptr = skb->data; bch->rx_skb = skb; } skb->len = (bch->rx_ptr - skb->data) + z1; /* HDLC length check */ if ((bch->mode == L1_MODE_HDLC) && ((hdlc_complete && (skb->len < 4)) || (skb->len > (MAX_B_FRAME_SIZE + 3)))) { skb->len = 0; bch->rx_ptr = skb->data; Write_hfc8(l1->hw, A_INC_RES_FIFO, 2); /* reset fifo */ wait_busy(l1->hw); return; } #ifndef HISAX_HFC4S8S_PCIMEM SetRegAddr(l1->hw, A_FIFO_DATA0); #endif while (z1 >= 4) { #ifdef HISAX_HFC4S8S_PCIMEM *((unsigned long *) bch->rx_ptr) = Read_hfc32(l1->hw, A_FIFO_DATA0); #else *((unsigned long *) bch->rx_ptr) = fRead_hfc32(l1->hw); #endif bch->rx_ptr += 4; z1 -= 4; } while (z1--) #ifdef HISAX_HFC4S8S_PCIMEM *(bch->rx_ptr++) = Read_hfc8(l1->hw, A_FIFO_DATA0); #else *(bch->rx_ptr++) = fRead_hfc8(l1->hw); #endif if (hdlc_complete) { /* increment f counter */ Write_hfc8(l1->hw, A_INC_RES_FIFO, 1); wait_busy(l1->hw); /* hdlc crc check */ bch->rx_ptr--; if (*bch->rx_ptr) { skb->len = 0; bch->rx_ptr = skb->data; continue; } skb->len -= 3; } if (hdlc_complete || (bch->mode == L1_MODE_TRANS)) { bch->rx_skb = NULL; bch->rx_ptr = NULL; bch->b_if.ifc.l1l2(&bch->b_if.ifc, PH_DATA | INDICATION, skb); } } while (1); } /* rx_b_frame */ /********************************************/ /* a D-frame has been/should be transmitted */ /********************************************/ static void tx_d_frame(struct hfc4s8s_l1 *l1p) { struct sk_buff *skb; u_char f1, f2; u_char *cp; long cnt; if (l1p->l1_state != 7) return; /* TX fifo */ Write_hfc8(l1p->hw, R_FIFO, (l1p->st_num * 8 + 4)); wait_busy(l1p->hw); f1 = Read_hfc8(l1p->hw, A_F1); f2 = Read_hfc8_stable(l1p->hw, A_F2); if ((f1 ^ f2) & MAX_F_CNT) return; /* fifo is still filled */ if (l1p->tx_cnt > 0) { cnt = l1p->tx_cnt; l1p->tx_cnt = 0; l1p->d_if.ifc.l1l2(&l1p->d_if.ifc, PH_DATA | CONFIRM, (void *) cnt); } if ((skb = skb_dequeue(&l1p->d_tx_queue))) { cp = 
skb->data; cnt = skb->len; #ifndef HISAX_HFC4S8S_PCIMEM SetRegAddr(l1p->hw, A_FIFO_DATA0); #endif while (cnt >= 4) { #ifdef HISAX_HFC4S8S_PCIMEM fWrite_hfc32(l1p->hw, A_FIFO_DATA0, *(unsigned long *) cp); #else SetRegAddr(l1p->hw, A_FIFO_DATA0); fWrite_hfc32(l1p->hw, *(unsigned long *) cp); #endif cp += 4; cnt -= 4; } #ifdef HISAX_HFC4S8S_PCIMEM while (cnt--) fWrite_hfc8(l1p->hw, A_FIFO_DATA0, *cp++); #else while (cnt--) fWrite_hfc8(l1p->hw, *cp++); #endif l1p->tx_cnt = skb->truesize; Write_hfc8(l1p->hw, A_INC_RES_FIFO, 1); /* increment f counter */ wait_busy(l1p->hw); dev_kfree_skb(skb); } } /* tx_d_frame */ /******************************************************/ /* a B-frame may be transmitted (or is not completed) */ /******************************************************/ static void tx_b_frame(struct hfc4s8s_btype *bch) { struct sk_buff *skb; struct hfc4s8s_l1 *l1 = bch->l1p; u_char *cp; int cnt, max, hdlc_num; long ack_len = 0; if (!l1->enabled || (bch->mode == L1_MODE_NULL)) return; /* TX fifo */ Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2))); wait_busy(l1->hw); do { if (bch->mode == L1_MODE_HDLC) { hdlc_num = Read_hfc8(l1->hw, A_F1) & MAX_F_CNT; hdlc_num -= (Read_hfc8_stable(l1->hw, A_F2) & MAX_F_CNT); if (hdlc_num < 0) hdlc_num += 16; if (hdlc_num >= 15) break; /* fifo still filled up with hdlc frames */ } else hdlc_num = 0; if (!(skb = bch->tx_skb)) { if (!(skb = skb_dequeue(&bch->tx_queue))) { l1->hw->mr.fifo_slow_timer_service[l1-> st_num] &= ~((bch->bchan == 1) ? 1 : 4); break; /* list empty */ } bch->tx_skb = skb; bch->tx_cnt = 0; } if (!hdlc_num) l1->hw->mr.fifo_slow_timer_service[l1->st_num] |= ((bch->bchan == 1) ? 1 : 4); else l1->hw->mr.fifo_slow_timer_service[l1->st_num] &= ~((bch->bchan == 1) ? 
1 : 4); max = Read_hfc16_stable(l1->hw, A_Z2); max -= Read_hfc16(l1->hw, A_Z1); if (max <= 0) max += 384; max--; if (max < 16) break; /* don't write to small amounts of bytes */ cnt = skb->len - bch->tx_cnt; if (cnt > max) cnt = max; cp = skb->data + bch->tx_cnt; bch->tx_cnt += cnt; #ifndef HISAX_HFC4S8S_PCIMEM SetRegAddr(l1->hw, A_FIFO_DATA0); #endif while (cnt >= 4) { #ifdef HISAX_HFC4S8S_PCIMEM fWrite_hfc32(l1->hw, A_FIFO_DATA0, *(unsigned long *) cp); #else fWrite_hfc32(l1->hw, *(unsigned long *) cp); #endif cp += 4; cnt -= 4; } while (cnt--) #ifdef HISAX_HFC4S8S_PCIMEM fWrite_hfc8(l1->hw, A_FIFO_DATA0, *cp++); #else fWrite_hfc8(l1->hw, *cp++); #endif if (bch->tx_cnt >= skb->len) { if (bch->mode == L1_MODE_HDLC) { /* increment f counter */ Write_hfc8(l1->hw, A_INC_RES_FIFO, 1); } ack_len += skb->truesize; bch->tx_skb = NULL; bch->tx_cnt = 0; dev_kfree_skb(skb); } else /* Re-Select */ Write_hfc8(l1->hw, R_FIFO, (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2))); wait_busy(l1->hw); } while (1); if (ack_len) bch->b_if.ifc.l1l2((struct hisax_if *) &bch->b_if, PH_DATA | CONFIRM, (void *) ack_len); } /* tx_b_frame */ /*************************************/ /* bottom half handler for interrupt */ /*************************************/ static void hfc4s8s_bh(struct work_struct *work) { hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue); u_char b; struct hfc4s8s_l1 *l1p; volatile u_char *fifo_stat; int idx; /* handle layer 1 state changes */ b = 1; l1p = hw->l1; while (b) { if ((b & hw->mr.r_irq_statech)) { /* reset l1 event */ hw->mr.r_irq_statech &= ~b; if (l1p->enabled) { if (l1p->nt_mode) { u_char oldstate = l1p->l1_state; Write_hfc8(l1p->hw, R_ST_SEL, l1p->st_num); l1p->l1_state = Read_hfc8(l1p->hw, A_ST_RD_STA) & 0xf; if ((oldstate == 3) && (l1p->l1_state != 3)) l1p->d_if.ifc.l1l2(&l1p-> d_if. ifc, PH_DEACTIVATE | INDICATION, NULL); if (l1p->l1_state != 2) { del_timer(&l1p->l1_timer); if (l1p->l1_state == 3) { l1p->d_if.ifc. 
l1l2(&l1p-> d_if.ifc, PH_ACTIVATE | INDICATION, NULL); } } else { /* allow transition */ Write_hfc8(hw, A_ST_WR_STA, M_SET_G2_G3); mod_timer(&l1p->l1_timer, jiffies + L1_TIMER_T1); } printk(KERN_INFO "HFC-4S/8S: NT ch %d l1 state %d -> %d\n", l1p->st_num, oldstate, l1p->l1_state); } else { u_char oldstate = l1p->l1_state; Write_hfc8(l1p->hw, R_ST_SEL, l1p->st_num); l1p->l1_state = Read_hfc8(l1p->hw, A_ST_RD_STA) & 0xf; if (((l1p->l1_state == 3) && ((oldstate == 7) || (oldstate == 8))) || ((timer_pending (&l1p->l1_timer)) && (l1p->l1_state == 8))) { mod_timer(&l1p->l1_timer, L1_TIMER_T4 + jiffies); } else { if (l1p->l1_state == 7) { del_timer(&l1p-> l1_timer); l1p->d_if.ifc. l1l2(&l1p-> d_if.ifc, PH_ACTIVATE | INDICATION, NULL); tx_d_frame(l1p); } if (l1p->l1_state == 3) { if (oldstate != 3) l1p->d_if. ifc. l1l2 (&l1p-> d_if. ifc, PH_DEACTIVATE | INDICATION, NULL); } } printk(KERN_INFO "HFC-4S/8S: TE %d ch %d l1 state %d -> %d\n", l1p->hw->cardnum, l1p->st_num, oldstate, l1p->l1_state); } } } b <<= 1; l1p++; } /* now handle the fifos */ idx = 0; fifo_stat = hw->mr.r_irq_fifo_blx; l1p = hw->l1; while (idx < hw->driver_data.max_st_ports) { if (hw->mr.timer_irq) { *fifo_stat |= hw->mr.fifo_rx_trans_enables[idx]; if (hw->fifo_sched_cnt <= 0) { *fifo_stat |= hw->mr.fifo_slow_timer_service[l1p-> st_num]; } } /* ignore fifo 6 (TX E fifo) */ *fifo_stat &= 0xff - 0x40; while (*fifo_stat) { if (!l1p->nt_mode) { /* RX Fifo has data to read */ if ((*fifo_stat & 0x20)) { *fifo_stat &= ~0x20; rx_d_frame(l1p, 0); } /* E Fifo has data to read */ if ((*fifo_stat & 0x80)) { *fifo_stat &= ~0x80; rx_d_frame(l1p, 1); } /* TX Fifo completed send */ if ((*fifo_stat & 0x10)) { *fifo_stat &= ~0x10; tx_d_frame(l1p); } } /* B1 RX Fifo has data to read */ if ((*fifo_stat & 0x2)) { *fifo_stat &= ~0x2; rx_b_frame(l1p->b_ch); } /* B1 TX Fifo has send completed */ if ((*fifo_stat & 0x1)) { *fifo_stat &= ~0x1; tx_b_frame(l1p->b_ch); } /* B2 RX Fifo has data to read */ if ((*fifo_stat & 0x8)) { 
*fifo_stat &= ~0x8; rx_b_frame(l1p->b_ch + 1); } /* B2 TX Fifo has send completed */ if ((*fifo_stat & 0x4)) { *fifo_stat &= ~0x4; tx_b_frame(l1p->b_ch + 1); } } fifo_stat++; l1p++; idx++; } if (hw->fifo_sched_cnt <= 0) hw->fifo_sched_cnt += (1 << (7 - TRANS_TIMER_MODE)); hw->mr.timer_irq = 0; /* clear requested timer irq */ } /* hfc4s8s_bh */ /*********************/ /* interrupt handler */ /*********************/ static irqreturn_t hfc4s8s_interrupt(int intno, void *dev_id) { hfc4s8s_hw *hw = dev_id; u_char b, ovr; volatile u_char *ovp; int idx; u_char old_ioreg; if (!hw || !(hw->mr.r_irq_ctrl & M_GLOB_IRQ_EN)) return IRQ_NONE; #ifndef HISAX_HFC4S8S_PCIMEM /* read current selected regsister */ old_ioreg = GetRegAddr(hw); #endif /* Layer 1 State change */ hw->mr.r_irq_statech |= (Read_hfc8(hw, R_SCI) & hw->mr.r_irqmsk_statchg); if (! (b = (Read_hfc8(hw, R_STATUS) & (M_MISC_IRQSTA | M_FR_IRQSTA))) && !hw->mr.r_irq_statech) { #ifndef HISAX_HFC4S8S_PCIMEM SetRegAddr(hw, old_ioreg); #endif return IRQ_NONE; } /* timer event */ if (Read_hfc8(hw, R_IRQ_MISC) & M_TI_IRQ) { hw->mr.timer_irq = 1; hw->fifo_sched_cnt--; } /* FIFO event */ if ((ovr = Read_hfc8(hw, R_IRQ_OVIEW))) { hw->mr.r_irq_oview |= ovr; idx = R_IRQ_FIFO_BL0; ovp = hw->mr.r_irq_fifo_blx; while (ovr) { if ((ovr & 1)) { *ovp |= Read_hfc8(hw, idx); } ovp++; idx++; ovr >>= 1; } } /* queue the request to allow other cards to interrupt */ schedule_work(&hw->tqueue); #ifndef HISAX_HFC4S8S_PCIMEM SetRegAddr(hw, old_ioreg); #endif return IRQ_HANDLED; } /* hfc4s8s_interrupt */ /***********************************************************************/ /* reset the complete chip, don't release the chips irq but disable it */ /***********************************************************************/ static void chipreset(hfc4s8s_hw * hw) { u_long flags; spin_lock_irqsave(&hw->lock, flags); Write_hfc8(hw, R_CTRL, 0); /* use internal RAM */ Write_hfc8(hw, R_RAM_MISC, 0); /* 32k*8 RAM */ Write_hfc8(hw, R_FIFO_MD, 0); /* fifo 
mode 386 byte/fifo simple mode */ Write_hfc8(hw, R_CIRM, M_SRES); /* reset chip */ hw->mr.r_irq_ctrl = 0; /* interrupt is inactive */ spin_unlock_irqrestore(&hw->lock, flags); udelay(3); Write_hfc8(hw, R_CIRM, 0); /* disable reset */ wait_busy(hw); Write_hfc8(hw, R_PCM_MD0, M_PCM_MD); /* master mode */ Write_hfc8(hw, R_RAM_MISC, M_FZ_MD); /* transmit fifo option */ if (hw->driver_data.clock_mode == 1) Write_hfc8(hw, R_BRG_PCM_CFG, M_PCM_CLK); /* PCM clk / 2 */ Write_hfc8(hw, R_TI_WD, TRANS_TIMER_MODE); /* timer interval */ memset(&hw->mr, 0, sizeof(hw->mr)); } /* chipreset */ /********************************************/ /* disable/enable hardware in nt or te mode */ /********************************************/ static void hfc_hardware_enable(hfc4s8s_hw * hw, int enable, int nt_mode) { u_long flags; char if_name[40]; int i; if (enable) { /* save system vars */ hw->nt_mode = nt_mode; /* enable fifo and state irqs, but not global irq enable */ hw->mr.r_irq_ctrl = M_FIFO_IRQ; Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl); hw->mr.r_irqmsk_statchg = 0; Write_hfc8(hw, R_SCI_MSK, hw->mr.r_irqmsk_statchg); Write_hfc8(hw, R_PWM_MD, 0x80); Write_hfc8(hw, R_PWM1, 26); if (!nt_mode) Write_hfc8(hw, R_ST_SYNC, M_AUTO_SYNC); /* enable the line interfaces and fifos */ for (i = 0; i < hw->driver_data.max_st_ports; i++) { hw->mr.r_irqmsk_statchg |= (1 << i); Write_hfc8(hw, R_SCI_MSK, hw->mr.r_irqmsk_statchg); Write_hfc8(hw, R_ST_SEL, i); Write_hfc8(hw, A_ST_CLK_DLY, ((nt_mode) ? CLKDEL_NT : CLKDEL_TE)); hw->mr.r_ctrl0 = ((nt_mode) ? 
CTRL0_NT : CTRL0_TE); Write_hfc8(hw, A_ST_CTRL0, hw->mr.r_ctrl0); Write_hfc8(hw, A_ST_CTRL2, 3); Write_hfc8(hw, A_ST_WR_STA, 0); /* enable state machine */ hw->l1[i].enabled = 1; hw->l1[i].nt_mode = nt_mode; if (!nt_mode) { /* setup E-fifo */ Write_hfc8(hw, R_FIFO, i * 8 + 7); /* E fifo */ wait_busy(hw); Write_hfc8(hw, A_CON_HDLC, 0x11); /* HDLC mode, 1 fill, connect ST */ Write_hfc8(hw, A_SUBCH_CFG, 2); /* only 2 bits */ Write_hfc8(hw, A_IRQ_MSK, 1); /* enable interrupt */ Write_hfc8(hw, A_INC_RES_FIFO, 2); /* reset fifo */ wait_busy(hw); /* setup D RX-fifo */ Write_hfc8(hw, R_FIFO, i * 8 + 5); /* RX fifo */ wait_busy(hw); Write_hfc8(hw, A_CON_HDLC, 0x11); /* HDLC mode, 1 fill, connect ST */ Write_hfc8(hw, A_SUBCH_CFG, 2); /* only 2 bits */ Write_hfc8(hw, A_IRQ_MSK, 1); /* enable interrupt */ Write_hfc8(hw, A_INC_RES_FIFO, 2); /* reset fifo */ wait_busy(hw); /* setup D TX-fifo */ Write_hfc8(hw, R_FIFO, i * 8 + 4); /* TX fifo */ wait_busy(hw); Write_hfc8(hw, A_CON_HDLC, 0x11); /* HDLC mode, 1 fill, connect ST */ Write_hfc8(hw, A_SUBCH_CFG, 2); /* only 2 bits */ Write_hfc8(hw, A_IRQ_MSK, 1); /* enable interrupt */ Write_hfc8(hw, A_INC_RES_FIFO, 2); /* reset fifo */ wait_busy(hw); } sprintf(if_name, "hfc4s8s_%d%d_", hw->cardnum, i); if (hisax_register (&hw->l1[i].d_if, hw->l1[i].b_table, if_name, ((nt_mode) ? 
3 : 2))) { hw->l1[i].enabled = 0; hw->mr.r_irqmsk_statchg &= ~(1 << i); Write_hfc8(hw, R_SCI_MSK, hw->mr.r_irqmsk_statchg); printk(KERN_INFO "HFC-4S/8S: Unable to register S/T device %s, break\n", if_name); break; } } spin_lock_irqsave(&hw->lock, flags); hw->mr.r_irq_ctrl |= M_GLOB_IRQ_EN; Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl); spin_unlock_irqrestore(&hw->lock, flags); } else { /* disable hardware */ spin_lock_irqsave(&hw->lock, flags); hw->mr.r_irq_ctrl &= ~M_GLOB_IRQ_EN; Write_hfc8(hw, R_IRQ_CTRL, hw->mr.r_irq_ctrl); spin_unlock_irqrestore(&hw->lock, flags); for (i = hw->driver_data.max_st_ports - 1; i >= 0; i--) { hw->l1[i].enabled = 0; hisax_unregister(&hw->l1[i].d_if); del_timer(&hw->l1[i].l1_timer); skb_queue_purge(&hw->l1[i].d_tx_queue); skb_queue_purge(&hw->l1[i].b_ch[0].tx_queue); skb_queue_purge(&hw->l1[i].b_ch[1].tx_queue); } chipreset(hw); } } /* hfc_hardware_enable */ /******************************************/ /* disable memory mapped ports / io ports */ /******************************************/ static void release_pci_ports(hfc4s8s_hw * hw) { pci_write_config_word(hw->pdev, PCI_COMMAND, 0); #ifdef HISAX_HFC4S8S_PCIMEM if (hw->membase) iounmap((void *) hw->membase); #else if (hw->iobase) release_region(hw->iobase, 8); #endif } /*****************************************/ /* enable memory mapped ports / io ports */ /*****************************************/ static void enable_pci_ports(hfc4s8s_hw * hw) { #ifdef HISAX_HFC4S8S_PCIMEM pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_MEMIO); #else pci_write_config_word(hw->pdev, PCI_COMMAND, PCI_ENA_REGIO); #endif } /*************************************/ /* initialise the HFC-4s/8s hardware */ /* return 0 on success. 
*/
/*************************************/
static int __devinit setup_instance(hfc4s8s_hw * hw)
{
	int err = -EIO;
	int i;

	/* initialise per-port layer-1 software state: lock, T1/T3 timer,
	 * D-channel tx queue and the HiSax D interface callback */
	for (i = 0; i < HFC_MAX_ST; i++) {
		struct hfc4s8s_l1 *l1p;

		l1p = hw->l1 + i;
		spin_lock_init(&l1p->lock);
		l1p->hw = hw;
		l1p->l1_timer.function = (void *) hfc_l1_timer;
		l1p->l1_timer.data = (long) (l1p);
		init_timer(&l1p->l1_timer);
		l1p->st_num = i;
		skb_queue_head_init(&l1p->d_tx_queue);
		l1p->d_if.ifc.priv = hw->l1 + i;
		l1p->d_if.ifc.l2l1 = (void *) dch_l2l1;

		/* B1 channel state */
		spin_lock_init(&l1p->b_ch[0].lock);
		l1p->b_ch[0].b_if.ifc.l2l1 = (void *) bch_l2l1;
		l1p->b_ch[0].b_if.ifc.priv = (void *) &l1p->b_ch[0];
		l1p->b_ch[0].l1p = hw->l1 + i;
		l1p->b_ch[0].bchan = 1;
		l1p->b_table[0] = &l1p->b_ch[0].b_if;
		skb_queue_head_init(&l1p->b_ch[0].tx_queue);

		/* B2 channel state */
		spin_lock_init(&l1p->b_ch[1].lock);
		l1p->b_ch[1].b_if.ifc.l2l1 = (void *) bch_l2l1;
		l1p->b_ch[1].b_if.ifc.priv = (void *) &l1p->b_ch[1];
		l1p->b_ch[1].l1p = hw->l1 + i;
		l1p->b_ch[1].bchan = 2;
		l1p->b_table[1] = &l1p->b_ch[1].b_if;
		skb_queue_head_init(&l1p->b_ch[1].tx_queue);
	}

	enable_pci_ports(hw);
	chipreset(hw);

	/* verify chip identity and revision before touching anything else */
	i = Read_hfc8(hw, R_CHIP_ID) >> CHIP_ID_SHIFT;
	if (i != hw->driver_data.chip_id) {
		printk(KERN_INFO
		       "HFC-4S/8S: invalid chip id 0x%x instead of 0x%x, card ignored\n",
		       i, hw->driver_data.chip_id);
		goto out;
	}
	i = Read_hfc8(hw, R_CHIP_RV) & 0xf;
	if (!i) {
		printk(KERN_INFO
		       "HFC-4S/8S: chip revision 0 not supported, card ignored\n");
		goto out;
	}

	INIT_WORK(&hw->tqueue, hfc4s8s_bh);

	if (request_irq
	    (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
		printk(KERN_INFO
		       "HFC-4S/8S: unable to alloc irq %d, card ignored\n",
		       hw->irq);
		goto out;
	}
#ifdef HISAX_HFC4S8S_PCIMEM
	printk(KERN_INFO
	       "HFC-4S/8S: found PCI card at membase 0x%p, irq %d\n",
	       hw->hw_membase, hw->irq);
#else
	printk(KERN_INFO
	       "HFC-4S/8S: found PCI card at iobase 0x%x, irq %d\n",
	       hw->iobase, hw->irq);
#endif
	/* bring the hardware up in TE mode */
	hfc_hardware_enable(hw, 1, 0);
	return (0);

      out:
	/* on failure this function releases everything it took over,
	 * including freeing hw itself -- the caller must not reuse hw */
	hw->irq = 0;
	release_pci_ports(hw);
	kfree(hw);
	return (err);
}
/*****************************************/ /* PCI hotplug interface: probe new card */ /*****************************************/ static int __devinit hfc4s8s_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { int err = -ENOMEM; hfc4s8s_param *driver_data = (hfc4s8s_param *) ent->driver_data; hfc4s8s_hw *hw; if (!(hw = kzalloc(sizeof(hfc4s8s_hw), GFP_ATOMIC))) { printk(KERN_ERR "No kmem for HFC-4S/8S card\n"); return (err); } hw->pdev = pdev; err = pci_enable_device(pdev); if (err) goto out; hw->cardnum = card_cnt; sprintf(hw->card_name, "hfc4s8s_%d", hw->cardnum); printk(KERN_INFO "HFC-4S/8S: found adapter %s (%s) at %s\n", driver_data->device_name, hw->card_name, pci_name(pdev)); spin_lock_init(&hw->lock); hw->driver_data = *driver_data; hw->irq = pdev->irq; hw->iobase = pci_resource_start(pdev, 0); #ifdef HISAX_HFC4S8S_PCIMEM hw->hw_membase = (u_char *) pci_resource_start(pdev, 1); hw->membase = ioremap((ulong) hw->hw_membase, 256); #else if (!request_region(hw->iobase, 8, hw->card_name)) { printk(KERN_INFO "HFC-4S/8S: failed to rquest address space at 0x%04x\n", hw->iobase); goto out; } #endif pci_set_drvdata(pdev, hw); err = setup_instance(hw); if (!err) card_cnt++; return (err); out: kfree(hw); return (err); } /**************************************/ /* PCI hotplug interface: remove card */ /**************************************/ static void __devexit hfc4s8s_remove(struct pci_dev *pdev) { hfc4s8s_hw *hw = pci_get_drvdata(pdev); printk(KERN_INFO "HFC-4S/8S: removing card %d\n", hw->cardnum); hfc_hardware_enable(hw, 0, 0); if (hw->irq) free_irq(hw->irq, hw); hw->irq = 0; release_pci_ports(hw); card_cnt--; pci_disable_device(pdev); kfree(hw); return; } static struct pci_driver hfc4s8s_driver = { .name = "hfc4s8s_l1", .probe = hfc4s8s_probe, .remove = __devexit_p(hfc4s8s_remove), .id_table = hfc4s8s_ids, }; /**********************/ /* driver Module init */ /**********************/ static int __init hfc4s8s_module_init(void) { int err; 
printk(KERN_INFO "HFC-4S/8S: Layer 1 driver module for HFC-4S/8S isdn chips, %s\n", hfc4s8s_rev); printk(KERN_INFO "HFC-4S/8S: (C) 2003 Cornelius Consult, www.cornelius-consult.de\n"); card_cnt = 0; err = pci_register_driver(&hfc4s8s_driver); if (err < 0) { goto out; } printk(KERN_INFO "HFC-4S/8S: found %d cards\n", card_cnt); #if !defined(CONFIG_HOTPLUG) if (err == 0) { err = -ENODEV; pci_unregister_driver(&hfc4s8s_driver); goto out; } #endif return 0; out: return (err); } /* hfc4s8s_init_hw */ /*************************************/ /* driver module exit : */ /* release the HFC-4s/8s hardware */ /*************************************/ static void __exit hfc4s8s_module_exit(void) { pci_unregister_driver(&hfc4s8s_driver); printk(KERN_INFO "HFC-4S/8S: module removed\n"); } /* hfc4s8s_release_hw */ module_init(hfc4s8s_module_init); module_exit(hfc4s8s_module_exit);
gpl-2.0
kratos1988/operating_systems
drivers/net/cxgb3/ael1002.c
1116
23675
/* * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/
#include "common.h"
#include "regs.h"

/* Aeluros/NetLogic vendor-specific register addresses (PMA/PMD MMD). */
enum {
	AEL100X_TX_CONFIG1 = 0xc002,
	AEL1002_PWR_DOWN_HI = 0xc011,
	AEL1002_PWR_DOWN_LO = 0xc012,
	AEL1002_XFI_EQL = 0xc015,
	AEL1002_LB_EN = 0xc017,
	AEL_OPT_SETTINGS = 0xc017,
	AEL_I2C_CTRL = 0xc30a,
	AEL_I2C_DATA = 0xc30b,
	AEL_I2C_STAT = 0xc30c,
	AEL2005_GPIO_CTRL = 0xc214,
	AEL2005_GPIO_STAT = 0xc215,

	AEL2020_GPIO_INTR = 0xc103,	/* Latch High (LH) */
	AEL2020_GPIO_CTRL = 0xc108,	/* Store Clear (SC) */
	AEL2020_GPIO_STAT = 0xc10c,	/* Read Only (RO) */
	AEL2020_GPIO_CFG = 0xc110,	/* Read Write (RW) */

	AEL2020_GPIO_SDA = 0,	/* IN: i2c serial data */
	AEL2020_GPIO_MODDET = 1,	/* IN: Module Detect */
	AEL2020_GPIO_0 = 3,	/* IN: unassigned */
	AEL2020_GPIO_1 = 2,	/* OUT: unassigned */
	AEL2020_GPIO_LSTAT = AEL2020_GPIO_1,	/* wired to link status LED */
};

/* which EDC (electronic dispersion compensation) firmware is loaded */
enum { edc_none, edc_sr, edc_twinax };

/* PHY module I2C device address */
enum {
	MODULE_DEV_ADDR = 0xa0,
	SFF_DEV_ADDR = 0xa2,
};

/* PHY transceiver type */
enum {
	phy_transtype_unknown = 0,
	phy_transtype_sfp = 3,
	phy_transtype_xfp = 6,
};

#define AEL2005_MODDET_IRQ 4

/*
 * One register-update step: modify reg_addr in MMD mmd_addr.
 * clear_bits == 0xffff means a plain write of set_bits; otherwise
 * clear_bits are cleared and set_bits set via read-modify-write.
 */
struct reg_val {
	unsigned short mmd_addr;
	unsigned short reg_addr;
	unsigned short clear_bits;
	unsigned short set_bits;
};

/*
 * Apply a table of register updates, terminated by mmd_addr == 0.
 * Stops at and returns the first MDIO error, 0 on success.
 */
static int set_phy_regs(struct cphy *phy, const struct reg_val *rv)
{
	int err;

	for (err = 0; rv->mmd_addr && !err; rv++) {
		if (rv->clear_bits == 0xffff)
			err = t3_mdio_write(phy, rv->mmd_addr, rv->reg_addr,
					    rv->set_bits);
		else
			err = t3_mdio_change_bits(phy, rv->mmd_addr,
						  rv->reg_addr, rv->clear_bits,
						  rv->set_bits);
	}
	return err;
}

/*
 * Enable the transmitter via the GPIO wired to this PHY instance
 * (GPIO7 for MDIO address 0, GPIO2 otherwise), with settle delays.
 */
static void ael100x_txon(struct cphy *phy)
{
	int tx_on_gpio =
	    phy->mdio.prtad == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;

	msleep(100);
	t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
	msleep(30);
}

/*
 * Read an 8-bit word from a device attached to the PHY's i2c bus.
*/
static int ael_i2c_rd(struct cphy *phy, int dev_addr, int word_addr)
{
	int i, err;
	unsigned int stat, data;

	/* kick off the i2c read through the PHY's i2c controller */
	err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL_I2C_CTRL,
			    (dev_addr << 8) | (1 << 8) | word_addr);
	if (err)
		return err;

	/* poll the status register, up to ~200 ms, for completion */
	for (i = 0; i < 200; i++) {
		msleep(1);
		err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_STAT, &stat);
		if (err)
			return err;
		if ((stat & 3) == 1) {	/* transaction completed OK */
			err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL_I2C_DATA,
					   &data);
			if (err)
				return err;
			/* data byte lives in the high byte of the register */
			return data >> 8;
		}
	}
	CH_WARN(phy->adapter, "PHY %u i2c read of dev.addr %#x.%#x timed out\n",
		phy->mdio.prtad, dev_addr, word_addr);
	return -ETIMEDOUT;
}

/*
 * Power the transmitter down (enable != 0) or up, via the PMA/PMD
 * transmit-disable register and the CTRL1 low-power flag.
 */
static int ael1002_power_down(struct cphy *phy, int enable)
{
	int err;

	err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS, !!enable);
	if (!err)
		err = mdio_set_flag(&phy->mdio, phy->mdio.prtad,
				    MDIO_MMD_PMAPMD, MDIO_CTRL1,
				    MDIO_CTRL1_LPOWER, enable);
	return err;
}

/*
 * Reset an AEL1002: power it up, then program the vendor-specific TX
 * configuration, power-down, XFI equalization and loopback registers.
 * Returns 0 on success or the first MDIO error.
 */
static int ael1002_reset(struct cphy *phy, int wait)
{
	int err;

	if ((err = ael1002_power_down(phy, 0)) ||
	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
				 AEL100X_TX_CONFIG1, 1)) ||
	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
				 AEL1002_PWR_DOWN_HI, 0)) ||
	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
				 AEL1002_PWR_DOWN_LO, 0)) ||
	    (err = t3_mdio_write(phy, MDIO_MMD_PMAPMD,
				 AEL1002_XFI_EQL, 0x18)) ||
	    (err = t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD,
				       AEL1002_LB_EN, 0, 1 << 5)))
		return err;
	return 0;
}

/* No-op used for all interrupt ops on PHYs without a usable interrupt. */
static int ael1002_intr_noop(struct cphy *phy)
{
	return 0;
}

/*
 * Get link status for a 10GBASE-R device.
*/
static int get_link_status_r(struct cphy *phy, int *link_ok, int *speed,
			     int *duplex, int *fc)
{
	if (link_ok) {
		unsigned int stat0, stat1, stat2;
		/* link is up only if PMA RX detect, PCS 10GBASE-R status
		 * and PHY XS lane alignment (bit 12) all report good */
		int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD,
				       MDIO_PMA_RXDET, &stat0);

		if (!err)
			err = t3_mdio_read(phy, MDIO_MMD_PCS,
					   MDIO_PCS_10GBRT_STAT1, &stat1);
		if (!err)
			err = t3_mdio_read(phy, MDIO_MMD_PHYXS,
					   MDIO_PHYXS_LNSTAT, &stat2);
		if (err)
			return err;
		*link_ok = (stat0 & stat1 & (stat2 >> 12)) & 1;
	}
	/* 10GBASE-R is always 10 Gb/s full duplex */
	if (speed)
		*speed = SPEED_10000;
	if (duplex)
		*duplex = DUPLEX_FULL;
	return 0;
}

static struct cphy_ops ael1002_ops = {
	.reset = ael1002_reset,
	.intr_enable = ael1002_intr_noop,
	.intr_disable = ael1002_intr_noop,
	.intr_clear = ael1002_intr_noop,
	.intr_handler = ael1002_intr_noop,
	.get_link_status = get_link_status_r,
	.power_down = ael1002_power_down,
	.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};

/* Initialise a cphy for an AEL1002 (10GBASE-R) and turn its laser on. */
int t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *mdio_ops)
{
	cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops,
		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
		  "10GBASE-R");
	ael100x_txon(phy);
	return 0;
}

/* AEL1006 reset is a plain PMA/PMD soft reset. */
static int ael1006_reset(struct cphy *phy, int wait)
{
	return t3_phy_reset(phy, MDIO_MMD_PMAPMD, wait);
}

static struct cphy_ops ael1006_ops = {
	.reset = ael1006_reset,
	.intr_enable = t3_phy_lasi_intr_enable,
	.intr_disable = t3_phy_lasi_intr_disable,
	.intr_clear = t3_phy_lasi_intr_clear,
	.intr_handler = t3_phy_lasi_intr_handler,
	.get_link_status = get_link_status_r,
	.power_down = ael1002_power_down,
	.mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS,
};

/* Initialise a cphy for an AEL1006 (10GBASE-SR, LASI interrupts). */
int t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *mdio_ops)
{
	cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops,
		  SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE,
		  "10GBASE-SR");
	ael100x_txon(phy);
	return 0;
}

/*
 * Decode our module type.
*/ static int ael2xxx_get_module_type(struct cphy *phy, int delay_ms) { int v; if (delay_ms) msleep(delay_ms); /* see SFF-8472 for below */ v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 3); if (v < 0) return v; if (v == 0x10) return phy_modtype_sr; if (v == 0x20) return phy_modtype_lr; if (v == 0x40) return phy_modtype_lrm; v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 6); if (v < 0) return v; if (v != 4) goto unknown; v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 10); if (v < 0) return v; if (v & 0x80) { v = ael_i2c_rd(phy, MODULE_DEV_ADDR, 0x12); if (v < 0) return v; return v > 10 ? phy_modtype_twinax_long : phy_modtype_twinax; } unknown: return phy_modtype_unknown; } /* * Code to support the Aeluros/NetLogic 2005 10Gb PHY. */ static int ael2005_setup_sr_edc(struct cphy *phy) { static struct reg_val regs[] = { { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 }, { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a }, { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 }, { 0, 0, 0, 0 } }; int i, err; err = set_phy_regs(phy, regs); if (err) return err; msleep(50); if (phy->priv != edc_sr) err = t3_get_edc_fw(phy, EDC_OPT_AEL2005, EDC_OPT_AEL2005_SIZE); if (err) return err; for (i = 0; i < EDC_OPT_AEL2005_SIZE / sizeof(u16) && !err; i += 2) err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, phy->phy_cache[i], phy->phy_cache[i + 1]); if (!err) phy->priv = edc_sr; return err; } static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype) { static struct reg_val regs[] = { { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 }, { 0, 0, 0, 0 } }; static struct reg_val preemphasis[] = { { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 }, { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 }, { 0, 0, 0, 0 } }; int i, err; err = set_phy_regs(phy, regs); if (!err && modtype == phy_modtype_twinax_long) err = set_phy_regs(phy, preemphasis); if (err) return err; msleep(50); if (phy->priv != edc_twinax) err = t3_get_edc_fw(phy, EDC_TWX_AEL2005, EDC_TWX_AEL2005_SIZE); if (err) return err; for (i = 0; i < EDC_TWX_AEL2005_SIZE / sizeof(u16) && !err; i += 2) err = 
t3_mdio_write(phy, MDIO_MMD_PMAPMD, phy->phy_cache[i], phy->phy_cache[i + 1]); if (!err) phy->priv = edc_twinax; return err; } static int ael2005_get_module_type(struct cphy *phy, int delay_ms) { int v; unsigned int stat; v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, &stat); if (v) return v; if (stat & (1 << 8)) /* module absent */ return phy_modtype_none; return ael2xxx_get_module_type(phy, delay_ms); } static int ael2005_intr_enable(struct cphy *phy) { int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x200); return err ? err : t3_phy_lasi_intr_enable(phy); } static int ael2005_intr_disable(struct cphy *phy) { int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0x100); return err ? err : t3_phy_lasi_intr_disable(phy); } static int ael2005_intr_clear(struct cphy *phy) { int err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00); return err ? err : t3_phy_lasi_intr_clear(phy); } static int ael2005_reset(struct cphy *phy, int wait) { static struct reg_val regs0[] = { { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 }, { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 }, { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 }, { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8100 }, { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0x8000 }, { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 }, { 0, 0, 0, 0 } }; static struct reg_val regs1[] = { { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 }, { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 }, { 0, 0, 0, 0 } }; int err; unsigned int lasi_ctrl; err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, &lasi_ctrl); if (err) return err; err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 0); if (err) return err; msleep(125); phy->priv = edc_none; err = set_phy_regs(phy, regs0); if (err) return err; msleep(50); err = ael2005_get_module_type(phy, 0); if (err < 0) return err; phy->modtype = err; if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) err = ael2005_setup_twinax_edc(phy, err); else err = 
ael2005_setup_sr_edc(phy); if (err) return err; err = set_phy_regs(phy, regs1); if (err) return err; /* reset wipes out interrupts, reenable them if they were on */ if (lasi_ctrl & 1) err = ael2005_intr_enable(phy); return err; } static int ael2005_intr_handler(struct cphy *phy) { unsigned int stat; int ret, edc_needed, cause = 0; ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_STAT, &stat); if (ret) return ret; if (stat & AEL2005_MODDET_IRQ) { ret = t3_mdio_write(phy, MDIO_MMD_PMAPMD, AEL2005_GPIO_CTRL, 0xd00); if (ret) return ret; /* modules have max 300 ms init time after hot plug */ ret = ael2005_get_module_type(phy, 300); if (ret < 0) return ret; phy->modtype = ret; if (ret == phy_modtype_none) edc_needed = phy->priv; /* on unplug retain EDC */ else if (ret == phy_modtype_twinax || ret == phy_modtype_twinax_long) edc_needed = edc_twinax; else edc_needed = edc_sr; if (edc_needed != phy->priv) { ret = ael2005_reset(phy, 0); return ret ? ret : cphy_cause_module_change; } cause = cphy_cause_module_change; } ret = t3_phy_lasi_intr_handler(phy); if (ret < 0) return ret; ret |= cause; return ret ? ret : cphy_cause_link_change; } static struct cphy_ops ael2005_ops = { .reset = ael2005_reset, .intr_enable = ael2005_intr_enable, .intr_disable = ael2005_intr_disable, .intr_clear = ael2005_intr_clear, .intr_handler = ael2005_intr_handler, .get_link_status = get_link_status_r, .power_down = ael1002_power_down, .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, }; int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, const struct mdio_ops *mdio_ops) { cphy_init(phy, adapter, phy_addr, &ael2005_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | SUPPORTED_IRQ, "10GBASE-R"); msleep(125); return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, AEL_OPT_SETTINGS, 0, 1 << 5); } /* * Setup EDC and other parameters for operation with an optical module. 
*/ static int ael2020_setup_sr_edc(struct cphy *phy) { static struct reg_val regs[] = { /* set CDR offset to 10 */ { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a }, /* adjust 10G RX bias current */ { MDIO_MMD_PMAPMD, 0xcb1b, 0xffff, 0x0200 }, { MDIO_MMD_PMAPMD, 0xcb1c, 0xffff, 0x00f0 }, { MDIO_MMD_PMAPMD, 0xcc06, 0xffff, 0x00e0 }, /* end */ { 0, 0, 0, 0 } }; int err; err = set_phy_regs(phy, regs); msleep(50); if (err) return err; phy->priv = edc_sr; return 0; } /* * Setup EDC and other parameters for operation with an TWINAX module. */ static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype) { /* set uC to 40MHz */ static struct reg_val uCclock40MHz[] = { { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 }, { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 }, { 0, 0, 0, 0 } }; /* activate uC clock */ static struct reg_val uCclockActivate[] = { { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 }, { 0, 0, 0, 0 } }; /* set PC to start of SRAM and activate uC */ static struct reg_val uCactivate[] = { { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 }, { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 }, { 0, 0, 0, 0 } }; int i, err; /* set uC clock and activate it */ err = set_phy_regs(phy, uCclock40MHz); msleep(500); if (err) return err; err = set_phy_regs(phy, uCclockActivate); msleep(500); if (err) return err; if (phy->priv != edc_twinax) err = t3_get_edc_fw(phy, EDC_TWX_AEL2020, EDC_TWX_AEL2020_SIZE); if (err) return err; for (i = 0; i < EDC_TWX_AEL2020_SIZE / sizeof(u16) && !err; i += 2) err = t3_mdio_write(phy, MDIO_MMD_PMAPMD, phy->phy_cache[i], phy->phy_cache[i + 1]); /* activate uC */ err = set_phy_regs(phy, uCactivate); if (!err) phy->priv = edc_twinax; return err; } /* * Return Module Type. 
*/ static int ael2020_get_module_type(struct cphy *phy, int delay_ms) { int v; unsigned int stat; v = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_STAT, &stat); if (v) return v; if (stat & (0x1 << (AEL2020_GPIO_MODDET*4))) { /* module absent */ return phy_modtype_none; } return ael2xxx_get_module_type(phy, delay_ms); } /* * Enable PHY interrupts. We enable "Module Detection" interrupts (on any * state transition) and then generic Link Alarm Status Interrupt (LASI). */ static int ael2020_intr_enable(struct cphy *phy) { struct reg_val regs[] = { /* output Module's Loss Of Signal (LOS) to LED */ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT, 0xffff, 0x4 }, { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, 0xffff, 0x8 << (AEL2020_GPIO_LSTAT*4) }, /* enable module detect status change interrupts */ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, 0xffff, 0x2 << (AEL2020_GPIO_MODDET*4) }, /* end */ { 0, 0, 0, 0 } }; int err, link_ok = 0; /* set up "link status" LED and enable module change interrupts */ err = set_phy_regs(phy, regs); if (err) return err; err = get_link_status_r(phy, &link_ok, NULL, NULL, NULL); if (err) return err; if (link_ok) t3_link_changed(phy->adapter, phy2portid(phy)); err = t3_phy_lasi_intr_enable(phy); if (err) return err; return 0; } /* * Disable PHY interrupts. The mirror of the above ... */ static int ael2020_intr_disable(struct cphy *phy) { struct reg_val regs[] = { /* reset "link status" LED to "off" */ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) }, /* disable module detect status change interrupts */ { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, 0xffff, 0x1 << (AEL2020_GPIO_MODDET*4) }, /* end */ { 0, 0, 0, 0 } }; int err; /* turn off "link status" LED and disable module change interrupts */ err = set_phy_regs(phy, regs); if (err) return err; return t3_phy_lasi_intr_disable(phy); } /* * Clear PHY interrupt state. 
*/ static int ael2020_intr_clear(struct cphy *phy) { /* * The GPIO Interrupt register on the AEL2020 is a "Latching High" * (LH) register which is cleared to the current state when it's read. * Thus, we simply read the register and discard the result. */ unsigned int stat; int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); return err ? err : t3_phy_lasi_intr_clear(phy); } static struct reg_val ael2020_reset_regs[] = { /* Erratum #2: CDRLOL asserted, causing PMA link down status */ { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 }, /* force XAUI to send LF when RX_LOS is asserted */ { MDIO_MMD_PMAPMD, 0xcd40, 0xffff, 0x0001 }, /* allow writes to transceiver module EEPROM on i2c bus */ { MDIO_MMD_PMAPMD, 0xff02, 0xffff, 0x0023 }, { MDIO_MMD_PMAPMD, 0xff03, 0xffff, 0x0000 }, { MDIO_MMD_PMAPMD, 0xff04, 0xffff, 0x0000 }, /* end */ { 0, 0, 0, 0 } }; /* * Reset the PHY and put it into a canonical operating state. */ static int ael2020_reset(struct cphy *phy, int wait) { int err; unsigned int lasi_ctrl; /* grab current interrupt state */ err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, &lasi_ctrl); if (err) return err; err = t3_phy_reset(phy, MDIO_MMD_PMAPMD, 125); if (err) return err; msleep(100); /* basic initialization for all module types */ phy->priv = edc_none; err = set_phy_regs(phy, ael2020_reset_regs); if (err) return err; /* determine module type and perform appropriate initialization */ err = ael2020_get_module_type(phy, 0); if (err < 0) return err; phy->modtype = (u8)err; if (err == phy_modtype_twinax || err == phy_modtype_twinax_long) err = ael2020_setup_twinax_edc(phy, err); else err = ael2020_setup_sr_edc(phy); if (err) return err; /* reset wipes out interrupts, reenable them if they were on */ if (lasi_ctrl & 1) err = ael2005_intr_enable(phy); return err; } /* * Handle a PHY interrupt. 
*/ static int ael2020_intr_handler(struct cphy *phy) { unsigned int stat; int ret, edc_needed, cause = 0; ret = t3_mdio_read(phy, MDIO_MMD_PMAPMD, AEL2020_GPIO_INTR, &stat); if (ret) return ret; if (stat & (0x1 << AEL2020_GPIO_MODDET)) { /* modules have max 300 ms init time after hot plug */ ret = ael2020_get_module_type(phy, 300); if (ret < 0) return ret; phy->modtype = (u8)ret; if (ret == phy_modtype_none) edc_needed = phy->priv; /* on unplug retain EDC */ else if (ret == phy_modtype_twinax || ret == phy_modtype_twinax_long) edc_needed = edc_twinax; else edc_needed = edc_sr; if (edc_needed != phy->priv) { ret = ael2020_reset(phy, 0); return ret ? ret : cphy_cause_module_change; } cause = cphy_cause_module_change; } ret = t3_phy_lasi_intr_handler(phy); if (ret < 0) return ret; ret |= cause; return ret ? ret : cphy_cause_link_change; } static struct cphy_ops ael2020_ops = { .reset = ael2020_reset, .intr_enable = ael2020_intr_enable, .intr_disable = ael2020_intr_disable, .intr_clear = ael2020_intr_clear, .intr_handler = ael2020_intr_handler, .get_link_status = get_link_status_r, .power_down = ael1002_power_down, .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, }; int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, const struct mdio_ops *mdio_ops) { int err; cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE | SUPPORTED_IRQ, "10GBASE-R"); msleep(125); err = set_phy_regs(phy, ael2020_reset_regs); if (err) return err; return 0; } /* * Get link status for a 10GBASE-X device. 
*/ static int get_link_status_x(struct cphy *phy, int *link_ok, int *speed, int *duplex, int *fc) { if (link_ok) { unsigned int stat0, stat1, stat2; int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_RXDET, &stat0); if (!err) err = t3_mdio_read(phy, MDIO_MMD_PCS, MDIO_PCS_10GBX_STAT1, &stat1); if (!err) err = t3_mdio_read(phy, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT, &stat2); if (err) return err; *link_ok = (stat0 & (stat1 >> 12) & (stat2 >> 12)) & 1; } if (speed) *speed = SPEED_10000; if (duplex) *duplex = DUPLEX_FULL; return 0; } static struct cphy_ops qt2045_ops = { .reset = ael1006_reset, .intr_enable = t3_phy_lasi_intr_enable, .intr_disable = t3_phy_lasi_intr_disable, .intr_clear = t3_phy_lasi_intr_clear, .intr_handler = t3_phy_lasi_intr_handler, .get_link_status = get_link_status_x, .power_down = ael1002_power_down, .mmds = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS, }; int t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, const struct mdio_ops *mdio_ops) { unsigned int stat; cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, "10GBASE-CX4"); /* * Some cards where the PHY is supposed to be at address 0 actually * have it at 1. 
*/ if (!phy_addr && !t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_STAT1, &stat) && stat == 0xffff) phy->mdio.prtad = 1; return 0; } static int xaui_direct_reset(struct cphy *phy, int wait) { return 0; } static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok, int *speed, int *duplex, int *fc) { if (link_ok) { unsigned int status; int prtad = phy->mdio.prtad; status = t3_read_reg(phy->adapter, XGM_REG(A_XGM_SERDES_STAT0, prtad)) | t3_read_reg(phy->adapter, XGM_REG(A_XGM_SERDES_STAT1, prtad)) | t3_read_reg(phy->adapter, XGM_REG(A_XGM_SERDES_STAT2, prtad)) | t3_read_reg(phy->adapter, XGM_REG(A_XGM_SERDES_STAT3, prtad)); *link_ok = !(status & F_LOWSIG0); } if (speed) *speed = SPEED_10000; if (duplex) *duplex = DUPLEX_FULL; return 0; } static int xaui_direct_power_down(struct cphy *phy, int enable) { return 0; } static struct cphy_ops xaui_direct_ops = { .reset = xaui_direct_reset, .intr_enable = ael1002_intr_noop, .intr_disable = ael1002_intr_noop, .intr_clear = ael1002_intr_noop, .intr_handler = ael1002_intr_noop, .get_link_status = xaui_direct_get_link_status, .power_down = xaui_direct_power_down, }; int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr, const struct mdio_ops *mdio_ops) { cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops, SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, "10GBASE-CX4"); return 0; }
gpl-2.0
k4k/linux
net/netfilter/ipvs/ip_vs_wrr.c
1628
7213
/* * IPVS: Weighted Round-Robin Scheduling module * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * Wensong Zhang : changed the ip_vs_wrr_schedule to return dest * Wensong Zhang : changed some comestics things for debugging * Wensong Zhang : changed for the d-linked destination list * Wensong Zhang : added the ip_vs_wrr_update_svc * Julian Anastasov : fixed the bug of returning destination * with weight 0 when all weights are zero * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/net.h> #include <linux/gcd.h> #include <net/ip_vs.h> /* The WRR algorithm depends on some caclulations: * - mw: maximum weight * - di: weight step, greatest common divisor from all weights * - cw: current required weight * As result, all weights are in the [di..mw] range with a step=di. * * First, we start with cw = mw and select dests with weight >= cw. * Then cw is reduced with di and all dests are checked again. * Last pass should be with cw = di. We have mw/di passes in total: * * pass 1: cw = max weight * pass 2: cw = max weight - di * pass 3: cw = max weight - 2 * di * ... * last pass: cw = di * * Weights are supposed to be >= di but we run in parallel with * weight changes, it is possible some dest weight to be reduced * below di, bad if it is the only available dest. * * So, we modify how mw is calculated, now it is reduced with (di - 1), * so that last cw is 1 to catch such dests with weight below di: * pass 1: cw = max weight - (di - 1) * pass 2: cw = max weight - di - (di - 1) * pass 3: cw = max weight - 2 * di - (di - 1) * ... 
* last pass: cw = 1 * */ /* * current destination pointer for weighted round-robin scheduling */ struct ip_vs_wrr_mark { struct ip_vs_dest *cl; /* current dest or head */ int cw; /* current weight */ int mw; /* maximum weight */ int di; /* decreasing interval */ struct rcu_head rcu_head; }; static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc) { struct ip_vs_dest *dest; int weight; int g = 0; list_for_each_entry(dest, &svc->destinations, n_list) { weight = atomic_read(&dest->weight); if (weight > 0) { if (g > 0) g = gcd(weight, g); else g = weight; } } return g ? g : 1; } /* * Get the maximum weight of the service destinations. */ static int ip_vs_wrr_max_weight(struct ip_vs_service *svc) { struct ip_vs_dest *dest; int new_weight, weight = 0; list_for_each_entry(dest, &svc->destinations, n_list) { new_weight = atomic_read(&dest->weight); if (new_weight > weight) weight = new_weight; } return weight; } static int ip_vs_wrr_init_svc(struct ip_vs_service *svc) { struct ip_vs_wrr_mark *mark; /* * Allocate the mark variable for WRR scheduling */ mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_KERNEL); if (mark == NULL) return -ENOMEM; mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list); mark->di = ip_vs_wrr_gcd_weight(svc); mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); mark->cw = mark->mw; svc->sched_data = mark; return 0; } static void ip_vs_wrr_done_svc(struct ip_vs_service *svc) { struct ip_vs_wrr_mark *mark = svc->sched_data; /* * Release the mark variable */ kfree_rcu(mark, rcu_head); } static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc, struct ip_vs_dest *dest) { struct ip_vs_wrr_mark *mark = svc->sched_data; spin_lock_bh(&svc->sched_lock); mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list); mark->di = ip_vs_wrr_gcd_weight(svc); mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1); if (mark->cw > mark->mw || !mark->cw) mark->cw = mark->mw; else if (mark->di > 1) mark->cw = (mark->cw / mark->di) * 
mark->di + 1; spin_unlock_bh(&svc->sched_lock); return 0; } /* * Weighted Round-Robin Scheduling */ static struct ip_vs_dest * ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest, *last, *stop = NULL; struct ip_vs_wrr_mark *mark = svc->sched_data; bool last_pass = false, restarted = false; IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); spin_lock_bh(&svc->sched_lock); dest = mark->cl; /* No available dests? */ if (mark->mw == 0) goto err_noavail; last = dest; /* Stop only after all dests were checked for weight >= 1 (last pass) */ while (1) { list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) { if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && atomic_read(&dest->weight) >= mark->cw) goto found; if (dest == stop) goto err_over; } mark->cw -= mark->di; if (mark->cw <= 0) { mark->cw = mark->mw; /* Stop if we tried last pass from first dest: * 1. last_pass: we started checks when cw > di but * then all dests were checked for w >= 1 * 2. last was head: the first and only traversal * was for weight >= 1, for all dests. */ if (last_pass || &last->n_list == &svc->destinations) goto err_over; restarted = true; } last_pass = mark->cw <= mark->di; if (last_pass && restarted && &last->n_list != &svc->destinations) { /* First traversal was for w >= 1 but only * for dests after 'last', now do the same * for all dests up to 'last'. 
*/ stop = last; } } found: IP_VS_DBG_BUF(6, "WRR: server %s:%u " "activeconns %d refcnt %d weight %d\n", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), atomic_read(&dest->activeconns), atomic_read(&dest->refcnt), atomic_read(&dest->weight)); mark->cl = dest; out: spin_unlock_bh(&svc->sched_lock); return dest; err_noavail: mark->cl = dest; dest = NULL; ip_vs_scheduler_err(svc, "no destination available"); goto out; err_over: mark->cl = dest; dest = NULL; ip_vs_scheduler_err(svc, "no destination available: " "all destinations are overloaded"); goto out; } static struct ip_vs_scheduler ip_vs_wrr_scheduler = { .name = "wrr", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), .init_service = ip_vs_wrr_init_svc, .done_service = ip_vs_wrr_done_svc, .add_dest = ip_vs_wrr_dest_changed, .del_dest = ip_vs_wrr_dest_changed, .upd_dest = ip_vs_wrr_dest_changed, .schedule = ip_vs_wrr_schedule, }; static int __init ip_vs_wrr_init(void) { return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ; } static void __exit ip_vs_wrr_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_wrr_scheduler); synchronize_rcu(); } module_init(ip_vs_wrr_init); module_exit(ip_vs_wrr_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
MoKee/android_kernel_zte_nx511j
arch/xtensa/platforms/xtfpga/setup.c
1628
6655
/* * * arch/xtensa/platform/xtavnet/setup.c * * ... * * Authors: Chris Zankel <chris@zankel.net> * Joe Taylor <joe@tensilica.com> * * Copyright 2001 - 2006 Tensilica Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/kdev_t.h> #include <linux/types.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/of.h> #include <asm/timex.h> #include <asm/processor.h> #include <asm/platform.h> #include <asm/bootparam.h> #include <platform/lcd.h> #include <platform/hardware.h> void platform_halt(void) { lcd_disp_at_pos(" HALT ", 0); local_irq_disable(); while (1) cpu_relax(); } void platform_power_off(void) { lcd_disp_at_pos("POWEROFF", 0); local_irq_disable(); while (1) cpu_relax(); } void platform_restart(void) { /* Flush and reset the mmu, simulate a processor reset, and * jump to the reset vector. 
*/ __asm__ __volatile__ ("movi a2, 15\n\t" "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" #if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" #endif "wsr a2, lcount\n\t" "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" "isync\n\t" "jx %0\n\t" : : "a" (XCHAL_RESET_VECTOR_VADDR) : "a2" ); /* control never gets here */ } void __init platform_setup(char **cmdline) { } #ifdef CONFIG_OF static void __init update_clock_frequency(struct device_node *node) { struct property *newfreq; u32 freq; if (!of_property_read_u32(node, "clock-frequency", &freq) && freq != 0) return; newfreq = kzalloc(sizeof(*newfreq) + sizeof(u32), GFP_KERNEL); if (!newfreq) return; newfreq->value = newfreq + 1; newfreq->length = sizeof(freq); newfreq->name = kstrdup("clock-frequency", GFP_KERNEL); if (!newfreq->name) { kfree(newfreq); return; } *(u32 *)newfreq->value = cpu_to_be32(*(u32 *)XTFPGA_CLKFRQ_VADDR); of_update_property(node, newfreq); } #define MAC_LEN 6 static void __init update_local_mac(struct device_node *node) { struct property *newmac; const u8* macaddr; int prop_len; macaddr = of_get_property(node, "local-mac-address", &prop_len); if (macaddr == NULL || prop_len != MAC_LEN) return; newmac = kzalloc(sizeof(*newmac) + MAC_LEN, GFP_KERNEL); if (newmac == NULL) return; newmac->value = newmac + 1; newmac->length = MAC_LEN; newmac->name = kstrdup("local-mac-address", GFP_KERNEL); if (newmac->name == NULL) { kfree(newmac); return; } memcpy(newmac->value, macaddr, MAC_LEN); ((u8*)newmac->value)[5] = (*(u32*)DIP_SWITCHES_VADDR) & 0x3f; of_update_property(node, newmac); } static int __init machine_setup(void) { struct device_node *serial; struct device_node *eth = NULL; for_each_compatible_node(serial, NULL, "ns16550a") update_clock_frequency(serial); if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) update_local_mac(eth); return 0; } arch_initcall(machine_setup); #endif /* early initialization */ void __init platform_init(bp_tag_t *first) { } /* Heartbeat. 
*/ void platform_heartbeat(void) { } #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT void platform_calibrate_ccount(void) { long clk_freq = 0; #ifdef CONFIG_OF struct device_node *cpu = of_find_compatible_node(NULL, NULL, "xtensa,cpu"); if (cpu) { u32 freq; update_clock_frequency(cpu); if (!of_property_read_u32(cpu, "clock-frequency", &freq)) clk_freq = freq; } #endif if (!clk_freq) clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR; ccount_per_jiffy = clk_freq / HZ; nsec_per_ccount = 1000000000UL / clk_freq; } #endif #ifndef CONFIG_OF #include <linux/serial_8250.h> #include <linux/if.h> #include <net/ethoc.h> /*---------------------------------------------------------------------------- * Ethernet -- OpenCores Ethernet MAC (ethoc driver) */ static struct resource ethoc_res[] = { [0] = { /* register space */ .start = OETH_REGS_PADDR, .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { /* buffer space */ .start = OETH_SRAMBUFF_PADDR, .end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1, .flags = IORESOURCE_MEM, }, [2] = { /* IRQ number */ .start = OETH_IRQ, .end = OETH_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct ethoc_platform_data ethoc_pdata = { /* * The MAC address for these boards is 00:50:c2:13:6f:xx. * The last byte (here as zero) is read from the DIP switches on the * board. 
*/ .hwaddr = { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0 }, .phy_id = -1, }; static struct platform_device ethoc_device = { .name = "ethoc", .id = -1, .num_resources = ARRAY_SIZE(ethoc_res), .resource = ethoc_res, .dev = { .platform_data = &ethoc_pdata, }, }; /*---------------------------------------------------------------------------- * UART */ static struct resource serial_resource = { .start = DUART16552_PADDR, .end = DUART16552_PADDR + 0x1f, .flags = IORESOURCE_MEM, }; static struct plat_serial8250_port serial_platform_data[] = { [0] = { .mapbase = DUART16552_PADDR, .irq = DUART16552_INTNUM, .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM32, .regshift = 2, .uartclk = 0, /* set in xtavnet_init() */ }, { }, }; static struct platform_device xtavnet_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = serial_platform_data, }, .num_resources = 1, .resource = &serial_resource, }; /* platform devices */ static struct platform_device *platform_devices[] __initdata = { &ethoc_device, &xtavnet_uart, }; static int __init xtavnet_init(void) { /* Ethernet MAC address. */ ethoc_pdata.hwaddr[5] = *(u32 *)DIP_SWITCHES_VADDR; /* Clock rate varies among FPGA bitstreams; board specific FPGA register * reports the actual clock rate. */ serial_platform_data[0].uartclk = *(long *)XTFPGA_CLKFRQ_VADDR; /* register platform devices */ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); /* ETHOC driver is a bit quiet; at least display Ethernet MAC, so user * knows whether they set it correctly on the DIP switches. */ pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr); return 0; } /* * Register to be done during do_initcalls(). */ arch_initcall(xtavnet_init); #endif /* CONFIG_OF */
gpl-2.0
TenchiMasaki/android_kernel_asus_moorefield
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2140
98605
/******************************************************************************* Intel 82599 Virtual Function driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code ******************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> #include "ixgbevf.h" const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; #define DRV_VERSION "2.7.12-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2012 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, [board_X540_vf] = &ixgbevf_X540_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); /* forward decls */ static void 
ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, struct ixgbevf_ring *rx_ring, u32 val) { /* * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); } /** * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue */ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); ivar &= ~0xFF; ivar |= msix_vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { /* tx or rx causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); ivar &= ~(0xFF << index); ivar |= (msix_vector << index); IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); } } static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *tx_buffer_info) { if (tx_buffer_info->dma) { if (tx_buffer_info->mapped_as_page) dma_unmap_page(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); else dma_unmap_single(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); tx_buffer_info->dma = 0; } if (tx_buffer_info->skb) { dev_kfree_skb_any(tx_buffer_info->skb); tx_buffer_info->skb = NULL; } tx_buffer_info->time_stamp = 0; /* tx_buffer_info must be completely set up in the transmit 
	   path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	eop_desc = tx_buffer_info->next_to_watch;

	do {
		bool cleaned = false;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer_info->next_to_watch = NULL;

		/* walk descriptors up to and including the EOP descriptor */
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
		}

		eop_desc = tx_buffer_info->next_to_watch;
	} while
		(count < tx_ring->count);

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	/* true when the whole ring was swept, i.e. no work remains */
	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	/* only tag the skb when the VID is in our active filter table */
	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP
	   and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->hw_csum_rx_error++;
		return;
	}

	/* no L4 checksum offloaded by hardware -> nothing more to check */
	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	ring->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

		/* only allocate when the slot does not already hold an skb */
		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	/* hand any newly initialized descriptors to the hardware */
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

/* Re-arm the interrupts selected by @qmask via the VTEIMS register. */
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

/* Clean up to @budget completed Rx descriptors on @rx_ring, passing good
 * packets up the stack.  Returns true when budget was not exhausted.
 */
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info,
				  *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		/* multi-descriptor packet: chain and wait for the EOP part */
		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    !(compare_ether_addr(adapter->netdev->dev_addr,
					 eth_hdr(skb)->h_source))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	/* top up the ring with fresh buffers before leaving */
	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	/* non-zero leftover budget means the ring was fully cleaned */
	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one or more rings associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	/* v_idx is now the last (non-queue) vector: map "other" causes to it */
	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

/* dynamic ITR throughput classes used by ixgbevf_update_itr() */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

/* Recompute this vector's ITR from its Rx and Tx ring statistics and,
 * if it changed, smooth it and write it to the VTEITR register.
 */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

/* Handler for the "other causes" MSI-X vector: PF mailbox messages,
 * acks and link status changes.
 */
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct pci_dev *pdev = adapter->pdev;
	struct
	       ixgbe_hw *hw = &adapter->hw;
	u32 msg;
	bool got_ack = false;

	hw->mac.get_link_status = 1;
	if (!hw->mbx.ops.check_for_ack(hw))
		got_ack = true;

	if (!hw->mbx.ops.check_for_msg(hw)) {
		hw->mbx.ops.read(hw, &msg, 1);

		/* a PF control message means link state may have changed;
		 * kick the watchdog to re-evaluate it */
		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 1));
			adapter->link_up = false;
		}

		if (msg & IXGBE_VT_MSGTYPE_NACK)
			dev_info(&pdev->dev,
				 "Last Request of type %2.2x to PF Nacked\n",
				 msg & 0xFF);
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
	}

	/* checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	/* re-enable the "other" cause interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/* Link Rx ring @r_idx onto the head of vector @v_idx's Rx ring list. */
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

/* Link Tx ring @t_idx onto the head of vector @v_idx's Tx ring list. */
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.
 * Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		/* name the vector after the ring type(s) it services */
		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	/* the remaining vector handles mailbox/link ("other") causes */
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

/* Detach all rings from their q_vectors and zero the per-vector counts. */
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

/* Free the "other" vector's irq and every queue-vector irq that was
 * actually requested, then unmap rings from vectors.
 */
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	/* the last vector is the "other" (mailbox) vector */
	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	/* wait for any in-flight handlers before returning */
	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

/* Program the Split Receive Control register for Rx ring @index. */
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	/* single-buffer advanced descriptors */
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

/* Select an Rx buffer size suited to the current MTU, tell the PF, and
 * apply it to every Rx ring.
 */
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}

/* .ndo_vlan_rx_add_vid: ask the PF (via mailbox) to add @vid to the
 * VLAN filter table, then track it in active_vlans.
 */
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

/* .ndo_vlan_rx_kill_vid: ask the PF to remove @vid from the VLAN filter
 * table and drop it from active_vlans.
 */
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

/* Replay every VLAN id in active_vlans back to the PF (used after reset). */
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16
	    vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

/* Push the netdev's unicast address list to the PF; a VF may program at
 * most 10 extra unicast filters.  Returns the number programmed or
 * -ENOSPC when the list is too long.
 */
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

/* Enable NAPI polling on every queue vector. */
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

/* Disable NAPI polling on every queue vector. */
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/* Program filters and VLANs, configure both DMA engines, then pre-fill
 * every Rx ring with receive buffers.
 */
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10

/* Poll until RXDCTL.ENABLE sticks for Rx queue @rxr, then write the ring's
 * tail to hand all its descriptors to the hardware.
 */
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
				adapter->rx_ring[rxr].count - 1);
}

/* Fold the current hardware counters into the saved_reset_* accumulators
 * before a reset clears the hardware statistics.
 */
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if
	   there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

/* Snapshot the hardware statistic registers as the new "last" and "base"
 * counter values; 64-bit octet counters are read as LSB then MSB halves.
 */
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

/* Negotiate the highest mailbox API version the PF supports, trying
 * api 1.1 first and falling back to 1.0.
 */
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

/* Final stage of bringing the interface up: enable Tx/Rx queues, program
 * MSI-X, set the MAC filter, start NAPI, transmits and the watchdog.
 */
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings =
adapter->num_rx_queues; u32 txdctl, rxdctl; for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); /* enable WTHRESH=8 descriptors, to encourage burst writeback */ txdctl |= (8 << 16); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); } for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); txdctl |= IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); } for (i = 0; i < num_rx_rings; i++) { j = adapter->rx_ring[i].reg_idx; rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; if (hw->mac.type == ixgbe_mac_X540_vf) { rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | IXGBE_RXDCTL_RLPML_EN); } IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); ixgbevf_rx_desc_queue_enable(adapter, i); } ixgbevf_configure_msix(adapter); spin_lock_bh(&adapter->mbx_lock); if (is_valid_ether_addr(hw->mac.addr)) hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); else hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); spin_unlock_bh(&adapter->mbx_lock); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); /* enable transmits */ netif_tx_start_all_queues(netdev); ixgbevf_save_reset_stats(adapter); ixgbevf_init_last_counter_stats(adapter); hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies); } static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbevf_ring *rx_ring; unsigned int def_q = 0; unsigned int num_tcs = 0; unsigned int num_rx_queues = 1; int err, i; spin_lock_bh(&adapter->mbx_lock); /* fetch queue configuration from the PF */ err = ixgbevf_get_queues(hw, &num_tcs, &def_q); spin_unlock_bh(&adapter->mbx_lock); if (err) return err; if (num_tcs > 1) { /* update default Tx ring register index */ adapter->tx_ring[0].reg_idx = def_q; /* we need as many queues 
as traffic classes */ num_rx_queues = num_tcs; } /* nothing to do if we have the correct number of queues */ if (adapter->num_rx_queues == num_rx_queues) return 0; /* allocate new rings */ rx_ring = kcalloc(num_rx_queues, sizeof(struct ixgbevf_ring), GFP_KERNEL); if (!rx_ring) return -ENOMEM; /* setup ring fields */ for (i = 0; i < num_rx_queues; i++) { rx_ring[i].count = adapter->rx_ring_count; rx_ring[i].queue_index = i; rx_ring[i].reg_idx = i; rx_ring[i].dev = &adapter->pdev->dev; rx_ring[i].netdev = adapter->netdev; /* allocate resources on the ring */ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); if (err) { while (i) { i--; ixgbevf_free_rx_resources(adapter, &rx_ring[i]); } kfree(rx_ring); return err; } } /* free the existing rings and queues */ ixgbevf_free_all_rx_resources(adapter); adapter->num_rx_queues = 0; kfree(adapter->rx_ring); /* move new rings into position on the adapter struct */ adapter->rx_ring = rx_ring; adapter->num_rx_queues = num_rx_queues; /* reset ring to vector mapping */ ixgbevf_reset_q_vectors(adapter); ixgbevf_map_rings_to_vectors(adapter); return 0; } void ixgbevf_up(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; ixgbevf_negotiate_api(adapter); ixgbevf_reset_queues(adapter); ixgbevf_configure(adapter); ixgbevf_up_complete(adapter); /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_VTEICR); ixgbevf_irq_enable(adapter); } /** * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure * @rx_ring: ring to free buffers from **/ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; if (!rx_ring->rx_buffer_info) return; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct ixgbevf_rx_buffer *rx_buffer_info; rx_buffer_info = &rx_ring->rx_buffer_info[i]; if (rx_buffer_info->dma) { dma_unmap_single(&pdev->dev, 
rx_buffer_info->dma, rx_ring->rx_buf_len, DMA_FROM_DEVICE); rx_buffer_info->dma = 0; } if (rx_buffer_info->skb) { struct sk_buff *skb = rx_buffer_info->skb; rx_buffer_info->skb = NULL; do { struct sk_buff *this = skb; skb = IXGBE_CB(skb)->prev; dev_kfree_skb(this); } while (skb); } } size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; memset(rx_ring->rx_buffer_info, 0, size); /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; if (rx_ring->head) writel(0, adapter->hw.hw_addr + rx_ring->head); if (rx_ring->tail) writel(0, adapter->hw.hw_addr + rx_ring->tail); } /** * ixgbevf_clean_tx_ring - Free Tx Buffers * @adapter: board private structure * @tx_ring: ring to be cleaned **/ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *tx_ring) { struct ixgbevf_tx_buffer *tx_buffer_info; unsigned long size; unsigned int i; if (!tx_ring->tx_buffer_info) return; /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; memset(tx_ring->tx_buffer_info, 0, size); memset(tx_ring->desc, 0, tx_ring->size); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; if (tx_ring->head) writel(0, adapter->hw.hw_addr + tx_ring->head); if (tx_ring->tail) writel(0, adapter->hw.hw_addr + tx_ring->tail); } /** * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues * @adapter: board private structure **/ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); } /** * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues * @adapter: board private structure **/ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < 
adapter->num_tx_queues; i++) ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); } void ixgbevf_down(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; u32 txdctl; int i, j; /* signal that we are down to the interrupt handler */ set_bit(__IXGBEVF_DOWN, &adapter->state); /* disable receives */ netif_tx_disable(netdev); msleep(10); netif_tx_stop_all_queues(netdev); ixgbevf_irq_disable(adapter); ixgbevf_napi_disable_all(adapter); del_timer_sync(&adapter->watchdog_timer); /* can't call flush scheduled work here because it can deadlock * if linkwatch_event tries to acquire the rtnl_lock which we are * holding */ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) msleep(1); /* disable transmits in the hardware now that interrupts are off */ for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), (txdctl & ~IXGBE_TXDCTL_ENABLE)); } netif_carrier_off(netdev); if (!pci_channel_offline(adapter->pdev)) ixgbevf_reset(adapter); ixgbevf_clean_all_tx_rings(adapter); ixgbevf_clean_all_rx_rings(adapter); } void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) { WARN_ON(in_interrupt()); while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) msleep(1); ixgbevf_down(adapter); ixgbevf_up(adapter); clear_bit(__IXGBEVF_RESETTING, &adapter->state); } void ixgbevf_reset(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; if (hw->mac.ops.reset_hw(hw)) hw_dbg(hw, "PF still resetting\n"); else hw->mac.ops.init_hw(hw); if (is_valid_ether_addr(adapter->hw.mac.addr)) { memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); } } static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, int vectors) { int err = 0; int vector_threshold; /* We'll want at 
least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 *
	 * pci_enable_msix() returns 0 on success, a negative errno on
	 * hard failure, or a positive count of vectors that *could* be
	 * allocated — in which case we retry with that smaller count.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	/* Shrunk below the minimum usable count — treat as allocation
	 * failure even if pci_enable_msix() itself returned a count.
	 */
	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
	return err;
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
**/ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) { int i; adapter->tx_ring = kcalloc(adapter->num_tx_queues, sizeof(struct ixgbevf_ring), GFP_KERNEL); if (!adapter->tx_ring) goto err_tx_ring_allocation; adapter->rx_ring = kcalloc(adapter->num_rx_queues, sizeof(struct ixgbevf_ring), GFP_KERNEL); if (!adapter->rx_ring) goto err_rx_ring_allocation; for (i = 0; i < adapter->num_tx_queues; i++) { adapter->tx_ring[i].count = adapter->tx_ring_count; adapter->tx_ring[i].queue_index = i; /* reg_idx may be remapped later by DCB config */ adapter->tx_ring[i].reg_idx = i; adapter->tx_ring[i].dev = &adapter->pdev->dev; adapter->tx_ring[i].netdev = adapter->netdev; } for (i = 0; i < adapter->num_rx_queues; i++) { adapter->rx_ring[i].count = adapter->rx_ring_count; adapter->rx_ring[i].queue_index = i; adapter->rx_ring[i].reg_idx = i; adapter->rx_ring[i].dev = &adapter->pdev->dev; adapter->rx_ring[i].netdev = adapter->netdev; } return 0; err_rx_ring_allocation: kfree(adapter->tx_ring); err_tx_ring_allocation: return -ENOMEM; } /** * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported * @adapter: board private structure to initialize * * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err = 0; int vector, v_budget; /* * It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for * (roughly) the same number of vectors as there are CPU's. * The default is to use pairs of vectors. */ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); v_budget = min_t(int, v_budget, num_online_cpus()); v_budget += NON_Q_VECTORS; /* A failure in MSI-X entry allocation isn't fatal, but it does * mean we disable MSI-X capabilities of the adapter. 
*/ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) { err = -ENOMEM; goto out; } for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; err = ixgbevf_acquire_msix_vectors(adapter, v_budget); if (err) goto out; err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) goto out; err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); out: return err; } /** * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors * @adapter: board private structure to initialize * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) { int q_idx, num_q_vectors; struct ixgbevf_q_vector *q_vector; num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); if (!q_vector) goto err_out; q_vector->adapter = adapter; q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); adapter->q_vector[q_idx] = q_vector; } return 0; err_out: while (q_idx) { q_idx--; q_vector = adapter->q_vector[q_idx]; netif_napi_del(&q_vector->napi); kfree(q_vector); adapter->q_vector[q_idx] = NULL; } return -ENOMEM; } /** * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors * @adapter: board private structure to initialize * * This function frees the memory allocated to the q_vectors. In addition if * NAPI is enabled it will delete any references to the NAPI struct prior * to freeing the q_vector. 
**/ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) { int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; adapter->q_vector[q_idx] = NULL; netif_napi_del(&q_vector->napi); kfree(q_vector); } } /** * ixgbevf_reset_interrupt_capability - Reset MSIX setup * @adapter: board private structure * **/ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) { pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; } /** * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init * @adapter: board private structure to initialize * **/ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) { int err; /* Number of supported queues */ ixgbevf_set_num_queues(adapter); err = ixgbevf_set_interrupt_capability(adapter); if (err) { hw_dbg(&adapter->hw, "Unable to setup interrupt capabilities\n"); goto err_set_interrupt; } err = ixgbevf_alloc_q_vectors(adapter); if (err) { hw_dbg(&adapter->hw, "Unable to allocate memory for queue " "vectors\n"); goto err_alloc_q_vectors; } err = ixgbevf_alloc_queues(adapter); if (err) { pr_err("Unable to allocate memory for queues\n"); goto err_alloc_queues; } hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " "Tx Queue count = %u\n", (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; err_alloc_queues: ixgbevf_free_q_vectors(adapter); err_alloc_q_vectors: ixgbevf_reset_interrupt_capability(adapter); err_set_interrupt: return err; } /** * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings * @adapter: board private structure to clear interrupt scheme on * * We go through and clear interrupt specific resources and reset the structure * to pre-load conditions **/ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) { adapter->num_tx_queues = 0; adapter->num_rx_queues = 0; ixgbevf_free_q_vectors(adapter); ixgbevf_reset_interrupt_capability(adapter); } /** * ixgbevf_sw_init - Initialize general software structures * (struct ixgbevf_adapter) * @adapter: board private structure to initialize * * ixgbevf_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; struct net_device *netdev = adapter->netdev; int err; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; hw->mbx.ops.init_params(hw); /* assume legacy case in which PF would only give VF 2 queues */ hw->mac.max_tx_queues = 2; hw->mac.max_rx_queues = 2; err = hw->mac.ops.reset_hw(hw); if (err) { dev_info(&pdev->dev, "PF still in reset state. 
Is the PF interface up?\n"); } else { err = hw->mac.ops.init_hw(hw); if (err) { pr_err("init_shared_code failed: %d\n", err); goto out; } err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); if (err) dev_info(&pdev->dev, "Error reading MAC address\n"); else if (is_zero_ether_addr(adapter->hw.mac.addr)) dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); } if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address\n"); eth_hw_addr_random(netdev); memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len); } /* lock to protect mailbox accesses */ spin_lock_init(&adapter->mbx_lock); /* Enable dynamic interrupt throttling rates */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; /* set default ring sizes */ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; out: return err; } #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ { \ u32 current_counter = IXGBE_READ_REG(hw, reg); \ if (current_counter < last_counter) \ counter += 0x100000000LL; \ last_counter = current_counter; \ counter &= 0xFFFFFFFF00000000LL; \ counter |= current_counter; \ } #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ { \ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ u64 current_counter = (current_counter_msb << 32) | \ current_counter_lsb; \ if (current_counter < last_counter) \ counter += 0x1000000000LL; \ last_counter = current_counter; \ counter &= 0xFFFFFFF000000000LL; \ counter |= current_counter; \ } /** * ixgbevf_update_stats - Update the board statistics counters. 
* @adapter: board private structure **/ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int i; if (!adapter->link_up) return; UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, adapter->stats.vfgprc); UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, adapter->stats.vfgptc); UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, adapter->stats.last_vfgorc, adapter->stats.vfgorc); UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, adapter->stats.last_vfgotc, adapter->stats.vfgotc); UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, adapter->stats.vfmprc); for (i = 0; i < adapter->num_rx_queues; i++) { adapter->hw_csum_rx_error += adapter->rx_ring[i].hw_csum_rx_error; adapter->hw_csum_rx_good += adapter->rx_ring[i].hw_csum_rx_good; adapter->rx_ring[i].hw_csum_rx_error = 0; adapter->rx_ring[i].hw_csum_rx_good = 0; } } /** * ixgbevf_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ static void ixgbevf_watchdog(unsigned long data) { struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; struct ixgbe_hw *hw = &adapter->hw; u32 eics = 0; int i; /* * Do the watchdog outside of interrupt context due to the lovely * delays that some of the newer hardware requires */ if (test_bit(__IXGBEVF_DOWN, &adapter->state)) goto watchdog_short_circuit; /* get one bit for every active tx/rx interrupt vector */ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) eics |= 1 << i; } IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); watchdog_short_circuit: schedule_work(&adapter->watchdog_task); } /** * ixgbevf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ static void ixgbevf_tx_timeout(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ 
schedule_work(&adapter->reset_task); } static void ixgbevf_reset_task(struct work_struct *work) { struct ixgbevf_adapter *adapter; adapter = container_of(work, struct ixgbevf_adapter, reset_task); /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; adapter->tx_timeout_count++; ixgbevf_reinit_locked(adapter); } /** * ixgbevf_watchdog_task - worker thread to bring link up * @work: pointer to work_struct containing our data **/ static void ixgbevf_watchdog_task(struct work_struct *work) { struct ixgbevf_adapter *adapter = container_of(work, struct ixgbevf_adapter, watchdog_task); struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; u32 link_speed = adapter->link_speed; bool link_up = adapter->link_up; s32 need_reset; adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; /* * Always check the link on the watchdog because we have * no LSC interrupt */ spin_lock_bh(&adapter->mbx_lock); need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); spin_unlock_bh(&adapter->mbx_lock); if (need_reset) { adapter->link_up = link_up; adapter->link_speed = link_speed; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); schedule_work(&adapter->reset_task); goto pf_has_reset; } adapter->link_up = link_up; adapter->link_speed = link_speed; if (link_up) { if (!netif_carrier_ok(netdev)) { char *link_speed_string; switch (link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: link_speed_string = "10 Gbps"; break; case IXGBE_LINK_SPEED_1GB_FULL: link_speed_string = "1 Gbps"; break; case IXGBE_LINK_SPEED_100_FULL: link_speed_string = "100 Mbps"; break; default: link_speed_string = "unknown speed"; break; } dev_info(&adapter->pdev->dev, "NIC Link is Up, %s\n", link_speed_string); netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); } } else { adapter->link_up = false; adapter->link_speed = 0; if (netif_carrier_ok(netdev)) { 
dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); } } ixgbevf_update_stats(adapter); pf_has_reset: /* Reset the timer */ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + (2 * HZ))); adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; } /** * ixgbevf_free_tx_resources - Free Tx Resources per Queue * @adapter: board private structure * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; ixgbevf_clean_tx_ring(adapter, tx_ring); vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } /** * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i].desc) ixgbevf_free_tx_resources(adapter, &adapter->tx_ring[i]); } /** * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: board private structure * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) goto err; /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if 
(!tx_ring->desc) goto err; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return 0; err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " "descriptor ring\n"); return -ENOMEM; } /** * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources * @adapter: board private structure * * If this function returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); if (!err) continue; hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); break; } return err; } /** * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: board private structure * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; int size; size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) goto alloc_failed; /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; goto alloc_failed; } rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return 0; alloc_failed: return -ENOMEM; } /** * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * If this function 
returns with an error, then it's possible one or * more of the rings is populated (while the rest are not). It is the * callers duty to clean those orphaned rings. * * Return 0 on success, negative on failure **/ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); if (!err) continue; hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); break; } return err; } /** * ixgbevf_free_rx_resources - Free Rx Resources * @adapter: board private structure * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; ixgbevf_clean_rx_ring(adapter, rx_ring); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } /** * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources **/ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i].desc) ixgbevf_free_rx_resources(adapter, &adapter->rx_ring[i]); } static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbevf_ring *rx_ring; unsigned int def_q = 0; unsigned int num_tcs = 0; unsigned int num_rx_queues = 1; int err, i; spin_lock_bh(&adapter->mbx_lock); /* fetch queue configuration from the PF */ err = ixgbevf_get_queues(hw, &num_tcs, &def_q); spin_unlock_bh(&adapter->mbx_lock); if (err) return err; if (num_tcs > 1) { /* update default Tx ring register index */ adapter->tx_ring[0].reg_idx = def_q; /* we need as many queues as traffic classes */ num_rx_queues = num_tcs; } /* 
nothing to do if we have the correct number of queues */ if (adapter->num_rx_queues == num_rx_queues) return 0; /* allocate new rings */ rx_ring = kcalloc(num_rx_queues, sizeof(struct ixgbevf_ring), GFP_KERNEL); if (!rx_ring) return -ENOMEM; /* setup ring fields */ for (i = 0; i < num_rx_queues; i++) { rx_ring[i].count = adapter->rx_ring_count; rx_ring[i].queue_index = i; rx_ring[i].reg_idx = i; rx_ring[i].dev = &adapter->pdev->dev; rx_ring[i].netdev = adapter->netdev; } /* free the existing ring and queues */ adapter->num_rx_queues = 0; kfree(adapter->rx_ring); /* move new rings into position on the adapter struct */ adapter->rx_ring = rx_ring; adapter->num_rx_queues = num_rx_queues; return 0; } /** * ixgbevf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ static int ixgbevf_open(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; int err; /* A previous failure to open the device because of a lack of * available MSIX vector resources may have reset the number * of msix vectors variable to zero. The only way to recover * is to unload/reload the driver and hope that the system has * been able to recover some MSIX vector resources. */ if (!adapter->num_msix_vectors) return -ENOMEM; /* disallow open during test */ if (test_bit(__IXGBEVF_TESTING, &adapter->state)) return -EBUSY; if (hw->adapter_stopped) { ixgbevf_reset(adapter); /* if adapter is still stopped then PF isn't up and * the vf can't start. 
*/ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; pr_err("Unable to start - perhaps the PF Driver isn't " "up yet\n"); goto err_setup_reset; } } ixgbevf_negotiate_api(adapter); /* setup queue reg_idx and Rx queue count */ err = ixgbevf_setup_queues(adapter); if (err) goto err_setup_queues; /* allocate transmit descriptors */ err = ixgbevf_setup_all_tx_resources(adapter); if (err) goto err_setup_tx; /* allocate receive descriptors */ err = ixgbevf_setup_all_rx_resources(adapter); if (err) goto err_setup_rx; ixgbevf_configure(adapter); /* * Map the Tx/Rx rings to the vectors we were allotted. * if request_irq will be called in this function map_rings * must be called *before* up_complete */ ixgbevf_map_rings_to_vectors(adapter); ixgbevf_up_complete(adapter); /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_VTEICR); err = ixgbevf_request_irq(adapter); if (err) goto err_req_irq; ixgbevf_irq_enable(adapter); return 0; err_req_irq: ixgbevf_down(adapter); err_setup_rx: ixgbevf_free_all_rx_resources(adapter); err_setup_tx: ixgbevf_free_all_tx_resources(adapter); err_setup_queues: ixgbevf_reset(adapter); err_setup_reset: return err; } /** * ixgbevf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. 
**/ static int ixgbevf_close(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); return 0; } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; /* set bits to identify this as an advanced context descriptor */ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) { u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; if (!skb_is_gso(skb)) return 0; if (skb_header_cloned(skb)) { int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } /* compute header lengths */ l4len = tcp_hdrlen(skb); *hdr_len += l4len; *hdr_len = skb_transport_offset(skb) + l4len; /* mss_l4len_id: use 1 as index for TSO */ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= 1 << 
IXGBE_ADVTXD_IDX_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_network_header_len(skb); vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; } static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags) { u32 vlan_macip_lens = 0; u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; switch (skb->protocol) { case __constant_htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; l4_hdr = ip_hdr(skb)->protocol; break; case __constant_htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; break; default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, "partial checksum but proto=%x!\n", skb->protocol); } break; } switch (l4_hdr) { case IPPROTO_TCP: type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; mss_l4len_idx = tcp_hdrlen(skb) << IXGBE_ADVTXD_L4LEN_SHIFT; break; case IPPROTO_SCTP: type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; mss_l4len_idx = sizeof(struct sctphdr) << IXGBE_ADVTXD_L4LEN_SHIFT; break; case IPPROTO_UDP: mss_l4len_idx = sizeof(struct udphdr) << IXGBE_ADVTXD_L4LEN_SHIFT; break; default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, "partial checksum but l4 proto=%x!\n", l4_hdr); } break; } } /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return (skb->ip_summed == CHECKSUM_PARTIAL); } static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags) { struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; unsigned int total = skb->len; unsigned int offset = 0, size; int 
count = 0; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int f; int i; i = tx_ring->next_to_use; len = min(skb_headlen(skb), total); while (len) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); tx_buffer_info->length = size; tx_buffer_info->mapped_as_page = false; tx_buffer_info->dma = dma_map_single(tx_ring->dev, skb->data + offset, size, DMA_TO_DEVICE); if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) goto dma_error; len -= size; total -= size; offset += size; count++; i++; if (i == tx_ring->count) i = 0; } for (f = 0; f < nr_frags; f++) { const struct skb_frag_struct *frag; frag = &skb_shinfo(skb)->frags[f]; len = min((unsigned int)skb_frag_size(frag), total); offset = 0; while (len) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); tx_buffer_info->length = size; tx_buffer_info->dma = skb_frag_dma_map(tx_ring->dev, frag, offset, size, DMA_TO_DEVICE); if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) goto dma_error; tx_buffer_info->mapped_as_page = true; len -= size; total -= size; offset += size; count++; i++; if (i == tx_ring->count) i = 0; } if (total == 0) break; } if (i == 0) i = tx_ring->count - 1; else i = i - 1; tx_ring->tx_buffer_info[i].skb = skb; return count; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); /* clear timestamp and dma mappings for failed tx_buffer_info map */ tx_buffer_info->dma = 0; count--; /* clear timestamp and dma mappings for remaining portion of packet */ while (count >= 0) { count--; i--; if (i < 0) i += tx_ring->count; tx_buffer_info = &tx_ring->tx_buffer_info[i]; ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } return count; } static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, int count, unsigned int first, u32 paylen, u8 hdr_len) { union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbevf_tx_buffer *tx_buffer_info; u32 olinfo_status = 0, 
cmd_type_len = 0; unsigned int i; u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; if (tx_flags & IXGBE_TX_FLAGS_VLAN) cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; if (tx_flags & IXGBE_TX_FLAGS_CSUM) olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; if (tx_flags & IXGBE_TX_FLAGS_TSO) { cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; /* use index 1 context for tso */ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; } /* * Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running */ olinfo_status |= IXGBE_ADVTXD_CC; olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); i = tx_ring->next_to_use; while (count--) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | tx_buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; if (i == tx_ring->count) i = 0; } tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); tx_ring->tx_buffer_info[first].time_stamp = jiffies; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); tx_ring->tx_buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; } static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ smp_mb(); /* We need to check again in a case another CPU has just * made room available. 
*/ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++adapter->restart_queue; return 0; } static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) return 0; return __ixgbevf_maybe_stop_tx(tx_ring, size); } static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring; unsigned int first; unsigned int tx_flags = 0; u8 hdr_len = 0; int r_idx = 0, tso; u16 count = TXD_USE_COUNT(skb_headlen(skb)); #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); if (!dst_mac || is_link_local_ether_addr(dst_mac)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } tx_ring = &adapter->tx_ring[r_idx]; /* * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, * otherwise try next time */ #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); #else count += skb_shinfo(skb)->nr_frags; #endif if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { adapter->tx_busy++; return NETDEV_TX_BUSY; } if (vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb); tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } first = tx_ring->next_to_use; if (skb->protocol == htons(ETH_P_IP)) tx_flags |= IXGBE_TX_FLAGS_IPV4; tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (tso) tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) tx_flags |= IXGBE_TX_FLAGS_CSUM; 
ixgbevf_tx_queue(tx_ring, tx_flags, ixgbevf_tx_map(tx_ring, skb, tx_flags), first, skb->len, hdr_len); writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); return NETDEV_TX_OK; } /** * ixgbevf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int ixgbevf_set_mac(struct net_device *netdev, void *p) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); spin_lock_bh(&adapter->mbx_lock); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); spin_unlock_bh(&adapter->mbx_lock); return 0; } /** * ixgbevf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; default: if (adapter->hw.mac.type == ixgbe_mac_X540_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; } /* MTU < 68 is an error and causes problems on some kernels */ if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); return 0; } static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t 
state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); #ifdef CONFIG_PM int retval = 0; #endif netif_device_detach(netdev); if (netif_running(netdev)) { rtnl_lock(); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); rtnl_unlock(); } ixgbevf_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) return retval; #endif pci_disable_device(pdev); return 0; } #ifdef CONFIG_PM static int ixgbevf_resume(struct pci_dev *pdev) { struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* * pci_restore_state clears dev->state_saved so call * pci_save_state to restore it. */ pci_save_state(pdev); err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); rtnl_lock(); err = ixgbevf_init_interrupt_scheme(adapter); rtnl_unlock(); if (err) { dev_err(&pdev->dev, "Cannot initialize interrupts\n"); return err; } ixgbevf_reset(adapter); if (netif_running(netdev)) { err = ixgbevf_open(netdev); if (err) return err; } netif_device_attach(netdev); return err; } #endif /* CONFIG_PM */ static void ixgbevf_shutdown(struct pci_dev *pdev) { ixgbevf_suspend(pdev, PMSG_SUSPEND); } static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int start; u64 bytes, packets; const struct ixgbevf_ring *ring; int i; ixgbevf_update_stats(adapter); stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; for (i = 0; i < adapter->num_rx_queues; i++) { ring = &adapter->rx_ring[i]; do { start = u64_stats_fetch_begin_bh(&ring->syncp); bytes = ring->total_bytes; packets = ring->total_packets; } while 
(u64_stats_fetch_retry_bh(&ring->syncp, start)); stats->rx_bytes += bytes; stats->rx_packets += packets; } for (i = 0; i < adapter->num_tx_queues; i++) { ring = &adapter->tx_ring[i]; do { start = u64_stats_fetch_begin_bh(&ring->syncp); bytes = ring->total_bytes; packets = ring->total_packets; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); stats->tx_bytes += bytes; stats->tx_packets += packets; } return stats; } static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, .ndo_start_xmit = ixgbevf_xmit_frame, .ndo_set_rx_mode = ixgbevf_set_rx_mode, .ndo_get_stats64 = ixgbevf_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbevf_set_mac, .ndo_change_mtu = ixgbevf_change_mtu, .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) { dev->netdev_ops = &ixgbevf_netdev_ops; ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; } /** * ixgbevf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbevf_pci_tbl * * Returns 0 on success, negative on failure * * ixgbevf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbevf_adapter *adapter = NULL; struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; static int cards_found; int err, pci_using_dac; err = pci_enable_device(pdev); if (err) return err; if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA " "configuration, aborting\n"); goto err_dma; } } pci_using_dac = 0; } err = pci_request_regions(pdev, ixgbevf_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); goto err_pci_reg; } pci_set_master(pdev); netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), MAX_TX_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* * call save state here in standalone driver because it relies on * adapter struct to exist, and needs to call netdev_priv */ pci_save_state(pdev); hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!hw->hw_addr) { err = -EIO; goto err_ioremap; } ixgbevf_assign_netdev_ops(netdev); adapter->bd_number = cards_found; /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, sizeof(struct ixgbe_mbx_operations)); /* setup the private structure */ err = ixgbevf_sw_init(adapter); if (err) goto err_sw_init; /* The HW MAC address was set and/or determined in sw_init */ if (!is_valid_ether_addr(netdev->dev_addr)) { 
pr_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; } netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_IP_CSUM; netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = ixgbevf_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); err = ixgbevf_init_interrupt_scheme(adapter); if (err) goto err_sw_init; strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_register; netif_carrier_off(netdev); ixgbevf_init_last_counter_stats(adapter); /* print the MAC address */ hw_dbg(hw, "%pM\n", netdev->dev_addr); hw_dbg(hw, "MAC: %d\n", hw->mac.type); hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); cards_found++; return 0; err_register: ixgbevf_clear_interrupt_scheme(adapter); err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * ixgbevf_remove - Device Removal Routine * @pdev: PCI device information struct * * ixgbevf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/ static void ixgbevf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); set_bit(__IXGBEVF_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->watchdog_task); if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->hw.hw_addr); pci_release_regions(pdev); hw_dbg(&adapter->hw, "Remove complete\n"); kfree(adapter->tx_ring); kfree(adapter->rx_ring); free_netdev(netdev); pci_disable_device(pdev); } /** * ixgbevf_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state * * This function is called after a PCI bus error affecting * this device has been detected. */ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); netif_device_detach(netdev); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (netif_running(netdev)) ixgbevf_down(adapter); pci_disable_device(pdev); /* Request a slot slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** * ixgbevf_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the ixgbevf_resume routine. 
*/ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); if (pci_enable_device_mem(pdev)) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); ixgbevf_reset(adapter); return PCI_ERS_RESULT_RECOVERED; } /** * ixgbevf_io_resume - called when traffic can start flowing again. * @pdev: Pointer to PCI device * * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the ixgbevf_resume routine. */ static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); if (netif_running(netdev)) ixgbevf_up(adapter); netif_device_attach(netdev); } /* PCI Error Recovery (ERS) */ static const struct pci_error_handlers ixgbevf_err_handler = { .error_detected = ixgbevf_io_error_detected, .slot_reset = ixgbevf_io_slot_reset, .resume = ixgbevf_io_resume, }; static struct pci_driver ixgbevf_driver = { .name = ixgbevf_driver_name, .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = ixgbevf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ .suspend = ixgbevf_suspend, .resume = ixgbevf_resume, #endif .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler }; /** * ixgbevf_init_module - Driver Registration Routine * * ixgbevf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. 
**/ static int __init ixgbevf_init_module(void) { int ret; pr_info("%s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); pr_info("%s\n", ixgbevf_copyright); ret = pci_register_driver(&ixgbevf_driver); return ret; } module_init(ixgbevf_init_module); /** * ixgbevf_exit_module - Driver Exit Cleanup Routine * * ixgbevf_exit_module is called just before the driver is removed * from memory. **/ static void __exit ixgbevf_exit_module(void) { pci_unregister_driver(&ixgbevf_driver); } #ifdef DEBUG /** * ixgbevf_get_hw_dev_name - return device name string * used by hardware layer to print debugging information **/ char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) { struct ixgbevf_adapter *adapter = hw->back; return adapter->netdev->name; } #endif module_exit(ixgbevf_exit_module); /* ixgbevf_main.c */
gpl-2.0
piccolo-dev/aquaris-M5
drivers/net/wireless/rtlwifi/regd.c
2396
11842
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "wifi.h" #include "regd.h" static struct country_code_to_enum_rd allCountries[] = { {COUNTRY_CODE_FCC, "US"}, {COUNTRY_CODE_IC, "US"}, {COUNTRY_CODE_ETSI, "EC"}, {COUNTRY_CODE_SPAIN, "EC"}, {COUNTRY_CODE_FRANCE, "EC"}, {COUNTRY_CODE_MKK, "JP"}, {COUNTRY_CODE_MKK1, "JP"}, {COUNTRY_CODE_ISRAEL, "EC"}, {COUNTRY_CODE_TELEC, "JP"}, {COUNTRY_CODE_MIC, "JP"}, {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"}, {COUNTRY_CODE_WORLD_WIDE_13, "EC"}, {COUNTRY_CODE_TELEC_NETGEAR, "EC"}, }; /* *Only these channels all allow active *scan on all world regulatory domains */ #define RTL819x_2GHZ_CH01_11 \ REG_RULE(2412-10, 2462+10, 40, 0, 20, 0) /* *We enable active scan on these a case *by case basis by regulatory domain */ #define RTL819x_2GHZ_CH12_13 \ REG_RULE(2467-10, 2472+10, 40, 0, 20,\ NL80211_RRF_PASSIVE_SCAN) #define RTL819x_2GHZ_CH14 \ REG_RULE(2484-10, 2484+10, 40, 0, 20, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_OFDM) /* 5G chan 36 - chan 64*/ #define RTL819x_5GHZ_5150_5350 \ REG_RULE(5150-10, 5350+10, 40, 0, 30, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) /* 5G chan 100 - chan 165*/ #define RTL819x_5GHZ_5470_5850 \ REG_RULE(5470-10, 5850+10, 40, 0, 30, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) /* 5G chan 149 - chan 165*/ #define RTL819x_5GHZ_5725_5850 \ REG_RULE(5725-10, 5850+10, 40, 0, 30, \ NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_IBSS) #define RTL819x_5GHZ_ALL \ (RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850) static const struct ieee80211_regdomain rtl_regdom_11 = { .n_reg_rules = 1, .alpha2 = "99", .reg_rules = { RTL819x_2GHZ_CH01_11, } }; static const struct ieee80211_regdomain rtl_regdom_12_13 = { .n_reg_rules = 2, .alpha2 = "99", .reg_rules = { RTL819x_2GHZ_CH01_11, RTL819x_2GHZ_CH12_13, } }; static const struct ieee80211_regdomain rtl_regdom_no_midband = { .n_reg_rules = 3, .alpha2 = "99", .reg_rules = { 
RTL819x_2GHZ_CH01_11, RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5725_5850, } }; static const struct ieee80211_regdomain rtl_regdom_60_64 = { .n_reg_rules = 3, .alpha2 = "99", .reg_rules = { RTL819x_2GHZ_CH01_11, RTL819x_2GHZ_CH12_13, RTL819x_5GHZ_5725_5850, } }; static const struct ieee80211_regdomain rtl_regdom_14_60_64 = { .n_reg_rules = 4, .alpha2 = "99", .reg_rules = { RTL819x_2GHZ_CH01_11, RTL819x_2GHZ_CH12_13, RTL819x_2GHZ_CH14, RTL819x_5GHZ_5725_5850, } }; static const struct ieee80211_regdomain rtl_regdom_14 = { .n_reg_rules = 3, .alpha2 = "99", .reg_rules = { RTL819x_2GHZ_CH01_11, RTL819x_2GHZ_CH12_13, RTL819x_2GHZ_CH14, } }; static bool _rtl_is_radar_freq(u16 center_freq) { return (center_freq >= 5260 && center_freq <= 5700); } static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { enum ieee80211_band band; struct ieee80211_supported_band *sband; const struct ieee80211_reg_rule *reg_rule; struct ieee80211_channel *ch; unsigned int i; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (!wiphy->bands[band]) continue; sband = wiphy->bands[band]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (_rtl_is_radar_freq(ch->center_freq) || (ch->flags & IEEE80211_CHAN_RADAR)) continue; if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { reg_rule = freq_reg_info(wiphy, ch->center_freq); if (IS_ERR(reg_rule)) continue; /* *If 11d had a rule for this channel ensure *we enable adhoc/beaconing if it allows us to *use it. Note that we would have disabled it *by applying our static world regdomain by *default during init, prior to calling our *regulatory_hint(). 
*/ if (!(reg_rule->flags & NL80211_RRF_NO_IBSS)) ch->flags &= ~IEEE80211_CHAN_NO_IBSS; if (!(reg_rule-> flags & NL80211_RRF_PASSIVE_SCAN)) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } else { if (ch->beacon_found) ch->flags &= ~(IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN); } } } } /* Allows active scan scan on Ch 12 and 13 */ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; const struct ieee80211_reg_rule *reg_rule; if (!wiphy->bands[IEEE80211_BAND_2GHZ]) return; sband = wiphy->bands[IEEE80211_BAND_2GHZ]; /* *If no country IE has been received always enable active scan *on these channels. This is only done for specific regulatory SKUs */ if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { ch = &sband->channels[11]; /* CH 12 */ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; ch = &sband->channels[12]; /* CH 13 */ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; return; } /* *If a country IE has been received check its rule for this *channel first before enabling active scan. The passive scan *would have been enforced by the initial processing of our *custom regulatory domain. 
*/ ch = &sband->channels[11]; /* CH 12 */ reg_rule = freq_reg_info(wiphy, ch->center_freq); if (!IS_ERR(reg_rule)) { if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } ch = &sband->channels[12]; /* CH 13 */ reg_rule = freq_reg_info(wiphy, ch->center_freq); if (!IS_ERR(reg_rule)) { if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; } } /* *Always apply Radar/DFS rules on *freq range 5260 MHz - 5700 MHz */ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; unsigned int i; if (!wiphy->bands[IEEE80211_BAND_5GHZ]) return; sband = wiphy->bands[IEEE80211_BAND_5GHZ]; for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; if (!_rtl_is_radar_freq(ch->center_freq)) continue; /* *We always enable radar detection/DFS on this *frequency range. Additionally we also apply on *this frequency range: *- If STA mode does not yet have DFS supports disable * active scanning *- If adhoc mode does not support DFS yet then disable * adhoc in the frequency. 
*- If AP mode does not yet support radar detection/DFS *do not allow AP mode */ if (!(ch->flags & IEEE80211_CHAN_DISABLED)) ch->flags |= IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IBSS | IEEE80211_CHAN_PASSIVE_SCAN; } } static void _rtl_reg_apply_world_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct rtl_regulatory *reg) { _rtl_reg_apply_beaconing_flags(wiphy, initiator); _rtl_reg_apply_active_scan_flags(wiphy, initiator); return; } static void _rtl_reg_notifier_apply(struct wiphy *wiphy, struct regulatory_request *request, struct rtl_regulatory *reg) { /* We always apply this */ _rtl_reg_apply_radar_flags(wiphy); switch (request->initiator) { case NL80211_REGDOM_SET_BY_DRIVER: case NL80211_REGDOM_SET_BY_CORE: case NL80211_REGDOM_SET_BY_USER: break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: _rtl_reg_apply_world_flags(wiphy, request->initiator, reg); break; } } static const struct ieee80211_regdomain *_rtl_regdomain_select( struct rtl_regulatory *reg) { switch (reg->country_code) { case COUNTRY_CODE_FCC: return &rtl_regdom_no_midband; case COUNTRY_CODE_IC: return &rtl_regdom_11; case COUNTRY_CODE_ETSI: case COUNTRY_CODE_TELEC_NETGEAR: return &rtl_regdom_60_64; case COUNTRY_CODE_SPAIN: case COUNTRY_CODE_FRANCE: case COUNTRY_CODE_ISRAEL: case COUNTRY_CODE_WORLD_WIDE_13: return &rtl_regdom_12_13; case COUNTRY_CODE_MKK: case COUNTRY_CODE_MKK1: case COUNTRY_CODE_TELEC: case COUNTRY_CODE_MIC: return &rtl_regdom_14_60_64; case COUNTRY_CODE_GLOBAL_DOMAIN: return &rtl_regdom_14; default: return &rtl_regdom_no_midband; } } static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg, struct wiphy *wiphy, void (*reg_notifier) (struct wiphy *wiphy, struct regulatory_request * request)) { const struct ieee80211_regdomain *regd; wiphy->reg_notifier = reg_notifier; wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY; wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS; regd = _rtl_regdomain_select(reg); 
wiphy_apply_custom_regulatory(wiphy, regd); _rtl_reg_apply_radar_flags(wiphy); _rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); return 0; } static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode) { int i; for (i = 0; i < ARRAY_SIZE(allCountries); i++) { if (allCountries[i].countrycode == countrycode) return &allCountries[i]; } return NULL; } int rtl_regd_init(struct ieee80211_hw *hw, void (*reg_notifier) (struct wiphy *wiphy, struct regulatory_request *request)) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct wiphy *wiphy = hw->wiphy; struct country_code_to_enum_rd *country = NULL; if (wiphy == NULL || &rtlpriv->regd == NULL) return -EINVAL; /* init country_code from efuse channel plan */ rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan; RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE, "rtl: EEPROM regdomain: 0x%0x\n", rtlpriv->regd.country_code); if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, "rtl: EEPROM indicates invalid contry code, world wide 13 should be used\n"); rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13; } country = _rtl_regd_find_country(rtlpriv->regd.country_code); if (country) { rtlpriv->regd.alpha2[0] = country->iso_name[0]; rtlpriv->regd.alpha2[1] = country->iso_name[1]; } else { rtlpriv->regd.alpha2[0] = '0'; rtlpriv->regd.alpha2[1] = '0'; } RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE, "rtl: Country alpha2 being used: %c%c\n", rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]); _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier); return 0; } void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n"); _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd); }
gpl-2.0
halcyonaoh/blackbox_sprout
drivers/video/tcx.c
2396
12326
/* tcx.c: TCX frame buffer driver * * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net) * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * * Driver layout based loosely on tgafb.c, see that file for credits. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/fbio.h> #include "sbuslib.h" /* * Local functions. */ static int tcx_setcolreg(unsigned, unsigned, unsigned, unsigned, unsigned, struct fb_info *); static int tcx_blank(int, struct fb_info *); static int tcx_mmap(struct fb_info *, struct vm_area_struct *); static int tcx_ioctl(struct fb_info *, unsigned int, unsigned long); static int tcx_pan_display(struct fb_var_screeninfo *, struct fb_info *); /* * Frame buffer operations */ static struct fb_ops tcx_ops = { .owner = THIS_MODULE, .fb_setcolreg = tcx_setcolreg, .fb_blank = tcx_blank, .fb_pan_display = tcx_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_mmap = tcx_mmap, .fb_ioctl = tcx_ioctl, #ifdef CONFIG_COMPAT .fb_compat_ioctl = sbusfb_compat_ioctl, #endif }; /* THC definitions */ #define TCX_THC_MISC_REV_SHIFT 16 #define TCX_THC_MISC_REV_MASK 15 #define TCX_THC_MISC_VSYNC_DIS (1 << 25) #define TCX_THC_MISC_HSYNC_DIS (1 << 24) #define TCX_THC_MISC_RESET (1 << 12) #define TCX_THC_MISC_VIDEO (1 << 10) #define TCX_THC_MISC_SYNC (1 << 9) #define TCX_THC_MISC_VSYNC (1 << 8) #define TCX_THC_MISC_SYNC_ENAB (1 << 7) #define TCX_THC_MISC_CURS_RES (1 << 6) #define TCX_THC_MISC_INT_ENAB (1 << 5) #define TCX_THC_MISC_INT (1 << 4) #define TCX_THC_MISC_INIT 0x9f #define TCX_THC_REV_REV_SHIFT 20 #define TCX_THC_REV_REV_MASK 15 #define TCX_THC_REV_MINREV_SHIFT 28 #define 
TCX_THC_REV_MINREV_MASK 15 /* The contents are unknown */ struct tcx_tec { u32 tec_matrix; u32 tec_clip; u32 tec_vdc; }; struct tcx_thc { u32 thc_rev; u32 thc_pad0[511]; u32 thc_hs; /* hsync timing */ u32 thc_hsdvs; u32 thc_hd; u32 thc_vs; /* vsync timing */ u32 thc_vd; u32 thc_refresh; u32 thc_misc; u32 thc_pad1[56]; u32 thc_cursxy; /* cursor x,y position (16 bits each) */ u32 thc_cursmask[32]; /* cursor mask bits */ u32 thc_cursbits[32]; /* what to show where mask enabled */ }; struct bt_regs { u32 addr; u32 color_map; u32 control; u32 cursor; }; #define TCX_MMAP_ENTRIES 14 struct tcx_par { spinlock_t lock; struct bt_regs __iomem *bt; struct tcx_thc __iomem *thc; struct tcx_tec __iomem *tec; u32 __iomem *cplane; u32 flags; #define TCX_FLAG_BLANKED 0x00000001 unsigned long which_io; struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES]; int lowdepth; }; /* Reset control plane so that WID is 8-bit plane. */ static void __tcx_set_control_plane(struct fb_info *info) { struct tcx_par *par = info->par; u32 __iomem *p, *pend; if (par->lowdepth) return; p = par->cplane; if (p == NULL) return; for (pend = p + info->fix.smem_len; p < pend; p++) { u32 tmp = sbus_readl(p); tmp &= 0xffffff; sbus_writel(tmp, p); } } static void tcx_reset(struct fb_info *info) { struct tcx_par *par = (struct tcx_par *) info->par; unsigned long flags; spin_lock_irqsave(&par->lock, flags); __tcx_set_control_plane(info); spin_unlock_irqrestore(&par->lock, flags); } static int tcx_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { tcx_reset(info); return 0; } /** * tcx_setcolreg - Optional function. Sets a color register. * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. 
* @info: frame buffer info structure */ static int tcx_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct tcx_par *par = (struct tcx_par *) info->par; struct bt_regs __iomem *bt = par->bt; unsigned long flags; if (regno >= 256) return 1; red >>= 8; green >>= 8; blue >>= 8; spin_lock_irqsave(&par->lock, flags); sbus_writel(regno << 24, &bt->addr); sbus_writel(red << 24, &bt->color_map); sbus_writel(green << 24, &bt->color_map); sbus_writel(blue << 24, &bt->color_map); spin_unlock_irqrestore(&par->lock, flags); return 0; } /** * tcx_blank - Optional function. Blanks the display. * @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer */ static int tcx_blank(int blank, struct fb_info *info) { struct tcx_par *par = (struct tcx_par *) info->par; struct tcx_thc __iomem *thc = par->thc; unsigned long flags; u32 val; spin_lock_irqsave(&par->lock, flags); val = sbus_readl(&thc->thc_misc); switch (blank) { case FB_BLANK_UNBLANK: /* Unblanking */ val &= ~(TCX_THC_MISC_VSYNC_DIS | TCX_THC_MISC_HSYNC_DIS); val |= TCX_THC_MISC_VIDEO; par->flags &= ~TCX_FLAG_BLANKED; break; case FB_BLANK_NORMAL: /* Normal blanking */ val &= ~TCX_THC_MISC_VIDEO; par->flags |= TCX_FLAG_BLANKED; break; case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ val |= TCX_THC_MISC_VSYNC_DIS; break; case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ val |= TCX_THC_MISC_HSYNC_DIS; break; case FB_BLANK_POWERDOWN: /* Poweroff */ break; }; sbus_writel(val, &thc->thc_misc); spin_unlock_irqrestore(&par->lock, flags); return 0; } static struct sbus_mmap_map __tcx_mmap_map[TCX_MMAP_ENTRIES] = { { .voff = TCX_RAM8BIT, .size = SBUS_MMAP_FBSIZE(1) }, { .voff = TCX_RAM24BIT, .size = SBUS_MMAP_FBSIZE(4) }, { .voff = TCX_UNK3, .size = SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_UNK4, .size = SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_CONTROLPLANE, .size = SBUS_MMAP_FBSIZE(4) }, { .voff = TCX_UNK6, .size 
= SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_UNK7, .size = SBUS_MMAP_FBSIZE(8) }, { .voff = TCX_TEC, .size = PAGE_SIZE }, { .voff = TCX_BTREGS, .size = PAGE_SIZE }, { .voff = TCX_THC, .size = PAGE_SIZE }, { .voff = TCX_DHC, .size = PAGE_SIZE }, { .voff = TCX_ALT, .size = PAGE_SIZE }, { .voff = TCX_UNK2, .size = 0x20000 }, { .size = 0 } }; static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct tcx_par *par = (struct tcx_par *)info->par; return sbusfb_mmap_helper(par->mmap_map, info->fix.smem_start, info->fix.smem_len, par->which_io, vma); } static int tcx_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct tcx_par *par = (struct tcx_par *) info->par; return sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_TCXCOLOR, (par->lowdepth ? 8 : 24), info->fix.smem_len); } /* * Initialisation */ static void tcx_init_fix(struct fb_info *info, int linebytes) { struct tcx_par *par = (struct tcx_par *)info->par; const char *tcx_name; if (par->lowdepth) tcx_name = "TCX8"; else tcx_name = "TCX24"; strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id)); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = linebytes; info->fix.accel = FB_ACCEL_SUN_TCX; } static void tcx_unmap_regs(struct platform_device *op, struct fb_info *info, struct tcx_par *par) { if (par->tec) of_iounmap(&op->resource[7], par->tec, sizeof(struct tcx_tec)); if (par->thc) of_iounmap(&op->resource[9], par->thc, sizeof(struct tcx_thc)); if (par->bt) of_iounmap(&op->resource[8], par->bt, sizeof(struct bt_regs)); if (par->cplane) of_iounmap(&op->resource[4], par->cplane, info->fix.smem_len * sizeof(u32)); if (info->screen_base) of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); } static int tcx_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct fb_info *info; struct tcx_par *par; int linebytes, i, err; info = framebuffer_alloc(sizeof(struct tcx_par), &op->dev); err = -ENOMEM; if 
(!info) goto out_err; par = info->par; spin_lock_init(&par->lock); par->lowdepth = (of_find_property(dp, "tcx-8-bit", NULL) != NULL); sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); par->tec = of_ioremap(&op->resource[7], 0, sizeof(struct tcx_tec), "tcx tec"); par->thc = of_ioremap(&op->resource[9], 0, sizeof(struct tcx_thc), "tcx thc"); par->bt = of_ioremap(&op->resource[8], 0, sizeof(struct bt_regs), "tcx dac"); info->screen_base = of_ioremap(&op->resource[0], 0, info->fix.smem_len, "tcx ram"); if (!par->tec || !par->thc || !par->bt || !info->screen_base) goto out_unmap_regs; memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map)); if (!par->lowdepth) { par->cplane = of_ioremap(&op->resource[4], 0, info->fix.smem_len * sizeof(u32), "tcx cplane"); if (!par->cplane) goto out_unmap_regs; } else { par->mmap_map[1].size = SBUS_MMAP_EMPTY; par->mmap_map[4].size = SBUS_MMAP_EMPTY; par->mmap_map[5].size = SBUS_MMAP_EMPTY; par->mmap_map[6].size = SBUS_MMAP_EMPTY; } info->fix.smem_start = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; for (i = 0; i < TCX_MMAP_ENTRIES; i++) { int j; switch (i) { case 10: j = 12; break; case 11: case 12: j = i - 1; break; default: j = i; break; }; par->mmap_map[i].poff = op->resource[j].start; } info->flags = FBINFO_DEFAULT; info->fbops = &tcx_ops; /* Initialize brooktree DAC. 
*/ sbus_writel(0x04 << 24, &par->bt->addr); /* color planes */ sbus_writel(0xff << 24, &par->bt->control); sbus_writel(0x05 << 24, &par->bt->addr); sbus_writel(0x00 << 24, &par->bt->control); sbus_writel(0x06 << 24, &par->bt->addr); /* overlay plane */ sbus_writel(0x73 << 24, &par->bt->control); sbus_writel(0x07 << 24, &par->bt->addr); sbus_writel(0x00 << 24, &par->bt->control); tcx_reset(info); tcx_blank(FB_BLANK_UNBLANK, info); if (fb_alloc_cmap(&info->cmap, 256, 0)) goto out_unmap_regs; fb_set_cmap(&info->cmap, info); tcx_init_fix(info, linebytes); err = register_framebuffer(info); if (err < 0) goto out_dealloc_cmap; dev_set_drvdata(&op->dev, info); printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n", dp->full_name, par->which_io, info->fix.smem_start, par->lowdepth ? "8-bit only" : "24-bit depth"); return 0; out_dealloc_cmap: fb_dealloc_cmap(&info->cmap); out_unmap_regs: tcx_unmap_regs(op, info, par); framebuffer_release(info); out_err: return err; } static int tcx_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct tcx_par *par = info->par; unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); tcx_unmap_regs(op, info, par); framebuffer_release(info); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id tcx_match[] = { { .name = "SUNW,tcx", }, {}, }; MODULE_DEVICE_TABLE(of, tcx_match); static struct platform_driver tcx_driver = { .driver = { .name = "tcx", .owner = THIS_MODULE, .of_match_table = tcx_match, }, .probe = tcx_probe, .remove = tcx_remove, }; static int __init tcx_init(void) { if (fb_get_options("tcxfb", NULL)) return -ENODEV; return platform_driver_register(&tcx_driver); } static void __exit tcx_exit(void) { platform_driver_unregister(&tcx_driver); } module_init(tcx_init); module_exit(tcx_exit); MODULE_DESCRIPTION("framebuffer driver for TCX chipsets"); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
deano0714/EK02-CM-Kernel
drivers/net/3c505.c
3932
48573
/* * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505) * By Craig Southeren, Juha Laiho and Philip Blundell * * 3c505.c This module implements an interface to the 3Com * Etherlink Plus (3c505) Ethernet card. Linux device * driver interface reverse engineered from the Linux 3C509 * device drivers. Some 3C505 information gleaned from * the Crynwr packet driver. Still this driver would not * be here without 3C505 technical reference provided by * 3Com. * * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $ * * Authors: Linux 3c505 device driver by * Craig Southeren, <craigs@ineluki.apana.org.au> * Final debugging by * Andrew Tridgell, <tridge@nimbus.anu.edu.au> * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by * Juha Laiho, <jlaiho@ichaos.nullnet.fi> * Linux 3C509 driver by * Donald Becker, <becker@super.org> * (Now at <becker@scyld.com>) * Crynwr packet driver by * Krishnan Gopalan and Gregg Stefancik, * Clemson University Engineering Computer Operations. * Portions of the code have been adapted from the 3c505 * driver for NCSA Telnet by Bruce Orchard and later * modified by Warren Van Houten and krus@diku.dk. * 3C505 technical information provided by * Terry Murphy, of 3Com Network Adapter Division * Linux 1.3.0 changes by * Alan Cox <Alan.Cox@linux.org> * More debugging, DMA support, currently maintained by * Philip Blundell <philb@gnu.org> * Multicard/soft configurable dma channel/rev 2 hardware support * by Christopher Collins <ccollins@pcug.org.au> * Ethtool support (jgarzik), 11/17/2001 */ #define DRV_NAME "3c505" #define DRV_VERSION "1.10a" /* Theory of operation: * * The 3c505 is quite an intelligent board. All communication with it is done * by means of Primary Command Blocks (PCBs); these are transferred using PIO * through the command register. The card has 256k of on-board RAM, which is * used to buffer received packets. It might seem at first that more buffers * are better, but in fact this isn't true. 
From my tests, it seems that * more than about 10 buffers are unnecessary, and there is a noticeable * performance hit in having more active on the card. So the majority of the * card's memory isn't, in fact, used. Sadly, the card only has one transmit * buffer and, short of loading our own firmware into it (which is what some * drivers resort to) there's nothing we can do about this. * * We keep up to 4 "receive packet" commands active on the board at a time. * When a packet comes in, so long as there is a receive command active, the * board will send us a "packet received" PCB and then add the data for that * packet to the DMA queue. If a DMA transfer is not already in progress, we * set one up to start uploading the data. We have to maintain a list of * backlogged receive packets, because the card may decide to tell us about * a newly-arrived packet at any time, and we may not be able to start a DMA * transfer immediately (ie one may already be going on). We can't NAK the * PCB, because then it would throw the packet away. * * Trying to send a PCB to the card at the wrong moment seems to have bad * effects. If we send it a transmit PCB while a receive DMA is happening, * it will just NAK the PCB and so we will have wasted our time. Worse, it * sometimes seems to interrupt the transfer. The majority of the low-level * code is protected by one huge semaphore -- "busy" -- which is set whenever * it probably isn't safe to do anything to the card. The receive routine * must gain a lock on "busy" before it can start a DMA transfer, and the * transmit routine must gain a lock before it sends the first PCB to the card. * The send_pcb() routine also has an internal semaphore to protect it against * being re-entered (which would be disastrous) -- this is needed because * several things can happen asynchronously (re-priming the receiver and * asking the card for statistics, for example). send_pcb() will also refuse * to talk to the card at all if a DMA upload is happening. 
The higher-level * networking code will reschedule a later retry if some part of the driver * is blocked. In practice, this doesn't seem to happen very often. */ /* This driver may now work with revision 2.x hardware, since all the read * operations on the HCR have been removed (we now keep our own softcopy). * But I don't have an old card to test it on. * * This has had the bad effect that the autoprobe routine is now a bit * less friendly to other devices. However, it was never very good. * before, so I doubt it will hurt anybody. */ /* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly * to make it more reliable, and secondly to add DMA mode. Many things could * probably be done better; the concurrency protection is particularly awful. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/dma.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include "3c505.h" /********************************************************* * * define debug messages here as common strings to reduce space * *********************************************************/ #define filename __FILE__ #define timeout_msg "*** timeout at %s:%s (line %d) ***\n" #define TIMEOUT_MSG(lineno) \ pr_notice(timeout_msg, filename, __func__, (lineno)) #define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n" #define INVALID_PCB_MSG(len) \ pr_notice(invalid_pcb_msg, (len), filename, __func__, __LINE__) #define search_msg "%s: Looking for 3c505 adapter at address %#x..." #define stilllooking_msg "still looking..." 
#define found_msg "found.\n" #define notfound_msg "not found (reason = %d)\n" #define couldnot_msg "%s: 3c505 not found\n" /********************************************************* * * various other debug stuff * *********************************************************/ #ifdef ELP_DEBUG static int elp_debug = ELP_DEBUG; #else static int elp_debug; #endif #define debug elp_debug /* * 0 = no messages (well, some) * 1 = messages when high level commands performed * 2 = messages when low level commands performed * 3 = messages when interrupts received */ /***************************************************************** * * List of I/O-addresses we try to auto-sense * Last element MUST BE 0! *****************************************************************/ static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0}; /* Dma Memory related stuff */ static unsigned long dma_mem_alloc(int size) { int order = get_order(size); return __get_dma_pages(GFP_KERNEL, order); } /***************************************************************** * * Functions for I/O (note the inline !) 
* *****************************************************************/ static inline unsigned char inb_status(unsigned int base_addr) { return inb(base_addr + PORT_STATUS); } static inline int inb_command(unsigned int base_addr) { return inb(base_addr + PORT_COMMAND); } static inline void outb_control(unsigned char val, struct net_device *dev) { outb(val, dev->base_addr + PORT_CONTROL); ((elp_device *)(netdev_priv(dev)))->hcr_val = val; } #define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val) static inline void outb_command(unsigned char val, unsigned int base_addr) { outb(val, base_addr + PORT_COMMAND); } static inline unsigned int backlog_next(unsigned int n) { return (n + 1) % BACKLOG_SIZE; } /***************************************************************** * * useful functions for accessing the adapter * *****************************************************************/ /* * use this routine when accessing the ASF bits as they are * changed asynchronously by the adapter */ /* get adapter PCB status */ #define GET_ASF(addr) \ (get_status(addr)&ASF_PCB_MASK) static inline int get_status(unsigned int base_addr) { unsigned long timeout = jiffies + 10*HZ/100; register int stat1; do { stat1 = inb_status(base_addr); } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) TIMEOUT_MSG(__LINE__); return stat1; } static inline void set_hsf(struct net_device *dev, int hsf) { elp_device *adapter = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&adapter->lock, flags); outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev); spin_unlock_irqrestore(&adapter->lock, flags); } static bool start_receive(struct net_device *, pcb_struct *); static inline void adapter_reset(struct net_device *dev) { unsigned long timeout; elp_device *adapter = netdev_priv(dev); unsigned char orig_hcr = adapter->hcr_val; outb_control(0, dev); if (inb_status(dev->base_addr) & ACRF) { do { inb_command(dev->base_addr); timeout = 
jiffies + 2*HZ/100; while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF)); } while (inb_status(dev->base_addr) & ACRF); set_hsf(dev, HSF_PCB_NAK); } outb_control(adapter->hcr_val | ATTN | DIR, dev); mdelay(10); outb_control(adapter->hcr_val & ~ATTN, dev); mdelay(10); outb_control(adapter->hcr_val | FLSH, dev); mdelay(10); outb_control(adapter->hcr_val & ~FLSH, dev); mdelay(10); outb_control(orig_hcr, dev); if (!start_receive(dev, &adapter->tx_pcb)) pr_err("%s: start receive command failed\n", dev->name); } /* Check to make sure that a DMA transfer hasn't timed out. This should * never happen in theory, but seems to occur occasionally if the card gets * prodded at the wrong time. */ static inline void check_3c505_dma(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) { unsigned long flags, f; pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? 
"download" : "upload", get_dma_residue(dev->dma)); spin_lock_irqsave(&adapter->lock, flags); adapter->dmaing = 0; adapter->busy = 0; f=claim_dma_lock(); disable_dma(dev->dma); release_dma_lock(f); if (adapter->rx_active) adapter->rx_active--; outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); spin_unlock_irqrestore(&adapter->lock, flags); } } /* Primitive functions used by send_pcb() */ static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte) { unsigned long timeout; outb_command(byte, base_addr); for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { if (inb_status(base_addr) & HCRE) return false; } pr_warning("3c505: send_pcb_slow timed out\n"); return true; } static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte) { unsigned int timeout; outb_command(byte, base_addr); for (timeout = 0; timeout < 40000; timeout++) { if (inb_status(base_addr) & HCRE) return false; } pr_warning("3c505: send_pcb_fast timed out\n"); return true; } /* Check to see if the receiver needs restarting, and kick it if so */ static inline void prime_rx(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) { if (!start_receive(dev, &adapter->itx_pcb)) break; } } /***************************************************************** * * send_pcb * Send a PCB to the adapter. * * output byte to command reg --<--+ * wait until HCRE is non zero | * loop until all bytes sent -->--+ * set HSF1 and HSF2 to 1 * output pcb length * wait until ASF give ACK or NAK * set HSF1 and HSF2 to 0 * *****************************************************************/ /* This can be quite slow -- the adapter is allowed to take up to 40ms * to respond to the initial interrupt. * * We run initially with interrupts turned on, but with a semaphore set * so that nobody tries to re-enter this code. 
Once the first byte has * gone through, we turn interrupts off and then send the others (the * timeout is reduced to 500us). */ static bool send_pcb(struct net_device *dev, pcb_struct * pcb) { int i; unsigned long timeout; elp_device *adapter = netdev_priv(dev); unsigned long flags; check_3c505_dma(dev); if (adapter->dmaing && adapter->current_dma.direction == 0) return false; /* Avoid contention */ if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) { if (elp_debug >= 3) { pr_debug("%s: send_pcb entered while threaded\n", dev->name); } return false; } /* * load each byte into the command register and * wait for the HCRE bit to indicate the adapter * had read the byte */ set_hsf(dev, 0); if (send_pcb_slow(dev->base_addr, pcb->command)) goto abort; spin_lock_irqsave(&adapter->lock, flags); if (send_pcb_fast(dev->base_addr, pcb->length)) goto sti_abort; for (i = 0; i < pcb->length; i++) { if (send_pcb_fast(dev->base_addr, pcb->data.raw[i])) goto sti_abort; } outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */ outb_command(2 + pcb->length, dev->base_addr); /* now wait for the acknowledgement */ spin_unlock_irqrestore(&adapter->lock, flags); for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { switch (GET_ASF(dev->base_addr)) { case ASF_PCB_ACK: adapter->send_pcb_semaphore = 0; return true; case ASF_PCB_NAK: #ifdef ELP_DEBUG pr_debug("%s: send_pcb got NAK\n", dev->name); #endif goto abort; } } if (elp_debug >= 1) pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n", dev->name, inb_status(dev->base_addr)); goto abort; sti_abort: spin_unlock_irqrestore(&adapter->lock, flags); abort: adapter->send_pcb_semaphore = 0; return false; } /***************************************************************** * * receive_pcb * Read a PCB from the adapter * * wait for ACRF to be non-zero ---<---+ * input a byte | * if ASF1 and ASF2 were not both one | * before byte was read, loop --->---+ * set HSF1 and HSF2 for ack * 
*****************************************************************/ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb) { int i, j; int total_length; int stat; unsigned long timeout; unsigned long flags; elp_device *adapter = netdev_priv(dev); set_hsf(dev, 0); /* get the command code */ timeout = jiffies + 2*HZ/100; while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); return false; } pcb->command = inb_command(dev->base_addr); /* read the data length */ timeout = jiffies + 3*HZ/100; while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); pr_info("%s: status %02x\n", dev->name, stat); return false; } pcb->length = inb_command(dev->base_addr); if (pcb->length > MAX_PCB_DATA) { INVALID_PCB_MSG(pcb->length); adapter_reset(dev); return false; } /* read the data */ spin_lock_irqsave(&adapter->lock, flags); for (i = 0; i < MAX_PCB_DATA; i++) { for (j = 0; j < 20000; j++) { stat = get_status(dev->base_addr); if (stat & ACRF) break; } pcb->data.raw[i] = inb_command(dev->base_addr); if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000) break; } spin_unlock_irqrestore(&adapter->lock, flags); if (i >= MAX_PCB_DATA) { INVALID_PCB_MSG(i); return false; } if (j >= 20000) { TIMEOUT_MSG(__LINE__); return false; } /* the last "data" byte was really the length! 
*/ total_length = pcb->data.raw[i]; /* safety check total length vs data length */ if (total_length != (pcb->length + 2)) { if (elp_debug >= 2) pr_warning("%s: mangled PCB received\n", dev->name); set_hsf(dev, HSF_PCB_NAK); return false; } if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) { if (test_and_set_bit(0, (void *) &adapter->busy)) { if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) { set_hsf(dev, HSF_PCB_NAK); pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name); pcb->command = 0; return true; } else { pcb->command = 0xff; } } } set_hsf(dev, HSF_PCB_ACK); return true; } /****************************************************** * * queue a receive command on the adapter so we will get an * interrupt when a packet is received. * ******************************************************/ static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb) { bool status; elp_device *adapter = netdev_priv(dev); if (elp_debug >= 3) pr_debug("%s: restarting receiver\n", dev->name); tx_pcb->command = CMD_RECEIVE_PACKET; tx_pcb->length = sizeof(struct Rcv_pkt); tx_pcb->data.rcv_pkt.buf_seg = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */ tx_pcb->data.rcv_pkt.buf_len = 1600; tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */ status = send_pcb(dev, tx_pcb); if (status) adapter->rx_active++; return status; } /****************************************************** * * extract a packet from the adapter * this routine is only called from within the interrupt * service routine, so no cli/sti calls are needed * note that the length is always assumed to be even * ******************************************************/ static void receive_packet(struct net_device *dev, int len) { int rlen; elp_device *adapter = netdev_priv(dev); void *target; struct sk_buff *skb; unsigned long flags; rlen = (len + 1) & ~1; skb = dev_alloc_skb(rlen + 2); if (!skb) { pr_warning("%s: memory squeeze, dropping packet\n", dev->name); target = 
adapter->dma_buffer; adapter->current_dma.target = NULL; /* FIXME: stats */ return; } skb_reserve(skb, 2); target = skb_put(skb, rlen); if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) { adapter->current_dma.target = target; target = adapter->dma_buffer; } else { adapter->current_dma.target = NULL; } /* if this happens, we die */ if (test_and_set_bit(0, (void *) &adapter->dmaing)) pr_err("%s: rx blocked, DMA in progress, dir %d\n", dev->name, adapter->current_dma.direction); adapter->current_dma.direction = 0; adapter->current_dma.length = rlen; adapter->current_dma.skb = skb; adapter->current_dma.start_time = jiffies; outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev); flags=claim_dma_lock(); disable_dma(dev->dma); clear_dma_ff(dev->dma); set_dma_mode(dev->dma, 0x04); /* dma read */ set_dma_addr(dev->dma, isa_virt_to_bus(target)); set_dma_count(dev->dma, rlen); enable_dma(dev->dma); release_dma_lock(flags); if (elp_debug >= 3) { pr_debug("%s: rx DMA transfer started\n", dev->name); } if (adapter->rx_active) adapter->rx_active--; if (!adapter->busy) pr_warning("%s: receive_packet called, busy not set.\n", dev->name); } /****************************************************** * * interrupt handler * ******************************************************/ static irqreturn_t elp_interrupt(int irq, void *dev_id) { int len; int dlen; int icount = 0; struct net_device *dev = dev_id; elp_device *adapter = netdev_priv(dev); unsigned long timeout; spin_lock(&adapter->lock); do { /* * has a DMA transfer finished? */ if (inb_status(dev->base_addr) & DONE) { if (!adapter->dmaing) pr_warning("%s: phantom DMA completed\n", dev->name); if (elp_debug >= 3) pr_debug("%s: %s DMA complete, status %02x\n", dev->name, adapter->current_dma.direction ? 
"tx" : "rx", inb_status(dev->base_addr)); outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev); if (adapter->current_dma.direction) { dev_kfree_skb_irq(adapter->current_dma.skb); } else { struct sk_buff *skb = adapter->current_dma.skb; if (skb) { if (adapter->current_dma.target) { /* have already done the skb_put() */ memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length); } skb->protocol = eth_type_trans(skb,dev); dev->stats.rx_bytes += skb->len; netif_rx(skb); } } adapter->dmaing = 0; if (adapter->rx_backlog.in != adapter->rx_backlog.out) { int t = adapter->rx_backlog.length[adapter->rx_backlog.out]; adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out); if (elp_debug >= 2) pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t); receive_packet(dev, t); } else { adapter->busy = 0; } } else { /* has one timed out? */ check_3c505_dma(dev); } /* * receive a PCB from the adapter */ timeout = jiffies + 3*HZ/100; while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) { if (receive_pcb(dev, &adapter->irx_pcb)) { switch (adapter->irx_pcb.command) { case 0: break; /* * received a packet - this must be handled fast */ case 0xff: case CMD_RECEIVE_PACKET_COMPLETE: /* if the device isn't open, don't pass packets up the stack */ if (!netif_running(dev)) break; len = adapter->irx_pcb.data.rcv_resp.pkt_len; dlen = adapter->irx_pcb.data.rcv_resp.buf_len; if (adapter->irx_pcb.data.rcv_resp.timeout != 0) { pr_err("%s: interrupt - packet not received correctly\n", dev->name); } else { if (elp_debug >= 3) { pr_debug("%s: interrupt - packet received of length %i (%i)\n", dev->name, len, dlen); } if (adapter->irx_pcb.command == 0xff) { if (elp_debug >= 2) pr_debug("%s: adding packet to backlog (len = %d)\n", dev->name, dlen); adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen; adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in); } else { receive_packet(dev, dlen); } if (elp_debug >= 3) 
pr_debug("%s: packet received\n", dev->name); } break; /* * 82586 configured correctly */ case CMD_CONFIGURE_82586_RESPONSE: adapter->got[CMD_CONFIGURE_82586] = 1; if (elp_debug >= 3) pr_debug("%s: interrupt - configure response received\n", dev->name); break; /* * Adapter memory configuration */ case CMD_CONFIGURE_ADAPTER_RESPONSE: adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1; if (elp_debug >= 3) pr_debug("%s: Adapter memory configuration %s.\n", dev->name, adapter->irx_pcb.data.failed ? "failed" : "succeeded"); break; /* * Multicast list loading */ case CMD_LOAD_MULTICAST_RESPONSE: adapter->got[CMD_LOAD_MULTICAST_LIST] = 1; if (elp_debug >= 3) pr_debug("%s: Multicast address list loading %s.\n", dev->name, adapter->irx_pcb.data.failed ? "failed" : "succeeded"); break; /* * Station address setting */ case CMD_SET_ADDRESS_RESPONSE: adapter->got[CMD_SET_STATION_ADDRESS] = 1; if (elp_debug >= 3) pr_debug("%s: Ethernet address setting %s.\n", dev->name, adapter->irx_pcb.data.failed ? "failed" : "succeeded"); break; /* * received board statistics */ case CMD_NETWORK_STATISTICS_RESPONSE: dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv; dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit; dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC; dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align; dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun; dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res; adapter->got[CMD_NETWORK_STATISTICS] = 1; if (elp_debug >= 3) pr_debug("%s: interrupt - statistics response received\n", dev->name); break; /* * sent a packet */ case CMD_TRANSMIT_PACKET_COMPLETE: if (elp_debug >= 3) pr_debug("%s: interrupt - packet sent\n", dev->name); if (!netif_running(dev)) break; switch (adapter->irx_pcb.data.xmit_resp.c_stat) { case 0xffff: dev->stats.tx_aborted_errors++; pr_info("%s: transmit timed out, network cable problem?\n", dev->name); break; case 0xfffe: 
dev->stats.tx_fifo_errors++; pr_info("%s: transmit timed out, FIFO underrun\n", dev->name); break; } netif_wake_queue(dev); break; /* * some unknown PCB */ default: pr_debug("%s: unknown PCB received - %2.2x\n", dev->name, adapter->irx_pcb.command); break; } } else { pr_warning("%s: failed to read PCB on interrupt\n", dev->name); adapter_reset(dev); } } } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE))); prime_rx(dev); /* * indicate no longer in interrupt routine */ spin_unlock(&adapter->lock); return IRQ_HANDLED; } /****************************************************** * * open the board * ******************************************************/ static int elp_open(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); int retval; if (elp_debug >= 3) pr_debug("%s: request to open device\n", dev->name); /* * make sure we actually found the device */ if (adapter == NULL) { pr_err("%s: Opening a non-existent physical device\n", dev->name); return -EAGAIN; } /* * disable interrupts on the board */ outb_control(0, dev); /* * clear any pending interrupts */ inb_command(dev->base_addr); adapter_reset(dev); /* * no receive PCBs active */ adapter->rx_active = 0; adapter->busy = 0; adapter->send_pcb_semaphore = 0; adapter->rx_backlog.in = 0; adapter->rx_backlog.out = 0; spin_lock_init(&adapter->lock); /* * install our interrupt service routine */ if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) { pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq); return retval; } if ((retval = request_dma(dev->dma, dev->name))) { free_irq(dev->irq, dev); pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma); return retval; } adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE); if (!adapter->dma_buffer) { pr_err("%s: could not allocate DMA buffer\n", dev->name); free_dma(dev->dma); free_irq(dev->irq, dev); return -ENOMEM; } adapter->dmaing = 0; /* * enable interrupts on the board */ outb_control(CMDE, 
dev); /* * configure adapter memory: we need 10 multicast addresses, default==0 */ if (elp_debug >= 3) pr_debug("%s: sending 3c505 memory configuration command\n", dev->name); adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; adapter->tx_pcb.data.memconf.cmd_q = 10; adapter->tx_pcb.data.memconf.rcv_q = 20; adapter->tx_pcb.data.memconf.mcast = 10; adapter->tx_pcb.data.memconf.frame = 20; adapter->tx_pcb.data.memconf.rcv_b = 20; adapter->tx_pcb.data.memconf.progs = 0; adapter->tx_pcb.length = sizeof(struct Memconf); adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0; if (!send_pcb(dev, &adapter->tx_pcb)) pr_err("%s: couldn't send memory configuration command\n", dev->name); else { unsigned long timeout = jiffies + TIMEOUT; while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) TIMEOUT_MSG(__LINE__); } /* * configure adapter to receive broadcast messages and wait for response */ if (elp_debug >= 3) pr_debug("%s: sending 82586 configure command\n", dev->name); adapter->tx_pcb.command = CMD_CONFIGURE_82586; adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; adapter->tx_pcb.length = 2; adapter->got[CMD_CONFIGURE_82586] = 0; if (!send_pcb(dev, &adapter->tx_pcb)) pr_err("%s: couldn't send 82586 configure command\n", dev->name); else { unsigned long timeout = jiffies + TIMEOUT; while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) TIMEOUT_MSG(__LINE__); } /* enable burst-mode DMA */ /* outb(0x1, dev->base_addr + PORT_AUXDMA); */ /* * queue receive commands to provide buffering */ prime_rx(dev); if (elp_debug >= 3) pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active); /* * device is now officially open! 
*/ netif_start_queue(dev); return 0; } /****************************************************** * * send a packet to the adapter * ******************************************************/ static netdev_tx_t send_packet(struct net_device *dev, struct sk_buff *skb) { elp_device *adapter = netdev_priv(dev); unsigned long target; unsigned long flags; /* * make sure the length is even and no shorter than 60 bytes */ unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1); if (test_and_set_bit(0, (void *) &adapter->busy)) { if (elp_debug >= 2) pr_debug("%s: transmit blocked\n", dev->name); return false; } dev->stats.tx_bytes += nlen; /* * send the adapter a transmit packet command. Ignore segment and offset * and make sure the length is even */ adapter->tx_pcb.command = CMD_TRANSMIT_PACKET; adapter->tx_pcb.length = sizeof(struct Xmit_pkt); adapter->tx_pcb.data.xmit_pkt.buf_ofs = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */ adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen; if (!send_pcb(dev, &adapter->tx_pcb)) { adapter->busy = 0; return false; } /* if this happens, we die */ if (test_and_set_bit(0, (void *) &adapter->dmaing)) pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction); adapter->current_dma.direction = 1; adapter->current_dma.start_time = jiffies; if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) { skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen); memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len); target = isa_virt_to_bus(adapter->dma_buffer); } else { target = isa_virt_to_bus(skb->data); } adapter->current_dma.skb = skb; flags=claim_dma_lock(); disable_dma(dev->dma); clear_dma_ff(dev->dma); set_dma_mode(dev->dma, 0x48); /* dma memory -> io */ set_dma_addr(dev->dma, target); set_dma_count(dev->dma, nlen); outb_control(adapter->hcr_val | DMAE | TCEN, dev); enable_dma(dev->dma); release_dma_lock(flags); if (elp_debug >= 3) pr_debug("%s: DMA transfer started\n", dev->name); 
return true; } /* * The upper layer thinks we timed out */ static void elp_timeout(struct net_device *dev) { int stat; stat = inb_status(dev->base_addr); pr_warning("%s: transmit timed out, lost %s?\n", dev->name, (stat & ACRF) ? "interrupt" : "command"); if (elp_debug >= 1) pr_debug("%s: status %#02x\n", dev->name, stat); dev->trans_start = jiffies; /* prevent tx timeout */ dev->stats.tx_dropped++; netif_wake_queue(dev); } /****************************************************** * * start the transmitter * return 0 if sent OK, else return 1 * ******************************************************/ static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; elp_device *adapter = netdev_priv(dev); spin_lock_irqsave(&adapter->lock, flags); check_3c505_dma(dev); if (elp_debug >= 3) pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len); netif_stop_queue(dev); /* * send the packet at skb->data for skb->len */ if (!send_packet(dev, skb)) { if (elp_debug >= 2) { pr_debug("%s: failed to transmit packet\n", dev->name); } spin_unlock_irqrestore(&adapter->lock, flags); return NETDEV_TX_BUSY; } if (elp_debug >= 3) pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len); prime_rx(dev); spin_unlock_irqrestore(&adapter->lock, flags); netif_start_queue(dev); return NETDEV_TX_OK; } /****************************************************** * * return statistics on the board * ******************************************************/ static struct net_device_stats *elp_get_stats(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); if (elp_debug >= 3) pr_debug("%s: request for stats\n", dev->name); /* If the device is closed, just return the latest stats we have, - we cannot ask from the adapter without interrupts */ if (!netif_running(dev)) return &dev->stats; /* send a get statistics command to the board */ adapter->tx_pcb.command = CMD_NETWORK_STATISTICS; adapter->tx_pcb.length = 0; 
adapter->got[CMD_NETWORK_STATISTICS] = 0; if (!send_pcb(dev, &adapter->tx_pcb)) pr_err("%s: couldn't send get statistics command\n", dev->name); else { unsigned long timeout = jiffies + TIMEOUT; while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); return &dev->stats; } } /* statistics are now up to date */ return &dev->stats; } static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); } static u32 netdev_get_msglevel(struct net_device *dev) { return debug; } static void netdev_set_msglevel(struct net_device *dev, u32 level) { debug = level; } static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, }; /****************************************************** * * close the board * ******************************************************/ static int elp_close(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); if (elp_debug >= 3) pr_debug("%s: request to close device\n", dev->name); netif_stop_queue(dev); /* Someone may request the device statistic information even when * the interface is closed. The following will update the statistics * structure in the driver, so we'll be able to give current statistics. 
*/ (void) elp_get_stats(dev); /* * disable interrupts on the board */ outb_control(0, dev); /* * release the IRQ */ free_irq(dev->irq, dev); free_dma(dev->dma); free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE)); return 0; } /************************************************************ * * Set multicast list * num_addrs==0: clear mc_list * num_addrs==-1: set promiscuous mode * num_addrs>0: set mc_list * ************************************************************/ static void elp_set_mc_list(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); struct netdev_hw_addr *ha; int i; unsigned long flags; if (elp_debug >= 3) pr_debug("%s: request to set multicast list\n", dev->name); spin_lock_irqsave(&adapter->lock, flags); if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { /* send a "load multicast list" command to the board, max 10 addrs/cmd */ /* if num_addrs==0 the list will be cleared */ adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; adapter->tx_pcb.length = 6 * netdev_mc_count(dev); i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(adapter->tx_pcb.data.multicast[i++], ha->addr, 6); adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; if (!send_pcb(dev, &adapter->tx_pcb)) pr_err("%s: couldn't send set_multicast command\n", dev->name); else { unsigned long timeout = jiffies + TIMEOUT; while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); } } if (!netdev_mc_empty(dev)) adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI; else /* num_addrs == 0 */ adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD; } else adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC; /* * configure adapter to receive messages (as specified above) * and wait for response */ if (elp_debug >= 3) pr_debug("%s: sending 82586 configure command\n", dev->name); adapter->tx_pcb.command = CMD_CONFIGURE_82586; adapter->tx_pcb.length = 2; 
adapter->got[CMD_CONFIGURE_82586] = 0; if (!send_pcb(dev, &adapter->tx_pcb)) { spin_unlock_irqrestore(&adapter->lock, flags); pr_err("%s: couldn't send 82586 configure command\n", dev->name); } else { unsigned long timeout = jiffies + TIMEOUT; spin_unlock_irqrestore(&adapter->lock, flags); while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) TIMEOUT_MSG(__LINE__); } } /************************************************************ * * A couple of tests to see if there's 3C505 or not * Called only by elp_autodetect ************************************************************/ static int __init elp_sense(struct net_device *dev) { int addr = dev->base_addr; const char *name = dev->name; byte orig_HSR; if (!request_region(addr, ELP_IO_EXTENT, "3c505")) return -ENODEV; orig_HSR = inb_status(addr); if (elp_debug > 0) pr_debug(search_msg, name, addr); if (orig_HSR == 0xff) { if (elp_debug > 0) pr_cont(notfound_msg, 1); goto out; } /* Wait for a while; the adapter may still be booting up */ if (elp_debug > 0) pr_cont(stilllooking_msg); if (orig_HSR & DIR) { /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */ outb(0, dev->base_addr + PORT_CONTROL); msleep(300); if (inb_status(addr) & DIR) { if (elp_debug > 0) pr_cont(notfound_msg, 2); goto out; } } else { /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */ outb(DIR, dev->base_addr + PORT_CONTROL); msleep(300); if (!(inb_status(addr) & DIR)) { if (elp_debug > 0) pr_cont(notfound_msg, 3); goto out; } } /* * It certainly looks like a 3c505. 
*/ if (elp_debug > 0) pr_cont(found_msg); return 0; out: release_region(addr, ELP_IO_EXTENT); return -ENODEV; } /************************************************************* * * Search through addr_list[] and try to find a 3C505 * Called only by eplus_probe *************************************************************/ static int __init elp_autodetect(struct net_device *dev) { int idx = 0; /* if base address set, then only check that address otherwise, run through the table */ if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */ if (elp_sense(dev) == 0) return dev->base_addr; } else while ((dev->base_addr = addr_list[idx++])) { if (elp_sense(dev) == 0) return dev->base_addr; } /* could not find an adapter */ if (elp_debug > 0) pr_debug(couldnot_msg, dev->name); return 0; /* Because of this, the layer above will return -ENODEV */ } static const struct net_device_ops elp_netdev_ops = { .ndo_open = elp_open, .ndo_stop = elp_close, .ndo_get_stats = elp_get_stats, .ndo_start_xmit = elp_start_xmit, .ndo_tx_timeout = elp_timeout, .ndo_set_multicast_list = elp_set_mc_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /****************************************************** * * probe for an Etherlink Plus board at the specified address * ******************************************************/ /* There are three situations we need to be able to detect here: * a) the card is idle * b) the card is still booting up * c) the card is stuck in a strange state (some DOS drivers do this) * * In case (a), all is well. In case (b), we wait 10 seconds to see if the * card finishes booting, and carry on if so. In case (c), we do a hard reset, * loop round, and hope for the best. * * This is all very unpleasant, but hopefully avoids the problems with the old * probe code (which had a 15-second delay if the card was idle, and didn't * work at all if it was in a weird state). 
*/ static int __init elplus_setup(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); int i, tries, tries1, okay; unsigned long timeout; unsigned long cookie = 0; int err = -ENODEV; /* * setup adapter structure */ dev->base_addr = elp_autodetect(dev); if (!dev->base_addr) return -ENODEV; adapter->send_pcb_semaphore = 0; for (tries1 = 0; tries1 < 3; tries1++) { outb_control((adapter->hcr_val | CMDE) & ~DIR, dev); /* First try to write just one byte, to see if the card is * responding at all normally. */ timeout = jiffies + 5*HZ/100; okay = 0; while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); if ((inb_status(dev->base_addr) & HCRE)) { outb_command(0, dev->base_addr); /* send a spurious byte */ timeout = jiffies + 5*HZ/100; while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE)); if (inb_status(dev->base_addr) & HCRE) okay = 1; } if (!okay) { /* Nope, it's ignoring the command register. This means that * either it's still booting up, or it's died. */ pr_err("%s: command register wouldn't drain, ", dev->name); if ((inb_status(dev->base_addr) & 7) == 3) { /* If the adapter status is 3, it *could* still be booting. * Give it the benefit of the doubt for 10 seconds. */ pr_cont("assuming 3c505 still starting\n"); timeout = jiffies + 10*HZ; while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7)); if (inb_status(dev->base_addr) & 7) { pr_err("%s: 3c505 failed to start\n", dev->name); } else { okay = 1; /* It started */ } } else { /* Otherwise, it must just be in a strange * state. We probably need to kick it. */ pr_cont("3c505 is sulking\n"); } } for (tries = 0; tries < 5 && okay; tries++) { /* * Try to set the Ethernet address, to make sure that the board * is working. 
*/ adapter->tx_pcb.command = CMD_STATION_ADDRESS; adapter->tx_pcb.length = 0; cookie = probe_irq_on(); if (!send_pcb(dev, &adapter->tx_pcb)) { pr_err("%s: could not send first PCB\n", dev->name); probe_irq_off(cookie); continue; } if (!receive_pcb(dev, &adapter->rx_pcb)) { pr_err("%s: could not read first PCB\n", dev->name); probe_irq_off(cookie); continue; } if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) || (adapter->rx_pcb.length != 6)) { pr_err("%s: first PCB wrong (%d, %d)\n", dev->name, adapter->rx_pcb.command, adapter->rx_pcb.length); probe_irq_off(cookie); continue; } goto okay; } /* It's broken. Do a hard reset to re-initialise the board, * and try again. */ pr_info("%s: resetting adapter\n", dev->name); outb_control(adapter->hcr_val | FLSH | ATTN, dev); outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev); } pr_err("%s: failed to initialise 3c505\n", dev->name); goto out; okay: if (dev->irq) { /* Is there a preset IRQ? */ int rpt = probe_irq_off(cookie); if (dev->irq != rpt) { pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt); } /* if dev->irq == probe_irq_off(cookie), all is well */ } else /* No preset IRQ; just use what we can detect */ dev->irq = probe_irq_off(cookie); switch (dev->irq) { /* Legal, sane? */ case 0: pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n", dev->name); goto out; case 1: case 6: case 8: case 13: pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n", dev->name, dev->irq); goto out; } /* * Now we have the IRQ number so we can disable the interrupts from * the board until the board is opened. 
*/ outb_control(adapter->hcr_val & ~CMDE, dev); /* * copy Ethernet address into structure */ for (i = 0; i < 6; i++) dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i]; /* find a DMA channel */ if (!dev->dma) { if (dev->mem_start) { dev->dma = dev->mem_start & 7; } else { pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name); dev->dma = ELP_DMA; } } /* * print remainder of startup message */ pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ", dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr); /* * read more information from the adapter */ adapter->tx_pcb.command = CMD_ADAPTER_INFO; adapter->tx_pcb.length = 0; if (!send_pcb(dev, &adapter->tx_pcb) || !receive_pcb(dev, &adapter->rx_pcb) || (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) || (adapter->rx_pcb.length != 10)) { pr_cont("not responding to second PCB\n"); } pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers, adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz); /* * reconfigure the adapter memory to better suit our purposes */ adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY; adapter->tx_pcb.length = 12; adapter->tx_pcb.data.memconf.cmd_q = 8; adapter->tx_pcb.data.memconf.rcv_q = 8; adapter->tx_pcb.data.memconf.mcast = 10; adapter->tx_pcb.data.memconf.frame = 10; adapter->tx_pcb.data.memconf.rcv_b = 10; adapter->tx_pcb.data.memconf.progs = 0; if (!send_pcb(dev, &adapter->tx_pcb) || !receive_pcb(dev, &adapter->rx_pcb) || (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) || (adapter->rx_pcb.length != 2)) { pr_err("%s: could not configure adapter memory\n", dev->name); } if (adapter->rx_pcb.data.configure) { pr_err("%s: adapter configuration failed\n", dev->name); } dev->netdev_ops = &elp_netdev_ops; dev->watchdog_timeo = 10*HZ; dev->ethtool_ops = &netdev_ethtool_ops; /* local */ dev->mem_start = dev->mem_end = 0; err = register_netdev(dev); if (err) goto out; return 0; out: release_region(dev->base_addr, 
ELP_IO_EXTENT); return err; } #ifndef MODULE struct net_device * __init elplus_probe(int unit) { struct net_device *dev = alloc_etherdev(sizeof(elp_device)); int err; if (!dev) return ERR_PTR(-ENOMEM); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = elplus_setup(dev); if (err) { free_netdev(dev); return ERR_PTR(err); } return dev; } #else static struct net_device *dev_3c505[ELP_MAX_CARDS]; static int io[ELP_MAX_CARDS]; static int irq[ELP_MAX_CARDS]; static int dma[ELP_MAX_CARDS]; module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(dma, int, NULL, 0); MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)"); MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)"); MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)"); int __init init_module(void) { int this_dev, found = 0; for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { struct net_device *dev = alloc_etherdev(sizeof(elp_device)); if (!dev) break; dev->irq = irq[this_dev]; dev->base_addr = io[this_dev]; if (dma[this_dev]) { dev->dma = dma[this_dev]; } else { dev->dma = ELP_DMA; pr_warning("3c505.c: warning, using default DMA channel,\n"); } if (io[this_dev] == 0) { if (this_dev) { free_netdev(dev); break; } pr_notice("3c505.c: module autoprobe not recommended, give io=xx.\n"); } if (elplus_setup(dev) != 0) { pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]); free_netdev(dev); break; } dev_3c505[this_dev] = dev; found++; } if (!found) return -ENODEV; return 0; } void __exit cleanup_module(void) { int this_dev; for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) { struct net_device *dev = dev_3c505[this_dev]; if (dev) { unregister_netdev(dev); release_region(dev->base_addr, ELP_IO_EXTENT); free_netdev(dev); } } } #endif /* MODULE */ MODULE_LICENSE("GPL");
gpl-2.0
HSAFoundation/HSA-Drivers-Linux-AMD
src/kernel/arch/arm/mach-omap1/board-sx1-mmc.c
4188
1572
/* * linux/arch/arm/mach-omap1/board-sx1-mmc.c * * Copyright (C) 2007 Instituto Nokia de Tecnologia - INdT * Author: Carlos Eduardo Aguiar <carlos.aguiar@indt.org.br> * * This code is based on linux/arch/arm/mach-omap1/board-h2-mmc.c, which is: * Copyright (C) 2007 Instituto Nokia de Tecnologia - INdT * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/platform_device.h> #include <mach/hardware.h> #include <mach/board-sx1.h> #include "mmc.h" #if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) static int mmc_set_power(struct device *dev, int slot, int power_on, int vdd) { int err; u8 dat = 0; err = sx1_i2c_read_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, &dat); if (err < 0) return err; if (power_on) dat |= SOFIA_MMC_POWER; else dat &= ~SOFIA_MMC_POWER; return sx1_i2c_write_byte(SOFIA_I2C_ADDR, SOFIA_POWER1_REG, dat); } /* Cover switch is at OMAP_MPUIO(3) */ static struct omap_mmc_platform_data mmc1_data = { .nr_slots = 1, .slots[0] = { .set_power = mmc_set_power, .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .name = "mmcblk", }, }; static struct omap_mmc_platform_data *mmc_data[OMAP15XX_NR_MMC]; void __init sx1_mmc_init(void) { mmc_data[0] = &mmc1_data; omap1_init_mmc(mmc_data, OMAP15XX_NR_MMC); } #else void __init sx1_mmc_init(void) { } #endif
gpl-2.0
lithid/furnace_kernel_lge_hammerhead
drivers/base/driver.c
4444
5929
/*
 * driver.c - centralized device driver management
 *
 * Copyright (c) 2002-3 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007 Novell Inc.
 *
 * This file is released under the GPLv2
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "base.h"

/*
 * Advance a klist iterator over a driver's device list and return the
 * next bound device, or NULL when the list is exhausted.  The klist node
 * belongs to the device's private struct, so unwrap it via
 * to_device_private_driver().
 */
static struct device *next_device(struct klist_iter *i)
{
	struct klist_node *n = klist_next(i);
	struct device *dev = NULL;
	struct device_private *dev_prv;

	if (n) {
		dev_prv = to_device_private_driver(n);
		dev = dev_prv->device;
	}
	return dev;
}

/**
 * driver_for_each_device - Iterator for devices bound to a driver.
 * @drv: Driver we're iterating.
 * @start: Device to begin with, or NULL to start from the list head.
 * @data: Data to pass to the callback.
 * @fn: Function to call for each device.
 *
 * Iterate over the @drv's list of devices calling @fn for each one.
 * Iteration stops early as soon as @fn returns a non-zero value, and
 * that value is returned to the caller.
 *
 * Returns 0 if every callback returned 0, -EINVAL if @drv is NULL,
 * otherwise the first non-zero value returned by @fn.
 */
int driver_for_each_device(struct device_driver *drv, struct device *start,
			   void *data, int (*fn)(struct device *, void *))
{
	struct klist_iter i;
	struct device *dev;
	int error = 0;

	if (!drv)
		return -EINVAL;

	klist_iter_init_node(&drv->p->klist_devices, &i,
			     start ? &start->p->knode_driver : NULL);
	while ((dev = next_device(&i)) && !error)
		error = fn(dev, data);
	klist_iter_exit(&i);
	return error;
}
EXPORT_SYMBOL_GPL(driver_for_each_device);

/**
 * driver_find_device - device iterator for locating a particular device.
 * @drv: The device's driver
 * @start: Device to begin with
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * This is similar to the driver_for_each_device() function above, but
 * it returns a reference to a device that is 'found' for later use, as
 * determined by the @match callback.
 *
 * The callback should return 0 if the device doesn't match and non-zero
 * if it does.  If the callback returns non-zero, this function will
 * return to the caller and not iterate over any more devices.
 *
 * The returned device has had its reference count raised via
 * get_device(); the caller is responsible for dropping it.
 */
struct device *driver_find_device(struct device_driver *drv,
				  struct device *start, void *data,
				  int (*match)(struct device *dev, void *data))
{
	struct klist_iter i;
	struct device *dev;

	if (!drv)
		return NULL;

	klist_iter_init_node(&drv->p->klist_devices, &i,
			     (start ? &start->p->knode_driver : NULL));
	while ((dev = next_device(&i)))
		if (match(dev, data) && get_device(dev))
			break;
	klist_iter_exit(&i);
	/* dev is NULL here if the loop ran off the end without a match. */
	return dev;
}
EXPORT_SYMBOL_GPL(driver_find_device);

/**
 * driver_create_file - create sysfs file for driver.
 * @drv: driver.
 * @attr: driver attribute descriptor.
 *
 * Returns 0 on success, -EINVAL if @drv is NULL, or the error from
 * sysfs_create_file().
 */
int driver_create_file(struct device_driver *drv,
		       const struct driver_attribute *attr)
{
	int error;
	if (drv)
		error = sysfs_create_file(&drv->p->kobj, &attr->attr);
	else
		error = -EINVAL;
	return error;
}
EXPORT_SYMBOL_GPL(driver_create_file);

/**
 * driver_remove_file - remove sysfs file for driver.
 * @drv: driver.
 * @attr: driver attribute descriptor.
 */
void driver_remove_file(struct device_driver *drv,
			const struct driver_attribute *attr)
{
	if (drv)
		sysfs_remove_file(&drv->p->kobj, &attr->attr);
}
EXPORT_SYMBOL_GPL(driver_remove_file);

/*
 * Create each attribute group in the NULL-terminated @groups array under
 * the driver's kobject.  On failure, roll back the groups created so far
 * and return the error.
 */
static int driver_add_groups(struct device_driver *drv,
			     const struct attribute_group **groups)
{
	int error = 0;
	int i;

	if (groups) {
		for (i = 0; groups[i]; i++) {
			error = sysfs_create_group(&drv->p->kobj, groups[i]);
			if (error) {
				/* Undo the groups already created. */
				while (--i >= 0)
					sysfs_remove_group(&drv->p->kobj,
							   groups[i]);
				break;
			}
		}
	}
	return error;
}

/* Remove every attribute group previously added by driver_add_groups(). */
static void driver_remove_groups(struct device_driver *drv,
				 const struct attribute_group **groups)
{
	int i;

	if (groups)
		for (i = 0; groups[i]; i++)
			sysfs_remove_group(&drv->p->kobj, groups[i]);
}

/**
 * driver_register - register driver with bus
 * @drv: driver to register
 *
 * We pass off most of the work to the bus_add_driver() call,
 * since most of the things we have to do deal with the bus
 * structures.
 *
 * Returns 0 on success, -EBUSY if a driver of the same name is already
 * registered on the bus, or the error from bus_add_driver() /
 * driver_add_groups().
 */
int driver_register(struct device_driver *drv)
{
	int ret;
	struct device_driver *other;

	/* The bus must have been registered (bus_register()) first. */
	BUG_ON(!drv->bus->p);

	/*
	 * Warn about drivers that still set per-driver probe/remove/shutdown
	 * while the bus also provides them — the bus-level methods win.
	 */
	if ((drv->bus->probe && drv->probe) ||
	    (drv->bus->remove && drv->remove) ||
	    (drv->bus->shutdown && drv->shutdown))
		printk(KERN_WARNING "Driver '%s' needs updating - please use "
			"bus_type methods\n", drv->name);

	other = driver_find(drv->name, drv->bus);
	if (other) {
		printk(KERN_ERR "Error: Driver '%s' is already registered, "
			"aborting...\n", drv->name);
		return -EBUSY;
	}

	ret = bus_add_driver(drv);
	if (ret)
		return ret;
	ret = driver_add_groups(drv, drv->groups);
	if (ret)
		/* Group creation failed: back out the bus registration. */
		bus_remove_driver(drv);
	return ret;
}
EXPORT_SYMBOL_GPL(driver_register);

/**
 * driver_unregister - remove driver from system.
 * @drv: driver.
 *
 * Again, we pass off most of the work to the bus-level call.
 */
void driver_unregister(struct device_driver *drv)
{
	if (!drv || !drv->p) {
		WARN(1, "Unexpected driver unregister!\n");
		return;
	}
	driver_remove_groups(drv, drv->groups);
	bus_remove_driver(drv);
}
EXPORT_SYMBOL_GPL(driver_unregister);

/**
 * driver_find - locate driver on a bus by its name.
 * @name: name of the driver.
 * @bus: bus to scan for the driver.
 *
 * Call kset_find_obj() to iterate over list of drivers on
 * a bus to find driver by name. Return driver if found.
 *
 * This routine provides no locking to prevent the driver it returns
 * from being unregistered or unloaded while the caller is using it.
 * The caller is responsible for preventing this.
 */
struct device_driver *driver_find(const char *name, struct bus_type *bus)
{
	struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
	struct driver_private *priv;

	if (k) {
		/* Drop reference added by kset_find_obj() */
		kobject_put(k);
		priv = to_driver(k);
		return priv->driver;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(driver_find);
gpl-2.0
AKToronto/Bubba-Zombie
sound/pci/riptide/riptide.c
4444
64920
/* * Driver for the Conexant Riptide Soundchip * * Copyright (c) 2004 Peter Gruber <nokos@gmx.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* History: - 02/15/2004 first release This Driver is based on the OSS Driver version from Linuxant (riptide-0.6lnxtbeta03111100) credits from the original files: MODULE NAME: cnxt_rt.h AUTHOR: K. Lazarev (Transcribed by KNL) HISTORY: Major Revision Date By ----------------------------- -------- ----- Created 02/1/2000 KNL MODULE NAME: int_mdl.c AUTHOR: Konstantin Lazarev (Transcribed by KNL) HISTORY: Major Revision Date By ----------------------------- -------- ----- Created 10/01/99 KNL MODULE NAME: riptide.h AUTHOR: O. Druzhinin (Transcribed by OLD) HISTORY: Major Revision Date By ----------------------------- -------- ----- Created 10/16/97 OLD MODULE NAME: Rp_Cmdif.cpp AUTHOR: O. Druzhinin (Transcribed by OLD) K. Lazarev (Transcribed by KNL) HISTORY: Major Revision Date By ----------------------------- -------- ----- Adopted from NT4 driver 6/22/99 OLD Ported to Linux 9/01/99 KNL MODULE NAME: rt_hw.c AUTHOR: O. Druzhinin (Transcribed by OLD) C. 
Lazarev (Transcribed by CNL) HISTORY: Major Revision Date By ----------------------------- -------- ----- Created 11/18/97 OLD Hardware functions for RipTide 11/24/97 CNL (ES1) are coded Hardware functions for RipTide 12/24/97 CNL (A0) are coded Hardware functions for RipTide 03/20/98 CNL (A1) are coded Boot loader is included 05/07/98 CNL Redesigned for WDM 07/27/98 CNL Redesigned for Linux 09/01/99 CNL MODULE NAME: rt_hw.h AUTHOR: C. Lazarev (Transcribed by CNL) HISTORY: Major Revision Date By ----------------------------- -------- ----- Created 11/18/97 CNL MODULE NAME: rt_mdl.c AUTHOR: Konstantin Lazarev (Transcribed by KNL) HISTORY: Major Revision Date By ----------------------------- -------- ----- Created 10/01/99 KNL MODULE NAME: mixer.h AUTHOR: K. Kenney HISTORY: Major Revision Date By ----------------------------- -------- ----- Created from MS W95 Sample 11/28/95 KRS RipTide 10/15/97 KRS Adopted for Windows NT driver 01/20/98 CNL */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/gameport.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/io.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/ac97_codec.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/initval.h> #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif MODULE_AUTHOR("Peter Gruber <nokos@gmx.net>"); MODULE_DESCRIPTION("riptide"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Conexant,Riptide}}"); MODULE_FIRMWARE("riptide.hex"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; #ifdef SUPPORT_JOYSTICK static int 
joystick_port[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = 0x200 }; #endif static int mpu_port[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = 0x330 }; static int opl3_port[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = 0x388 }; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Riptide soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Riptide soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Riptide soundcard."); #ifdef SUPPORT_JOYSTICK module_param_array(joystick_port, int, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port # for Riptide soundcard."); #endif module_param_array(mpu_port, int, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU401 port # for Riptide driver."); module_param_array(opl3_port, int, NULL, 0444); MODULE_PARM_DESC(opl3_port, "OPL3 port # for Riptide driver."); /* */ #define MPU401_HW_RIPTIDE MPU401_HW_MPU401 #define OPL3_HW_RIPTIDE OPL3_HW_OPL3 #define PCI_EXT_CapId 0x40 #define PCI_EXT_NextCapPrt 0x41 #define PCI_EXT_PWMC 0x42 #define PCI_EXT_PWSCR 0x44 #define PCI_EXT_Data00 0x46 #define PCI_EXT_PMSCR_BSE 0x47 #define PCI_EXT_SB_Base 0x48 #define PCI_EXT_FM_Base 0x4a #define PCI_EXT_MPU_Base 0x4C #define PCI_EXT_Game_Base 0x4E #define PCI_EXT_Legacy_Mask 0x50 #define PCI_EXT_AsicRev 0x52 #define PCI_EXT_Reserved3 0x53 #define LEGACY_ENABLE_ALL 0x8000 /* legacy device options */ #define LEGACY_ENABLE_SB 0x4000 #define LEGACY_ENABLE_FM 0x2000 #define LEGACY_ENABLE_MPU_INT 0x1000 #define LEGACY_ENABLE_MPU 0x0800 #define LEGACY_ENABLE_GAMEPORT 0x0400 #define MAX_WRITE_RETRY 10 /* cmd interface limits */ #define MAX_ERROR_COUNT 10 #define CMDIF_TIMEOUT 50000 #define RESET_TRIES 5 #define READ_PORT_ULONG(p) inl((unsigned long)&(p)) #define WRITE_PORT_ULONG(p,x) outl(x,(unsigned long)&(p)) #define READ_AUDIO_CONTROL(p) READ_PORT_ULONG(p->audio_control) #define WRITE_AUDIO_CONTROL(p,x) WRITE_PORT_ULONG(p->audio_control,x) #define 
UMASK_AUDIO_CONTROL(p,x) WRITE_PORT_ULONG(p->audio_control,READ_PORT_ULONG(p->audio_control)|x) #define MASK_AUDIO_CONTROL(p,x) WRITE_PORT_ULONG(p->audio_control,READ_PORT_ULONG(p->audio_control)&x) #define READ_AUDIO_STATUS(p) READ_PORT_ULONG(p->audio_status) #define SET_GRESET(p) UMASK_AUDIO_CONTROL(p,0x0001) /* global reset switch */ #define UNSET_GRESET(p) MASK_AUDIO_CONTROL(p,~0x0001) #define SET_AIE(p) UMASK_AUDIO_CONTROL(p,0x0004) /* interrupt enable */ #define UNSET_AIE(p) MASK_AUDIO_CONTROL(p,~0x0004) #define SET_AIACK(p) UMASK_AUDIO_CONTROL(p,0x0008) /* interrupt acknowledge */ #define UNSET_AIACKT(p) MASKAUDIO_CONTROL(p,~0x0008) #define SET_ECMDAE(p) UMASK_AUDIO_CONTROL(p,0x0010) #define UNSET_ECMDAE(p) MASK_AUDIO_CONTROL(p,~0x0010) #define SET_ECMDBE(p) UMASK_AUDIO_CONTROL(p,0x0020) #define UNSET_ECMDBE(p) MASK_AUDIO_CONTROL(p,~0x0020) #define SET_EDATAF(p) UMASK_AUDIO_CONTROL(p,0x0040) #define UNSET_EDATAF(p) MASK_AUDIO_CONTROL(p,~0x0040) #define SET_EDATBF(p) UMASK_AUDIO_CONTROL(p,0x0080) #define UNSET_EDATBF(p) MASK_AUDIO_CONTROL(p,~0x0080) #define SET_ESBIRQON(p) UMASK_AUDIO_CONTROL(p,0x0100) #define UNSET_ESBIRQON(p) MASK_AUDIO_CONTROL(p,~0x0100) #define SET_EMPUIRQ(p) UMASK_AUDIO_CONTROL(p,0x0200) #define UNSET_EMPUIRQ(p) MASK_AUDIO_CONTROL(p,~0x0200) #define IS_CMDE(a) (READ_PORT_ULONG(a->stat)&0x1) /* cmd empty */ #define IS_DATF(a) (READ_PORT_ULONG(a->stat)&0x2) /* data filled */ #define IS_READY(p) (READ_AUDIO_STATUS(p)&0x0001) #define IS_DLREADY(p) (READ_AUDIO_STATUS(p)&0x0002) #define IS_DLERR(p) (READ_AUDIO_STATUS(p)&0x0004) #define IS_GERR(p) (READ_AUDIO_STATUS(p)&0x0008) /* error ! 
*/ #define IS_CMDAEIRQ(p) (READ_AUDIO_STATUS(p)&0x0010) #define IS_CMDBEIRQ(p) (READ_AUDIO_STATUS(p)&0x0020) #define IS_DATAFIRQ(p) (READ_AUDIO_STATUS(p)&0x0040) #define IS_DATBFIRQ(p) (READ_AUDIO_STATUS(p)&0x0080) #define IS_EOBIRQ(p) (READ_AUDIO_STATUS(p)&0x0100) /* interrupt status */ #define IS_EOSIRQ(p) (READ_AUDIO_STATUS(p)&0x0200) #define IS_EOCIRQ(p) (READ_AUDIO_STATUS(p)&0x0400) #define IS_UNSLIRQ(p) (READ_AUDIO_STATUS(p)&0x0800) #define IS_SBIRQ(p) (READ_AUDIO_STATUS(p)&0x1000) #define IS_MPUIRQ(p) (READ_AUDIO_STATUS(p)&0x2000) #define RESP 0x00000001 /* command flags */ #define PARM 0x00000002 #define CMDA 0x00000004 #define CMDB 0x00000008 #define NILL 0x00000000 #define LONG0(a) ((u32)a) /* shifts and masks */ #define BYTE0(a) (LONG0(a)&0xff) #define BYTE1(a) (BYTE0(a)<<8) #define BYTE2(a) (BYTE0(a)<<16) #define BYTE3(a) (BYTE0(a)<<24) #define WORD0(a) (LONG0(a)&0xffff) #define WORD1(a) (WORD0(a)<<8) #define WORD2(a) (WORD0(a)<<16) #define TRINIB0(a) (LONG0(a)&0xffffff) #define TRINIB1(a) (TRINIB0(a)<<8) #define RET(a) ((union cmdret *)(a)) #define SEND_GETV(p,b) sendcmd(p,RESP,GETV,0,RET(b)) /* get version */ #define SEND_GETC(p,b,c) sendcmd(p,PARM|RESP,GETC,c,RET(b)) #define SEND_GUNS(p,b) sendcmd(p,RESP,GUNS,0,RET(b)) #define SEND_SCID(p,b) sendcmd(p,RESP,SCID,0,RET(b)) #define SEND_RMEM(p,b,c,d) sendcmd(p,PARM|RESP,RMEM|BYTE1(b),LONG0(c),RET(d)) /* memory access for firmware write */ #define SEND_SMEM(p,b,c) sendcmd(p,PARM,SMEM|BYTE1(b),LONG0(c),RET(0)) /* memory access for firmware write */ #define SEND_WMEM(p,b,c) sendcmd(p,PARM,WMEM|BYTE1(b),LONG0(c),RET(0)) /* memory access for firmware write */ #define SEND_SDTM(p,b,c) sendcmd(p,PARM|RESP,SDTM|TRINIB1(b),0,RET(c)) /* memory access for firmware write */ #define SEND_GOTO(p,b) sendcmd(p,PARM,GOTO,LONG0(b),RET(0)) /* memory access for firmware write */ #define SEND_SETDPLL(p) sendcmd(p,0,ARM_SETDPLL,0,RET(0)) #define SEND_SSTR(p,b,c) sendcmd(p,PARM,SSTR|BYTE3(b),LONG0(c),RET(0)) /* start stream 
*/ #define SEND_PSTR(p,b) sendcmd(p,PARM,PSTR,BYTE3(b),RET(0)) /* pause stream */ #define SEND_KSTR(p,b) sendcmd(p,PARM,KSTR,BYTE3(b),RET(0)) /* stop stream */ #define SEND_KDMA(p) sendcmd(p,0,KDMA,0,RET(0)) /* stop all dma */ #define SEND_GPOS(p,b,c,d) sendcmd(p,PARM|RESP,GPOS,BYTE3(c)|BYTE2(b),RET(d)) /* get position in dma */ #define SEND_SETF(p,b,c,d,e,f,g) sendcmd(p,PARM,SETF|WORD1(b)|BYTE3(c),d|BYTE1(e)|BYTE2(f)|BYTE3(g),RET(0)) /* set sample format at mixer */ #define SEND_GSTS(p,b,c,d) sendcmd(p,PARM|RESP,GSTS,BYTE3(c)|BYTE2(b),RET(d)) #define SEND_NGPOS(p,b,c,d) sendcmd(p,PARM|RESP,NGPOS,BYTE3(c)|BYTE2(b),RET(d)) #define SEND_PSEL(p,b,c) sendcmd(p,PARM,PSEL,BYTE2(b)|BYTE3(c),RET(0)) /* activate lbus path */ #define SEND_PCLR(p,b,c) sendcmd(p,PARM,PCLR,BYTE2(b)|BYTE3(c),RET(0)) /* deactivate lbus path */ #define SEND_PLST(p,b) sendcmd(p,PARM,PLST,BYTE3(b),RET(0)) #define SEND_RSSV(p,b,c,d) sendcmd(p,PARM|RESP,RSSV,BYTE2(b)|BYTE3(c),RET(d)) #define SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0)) /* select paths for internal connections */ #define SEND_SSRC(p,b,c,d,e) sendcmd(p,PARM,SSRC|BYTE1(b)|WORD2(c),WORD0(d)|WORD2(e),RET(0)) /* configure source */ #define SEND_SLST(p,b) sendcmd(p,PARM,SLST,BYTE3(b),RET(0)) #define SEND_RSRC(p,b,c) sendcmd(p,RESP,RSRC|BYTE1(b),0,RET(c)) /* read source config */ #define SEND_SSRB(p,b,c) sendcmd(p,PARM,SSRB|BYTE1(b),WORD2(c),RET(0)) #define SEND_SDGV(p,b,c,d,e) sendcmd(p,PARM,SDGV|BYTE2(b)|BYTE3(c),WORD0(d)|WORD2(e),RET(0)) /* set digital mixer */ #define SEND_RDGV(p,b,c,d) sendcmd(p,PARM|RESP,RDGV|BYTE2(b)|BYTE3(c),0,RET(d)) /* read digital mixer */ #define SEND_DLST(p,b) sendcmd(p,PARM,DLST,BYTE3(b),RET(0)) #define SEND_SACR(p,b,c) sendcmd(p,PARM,SACR,WORD0(b)|WORD2(c),RET(0)) /* set AC97 register */ #define SEND_RACR(p,b,c) sendcmd(p,PARM|RESP,RACR,WORD2(b),RET(c)) /* get AC97 register */ #define SEND_ALST(p,b) sendcmd(p,PARM,ALST,BYTE3(b),RET(0)) 
#define SEND_TXAC(p,b,c,d,e,f) sendcmd(p,PARM,TXAC|BYTE1(b)|WORD2(c),WORD0(d)|BYTE2(e)|BYTE3(f),RET(0)) #define SEND_RXAC(p,b,c,d) sendcmd(p,PARM|RESP,RXAC,BYTE2(b)|BYTE3(c),RET(d)) #define SEND_SI2S(p,b) sendcmd(p,PARM,SI2S,WORD2(b),RET(0)) #define EOB_STATUS 0x80000000 /* status flags : block boundary */ #define EOS_STATUS 0x40000000 /* : stoppped */ #define EOC_STATUS 0x20000000 /* : stream end */ #define ERR_STATUS 0x10000000 #define EMPTY_STATUS 0x08000000 #define IEOB_ENABLE 0x1 /* enable interrupts for status notification above */ #define IEOS_ENABLE 0x2 #define IEOC_ENABLE 0x4 #define RDONCE 0x8 #define DESC_MAX_MASK 0xff #define ST_PLAY 0x1 /* stream states */ #define ST_STOP 0x2 #define ST_PAUSE 0x4 #define I2S_INTDEC 3 /* config for I2S link */ #define I2S_MERGER 0 #define I2S_SPLITTER 0 #define I2S_MIXER 7 #define I2S_RATE 44100 #define MODEM_INTDEC 4 /* config for modem link */ #define MODEM_MERGER 3 #define MODEM_SPLITTER 0 #define MODEM_MIXER 11 #define FM_INTDEC 3 /* config for FM/OPL3 link */ #define FM_MERGER 0 #define FM_SPLITTER 0 #define FM_MIXER 9 #define SPLIT_PATH 0x80 /* path splitting flag */ enum FIRMWARE { DATA_REC = 0, EXT_END_OF_FILE, EXT_SEG_ADDR_REC, EXT_GOTO_CMD_REC, EXT_LIN_ADDR_REC, }; enum CMDS { GETV = 0x00, GETC, GUNS, SCID, RMEM = 0x10, SMEM, WMEM, SDTM, GOTO, SSTR = 0x20, PSTR, KSTR, KDMA, GPOS, SETF, GSTS, NGPOS, PSEL = 0x30, PCLR, PLST, RSSV, LSEL, SSRC = 0x40, SLST, RSRC, SSRB, SDGV = 0x50, RDGV, DLST, SACR = 0x60, RACR, ALST, TXAC, RXAC, SI2S = 0x70, ARM_SETDPLL = 0x72, }; enum E1SOURCE { ARM2LBUS_FIFO0 = 0, ARM2LBUS_FIFO1, ARM2LBUS_FIFO2, ARM2LBUS_FIFO3, ARM2LBUS_FIFO4, ARM2LBUS_FIFO5, ARM2LBUS_FIFO6, ARM2LBUS_FIFO7, ARM2LBUS_FIFO8, ARM2LBUS_FIFO9, ARM2LBUS_FIFO10, ARM2LBUS_FIFO11, ARM2LBUS_FIFO12, ARM2LBUS_FIFO13, ARM2LBUS_FIFO14, ARM2LBUS_FIFO15, INTER0_OUT, INTER1_OUT, INTER2_OUT, INTER3_OUT, INTER4_OUT, INTERM0_OUT, INTERM1_OUT, INTERM2_OUT, INTERM3_OUT, INTERM4_OUT, INTERM5_OUT, INTERM6_OUT, DECIMM0_OUT, 
DECIMM1_OUT, DECIMM2_OUT, DECIMM3_OUT, DECIM0_OUT, SR3_4_OUT, OPL3_SAMPLE, ASRC0, ASRC1, ACLNK2PADC, ACLNK2MODEM0RX, ACLNK2MIC, ACLNK2MODEM1RX, ACLNK2HNDMIC, DIGITAL_MIXER_OUT0, GAINFUNC0_OUT, GAINFUNC1_OUT, GAINFUNC2_OUT, GAINFUNC3_OUT, GAINFUNC4_OUT, SOFTMODEMTX, SPLITTER0_OUTL, SPLITTER0_OUTR, SPLITTER1_OUTL, SPLITTER1_OUTR, SPLITTER2_OUTL, SPLITTER2_OUTR, SPLITTER3_OUTL, SPLITTER3_OUTR, MERGER0_OUT, MERGER1_OUT, MERGER2_OUT, MERGER3_OUT, ARM2LBUS_FIFO_DIRECT, NO_OUT }; enum E2SINK { LBUS2ARM_FIFO0 = 0, LBUS2ARM_FIFO1, LBUS2ARM_FIFO2, LBUS2ARM_FIFO3, LBUS2ARM_FIFO4, LBUS2ARM_FIFO5, LBUS2ARM_FIFO6, LBUS2ARM_FIFO7, INTER0_IN, INTER1_IN, INTER2_IN, INTER3_IN, INTER4_IN, INTERM0_IN, INTERM1_IN, INTERM2_IN, INTERM3_IN, INTERM4_IN, INTERM5_IN, INTERM6_IN, DECIMM0_IN, DECIMM1_IN, DECIMM2_IN, DECIMM3_IN, DECIM0_IN, SR3_4_IN, PDAC2ACLNK, MODEM0TX2ACLNK, MODEM1TX2ACLNK, HNDSPK2ACLNK, DIGITAL_MIXER_IN0, DIGITAL_MIXER_IN1, DIGITAL_MIXER_IN2, DIGITAL_MIXER_IN3, DIGITAL_MIXER_IN4, DIGITAL_MIXER_IN5, DIGITAL_MIXER_IN6, DIGITAL_MIXER_IN7, DIGITAL_MIXER_IN8, DIGITAL_MIXER_IN9, DIGITAL_MIXER_IN10, DIGITAL_MIXER_IN11, GAINFUNC0_IN, GAINFUNC1_IN, GAINFUNC2_IN, GAINFUNC3_IN, GAINFUNC4_IN, SOFTMODEMRX, SPLITTER0_IN, SPLITTER1_IN, SPLITTER2_IN, SPLITTER3_IN, MERGER0_INL, MERGER0_INR, MERGER1_INL, MERGER1_INR, MERGER2_INL, MERGER2_INR, MERGER3_INL, MERGER3_INR, E2SINK_MAX }; enum LBUS_SINK { LS_SRC_INTERPOLATOR = 0, LS_SRC_INTERPOLATORM, LS_SRC_DECIMATOR, LS_SRC_DECIMATORM, LS_MIXER_IN, LS_MIXER_GAIN_FUNCTION, LS_SRC_SPLITTER, LS_SRC_MERGER, LS_NONE1, LS_NONE2, }; enum RT_CHANNEL_IDS { M0TX = 0, M1TX, TAMTX, HSSPKR, PDAC, DSNDTX0, DSNDTX1, DSNDTX2, DSNDTX3, DSNDTX4, DSNDTX5, DSNDTX6, DSNDTX7, WVSTRTX, COP3DTX, SPARE, M0RX, HSMIC, M1RX, CLEANRX, MICADC, PADC, COPRX1, COPRX2, CHANNEL_ID_COUNTER }; enum { SB_CMD = 0, MODEM_CMD, I2S_CMD0, I2S_CMD1, FM_CMD, MAX_CMD }; struct lbuspath { unsigned char *noconv; unsigned char *stereo; unsigned char *mono; }; struct cmdport { u32 data1; /* 
cmd,param */ u32 data2; /* param */ u32 stat; /* status */ u32 pad[5]; }; struct riptideport { u32 audio_control; /* status registers */ u32 audio_status; u32 pad[2]; struct cmdport port[2]; /* command ports */ }; struct cmdif { struct riptideport *hwport; spinlock_t lock; unsigned int cmdcnt; /* cmd statistics */ unsigned int cmdtime; unsigned int cmdtimemax; unsigned int cmdtimemin; unsigned int errcnt; int is_reset; }; struct riptide_firmware { u16 ASIC; u16 CODEC; u16 AUXDSP; u16 PROG; }; union cmdret { u8 retbytes[8]; u16 retwords[4]; u32 retlongs[2]; }; union firmware_version { union cmdret ret; struct riptide_firmware firmware; }; #define get_pcmhwdev(substream) (struct pcmhw *)(substream->runtime->private_data) #define PLAYBACK_SUBSTREAMS 3 struct snd_riptide { struct snd_card *card; struct pci_dev *pci; const struct firmware *fw_entry; struct cmdif *cif; struct snd_pcm *pcm; struct snd_pcm *pcm_i2s; struct snd_rawmidi *rmidi; struct snd_opl3 *opl3; struct snd_ac97 *ac97; struct snd_ac97_bus *ac97_bus; struct snd_pcm_substream *playback_substream[PLAYBACK_SUBSTREAMS]; struct snd_pcm_substream *capture_substream; int openstreams; int irq; unsigned long port; unsigned short mpuaddr; unsigned short opladdr; #ifdef SUPPORT_JOYSTICK unsigned short gameaddr; #endif struct resource *res_port; unsigned short device_id; union firmware_version firmware; spinlock_t lock; struct tasklet_struct riptide_tq; struct snd_info_entry *proc_entry; unsigned long received_irqs; unsigned long handled_irqs; #ifdef CONFIG_PM int in_suspend; #endif }; struct sgd { /* scatter gather desriptor */ u32 dwNextLink; u32 dwSegPtrPhys; u32 dwSegLen; u32 dwStat_Ctl; }; struct pcmhw { /* pcm descriptor */ struct lbuspath paths; unsigned char *lbuspath; unsigned char source; unsigned char intdec[2]; unsigned char mixer; unsigned char id; unsigned char state; unsigned int rate; unsigned int channels; snd_pcm_format_t format; struct snd_dma_buffer sgdlist; struct sgd *sgdbuf; unsigned int size; 
unsigned int pages; unsigned int oldpos; unsigned int pointer; }; #define CMDRET_ZERO (union cmdret){{(u32)0, (u32) 0}} static int sendcmd(struct cmdif *cif, u32 flags, u32 cmd, u32 parm, union cmdret *ret); static int getsourcesink(struct cmdif *cif, unsigned char source, unsigned char sink, unsigned char *a, unsigned char *b); static int snd_riptide_initialize(struct snd_riptide *chip); static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip); /* */ static DEFINE_PCI_DEVICE_TABLE(snd_riptide_ids) = { { PCI_DEVICE(0x127a, 0x4310) }, { PCI_DEVICE(0x127a, 0x4320) }, { PCI_DEVICE(0x127a, 0x4330) }, { PCI_DEVICE(0x127a, 0x4340) }, {0,}, }; #ifdef SUPPORT_JOYSTICK static DEFINE_PCI_DEVICE_TABLE(snd_riptide_joystick_ids) = { { PCI_DEVICE(0x127a, 0x4312) }, { PCI_DEVICE(0x127a, 0x4322) }, { PCI_DEVICE(0x127a, 0x4332) }, { PCI_DEVICE(0x127a, 0x4342) }, {0,}, }; #endif MODULE_DEVICE_TABLE(pci, snd_riptide_ids); /* */ static unsigned char lbusin2out[E2SINK_MAX + 1][2] = { {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {INTER0_OUT, LS_SRC_INTERPOLATOR}, {INTER1_OUT, LS_SRC_INTERPOLATOR}, {INTER2_OUT, LS_SRC_INTERPOLATOR}, {INTER3_OUT, LS_SRC_INTERPOLATOR}, {INTER4_OUT, LS_SRC_INTERPOLATOR}, {INTERM0_OUT, LS_SRC_INTERPOLATORM}, {INTERM1_OUT, LS_SRC_INTERPOLATORM}, {INTERM2_OUT, LS_SRC_INTERPOLATORM}, {INTERM3_OUT, LS_SRC_INTERPOLATORM}, {INTERM4_OUT, LS_SRC_INTERPOLATORM}, {INTERM5_OUT, LS_SRC_INTERPOLATORM}, {INTERM6_OUT, LS_SRC_INTERPOLATORM}, {DECIMM0_OUT, LS_SRC_DECIMATORM}, {DECIMM1_OUT, LS_SRC_DECIMATORM}, {DECIMM2_OUT, LS_SRC_DECIMATORM}, {DECIMM3_OUT, LS_SRC_DECIMATORM}, {DECIM0_OUT, LS_SRC_DECIMATOR}, {SR3_4_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1}, {NO_OUT, LS_NONE2}, {NO_OUT, LS_NONE1}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, 
{DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {DIGITAL_MIXER_OUT0, LS_MIXER_IN}, {GAINFUNC0_OUT, LS_MIXER_GAIN_FUNCTION}, {GAINFUNC1_OUT, LS_MIXER_GAIN_FUNCTION}, {GAINFUNC2_OUT, LS_MIXER_GAIN_FUNCTION}, {GAINFUNC3_OUT, LS_MIXER_GAIN_FUNCTION}, {GAINFUNC4_OUT, LS_MIXER_GAIN_FUNCTION}, {SOFTMODEMTX, LS_NONE1}, {SPLITTER0_OUTL, LS_SRC_SPLITTER}, {SPLITTER1_OUTL, LS_SRC_SPLITTER}, {SPLITTER2_OUTL, LS_SRC_SPLITTER}, {SPLITTER3_OUTL, LS_SRC_SPLITTER}, {MERGER0_OUT, LS_SRC_MERGER}, {MERGER0_OUT, LS_SRC_MERGER}, {MERGER1_OUT, LS_SRC_MERGER}, {MERGER1_OUT, LS_SRC_MERGER}, {MERGER2_OUT, LS_SRC_MERGER}, {MERGER2_OUT, LS_SRC_MERGER}, {MERGER3_OUT, LS_SRC_MERGER}, {MERGER3_OUT, LS_SRC_MERGER}, {NO_OUT, LS_NONE2}, }; static unsigned char lbus_play_opl3[] = { DIGITAL_MIXER_IN0 + FM_MIXER, 0xff }; static unsigned char lbus_play_modem[] = { DIGITAL_MIXER_IN0 + MODEM_MIXER, 0xff }; static unsigned char lbus_play_i2s[] = { INTER0_IN + I2S_INTDEC, DIGITAL_MIXER_IN0 + I2S_MIXER, 0xff }; static unsigned char lbus_play_out[] = { PDAC2ACLNK, 0xff }; static unsigned char lbus_play_outhp[] = { HNDSPK2ACLNK, 0xff }; static unsigned char lbus_play_noconv1[] = { DIGITAL_MIXER_IN0, 0xff }; static unsigned char lbus_play_stereo1[] = { INTER0_IN, DIGITAL_MIXER_IN0, 0xff }; static unsigned char lbus_play_mono1[] = { INTERM0_IN, DIGITAL_MIXER_IN0, 0xff }; static unsigned char lbus_play_noconv2[] = { DIGITAL_MIXER_IN1, 0xff }; static unsigned char lbus_play_stereo2[] = { INTER1_IN, DIGITAL_MIXER_IN1, 0xff }; static unsigned char lbus_play_mono2[] = { INTERM1_IN, DIGITAL_MIXER_IN1, 0xff }; static unsigned char lbus_play_noconv3[] = { DIGITAL_MIXER_IN2, 0xff }; static unsigned char lbus_play_stereo3[] = { INTER2_IN, DIGITAL_MIXER_IN2, 0xff }; static unsigned char lbus_play_mono3[] = { INTERM2_IN, 
DIGITAL_MIXER_IN2, 0xff }; static unsigned char lbus_rec_noconv1[] = { LBUS2ARM_FIFO5, 0xff }; static unsigned char lbus_rec_stereo1[] = { DECIM0_IN, LBUS2ARM_FIFO5, 0xff }; static unsigned char lbus_rec_mono1[] = { DECIMM3_IN, LBUS2ARM_FIFO5, 0xff }; static unsigned char play_ids[] = { 4, 1, 2, }; static unsigned char play_sources[] = { ARM2LBUS_FIFO4, ARM2LBUS_FIFO1, ARM2LBUS_FIFO2, }; static struct lbuspath lbus_play_paths[] = { { .noconv = lbus_play_noconv1, .stereo = lbus_play_stereo1, .mono = lbus_play_mono1, }, { .noconv = lbus_play_noconv2, .stereo = lbus_play_stereo2, .mono = lbus_play_mono2, }, { .noconv = lbus_play_noconv3, .stereo = lbus_play_stereo3, .mono = lbus_play_mono3, }, }; static struct lbuspath lbus_rec_path = { .noconv = lbus_rec_noconv1, .stereo = lbus_rec_stereo1, .mono = lbus_rec_mono1, }; #define FIRMWARE_VERSIONS 1 static union firmware_version firmware_versions[] = { { .firmware = { .ASIC = 3, .CODEC = 2, .AUXDSP = 3, .PROG = 773, }, }, }; static u32 atoh(const unsigned char *in, unsigned int len) { u32 sum = 0; unsigned int mult = 1; unsigned char c; while (len) { int value; c = in[len - 1]; value = hex_to_bin(c); if (value >= 0) sum += mult * value; mult *= 16; --len; } return sum; } static int senddata(struct cmdif *cif, const unsigned char *in, u32 offset) { u32 addr; u32 data; u32 i; const unsigned char *p; i = atoh(&in[1], 2); addr = offset + atoh(&in[3], 4); if (SEND_SMEM(cif, 0, addr) != 0) return -EACCES; p = in + 9; while (i) { data = atoh(p, 8); if (SEND_WMEM(cif, 2, ((data & 0x0f0f0f0f) << 4) | ((data & 0xf0f0f0f0) >> 4))) return -EACCES; i -= 4; p += 8; } return 0; } static int loadfirmware(struct cmdif *cif, const unsigned char *img, unsigned int size) { const unsigned char *in; u32 laddr, saddr, t, val; int err = 0; laddr = saddr = 0; while (size > 0 && err == 0) { in = img; if (in[0] == ':') { t = atoh(&in[7], 2); switch (t) { case DATA_REC: err = senddata(cif, in, laddr + saddr); break; case EXT_SEG_ADDR_REC: saddr = 
atoh(&in[9], 4) << 4; break; case EXT_LIN_ADDR_REC: laddr = atoh(&in[9], 4) << 16; break; case EXT_GOTO_CMD_REC: val = atoh(&in[9], 8); if (SEND_GOTO(cif, val) != 0) err = -EACCES; break; case EXT_END_OF_FILE: size = 0; break; default: break; } while (size > 0) { size--; if (*img++ == '\n') break; } } } snd_printdd("load firmware return %d\n", err); return err; } static void alloclbuspath(struct cmdif *cif, unsigned char source, unsigned char *path, unsigned char *mixer, unsigned char *s) { while (*path != 0xff) { unsigned char sink, type; sink = *path & (~SPLIT_PATH); if (sink != E2SINK_MAX) { snd_printdd("alloc path 0x%x->0x%x\n", source, sink); SEND_PSEL(cif, source, sink); source = lbusin2out[sink][0]; type = lbusin2out[sink][1]; if (type == LS_MIXER_IN) { if (mixer) *mixer = sink - DIGITAL_MIXER_IN0; } if (type == LS_SRC_DECIMATORM || type == LS_SRC_DECIMATOR || type == LS_SRC_INTERPOLATORM || type == LS_SRC_INTERPOLATOR) { if (s) { if (s[0] != 0xff) s[1] = sink; else s[0] = sink; } } } if (*path++ & SPLIT_PATH) { unsigned char *npath = path; while (*npath != 0xff) npath++; alloclbuspath(cif, source + 1, ++npath, mixer, s); } } } static void freelbuspath(struct cmdif *cif, unsigned char source, unsigned char *path) { while (*path != 0xff) { unsigned char sink; sink = *path & (~SPLIT_PATH); if (sink != E2SINK_MAX) { snd_printdd("free path 0x%x->0x%x\n", source, sink); SEND_PCLR(cif, source, sink); source = lbusin2out[sink][0]; } if (*path++ & SPLIT_PATH) { unsigned char *npath = path; while (*npath != 0xff) npath++; freelbuspath(cif, source + 1, ++npath); } } } static int writearm(struct cmdif *cif, u32 addr, u32 data, u32 mask) { union cmdret rptr = CMDRET_ZERO; unsigned int i = MAX_WRITE_RETRY; int flag = 1; SEND_RMEM(cif, 0x02, addr, &rptr); rptr.retlongs[0] &= (~mask); while (--i) { SEND_SMEM(cif, 0x01, addr); SEND_WMEM(cif, 0x02, (rptr.retlongs[0] | data)); SEND_RMEM(cif, 0x02, addr, &rptr); if ((rptr.retlongs[0] & data) == data) { flag = 0; break; } else 
rptr.retlongs[0] &= ~mask; } snd_printdd("send arm 0x%x 0x%x 0x%x return %d\n", addr, data, mask, flag); return flag; } static int sendcmd(struct cmdif *cif, u32 flags, u32 cmd, u32 parm, union cmdret *ret) { int i, j; int err; unsigned int time = 0; unsigned long irqflags; struct riptideport *hwport; struct cmdport *cmdport = NULL; if (snd_BUG_ON(!cif)) return -EINVAL; hwport = cif->hwport; if (cif->errcnt > MAX_ERROR_COUNT) { if (cif->is_reset) { snd_printk(KERN_ERR "Riptide: Too many failed cmds, reinitializing\n"); if (riptide_reset(cif, NULL) == 0) { cif->errcnt = 0; return -EIO; } } snd_printk(KERN_ERR "Riptide: Initialization failed.\n"); return -EINVAL; } if (ret) { ret->retlongs[0] = 0; ret->retlongs[1] = 0; } i = 0; spin_lock_irqsave(&cif->lock, irqflags); while (i++ < CMDIF_TIMEOUT && !IS_READY(cif->hwport)) udelay(10); if (i > CMDIF_TIMEOUT) { err = -EBUSY; goto errout; } err = 0; for (j = 0, time = 0; time < CMDIF_TIMEOUT; j++, time += 2) { cmdport = &(hwport->port[j % 2]); if (IS_DATF(cmdport)) { /* free pending data */ READ_PORT_ULONG(cmdport->data1); READ_PORT_ULONG(cmdport->data2); } if (IS_CMDE(cmdport)) { if (flags & PARM) /* put data */ WRITE_PORT_ULONG(cmdport->data2, parm); WRITE_PORT_ULONG(cmdport->data1, cmd); /* write cmd */ if ((flags & RESP) && ret) { while (!IS_DATF(cmdport) && time < CMDIF_TIMEOUT) { udelay(10); time++; } if (time < CMDIF_TIMEOUT) { /* read response */ ret->retlongs[0] = READ_PORT_ULONG(cmdport->data1); ret->retlongs[1] = READ_PORT_ULONG(cmdport->data2); } else { err = -ENOSYS; goto errout; } } break; } udelay(20); } if (time == CMDIF_TIMEOUT) { err = -ENODATA; goto errout; } spin_unlock_irqrestore(&cif->lock, irqflags); cif->cmdcnt++; /* update command statistics */ cif->cmdtime += time; if (time > cif->cmdtimemax) cif->cmdtimemax = time; if (time < cif->cmdtimemin) cif->cmdtimemin = time; if ((cif->cmdcnt) % 1000 == 0) snd_printdd ("send cmd %d time: %d mintime: %d maxtime %d err: %d\n", cif->cmdcnt, cif->cmdtime, 
cif->cmdtimemin, cif->cmdtimemax, cif->errcnt); return 0; errout: cif->errcnt++; spin_unlock_irqrestore(&cif->lock, irqflags); snd_printdd ("send cmd %d hw: 0x%x flag: 0x%x cmd: 0x%x parm: 0x%x ret: 0x%x 0x%x CMDE: %d DATF: %d failed %d\n", cif->cmdcnt, (int)((void *)&(cmdport->stat) - (void *)hwport), flags, cmd, parm, ret ? ret->retlongs[0] : 0, ret ? ret->retlongs[1] : 0, IS_CMDE(cmdport), IS_DATF(cmdport), err); return err; } static int setmixer(struct cmdif *cif, short num, unsigned short rval, unsigned short lval) { union cmdret rptr = CMDRET_ZERO; int i = 0; snd_printdd("sent mixer %d: 0x%d 0x%d\n", num, rval, lval); do { SEND_SDGV(cif, num, num, rval, lval); SEND_RDGV(cif, num, num, &rptr); if (rptr.retwords[0] == lval && rptr.retwords[1] == rval) return 0; } while (i++ < MAX_WRITE_RETRY); snd_printdd("sent mixer failed\n"); return -EIO; } static int getpaths(struct cmdif *cif, unsigned char *o) { unsigned char src[E2SINK_MAX]; unsigned char sink[E2SINK_MAX]; int i, j = 0; for (i = 0; i < E2SINK_MAX; i++) { getsourcesink(cif, i, i, &src[i], &sink[i]); if (sink[i] < E2SINK_MAX) { o[j++] = sink[i]; o[j++] = i; } } return j; } static int getsourcesink(struct cmdif *cif, unsigned char source, unsigned char sink, unsigned char *a, unsigned char *b) { union cmdret rptr = CMDRET_ZERO; if (SEND_RSSV(cif, source, sink, &rptr) && SEND_RSSV(cif, source, sink, &rptr)) return -EIO; *a = rptr.retbytes[0]; *b = rptr.retbytes[1]; snd_printdd("getsourcesink 0x%x 0x%x\n", *a, *b); return 0; } static int getsamplerate(struct cmdif *cif, unsigned char *intdec, unsigned int *rate) { unsigned char *s; unsigned int p[2] = { 0, 0 }; int i; union cmdret rptr = CMDRET_ZERO; s = intdec; for (i = 0; i < 2; i++) { if (*s != 0xff) { if (SEND_RSRC(cif, *s, &rptr) && SEND_RSRC(cif, *s, &rptr)) return -EIO; p[i] += rptr.retwords[1]; p[i] *= rptr.retwords[2]; p[i] += rptr.retwords[3]; p[i] /= 65536; } s++; } if (p[0]) { if (p[1] != p[0]) snd_printdd("rates differ %d %d\n", p[0], p[1]); 
*rate = (unsigned int)p[0]; } else *rate = (unsigned int)p[1]; snd_printdd("getsampleformat %d %d %d\n", intdec[0], intdec[1], *rate); return 0; } static int setsampleformat(struct cmdif *cif, unsigned char mixer, unsigned char id, unsigned char channels, unsigned char format) { unsigned char w, ch, sig, order; snd_printdd ("setsampleformat mixer: %d id: %d channels: %d format: %d\n", mixer, id, channels, format); ch = channels == 1; w = snd_pcm_format_width(format) == 8; sig = snd_pcm_format_unsigned(format) != 0; order = snd_pcm_format_big_endian(format) != 0; if (SEND_SETF(cif, mixer, w, ch, order, sig, id) && SEND_SETF(cif, mixer, w, ch, order, sig, id)) { snd_printdd("setsampleformat failed\n"); return -EIO; } return 0; } static int setsamplerate(struct cmdif *cif, unsigned char *intdec, unsigned int rate) { u32 D, M, N; union cmdret rptr = CMDRET_ZERO; int i; snd_printdd("setsamplerate intdec: %d,%d rate: %d\n", intdec[0], intdec[1], rate); D = 48000; M = ((rate == 48000) ? 47999 : rate) * 65536; N = M % D; M /= D; for (i = 0; i < 2; i++) { if (*intdec != 0xff) { do { SEND_SSRC(cif, *intdec, D, M, N); SEND_RSRC(cif, *intdec, &rptr); } while (rptr.retwords[1] != D && rptr.retwords[2] != M && rptr.retwords[3] != N && i++ < MAX_WRITE_RETRY); if (i > MAX_WRITE_RETRY) { snd_printdd("sent samplerate %d: %d failed\n", *intdec, rate); return -EIO; } } intdec++; } return 0; } static int getmixer(struct cmdif *cif, short num, unsigned short *rval, unsigned short *lval) { union cmdret rptr = CMDRET_ZERO; if (SEND_RDGV(cif, num, num, &rptr) && SEND_RDGV(cif, num, num, &rptr)) return -EIO; *rval = rptr.retwords[0]; *lval = rptr.retwords[1]; snd_printdd("got mixer %d: 0x%d 0x%d\n", num, *rval, *lval); return 0; } static void riptide_handleirq(unsigned long dev_id) { struct snd_riptide *chip = (void *)dev_id; struct cmdif *cif = chip->cif; struct snd_pcm_substream *substream[PLAYBACK_SUBSTREAMS + 1]; struct snd_pcm_runtime *runtime; struct pcmhw *data = NULL; unsigned int 
pos, period_bytes; struct sgd *c; int i, j; unsigned int flag; if (!cif) return; for (i = 0; i < PLAYBACK_SUBSTREAMS; i++) substream[i] = chip->playback_substream[i]; substream[i] = chip->capture_substream; for (i = 0; i < PLAYBACK_SUBSTREAMS + 1; i++) { if (substream[i] && (runtime = substream[i]->runtime) && (data = runtime->private_data) && data->state != ST_STOP) { pos = 0; for (j = 0; j < data->pages; j++) { c = &data->sgdbuf[j]; flag = le32_to_cpu(c->dwStat_Ctl); if (flag & EOB_STATUS) pos += le32_to_cpu(c->dwSegLen); if (flag & EOC_STATUS) pos += le32_to_cpu(c->dwSegLen); if ((flag & EOS_STATUS) && (data->state == ST_PLAY)) { data->state = ST_STOP; snd_printk(KERN_ERR "Riptide: DMA stopped unexpectedly\n"); } c->dwStat_Ctl = cpu_to_le32(flag & ~(EOS_STATUS | EOB_STATUS | EOC_STATUS)); } data->pointer += pos; pos += data->oldpos; if (data->state != ST_STOP) { period_bytes = frames_to_bytes(runtime, runtime->period_size); snd_printdd ("interrupt 0x%x after 0x%lx of 0x%lx frames in period\n", READ_AUDIO_STATUS(cif->hwport), bytes_to_frames(runtime, pos), runtime->period_size); j = 0; if (pos >= period_bytes) { j++; while (pos >= period_bytes) pos -= period_bytes; } data->oldpos = pos; if (j > 0) snd_pcm_period_elapsed(substream[i]); } } } } #ifdef CONFIG_PM static int riptide_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_riptide *chip = card->private_data; chip->in_suspend = 1; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_ac97_suspend(chip->ac97); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int riptide_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_riptide *chip = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "riptide: pci_enable_device failed, " "disabling 
device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_riptide_initialize(chip); snd_ac97_resume(chip->ac97); snd_power_change_state(card, SNDRV_CTL_POWER_D0); chip->in_suspend = 0; return 0; } #endif static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip) { union firmware_version firmware = { .ret = CMDRET_ZERO }; int i, timeout, err; for (i = 0; i < 2; i++) { WRITE_PORT_ULONG(cif->hwport->port[i].data1, 0); WRITE_PORT_ULONG(cif->hwport->port[i].data2, 0); } SET_GRESET(cif->hwport); udelay(100); UNSET_GRESET(cif->hwport); udelay(100); for (timeout = 100000; --timeout; udelay(10)) { if (IS_READY(cif->hwport) && !IS_GERR(cif->hwport)) break; } if (!timeout) { snd_printk(KERN_ERR "Riptide: device not ready, audio status: 0x%x " "ready: %d gerr: %d\n", READ_AUDIO_STATUS(cif->hwport), IS_READY(cif->hwport), IS_GERR(cif->hwport)); return -EIO; } else { snd_printdd ("Riptide: audio status: 0x%x ready: %d gerr: %d\n", READ_AUDIO_STATUS(cif->hwport), IS_READY(cif->hwport), IS_GERR(cif->hwport)); } SEND_GETV(cif, &firmware.ret); snd_printdd("Firmware version: ASIC: %d CODEC %d AUXDSP %d PROG %d\n", firmware.firmware.ASIC, firmware.firmware.CODEC, firmware.firmware.AUXDSP, firmware.firmware.PROG); if (!chip) return 1; for (i = 0; i < FIRMWARE_VERSIONS; i++) { if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware))) return 1; /* OK */ } snd_printdd("Writing Firmware\n"); if (!chip->fw_entry) { err = request_firmware(&chip->fw_entry, "riptide.hex", &chip->pci->dev); if (err) { snd_printk(KERN_ERR "Riptide: Firmware not available %d\n", err); return -EIO; } } err = loadfirmware(cif, chip->fw_entry->data, chip->fw_entry->size); if (err) { snd_printk(KERN_ERR "Riptide: Could not load firmware %d\n", err); return err; } chip->firmware = firmware; return 1; /* OK */ } static int riptide_reset(struct cmdif *cif, struct snd_riptide *chip) { union cmdret rptr = CMDRET_ZERO; int err, tries; if (!cif) return -EINVAL; cif->cmdcnt = 0; 
cif->cmdtime = 0; cif->cmdtimemax = 0; cif->cmdtimemin = 0xffffffff; cif->errcnt = 0; cif->is_reset = 0; tries = RESET_TRIES; do { err = try_to_load_firmware(cif, chip); if (err < 0) return err; } while (!err && --tries); SEND_SACR(cif, 0, AC97_RESET); SEND_RACR(cif, AC97_RESET, &rptr); snd_printdd("AC97: 0x%x 0x%x\n", rptr.retlongs[0], rptr.retlongs[1]); SEND_PLST(cif, 0); SEND_SLST(cif, 0); SEND_DLST(cif, 0); SEND_ALST(cif, 0); SEND_KDMA(cif); writearm(cif, 0x301F8, 1, 1); writearm(cif, 0x301F4, 1, 1); SEND_LSEL(cif, MODEM_CMD, 0, 0, MODEM_INTDEC, MODEM_MERGER, MODEM_SPLITTER, MODEM_MIXER); setmixer(cif, MODEM_MIXER, 0x7fff, 0x7fff); alloclbuspath(cif, ARM2LBUS_FIFO13, lbus_play_modem, NULL, NULL); SEND_LSEL(cif, FM_CMD, 0, 0, FM_INTDEC, FM_MERGER, FM_SPLITTER, FM_MIXER); setmixer(cif, FM_MIXER, 0x7fff, 0x7fff); writearm(cif, 0x30648 + FM_MIXER * 4, 0x01, 0x00000005); writearm(cif, 0x301A8, 0x02, 0x00000002); writearm(cif, 0x30264, 0x08, 0xffffffff); alloclbuspath(cif, OPL3_SAMPLE, lbus_play_opl3, NULL, NULL); SEND_SSRC(cif, I2S_INTDEC, 48000, ((u32) I2S_RATE * 65536) / 48000, ((u32) I2S_RATE * 65536) % 48000); SEND_LSEL(cif, I2S_CMD0, 0, 0, I2S_INTDEC, I2S_MERGER, I2S_SPLITTER, I2S_MIXER); SEND_SI2S(cif, 1); alloclbuspath(cif, ARM2LBUS_FIFO0, lbus_play_i2s, NULL, NULL); alloclbuspath(cif, DIGITAL_MIXER_OUT0, lbus_play_out, NULL, NULL); alloclbuspath(cif, DIGITAL_MIXER_OUT0, lbus_play_outhp, NULL, NULL); SET_AIACK(cif->hwport); SET_AIE(cif->hwport); SET_AIACK(cif->hwport); cif->is_reset = 1; return 0; } static struct snd_pcm_hardware snd_riptide_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE, .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000, .rate_min = 5500, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (64 * 
1024), .period_bytes_min = PAGE_SIZE >> 1, .period_bytes_max = PAGE_SIZE << 8, .periods_min = 2, .periods_max = 64, .fifo_size = 0, }; static struct snd_pcm_hardware snd_riptide_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE, .rates = SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_48000, .rate_min = 5500, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (64 * 1024), .period_bytes_min = PAGE_SIZE >> 1, .period_bytes_max = PAGE_SIZE << 3, .periods_min = 2, .periods_max = 64, .fifo_size = 0, }; static snd_pcm_uframes_t snd_riptide_pointer(struct snd_pcm_substream *substream) { struct snd_riptide *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct pcmhw *data = get_pcmhwdev(substream); struct cmdif *cif = chip->cif; union cmdret rptr = CMDRET_ZERO; snd_pcm_uframes_t ret; SEND_GPOS(cif, 0, data->id, &rptr); if (data->size && runtime->period_size) { snd_printdd ("pointer stream %d position 0x%x(0x%x in buffer) bytes 0x%lx(0x%lx in period) frames\n", data->id, rptr.retlongs[1], rptr.retlongs[1] % data->size, bytes_to_frames(runtime, rptr.retlongs[1]), bytes_to_frames(runtime, rptr.retlongs[1]) % runtime->period_size); if (rptr.retlongs[1] > data->pointer) ret = bytes_to_frames(runtime, rptr.retlongs[1] % data->size); else ret = bytes_to_frames(runtime, data->pointer % data->size); } else { snd_printdd("stream not started or strange parms (%d %ld)\n", data->size, runtime->period_size); ret = bytes_to_frames(runtime, 0); } return ret; } static int snd_riptide_trigger(struct snd_pcm_substream *substream, int cmd) { int i, j; struct snd_riptide *chip = snd_pcm_substream_chip(substream); struct pcmhw *data = get_pcmhwdev(substream); struct cmdif *cif = chip->cif; union cmdret rptr = 
CMDRET_ZERO; spin_lock(&chip->lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: if (!(data->state & ST_PLAY)) { SEND_SSTR(cif, data->id, data->sgdlist.addr); SET_AIE(cif->hwport); data->state = ST_PLAY; if (data->mixer != 0xff) setmixer(cif, data->mixer, 0x7fff, 0x7fff); chip->openstreams++; data->oldpos = 0; data->pointer = 0; } break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: if (data->mixer != 0xff) setmixer(cif, data->mixer, 0, 0); setmixer(cif, data->mixer, 0, 0); SEND_KSTR(cif, data->id); data->state = ST_STOP; chip->openstreams--; j = 0; do { i = rptr.retlongs[1]; SEND_GPOS(cif, 0, data->id, &rptr); udelay(1); } while (i != rptr.retlongs[1] && j++ < MAX_WRITE_RETRY); if (j > MAX_WRITE_RETRY) snd_printk(KERN_ERR "Riptide: Could not stop stream!"); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (!(data->state & ST_PAUSE)) { SEND_PSTR(cif, data->id); data->state |= ST_PAUSE; chip->openstreams--; } break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (data->state & ST_PAUSE) { SEND_SSTR(cif, data->id, data->sgdlist.addr); data->state &= ~ST_PAUSE; chip->openstreams++; } break; default: spin_unlock(&chip->lock); return -EINVAL; } spin_unlock(&chip->lock); return 0; } static int snd_riptide_prepare(struct snd_pcm_substream *substream) { struct snd_riptide *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct pcmhw *data = get_pcmhwdev(substream); struct cmdif *cif = chip->cif; unsigned char *lbuspath = NULL; unsigned int rate, channels; int err = 0; snd_pcm_format_t format; if (snd_BUG_ON(!cif || !data)) return -EINVAL; snd_printdd("prepare id %d ch: %d f:0x%x r:%d\n", data->id, runtime->channels, runtime->format, runtime->rate); spin_lock_irq(&chip->lock); channels = runtime->channels; format = runtime->format; rate = runtime->rate; switch (channels) { case 1: if (rate == 48000 && format == SNDRV_PCM_FORMAT_S16_LE) lbuspath = data->paths.noconv; else lbuspath = 
data->paths.mono; break; case 2: if (rate == 48000 && format == SNDRV_PCM_FORMAT_S16_LE) lbuspath = data->paths.noconv; else lbuspath = data->paths.stereo; break; } snd_printdd("use sgdlist at 0x%p\n", data->sgdlist.area); if (data->sgdlist.area) { unsigned int i, j, size, pages, f, pt, period; struct sgd *c, *p = NULL; size = frames_to_bytes(runtime, runtime->buffer_size); period = frames_to_bytes(runtime, runtime->period_size); f = PAGE_SIZE; while ((size + (f >> 1) - 1) <= (f << 7) && (f << 1) > period) f = f >> 1; pages = (size + f - 1) / f; data->size = size; data->pages = pages; snd_printdd ("create sgd size: 0x%x pages %d of size 0x%x for period 0x%x\n", size, pages, f, period); pt = 0; j = 0; for (i = 0; i < pages; i++) { unsigned int ofs, addr; c = &data->sgdbuf[i]; if (p) p->dwNextLink = cpu_to_le32(data->sgdlist.addr + (i * sizeof(struct sgd))); c->dwNextLink = cpu_to_le32(data->sgdlist.addr); ofs = j << PAGE_SHIFT; addr = snd_pcm_sgbuf_get_addr(substream, ofs) + pt; c->dwSegPtrPhys = cpu_to_le32(addr); pt = (pt + f) % PAGE_SIZE; if (pt == 0) j++; c->dwSegLen = cpu_to_le32(f); c->dwStat_Ctl = cpu_to_le32(IEOB_ENABLE | IEOS_ENABLE | IEOC_ENABLE); p = c; size -= f; } data->sgdbuf[i].dwSegLen = cpu_to_le32(size); } if (lbuspath && lbuspath != data->lbuspath) { if (data->lbuspath) freelbuspath(cif, data->source, data->lbuspath); alloclbuspath(cif, data->source, lbuspath, &data->mixer, data->intdec); data->lbuspath = lbuspath; data->rate = 0; } if (data->rate != rate || data->format != format || data->channels != channels) { data->rate = rate; data->format = format; data->channels = channels; if (setsampleformat (cif, data->mixer, data->id, channels, format) || setsamplerate(cif, data->intdec, rate)) err = -EIO; } spin_unlock_irq(&chip->lock); return err; } static int snd_riptide_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_riptide *chip = snd_pcm_substream_chip(substream); struct pcmhw *data = 
get_pcmhwdev(substream); struct snd_dma_buffer *sgdlist = &data->sgdlist; int err; snd_printdd("hw params id %d (sgdlist: 0x%p 0x%lx %d)\n", data->id, sgdlist->area, (unsigned long)sgdlist->addr, (int)sgdlist->bytes); if (sgdlist->area) snd_dma_free_pages(sgdlist); if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), sizeof(struct sgd) * (DESC_MAX_MASK + 1), sgdlist)) < 0) { snd_printk(KERN_ERR "Riptide: failed to alloc %d dma bytes\n", (int)sizeof(struct sgd) * (DESC_MAX_MASK + 1)); return err; } data->sgdbuf = (struct sgd *)sgdlist->area; return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_riptide_hw_free(struct snd_pcm_substream *substream) { struct snd_riptide *chip = snd_pcm_substream_chip(substream); struct pcmhw *data = get_pcmhwdev(substream); struct cmdif *cif = chip->cif; if (cif && data) { if (data->lbuspath) freelbuspath(cif, data->source, data->lbuspath); data->lbuspath = NULL; data->source = 0xff; data->intdec[0] = 0xff; data->intdec[1] = 0xff; if (data->sgdlist.area) { snd_dma_free_pages(&data->sgdlist); data->sgdlist.area = NULL; } } return snd_pcm_lib_free_pages(substream); } static int snd_riptide_playback_open(struct snd_pcm_substream *substream) { struct snd_riptide *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct pcmhw *data; int sub_num = substream->number; chip->playback_substream[sub_num] = substream; runtime->hw = snd_riptide_playback; data = kzalloc(sizeof(struct pcmhw), GFP_KERNEL); if (data == NULL) return -ENOMEM; data->paths = lbus_play_paths[sub_num]; data->id = play_ids[sub_num]; data->source = play_sources[sub_num]; data->intdec[0] = 0xff; data->intdec[1] = 0xff; data->state = ST_STOP; runtime->private_data = data; return snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); } static int snd_riptide_capture_open(struct snd_pcm_substream *substream) { struct snd_riptide *chip = 
snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct pcmhw *data;

	/* tail of snd_riptide_capture_open(): allocate the per-stream state,
	 * wire it to the fixed capture path (PADC fed from the AC-link),
	 * and require an integral number of periods. */
	chip->capture_substream = substream;
	runtime->hw = snd_riptide_capture;
	data = kzalloc(sizeof(struct pcmhw), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;
	data->paths = lbus_rec_path;
	data->id = PADC;
	data->source = ACLNK2PADC;
	data->intdec[0] = 0xff;	/* 0xff = no SRC allocated yet */
	data->intdec[1] = 0xff;
	data->state = ST_STOP;
	runtime->private_data = data;
	return snd_pcm_hw_constraint_integer(runtime,
					     SNDRV_PCM_HW_PARAM_PERIODS);
}

/*
 * snd_riptide_playback_close - release the per-stream state allocated in
 * snd_riptide_playback_open() and clear the substream slot.
 */
static int snd_riptide_playback_close(struct snd_pcm_substream *substream)
{
	struct snd_riptide *chip = snd_pcm_substream_chip(substream);
	struct pcmhw *data = get_pcmhwdev(substream);
	int sub_num = substream->number;

	substream->runtime->private_data = NULL;
	chip->playback_substream[sub_num] = NULL;
	kfree(data);
	return 0;
}

/*
 * snd_riptide_capture_close - capture counterpart of the close above.
 */
static int snd_riptide_capture_close(struct snd_pcm_substream *substream)
{
	struct snd_riptide *chip = snd_pcm_substream_chip(substream);
	struct pcmhw *data = get_pcmhwdev(substream);

	substream->runtime->private_data = NULL;
	chip->capture_substream = NULL;
	kfree(data);
	return 0;
}

/* PCM callbacks for the playback direction. */
static struct snd_pcm_ops snd_riptide_playback_ops = {
	.open = snd_riptide_playback_open,
	.close = snd_riptide_playback_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_riptide_hw_params,
	.hw_free = snd_riptide_hw_free,
	.prepare = snd_riptide_prepare,
	.page = snd_pcm_sgbuf_ops_page,	/* scatter-gather buffer pages */
	.trigger = snd_riptide_trigger,
	.pointer = snd_riptide_pointer,
};

/* PCM callbacks for the capture direction (same handlers, different open/close). */
static struct snd_pcm_ops snd_riptide_capture_ops = {
	.open = snd_riptide_capture_open,
	.close = snd_riptide_capture_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_riptide_hw_params,
	.hw_free = snd_riptide_hw_free,
	.prepare = snd_riptide_prepare,
	.page = snd_pcm_sgbuf_ops_page,
	.trigger = snd_riptide_trigger,
	.pointer = snd_riptide_pointer,
};

/*
 * snd_riptide_pcm - create the PCM device with PLAYBACK_SUBSTREAMS playback
 * substreams and one capture substream.  (Body continues in the next chunk.)
 */
static int __devinit
snd_riptide_pcm(struct snd_riptide *chip, int device, struct snd_pcm **rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
	if ((err =
snd_pcm_new(chip->card, "RIPTIDE", device, PLAYBACK_SUBSTREAMS, 1, &pcm)) < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_riptide_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_riptide_capture_ops); pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, "RIPTIDE"); chip->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), 64 * 1024, 128 * 1024); if (rpcm) *rpcm = pcm; return 0; } static irqreturn_t snd_riptide_interrupt(int irq, void *dev_id) { struct snd_riptide *chip = dev_id; struct cmdif *cif = chip->cif; if (cif) { chip->received_irqs++; if (IS_EOBIRQ(cif->hwport) || IS_EOSIRQ(cif->hwport) || IS_EOCIRQ(cif->hwport)) { chip->handled_irqs++; tasklet_schedule(&chip->riptide_tq); } if (chip->rmidi && IS_MPUIRQ(cif->hwport)) { chip->handled_irqs++; snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data); } SET_AIACK(cif->hwport); } return IRQ_HANDLED; } static void snd_riptide_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_riptide *chip = ac97->private_data; struct cmdif *cif = chip->cif; union cmdret rptr = CMDRET_ZERO; int i = 0; if (snd_BUG_ON(!cif)) return; snd_printdd("Write AC97 reg 0x%x 0x%x\n", reg, val); do { SEND_SACR(cif, val, reg); SEND_RACR(cif, reg, &rptr); } while (rptr.retwords[1] != val && i++ < MAX_WRITE_RETRY); if (i > MAX_WRITE_RETRY) snd_printdd("Write AC97 reg failed\n"); } static unsigned short snd_riptide_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_riptide *chip = ac97->private_data; struct cmdif *cif = chip->cif; union cmdret rptr = CMDRET_ZERO; if (snd_BUG_ON(!cif)) return 0; if (SEND_RACR(cif, reg, &rptr) != 0) SEND_RACR(cif, reg, &rptr); snd_printdd("Read AC97 reg 0x%x got 0x%x\n", reg, rptr.retwords[1]); return rptr.retwords[1]; } static int snd_riptide_initialize(struct snd_riptide *chip) { struct cmdif *cif; unsigned int device_id; int err; if (snd_BUG_ON(!chip)) 
return -EINVAL; cif = chip->cif; if (!cif) { if ((cif = kzalloc(sizeof(struct cmdif), GFP_KERNEL)) == NULL) return -ENOMEM; cif->hwport = (struct riptideport *)chip->port; spin_lock_init(&cif->lock); chip->cif = cif; } cif->is_reset = 0; if ((err = riptide_reset(cif, chip)) != 0) return err; device_id = chip->device_id; switch (device_id) { case 0x4310: case 0x4320: case 0x4330: snd_printdd("Modem enable?\n"); SEND_SETDPLL(cif); break; } snd_printdd("Enabling MPU IRQs\n"); if (chip->rmidi) SET_EMPUIRQ(cif->hwport); return err; } static int snd_riptide_free(struct snd_riptide *chip) { struct cmdif *cif; if (!chip) return 0; if ((cif = chip->cif)) { SET_GRESET(cif->hwport); udelay(100); UNSET_GRESET(cif->hwport); kfree(chip->cif); } if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->fw_entry) release_firmware(chip->fw_entry); release_and_free_resource(chip->res_port); kfree(chip); return 0; } static int snd_riptide_dev_free(struct snd_device *device) { struct snd_riptide *chip = device->device_data; return snd_riptide_free(chip); } static int __devinit snd_riptide_create(struct snd_card *card, struct pci_dev *pci, struct snd_riptide **rchip) { struct snd_riptide *chip; struct riptideport *hwport; int err; static struct snd_device_ops ops = { .dev_free = snd_riptide_dev_free, }; *rchip = NULL; if ((err = pci_enable_device(pci)) < 0) return err; if (!(chip = kzalloc(sizeof(struct snd_riptide), GFP_KERNEL))) return -ENOMEM; spin_lock_init(&chip->lock); chip->card = card; chip->pci = pci; chip->irq = -1; chip->openstreams = 0; chip->port = pci_resource_start(pci, 0); chip->received_irqs = 0; chip->handled_irqs = 0; chip->cif = NULL; tasklet_init(&chip->riptide_tq, riptide_handleirq, (unsigned long)chip); if ((chip->res_port = request_region(chip->port, 64, "RIPTIDE")) == NULL) { snd_printk(KERN_ERR "Riptide: unable to grab region 0x%lx-0x%lx\n", chip->port, chip->port + 64 - 1); snd_riptide_free(chip); return -EBUSY; } hwport = (struct riptideport *)chip->port; 
UNSET_AIE(hwport); if (request_irq(pci->irq, snd_riptide_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "Riptide: unable to grab IRQ %d\n", pci->irq); snd_riptide_free(chip); return -EBUSY; } chip->irq = pci->irq; chip->device_id = pci->device; pci_set_master(pci); if ((err = snd_riptide_initialize(chip)) < 0) { snd_riptide_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_riptide_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; } static void snd_riptide_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_riptide *chip = entry->private_data; struct pcmhw *data; int i; struct cmdif *cif = NULL; unsigned char p[256]; unsigned short rval = 0, lval = 0; unsigned int rate; if (!chip) return; snd_iprintf(buffer, "%s\n\n", chip->card->longname); snd_iprintf(buffer, "Device ID: 0x%x\nReceived IRQs: (%ld)%ld\nPorts:", chip->device_id, chip->handled_irqs, chip->received_irqs); for (i = 0; i < 64; i += 4) snd_iprintf(buffer, "%c%02x: %08x", (i % 16) ? 
' ' : '\n', i, inl(chip->port + i)); if ((cif = chip->cif)) { snd_iprintf(buffer, "\nVersion: ASIC: %d CODEC: %d AUXDSP: %d PROG: %d", chip->firmware.firmware.ASIC, chip->firmware.firmware.CODEC, chip->firmware.firmware.AUXDSP, chip->firmware.firmware.PROG); snd_iprintf(buffer, "\nDigital mixer:"); for (i = 0; i < 12; i++) { getmixer(cif, i, &rval, &lval); snd_iprintf(buffer, "\n %d: %d %d", i, rval, lval); } snd_iprintf(buffer, "\nARM Commands num: %d failed: %d time: %d max: %d min: %d", cif->cmdcnt, cif->errcnt, cif->cmdtime, cif->cmdtimemax, cif->cmdtimemin); } snd_iprintf(buffer, "\nOpen streams %d:\n", chip->openstreams); for (i = 0; i < PLAYBACK_SUBSTREAMS; i++) { if (chip->playback_substream[i] && chip->playback_substream[i]->runtime && (data = chip->playback_substream[i]->runtime->private_data)) { snd_iprintf(buffer, "stream: %d mixer: %d source: %d (%d,%d)\n", data->id, data->mixer, data->source, data->intdec[0], data->intdec[1]); if (!(getsamplerate(cif, data->intdec, &rate))) snd_iprintf(buffer, "rate: %d\n", rate); } } if (chip->capture_substream && chip->capture_substream->runtime && (data = chip->capture_substream->runtime->private_data)) { snd_iprintf(buffer, "stream: %d mixer: %d source: %d (%d,%d)\n", data->id, data->mixer, data->source, data->intdec[0], data->intdec[1]); if (!(getsamplerate(cif, data->intdec, &rate))) snd_iprintf(buffer, "rate: %d\n", rate); } snd_iprintf(buffer, "Paths:\n"); i = getpaths(cif, p); while (i >= 2) { i -= 2; snd_iprintf(buffer, "%x->%x ", p[i], p[i + 1]); } snd_iprintf(buffer, "\n"); } static void __devinit snd_riptide_proc_init(struct snd_riptide *chip) { struct snd_info_entry *entry; if (!snd_card_proc_new(chip->card, "riptide", &entry)) snd_info_set_text_ops(entry, chip, snd_riptide_proc_read); } static int __devinit snd_riptide_mixer(struct snd_riptide *chip) { struct snd_ac97_bus *pbus; struct snd_ac97_template ac97; int err = 0; static struct snd_ac97_bus_ops ops = { .write = snd_riptide_codec_write, .read = 
snd_riptide_codec_read, }; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.scaps = AC97_SCAP_SKIP_MODEM; if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &pbus)) < 0) return err; chip->ac97_bus = pbus; ac97.pci = chip->pci; if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0) return err; return err; } #ifdef SUPPORT_JOYSTICK static int __devinit snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id) { static int dev; struct gameport *gameport; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } if (!joystick_port[dev++]) return 0; gameport = gameport_allocate_port(); if (!gameport) return -ENOMEM; if (!request_region(joystick_port[dev], 8, "Riptide gameport")) { snd_printk(KERN_WARNING "Riptide: cannot grab gameport 0x%x\n", joystick_port[dev]); gameport_free_port(gameport); return -EBUSY; } gameport->io = joystick_port[dev]; gameport_register_port(gameport); pci_set_drvdata(pci, gameport); return 0; } static void __devexit snd_riptide_joystick_remove(struct pci_dev *pci) { struct gameport *gameport = pci_get_drvdata(pci); if (gameport) { release_region(gameport->io, 8); gameport_unregister_port(gameport); pci_set_drvdata(pci, NULL); } } #endif static int __devinit snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_riptide *chip; unsigned short val; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; err = snd_riptide_create(card, pci, &chip); if (err < 0) goto error; card->private_data = chip; err = snd_riptide_pcm(chip, 0, NULL); if (err < 0) goto error; err = snd_riptide_mixer(chip); if (err < 0) goto error; val = LEGACY_ENABLE_ALL; if (opl3_port[dev]) val |= LEGACY_ENABLE_FM; #ifdef SUPPORT_JOYSTICK if (joystick_port[dev]) val |= LEGACY_ENABLE_GAMEPORT; #endif if 
(mpu_port[dev]) val |= LEGACY_ENABLE_MPU_INT | LEGACY_ENABLE_MPU; val |= (chip->irq << 4) & 0xf0; pci_write_config_word(chip->pci, PCI_EXT_Legacy_Mask, val); if (mpu_port[dev]) { val = mpu_port[dev]; pci_write_config_word(chip->pci, PCI_EXT_MPU_Base, val); err = snd_mpu401_uart_new(card, 0, MPU401_HW_RIPTIDE, val, MPU401_INFO_IRQ_HOOK, -1, &chip->rmidi); if (err < 0) snd_printk(KERN_WARNING "Riptide: Can't Allocate MPU at 0x%x\n", val); else chip->mpuaddr = val; } if (opl3_port[dev]) { val = opl3_port[dev]; pci_write_config_word(chip->pci, PCI_EXT_FM_Base, val); err = snd_opl3_create(card, val, val + 2, OPL3_HW_RIPTIDE, 0, &chip->opl3); if (err < 0) snd_printk(KERN_WARNING "Riptide: Can't Allocate OPL3 at 0x%x\n", val); else { chip->opladdr = val; err = snd_opl3_hwdep_new(chip->opl3, 0, 1, NULL); if (err < 0) snd_printk(KERN_WARNING "Riptide: Can't Allocate OPL3-HWDEP\n"); } } #ifdef SUPPORT_JOYSTICK if (joystick_port[dev]) { val = joystick_port[dev]; pci_write_config_word(chip->pci, PCI_EXT_Game_Base, val); chip->gameaddr = val; } #endif strcpy(card->driver, "RIPTIDE"); strcpy(card->shortname, "Riptide"); #ifdef SUPPORT_JOYSTICK snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x gameport 0x%x", card->shortname, chip->port, chip->irq, chip->mpuaddr, chip->opladdr, chip->gameaddr); #else snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx, irq %i mpu 0x%x opl3 0x%x", card->shortname, chip->port, chip->irq, chip->mpuaddr, chip->opladdr); #endif snd_riptide_proc_init(chip); err = snd_card_register(card); if (err < 0) goto error; pci_set_drvdata(pci, card); dev++; return 0; error: snd_card_free(card); return err; } static void __devexit snd_card_riptide_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_riptide_ids, .probe = snd_card_riptide_probe, .remove = __devexit_p(snd_card_riptide_remove), #ifdef 
CONFIG_PM .suspend = riptide_suspend, .resume = riptide_resume, #endif }; #ifdef SUPPORT_JOYSTICK static struct pci_driver joystick_driver = { .name = KBUILD_MODNAME "-joystick", .id_table = snd_riptide_joystick_ids, .probe = snd_riptide_joystick_probe, .remove = __devexit_p(snd_riptide_joystick_remove), }; #endif static int __init alsa_card_riptide_init(void) { int err; err = pci_register_driver(&driver); if (err < 0) return err; #if defined(SUPPORT_JOYSTICK) err = pci_register_driver(&joystick_driver); /* On failure unregister formerly registered audio driver */ if (err < 0) pci_unregister_driver(&driver); #endif return err; } static void __exit alsa_card_riptide_exit(void) { pci_unregister_driver(&driver); #if defined(SUPPORT_JOYSTICK) pci_unregister_driver(&joystick_driver); #endif } module_init(alsa_card_riptide_init); module_exit(alsa_card_riptide_exit);
gpl-2.0
BytecodeMe/BCM-mako
drivers/infiniband/core/cm.c
4700
110021
/* * Copyright (c) 2004-2007 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/module.h> #include <linux/err.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/workqueue.h> #include <linux/kdev_t.h> #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include "cm_msgs.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("InfiniBand CM"); MODULE_LICENSE("Dual BSD/GPL"); static void cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device); static struct ib_client cm_client = { .name = "cm", .add = cm_add_one, .remove = cm_remove_one }; static struct ib_cm { spinlock_t lock; struct list_head device_list; rwlock_t device_lock; struct rb_root listen_service_table; u64 listen_service_id; /* struct rb_root peer_service_table; todo: fix peer to peer */ struct rb_root remote_qp_table; struct rb_root remote_id_table; struct rb_root remote_sidr_table; struct idr local_id_table; __be32 random_id_operand; struct list_head timewait_list; struct workqueue_struct *wq; } cm; /* Counter indexes ordered by attribute ID */ enum { CM_REQ_COUNTER, CM_MRA_COUNTER, CM_REJ_COUNTER, CM_REP_COUNTER, CM_RTU_COUNTER, CM_DREQ_COUNTER, CM_DREP_COUNTER, CM_SIDR_REQ_COUNTER, CM_SIDR_REP_COUNTER, CM_LAP_COUNTER, CM_APR_COUNTER, CM_ATTR_COUNT, CM_ATTR_ID_OFFSET = 0x0010, }; enum { CM_XMIT, CM_XMIT_RETRIES, CM_RECV, CM_RECV_DUPLICATES, CM_COUNTER_GROUPS }; static char const counter_group_names[CM_COUNTER_GROUPS] [sizeof("cm_rx_duplicates")] = { "cm_tx_msgs", "cm_tx_retries", "cm_rx_msgs", "cm_rx_duplicates" }; struct cm_counter_group { struct kobject obj; atomic_long_t counter[CM_ATTR_COUNT]; }; struct cm_counter_attribute { struct attribute attr; int index; }; #define CM_COUNTER_ATTR(_name, _index) \ struct cm_counter_attribute cm_##_name##_counter_attr = { \ .attr = { .name = __stringify(_name), 
.mode = 0444 }, \ .index = _index \ } static CM_COUNTER_ATTR(req, CM_REQ_COUNTER); static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER); static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER); static CM_COUNTER_ATTR(rep, CM_REP_COUNTER); static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER); static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER); static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER); static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER); static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER); static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER); static CM_COUNTER_ATTR(apr, CM_APR_COUNTER); static struct attribute *cm_counter_default_attrs[] = { &cm_req_counter_attr.attr, &cm_mra_counter_attr.attr, &cm_rej_counter_attr.attr, &cm_rep_counter_attr.attr, &cm_rtu_counter_attr.attr, &cm_dreq_counter_attr.attr, &cm_drep_counter_attr.attr, &cm_sidr_req_counter_attr.attr, &cm_sidr_rep_counter_attr.attr, &cm_lap_counter_attr.attr, &cm_apr_counter_attr.attr, NULL }; struct cm_port { struct cm_device *cm_dev; struct ib_mad_agent *mad_agent; struct kobject port_obj; u8 port_num; struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; }; struct cm_device { struct list_head list; struct ib_device *ib_device; struct device *device; u8 ack_delay; struct cm_port *port[0]; }; struct cm_av { struct cm_port *port; union ib_gid dgid; struct ib_ah_attr ah_attr; u16 pkey_index; u8 timeout; }; struct cm_work { struct delayed_work work; struct list_head list; struct cm_port *port; struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ __be32 local_id; /* Established / timewait */ __be32 remote_id; struct ib_cm_event cm_event; struct ib_sa_path_rec path[0]; }; struct cm_timewait_info { struct cm_work work; /* Must be first. 
*/ struct list_head list; struct rb_node remote_qp_node; struct rb_node remote_id_node; __be64 remote_ca_guid; __be32 remote_qpn; u8 inserted_remote_qp; u8 inserted_remote_id; }; struct cm_id_private { struct ib_cm_id id; struct rb_node service_node; struct rb_node sidr_id_node; spinlock_t lock; /* Do not acquire inside cm.lock */ struct completion comp; atomic_t refcount; struct ib_mad_send_buf *msg; struct cm_timewait_info *timewait_info; /* todo: use alternate port on send failure */ struct cm_av av; struct cm_av alt_av; struct ib_cm_compare_data *compare_data; void *private_data; __be64 tid; __be32 local_qpn; __be32 remote_qpn; enum ib_qp_type qp_type; __be32 sq_psn; __be32 rq_psn; int timeout_ms; enum ib_mtu path_mtu; __be16 pkey; u8 private_data_len; u8 max_cm_retries; u8 peer_to_peer; u8 responder_resources; u8 initiator_depth; u8 retry_count; u8 rnr_retry_count; u8 service_timeout; u8 target_ack_delay; struct list_head work_list; atomic_t work_count; }; static void cm_work_handler(struct work_struct *work); static inline void cm_deref_id(struct cm_id_private *cm_id_priv) { if (atomic_dec_and_test(&cm_id_priv->refcount)) complete(&cm_id_priv->comp); } static int cm_alloc_msg(struct cm_id_private *cm_id_priv, struct ib_mad_send_buf **msg) { struct ib_mad_agent *mad_agent; struct ib_mad_send_buf *m; struct ib_ah *ah; mad_agent = cm_id_priv->av.port->mad_agent; ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); if (IS_ERR(ah)) return PTR_ERR(ah); m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, cm_id_priv->av.pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); } /* Timeout set by caller if response is expected. 
*/ m->ah = ah; m->retries = cm_id_priv->max_cm_retries; atomic_inc(&cm_id_priv->refcount); m->context[0] = cm_id_priv; *msg = m; return 0; } static int cm_alloc_response_msg(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, struct ib_mad_send_buf **msg) { struct ib_mad_send_buf *m; struct ib_ah *ah; ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, port->port_num); if (IS_ERR(ah)) return PTR_ERR(ah); m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); if (IS_ERR(m)) { ib_destroy_ah(ah); return PTR_ERR(m); } m->ah = ah; *msg = m; return 0; } static void cm_free_msg(struct ib_mad_send_buf *msg) { ib_destroy_ah(msg->ah); if (msg->context[0]) cm_deref_id(msg->context[0]); ib_free_send_mad(msg); } static void * cm_copy_private_data(const void *private_data, u8 private_data_len) { void *data; if (!private_data || !private_data_len) return NULL; data = kmemdup(private_data, private_data_len, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); return data; } static void cm_set_private_data(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len) { if (cm_id_priv->private_data && cm_id_priv->private_data_len) kfree(cm_id_priv->private_data); cm_id_priv->private_data = private_data; cm_id_priv->private_data_len = private_data_len; } static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, struct ib_grh *grh, struct cm_av *av) { av->port = port; av->pkey_index = wc->pkey_index; ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc, grh, &av->ah_attr); } static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) { struct cm_device *cm_dev; struct cm_port *port = NULL; unsigned long flags; int ret; u8 p; read_lock_irqsave(&cm.device_lock, flags); list_for_each_entry(cm_dev, &cm.device_list, list) { if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid, &p, NULL)) { port = cm_dev->port[p-1]; 
break; } } read_unlock_irqrestore(&cm.device_lock, flags); if (!port) return -EINVAL; ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num, be16_to_cpu(path->pkey), &av->pkey_index); if (ret) return ret; av->port = port; ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path, &av->ah_attr); av->timeout = path->packet_life_time + 1; return 0; } static int cm_alloc_id(struct cm_id_private *cm_id_priv) { unsigned long flags; int ret, id; static int next_id; do { spin_lock_irqsave(&cm.lock, flags); ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id, &id); if (!ret) next_id = ((unsigned) id + 1) & MAX_ID_MASK; spin_unlock_irqrestore(&cm.lock, flags); } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; return ret; } static void cm_free_id(__be32 local_id) { spin_lock_irq(&cm.lock); idr_remove(&cm.local_id_table, (__force int) (local_id ^ cm.random_id_operand)); spin_unlock_irq(&cm.lock); } static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; cm_id_priv = idr_find(&cm.local_id_table, (__force int) (local_id ^ cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); else cm_id_priv = NULL; } return cm_id_priv; } static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; spin_lock_irq(&cm.lock); cm_id_priv = cm_get_id(local_id, remote_id); spin_unlock_irq(&cm.lock); return cm_id_priv; } static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask) { int i; for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++) ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] & ((unsigned long *) mask)[i]; } static int cm_compare_data(struct ib_cm_compare_data *src_data, struct ib_cm_compare_data *dst_data) { u8 src[IB_CM_COMPARE_SIZE]; u8 dst[IB_CM_COMPARE_SIZE]; if (!src_data || !dst_data) 
return 0; cm_mask_copy(src, src_data->data, dst_data->mask); cm_mask_copy(dst, dst_data->data, src_data->mask); return memcmp(src, dst, IB_CM_COMPARE_SIZE); } static int cm_compare_private_data(u8 *private_data, struct ib_cm_compare_data *dst_data) { u8 src[IB_CM_COMPARE_SIZE]; if (!dst_data) return 0; cm_mask_copy(src, private_data, dst_data->mask); return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); } /* * Trivial helpers to strip endian annotation and compare; the * endianness doesn't actually matter since we just need a stable * order for the RB tree. */ static int be32_lt(__be32 a, __be32 b) { return (__force u32) a < (__force u32) b; } static int be32_gt(__be32 a, __be32 b) { return (__force u32) a > (__force u32) b; } static int be64_lt(__be64 a, __be64 b) { return (__force u64) a < (__force u64) b; } static int be64_gt(__be64 a, __be64 b) { return (__force u64) a > (__force u64) b; } static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.listen_service_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be64 service_id = cm_id_priv->id.service_id; __be64 service_mask = cm_id_priv->id.service_mask; int data_cmp; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, service_node); data_cmp = cm_compare_data(cm_id_priv->compare_data, cur_cm_id_priv->compare_data); if ((cur_cm_id_priv->id.service_mask & service_id) == (service_mask & cur_cm_id_priv->id.service_id) && (cm_id_priv->id.device == cur_cm_id_priv->id.device) && !data_cmp) return cur_cm_id_priv; if (cm_id_priv->id.device < cur_cm_id_priv->id.device) link = &(*link)->rb_left; else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) link = &(*link)->rb_right; else if (be64_lt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_left; else if (be64_gt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_right; else if (data_cmp < 0) link = 
&(*link)->rb_left; else link = &(*link)->rb_right; } rb_link_node(&cm_id_priv->service_node, parent, link); rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); return NULL; } static struct cm_id_private * cm_find_listen(struct ib_device *device, __be64 service_id, u8 *private_data) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; int data_cmp; while (node) { cm_id_priv = rb_entry(node, struct cm_id_private, service_node); data_cmp = cm_compare_private_data(private_data, cm_id_priv->compare_data); if ((cm_id_priv->id.service_mask & service_id) == cm_id_priv->id.service_id && (cm_id_priv->id.device == device) && !data_cmp) return cm_id_priv; if (device < cm_id_priv->id.device) node = node->rb_left; else if (device > cm_id_priv->id.device) node = node->rb_right; else if (be64_lt(service_id, cm_id_priv->id.service_id)) node = node->rb_left; else if (be64_gt(service_id, cm_id_priv->id.service_id)) node = node->rb_right; else if (data_cmp < 0) node = node->rb_left; else node = node->rb_right; } return NULL; } static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_id_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_id = timewait_info->work.remote_id; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_id = 1; 
rb_link_node(&timewait_info->remote_id_node, parent, link); rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); return NULL; } static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid, __be32 remote_id) { struct rb_node *node = cm.remote_id_table.rb_node; struct cm_timewait_info *timewait_info; while (node) { timewait_info = rb_entry(node, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, timewait_info->work.remote_id)) node = node->rb_left; else if (be32_gt(remote_id, timewait_info->work.remote_id)) node = node->rb_right; else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_left; else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_right; else return timewait_info; } return NULL; } static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_qp_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_qpn = timewait_info->remote_qpn; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_qp_node); if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_left; else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_qp = 1; rb_link_node(&timewait_info->remote_qp_node, parent, link); rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); return NULL; } static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.remote_sidr_table.rb_node; struct rb_node *parent = NULL; struct 
cm_id_private *cur_cm_id_priv; union ib_gid *port_gid = &cm_id_priv->av.dgid; __be32 remote_id = cm_id_priv->id.remote_id; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, sidr_id_node); if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_right; else { int cmp; cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid, sizeof *port_gid); if (cmp < 0) link = &(*link)->rb_left; else if (cmp > 0) link = &(*link)->rb_right; else return cur_cm_id_priv; } } rb_link_node(&cm_id_priv->sidr_id_node, parent, link); rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); return NULL; } static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv, enum ib_cm_sidr_status status) { struct ib_cm_sidr_rep_param param; memset(&param, 0, sizeof param); param.status = status; ib_send_cm_sidr_rep(&cm_id_priv->id, &param); } struct ib_cm_id *ib_create_cm_id(struct ib_device *device, ib_cm_handler cm_handler, void *context) { struct cm_id_private *cm_id_priv; int ret; cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); if (!cm_id_priv) return ERR_PTR(-ENOMEM); cm_id_priv->id.state = IB_CM_IDLE; cm_id_priv->id.device = device; cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; ret = cm_alloc_id(cm_id_priv); if (ret) goto error; spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); atomic_set(&cm_id_priv->work_count, -1); atomic_set(&cm_id_priv->refcount, 1); return &cm_id_priv->id; error: kfree(cm_id_priv); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(ib_create_cm_id); static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv) { struct cm_work *work; if (list_empty(&cm_id_priv->work_list)) return NULL; work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); list_del(&work->list); return work; } static 
void cm_free_work(struct cm_work *work) { if (work->mad_recv_wc) ib_free_recv_mad(work->mad_recv_wc); kfree(work); } static inline int cm_convert_to_ms(int iba_time) { /* approximate conversion to ms from 4.096us x 2^iba_time */ return 1 << max(iba_time - 8, 0); } /* * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time * Because of how ack_timeout is stored, adding one doubles the timeout. * To avoid large timeouts, select the max(ack_delay, life_time + 1), and * increment it (round up) only if the other is within 50%. */ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time) { int ack_timeout = packet_life_time + 1; if (ack_timeout >= ca_ack_delay) ack_timeout += (ca_ack_delay >= (ack_timeout - 1)); else ack_timeout = ca_ack_delay + (ack_timeout >= (ca_ack_delay - 1)); return min(31, ack_timeout); } static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info) { if (timewait_info->inserted_remote_id) { rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); timewait_info->inserted_remote_id = 0; } if (timewait_info->inserted_remote_qp) { rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); timewait_info->inserted_remote_qp = 0; } } static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) { struct cm_timewait_info *timewait_info; timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); if (!timewait_info) return ERR_PTR(-ENOMEM); timewait_info->work.local_id = local_id; INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; return timewait_info; } static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; unsigned long flags; spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list); spin_unlock_irqrestore(&cm.lock, flags); /* * The cm_id could be destroyed by the user before we exit timewait. 
* To protect against this, we search for the cm_id after exiting * timewait before notifying the user that we've exited timewait. */ cm_id_priv->id.state = IB_CM_TIMEWAIT; wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, msecs_to_jiffies(wait_time)); cm_id_priv->timewait_info = NULL; } static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) { unsigned long flags; cm_id_priv->id.state = IB_CM_IDLE; if (cm_id_priv->timewait_info) { spin_lock_irqsave(&cm.lock, flags); cm_cleanup_timewait(cm_id_priv->timewait_info); spin_unlock_irqrestore(&cm.lock, flags); kfree(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; } } static void cm_destroy_id(struct ib_cm_id *cm_id, int err) { struct cm_id_private *cm_id_priv; struct cm_work *work; cm_id_priv = container_of(cm_id, struct cm_id_private, id); retest: spin_lock_irq(&cm_id_priv->lock); switch (cm_id->state) { case IB_CM_LISTEN: cm_id->state = IB_CM_IDLE; spin_unlock_irq(&cm_id_priv->lock); spin_lock_irq(&cm.lock); rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); spin_unlock_irq(&cm.lock); break; case IB_CM_SIDR_REQ_SENT: cm_id->state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_SIDR_REQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); break; case IB_CM_REQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, &cm_id_priv->id.device->node_guid, sizeof cm_id_priv->id.device->node_guid, NULL, 0); break; case IB_CM_REQ_RCVD: if (err == -ENOMEM) { /* Do not reject to allow future retries. 
*/ cm_reset_to_idle(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); } else { spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); } break; case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); /* Fall through */ case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); break; case IB_CM_ESTABLISHED: spin_unlock_irq(&cm_id_priv->lock); if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) break; ib_send_cm_dreq(cm_id, NULL, 0); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_enter_timewait(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); break; case IB_CM_DREQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); ib_send_cm_drep(cm_id, NULL, 0); break; default: spin_unlock_irq(&cm_id_priv->lock); break; } cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); wait_for_completion(&cm_id_priv->comp); while ((work = cm_dequeue_work(cm_id_priv)) != NULL) cm_free_work(work); kfree(cm_id_priv->compare_data); kfree(cm_id_priv->private_data); kfree(cm_id_priv); } void ib_destroy_cm_id(struct ib_cm_id *cm_id) { cm_destroy_id(cm_id, 0); } EXPORT_SYMBOL(ib_destroy_cm_id); int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, struct ib_cm_compare_data *compare_data) { struct cm_id_private *cm_id_priv, *cur_cm_id_priv; unsigned long flags; int ret = 0; service_mask = service_mask ? 
service_mask : ~cpu_to_be64(0); service_id &= service_mask; if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && (service_id != IB_CM_ASSIGN_SERVICE_ID)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); if (cm_id->state != IB_CM_IDLE) return -EINVAL; if (compare_data) { cm_id_priv->compare_data = kzalloc(sizeof *compare_data, GFP_KERNEL); if (!cm_id_priv->compare_data) return -ENOMEM; cm_mask_copy(cm_id_priv->compare_data->data, compare_data->data, compare_data->mask); memcpy(cm_id_priv->compare_data->mask, compare_data->mask, IB_CM_COMPARE_SIZE); } cm_id->state = IB_CM_LISTEN; spin_lock_irqsave(&cm.lock, flags); if (service_id == IB_CM_ASSIGN_SERVICE_ID) { cm_id->service_id = cpu_to_be64(cm.listen_service_id++); cm_id->service_mask = ~cpu_to_be64(0); } else { cm_id->service_id = service_id; cm_id->service_mask = service_mask; } cur_cm_id_priv = cm_insert_listen(cm_id_priv); spin_unlock_irqrestore(&cm.lock, flags); if (cur_cm_id_priv) { cm_id->state = IB_CM_IDLE; kfree(cm_id_priv->compare_data); cm_id_priv->compare_data = NULL; ret = -EBUSY; } return ret; } EXPORT_SYMBOL(ib_cm_listen); static __be64 cm_form_tid(struct cm_id_private *cm_id_priv, enum cm_msg_sequence msg_seq) { u64 hi_tid, low_tid; hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32; low_tid = (u64) ((__force u32)cm_id_priv->id.local_id | (msg_seq << 30)); return cpu_to_be64(hi_tid | low_tid); } static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, __be64 tid) { hdr->base_version = IB_MGMT_BASE_VERSION; hdr->mgmt_class = IB_MGMT_CLASS_CM; hdr->class_version = IB_CM_CLASS_VERSION; hdr->method = IB_MGMT_METHOD_SEND; hdr->attr_id = attr_id; hdr->tid = tid; } static void cm_format_req(struct cm_req_msg *req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_req_param *param) { struct ib_sa_path_rec *pri_path = param->primary_path; struct ib_sa_path_rec *alt_path = param->alternate_path; cm_format_mad_hdr(&req_msg->hdr, 
CM_REQ_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ)); req_msg->local_comm_id = cm_id_priv->id.local_id; req_msg->service_id = param->service_id; req_msg->local_ca_guid = cm_id_priv->id.device->node_guid; cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); cm_req_set_init_depth(req_msg, param->initiator_depth); cm_req_set_remote_resp_timeout(req_msg, param->remote_cm_response_timeout); cm_req_set_qp_type(req_msg, param->qp_type); cm_req_set_flow_ctrl(req_msg, param->flow_control); cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn)); cm_req_set_local_resp_timeout(req_msg, param->local_cm_response_timeout); req_msg->pkey = param->primary_path->pkey; cm_req_set_path_mtu(req_msg, param->primary_path->mtu); cm_req_set_max_cm_retries(req_msg, param->max_cm_retries); if (param->qp_type != IB_QPT_XRC_INI) { cm_req_set_resp_res(req_msg, param->responder_resources); cm_req_set_retry_count(req_msg, param->retry_count); cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count); cm_req_set_srq(req_msg, param->srq); } if (pri_path->hop_limit <= 1) { req_msg->primary_local_lid = pri_path->slid; req_msg->primary_remote_lid = pri_path->dlid; } else { /* Work-around until there's a way to obtain remote LID info */ req_msg->primary_local_lid = IB_LID_PERMISSIVE; req_msg->primary_remote_lid = IB_LID_PERMISSIVE; } req_msg->primary_local_gid = pri_path->sgid; req_msg->primary_remote_gid = pri_path->dgid; cm_req_set_primary_flow_label(req_msg, pri_path->flow_label); cm_req_set_primary_packet_rate(req_msg, pri_path->rate); req_msg->primary_traffic_class = pri_path->traffic_class; req_msg->primary_hop_limit = pri_path->hop_limit; cm_req_set_primary_sl(req_msg, pri_path->sl); cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1)); cm_req_set_primary_local_ack_timeout(req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, pri_path->packet_life_time)); if (alt_path) { if (alt_path->hop_limit <= 1) { req_msg->alt_local_lid = 
alt_path->slid; req_msg->alt_remote_lid = alt_path->dlid; } else { req_msg->alt_local_lid = IB_LID_PERMISSIVE; req_msg->alt_remote_lid = IB_LID_PERMISSIVE; } req_msg->alt_local_gid = alt_path->sgid; req_msg->alt_remote_gid = alt_path->dgid; cm_req_set_alt_flow_label(req_msg, alt_path->flow_label); cm_req_set_alt_packet_rate(req_msg, alt_path->rate); req_msg->alt_traffic_class = alt_path->traffic_class; req_msg->alt_hop_limit = alt_path->hop_limit; cm_req_set_alt_sl(req_msg, alt_path->sl); cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1)); cm_req_set_alt_local_ack_timeout(req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, alt_path->packet_life_time)); } if (param->private_data && param->private_data_len) memcpy(req_msg->private_data, param->private_data, param->private_data_len); } static int cm_validate_req_param(struct ib_cm_req_param *param) { /* peer-to-peer not supported */ if (param->peer_to_peer) return -EINVAL; if (!param->primary_path) return -EINVAL; if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC && param->qp_type != IB_QPT_XRC_INI) return -EINVAL; if (param->private_data && param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) return -EINVAL; if (param->alternate_path && (param->alternate_path->pkey != param->primary_path->pkey || param->alternate_path->mtu != param->primary_path->mtu)) return -EINVAL; return 0; } int ib_send_cm_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param) { struct cm_id_private *cm_id_priv; struct cm_req_msg *req_msg; unsigned long flags; int ret; ret = cm_validate_req_param(param); if (ret) return ret; /* Verify that we're not in timewait. 
*/ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_IDLE) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = -EINVAL; goto out; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); goto out; } ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); if (ret) goto error1; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); if (ret) goto error1; } cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); cm_id_priv->timeout_ms = cm_convert_to_ms( param->primary_path->packet_life_time) * 2 + cm_convert_to_ms( param->remote_cm_response_timeout); cm_id_priv->max_cm_retries = param->max_cm_retries; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->retry_count = param->retry_count; cm_id_priv->path_mtu = param->primary_path->mtu; cm_id_priv->pkey = param->primary_path->pkey; cm_id_priv->qp_type = param->qp_type; ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) goto error1; req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); cm_id_priv->tid = req_msg->hdr.tid; cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg); spin_lock_irqsave(&cm_id_priv->lock, flags); ret = ib_post_send_mad(cm_id_priv->msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); goto error2; } BUG_ON(cm_id->state != IB_CM_IDLE); cm_id->state = IB_CM_REQ_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error2: cm_free_msg(cm_id_priv->msg); error1: 
kfree(cm_id_priv->timewait_info); out: return ret; } EXPORT_SYMBOL(ib_send_cm_req); static int cm_issue_rej(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, enum ib_cm_rej_reason reason, enum cm_msg_response msg_rejected, void *ari, u8 ari_length) { struct ib_mad_send_buf *msg = NULL; struct cm_rej_msg *rej_msg, *rcv_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, &msg); if (ret) return ret; /* We just need common CM header information. Cast to any message. */ rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; rej_msg = (struct cm_rej_msg *) msg->mad; cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); rej_msg->remote_comm_id = rcv_msg->local_comm_id; rej_msg->local_comm_id = rcv_msg->remote_comm_id; cm_rej_set_msg_rejected(rej_msg, msg_rejected); rej_msg->reason = cpu_to_be16(reason); if (ari && ari_length) { cm_rej_set_reject_info_len(rej_msg, ari_length); memcpy(rej_msg->ari, ari, ari_length); } ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); return ret; } static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid, __be32 local_qpn, __be32 remote_qpn) { return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) || ((local_ca_guid == remote_ca_guid) && (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn)))); } static void cm_format_paths_from_req(struct cm_req_msg *req_msg, struct ib_sa_path_rec *primary_path, struct ib_sa_path_rec *alt_path) { memset(primary_path, 0, sizeof *primary_path); primary_path->dgid = req_msg->primary_local_gid; primary_path->sgid = req_msg->primary_remote_gid; primary_path->dlid = req_msg->primary_local_lid; primary_path->slid = req_msg->primary_remote_lid; primary_path->flow_label = cm_req_get_primary_flow_label(req_msg); primary_path->hop_limit = req_msg->primary_hop_limit; primary_path->traffic_class = req_msg->primary_traffic_class; primary_path->reversible = 1; primary_path->pkey = req_msg->pkey; primary_path->sl = cm_req_get_primary_sl(req_msg); 
primary_path->mtu_selector = IB_SA_EQ; primary_path->mtu = cm_req_get_path_mtu(req_msg); primary_path->rate_selector = IB_SA_EQ; primary_path->rate = cm_req_get_primary_packet_rate(req_msg); primary_path->packet_life_time_selector = IB_SA_EQ; primary_path->packet_life_time = cm_req_get_primary_local_ack_timeout(req_msg); primary_path->packet_life_time -= (primary_path->packet_life_time > 0); if (req_msg->alt_local_lid) { memset(alt_path, 0, sizeof *alt_path); alt_path->dgid = req_msg->alt_local_gid; alt_path->sgid = req_msg->alt_remote_gid; alt_path->dlid = req_msg->alt_local_lid; alt_path->slid = req_msg->alt_remote_lid; alt_path->flow_label = cm_req_get_alt_flow_label(req_msg); alt_path->hop_limit = req_msg->alt_hop_limit; alt_path->traffic_class = req_msg->alt_traffic_class; alt_path->reversible = 1; alt_path->pkey = req_msg->pkey; alt_path->sl = cm_req_get_alt_sl(req_msg); alt_path->mtu_selector = IB_SA_EQ; alt_path->mtu = cm_req_get_path_mtu(req_msg); alt_path->rate_selector = IB_SA_EQ; alt_path->rate = cm_req_get_alt_packet_rate(req_msg); alt_path->packet_life_time_selector = IB_SA_EQ; alt_path->packet_life_time = cm_req_get_alt_local_ack_timeout(req_msg); alt_path->packet_life_time -= (alt_path->packet_life_time > 0); } } static void cm_format_req_event(struct cm_work *work, struct cm_id_private *cm_id_priv, struct ib_cm_id *listen_id) { struct cm_req_msg *req_msg; struct ib_cm_req_event_param *param; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.req_rcvd; param->listen_id = listen_id; param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; if (req_msg->alt_local_lid) param->alternate_path = &work->path[1]; else param->alternate_path = NULL; param->remote_ca_guid = req_msg->local_ca_guid; param->remote_qkey = be32_to_cpu(req_msg->local_qkey); param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg)); param->qp_type = cm_req_get_qp_type(req_msg); param->starting_psn = 
be32_to_cpu(cm_req_get_starting_psn(req_msg)); param->responder_resources = cm_req_get_init_depth(req_msg); param->initiator_depth = cm_req_get_resp_res(req_msg); param->local_cm_response_timeout = cm_req_get_remote_resp_timeout(req_msg); param->flow_control = cm_req_get_flow_ctrl(req_msg); param->remote_cm_response_timeout = cm_req_get_local_resp_timeout(req_msg); param->retry_count = cm_req_get_retry_count(req_msg); param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg); param->srq = cm_req_get_srq(req_msg); work->cm_event.private_data = &req_msg->private_data; } static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work) { int ret; /* We will typically only have the current event to report. */ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { spin_lock_irq(&cm_id_priv->lock); work = cm_dequeue_work(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); BUG_ON(!work); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); } cm_deref_id(cm_id_priv); if (ret) cm_destroy_id(&cm_id_priv->id, ret); } static void cm_format_mra(struct cm_mra_msg *mra_msg, struct cm_id_private *cm_id_priv, enum cm_msg_response msg_mraed, u8 service_timeout, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); cm_mra_set_msg_mraed(mra_msg, msg_mraed); mra_msg->local_comm_id = cm_id_priv->id.local_id; mra_msg->remote_comm_id = cm_id_priv->id.remote_id; cm_mra_set_service_timeout(mra_msg, service_timeout); if (private_data && private_data_len) memcpy(mra_msg->private_data, private_data, private_data_len); } static void cm_format_rej(struct cm_rej_msg *rej_msg, struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); 
rej_msg->remote_comm_id = cm_id_priv->id.remote_id; switch(cm_id_priv->id.state) { case IB_CM_REQ_RCVD: rej_msg->local_comm_id = 0; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_MRA_REQ_SENT: rej_msg->local_comm_id = cm_id_priv->id.local_id; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: rej_msg->local_comm_id = cm_id_priv->id.local_id; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP); break; default: rej_msg->local_comm_id = cm_id_priv->id.local_id; cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER); break; } rej_msg->reason = cpu_to_be16(reason); if (ari && ari_length) { cm_rej_set_reject_info_len(rej_msg, ari_length); memcpy(rej_msg->ari, ari, ari_length); } if (private_data && private_data_len) memcpy(rej_msg->private_data, private_data, private_data_len); } static void cm_dup_req_handler(struct cm_work *work, struct cm_id_private *cm_id_priv) { struct ib_mad_send_buf *msg = NULL; int ret; atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. 
	 */
	/* Unlocked peek: a REQ still sitting in REQ_RCVD needs no reply. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		/* Re-send the MRA we previously sent for this REQ. */
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		/* Connection already torn down: tell the peer it's stale. */
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

/*
 * Match an incoming REQ against duplicates, stale connections, and the
 * table of listening cm_ids.  Returns the referenced listen cm_id on
 * success, or NULL after replying (dup MRA/REJ or stale/no-service REJ).
 * On success also transitions cm_id_priv to IB_CM_REQ_RCVD.
 */
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		/* Remote comm-id already known: this is a retransmit. */
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request.
	 */
	/* cm.lock is still held from the duplicate/stale checks above. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id,
					   req_msg->private_data);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	/* Take references on both ids before dropping cm.lock. */
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

/*
 * Handle a newly received connection REQ: create a passive-side cm_id,
 * match it to a listener, resolve the path(s), copy parameters from the
 * REQ, and deliver the REQ event to the listener's handler.
 */
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info =
					cm_create_timewait_info(cm_id_priv->
								id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	/* Dup/stale/listen matching; NULL means a reply was already sent. */
	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		/* NOTE(review): timewait_info is freed here but the pointer
		 * is left dangling; upstream later set it to NULL — confirm
		 * cm_destroy_id cannot touch it on this path. */
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	/* New id inherits the listener's handler/context. */
	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret) {
		/* Couldn't resolve primary path: REJ with our GID as ARI. */
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	/* Cache negotiated parameters from the REQ on the new cm_id. */
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	/* Drop the extra reference taken by cm_match_req. */
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}

/*
 * Fill in a REP MAD from the consumer-supplied reply parameters.
 * XRC target QPs carry the QPN in the EECN field and force SRQ.
 */
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}

/*
 * Send a REP in response to a received REQ.  Valid only in REQ_RCVD or
 * MRA_REQ_SENT; on success transitions the cm_id to REP_SENT.
 */
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if
	   (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	/* context[1] records the state a timeout on this MAD applies to. */
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	/* QPN is a 24-bit field. */
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);

/* Fill in an RTU (Ready To Use) MAD with optional private data. */
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}

/*
 * Send an RTU to complete connection establishment after a REP.  Valid
 * only in REP_RCVD or MRA_REP_SENT; on success the cm_id becomes
 * ESTABLISHED and the private data is cached for duplicate replies.
 */
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);

/*
 * Copy connection parameters from a received REP into the event handed
 * to the consumer.  As with the REQ event, responder/initiator fields
 * are swapped relative to the wire message (peer's view vs ours).
 */
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}

/*
 * Respond to a duplicate REP: resend our RTU if already established, or
 * an MRA if we previously MRA'd the REP; otherwise drop it silently.
 */
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}

/*
 * Handle a received REP on the active side: validate state, register
 * the remote id/QPN for duplicate and stale detection, cache negotiated
 * parameters, then queue or deliver the REP event.
 */
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	/* Lookup by our comm-id only; remote id isn't bound yet. */
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

	/* cm.lock nests inside cm_id_priv->lock here. */
	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection.
	 */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		/* Undo the remote-id insert done just above. */
		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
			 &cm.remote_id_table);
		cm_id_priv->timewait_info->inserted_remote_id = 0;
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
	/* Peer's view is swapped relative to ours. */
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	cm_id_priv->av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
			cm_ack_timeout(cm_id_priv->target_ack_delay,
				       cm_id_priv->alt_av.timeout - 1);

	/* todo: handle peer_to_peer */

	/* Stop retransmitting the REQ now that a REP arrived. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}

/*
 * Deliver the locally generated ESTABLISHED event (queued by
 * ib_cm_notify/cm_establish) to the consumer.
 */
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	/* See comment in cm_establish about lookup.
	 */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	/* Stop retransmitting the outstanding REP. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Handle a received RTU: transition REP_SENT/MRA_REP_RCVD connections
 * to ESTABLISHED; anything else is counted as a duplicate and dropped.
 */
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	/* Stop retransmitting the REP; the peer has acknowledged it. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Fill in a DREQ (disconnect request) MAD.  Uses a fresh transaction id
 * since the DREQ starts a new message sequence.
 */
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
	dreq_msg->local_comm_id = cm_id_priv->id.local_id;
	dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);

	if (private_data && private_data_len)
		memcpy(dreq_msg->private_data, private_data, private_data_len);
}

/*
 * Send a DREQ to begin disconnecting an ESTABLISHED connection.  If a
 * LAP exchange is in flight its MAD is cancelled first.  On allocation
 * or send failure the cm_id is moved straight to TIMEWAIT.
 */
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	if (cm_id->lap_state == IB_CM_LAP_SENT ||
	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);

/* Fill in a DREP (disconnect reply) MAD with optional private data. */
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	drep_msg->local_comm_id = cm_id_priv->id.local_id;
	drep_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(drep_msg->private_data, private_data, private_data_len);
}

/*
 * Send a DREP in response to a received DREQ.  Valid only in DREQ_RCVD;
 * the cm_id enters TIMEWAIT before the reply is posted.
 */
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);

/*
 * Issue a DREP directly from a received DREQ when no matching cm_id
 * exists (e.g. the connection is already gone), echoing the DREQ's
 * transaction and comm ids so the peer stops retrying.
 */
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	/* Swap comm ids: our "remote" is the peer's "local" and vice versa. */
	drep_msg->remote_comm_id = dreq_msg->local_comm_id;
	drep_msg->local_comm_id = dreq_msg->remote_comm_id;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

/*
 * Handle a received DREQ: cancel any outstanding MAD, resend a DREP for
 * TIMEWAIT duplicates, and otherwise queue or deliver the DREQ event.
 */
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv) {
		/* Connection unknown: answer directly so the peer stops. */
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		return -EINVAL;
	}

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->av.port->mad_agent,
				      cm_id_priv->msg);
		break;
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		/* Already disconnected: resend a DREP for the duplicate. */
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Handle a received DREP: move DREQ_SENT/DREQ_RCVD connections to
 * TIMEWAIT and deliver the DREP event.
 */
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	/* Stop retransmitting the DREQ. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Send a REJ for the current connection attempt.  The resulting state
 * transition depends on whether we were rejecting a REQ (reset to idle)
 * or a REP (enter timewait).
 */
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case
	     IB_CM_MRA_REP_SENT:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		/* Rejecting at the REQ stage: back to idle. */
		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		/* Rejecting after sending a REP: must pass through timewait. */
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (ret)
		goto out;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

/* Copy REJ fields (reason, ARI, private data) into the consumer event. */
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = rej_msg->ari;
	param->ari_length = cm_rej_get_reject_info_len(rej_msg);
	param->reason = __be16_to_cpu(rej_msg->reason);
	work->cm_event.private_data = &rej_msg->private_data;
}

/*
 * Find and reference the cm_id a received REJ applies to.  A timeout
 * REJ carries the CA GUID in the ARI and is matched via the timewait
 * table; otherwise the comm ids in the message are used directly.
 */
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irq(&cm.lock);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						   remote_id);
		if (!timewait_info) {
			spin_unlock_irq(&cm.lock);
			return NULL;
		}
		/* Local ids are XORed with a random operand before use as
		 * idr keys (see cm.random_id_operand usage). */
		cm_id_priv = idr_find(&cm.local_id_table, (__force int)
				      (timewait_info->work.local_id ^
				       cm.random_id_operand));
		if (cm_id_priv) {
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irq(&cm.lock);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return
	       cm_id_priv;
}

/*
 * Handle a received REJ: cancel any outstanding MAD, transition the
 * cm_id (idle or timewait depending on stage/reason), and deliver the
 * REJ event.
 */
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		/* fall through */
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		goto out;
	}

	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Send an MRA for the most recently received REQ, REP, or LAP (or just
 * record the extended timeout locally if IB_CM_MRA_FLAG_DELAY is set).
 */
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data,
				    private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* Select the post-MRA state and which message class is MRA'd. */
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		/* ESTABLISHED without a pending LAP falls into default. */
	default:
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);

/*
 * Find and reference the cm_id a received MRA applies to, keyed by
 * which message class the MRA acknowledges.
 */
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (cm_mra_get_msg_mraed(mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(mra_msg->remote_comm_id, 0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(mra_msg->remote_comm_id,
				     mra_msg->local_comm_id);
	default:
		return NULL;
	}
}

/*
 * Handle a received MRA: extend the retransmission timeout of the
 * outstanding REQ/REP/LAP MAD and record the MRA'd state.
 */
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;
	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	/* New MAD timeout: peer's advertised service timeout plus the
	 * path's packet round-trip allowance. */
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		/* fall through */
	default:
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irq(&cm_id_priv->lock);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

/*
 * Fill in a LAP (Load Alternate Path) MAD from the alternate path
 * record supplied by the consumer.
 */
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only...
*/ cm_lap_set_local_ack_timeout(lap_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, alternate_path->packet_life_time)); if (private_data && private_data_len) memcpy(lap_msg->private_data, private_data, private_data_len); } int ib_send_cm_lap(struct ib_cm_id *cm_id, struct ib_sa_path_rec *alternate_path, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_ESTABLISHED || (cm_id->lap_state != IB_CM_LAP_UNINIT && cm_id->lap_state != IB_CM_LAP_IDLE)) { ret = -EINVAL; goto out; } ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); if (ret) goto out; cm_id_priv->alt_av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->alt_av.timeout - 1); ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, alternate_path, private_data, private_data_len); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->lap_state = IB_CM_LAP_SENT; cm_id_priv->msg = msg; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_lap); static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, struct ib_sa_path_rec *path, struct cm_lap_msg *lap_msg) { memset(path, 0, sizeof *path); path->dgid = lap_msg->alt_local_gid; path->sgid = lap_msg->alt_remote_gid; path->dlid = lap_msg->alt_local_lid; path->slid = lap_msg->alt_remote_lid; path->flow_label = cm_lap_get_flow_label(lap_msg); path->hop_limit = lap_msg->alt_hop_limit; path->traffic_class = cm_lap_get_traffic_class(lap_msg); 
path->reversible = 1; path->pkey = cm_id_priv->pkey; path->sl = cm_lap_get_sl(lap_msg); path->mtu_selector = IB_SA_EQ; path->mtu = cm_id_priv->path_mtu; path->rate_selector = IB_SA_EQ; path->rate = cm_lap_get_packet_rate(lap_msg); path->packet_life_time_selector = IB_SA_EQ; path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg); path->packet_life_time -= (path->packet_life_time > 0); } static int cm_lap_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_lap_msg *lap_msg; struct ib_cm_lap_event_param *param; struct ib_mad_send_buf *msg = NULL; int ret; /* todo: verify LAP request and send reject APR if invalid. */ lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id, lap_msg->local_comm_id); if (!cm_id_priv) return -EINVAL; param = &work->cm_event.param.lap_rcvd; param->alternate_path = &work->path[0]; cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); work->cm_event.private_data = &lap_msg->private_data; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) goto unlock; switch (cm_id_priv->id.lap_state) { case IB_CM_LAP_UNINIT: case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_OTHER, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); if (ib_post_send_mad(msg, NULL)) cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_LAP_COUNTER]); goto unlock; default: goto unlock; } cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; cm_id_priv->tid = lap_msg->hdr.tid; cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; unlock: spin_unlock_irq(&cm_id_priv->lock); deref: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_apr(struct cm_apr_msg *apr_msg, struct cm_id_private *cm_id_priv, enum ib_cm_apr_status status, void *info, u8 info_length, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid); apr_msg->local_comm_id = cm_id_priv->id.local_id; apr_msg->remote_comm_id = cm_id_priv->id.remote_id; apr_msg->ap_status = (u8) status; if (info && info_length) { apr_msg->info_length = info_length; memcpy(apr_msg->info, info, info_length); } if (private_data && private_data_len) memcpy(apr_msg->private_data, private_data, private_data_len); } int ib_send_cm_apr(struct ib_cm_id *cm_id, enum ib_cm_apr_status status, void *info, u8 info_length, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) || (info && info_length > IB_CM_APR_INFO_LENGTH)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_ESTABLISHED || (cm_id->lap_state != IB_CM_LAP_RCVD && cm_id->lap_state != IB_CM_MRA_LAP_SENT)) { ret = -EINVAL; goto out; } ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, 
status, info, info_length, private_data, private_data_len); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->lap_state = IB_CM_LAP_IDLE; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_apr); static int cm_apr_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_apr_msg *apr_msg; int ret; apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id, apr_msg->local_comm_id); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. */ work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; work->cm_event.private_data = &apr_msg->private_data; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED || (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); cm_id_priv->msg = NULL; ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_timewait_handler(struct cm_work *work) { struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; int ret; timewait_info = (struct cm_timewait_info *)work; spin_lock_irq(&cm.lock); list_del(&timewait_info->list); spin_unlock_irq(&cm.lock); cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); if (!cm_id_priv) return -EINVAL; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_TIMEWAIT || 
cm_id_priv->remote_qpn != timewait_info->remote_qpn) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); spin_unlock_irq(&cm_id_priv->lock); if (ret) cm_process_work(cm_id_priv, work); else cm_deref_id(cm_id_priv); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_req_param *param) { cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR)); sidr_req_msg->request_id = cm_id_priv->id.local_id; sidr_req_msg->pkey = param->path->pkey; sidr_req_msg->service_id = param->service_id; if (param->private_data && param->private_data_len) memcpy(sidr_req_msg->private_data, param->private_data, param->private_data_len); } int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, struct ib_cm_sidr_req_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if (!param->path || (param->private_data && param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); ret = cm_init_av_by_path(param->path, &cm_id_priv->av); if (ret) goto out; cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); cm_id_priv->timeout_ms = param->timeout_ms; cm_id_priv->max_cm_retries = param->max_cm_retries; ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto out; cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, param); msg->timeout_ms = cm_id_priv->timeout_ms; msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state == IB_CM_IDLE) ret = ib_post_send_mad(msg, NULL); else ret = -EINVAL; if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); 
cm_free_msg(msg); goto out; } cm_id->state = IB_CM_SIDR_REQ_SENT; cm_id_priv->msg = msg; spin_unlock_irqrestore(&cm_id_priv->lock, flags); out: return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_req); static void cm_format_sidr_req_event(struct cm_work *work, struct ib_cm_id *listen_id) { struct cm_sidr_req_msg *sidr_req_msg; struct ib_cm_sidr_req_event_param *param; sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_req_rcvd; param->pkey = __be16_to_cpu(sidr_req_msg->pkey); param->listen_id = listen_id; param->port = work->port->port_num; work->cm_event.private_data = &sidr_req_msg->private_data; } static int cm_sidr_req_handler(struct cm_work *work) { struct ib_cm_id *cm_id; struct cm_id_private *cm_id_priv, *cur_cm_id_priv; struct cm_sidr_req_msg *sidr_req_msg; struct ib_wc *wc; cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); if (IS_ERR(cm_id)) return PTR_ERR(cm_id); cm_id_priv = container_of(cm_id, struct cm_id_private, id); /* Record SGID/SLID and request ID for lookup. */ sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; wc = work->mad_recv_wc->wc; cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid); cm_id_priv->av.dgid.global.interface_id = 0; cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); cm_id_priv->id.remote_id = sidr_req_msg->request_id; cm_id_priv->tid = sidr_req_msg->hdr.tid; atomic_inc(&cm_id_priv->work_count); spin_lock_irq(&cm.lock); cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (cur_cm_id_priv) { spin_unlock_irq(&cm.lock); atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. 
*/ } cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; cur_cm_id_priv = cm_find_listen(cm_id->device, sidr_req_msg->service_id, sidr_req_msg->private_data); if (!cur_cm_id_priv) { spin_unlock_irq(&cm.lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED); goto out; /* No match. */ } atomic_inc(&cur_cm_id_priv->refcount); atomic_inc(&cm_id_priv->refcount); spin_unlock_irq(&cm.lock); cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; cm_id_priv->id.context = cur_cm_id_priv->id.context; cm_id_priv->id.service_id = sidr_req_msg->service_id; cm_id_priv->id.service_mask = ~cpu_to_be64(0); cm_format_sidr_req_event(work, &cur_cm_id_priv->id); cm_process_work(cm_id_priv, work); cm_deref_id(cur_cm_id_priv); return 0; out: ib_destroy_cm_id(&cm_id_priv->id); return -EINVAL; } static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param) { cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, cm_id_priv->tid); sidr_rep_msg->request_id = cm_id_priv->id.remote_id; sidr_rep_msg->status = param->status; cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num)); sidr_rep_msg->service_id = cm_id_priv->id.service_id; sidr_rep_msg->qkey = cpu_to_be32(param->qkey); if (param->info && param->info_length) memcpy(sidr_rep_msg->info, param->info, param->info_length); if (param->private_data && param->private_data_len) memcpy(sidr_rep_msg->private_data, param->private_data, param->private_data_len); } int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, struct ib_cm_sidr_rep_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; int ret; if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || (param->private_data && param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_SIDR_REQ_RCVD) { ret = 
-EINVAL; goto error; } ret = cm_alloc_msg(cm_id_priv, &msg); if (ret) goto error; cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, param); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); return ret; } cm_id->state = IB_CM_IDLE; spin_unlock_irqrestore(&cm_id_priv->lock, flags); spin_lock_irqsave(&cm.lock, flags); rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); spin_unlock_irqrestore(&cm.lock, flags); return 0; error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_rep); static void cm_format_sidr_rep_event(struct cm_work *work) { struct cm_sidr_rep_msg *sidr_rep_msg; struct ib_cm_sidr_rep_event_param *param; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_rep_rcvd; param->status = sidr_rep_msg->status; param->qkey = be32_to_cpu(sidr_rep_msg->qkey); param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); param->info = &sidr_rep_msg->info; param->info_len = sidr_rep_msg->info_length; work->cm_event.private_data = &sidr_rep_msg->private_data; } static int cm_sidr_rep_handler(struct cm_work *work) { struct cm_sidr_rep_msg *sidr_rep_msg; struct cm_id_private *cm_id_priv; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. 
*/ spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); cm_format_sidr_rep_event(work); cm_process_work(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_process_send_error(struct ib_mad_send_buf *msg, enum ib_wc_status wc_status) { struct cm_id_private *cm_id_priv; struct ib_cm_event cm_event; enum ib_cm_state state; int ret; memset(&cm_event, 0, sizeof cm_event); cm_id_priv = msg->context[0]; /* Discard old sends or ones without a response. */ spin_lock_irq(&cm_id_priv->lock); state = (enum ib_cm_state) (unsigned long) msg->context[1]; if (msg != cm_id_priv->msg || state != cm_id_priv->id.state) goto discard; switch (state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REQ_ERROR; break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REP_ERROR; break; case IB_CM_DREQ_SENT: cm_enter_timewait(cm_id_priv); cm_event.event = IB_CM_DREQ_ERROR; break; case IB_CM_SIDR_REQ_SENT: cm_id_priv->id.state = IB_CM_IDLE; cm_event.event = IB_CM_SIDR_REQ_ERROR; break; default: goto discard; } spin_unlock_irq(&cm_id_priv->lock); cm_event.param.send_status = wc_status; /* No other events can occur on the cm_id at this point. 
*/ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); cm_free_msg(msg); if (ret) ib_destroy_cm_id(&cm_id_priv->id); return; discard: spin_unlock_irq(&cm_id_priv->lock); cm_free_msg(msg); } static void cm_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_send_buf *msg = mad_send_wc->send_buf; struct cm_port *port; u16 attr_index; port = mad_agent->context; attr_index = be16_to_cpu(((struct ib_mad_hdr *) msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; /* * If the send was in response to a received message (context[0] is not * set to a cm_id), and is not a REJ, then it is a send that was * manually retried. */ if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; atomic_long_add(1 + msg->retries, &port->counter_group[CM_XMIT].counter[attr_index]); if (msg->retries) atomic_long_add(msg->retries, &port->counter_group[CM_XMIT_RETRIES]. counter[attr_index]); switch (mad_send_wc->status) { case IB_WC_SUCCESS: case IB_WC_WR_FLUSH_ERR: cm_free_msg(msg); break; default: if (msg->context[0] && msg->context[1]) cm_process_send_error(msg, mad_send_wc->status); else cm_free_msg(msg); break; } } static void cm_work_handler(struct work_struct *_work) { struct cm_work *work = container_of(_work, struct cm_work, work.work); int ret; switch (work->cm_event.event) { case IB_CM_REQ_RECEIVED: ret = cm_req_handler(work); break; case IB_CM_MRA_RECEIVED: ret = cm_mra_handler(work); break; case IB_CM_REJ_RECEIVED: ret = cm_rej_handler(work); break; case IB_CM_REP_RECEIVED: ret = cm_rep_handler(work); break; case IB_CM_RTU_RECEIVED: ret = cm_rtu_handler(work); break; case IB_CM_USER_ESTABLISHED: ret = cm_establish_handler(work); break; case IB_CM_DREQ_RECEIVED: ret = cm_dreq_handler(work); break; case IB_CM_DREP_RECEIVED: ret = cm_drep_handler(work); break; case IB_CM_SIDR_REQ_RECEIVED: ret = cm_sidr_req_handler(work); break; case IB_CM_SIDR_REP_RECEIVED: ret = cm_sidr_rep_handler(work); break; case IB_CM_LAP_RECEIVED: ret 
= cm_lap_handler(work); break; case IB_CM_APR_RECEIVED: ret = cm_apr_handler(work); break; case IB_CM_TIMEWAIT_EXIT: ret = cm_timewait_handler(work); break; default: ret = -EINVAL; break; } if (ret) cm_free_work(work); } static int cm_establish(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; struct cm_work *work; unsigned long flags; int ret = 0; work = kmalloc(sizeof *work, GFP_ATOMIC); if (!work) return -ENOMEM; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id->state) { case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_id->state = IB_CM_ESTABLISHED; break; case IB_CM_ESTABLISHED: ret = -EISCONN; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (ret) { kfree(work); goto out; } /* * The CM worker thread may try to destroy the cm_id before it * can execute this work item. To prevent potential deadlock, * we need to find the cm_id once we're in the context of the * worker thread, rather than holding a reference on it. 
*/ INIT_DELAYED_WORK(&work->work, cm_work_handler); work->local_id = cm_id->local_id; work->remote_id = cm_id->remote_id; work->mad_recv_wc = NULL; work->cm_event.event = IB_CM_USER_ESTABLISHED; queue_delayed_work(cm.wq, &work->work, 0); out: return ret; } static int cm_migrate(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; unsigned long flags; int ret = 0; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state == IB_CM_ESTABLISHED && (cm_id->lap_state == IB_CM_LAP_UNINIT || cm_id->lap_state == IB_CM_LAP_IDLE)) { cm_id->lap_state = IB_CM_LAP_IDLE; cm_id_priv->av = cm_id_priv->alt_av; } else ret = -EINVAL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) { int ret; switch (event) { case IB_EVENT_COMM_EST: ret = cm_establish(cm_id); break; case IB_EVENT_PATH_MIG: ret = cm_migrate(cm_id); break; default: ret = -EINVAL; } return ret; } EXPORT_SYMBOL(ib_cm_notify); static void cm_recv_handler(struct ib_mad_agent *mad_agent, struct ib_mad_recv_wc *mad_recv_wc) { struct cm_port *port = mad_agent->context; struct cm_work *work; enum ib_cm_event_type event; u16 attr_id; int paths = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)-> alt_local_lid != 0); event = IB_CM_REQ_RECEIVED; break; case CM_MRA_ATTR_ID: event = IB_CM_MRA_RECEIVED; break; case CM_REJ_ATTR_ID: event = IB_CM_REJ_RECEIVED; break; case CM_REP_ATTR_ID: event = IB_CM_REP_RECEIVED; break; case CM_RTU_ATTR_ID: event = IB_CM_RTU_RECEIVED; break; case CM_DREQ_ATTR_ID: event = IB_CM_DREQ_RECEIVED; break; case CM_DREP_ATTR_ID: event = IB_CM_DREP_RECEIVED; break; case CM_SIDR_REQ_ATTR_ID: event = IB_CM_SIDR_REQ_RECEIVED; break; case CM_SIDR_REP_ATTR_ID: event = IB_CM_SIDR_REP_RECEIVED; break; case CM_LAP_ATTR_ID: paths = 1; event = IB_CM_LAP_RECEIVED; break; case 
CM_APR_ATTR_ID: event = IB_CM_APR_RECEIVED; break; default: ib_free_recv_mad(mad_recv_wc); return; } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); atomic_long_inc(&port->counter_group[CM_RECV]. counter[attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, GFP_KERNEL); if (!work) { ib_free_recv_mad(mad_recv_wc); return; } INIT_DELAYED_WORK(&work->work, cm_work_handler); work->cm_event.event = event; work->mad_recv_wc = mad_recv_wc; work->port = port; queue_delayed_work(cm.wq, &work->work, 0); } static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; if (cm_id_priv->responder_resources) qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC; qp_attr->pkey_index = cm_id_priv->av.pkey_index; qp_attr->port_num = cm_id_priv->av.port->port_num; ret = 0; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN; qp_attr->ah_attr = cm_id_priv->av.ah_attr; qp_attr->path_mtu = 
cm_id_priv->path_mtu; qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); if (cm_id_priv->qp_type == IB_QPT_RC || cm_id_priv->qp_type == IB_QPT_XRC_TGT) { *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources; qp_attr->min_rnr_timer = 0; } if (cm_id_priv->alt_av.ah_attr.dlid) { *qp_attr_mask |= IB_QP_ALT_PATH; qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; } ret = 0; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { /* Allow transition to RTS before sending REP */ case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); switch (cm_id_priv->qp_type) { case IB_QPT_RC: case IB_QPT_XRC_INI: *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC; qp_attr->retry_cnt = cm_id_priv->retry_count; qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; /* fall through */ case IB_QPT_XRC_TGT: *qp_attr_mask |= IB_QP_TIMEOUT; qp_attr->timeout = cm_id_priv->av.timeout; break; default: break; } if (cm_id_priv->alt_av.ah_attr.dlid) { *qp_attr_mask |= IB_QP_PATH_MIG_STATE; qp_attr->path_mig_state = IB_MIG_REARM; } } else { *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; qp_attr->alt_port_num = 
cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; qp_attr->path_mig_state = IB_MIG_REARM; } ret = 0; break; default: ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct cm_id_private *cm_id_priv; int ret; cm_id_priv = container_of(cm_id, struct cm_id_private, id); switch (qp_attr->qp_state) { case IB_QPS_INIT: ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTR: ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTS: ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); break; default: ret = -EINVAL; break; } return ret; } EXPORT_SYMBOL(ib_cm_init_qp_attr); static void cm_get_ack_delay(struct cm_device *cm_dev) { struct ib_device_attr attr; if (ib_query_device(cm_dev->ib_device, &attr)) cm_dev->ack_delay = 0; /* acks will rely on packet life time */ else cm_dev->ack_delay = attr.local_ca_ack_delay; } static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr, char *buf) { struct cm_counter_group *group; struct cm_counter_attribute *cm_attr; group = container_of(obj, struct cm_counter_group, obj); cm_attr = container_of(attr, struct cm_counter_attribute, attr); return sprintf(buf, "%ld\n", atomic_long_read(&group->counter[cm_attr->index])); } static const struct sysfs_ops cm_counter_ops = { .show = cm_show_counter }; static struct kobj_type cm_counter_obj_type = { .sysfs_ops = &cm_counter_ops, .default_attrs = cm_counter_default_attrs }; static void cm_release_port_obj(struct kobject *obj) { struct cm_port *cm_port; cm_port = container_of(obj, struct cm_port, port_obj); kfree(cm_port); } static struct kobj_type cm_port_obj_type = { .release = cm_release_port_obj }; static char *cm_devnode(struct device 
*dev, umode_t *mode) { if (mode) *mode = 0666; return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); } struct class cm_class = { .owner = THIS_MODULE, .name = "infiniband_cm", .devnode = cm_devnode, }; EXPORT_SYMBOL(cm_class); static int cm_create_port_fs(struct cm_port *port) { int i, ret; ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type, &port->cm_dev->device->kobj, "%d", port->port_num); if (ret) { kfree(port); return ret; } for (i = 0; i < CM_COUNTER_GROUPS; i++) { ret = kobject_init_and_add(&port->counter_group[i].obj, &cm_counter_obj_type, &port->port_obj, "%s", counter_group_names[i]); if (ret) goto error; } return 0; error: while (i--) kobject_put(&port->counter_group[i].obj); kobject_put(&port->port_obj); return ret; } static void cm_remove_port_fs(struct cm_port *port) { int i; for (i = 0; i < CM_COUNTER_GROUPS; i++) kobject_put(&port->counter_group[i].obj); kobject_put(&port->port_obj); } static void cm_add_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_mad_reg_req reg_req = { .mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class_version = IB_CM_CLASS_VERSION }; struct ib_port_modify port_modify = { .set_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; int ret; u8 i; if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB) return; cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) * ib_device->phys_port_cnt, GFP_KERNEL); if (!cm_dev) return; cm_dev->ib_device = ib_device; cm_get_ack_delay(cm_dev); cm_dev->device = device_create(&cm_class, &ib_device->dev, MKDEV(0, 0), NULL, "%s", ib_device->name); if (IS_ERR(cm_dev->device)) { kfree(cm_dev); return; } set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= ib_device->phys_port_cnt; i++) { port = kzalloc(sizeof *port, GFP_KERNEL); if (!port) goto error1; cm_dev->port[i-1] = port; port->cm_dev = cm_dev; port->port_num = i; ret = cm_create_port_fs(port); if (ret) goto error1; port->mad_agent = 
ib_register_mad_agent(ib_device, i, IB_QPT_GSI, &reg_req, 0, cm_send_handler, cm_recv_handler, port); if (IS_ERR(port->mad_agent)) goto error2; ret = ib_modify_port(ib_device, i, 0, &port_modify); if (ret) goto error3; } ib_set_client_data(ib_device, &cm_client, cm_dev); write_lock_irqsave(&cm.device_lock, flags); list_add_tail(&cm_dev->list, &cm.device_list); write_unlock_irqrestore(&cm.device_lock, flags); return; error3: ib_unregister_mad_agent(port->mad_agent); error2: cm_remove_port_fs(port); error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); cm_remove_port_fs(port); } device_unregister(cm_dev->device); kfree(cm_dev); } static void cm_remove_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_port_modify port_modify = { .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; int i; cm_dev = ib_get_client_data(ib_device, &cm_client); if (!cm_dev) return; write_lock_irqsave(&cm.device_lock, flags); list_del(&cm_dev->list); write_unlock_irqrestore(&cm.device_lock, flags); for (i = 1; i <= ib_device->phys_port_cnt; i++) { port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); flush_workqueue(cm.wq); cm_remove_port_fs(port); } device_unregister(cm_dev->device); kfree(cm_dev); } static int __init ib_cm_init(void) { int ret; memset(&cm, 0, sizeof cm); INIT_LIST_HEAD(&cm.device_list); rwlock_init(&cm.device_lock); spin_lock_init(&cm.lock); cm.listen_service_table = RB_ROOT; cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); cm.remote_id_table = RB_ROOT; cm.remote_qp_table = RB_ROOT; cm.remote_sidr_table = RB_ROOT; idr_init(&cm.local_id_table); get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); idr_pre_get(&cm.local_id_table, GFP_KERNEL); 
INIT_LIST_HEAD(&cm.timewait_list); ret = class_register(&cm_class); if (ret) return -ENOMEM; cm.wq = create_workqueue("ib_cm"); if (!cm.wq) { ret = -ENOMEM; goto error1; } ret = ib_register_client(&cm_client); if (ret) goto error2; return 0; error2: destroy_workqueue(cm.wq); error1: class_unregister(&cm_class); return ret; } static void __exit ib_cm_cleanup(void) { struct cm_timewait_info *timewait_info, *tmp; spin_lock_irq(&cm.lock); list_for_each_entry(timewait_info, &cm.timewait_list, list) cancel_delayed_work(&timewait_info->work.work); spin_unlock_irq(&cm.lock); ib_unregister_client(&cm_client); destroy_workqueue(cm.wq); list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { list_del(&timewait_info->list); kfree(timewait_info); } class_unregister(&cm_class); idr_destroy(&cm.local_id_table); } module_init(ib_cm_init); module_exit(ib_cm_cleanup);
gpl-2.0
asias/linux-kvm
drivers/media/video/gspca/jeilinj.c
4956
14415
/* * Jeilinj subdriver * * Supports some Jeilin dual-mode cameras which use bulk transport and * download raw JPEG data. * * Copyright (C) 2009 Theodore Kilgore * * Sportscam DV15 support and control settings are * Copyright (C) 2011 Patrice Chotard * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "jeilinj" #include <linux/slab.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/JEILINJ USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define JEILINJ_CMD_TIMEOUT 500 #define JEILINJ_CMD_DELAY 160 #define JEILINJ_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define JEILINJ_MAX_TRANSFER 0x200 #define FRAME_HEADER_LEN 0x10 #define FRAME_START 0xFFFFFFFF enum { SAKAR_57379, SPORTSCAM_DV15, }; #define CAMQUALITY_MIN 0 /* highest cam quality */ #define CAMQUALITY_MAX 97 /* lowest cam quality */ enum e_ctrl { LIGHTFREQ, AUTOGAIN, RED, GREEN, BLUE, NCTRLS /* number of controls */ }; /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct gspca_ctrl ctrls[NCTRLS]; int blocks_left; const struct v4l2_pix_format *cap_mode; /* Driver stuff */ u8 type; u8 quality; /* image quality */ #define QUALITY_MIN 35 #define QUALITY_MAX 85 #define QUALITY_DEF 85 u8 jpeg_hdr[JPEG_HDR_SZ]; }; struct jlj_command { unsigned char instruction[2]; unsigned char ack_wanted; unsigned char delay; }; /* AFAICT these cameras will only do 320x240. */ static struct v4l2_pix_format jlj_mode[] = { { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, { 640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0} }; /* * cam uses endpoint 0x03 to send commands, 0x84 for read commands, * and 0x82 for bulk transfer. */ /* All commands are two bytes only */ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command) { int retval; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, command, 2); retval = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 3), gspca_dev->usb_buf, 2, NULL, 500); if (retval < 0) { pr_err("command write [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } /* Responses are one byte only */ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response) { int retval; if (gspca_dev->usb_err < 0) return; retval = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x84), gspca_dev->usb_buf, 1, NULL, 500); response = gspca_dev->usb_buf[0]; if (retval < 0) { pr_err("read command [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } static void setfreq(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 freq_commands[][2] = { {0x71, 0x80}, {0x70, 0x07} }; freq_commands[0][1] |= (sd->ctrls[LIGHTFREQ].val >> 1); jlj_write2(gspca_dev, freq_commands[0]); jlj_write2(gspca_dev, 
freq_commands[1]); } static void setcamquality(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 quality_commands[][2] = { {0x71, 0x1E}, {0x70, 0x06} }; u8 camquality; /* adapt camera quality from jpeg quality */ camquality = ((QUALITY_MAX - sd->quality) * CAMQUALITY_MAX) / (QUALITY_MAX - QUALITY_MIN); quality_commands[0][1] += camquality; jlj_write2(gspca_dev, quality_commands[0]); jlj_write2(gspca_dev, quality_commands[1]); } static void setautogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 autogain_commands[][2] = { {0x94, 0x02}, {0xcf, 0x00} }; autogain_commands[1][1] = (sd->ctrls[AUTOGAIN].val << 4); jlj_write2(gspca_dev, autogain_commands[0]); jlj_write2(gspca_dev, autogain_commands[1]); } static void setred(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 setred_commands[][2] = { {0x94, 0x02}, {0xe6, 0x00} }; setred_commands[1][1] = sd->ctrls[RED].val; jlj_write2(gspca_dev, setred_commands[0]); jlj_write2(gspca_dev, setred_commands[1]); } static void setgreen(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 setgreen_commands[][2] = { {0x94, 0x02}, {0xe7, 0x00} }; setgreen_commands[1][1] = sd->ctrls[GREEN].val; jlj_write2(gspca_dev, setgreen_commands[0]); jlj_write2(gspca_dev, setgreen_commands[1]); } static void setblue(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 setblue_commands[][2] = { {0x94, 0x02}, {0xe9, 0x00} }; setblue_commands[1][1] = sd->ctrls[BLUE].val; jlj_write2(gspca_dev, setblue_commands[0]); jlj_write2(gspca_dev, setblue_commands[1]); } static const struct ctrl sd_ctrls[NCTRLS] = { [LIGHTFREQ] = { { .id = V4L2_CID_POWER_LINE_FREQUENCY, .type = V4L2_CTRL_TYPE_MENU, .name = "Light frequency filter", .minimum = V4L2_CID_POWER_LINE_FREQUENCY_DISABLED, /* 1 */ .maximum = V4L2_CID_POWER_LINE_FREQUENCY_60HZ, /* 2 */ .step = 1, .default_value = V4L2_CID_POWER_LINE_FREQUENCY_60HZ, }, .set_control = setfreq }, 
[AUTOGAIN] = { { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Automatic Gain (and Exposure)", .minimum = 0, .maximum = 3, .step = 1, #define AUTOGAIN_DEF 0 .default_value = AUTOGAIN_DEF, }, .set_control = setautogain }, [RED] = { { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0, .maximum = 3, .step = 1, #define RED_BALANCE_DEF 2 .default_value = RED_BALANCE_DEF, }, .set_control = setred }, [GREEN] = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0, .maximum = 3, .step = 1, #define GREEN_BALANCE_DEF 2 .default_value = GREEN_BALANCE_DEF, }, .set_control = setgreen }, [BLUE] = { { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0, .maximum = 3, .step = 1, #define BLUE_BALANCE_DEF 2 .default_value = BLUE_BALANCE_DEF, }, .set_control = setblue }, }; static int jlj_start(struct gspca_dev *gspca_dev) { int i; int start_commands_size; u8 response = 0xff; struct sd *sd = (struct sd *) gspca_dev; struct jlj_command start_commands[] = { {{0x71, 0x81}, 0, 0}, {{0x70, 0x05}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x81 - gspca_dev->curr_mode}, 0, 0}, {{0x70, 0x04}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x00}, 0, 0}, /* start streaming ??*/ {{0x70, 0x08}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, #define SPORTSCAM_DV15_CMD_SIZE 9 {{0x94, 0x02}, 0, 0}, {{0xde, 0x24}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xdd, 0xf0}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe3, 0x2c}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe4, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe5, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe6, 0x2c}, 0, 0}, {{0x94, 0x03}, 0, 0}, {{0xaa, 0x00}, 0, 0} }; sd->blocks_left = 0; /* Under Windows, USB spy shows that only the 9 first start * commands are used for SPORTSCAM_DV15 webcam */ if (sd->type == SPORTSCAM_DV15) start_commands_size = SPORTSCAM_DV15_CMD_SIZE; else start_commands_size = 
ARRAY_SIZE(start_commands); for (i = 0; i < start_commands_size; i++) { jlj_write2(gspca_dev, start_commands[i].instruction); if (start_commands[i].delay) msleep(start_commands[i].delay); if (start_commands[i].ack_wanted) jlj_read1(gspca_dev, response); } setcamquality(gspca_dev); msleep(2); setfreq(gspca_dev); if (gspca_dev->usb_err < 0) PDEBUG(D_ERR, "Start streaming command failed"); return gspca_dev->usb_err; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; int packet_type; u32 header_marker; PDEBUG(D_STREAM, "Got %d bytes out of %d for Block 0", len, JEILINJ_MAX_TRANSFER); if (len != JEILINJ_MAX_TRANSFER) { PDEBUG(D_PACK, "bad length"); goto discard; } /* check if it's start of frame */ header_marker = ((u32 *)data)[0]; if (header_marker == FRAME_START) { sd->blocks_left = data[0x0a] - 1; PDEBUG(D_STREAM, "blocks_left = 0x%x", sd->blocks_left); /* Start a new frame, and add the JPEG header, first thing */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); /* Toss line 0 of data block 0, keep the rest. */ gspca_frame_add(gspca_dev, INTER_PACKET, data + FRAME_HEADER_LEN, JEILINJ_MAX_TRANSFER - FRAME_HEADER_LEN); } else if (sd->blocks_left > 0) { PDEBUG(D_STREAM, "%d blocks remaining for frame", sd->blocks_left); sd->blocks_left -= 1; if (sd->blocks_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, data, JEILINJ_MAX_TRANSFER); } else goto discard; return; discard: /* Discard data until a new frame starts. 
*/ gspca_dev->last_packet_type = DISCARD_PACKET; } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; dev->type = id->driver_info; gspca_dev->cam.ctrls = dev->ctrls; dev->quality = QUALITY_DEF; cam->cam_mode = jlj_mode; cam->nmodes = ARRAY_SIZE(jlj_mode); cam->bulk = 1; cam->bulk_nurbs = 1; cam->bulk_size = JEILINJ_MAX_TRANSFER; return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { int i; u8 *buf; static u8 stop_commands[][2] = { {0x71, 0x00}, {0x70, 0x09}, {0x71, 0x80}, {0x70, 0x05} }; for (;;) { /* get the image remaining blocks */ usb_bulk_msg(gspca_dev->dev, gspca_dev->urb[0]->pipe, gspca_dev->urb[0]->transfer_buffer, JEILINJ_MAX_TRANSFER, NULL, JEILINJ_DATA_TIMEOUT); /* search for 0xff 0xd9 (EOF for JPEG) */ i = 0; buf = gspca_dev->urb[0]->transfer_buffer; while ((i < (JEILINJ_MAX_TRANSFER - 1)) && ((buf[i] != 0xff) || (buf[i+1] != 0xd9))) i++; if (i != (JEILINJ_MAX_TRANSFER - 1)) /* last remaining block found */ break; } for (i = 0; i < ARRAY_SIZE(stop_commands); i++) jlj_write2(gspca_dev, stop_commands[i]); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return gspca_dev->usb_err; } /* Set up for getting frames. 
*/ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* create the JPEG header */ jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(dev->jpeg_hdr, dev->quality); PDEBUG(D_STREAM, "Start streaming at %dx%d", gspca_dev->height, gspca_dev->width); jlj_start(gspca_dev); return gspca_dev->usb_err; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0280), .driver_info = SAKAR_57379}, {USB_DEVICE(0x0979, 0x0270), .driver_info = SPORTSCAM_DV15}, {} }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_querymenu(struct gspca_dev *gspca_dev, struct v4l2_querymenu *menu) { switch (menu->id) { case V4L2_CID_POWER_LINE_FREQUENCY: switch (menu->index) { case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ strcpy((char *) menu->name, "disable"); return 0; case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ strcpy((char *) menu->name, "50 Hz"); return 0; case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ strcpy((char *) menu->name, "60 Hz"); return 0; } break; } return -EINVAL; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (jcomp->quality < QUALITY_MIN) sd->quality = QUALITY_MIN; else if (jcomp->quality > QUALITY_MAX) sd->quality = QUALITY_MAX; else sd->quality = jcomp->quality; if (gspca_dev->streaming) { jpeg_set_qual(sd->jpeg_hdr, sd->quality); setcamquality(gspca_dev); } return 0; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = sd->quality; jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc_sakar_57379 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan 
= sd_pkt_scan, }; /* sub-driver description */ static const struct sd_desc sd_desc_sportscam_dv15 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .querymenu = sd_querymenu, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, }; static const struct sd_desc *sd_desc[2] = { &sd_desc_sakar_57379, &sd_desc_sportscam_dv15 }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, sd_desc[id->driver_info], sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
samssm/linux-socfpga
drivers/net/ethernet/mellanox/mlx4/sense.c
5468
4126
/* * Copyright (c) 2007 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/errno.h> #include <linux/if_ether.h> #include <linux/mlx4/cmd.h> #include "mlx4.h" int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type) { u64 out_param; int err = 0; err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); if (err) { mlx4_err(dev, "Sense command failed for port: %d\n", port); return err; } if (out_param > 2) { mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param); return -EINVAL; } *type = out_param; return 0; } void mlx4_do_sense_ports(struct mlx4_dev *dev, enum mlx4_port_type *stype, enum mlx4_port_type *defaults) { struct mlx4_sense *sense = &mlx4_priv(dev)->sense; int err; int i; for (i = 1; i <= dev->caps.num_ports; i++) { stype[i - 1] = 0; if (sense->do_sense_port[i] && sense->sense_allowed[i] && dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]); if (err) stype[i - 1] = defaults[i - 1]; } else stype[i - 1] = defaults[i - 1]; } /* * If sensed nothing, remain in current configuration. */ for (i = 0; i < dev->caps.num_ports; i++) stype[i] = stype[i] ? 
stype[i] : defaults[i]; } static void mlx4_sense_port(struct work_struct *work) { struct delayed_work *delay = to_delayed_work(work); struct mlx4_sense *sense = container_of(delay, struct mlx4_sense, sense_poll); struct mlx4_dev *dev = sense->dev; struct mlx4_priv *priv = mlx4_priv(dev); enum mlx4_port_type stype[MLX4_MAX_PORTS]; mutex_lock(&priv->port_mutex); mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]); if (mlx4_check_port_params(dev, stype)) goto sense_again; if (mlx4_change_port_types(dev, stype)) mlx4_err(dev, "Failed to change port_types\n"); sense_again: mutex_unlock(&priv->port_mutex); queue_delayed_work(mlx4_wq , &sense->sense_poll, round_jiffies_relative(MLX4_SENSE_RANGE)); } void mlx4_start_sense(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_sense *sense = &priv->sense; if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) return; queue_delayed_work(mlx4_wq , &sense->sense_poll, round_jiffies_relative(MLX4_SENSE_RANGE)); } void mlx4_stop_sense(struct mlx4_dev *dev) { cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll); } void mlx4_sense_init(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_sense *sense = &priv->sense; int port; sense->dev = dev; for (port = 1; port <= dev->caps.num_ports; port++) sense->do_sense_port[port] = 1; INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port); }
gpl-2.0
DaemonGG/LARP_kernel3.16.0
lib/klist.c
5468
9566
/* * klist.c - Routines for manipulating klists. * * Copyright (C) 2005 Patrick Mochel * * This file is released under the GPL v2. * * This klist interface provides a couple of structures that wrap around * struct list_head to provide explicit list "head" (struct klist) and list * "node" (struct klist_node) objects. For struct klist, a spinlock is * included that protects access to the actual list itself. struct * klist_node provides a pointer to the klist that owns it and a kref * reference count that indicates the number of current users of that node * in the list. * * The entire point is to provide an interface for iterating over a list * that is safe and allows for modification of the list during the * iteration (e.g. insertion and removal), including modification of the * current node on the list. * * It works using a 3rd object type - struct klist_iter - that is declared * and initialized before an iteration. klist_next() is used to acquire the * next element in the list. It returns NULL if there are no more items. * Internally, that routine takes the klist's lock, decrements the * reference count of the previous klist_node and increments the count of * the next klist_node. It then drops the lock and returns. * * There are primitives for adding and removing nodes to/from a klist. * When deleting, klist_del() will simply decrement the reference count. * Only when the count goes to 0 is the node removed from the list. * klist_remove() will try to delete the node from the list and block until * it is actually removed. This is useful for objects (like devices) that * have been removed from the system and must be freed (but must wait until * all accessors have finished). */ #include <linux/klist.h> #include <linux/export.h> #include <linux/sched.h> /* * Use the lowest bit of n_klist to mark deleted nodes and exclude * dead ones from iteration. 
*/ #define KNODE_DEAD 1LU #define KNODE_KLIST_MASK ~KNODE_DEAD static struct klist *knode_klist(struct klist_node *knode) { return (struct klist *) ((unsigned long)knode->n_klist & KNODE_KLIST_MASK); } static bool knode_dead(struct klist_node *knode) { return (unsigned long)knode->n_klist & KNODE_DEAD; } static void knode_set_klist(struct klist_node *knode, struct klist *klist) { knode->n_klist = klist; /* no knode deserves to start its life dead */ WARN_ON(knode_dead(knode)); } static void knode_kill(struct klist_node *knode) { /* and no knode should die twice ever either, see we're very humane */ WARN_ON(knode_dead(knode)); *(unsigned long *)&knode->n_klist |= KNODE_DEAD; } /** * klist_init - Initialize a klist structure. * @k: The klist we're initializing. * @get: The get function for the embedding object (NULL if none) * @put: The put function for the embedding object (NULL if none) * * Initialises the klist structure. If the klist_node structures are * going to be embedded in refcounted objects (necessary for safe * deletion) then the get/put arguments are used to initialise * functions that take and release references on the embedding * objects. */ void klist_init(struct klist *k, void (*get)(struct klist_node *), void (*put)(struct klist_node *)) { INIT_LIST_HEAD(&k->k_list); spin_lock_init(&k->k_lock); k->get = get; k->put = put; } EXPORT_SYMBOL_GPL(klist_init); static void add_head(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } static void add_tail(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add_tail(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } static void klist_node_init(struct klist *k, struct klist_node *n) { INIT_LIST_HEAD(&n->n_node); kref_init(&n->n_ref); knode_set_klist(n, k); if (k->get) k->get(n); } /** * klist_add_head - Initialize a klist_node and add it to front. * @n: node we're adding. * @k: klist it's going on. 
*/ void klist_add_head(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_head(k, n); } EXPORT_SYMBOL_GPL(klist_add_head); /** * klist_add_tail - Initialize a klist_node and add it to back. * @n: node we're adding. * @k: klist it's going on. */ void klist_add_tail(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_tail(k, n); } EXPORT_SYMBOL_GPL(klist_add_tail); /** * klist_add_after - Init a klist_node and add it after an existing node * @n: node we're adding. * @pos: node to put @n after */ void klist_add_after(struct klist_node *n, struct klist_node *pos) { struct klist *k = knode_klist(pos); klist_node_init(k, n); spin_lock(&k->k_lock); list_add(&n->n_node, &pos->n_node); spin_unlock(&k->k_lock); } EXPORT_SYMBOL_GPL(klist_add_after); /** * klist_add_before - Init a klist_node and add it before an existing node * @n: node we're adding. * @pos: node to put @n after */ void klist_add_before(struct klist_node *n, struct klist_node *pos) { struct klist *k = knode_klist(pos); klist_node_init(k, n); spin_lock(&k->k_lock); list_add_tail(&n->n_node, &pos->n_node); spin_unlock(&k->k_lock); } EXPORT_SYMBOL_GPL(klist_add_before); struct klist_waiter { struct list_head list; struct klist_node *node; struct task_struct *process; int woken; }; static DEFINE_SPINLOCK(klist_remove_lock); static LIST_HEAD(klist_remove_waiters); static void klist_release(struct kref *kref) { struct klist_waiter *waiter, *tmp; struct klist_node *n = container_of(kref, struct klist_node, n_ref); WARN_ON(!knode_dead(n)); list_del(&n->n_node); spin_lock(&klist_remove_lock); list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) { if (waiter->node != n) continue; list_del(&waiter->list); waiter->woken = 1; mb(); wake_up_process(waiter->process); } spin_unlock(&klist_remove_lock); knode_set_klist(n, NULL); } static int klist_dec_and_del(struct klist_node *n) { return kref_put(&n->n_ref, klist_release); } static void klist_put(struct klist_node *n, bool 
kill) { struct klist *k = knode_klist(n); void (*put)(struct klist_node *) = k->put; spin_lock(&k->k_lock); if (kill) knode_kill(n); if (!klist_dec_and_del(n)) put = NULL; spin_unlock(&k->k_lock); if (put) put(n); } /** * klist_del - Decrement the reference count of node and try to remove. * @n: node we're deleting. */ void klist_del(struct klist_node *n) { klist_put(n, true); } EXPORT_SYMBOL_GPL(klist_del); /** * klist_remove - Decrement the refcount of node and wait for it to go away. * @n: node we're removing. */ void klist_remove(struct klist_node *n) { struct klist_waiter waiter; waiter.node = n; waiter.process = current; waiter.woken = 0; spin_lock(&klist_remove_lock); list_add(&waiter.list, &klist_remove_waiters); spin_unlock(&klist_remove_lock); klist_del(n); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (waiter.woken) break; schedule(); } __set_current_state(TASK_RUNNING); } EXPORT_SYMBOL_GPL(klist_remove); /** * klist_node_attached - Say whether a node is bound to a list or not. * @n: Node that we're testing. */ int klist_node_attached(struct klist_node *n) { return (n->n_klist != NULL); } EXPORT_SYMBOL_GPL(klist_node_attached); /** * klist_iter_init_node - Initialize a klist_iter structure. * @k: klist we're iterating. * @i: klist_iter we're filling. * @n: node to start with. * * Similar to klist_iter_init(), but starts the action off with @n, * instead of with the list head. */ void klist_iter_init_node(struct klist *k, struct klist_iter *i, struct klist_node *n) { i->i_klist = k; i->i_cur = n; if (n) kref_get(&n->n_ref); } EXPORT_SYMBOL_GPL(klist_iter_init_node); /** * klist_iter_init - Iniitalize a klist_iter structure. * @k: klist we're iterating. * @i: klist_iter structure we're filling. * * Similar to klist_iter_init_node(), but start with the list head. */ void klist_iter_init(struct klist *k, struct klist_iter *i) { klist_iter_init_node(k, i, NULL); } EXPORT_SYMBOL_GPL(klist_iter_init); /** * klist_iter_exit - Finish a list iteration. 
* @i: Iterator structure. * * Must be called when done iterating over list, as it decrements the * refcount of the current node. Necessary in case iteration exited before * the end of the list was reached, and always good form. */ void klist_iter_exit(struct klist_iter *i) { if (i->i_cur) { klist_put(i->i_cur, false); i->i_cur = NULL; } } EXPORT_SYMBOL_GPL(klist_iter_exit); static struct klist_node *to_klist_node(struct list_head *n) { return container_of(n, struct klist_node, n_node); } /** * klist_next - Ante up next node in list. * @i: Iterator structure. * * First grab list lock. Decrement the reference count of the previous * node, if there was one. Grab the next node, increment its reference * count, drop the lock, and return that next node. */ struct klist_node *klist_next(struct klist_iter *i) { void (*put)(struct klist_node *) = i->i_klist->put; struct klist_node *last = i->i_cur; struct klist_node *next; spin_lock(&i->i_klist->k_lock); if (last) { next = to_klist_node(last->n_node.next); if (!klist_dec_and_del(last)) put = NULL; } else next = to_klist_node(i->i_klist->k_list.next); i->i_cur = NULL; while (next != to_klist_node(&i->i_klist->k_list)) { if (likely(!knode_dead(next))) { kref_get(&next->n_ref); i->i_cur = next; break; } next = to_klist_node(next->n_node.next); } spin_unlock(&i->i_klist->k_lock); if (put && last) put(last); return i->i_cur; } EXPORT_SYMBOL_GPL(klist_next);
gpl-2.0
chrmhoffmann/android_kernel_asus_tf300t
arch/x86/platform/efi/efi_64.c
5468
2544
/* * x86_64 specific EFI support functions * Based on Extensible Firmware Interface Specification version 1.0 * * Copyright (C) 2005-2008 Intel Co. * Fenghua Yu <fenghua.yu@intel.com> * Bibo Mao <bibo.mao@intel.com> * Chandramouli Narayanan <mouli@linux.intel.com> * Huang Ying <ying.huang@intel.com> * * Code to convert EFI to E820 map has been implemented in elilo bootloader * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table * is setup appropriately for EFI runtime code. * - mouli 06/14/2007. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/bootmem.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/efi.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/reboot.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/e820.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/proto.h> #include <asm/efi.h> #include <asm/cacheflush.h> #include <asm/fixmap.h> static pgd_t save_pgd __initdata; static unsigned long efi_flags __initdata; static void __init early_code_mapping_set_exec(int executable) { efi_memory_desc_t *md; void *p; if (!(__supported_pte_mask & _PAGE_NX)) return; /* Make EFI service code area executable */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; if (md->type == EFI_RUNTIME_SERVICES_CODE || md->type == EFI_BOOT_SERVICES_CODE) efi_set_executable(md, executable); } } void __init efi_call_phys_prelog(void) { unsigned long vaddress; early_code_mapping_set_exec(1); local_irq_save(efi_flags); vaddress = (unsigned long)__va(0x0UL); save_pgd = *pgd_offset_k(0x0UL); set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress)); __flush_tlb_all(); } void __init efi_call_phys_epilog(void) { /* * After the lock is released, the original page table is restored. 
*/ set_pgd(pgd_offset_k(0x0UL), save_pgd); __flush_tlb_all(); local_irq_restore(efi_flags); early_code_mapping_set_exec(0); } void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, u32 type) { unsigned long last_map_pfn; if (type == EFI_MEMORY_MAPPED_IO) return ioremap(phys_addr, size); last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size); if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) { unsigned long top = last_map_pfn << PAGE_SHIFT; efi_ioremap(top, size - (top - phys_addr), type); } return (void __iomem *)__va(phys_addr); }
gpl-2.0
VM12/android_kernel_oneplus_msm8974
arch/tile/lib/memcpy_64.c
7260
5364
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #define __memcpy memcpy /* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */ /* Must be 8 bytes in size. */ #define word_t uint64_t #if CHIP_L2_LINE_SIZE() != 64 && CHIP_L2_LINE_SIZE() != 128 #error "Assumes 64 or 128 byte line size" #endif /* How many cache lines ahead should we prefetch? */ #define PREFETCH_LINES_AHEAD 3 /* * Provide "base versions" of load and store for the normal code path. * The kernel provides other versions for userspace copies. */ #define ST(p, v) (*(p) = (v)) #define LD(p) (*(p)) #ifndef USERCOPY_FUNC #define ST1 ST #define ST2 ST #define ST4 ST #define ST8 ST #define LD1 LD #define LD2 LD #define LD4 LD #define LD8 LD #define RETVAL dstv void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n) #else /* * Special kernel version will provide implementation of the LDn/STn * macros to return a count of uncopied bytes due to mm fault. */ #define RETVAL 0 int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n) #endif { char *__restrict dst1 = (char *)dstv; const char *__restrict src1 = (const char *)srcv; const char *__restrict src1_end; const char *__restrict prefetch; word_t *__restrict dst8; /* 8-byte pointer to destination memory. 
*/ word_t final; /* Final bytes to write to trailing word, if any */ long i; if (n < 16) { for (; n; n--) ST1(dst1++, LD1(src1++)); return RETVAL; } /* * Locate the end of source memory we will copy. Don't * prefetch past this. */ src1_end = src1 + n - 1; /* Prefetch ahead a few cache lines, but not past the end. */ prefetch = src1; for (i = 0; i < PREFETCH_LINES_AHEAD; i++) { __insn_prefetch(prefetch); prefetch += CHIP_L2_LINE_SIZE(); prefetch = (prefetch > src1_end) ? prefetch : src1; } /* Copy bytes until dst is word-aligned. */ for (; (uintptr_t)dst1 & (sizeof(word_t) - 1); n--) ST1(dst1++, LD1(src1++)); /* 8-byte pointer to destination memory. */ dst8 = (word_t *)dst1; if (__builtin_expect((uintptr_t)src1 & (sizeof(word_t) - 1), 0)) { /* * Misaligned copy. Copy 8 bytes at a time, but don't * bother with other fanciness. * * TODO: Consider prefetching and using wh64 as well. */ /* Create an aligned src8. */ const word_t *__restrict src8 = (const word_t *)((uintptr_t)src1 & -sizeof(word_t)); word_t b; word_t a = LD8(src8++); for (; n >= sizeof(word_t); n -= sizeof(word_t)) { b = LD8(src8++); a = __insn_dblalign(a, b, src1); ST8(dst8++, a); a = b; } if (n == 0) return RETVAL; b = ((const char *)src8 <= src1_end) ? *src8 : 0; /* * Final source bytes to write to trailing partial * word, if any. */ final = __insn_dblalign(a, b, src1); } else { /* Aligned copy. */ const word_t* __restrict src8 = (const word_t *)src1; /* src8 and dst8 are both word-aligned. */ if (n >= CHIP_L2_LINE_SIZE()) { /* Copy until 'dst' is cache-line-aligned. */ for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1); n -= sizeof(word_t)) ST8(dst8++, LD8(src8++)); for (; n >= CHIP_L2_LINE_SIZE(); ) { __insn_wh64(dst8); /* * Prefetch and advance to next line * to prefetch, but don't go past the end */ __insn_prefetch(prefetch); prefetch += CHIP_L2_LINE_SIZE(); prefetch = (prefetch > src1_end) ? prefetch : (const char *)src8; /* * Copy an entire cache line. 
Manually * unrolled to avoid idiosyncracies of * compiler unrolling. */ #define COPY_WORD(offset) ({ ST8(dst8+offset, LD8(src8+offset)); n -= 8; }) COPY_WORD(0); COPY_WORD(1); COPY_WORD(2); COPY_WORD(3); COPY_WORD(4); COPY_WORD(5); COPY_WORD(6); COPY_WORD(7); #if CHIP_L2_LINE_SIZE() == 128 COPY_WORD(8); COPY_WORD(9); COPY_WORD(10); COPY_WORD(11); COPY_WORD(12); COPY_WORD(13); COPY_WORD(14); COPY_WORD(15); #elif CHIP_L2_LINE_SIZE() != 64 # error Fix code that assumes particular L2 cache line sizes #endif dst8 += CHIP_L2_LINE_SIZE() / sizeof(word_t); src8 += CHIP_L2_LINE_SIZE() / sizeof(word_t); } } for (; n >= sizeof(word_t); n -= sizeof(word_t)) ST8(dst8++, LD8(src8++)); if (__builtin_expect(n == 0, 1)) return RETVAL; final = LD8(src8); } /* n != 0 if we get here. Write out any trailing bytes. */ dst1 = (char *)dst8; if (n & 4) { ST4((uint32_t *)dst1, final); dst1 += 4; final >>= 32; n &= 3; } if (n & 2) { ST2((uint16_t *)dst1, final); dst1 += 2; final >>= 16; n &= 1; } if (n) ST1((uint8_t *)dst1, final); return RETVAL; } #ifdef USERCOPY_FUNC #undef ST1 #undef ST2 #undef ST4 #undef ST8 #undef LD1 #undef LD2 #undef LD4 #undef LD8 #undef USERCOPY_FUNC #endif
gpl-2.0
nycbjr/android_kernel_asus_tf700
fs/lockd/svc4proc.c
8028
14173
/* * linux/fs/lockd/svc4proc.c * * Lockd server procedures. We don't implement the NLM_*_RES * procedures because we don't use the async procedures. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/time.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> #define NLMDBG_FACILITY NLMDBG_CLIENT /* * Obtain client and file from arguments */ static __be32 nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_host **hostp, struct nlm_file **filp) { struct nlm_host *host = NULL; struct nlm_file *file = NULL; struct nlm_lock *lock = &argp->lock; __be32 error = 0; /* nfsd callbacks must have been installed for this procedure */ if (!nlmsvc_ops) return nlm_lck_denied_nolocks; /* Obtain host handle */ if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) || (argp->monitor && nsm_monitor(host) < 0)) goto no_locks; *hostp = host; /* Obtain file pointer. Not used by FREE_ALL call. */ if (filp != NULL) { if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0) goto no_locks; *filp = file; /* Set up the missing parts of the file_lock structure */ lock->fl.fl_file = file->f_file; lock->fl.fl_owner = (fl_owner_t) host; lock->fl.fl_lmops = &nlmsvc_lock_operations; } return 0; no_locks: nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; } /* * NULL: Test for presence of service */ static __be32 nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) { dprintk("lockd: NULL called\n"); return rpc_success; } /* * TEST: Check for conflicting lock */ static __be32 nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: TEST4 called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now check for conflicting locks */ resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: TEST4 status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: LOCK called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; #if 0 /* If supplied state doesn't match current state, we assume it's * an old request that time-warped somehow. Any error return would * do in this case because it's irrelevant anyway. * * NB: We don't retrieve the remote host's state yet. */ if (host->h_nsmstate && host->h_nsmstate != argp->state) { resp->status = nlm_lck_denied_nolocks; } else #endif /* Now try to lock the file */ resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock, argp->block, &argp->cookie, argp->reclaim); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: CANCEL called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Try to cancel request. 
*/ resp->status = nlmsvc_cancel_blocked(file, &argp->lock); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNLOCK: release a lock */ static __be32 nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNLOCK called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to remove the lock */ resp->status = nlmsvc_unlock(file, &argp->lock); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * GRANTED: A server calls us to tell that a process' lock request * was granted */ static __be32 nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } /* * This is the generic lockd callback for async RPC calls */ static void nlm4svc_callback_exit(struct rpc_task *task, void *data) { dprintk("lockd: %5u callback returned %d\n", task->tk_pid, -task->tk_status); } static void nlm4svc_callback_release(void *data) { nlmsvc_release_call(data); } static const struct rpc_call_ops nlm4svc_callback_ops = { .rpc_call_done = nlm4svc_callback_exit, .rpc_release = nlm4svc_callback_release, }; /* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. 
*/ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp, __be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *)) { struct nlm_host *host; struct nlm_rqst *call; __be32 stat; host = nlmsvc_lookup_host(rqstp, argp->lock.caller, argp->lock.len); if (host == NULL) return rpc_system_err; call = nlm_alloc_call(host); if (call == NULL) return rpc_system_err; stat = func(rqstp, argp, &call->a_res); if (stat != 0) { nlmsvc_release_call(call); return stat; } call->a_flags = RPC_TASK_ASYNC; if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0) return rpc_system_err; return rpc_success; } static __be32 nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: TEST_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, argp, nlm4svc_proc_test); } static __be32 nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: LOCK_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlm4svc_proc_lock); } static __be32 nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: CANCEL_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlm4svc_proc_cancel); } static __be32 nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: UNLOCK_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlm4svc_proc_unlock); } static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: GRANTED_MSG called\n"); return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlm4svc_proc_granted); } /* * SHARE: create a DOS share or alter existing share. 
*/ static __be32 nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: SHARE called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace() && !argp->reclaim) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to create the share */ resp->status = nlmsvc_share_file(host, file, argp); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNSHARE: Release a DOS share. */ static __be32 nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNSHARE called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to lock the file */ resp->status = nlmsvc_unshare_file(host, file, argp); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * NM_LOCK: Create an unmonitored lock */ static __be32 nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { dprintk("lockd: NM_LOCK called\n"); argp->monitor = 0; /* just clean the monitor flag */ return nlm4svc_proc_lock(rqstp, argp, resp); } /* * FREE_ALL: Release all locks and shares held by client */ static __be32 nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { struct nlm_host *host; /* Obtain client */ if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL)) return rpc_success; nlmsvc_free_host_resources(host); nlmsvc_release_host(host); return rpc_success; } /* * SM_NOTIFY: private callback from statd (not part of official NLM proto) */ static __be32 nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, void *resp) { dprintk("lockd: SM_NOTIFY called\n"); if (!nlm_privileged_requester(rqstp)) { char buf[RPC_MAX_ADDRBUFLEN]; printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); return rpc_system_err; } nlm_host_rebooted(argp); return rpc_success; } /* * client sent a GRANTED_RES, let's remove the associated block */ static __be32 nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, void *resp) { if (!nlmsvc_ops) return rpc_success; dprintk("lockd: GRANTED_RES called\n"); nlmsvc_grant_reply(&argp->cookie, argp->status); return rpc_success; } /* * NLM Server procedures. 
*/ #define nlm4svc_encode_norep nlm4svc_encode_void #define nlm4svc_decode_norep nlm4svc_decode_void #define nlm4svc_decode_testres nlm4svc_decode_void #define nlm4svc_decode_lockres nlm4svc_decode_void #define nlm4svc_decode_unlockres nlm4svc_decode_void #define nlm4svc_decode_cancelres nlm4svc_decode_void #define nlm4svc_decode_grantedres nlm4svc_decode_void #define nlm4svc_proc_none nlm4svc_proc_null #define nlm4svc_proc_test_res nlm4svc_proc_null #define nlm4svc_proc_lock_res nlm4svc_proc_null #define nlm4svc_proc_cancel_res nlm4svc_proc_null #define nlm4svc_proc_unlock_res nlm4svc_proc_null struct nlm_void { int dummy; }; #define PROC(name, xargt, xrest, argt, rest, respsize) \ { .pc_func = (svc_procfunc) nlm4svc_proc_##name, \ .pc_decode = (kxdrproc_t) nlm4svc_decode_##xargt, \ .pc_encode = (kxdrproc_t) nlm4svc_encode_##xrest, \ .pc_release = NULL, \ .pc_argsize = sizeof(struct nlm_##argt), \ .pc_ressize = sizeof(struct nlm_##rest), \ .pc_xdrressize = respsize, \ } #define Ck (1+XDR_QUADLEN(NLM_MAXCOOKIELEN)) /* cookie */ #define No (1+1024/4) /* netobj */ #define St 1 /* status */ #define Rg 4 /* range (offset + length) */ struct svc_procedure nlmsvc_procedures4[] = { PROC(null, void, void, void, void, 1), PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg), PROC(lock, lockargs, res, args, res, Ck+St), PROC(cancel, cancargs, res, args, res, Ck+St), PROC(unlock, unlockargs, res, args, res, Ck+St), PROC(granted, testargs, res, args, res, Ck+St), PROC(test_msg, testargs, norep, args, void, 1), PROC(lock_msg, lockargs, norep, args, void, 1), PROC(cancel_msg, cancargs, norep, args, void, 1), PROC(unlock_msg, unlockargs, norep, args, void, 1), PROC(granted_msg, testargs, norep, args, void, 1), PROC(test_res, testres, norep, res, void, 1), PROC(lock_res, lockres, norep, res, void, 1), PROC(cancel_res, cancelres, norep, res, void, 1), PROC(unlock_res, unlockres, norep, res, void, 1), PROC(granted_res, res, norep, res, void, 1), /* statd callback */ 
PROC(sm_notify, reboot, void, reboot, void, 1), PROC(none, void, void, void, void, 0), PROC(none, void, void, void, void, 0), PROC(none, void, void, void, void, 0), PROC(share, shareargs, shareres, args, res, Ck+St+1), PROC(unshare, shareargs, shareres, args, res, Ck+St+1), PROC(nm_lock, lockargs, res, args, res, Ck+St), PROC(free_all, notify, void, args, void, 1), };
gpl-2.0
lani11/Potsy_Kernel
drivers/media/video/sn9c102/sn9c102_mi0360.c
12892
13264
/*************************************************************************** * Plug-in for MI-0360 image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int mi0360_init(struct sn9c102_device* cam) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C103: err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11}, {0x0a, 0x14}, {0x40, 0x01}, {0x20, 0x17}, {0x07, 0x18}, {0xa0, 0x19}, {0x02, 0x1c}, {0x03, 0x1d}, {0x0f, 0x1e}, {0x0c, 0x1f}, {0x00, 0x20}, {0x10, 0x21}, {0x20, 0x22}, {0x30, 0x23}, {0x40, 0x24}, {0x50, 0x25}, {0x60, 0x26}, {0x70, 0x27}, {0x80, 0x28}, {0x90, 0x29}, {0xa0, 0x2a}, {0xb0, 0x2b}, {0xc0, 0x2c}, {0xd0, 0x2d}, {0xe0, 0x2e}, {0xf0, 0x2f}, {0xff, 0x30}); break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: err = sn9c102_write_const_regs(cam, {0x44, 0x01}, {0x40, 0x02}, {0x00, 0x03}, {0x1a, 0x04}, {0x50, 0x05}, {0x20, 0x06}, {0x10, 0x07}, {0x03, 0x10}, {0x08, 0x14}, {0xa2, 0x17}, {0x47, 0x18}, {0x00, 0x19}, {0x1d, 0x1a}, {0x10, 0x1b}, {0x02, 0x1c}, {0x03, 0x1d}, 
{0x0f, 0x1e}, {0x0c, 0x1f}, {0x00, 0x20}, {0x29, 0x21}, {0x40, 0x22}, {0x54, 0x23}, {0x66, 0x24}, {0x76, 0x25}, {0x85, 0x26}, {0x94, 0x27}, {0xa1, 0x28}, {0xae, 0x29}, {0xbb, 0x2a}, {0xc7, 0x2b}, {0xd3, 0x2c}, {0xde, 0x2d}, {0xea, 0x2e}, {0xf4, 0x2f}, {0xff, 0x30}, {0x00, 0x3F}, {0xC7, 0x40}, {0x01, 0x41}, {0x44, 0x42}, {0x00, 0x43}, {0x44, 0x44}, {0x00, 0x45}, {0x44, 0x46}, {0x00, 0x47}, {0xC7, 0x48}, {0x01, 0x49}, {0xC7, 0x4A}, {0x01, 0x4B}, {0xC7, 0x4C}, {0x01, 0x4D}, {0x44, 0x4E}, {0x00, 0x4F}, {0x44, 0x50}, {0x00, 0x51}, {0x44, 0x52}, {0x00, 0x53}, {0xC7, 0x54}, {0x01, 0x55}, {0xC7, 0x56}, {0x01, 0x57}, {0xC7, 0x58}, {0x01, 0x59}, {0x44, 0x5A}, {0x00, 0x5B}, {0x44, 0x5C}, {0x00, 0x5D}, {0x44, 0x5E}, {0x00, 0x5F}, {0xC7, 0x60}, {0x01, 0x61}, {0xC7, 0x62}, {0x01, 0x63}, {0xC7, 0x64}, {0x01, 0x65}, {0x44, 0x66}, {0x00, 0x67}, {0x44, 0x68}, {0x00, 0x69}, {0x44, 0x6A}, {0x00, 0x6B}, {0xC7, 0x6C}, {0x01, 0x6D}, {0xC7, 0x6E}, {0x01, 0x6F}, {0xC7, 0x70}, {0x01, 0x71}, {0x44, 0x72}, {0x00, 0x73}, {0x44, 0x74}, {0x00, 0x75}, {0x44, 0x76}, {0x00, 0x77}, {0xC7, 0x78}, {0x01, 0x79}, {0xC7, 0x7A}, {0x01, 0x7B}, {0xC7, 0x7C}, {0x01, 0x7D}, {0x44, 0x7E}, {0x00, 0x7F}, {0x14, 0x84}, {0x00, 0x85}, {0x27, 0x86}, {0x00, 0x87}, {0x07, 0x88}, {0x00, 0x89}, {0xEC, 0x8A}, {0x0f, 0x8B}, {0xD8, 0x8C}, {0x0f, 0x8D}, {0x3D, 0x8E}, {0x00, 0x8F}, {0x3D, 0x90}, {0x00, 0x91}, {0xCD, 0x92}, {0x0f, 0x93}, {0xf7, 0x94}, {0x0f, 0x95}, {0x0C, 0x96}, {0x00, 0x97}, {0x00, 0x98}, {0x66, 0x99}, {0x05, 0x9A}, {0x00, 0x9B}, {0x04, 0x9C}, {0x00, 0x9D}, {0x08, 0x9E}, {0x00, 0x9F}, {0x2D, 0xC0}, {0x2D, 0xC1}, {0x3A, 0xC2}, {0x05, 0xC3}, {0x04, 0xC4}, {0x3F, 0xC5}, {0x00, 0xC6}, {0x00, 0xC7}, {0x50, 0xC8}, {0x3C, 0xC9}, {0x28, 0xCA}, {0xD8, 0xCB}, {0x14, 0xCC}, {0xEC, 0xCD}, {0x32, 0xCE}, {0xDD, 0xCF}, {0x32, 0xD0}, {0xDD, 0xD1}, {0x6A, 0xD2}, {0x50, 0xD3}, {0x00, 0xD4}, {0x00, 0xD5}, {0x00, 0xD6}); break; default: break; } err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d, 0x00, 0x01, 0, 
0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0d, 0x00, 0x00, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x03, 0x01, 0xe1, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x04, 0x02, 0x81, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x05, 0x00, 0x17, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x06, 0x00, 0x11, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x62, 0x04, 0x9a, 0, 0); return err; } static int mi0360_get_ctrl(struct sn9c102_device* cam, struct v4l2_control* ctrl) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); u8 data[2]; switch (ctrl->id) { case V4L2_CID_EXPOSURE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x09, 2, data) < 0) return -EIO; ctrl->value = data[0]; return 0; case V4L2_CID_GAIN: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x35, 2, data) < 0) return -EIO; ctrl->value = data[1]; return 0; case V4L2_CID_RED_BALANCE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2c, 2, data) < 0) return -EIO; ctrl->value = data[1]; return 0; case V4L2_CID_BLUE_BALANCE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2d, 2, data) < 0) return -EIO; ctrl->value = data[1]; return 0; case SN9C102_V4L2_CID_GREEN_BALANCE: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x2e, 2, data) < 0) return -EIO; ctrl->value = data[1]; return 0; case V4L2_CID_HFLIP: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20, 2, data) < 0) return -EIO; ctrl->value = data[1] & 0x20 ? 1 : 0; return 0; case V4L2_CID_VFLIP: if (sn9c102_i2c_try_raw_read(cam, s, s->i2c_slave_id, 0x20, 2, data) < 0) return -EIO; ctrl->value = data[1] & 0x80 ? 
1 : 0; return 0; default: return -EINVAL; } return 0; } static int mi0360_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x09, ctrl->value, 0x00, 0, 0); break; case V4L2_CID_GAIN: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x35, 0x03, ctrl->value, 0, 0); break; case V4L2_CID_RED_BALANCE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2c, 0x03, ctrl->value, 0, 0); break; case V4L2_CID_BLUE_BALANCE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2d, 0x03, ctrl->value, 0, 0); break; case SN9C102_V4L2_CID_GREEN_BALANCE: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2b, 0x03, ctrl->value, 0, 0); err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x2e, 0x03, ctrl->value, 0, 0); break; case V4L2_CID_HFLIP: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x20, ctrl->value ? 0x40:0x00, ctrl->value ? 0x20:0x00, 0, 0); break; case V4L2_CID_VFLIP: err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x20, ctrl->value ? 0x80:0x00, ctrl->value ? 0x80:0x00, 0, 0); break; default: return -EINVAL; } return err ? 
-EIO : 0; } static int mi0360_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = 0, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C103: h_start = (u8)(rect->left - s->cropcap.bounds.left) + 0; break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1; break; default: break; } err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); return err; } static int mi0360_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) { err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0a, 0x00, 0x05, 0, 0); err += sn9c102_write_reg(cam, 0x60, 0x19); if (sn9c102_get_bridge(cam) == BRIDGE_SN9C105 || sn9c102_get_bridge(cam) == BRIDGE_SN9C120) err += sn9c102_write_reg(cam, 0xa6, 0x17); } else { err += sn9c102_i2c_try_raw_write(cam, s, 4, s->i2c_slave_id, 0x0a, 0x00, 0x02, 0, 0); err += sn9c102_write_reg(cam, 0x20, 0x19); if (sn9c102_get_bridge(cam) == BRIDGE_SN9C105 || sn9c102_get_bridge(cam) == BRIDGE_SN9C120) err += sn9c102_write_reg(cam, 0xa2, 0x17); } return err; } static const struct sn9c102_sensor mi0360 = { .name = "MI-0360", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C103 | BRIDGE_SN9C105 | BRIDGE_SN9C120, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x5d, .init = &mi0360_init, .qctrl = { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .minimum = 0x00, .maximum = 0x0f, .step = 0x01, .default_value = 0x05, .flags = 0, }, { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x25, .flags = 0, }, { .id 
= V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "horizontal mirror", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = 0, }, { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "vertical mirror", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x0f, .flags = 0, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x32, .flags = 0, }, { .id = SN9C102_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = 0x7f, .step = 0x01, .default_value = 0x25, .flags = 0, }, }, .get_ctrl = &mi0360_get_ctrl, .set_ctrl = &mi0360_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 640, .height = 480, }, .defrect = { .left = 0, .top = 0, .width = 640, .height = 480, }, }, .set_crop = &mi0360_set_crop, .pix_format = { .width = 640, .height = 480, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &mi0360_set_pix_format }; int sn9c102_probe_mi0360(struct sn9c102_device* cam) { u8 data[2]; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C103: if (sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01}, {0x28, 0x17})) return -EIO; break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: if (sn9c102_write_const_regs(cam, {0x01, 0xf1}, {0x00, 0xf1}, {0x01, 0x01}, {0x00, 0x01}, {0x28, 0x17})) return -EIO; break; default: break; } if (sn9c102_i2c_try_raw_read(cam, &mi0360, mi0360.i2c_slave_id, 0x00, 2, data) < 0) return -EIO; if (data[0] != 0x82 || data[1] != 0x43) return -ENODEV; sn9c102_attach_sensor(cam, &mi0360); return 0; }
gpl-2.0
xperiasailors/android_kernel_sony_msm8974
drivers/sh/superhyway/superhyway-sysfs.c
14684
1340
/* * drivers/sh/superhyway/superhyway-sysfs.c * * SuperHyway Bus sysfs interface * * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/types.h> #include <linux/superhyway.h> #define superhyway_ro_attr(name, fmt, field) \ static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct superhyway_device *s = to_superhyway_device(dev); \ return sprintf(buf, fmt, s->field); \ } /* VCR flags */ superhyway_ro_attr(perr_flags, "0x%02x\n", vcr.perr_flags); superhyway_ro_attr(merr_flags, "0x%02x\n", vcr.merr_flags); superhyway_ro_attr(mod_vers, "0x%04x\n", vcr.mod_vers); superhyway_ro_attr(mod_id, "0x%04x\n", vcr.mod_id); superhyway_ro_attr(bot_mb, "0x%02x\n", vcr.bot_mb); superhyway_ro_attr(top_mb, "0x%02x\n", vcr.top_mb); /* Misc */ superhyway_ro_attr(resource, "0x%08lx\n", resource[0].start); struct device_attribute superhyway_dev_attrs[] = { __ATTR_RO(perr_flags), __ATTR_RO(merr_flags), __ATTR_RO(mod_vers), __ATTR_RO(mod_id), __ATTR_RO(bot_mb), __ATTR_RO(top_mb), __ATTR_RO(resource), __ATTR_NULL, };
gpl-2.0
richardtrip/GT-P6200-kernel
drivers/staging/rtl8192su/ieee80211/rtl819x_HTProc.c
93
56306
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> ******************************************************************************/ #include "ieee80211.h" #include "rtl819x_HT.h" u8 MCS_FILTER_ALL[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; u8 MCS_FILTER_1SS[16] = {0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; u16 MCS_DATA_RATE[2][2][77] = { { {13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78 ,104, 156, 208, 234, 260, 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416, 468, 520, 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182, 182, 208, 156, 195, 195, 234, 273, 273, 312, 130, 156, 181, 156, 181, 208, 234, 208, 234, 260, 260, 286, 195, 234, 273, 234, 273, 312, 351, 312, 351, 390, 390, 429}, {14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289, 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520, 578, 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231, 173, 217, 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260, 231, 260, 289, 289, 318, 217, 260, 303, 260, 303, 347, 390, 347, 390, 433, 433, 477} }, { {27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486, 540, 81, 162, 243, 
324, 486, 648, 729, 810, 108, 216, 324, 432, 648, 864, 972, 1080, 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324, 378, 378, 432, 324, 405, 405, 486, 567, 567, 648, 270, 324, 378, 324, 378, 432, 486, 432, 486, 540, 540, 594, 405, 486, 567, 486, 567, 648, 729, 648, 729, 810, 810, 891}, {30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540, 600, 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720, 960, 1080, 1200, 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360, 420, 420, 480, 360, 450, 450, 540, 630, 630, 720, 300, 360, 420, 360, 420, 480, 540, 480, 540, 600, 600, 660, 450, 540, 630, 540, 630, 720, 810, 720, 810, 900, 900, 990} } }; static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf}; static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70}; static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e}; static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f}; static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf}; static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc}; static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e}; static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02}; static u8 DLINK_ATHEROS_1[3] = {0x00, 0x1c, 0xf0}; static u8 DLINK_ATHEROS_2[3] = {0x00, 0x21, 0x91}; static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94}; static u8 NETGEAR_BROADCOM[3] = {0x00, 0x1f, 0x33}; static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4}; /******************************************************************************************************************** *function: This function update default settings in pHTInfo structure * input: PRT_HIGH_THROUGHPUT pHTInfo * output: none * return: none * notice: These value need be modified if any changes. 
* *****************************************************************************************************************/ void HTUpdateDefaultSetting(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // ShortGI support pHTInfo->bRegShortGI20MHz= 1; pHTInfo->bRegShortGI40MHz= 1; // 40MHz channel support pHTInfo->bRegBW40MHz = 1; // CCK rate support in 40MHz channel if(pHTInfo->bRegBW40MHz) pHTInfo->bRegSuppCCK = 1; else pHTInfo->bRegSuppCCK = true; // AMSDU related pHTInfo->nAMSDU_MaxSize = 7935UL; pHTInfo->bAMSDU_Support = 0; // AMPDU related pHTInfo->bAMPDUEnable = 1; //YJ,test,090311 pHTInfo->AMPDU_Factor = 2; //// 0: 2n13(8K), 1:2n14(16K), 2:2n15(32K), 3:2n16(64k) pHTInfo->MPDU_Density = 0;// 0: No restriction, 1: 1/8usec, 2: 1/4usec, 3: 1/2usec, 4: 1usec, 5: 2usec, 6: 4usec, 7:8usec // MIMO Power Save pHTInfo->SelfMimoPs = 3;// 0: Static Mimo Ps, 1: Dynamic Mimo Ps, 3: No Limitation, 2: Reserved(Set to 3 automatically.) if(pHTInfo->SelfMimoPs == 2) pHTInfo->SelfMimoPs = 3; // 8190 only. Assign rate operation mode to firmware ieee->bTxDisableRateFallBack = 0; ieee->bTxUseDriverAssingedRate = 0; #ifdef TO_DO_LIST // 8190 only. 
Assign duration operation mode to firmware pMgntInfo->bTxEnableFwCalcDur = (BOOLEAN)pNdisCommon->bRegTxEnableFwCalcDur; #endif // 8190 only, Realtek proprietary aggregation mode // Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others pHTInfo->bRegRT2RTAggregation = 1;//0: Set MPDUDensity=2, 1: Set MPDUDensity=2(32k) for Realtek AP and set MPDUDensity=0(8k) for others // For Rx Reorder Control pHTInfo->bRegRxReorderEnable = 1;//YJ,test,090311 pHTInfo->RxReorderWinSize = 64; pHTInfo->RxReorderPendingTime = 30; } /******************************************************************************************************************** *function: This function print out each field on HT capability IE mainly from (Beacon/ProbeRsp/AssocReq) * input: u8* CapIE //Capability IE to be printed out * u8* TitleString //mainly print out caller function * output: none * return: none * notice: Driver should not print out this message by default. * *****************************************************************************************************************/ void HTDebugHTCapability(u8* CapIE, u8* TitleString ) { static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33}; // For 11n EWC definition, 2007.07.17, by Emily PHT_CAPABILITY_ELE pCapELE; if(!memcmp(CapIE, EWC11NHTCap, sizeof(EWC11NHTCap))) { //EWC IE IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__); pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[4]); }else pCapELE = (PHT_CAPABILITY_ELE)(&CapIE[0]); IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Capability>. 
Called by %s\n", TitleString ); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupported Channel Width = %s\n", (pCapELE->ChlWidth)?"20MHz": "20/40MHz"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 20M = %s\n", (pCapELE->ShortGI20Mhz)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport Short GI for 40M = %s\n", (pCapELE->ShortGI40Mhz)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport TX STBC = %s\n", (pCapELE->TxSTBC)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMSDU Size = %s\n", (pCapELE->MaxAMSDUSize)?"3839": "7935"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSupport CCK in 20/40 mode = %s\n", (pCapELE->DssCCk)?"YES": "NO"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMax AMPDU Factor = %d\n", pCapELE->MaxRxAMPDUFactor); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMPDU Density = %d\n", pCapELE->MPDUDensity); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tMCS Rate Set = [%x][%x][%x][%x][%x]\n", pCapELE->MCS[0],\ pCapELE->MCS[1], pCapELE->MCS[2], pCapELE->MCS[3], pCapELE->MCS[4]); return; } /******************************************************************************************************************** *function: This function print out each field on HT Information IE mainly from (Beacon/ProbeRsp) * input: u8* InfoIE //Capability IE to be printed out * u8* TitleString //mainly print out caller function * output: none * return: none * notice: Driver should not print out this message by default. 
* *****************************************************************************************************************/ void HTDebugHTInfo(u8* InfoIE, u8* TitleString) { static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34}; // For 11n EWC definition, 2007.07.17, by Emily PHT_INFORMATION_ELE pHTInfoEle; if(!memcmp(InfoIE, EWC11NHTInfo, sizeof(EWC11NHTInfo))) { // Not EWC IE IEEE80211_DEBUG(IEEE80211_DL_HT, "EWC IE in %s()\n", __FUNCTION__); pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[4]); }else pHTInfoEle = (PHT_INFORMATION_ELE)(&InfoIE[0]); IEEE80211_DEBUG(IEEE80211_DL_HT, "<Log HT Information Element>. Called by %s\n", TitleString); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tPrimary channel = %d\n", pHTInfoEle->ControlChl); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tSenondary channel ="); switch(pHTInfoEle->ExtChlOffset) { case 0: IEEE80211_DEBUG(IEEE80211_DL_HT, "Not Present\n"); break; case 1: IEEE80211_DEBUG(IEEE80211_DL_HT, "Upper channel\n"); break; case 2: IEEE80211_DEBUG(IEEE80211_DL_HT, "Reserved. 
Eooro!!!\n"); break; case 3: IEEE80211_DEBUG(IEEE80211_DL_HT, "Lower Channel\n"); break; } IEEE80211_DEBUG(IEEE80211_DL_HT, "\tRecommended channel width = %s\n", (pHTInfoEle->RecommemdedTxWidth)?"20Mhz": "40Mhz"); IEEE80211_DEBUG(IEEE80211_DL_HT, "\tOperation mode for protection = "); switch(pHTInfoEle->OptMode) { case 0: IEEE80211_DEBUG(IEEE80211_DL_HT, "No Protection\n"); break; case 1: IEEE80211_DEBUG(IEEE80211_DL_HT, "HT non-member protection mode\n"); break; case 2: IEEE80211_DEBUG(IEEE80211_DL_HT, "Suggest to open protection\n"); break; case 3: IEEE80211_DEBUG(IEEE80211_DL_HT, "HT mixed mode\n"); break; } IEEE80211_DEBUG(IEEE80211_DL_HT, "\tBasic MCS Rate Set = [%x][%x][%x][%x][%x]\n", pHTInfoEle->BasicMSC[0],\ pHTInfoEle->BasicMSC[1], pHTInfoEle->BasicMSC[2], pHTInfoEle->BasicMSC[3], pHTInfoEle->BasicMSC[4]); return; } /* * Return: true if station in half n mode and AP supports 40 bw */ bool IsHTHalfNmode40Bandwidth(struct ieee80211_device* ieee) { bool retValue = false; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode retValue = false; else if(pHTInfo->bRegBW40MHz == false) // station supports 40 bw retValue = false; else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode retValue = false; else if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ChlWidth) // ap support 40 bw retValue = true; else retValue = false; return retValue; } bool IsHTHalfNmodeSGI(struct ieee80211_device* ieee, bool is40MHz) { bool retValue = false; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(pHTInfo->bCurrentHTSupport == false ) // wireless is n mode retValue = false; else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) // station in half n mode retValue = false; else if(is40MHz) // ap support 40 bw { if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI40Mhz) // ap support 40 bw short GI retValue = true; else retValue = false; } else { 
if(((PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf))->ShortGI20Mhz) // ap support 40 bw short GI retValue = true; else retValue = false; } return retValue; } u16 HTHalfMcsToDataRate(struct ieee80211_device* ieee, u8 nMcsRate) { u8 is40MHz; u8 isShortGI; is40MHz = (IsHTHalfNmode40Bandwidth(ieee))?1:0; isShortGI = (IsHTHalfNmodeSGI(ieee, is40MHz))? 1:0; return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)]; } u16 HTMcsToDataRate( struct ieee80211_device* ieee, u8 nMcsRate) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; u8 is40MHz = (pHTInfo->bCurBW40MHz)?1:0; u8 isShortGI = (pHTInfo->bCurBW40MHz)? ((pHTInfo->bCurShortGI40MHz)?1:0): ((pHTInfo->bCurShortGI20MHz)?1:0); return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate&0x7f)]; } /******************************************************************************************************************** *function: This function returns current datarate. * input: struct ieee80211_device* ieee * u8 nDataRate * output: none * return: tx rate * notice: quite unsure about how to use this function //wb * *****************************************************************************************************************/ u16 TxCountToDataRate( struct ieee80211_device* ieee, u8 nDataRate) { u16 CCKOFDMRate[12] = {0x02 , 0x04 , 0x0b , 0x16 , 0x0c , 0x12 , 0x18 , 0x24 , 0x30 , 0x48 , 0x60 , 0x6c}; u8 is40MHz = 0; u8 isShortGI = 0; if(nDataRate < 12) { return CCKOFDMRate[nDataRate]; } else { if (nDataRate >= 0x10 && nDataRate <= 0x1f)//if(nDataRate > 11 && nDataRate < 28 ) { is40MHz = 0; isShortGI = 0; } else if(nDataRate >=0x20 && nDataRate <= 0x2f ) //(27, 44) { is40MHz = 1; isShortGI = 0; } else if(nDataRate >= 0x30 && nDataRate <= 0x3f ) //(43, 60) { is40MHz = 0; isShortGI = 1; } else if(nDataRate >= 0x40 && nDataRate <= 0x4f ) //(59, 76) { is40MHz = 1; isShortGI = 1; } return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf]; } } bool IsHTHalfNmodeAPs(struct ieee80211_device* ieee) { bool retValue = false; struct ieee80211_network* net = 
&ieee->current_network; if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) || (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) || (memcmp(net->bssid, PCI_RALINK, 3)==0) || (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) || (memcmp(net->bssid, AIRLINK_RALINK, 3)==0) || (net->ralink_cap_exist)) retValue = true; else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) || (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)|| (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)|| (net->broadcom_cap_exist)) retValue = true; else if(net->bssht.bdRT2RTAggregation) retValue = true; else retValue = false; return retValue; } /******************************************************************************************************************** *function: This function returns peer IOT. * input: struct ieee80211_device* ieee * output: none * return: * notice: * *****************************************************************************************************************/ void HTIOTPeerDetermine(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; struct ieee80211_network* net = &ieee->current_network; //FIXME: need to decide 92U_SOFTAP //LZM,090320 if(net->bssht.bdRT2RTAggregation){ pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK; if(net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_92SE){ pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK_92SE; } if(net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_SOFTAP){ pHTInfo->IOTPeer = HT_IOT_PEER_92U_SOFTAP; } } else if(net->broadcom_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; else if((memcmp(net->bssid, UNKNOWN_BORADCOM, 3)==0) || (memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0)|| (memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3)==0)) pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM; else if((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3)==0) || (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3)==0) || (memcmp(net->bssid, PCI_RALINK, 3)==0) || (memcmp(net->bssid, EDIMAX_RALINK, 3)==0) || 
(memcmp(net->bssid, AIRLINK_RALINK, 3)==0) || net->ralink_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_RALINK; else if((net->atheros_cap_exist )|| (memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0)|| (memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0)) pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS; else if ((memcmp(net->bssid, CISCO_BROADCOM, 3)==0)||net->cisco_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_CISCO; else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) || net->marvell_cap_exist) pHTInfo->IOTPeer = HT_IOT_PEER_MARVELL; else pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN; IEEE80211_DEBUG(IEEE80211_DL_IOT, "Joseph debug!! IOTPEER: %x\n", pHTInfo->IOTPeer); } /******************************************************************************************************************** *function: Check whether driver should declare received rate up to MCS13 only since some chipset is not good * at receiving MCS14~15 frame from some AP. * input: struct ieee80211_device* ieee * u8 * PeerMacAddr * output: none * return: return 1 if driver should declare MCS13 only(otherwise return 0) * *****************************************************************************************************************/ u8 HTIOTActIsDisableMCS14(struct ieee80211_device* ieee, u8* PeerMacAddr) { u8 ret = 0; return ret; } /** * Function: HTIOTActIsDisableMCS15 * * Overview: Check whether driver should declare capability of receving MCS15 * * Input: * PADAPTER Adapter, * * Output: None * Return: true if driver should disable MCS15 * 2008.04.15 Emily */ bool HTIOTActIsDisableMCS15(struct ieee80211_device* ieee) { bool retValue = false; return retValue; } /** * Function: HTIOTActIsDisableMCSTwoSpatialStream * * Overview: Check whether driver should declare capability of receving All 2 ss packets * * Input: * PADAPTER Adapter, * * Output: None * Return: true if driver should disable all two spatial stream packet * 2008.04.21 Emily */ bool HTIOTActIsDisableMCSTwoSpatialStream(struct ieee80211_device* ieee) { bool retValue 
= false; #ifdef TODO // Apply for 819u only //#if (HAL_CODE_BASE==RTL8192) //This rule only apply to Belkin(Ralink) AP if(IS_UNDER_11N_AES_MODE(Adapter)) { if((PlatformCompareMemory(PeerMacAddr, BELKINF5D8233V1_RALINK, 3)==0) || (PlatformCompareMemory(PeerMacAddr, PCI_RALINK, 3)==0) || (PlatformCompareMemory(PeerMacAddr, EDIMAX_RALINK, 3)==0)) { //Set True to disable this function. Disable by default, Emily, 2008.04.23 retValue = false; } } //#endif #endif #if 1 PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(ieee->is_ap_in_wep_tkip && ieee->is_ap_in_wep_tkip(ieee->dev)) { if( (pHTInfo->IOTPeer != HT_IOT_PEER_ATHEROS) && (pHTInfo->IOTPeer != HT_IOT_PEER_UNKNOWN) && (pHTInfo->IOTPeer != HT_IOT_PEER_MARVELL) ) retValue = true; } #endif return retValue; } /******************************************************************************************************************** *function: Check whether driver should disable EDCA turbo mode * input: struct ieee80211_device* ieee * u8* PeerMacAddr * output: none * return: return 1 if driver should disable EDCA turbo mode(otherwise return 0) * *****************************************************************************************************************/ u8 HTIOTActIsDisableEDCATurbo(struct ieee80211_device* ieee, u8* PeerMacAddr) { u8 retValue = false; // default enable EDCA Turbo mode. // Set specific EDCA parameter for different AP in DM handler. 
return retValue; } /******************************************************************************************************************** *function: Check whether we need to use OFDM to sned MGNT frame for broadcom AP * input: struct ieee80211_network *network //current network we live * output: none * return: return 1 if true * *****************************************************************************************************************/ u8 HTIOTActIsMgntUseCCK6M(struct ieee80211_network *network) { u8 retValue = 0; // 2008/01/25 MH Judeg if we need to use OFDM to sned MGNT frame for broadcom AP. // 2008/01/28 MH We must prevent that we select null bssid to link. if(network->broadcom_cap_exist) { retValue = 1; } return retValue; } u8 HTIOTActIsForcedCTS2Self(struct ieee80211_network *network) { u8 retValue = 0; if(network->marvell_cap_exist) { retValue = 1; } return retValue; } u8 HTIOTActIsForcedRTSCTS(struct ieee80211_device *ieee, struct ieee80211_network *network) { u8 retValue = 0; printk("============>%s(), %d\n", __FUNCTION__, network->realtek_cap_exit); // Force protection if(ieee->pHTInfo->bCurrentHTSupport) { //if(!network->realtek_cap_exit) if((ieee->pHTInfo->IOTPeer != HT_IOT_PEER_REALTEK)&& (ieee->pHTInfo->IOTPeer != HT_IOT_PEER_REALTEK_92SE)) { if((ieee->pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION) == 0) retValue = 1; } } return retValue; } u8 HTIOTActIsForcedAMSDU8K(struct ieee80211_device *ieee, struct ieee80211_network *network) { u8 retValue = 0; return retValue; } u8 HTIOTActIsCCDFsync(u8* PeerMacAddr) { u8 retValue = 0; if( (memcmp(PeerMacAddr, UNKNOWN_BORADCOM, 3)==0) || (memcmp(PeerMacAddr, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3)==0) || (memcmp(PeerMacAddr, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ==0)) { retValue = 1; } return retValue; } /* * 819xS single chip b-cut series cannot handle BAR */ u8 HTIOCActRejcectADDBARequest(struct ieee80211_network *network) { u8 retValue = 0; //if(IS_HARDWARE_TYPE_8192SE(Adapter) || // 
IS_HARDWARE_TYPE_8192SU(Adapter) //) { // Do not reject ADDBA REQ because some of the AP may // keep on sending ADDBA REQ qhich cause DHCP fail or ping loss! // by HPFan, 2008/12/30 //if(pBssDesc->Vender == HT_IOT_PEER_MARVELL) // return FALSE; } return retValue; } /* * EDCA parameters bias on downlink */ u8 HTIOTActIsEDCABiasRx(struct ieee80211_device* ieee,struct ieee80211_network *network) { u8 retValue = 0; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; { if(pHTInfo->IOTPeer==HT_IOT_PEER_ATHEROS || pHTInfo->IOTPeer==HT_IOT_PEER_BROADCOM || pHTInfo->IOTPeer==HT_IOT_PEER_RALINK) return 1; } return retValue; } u8 HTIOTActDisableShortGI(struct ieee80211_device* ieee,struct ieee80211_network *network) { u8 retValue = 0; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(pHTInfo->IOTPeer==HT_IOT_PEER_RALINK) { retValue = 1; } return retValue; } u8 HTIOTActDisableHighPower(struct ieee80211_device* ieee,struct ieee80211_network *network) { u8 retValue = 0; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(pHTInfo->IOTPeer==HT_IOT_PEER_RALINK || pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK || pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK_92SE) { retValue = 1; } return retValue; } void HTIOTActDetermineRaFunc(struct ieee80211_device* ieee, bool bPeerRx2ss) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; pHTInfo->IOTRaFunc &= HT_IOT_RAFUNC_DISABLE_ALL; if(pHTInfo->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss) pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_PEER_1R; if(pHTInfo->IOTAction & HT_IOT_ACT_AMSDU_ENABLE) pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_TX_AMSDU; printk("!!!!!!!!!!!!!!!!!!!!!!!!!!!IOTRaFunc = %8.8x\n", pHTInfo->IOTRaFunc); } u8 HTIOTActIsDisableTx40MHz(struct ieee80211_device* ieee,struct ieee80211_network *network) { u8 retValue = 0; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if( (KEY_TYPE_WEP104 == ieee->pairwise_key_type) || (KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->group_key_type) || (KEY_TYPE_WEP40 == ieee->group_key_type) || (KEY_TYPE_TKIP == 
ieee->pairwise_key_type) ) { if((pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK) && (network->bssht.bdSupportHT)) retValue = 1; } return retValue; } u8 HTIOTActIsTxNoAggregation(struct ieee80211_device* ieee,struct ieee80211_network *network) { u8 retValue = 0; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if( (KEY_TYPE_WEP104 == ieee->pairwise_key_type) || (KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->group_key_type) || (KEY_TYPE_WEP40 == ieee->group_key_type) || (KEY_TYPE_TKIP == ieee->pairwise_key_type) ) { if(pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK) retValue = 1; } return retValue; } u8 HTIOTActIsDisableTx2SS(struct ieee80211_device* ieee,struct ieee80211_network *network) { u8 retValue = 0; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if( (KEY_TYPE_WEP104 == ieee->pairwise_key_type) || (KEY_TYPE_WEP40 == ieee->pairwise_key_type) || (KEY_TYPE_WEP104 == ieee->group_key_type) || (KEY_TYPE_WEP40 == ieee->group_key_type) || (KEY_TYPE_TKIP == ieee->pairwise_key_type) ) { if((pHTInfo->IOTPeer==HT_IOT_PEER_REALTEK) && (network->bssht.bdSupportHT)) retValue = 1; } return retValue; } bool HTIOCActAllowPeerAggOnePacket(struct ieee80211_device* ieee,struct ieee80211_network *network) { bool retValue = false; PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; if(pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) { if((memcmp(network->bssid, NETGEAR_BROADCOM, 3)==0) && (network->bssht.bdBandWidth == HT_CHANNEL_WIDTH_20_40)) return true; } return retValue; } void HTResetIOTSetting( PRT_HIGH_THROUGHPUT pHTInfo ) { pHTInfo->IOTAction = 0; pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN; pHTInfo->IOTRaFunc = 0; } /******************************************************************************************************************** *function: Construct Capablility Element in Beacon... 
if HTEnable is turned on
 *   input:  struct ieee80211_device*	ieee
 *   	     u8*	posHTCap	//pointer to store Capability Ele
 *   	     u8*	len		//store length of CE
 *   	     u8		IsEncrypt	//whether encrypt, needed further
 *  output:  none
 *  return:  none
 *  notice:  posHTCap can't be null and should be initialized before.
 * *****************************************************************************************************************/
void HTConstructCapabilityElement(struct ieee80211_device* ieee, u8* posHTCap, u8* len, u8 IsEncrypt)
{
	PRT_HIGH_THROUGHPUT	pHT = ieee->pHTInfo;
	PHT_CAPABILITY_ELE	pCapELE = NULL;

	// Guard: both the output buffer and the HT state must exist.
	if ((posHTCap == NULL) || (pHT == NULL))
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTCap or pHTInfo can't be null in HTConstructCapabilityElement()\n");
		return;
	}
	memset(posHTCap, 0, *len);
	// EWC (pre-11n draft) peers expect a 4-byte OUI prefix before the element body.
	if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
	{
		u8	EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};	// For 11n EWC definition, 2007.07.17, by Emily
		memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
		pCapELE = (PHT_CAPABILITY_ELE)&(posHTCap[4]);
	}else
	{
		pCapELE = (PHT_CAPABILITY_ELE)posHTCap;
	}

	//HT capability info
	pCapELE->AdvCoding		= 0;	// This feature is not supported now!!
	if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
	{
		pCapELE->ChlWidth = 0;	// half-n mode: advertise 20MHz only
	}
	else
	{
		pCapELE->ChlWidth = (pHT->bRegBW40MHz?1:0);
	}

	pCapELE->MimoPwrSave		= pHT->SelfMimoPs;
	pCapELE->GreenField		= 0;	// This feature is not supported now!!
	pCapELE->ShortGI20Mhz		= 1;	// We can receive Short GI!!
	pCapELE->ShortGI40Mhz		= 1;	// We can receive Short GI!!
	pCapELE->TxSTBC			= 1;
	pCapELE->RxSTBC			= 0;
	pCapELE->DelayBA		= 0;	// Do not support now!!
	pCapELE->MaxAMSDUSize		= (MAX_RECEIVE_BUFFER_SIZE>=7935)?1:0;
	pCapELE->DssCCk			= ((pHT->bRegBW40MHz)?(pHT->bRegSuppCCK?1:0):0);
	pCapELE->PSMP			= 0;	// Do not support now!!
	pCapELE->LSigTxopProtect	= 0;	// Do not support now!!

	//MAC HT parameters info
	// TODO: Need to take care of this part
	IEEE80211_DEBUG(IEEE80211_DL_HT, "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n", pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);

	if( IsEncrypt)
	{
		pCapELE->MPDUDensity		= 7;	// 8us
		pCapELE->MaxRxAMPDUFactor	= 2;	// 2 is for 32 K and 3 is 64K
	}
	else
	{
		pCapELE->MaxRxAMPDUFactor	= 3;	// 2 is for 32 K and 3 is 64K
		pCapELE->MPDUDensity		= 0;	// no density
	}

	//Supported MCS set
	memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
	if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
		pCapELE->MCS[1] &= 0x7f;

	if(pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
		pCapELE->MCS[1] &= 0xbf;

	if(pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
		pCapELE->MCS[1] &= 0x00;

	// 2008.06.12
	// For RTL819X, if pairwisekey = wep/tkip, ap is ralink, we support only MCS0~7.
	if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
	{
		int i;
		for(i = 1; i< 16; i++)
			pCapELE->MCS[i] = 0;
	}

	//Extended HT Capability Info
	memset(&pCapELE->ExtHTCapInfo, 0, 2);

	//TXBF Capabilities
	memset(pCapELE->TxBFCap, 0, 4);

	//Antenna Selection Capabilities
	pCapELE->ASCap = 0;

	//add 2 to give space for element ID and len when construct frames
	if(pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)
		*len = 30 + 2;
	else
		*len = 26 + 2;

	return;
}

/********************************************************************************************************************
 *function:  Construct Information Element in Beacon... if HTEnable is turned on
 *   input:  struct ieee80211_device*	ieee
 *   	     u8*	posHTInfo	//pointer to store Information Ele
 *   	     u8*	len		//store len
 *   	     u8		IsEncrypt	//whether encrypt, needed further
 *  output:  none
 *  return:  none
 *  notice:  posHTInfo can't be null and be initialized before. only AP and IBSS sta should do this
 * *****************************************************************************************************************/
void HTConstructInfoElement(struct ieee80211_device* ieee, u8* posHTInfo, u8* len, u8 IsEncrypt)
{
	PRT_HIGH_THROUGHPUT	pHT = ieee->pHTInfo;
	PHT_INFORMATION_ELE	pHTInfoEle = (PHT_INFORMATION_ELE)posHTInfo;
	if ((posHTInfo == NULL) || (pHTInfoEle == NULL))
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "posHTInfo or pHTInfoEle can't be null in HTConstructInfoElement()\n");
		return;
	}

	memset(posHTInfo, 0, *len);
	if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
	{
		pHTInfoEle->ControlChl		= ieee->current_network.channel;
		// No extension channel without 40MHz; otherwise pick upper/lower
		// based on the primary channel number.
		pHTInfoEle->ExtChlOffset	= ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
							(ieee->current_network.channel<=6)?
							HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
		pHTInfoEle->RecommemdedTxWidth	= pHT->bRegBW40MHz;
		pHTInfoEle->RIFS		= 0;
		pHTInfoEle->PSMPAccessOnly	= 0;
		pHTInfoEle->SrvIntGranularity	= 0;
		pHTInfoEle->OptMode		= pHT->CurrentOpMode;
		pHTInfoEle->NonGFDevPresent	= 0;
		pHTInfoEle->DualBeacon		= 0;
		pHTInfoEle->SecondaryBeacon	= 0;
		pHTInfoEle->LSigTxopProtectFull	= 0;
		pHTInfoEle->PcoActive		= 0;
		pHTInfoEle->PcoPhase		= 0;

		memset(pHTInfoEle->BasicMSC, 0, 16);

		*len = 22 + 2; //same above
	}
	else
	{
		//STA should not generate High Throughput Information Element
		*len = 0;
	}
	return;
}

/*
 * According to experiment, Realtek AP to STA (based on rtl8190) may achieve best performance
 * if both STA and AP set limitation of aggregation size to 32K, that is, set AMPDU density to 2
 * (Ref: IEEE 11n specification). However, if Realtek STA associates to other AP, STA should set
 * limitation of aggregation size to 8K, otherwise, performance of traffic stream from STA to AP
 * will be much less than the traffic stream from AP to STA if both of the stream runs concurrently
 * at the same time.
 *
 * Frame Format
 * Element ID		Length		OUI		Type1		Reserved
 * 1 byte		1 byte		3 bytes		1 byte		1 byte
 *
 * OUI		= 0x00, 0xe0, 0x4c,
 * Type		= 0x02
 * Reserved	= 0x00
 *
 * 2007.8.21 by Emily
 */
/********************************************************************************************************************
 *function:  Construct Information Element in Beacon... in RT2RT condition
 *   input:  struct ieee80211_device*	ieee
 *   	     u8*	posRT2RTAgg	//pointer to store Information Ele
 *   	     u8*	len		//store len
 *  output:  none
 *  return:  none
 *  notice:
 * *****************************************************************************************************************/
void HTConstructRT2RTAggElement(struct ieee80211_device* ieee, u8* posRT2RTAgg, u8* len)
{
	if (posRT2RTAgg == NULL) {
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "posRT2RTAgg can't be null in HTConstructRT2RTAggElement()\n");
		return;
	}
	memset(posRT2RTAgg, 0, *len);
	*posRT2RTAgg++ = 0x00;
	*posRT2RTAgg++ = 0xe0;
	*posRT2RTAgg++ = 0x4c;
	*posRT2RTAgg++ = 0x02;
	*posRT2RTAgg++ = 0x01;
	*posRT2RTAgg = 0x10;//*posRT2RTAgg = 0x02;

	if(ieee->bSupportRemoteWakeUp) {
		*posRT2RTAgg |= 0x08;//RT_HT_CAP_USE_WOW;
	}

	*len = 6 + 2;
	return;
	// NOTE(review): everything below the return above is dead code kept
	// under #ifdef TODO from the vendor driver; it never executes.
#ifdef TODO
#if(HAL_CODE_BASE == RTL8192 && DEV_BUS_TYPE == USB_INTERFACE)
	/*
	//Emily. If it is required to Ask Realtek AP to send AMPDU during AES mode, enable this
	   section of code.
	if(IS_UNDER_11N_AES_MODE(Adapter))
	{
		posRT2RTAgg->Octet[5] |=RT_HT_CAP_USE_AMPDU;
	}else
	{
		posRT2RTAgg->Octet[5] &= 0xfb;
	}
	*/
#else
#endif
	posRT2RTAgg->Length = 6;
#endif
}

/********************************************************************************************************************
 *function:  Pick the right Rate Adaptive table to use
 *   input:  struct ieee80211_device*	ieee
 *   	     u8*	pOperateMCS	//A pointer to MCS rate bitmap
 *  return:  always we return true
 *  notice:
 * *****************************************************************************************************************/
u8 HT_PickMCSRate(struct ieee80211_device* ieee, u8* pOperateMCS)
{
	u8	i;
	if (pOperateMCS == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "pOperateMCS can't be null in HT_PickMCSRate()\n");
		return false;
	}

	switch(ieee->mode)
	{
	case IEEE_A:
	case IEEE_B:
	case IEEE_G:
		//legacy rate routine handled at selectedrate

		//no MCS rate
		for(i=0;i<=15;i++){
			pOperateMCS[i] = 0;
		}
		break;

	case IEEE_N_24G:	//assume CCK rate ok
	case IEEE_N_5G:
		// Legacy part we only use 6, 5.5,2,1 for N_24G and 6 for N_5G.
		// Legacy part shall be handled at SelectRateSet().

		//HT part
		// TODO: may be different if we have different number of antenna
		pOperateMCS[0] &=RATE_ADPT_1SS_MASK;	//support MCS 0~7
		pOperateMCS[1] &=RATE_ADPT_2SS_MASK;
		// NOTE(review): index 3 looks suspicious for an MCS32 mask
		// (MCS32 lives in bitmap byte 4) -- confirm against the
		// RATE_ADPT_MCS32_MASK definition before changing.
		pOperateMCS[3] &=RATE_ADPT_MCS32_MASK;
		break;

	//should never reach here
	default:
		break;
	}

	return true;
}

/*
 * Description:
 *	This function will get the highest speed rate in input MCS set.
 *
 * /param	Adapter			Pionter to Adapter entity
 *		pMCSRateSet		Pointer to MCS rate bitmap
 *		pMCSFilter		Pointer to MCS rate filter
 *
 * /return	Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter.
 *
 */
/********************************************************************************************************************
 *function:  This function will get the highest speed rate in input MCS set.
 *   input:  struct ieee80211_device*	ieee
 *   	     u8*	pMCSRateSet	//Pointer to MCS rate bitmap
 *   	     u8*	pMCSFilter	//Pointer to MCS rate filter
 *  return:  Highest MCS rate included in pMCSRateSet and filtered by pMCSFilter
 *  notice:
 * *****************************************************************************************************************/
u8 HTGetHighestMCSRate(struct ieee80211_device* ieee, u8* pMCSRateSet, u8* pMCSFilter)
{
	u8	i, j;
	u8	bitMap;
	u8	mcsRate = 0;
	u8	availableMcsRate[16];
	if (pMCSRateSet == NULL || pMCSFilter == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "pMCSRateSet or pMCSFilter can't be null in HTGetHighestMCSRate()\n");
		return false;
	}
	// Intersect the peer's rate set with the caller-supplied filter.
	for(i=0; i<16; i++)
		availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];

	// Bail out early when the intersection is empty.
	for(i = 0; i < 16; i++)
	{
		if(availableMcsRate[i] != 0)
			break;
	}
	if(i == 16)
		return false;

	// Scan every set bit and keep the MCS index with the highest data rate.
	for(i = 0; i < 16; i++)
	{
		if(availableMcsRate[i] != 0)
		{
			bitMap = availableMcsRate[i];
			for(j = 0; j < 8; j++)
			{
				if((bitMap%2) != 0)
				{
					if(HTMcsToDataRate(ieee, (8*i+j)) > HTMcsToDataRate(ieee, mcsRate))
						mcsRate = (8*i+j);
				}
				bitMap = bitMap>>1;
			}
		}
	}
	// 0x80 flags the result as an MCS index rather than a legacy rate.
	return (mcsRate|0x80);
}

/*
**
**1.Filter our operation rate set with AP's rate set
**2.shall reference channel bandwidth, STBC, Antenna number
**3.generate rate adative table for firmware
**David 20060906
**
** \pHTSupportedCap: the connected STA's supported rate Capability element
*/
u8 HTFilterMCSRate( struct ieee80211_device* ieee, u8* pSupportMCS, u8* pOperateMCS)
{

	u8 i=0;

	// filter out operational rate set not supported by AP, the lenth of it is 16
	for(i=0;i<=15;i++){
		pOperateMCS[i] = ieee->Regdot11HTOperationalRateSet[i]&pSupportMCS[i];
	}

	// TODO: adjust our operational rate set according to our channel bandwidth, STBC and Antenna number

	// TODO: fill suggested rate adaptive rate index and give firmware info using Tx command packet
	// we also shall suggested the first start rate set according to our singal strength
	HT_PickMCSRate(ieee, pOperateMCS);

	// For RTL819X, if pairwisekey = wep/tkip, we support only MCS0~7.
	if(ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		pOperateMCS[1] = 0;

	//
	// For RTL819X, we support only MCS0~15.
	// And also, we do not know how to use MCS32 now.
	//
	for(i=2; i<=15; i++)
		pOperateMCS[i] = 0;

	return true;
}

void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);

/*
 * HTOnAssocRsp - intersect our configured HT defaults with the associated
 * AP's capability/information elements and commit the result to pHTInfo.
 * Must only run after bCurrentHTSupport has been established.
 */
void HTOnAssocRsp(struct ieee80211_device *ieee)
{
	PRT_HIGH_THROUGHPUT	pHTInfo = ieee->pHTInfo;
	PHT_CAPABILITY_ELE	pPeerHTCap = NULL;
	PHT_INFORMATION_ELE	pPeerHTInfo = NULL;
	u16	nMaxAMSDUSize = 0;
	u8*	pMcsFilter = NULL;

	static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};		// For 11n EWC definition, 2007.07.17, by Emily
	static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34};	// For 11n EWC definition, 2007.07.17, by Emily

	if( pHTInfo->bCurrentHTSupport == false )
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
		return;
	}
	IEEE80211_DEBUG(IEEE80211_DL_HT, "===> HTOnAssocRsp_wq(): HT_ENABLE\n");

	// The peer buffers may carry the 4-byte EWC OUI prefix; skip it if present.
	if(!memcmp(pHTInfo->PeerHTCapBuf,EWC11NHTCap, sizeof(EWC11NHTCap)))
		pPeerHTCap = (PHT_CAPABILITY_ELE)(&pHTInfo->PeerHTCapBuf[4]);
	else
		pPeerHTCap = (PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf);

	if(!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
		pPeerHTInfo = (PHT_INFORMATION_ELE)(&pHTInfo->PeerHTInfoBuf[4]);
	else
		pPeerHTInfo = (PHT_INFORMATION_ELE)(pHTInfo->PeerHTInfoBuf);

	////////////////////////////////////////////////////////
	// Configurations:
	////////////////////////////////////////////////////////
	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_HT, pPeerHTCap, sizeof(HT_CAPABILITY_ELE));

	// Config Supported Channel Width setting
	HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));

	if(pHTInfo->bCurBW40MHz == true)
		pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false);

	//
	// Update short GI/ long GI setting
	//
	// TODO:
	pHTInfo->bCurShortGI20MHz= ((pHTInfo->bRegShortGI20MHz)?((pPeerHTCap->ShortGI20Mhz==1)?true:false):false);
	pHTInfo->bCurShortGI40MHz= ((pHTInfo->bRegShortGI40MHz)?((pPeerHTCap->ShortGI40Mhz==1)?true:false):false);

	//
	// Config TX STBC setting
	//
	// TODO:

	//
	// Config DSSS/CCK mode in 40MHz mode
	//
	// TODO:
	pHTInfo->bCurSuppCCK = ((pHTInfo->bRegSuppCCK)?((pPeerHTCap->DssCCk==1)?true:false):false);

	//
	// Config and configure A-MSDU setting
	//
	pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
	nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize==0)?3839:7935;
	if(pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize )
		pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
	else
		pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;

	//
	// Config A-MPDU setting
	//
	pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
	// Some APs in WEP/TKIP mode cannot handle A-MPDU at all.
	if(ieee->is_ap_in_wep_tkip && ieee->is_ap_in_wep_tkip(ieee->dev))
	{
		if( (pHTInfo->IOTPeer== HT_IOT_PEER_ATHEROS) ||
				(pHTInfo->IOTPeer == HT_IOT_PEER_UNKNOWN) )
			pHTInfo->bCurrentAMPDUEnable = false;
	}

	// <1> Decide AMPDU Factor
	// By Emily
	if(!pHTInfo->bRegRT2RTAggregation)
	{
		// Decide AMPDU Factor according to protocol handshake
		if(pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
			pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
		else
			pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
	}else
	{
		// Set MPDU density to 2 to Realtek AP, and set it to 0 for others
		// Replace MPDU factor declared in original association response frame format. 2007.08.20 by Emily
		if (ieee->current_network.bssht.bdRT2RTAggregation)
		{
			if( ieee->pairwise_key_type != KEY_TYPE_NA)
				// Realtek may set 32k in security mode and 64k for others
				pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
			else
				pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
		}else
		{
			if(pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
				pHTInfo->CurrentAMPDUFactor = pPeerHTCap->MaxRxAMPDUFactor;
			else
				pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
		}
	}

	// <2> Set AMPDU Minimum MPDU Start Spacing
	// 802.11n 3.0 section 9.7d.3
#if 0
	if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
		pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
	else
		pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
	if(ieee->pairwise_key_type != KEY_TYPE_NA )
		pHTInfo->CurrentMPDUDensity = 7; // 8us
#else
	// Use the larger (more restrictive) of our density and the peer's.
	if(pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
		pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
	else
		pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
#endif

	// Force TX AMSDU

	// Lanhsin: mark for tmp to avoid deauth by ap from s3
	//if(memcmp(pMgntInfo->Bssid, NETGEAR834Bv2_BROADCOM, 3)==0)
	if(pHTInfo->IOTAction & HT_IOT_ACT_TX_USE_AMSDU_8K)
	{
		pHTInfo->bCurrentAMPDUEnable = false;
		pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
		pHTInfo->ForcedAMSDUMaxSize = 7935;
	}

	// Rx Reorder Setting
	pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;

	//
	// Filter out unsupported HT rate for this AP
	// Update RATR table
	// This is only for 8190 ,8192 or later product which using firmware to handle rate adaptive mechanism.
	//

	// Handle Ralink AP bad MCS rate set condition. Joseph.
	// This fix the bug of Ralink AP. This may be removed in the future.
if(pPeerHTCap->MCS[0] == 0) pPeerHTCap->MCS[0] = 0xff; // Joseph test //LZM ADD 090318 HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1])!=0)); HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet); // // Config MIMO Power Save setting // pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave; if(pHTInfo->PeerMimoPs == MIMO_PS_STATIC) pMcsFilter = MCS_FILTER_1SS; else pMcsFilter = MCS_FILTER_ALL; //WB add for MCS8 bug // pMcsFilter = MCS_FILTER_1SS; ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, pMcsFilter); ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate; // // Config current operation mode. // pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode; } void HTSetConnectBwModeCallback(struct ieee80211_device* ieee); /******************************************************************************************************************** *function: initialize HT info(struct PRT_HIGH_THROUGHPUT) * input: struct ieee80211_device* ieee * output: none * return: none * notice: This function is called when * (1) MPInitialization Phase * (2) Receiving of Deauthentication from AP ********************************************************************************************************************/ // TODO: Should this funciton be called when receiving of Disassociation? void HTInitializeHTInfo(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // // These parameters will be reset when receiving deauthentication packet // IEEE80211_DEBUG(IEEE80211_DL_HT, "===========>%s()\n", __FUNCTION__); pHTInfo->bCurrentHTSupport = false; // 40MHz channel support pHTInfo->bCurBW40MHz = false; pHTInfo->bCurTxBW40MHz = false; // Short GI support pHTInfo->bCurShortGI20MHz = false; pHTInfo->bCurShortGI40MHz = false; pHTInfo->bForcedShortGI = false; // CCK rate support // This flag is set to true to support CCK rate by default. // It will be affected by "pHTInfo->bRegSuppCCK" and AP capabilities only when associate to // 11N BSS. 
pHTInfo->bCurSuppCCK = true; // AMSDU related pHTInfo->bCurrent_AMSDU_Support = false; pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; // AMPUD related pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density; pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; // Initialize all of the parameters related to 11n memset((void*)(&(pHTInfo->SelfHTCap)), 0, sizeof(pHTInfo->SelfHTCap)); memset((void*)(&(pHTInfo->SelfHTInfo)), 0, sizeof(pHTInfo->SelfHTInfo)); memset((void*)(&(pHTInfo->PeerHTCapBuf)), 0, sizeof(pHTInfo->PeerHTCapBuf)); memset((void*)(&(pHTInfo->PeerHTInfoBuf)), 0, sizeof(pHTInfo->PeerHTInfoBuf)); pHTInfo->bSwBwInProgress = false; pHTInfo->ChnlOp = CHNLOP_NONE; // Set default IEEE spec for Draft N pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE; // Realtek proprietary aggregation mode pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->RT2RT_HT_Mode = (RT_HT_CAPBILITY)0; pHTInfo->IOTPeer = 0; pHTInfo->IOTAction = 0; pHTInfo->IOTRaFunc = 0; //MCS rate initialized here { u8* RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]); RegHTSuppRateSets[0] = 0xFF; //support MCS 0~7 RegHTSuppRateSets[1] = 0xFF; //support MCS 8~15 RegHTSuppRateSets[4] = 0x01; //support MCS 32 } } /******************************************************************************************************************** *function: initialize Bss HT structure(struct PBSS_HT) * input: PBSS_HT pBssHT //to be initialized * output: none * return: none * notice: This function is called when initialize network structure ********************************************************************************************************************/ void HTInitializeBssDesc(PBSS_HT pBssHT) { pBssHT->bdSupportHT = false; memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf)); pBssHT->bdHTCapLen = 0; memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf)); pBssHT->bdHTInfoLen = 0; pBssHT->bdHTSpecVer= HT_SPEC_VER_IEEE; pBssHT->bdRT2RTAggregation = false; 
pBssHT->bdRT2RTLongSlotTime = false; pBssHT->RT2RT_HT_Mode = (RT_HT_CAPBILITY)0; } /******************************************************************************************************************** *function: initialize Bss HT structure(struct PBSS_HT) * input: struct ieee80211_device *ieee * struct ieee80211_network *pNetwork //usually current network we are live in * output: none * return: none * notice: This function should ONLY be called before association ********************************************************************************************************************/ void HTResetSelfAndSavePeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // u16 nMaxAMSDUSize; // PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf; // PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf; // u8* pMcsFilter; u8 bIOTAction = 0; // // Save Peer Setting before Association // IEEE80211_DEBUG(IEEE80211_DL_HT, "==============>%s()\n", __FUNCTION__); /*unmark bEnableHT flag here is the same reason why unmarked in function ieee80211_softmac_new_net. 
WB 2008.09.10*/ // if( pHTInfo->bEnableHT && pNetwork->bssht.bdSupportHT) if (pNetwork->bssht.bdSupportHT) { pHTInfo->bCurrentHTSupport = true; pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer; // Save HTCap and HTInfo information Element if(pNetwork->bssht.bdHTCapLen > 0 && pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf)) memcpy(pHTInfo->PeerHTCapBuf, pNetwork->bssht.bdHTCapBuf, pNetwork->bssht.bdHTCapLen); if(pNetwork->bssht.bdHTInfoLen > 0 && pNetwork->bssht.bdHTInfoLen <= sizeof(pHTInfo->PeerHTInfoBuf)) memcpy(pHTInfo->PeerHTInfoBuf, pNetwork->bssht.bdHTInfoBuf, pNetwork->bssht.bdHTInfoLen); // Check whether RT to RT aggregation mode is enabled if(pHTInfo->bRegRT2RTAggregation) { pHTInfo->bCurrentRT2RTAggregation = pNetwork->bssht.bdRT2RTAggregation; pHTInfo->bCurrentRT2RTLongSlotTime = pNetwork->bssht.bdRT2RTLongSlotTime; pHTInfo->RT2RT_HT_Mode = pNetwork->bssht.RT2RT_HT_Mode; } else { pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->RT2RT_HT_Mode = (RT_HT_CAPBILITY)0; } // Determine the IOT Peer Vendor. 
HTIOTPeerDetermine(ieee); // Decide IOT Action // Must be called after the parameter of pHTInfo->bCurrentRT2RTAggregation is decided pHTInfo->IOTAction = 0; bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14; bIOTAction = HTIOTActIsDisableMCS15(ieee); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15; bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS; bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO; bIOTAction = HTIOTActIsMgntUseCCK6M(pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M; bIOTAction = HTIOTActIsCCDFsync(pNetwork->bssid); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC; bIOTAction = HTIOTActIsForcedCTS2Self(pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF; //bIOTAction = HTIOTActIsForcedRTSCTS(ieee, pNetwork); //if(bIOTAction) // pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_RTS; bIOTAction = HTIOCActRejcectADDBARequest(pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_REJECT_ADDBA_REQ; bIOTAction = HTIOCActAllowPeerAggOnePacket(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT; bIOTAction = HTIOTActIsEDCABiasRx(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_EDCA_BIAS_ON_RX; bIOTAction = HTIOTActDisableShortGI(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_SHORT_GI; bIOTAction = HTIOTActDisableHighPower(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_HIGH_POWER; bIOTAction = HTIOTActIsForcedAMSDU8K(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_TX_USE_AMSDU_8K; bIOTAction = HTIOTActIsTxNoAggregation(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_TX_NO_AGGREGATION; bIOTAction = HTIOTActIsDisableTx40MHz(ieee, pNetwork); 
if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_TX_40_MHZ; bIOTAction = HTIOTActIsDisableTx2SS(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_TX_2SS; //must after HT_IOT_ACT_TX_NO_AGGREGATION bIOTAction = HTIOTActIsForcedRTSCTS(ieee, pNetwork); if(bIOTAction) pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_RTS; printk("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!IOTAction = %8.8x\n", pHTInfo->IOTAction); } else { pHTInfo->bCurrentHTSupport = false; pHTInfo->bCurrentRT2RTAggregation = false; pHTInfo->bCurrentRT2RTLongSlotTime = false; pHTInfo->RT2RT_HT_Mode = (RT_HT_CAPBILITY)0; pHTInfo->IOTAction = 0; pHTInfo->IOTRaFunc = 0; } } void HTUpdateSelfAndPeerSetting(struct ieee80211_device* ieee, struct ieee80211_network * pNetwork) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // PHT_CAPABILITY_ELE pPeerHTCap = (PHT_CAPABILITY_ELE)pNetwork->bssht.bdHTCapBuf; PHT_INFORMATION_ELE pPeerHTInfo = (PHT_INFORMATION_ELE)pNetwork->bssht.bdHTInfoBuf; if(pHTInfo->bCurrentHTSupport) { // // Config current operation mode. 
// if(pNetwork->bssht.bdHTInfoLen != 0) pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode; // // <TODO: Config according to OBSS non-HT STA present!!> // } } void HTUseDefaultSetting(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // u8 regBwOpMode; if(pHTInfo->bEnableHT) { pHTInfo->bCurrentHTSupport = true; pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK; pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz; pHTInfo->bCurShortGI20MHz= pHTInfo->bRegShortGI20MHz; pHTInfo->bCurShortGI40MHz= pHTInfo->bRegShortGI40MHz; pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support; pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize; pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable; pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor; pHTInfo->CurrentMPDUDensity = pHTInfo->CurrentMPDUDensity; // Set BWOpMode register //update RATR index0 HTFilterMCSRate(ieee, ieee->Regdot11HTOperationalRateSet, ieee->dot11HTOperationalRateSet); //function below is not implemented at all. WB #ifdef TODO Adapter->HalFunc.InitHalRATRTableHandler( Adapter, &pMgntInfo->dot11OperationalRateSet, pMgntInfo->dot11HTOperationalRateSet); #endif ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee, ieee->dot11HTOperationalRateSet, MCS_FILTER_ALL); ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate; } else { pHTInfo->bCurrentHTSupport = false; } return; } /******************************************************************************************************************** *function: check whether HT control field exists * input: struct ieee80211_device *ieee * u8* pFrame //coming skb->data * output: none * return: return true if HT control field exists(false otherwise) * notice: ********************************************************************************************************************/ u8 HTCCheck(struct ieee80211_device* ieee, u8* pFrame) { if(ieee->pHTInfo->bCurrentHTSupport) { if( (IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) { IEEE80211_DEBUG(IEEE80211_DL_HT, 
"HT CONTROL FILED EXIST!!\n"); return true; } } return false; } // // This function set bandwidth mode in protocol layer. // void HTSetConnectBwMode(struct ieee80211_device* ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; // u32 flags = 0; if(pHTInfo->bRegBW40MHz == false) return; // To reduce dummy operation // if((pHTInfo->bCurBW40MHz==false && Bandwidth==HT_CHANNEL_WIDTH_20) || // (pHTInfo->bCurBW40MHz==true && Bandwidth==HT_CHANNEL_WIDTH_20_40 && Offset==pHTInfo->CurSTAExtChnlOffset)) // return; // spin_lock_irqsave(&(ieee->bw_spinlock), flags); if(pHTInfo->bSwBwInProgress) { // spin_unlock_irqrestore(&(ieee->bw_spinlock), flags); return; } //if in half N mode, set to 20M bandwidth please 09.08.2008 WB. if(Bandwidth==HT_CHANNEL_WIDTH_20_40 && (!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))) { // Handle Illegal extention channel offset!! if(ieee->current_network.channel<2 && Offset==HT_EXTCHNL_OFFSET_LOWER) Offset = HT_EXTCHNL_OFFSET_NO_EXT; if(Offset==HT_EXTCHNL_OFFSET_UPPER || Offset==HT_EXTCHNL_OFFSET_LOWER) { pHTInfo->bCurBW40MHz = true; pHTInfo->CurSTAExtChnlOffset = Offset; } else { pHTInfo->bCurBW40MHz = false; pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; } } else { pHTInfo->bCurBW40MHz = false; pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT; } pHTInfo->bSwBwInProgress = true; // TODO: 2007.7.13 by Emily Wait 2000ms in order to garantee that switching // bandwidth is executed after scan is finished. It is a temporal solution // because software should ganrantee the last operation of switching bandwidth // is executed properlly. 
HTSetConnectBwModeCallback(ieee); // spin_unlock_irqrestore(&(ieee->bw_spinlock), flags); } void HTSetConnectBwModeCallback(struct ieee80211_device* ieee) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; IEEE80211_DEBUG(IEEE80211_DL_HT, "======>%s()\n", __FUNCTION__); if(pHTInfo->bCurBW40MHz) { if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_UPPER) ieee->set_chan(ieee->dev, ieee->current_network.channel+2); else if(pHTInfo->CurSTAExtChnlOffset==HT_EXTCHNL_OFFSET_LOWER) ieee->set_chan(ieee->dev, ieee->current_network.channel-2); else ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40, pHTInfo->CurSTAExtChnlOffset); } else { ieee->set_chan(ieee->dev, ieee->current_network.channel); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } pHTInfo->bSwBwInProgress = false; }
gpl-2.0
oddstone/splayer
Thirdparty/openssl-0.9.8x/crypto/pkcs12/p12_utl.c
93
5164
/* p12_utl.c */ /* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL * project 1999. */ /* ==================================================================== * Copyright (c) 1999 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ #include <stdio.h> #include "cryptlib.h" #include <openssl/pkcs12.h> #ifdef OPENSSL_SYS_NETWARE /* Rename these functions to avoid name clashes on NetWare OS */ #define uni2asc OPENSSL_uni2asc #define asc2uni OPENSSL_asc2uni #endif /* Cheap and nasty Unicode stuff */ unsigned char *asc2uni(const char *asc, int asclen, unsigned char **uni, int *unilen) { int ulen, i; unsigned char *unitmp; if (asclen == -1) asclen = strlen(asc); ulen = asclen*2 + 2; if (!(unitmp = OPENSSL_malloc(ulen))) return NULL; for (i = 0; i < ulen - 2; i+=2) { unitmp[i] = 0; unitmp[i + 1] = asc[i>>1]; } /* Make result double null terminated */ unitmp[ulen - 2] = 0; unitmp[ulen - 1] = 0; if (unilen) *unilen = ulen; if (uni) *uni = unitmp; return unitmp; } char *uni2asc(unsigned char *uni, int unilen) { int asclen, i; char *asctmp; asclen = unilen / 2; /* If no terminating zero allow for one */ if (!unilen || uni[unilen - 1]) asclen++; uni++; if (!(asctmp = OPENSSL_malloc(asclen))) return NULL; for (i = 0; i < unilen; i+=2) asctmp[i>>1] = uni[i]; asctmp[asclen - 1] = 0; return asctmp; } int i2d_PKCS12_bio(BIO *bp, PKCS12 *p12) { return ASN1_item_i2d_bio(ASN1_ITEM_rptr(PKCS12), bp, p12); } #ifndef OPENSSL_NO_FP_API int 
i2d_PKCS12_fp(FILE *fp, PKCS12 *p12) { return ASN1_item_i2d_fp(ASN1_ITEM_rptr(PKCS12), fp, p12); } #endif PKCS12 *d2i_PKCS12_bio(BIO *bp, PKCS12 **p12) { return ASN1_item_d2i_bio(ASN1_ITEM_rptr(PKCS12), bp, p12); } #ifndef OPENSSL_NO_FP_API PKCS12 *d2i_PKCS12_fp(FILE *fp, PKCS12 **p12) { return ASN1_item_d2i_fp(ASN1_ITEM_rptr(PKCS12), fp, p12); } #endif PKCS12_SAFEBAG *PKCS12_x5092certbag(X509 *x509) { return PKCS12_item_pack_safebag(x509, ASN1_ITEM_rptr(X509), NID_x509Certificate, NID_certBag); } PKCS12_SAFEBAG *PKCS12_x509crl2certbag(X509_CRL *crl) { return PKCS12_item_pack_safebag(crl, ASN1_ITEM_rptr(X509_CRL), NID_x509Crl, NID_crlBag); } X509 *PKCS12_certbag2x509(PKCS12_SAFEBAG *bag) { if(M_PKCS12_bag_type(bag) != NID_certBag) return NULL; if(M_PKCS12_cert_bag_type(bag) != NID_x509Certificate) return NULL; return ASN1_item_unpack(bag->value.bag->value.octet, ASN1_ITEM_rptr(X509)); } X509_CRL *PKCS12_certbag2x509crl(PKCS12_SAFEBAG *bag) { if(M_PKCS12_bag_type(bag) != NID_crlBag) return NULL; if(M_PKCS12_cert_bag_type(bag) != NID_x509Crl) return NULL; return ASN1_item_unpack(bag->value.bag->value.octet, ASN1_ITEM_rptr(X509_CRL)); }
gpl-2.0
lacvapps/linux
drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
1117
27168
/* * Linux network driver for QLogic BR-series Converged Network Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. * Copyright (c) 2014-2015 QLogic Corporation * All rights reserved * www.qlogic.com */ #include "bfa_ioc.h" #include "cna.h" #include "bfi.h" #include "bfi_reg.h" #include "bfa_defs.h" #define bfa_ioc_ct_sync_pos(__ioc) BIT(bfa_ioc_pcifn(__ioc)) #define BFA_IOC_SYNC_REQD_SH 16 #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) #define bfa_ioc_ct_sync_reqd_pos(__ioc) \ (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) /* * forward declarations */ static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc); static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc); static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc); static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc); static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); static void 
bfa_ioc_ct_set_cur_ioc_fwstate( struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc); static void bfa_ioc_ct_set_alt_ioc_fwstate( struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc); static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode); static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode); static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc); static const struct bfa_ioc_hwif nw_hwif_ct = { .ioc_pll_init = bfa_ioc_ct_pll_init, .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, .ioc_reg_init = bfa_ioc_ct_reg_init, .ioc_map_port = bfa_ioc_ct_map_port, .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set, .ioc_notify_fail = bfa_ioc_ct_notify_fail, .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, .ioc_sync_start = bfa_ioc_ct_sync_start, .ioc_sync_join = bfa_ioc_ct_sync_join, .ioc_sync_leave = bfa_ioc_ct_sync_leave, .ioc_sync_ack = bfa_ioc_ct_sync_ack, .ioc_sync_complete = bfa_ioc_ct_sync_complete, .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, }; static const struct bfa_ioc_hwif nw_hwif_ct2 = { .ioc_pll_init = bfa_ioc_ct2_pll_init, .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, .ioc_reg_init = bfa_ioc_ct2_reg_init, .ioc_map_port = bfa_ioc_ct2_map_port, .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat, .ioc_isr_mode_set = NULL, .ioc_notify_fail = bfa_ioc_ct_notify_fail, .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, .ioc_sync_start = bfa_ioc_ct_sync_start, .ioc_sync_join = bfa_ioc_ct_sync_join, .ioc_sync_leave = bfa_ioc_ct_sync_leave, .ioc_sync_ack = bfa_ioc_ct_sync_ack, 
.ioc_sync_complete = bfa_ioc_ct_sync_complete, .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, }; /* Called from bfa_ioc_attach() to map asic specific calls. */ void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) { ioc->ioc_hwif = &nw_hwif_ct; } void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) { ioc->ioc_hwif = &nw_hwif_ct2; } /* Return true if firmware of current driver matches the running firmware. */ static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) { enum bfi_ioc_state ioc_fwstate; u32 usecnt; struct bfi_ioc_image_hdr fwhdr; /** * If bios boot (flash based) -- do not increment usage count */ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < BFA_IOC_FWIMG_MINSZ) return true; bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); usecnt = readl(ioc->ioc_regs.ioc_usage_reg); /** * If usage count is 0, always return TRUE. */ if (usecnt == 0) { writel(1, ioc->ioc_regs.ioc_usage_reg); bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); writel(0, ioc->ioc_regs.ioc_fail_sync); return true; } ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); /** * Use count cannot be non-zero and chip in uninitialized state. */ BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT)); /** * Check if another driver with a different firmware is active */ bfa_nw_ioc_fwver_get(ioc, &fwhdr); if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); return false; } /** * Same firmware version. Increment the reference count. 
*/ usecnt++; writel(usecnt, ioc->ioc_regs.ioc_usage_reg); bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); return true; } static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) { u32 usecnt; /** * If bios boot (flash based) -- do not decrement usage count */ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < BFA_IOC_FWIMG_MINSZ) return; /** * decrement usage count */ bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); usecnt = readl(ioc->ioc_regs.ioc_usage_reg); BUG_ON(!(usecnt > 0)); usecnt--; writel(usecnt, ioc->ioc_regs.ioc_usage_reg); bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); } /* Notify other functions on HB failure. */ static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) { writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); /* Wait for halt to take effect */ readl(ioc->ioc_regs.ll_halt); readl(ioc->ioc_regs.alt_ll_halt); } /* Host to LPU mailbox message addresses */ static const struct { u32 hfn_mbox; u32 lpu_mbox; u32 hfn_pgn; } ct_fnreg[] = { { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } }; /* Host <-> LPU mailbox command/status registers - port 0 */ static const struct { u32 hfn; u32 lpu; } ct_p0reg[] = { { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } }; /* Host <-> LPU mailbox command/status registers - port 1 */ static const struct { u32 hfn; u32 lpu; } ct_p1reg[] = { { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } }; static const struct { u32 hfn_mbox; u32 lpu_mbox; 
u32 hfn_pgn; u32 hfn; u32 lpu; u32 lpu_read; } ct2_reg[] = { { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, CT2_HOSTFN_LPU0_READ_STAT}, { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, CT2_HOSTFN_LPU1_READ_STAT}, }; static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) { void __iomem *rb; int pcifn = bfa_ioc_pcifn(ioc); rb = bfa_ioc_bar0(ioc); ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; if (ioc->port_id == 0) { ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; } else { ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; } /* * PSS control registers */ ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; /* * IOC semaphore registers and serialization */ ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; ioc->ioc_regs.ioc_fail_sync = rb + 
BFA_IOC_FAIL_SYNC; /** * sram memory access */ ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; /* * err set reg : for notification of hb failure in fcmode */ ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) { void __iomem *rb; int port = bfa_ioc_portid(ioc); rb = bfa_ioc_bar0(ioc); ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; if (port == 0) { ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; } else { ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; } /* * PSS control registers */ ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; /* * IOC semaphore registers and serialization */ ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; /** * sram memory access */ ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; 
ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; /* * err set reg : for notification of hb failure in fcmode */ ioc->ioc_regs.err_set = rb + ERR_SET_REG; } /* Initialize IOC to port mapping. */ #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32; /** * For catapult, base port id on personality register and IOC type */ r32 = readl(rb + FNC_PERS_REG); r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; } static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32; r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); } /* Set interrupt mode for a function: INTX or MSIX */ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32, mode; r32 = readl(rb + FNC_PERS_REG); mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & __F0_INTX_STATUS; /** * If already in desired mode, do not change anything */ if ((!msix && mode) || (msix && !mode)) return; if (msix) mode = __F0_INTX_STATUS_MSIX; else mode = __F0_INTX_STATUS_INTA; r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); writel(r32, rb + FNC_PERS_REG); } static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) { u32 r32; r32 = readl(ioc->ioc_regs.lpu_read_stat); if (r32) { writel(1, ioc->ioc_regs.lpu_read_stat); return true; } return false; } /* MSI-X resource allocation for 1860 with no asic block */ #define HOSTFN_MSIX_DEFAULT 64 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c #define __MSIX_VT_NUMVT__MK 0x003ff800 #define __MSIX_VT_NUMVT__SH 11 #define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) #define __MSIX_VT_OFST_ 0x000007ff void bfa_nw_ioc_ct2_poweron(struct bfa_ioc 
*ioc) { void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32; r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); if (r32 & __MSIX_VT_NUMVT__MK) { writel(r32 & __MSIX_VT_OFST_, rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); return; } writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), rb + HOSTFN_MSIX_VT_OFST_NUMVT); writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); } /* Cleanup hw semaphore and usecnt registers */ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) { bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); writel(0, ioc->ioc_regs.ioc_usage_reg); bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); /* * Read the hw sem reg to make sure that it is locked * before we clear it. If it is not locked, writing 1 * will lock it instead of clearing it. */ readl(ioc->ioc_regs.ioc_sem_reg); bfa_nw_ioc_hw_sem_release(ioc); } /* Synchronized IOC failure processing routines */ static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) { u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); /* * Driver load time. If the sync required bit for this PCI fn * is set, it is due to an unclean exit by the driver for this * PCI fn in the previous incarnation. Whoever comes here first * should clean it up, no matter which PCI fn. 
*/ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { writel(0, ioc->ioc_regs.ioc_fail_sync); writel(1, ioc->ioc_regs.ioc_usage_reg); writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); return true; } return bfa_ioc_ct_sync_complete(ioc); } /* Synchronized IOC failure processing routines */ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) { u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); } static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) { u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | bfa_ioc_ct_sync_pos(ioc); writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); } static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) { u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync); } static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) { u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); u32 tmp_ackd; if (sync_ackd == 0) return true; /** * The check below is to see whether any other PCI fn * has reinitialized the ASIC (reset sync_ackd bits) * and failed again while this IOC was waiting for hw * semaphore (in bfa_iocpf_sm_semwait()). */ tmp_ackd = sync_ackd; if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) sync_ackd |= bfa_ioc_ct_sync_pos(ioc); if (sync_reqd == sync_ackd) { writel(bfa_ioc_ct_clear_sync_ackd(r32), ioc->ioc_regs.ioc_fail_sync); writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); return true; } /** * If another PCI fn reinitialized and failed again while * this IOC was waiting for hw sem, the sync_ackd bit for * this IOC need to be set again to allow reinitialization. 
*/ if (tmp_ackd != sync_ackd) writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); return false; } static void bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, enum bfi_ioc_state fwstate) { writel(fwstate, ioc->ioc_regs.ioc_fwstate); } static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc) { return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); } static void bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, enum bfi_ioc_state fwstate) { writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); } static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc) { return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate); } static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) { u32 pll_sclk, pll_fclk, r32; bool fcmode = (asic_mode == BFI_ASIC_MODE_FC); pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) | __APP_PLL_SCLK_JITLMT0_1(3U) | __APP_PLL_SCLK_CNTLMT0_1(1U); pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | __APP_PLL_LCLK_JITLMT0_1(3U) | __APP_PLL_LCLK_CNTLMT0_1(1U); if (fcmode) { writel(0, (rb + OP_MODE)); writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG)); } else { writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG)); } writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG); writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG); writel(pll_sclk | 
__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); readl(rb + HOSTFN0_INT_MSK); udelay(2000); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG); writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG); if (!fcmode) { writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); } r32 = readl(rb + PSS_CTL_REG); r32 &= ~__PSS_LMEM_RESET; writel(r32, (rb + PSS_CTL_REG)); udelay(1000); if (!fcmode) { writel(0, (rb + PMM_1T_RESET_REG_P0)); writel(0, (rb + PMM_1T_RESET_REG_P1)); } writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); udelay(1000); r32 = readl(rb + MBIST_STAT_REG); writel(0, (rb + MBIST_CTL_REG)); return BFA_STATUS_OK; } static void bfa_ioc_ct2_sclk_init(void __iomem *rb) { u32 r32; /* * put s_clk PLL and PLL FSM in reset */ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET); writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); /* * Ignore mode and program for the max clock (which is FC16) * Firmware/NFC will do the PLL init appropriately */ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); /* * while doing PLL init dont clock gate ethernet subsystem */ r32 = readl(rb + CT2_CHIP_MISC_PRG); writel(r32 | __ETH_CLK_ENABLE_PORT0, rb + CT2_CHIP_MISC_PRG); r32 = readl(rb + CT2_PCIE_MISC_REG); writel(r32 | __ETH_CLK_ENABLE_PORT1, rb + CT2_PCIE_MISC_REG); /* * set sclk value */ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); writel(r32 | 0x1061731b, rb + 
CT2_APP_PLL_SCLK_CTL_REG); /* * poll for s_clk lock or delay 1ms */ udelay(1000); /* * Dont do clock gating for ethernet subsystem, firmware/NFC will * do this appropriately */ } static void bfa_ioc_ct2_lclk_init(void __iomem *rb) { u32 r32; /* * put l_clk PLL and PLL FSM in reset */ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET); writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG); /* * set LPU speed (set for FC16 which will work for other modes) */ r32 = readl(rb + CT2_CHIP_MISC_PRG); writel(r32, (rb + CT2_CHIP_MISC_PRG)); /* * set LPU half speed (set for FC16 which will work for other modes) */ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG); /* * set lclk for mode (set for FC16) */ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); r32 |= 0x20c1731b; writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); /* * poll for s_clk lock or delay 1ms */ udelay(1000); } static void bfa_ioc_ct2_mem_init(void __iomem *rb) { u32 r32; r32 = readl(rb + PSS_CTL_REG); r32 &= ~__PSS_LMEM_RESET; writel(r32, rb + PSS_CTL_REG); udelay(1000); writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG); udelay(1000); writel(0, rb + CT2_MBIST_CTL_REG); } static void bfa_ioc_ct2_mac_reset(void __iomem *rb) { volatile u32 r32; bfa_ioc_ct2_sclk_init(rb); bfa_ioc_ct2_lclk_init(rb); /* * release soft reset on s_clk & l_clk */ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + CT2_APP_PLL_SCLK_CTL_REG); /* * release soft reset on s_clk & l_clk */ r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + CT2_APP_PLL_LCLK_CTL_REG); /* put port0, port1 MAC & AHB in reset */ writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET, rb + CT2_CSI_MAC_CONTROL_REG(0)); writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET, rb + 
CT2_CSI_MAC_CONTROL_REG(1)); } #define CT2_NFC_MAX_DELAY 1000 #define CT2_NFC_VER_VALID 0x143 #define BFA_IOC_PLL_POLL 1000000 static bool bfa_ioc_ct2_nfc_halted(void __iomem *rb) { volatile u32 r32; r32 = readl(rb + CT2_NFC_CSR_SET_REG); if (r32 & __NFC_CONTROLLER_HALTED) return true; return false; } static void bfa_ioc_ct2_nfc_resume(void __iomem *rb) { volatile u32 r32; int i; writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { r32 = readl(rb + CT2_NFC_CSR_SET_REG); if (!(r32 & __NFC_CONTROLLER_HALTED)) return; udelay(1000); } BUG_ON(1); } static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) { volatile u32 wgn, r32; u32 nfc_ver, i; wgn = readl(rb + CT2_WGN_STATUS); nfc_ver = readl(rb + CT2_RSC_GPR15_REG); if (wgn == (__A2T_AHB_LOAD | __WGN_READY) && nfc_ver >= CT2_NFC_VER_VALID) { if (bfa_ioc_ct2_nfc_halted(rb)) bfa_ioc_ct2_nfc_resume(rb); writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG); for (i = 0; i < BFA_IOC_PLL_POLL; i++) { r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) break; } BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); for (i = 0; i < BFA_IOC_PLL_POLL; i++) { r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) break; } BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); udelay(1000); r32 = readl(rb + CT2_CSI_FW_CTL_REG); BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); } else { writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { r32 = readl(rb + CT2_NFC_CSR_SET_REG); if (r32 & __NFC_CONTROLLER_HALTED) break; udelay(1000); } bfa_ioc_ct2_mac_reset(rb); bfa_ioc_ct2_sclk_init(rb); bfa_ioc_ct2_lclk_init(rb); /* release soft reset on s_clk & l_clk */ r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + CT2_APP_PLL_SCLK_CTL_REG); r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); writel(r32 & 
~__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + CT2_APP_PLL_LCLK_CTL_REG); } /* Announce flash device presence, if flash was corrupted. */ if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { r32 = readl(rb + PSS_GPIO_OUT_REG); writel(r32 & ~1, rb + PSS_GPIO_OUT_REG); r32 = readl(rb + PSS_GPIO_OE_REG); writel(r32 | 1, rb + PSS_GPIO_OE_REG); } /* * Mask the interrupts and clear any * pending interrupts left by BIOS/EFI */ writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK); writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK); /* For first time initialization, no need to clear interrupts */ r32 = readl(rb + HOST_SEM5_REG); if (r32 & 0x1) { r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); if (r32 == 1) { writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); } r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); if (r32 == 1) { writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); } } bfa_ioc_ct2_mem_init(rb); writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); return BFA_STATUS_OK; }
gpl-2.0
gospo/net-next
sound/soc/soc-devres.c
1373
4026
/* * soc-devres.c -- ALSA SoC Audio Layer devres functions * * Copyright (C) 2013 Linaro Ltd * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> static void devm_component_release(struct device *dev, void *res) { snd_soc_unregister_component(*(struct device **)res); } /** * devm_snd_soc_register_component - resource managed component registration * @dev: Device used to manage component * @cmpnt_drv: Component driver * @dai_drv: DAI driver * @num_dai: Number of DAIs to register * * Register a component with automatic unregistration when the device is * unregistered. */ int devm_snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, int num_dai) { struct device **ptr; int ret; ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai); if (ret == 0) { *ptr = dev; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_snd_soc_register_component); static void devm_platform_release(struct device *dev, void *res) { snd_soc_unregister_platform(*(struct device **)res); } /** * devm_snd_soc_register_platform - resource managed platform registration * @dev: Device used to manage platform * @platform_drv: platform to register * * Register a platform driver with automatic unregistration when the device is * unregistered. 
*/ int devm_snd_soc_register_platform(struct device *dev, const struct snd_soc_platform_driver *platform_drv) { struct device **ptr; int ret; ptr = devres_alloc(devm_platform_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = snd_soc_register_platform(dev, platform_drv); if (ret == 0) { *ptr = dev; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_snd_soc_register_platform); static void devm_card_release(struct device *dev, void *res) { snd_soc_unregister_card(*(struct snd_soc_card **)res); } /** * devm_snd_soc_register_card - resource managed card registration * @dev: Device used to manage card * @card: Card to register * * Register a card with automatic unregistration when the device is * unregistered. */ int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card) { struct snd_soc_card **ptr; int ret; ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = snd_soc_register_card(card); if (ret == 0) { *ptr = card; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_snd_soc_register_card); #ifdef CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM static void devm_dmaengine_pcm_release(struct device *dev, void *res) { snd_dmaengine_pcm_unregister(*(struct device **)res); } /** * devm_snd_dmaengine_pcm_register - resource managed dmaengine PCM registration * @dev: The parent device for the PCM device * @config: Platform specific PCM configuration * @flags: Platform specific quirks * * Register a dmaengine based PCM device with automatic unregistration when the * device is unregistered. 
*/ int devm_snd_dmaengine_pcm_register(struct device *dev, const struct snd_dmaengine_pcm_config *config, unsigned int flags) { struct device **ptr; int ret; ptr = devres_alloc(devm_dmaengine_pcm_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = snd_dmaengine_pcm_register(dev, config, flags); if (ret == 0) { *ptr = dev; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_snd_dmaengine_pcm_register); #endif
gpl-2.0
Galaxy-J5/android_kernel_samsung_j5nlte
drivers/usb/musb/musb_host.c
1885
72655
/* * MUSB OTG driver host support * * Copyright 2005 Mentor Graphics Corporation * Copyright (C) 2005-2006 by Texas Instruments * Copyright (C) 2006-2007 Nokia Corporation * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/dma-mapping.h> #include "musb_core.h" #include "musb_host.h" /* MUSB HOST status 22-mar-2006 * * - There's still lots of partial code duplication for fault paths, so * they aren't handled as consistently as they need to be. * * - PIO mostly behaved when last tested. * + including ep0, with all usbtest cases 9, 10 * + usbtest 14 (ep0out) doesn't seem to run at all * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest * configurations, but otherwise double buffering passes basic tests. * + for 2.6.N, for N > ~10, needs API changes for hcd framework. * * - DMA (CPPI) ... partially behaves, not currently recommended * + about 1/15 the speed of typical EHCI implementations (PCI) * + RX, all too often reqpkt seems to misbehave after tx * + TX, no known issues (other than evident silicon issue) * * - DMA (Mentor/OMAP) ...has at least toggle update problems * * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet * starvation ... nothing yet for TX, interrupt, or bulk. * * - Not tested with HNP, but some SRP paths seem to behave. * * NOTE 24-August-2006: * * - Bulk traffic finally uses both sides of hardware ep1, freeing up an * extra endpoint for periodic use enabling hub + keybd + mouse. That * mostly works, except that with "usbnet" it's easy to trigger cases * with "ping" where RX loses. (a) ping to davinci, even "ping -f", * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses * although ARP RX wins. (That test was done with a full speed link.) */ /* * NOTE on endpoint usage: * * CONTROL transfers all go through ep0. BULK ones go through dedicated IN * and OUT endpoints ... hardware is dedicated for those "async" queue(s). 
* (Yes, bulk _could_ use more of the endpoints than that, and would even * benefit from it.) * * INTERUPPT and ISOCHRONOUS transfers are scheduled to the other endpoints. * So far that scheduling is both dumb and optimistic: the endpoint will be * "claimed" until its software queue is no longer refilled. No multiplexing * of transfers between endpoints, or anything clever. */ static void musb_ep_program(struct musb *musb, u8 epnum, struct urb *urb, int is_out, u8 *buf, u32 offset, u32 len); /* * Clear TX fifo. Needed to avoid BABBLE errors. */ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) { struct musb *musb = ep->musb; void __iomem *epio = ep->regs; u16 csr; u16 lastcsr = 0; int retries = 1000; csr = musb_readw(epio, MUSB_TXCSR); while (csr & MUSB_TXCSR_FIFONOTEMPTY) { if (csr != lastcsr) dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); lastcsr = csr; csr |= MUSB_TXCSR_FLUSHFIFO; musb_writew(epio, MUSB_TXCSR, csr); csr = musb_readw(epio, MUSB_TXCSR); if (WARN(retries-- < 1, "Could not flush host TX%d fifo: csr: %04x\n", ep->epnum, csr)) return; mdelay(1); } } static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) { void __iomem *epio = ep->regs; u16 csr; int retries = 5; /* scrub any data left in the fifo */ do { csr = musb_readw(epio, MUSB_TXCSR); if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) break; musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); csr = musb_readw(epio, MUSB_TXCSR); udelay(10); } while (--retries); WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", ep->epnum, csr); /* and reset for the next transfer */ musb_writew(epio, MUSB_TXCSR, 0); } /* * Start transmit. Caller is responsible for locking shared resources. * musb must be locked. 
*/ static inline void musb_h_tx_start(struct musb_hw_ep *ep) { u16 txcsr; /* NOTE: no locks here; caller should lock and select EP */ if (ep->epnum) { txcsr = musb_readw(ep->regs, MUSB_TXCSR); txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; musb_writew(ep->regs, MUSB_TXCSR, txcsr); } else { txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; musb_writew(ep->regs, MUSB_CSR0, txcsr); } } static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) { u16 txcsr; /* NOTE: no locks here; caller should lock and select EP */ txcsr = musb_readw(ep->regs, MUSB_TXCSR); txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; if (is_cppi_enabled()) txcsr |= MUSB_TXCSR_DMAMODE; musb_writew(ep->regs, MUSB_TXCSR, txcsr); } static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) { if (is_in != 0 || ep->is_shared_fifo) ep->in_qh = qh; if (is_in == 0 || ep->is_shared_fifo) ep->out_qh = qh; } static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) { return is_in ? ep->in_qh : ep->out_qh; } /* * Start the URB at the front of an endpoint's queue * end must be claimed from the caller. 
* * Context: controller locked, irqs blocked */ static void musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) { u16 frame; u32 len; void __iomem *mbase = musb->mregs; struct urb *urb = next_urb(qh); void *buf = urb->transfer_buffer; u32 offset = 0; struct musb_hw_ep *hw_ep = qh->hw_ep; unsigned pipe = urb->pipe; u8 address = usb_pipedevice(pipe); int epnum = hw_ep->epnum; /* initialize software qh state */ qh->offset = 0; qh->segsize = 0; /* gather right source of data */ switch (qh->type) { case USB_ENDPOINT_XFER_CONTROL: /* control transfers always start with SETUP */ is_in = 0; musb->ep0_stage = MUSB_EP0_START; buf = urb->setup_packet; len = 8; break; case USB_ENDPOINT_XFER_ISOC: qh->iso_idx = 0; qh->frame = 0; offset = urb->iso_frame_desc[0].offset; len = urb->iso_frame_desc[0].length; break; default: /* bulk, interrupt */ /* actual_length may be nonzero on retry paths */ buf = urb->transfer_buffer + urb->actual_length; len = urb->transfer_buffer_length - urb->actual_length; } dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", qh, urb, address, qh->epnum, is_in ? "in" : "out", ({char *s; switch (qh->type) { case USB_ENDPOINT_XFER_CONTROL: s = ""; break; case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; default: s = "-intr"; break; }; s; }), epnum, buf + offset, len); /* Configure endpoint */ musb_ep_set_qh(hw_ep, is_in, qh); musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); /* transmit may have more work: start it when it is time */ if (is_in) return; /* determine if the time is right for a periodic transfer */ switch (qh->type) { case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n"); frame = musb_readw(mbase, MUSB_FRAME); /* FIXME this doesn't implement that scheduling policy ... 
* or handle framecounter wrapping */ if ((urb->transfer_flags & URB_ISO_ASAP) || (frame >= urb->start_frame)) { /* REVISIT the SOF irq handler shouldn't duplicate * this code; and we don't init urb->start_frame... */ qh->frame = 0; goto start; } else { qh->frame = urb->start_frame; /* enable SOF interrupt so we can count down */ dev_dbg(musb->controller, "SOF for %d\n", epnum); #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ musb_writeb(mbase, MUSB_INTRUSBE, 0xff); #endif } break; default: start: dev_dbg(musb->controller, "Start TX%d %s\n", epnum, hw_ep->tx_channel ? "dma" : "pio"); if (!hw_ep->tx_channel) musb_h_tx_start(hw_ep); else if (is_cppi_enabled() || tusb_dma_omap()) musb_h_tx_dma_start(hw_ep); } } /* Context: caller owns controller lock, IRQs are blocked */ static void musb_giveback(struct musb *musb, struct urb *urb, int status) __releases(musb->lock) __acquires(musb->lock) { dev_dbg(musb->controller, "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", urb, urb->complete, status, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), usb_pipein(urb->pipe) ? "in" : "out", urb->actual_length, urb->transfer_buffer_length ); usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb); spin_unlock(&musb->lock); usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status); spin_lock(&musb->lock); } /* For bulk/interrupt endpoints only */ static inline void musb_save_toggle(struct musb_qh *qh, int is_in, struct urb *urb) { void __iomem *epio = qh->hw_ep->regs; u16 csr; /* * FIXME: the current Mentor DMA code seems to have * problems getting toggle correct. */ if (is_in) csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE; else csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE; usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0); } /* * Advance this hardware endpoint's queue, completing the specified URB and * advancing to either the next URB queued to that qh, or else invalidating * that qh and advancing to the next qh scheduled after the current one. 
* * Context: caller owns controller lock, IRQs are blocked */ static void musb_advance_schedule(struct musb *musb, struct urb *urb, struct musb_hw_ep *hw_ep, int is_in) { struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in); struct musb_hw_ep *ep = qh->hw_ep; int ready = qh->is_ready; int status; status = (urb->status == -EINPROGRESS) ? 0 : urb->status; /* save toggle eagerly, for paranoia */ switch (qh->type) { case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: musb_save_toggle(qh, is_in, urb); break; case USB_ENDPOINT_XFER_ISOC: if (status == 0 && urb->error_count) status = -EXDEV; break; } qh->is_ready = 0; musb_giveback(musb, urb, status); qh->is_ready = ready; /* reclaim resources (and bandwidth) ASAP; deschedule it, and * invalidate qh as soon as list_empty(&hep->urb_list) */ if (list_empty(&qh->hep->urb_list)) { struct list_head *head; struct dma_controller *dma = musb->dma_controller; if (is_in) { ep->rx_reinit = 1; if (ep->rx_channel) { dma->channel_release(ep->rx_channel); ep->rx_channel = NULL; } } else { ep->tx_reinit = 1; if (ep->tx_channel) { dma->channel_release(ep->tx_channel); ep->tx_channel = NULL; } } /* Clobber old pointers to this qh */ musb_ep_set_qh(ep, is_in, NULL); qh->hep->hcpriv = NULL; switch (qh->type) { case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_BULK: /* fifo policy for these lists, except that NAKing * should rotate a qh to the end (for fairness). */ if (qh->mux == 1) { head = qh->ring.prev; list_del(&qh->ring); kfree(qh); qh = first_qh(head); break; } case USB_ENDPOINT_XFER_ISOC: case USB_ENDPOINT_XFER_INT: /* this is where periodic bandwidth should be * de-allocated if it's tracked and allocated; * and where we'd update the schedule tree... */ kfree(qh); qh = NULL; break; } } if (qh != NULL && qh->is_ready) { dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", hw_ep->epnum, is_in ? 
		'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

/*
 * Flush the RX FIFO of @hw_ep without touching the data toggle, and
 * clear REQPKT/AUTOREQ/AUTOCLEAR so the FIFO cannot refill behind us.
 * Returns the RXCSR value read back after the flush; the read also
 * flushes the register write buffer.
 */
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 *
 * Unloads up to RXCOUNT bytes from the endpoint FIFO into the URB's
 * buffer (iso: the current iso_frame_desc slot; non-iso: qh->offset
 * into transfer_buffer), records overflow/short-packet status on the
 * URB, and either requests the next IN packet or stops.
 * Returns true when this URB's transfer is complete.
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16 rx_count;
	u8 *buf;
	u16 csr;
	bool done = false;
	u32 length;
	int do_flush = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	int pipe = urb->pipe;
	void *buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			/* device sent more than this slot can hold:
			 * clip the copy and flush the remainder */
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		/* short read with URB_SHORT_NOT_OK is an error */
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

/*
 * Program and start a TX DMA transfer of @length bytes at @offset into
 * the URB's DMA buffer.  Selects Mentor DMA mode 1 (multi-packet) vs
 * mode 0 and the AUTOSET policy when built with CONFIG_USB_INVENTRA_DMA;
 * otherwise defers mode selection to the CPPI/TUSB OMAP engines.
 * Returns false (with the channel released and TXCSR scrubbed) if the
 * channel could not be programmed, true if DMA was started.
 */
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table
		 * below
		 * bulk_split hb_mult	Autoset_Enable
		 *	0	1	Yes(Normal)
		 *	0	>1	No(High BW ISO)
		 *	1	1	Yes(HS bulk)
		 *	1	>1	Yes(FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		/* programming failed: fall back to PIO for this endpoint */
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 *
 * Sets up addressing, type/interval and max-packet registers for the
 * hardware endpoint, scrubs stale CSR state, then kicks off the first
 * OUT write (PIO or DMA) or the first IN token.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		/* zero-length OUT: no point in DMA */
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				/* bulk splitting: pack multiple usb packets
				 * per FIFO load; high bits of MAXP hold the
				 * multiplier */
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				/* no mapped buffer: iterate the URB's
				 * scatterlist for the first chunk */
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg"
							"list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 *
 * Called on a bulk NAK timeout: clears the timeout condition in the
 * CSR, aborts any in-flight DMA (crediting its byte count to the URB),
 * saves the data toggle, rotates the queue, and restarts with the next
 * queue head.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/* clear nak timeout bit */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 *
 * Drives the ep0 state machine one step: reads IN data, parses the
 * SETUP packet to pick the data direction, or writes the next OUT
 * chunk, advancing musb->ep0_stage as it goes.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 *
 * Translates CSR0 error bits into a URB status, aborts on fault, or
 * steps the control transfer forward (via musb_h_ep0_continue) and
 * finally launches the status stage.
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			/* rotate the bulk queue so other endpoints
			 * don't starve behind a NAKing device */
			dev_dbg(musb->controller,
				"NAK timeout on TX%d ep\n", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			dev_dbg(musb->controller,
				"TX end=%d device not responding\n", epnum);
			/* NOTE:  this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
			return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 * All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 * thus be a great candidate for using mode 1 ... for all but the
 * last packet of one URB's transfer.
 */

#endif

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 *
 * Maps RXCSR error bits to a URB status and aborts on fault; otherwise
 * completes or continues an in-flight DMA transfer, or programs a new
 * Mentor-DMA receive, or falls back to PIO unload via
 * musb_host_packet_rx() (using an sg_miter when transfer_buffer is
 * NULL).  NOTE(review): sg iteration uses SG_MITER_ATOMIC, consistent
 * with running in IRQ context.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		/* a DMA transfer just finished: collect its length and
		 * decide whether the URB is complete */
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else  {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + xfer_len >=
				urb->transfer_buffer_length
			|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				/* channel_program failed: release the
				 * channel and fall back to PIO below */
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			unsigned int received_len;

			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);

			/*
			 * We need to map sg if the transfer_buffer is
			 * NULL.
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* Calculate the number of bytes received */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg)
			qh->use_sg = false;

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
*/ static int musb_schedule( struct musb *musb, struct musb_qh *qh, int is_in) { int idle; int best_diff; int best_end, epnum; struct musb_hw_ep *hw_ep = NULL; struct list_head *head = NULL; u8 toggle; u8 txtype; struct urb *urb = next_urb(qh); /* use fixed hardware for control and bulk */ if (qh->type == USB_ENDPOINT_XFER_CONTROL) { head = &musb->control; hw_ep = musb->control_ep; goto success; } /* else, periodic transfers get muxed to other endpoints */ /* * We know this qh hasn't been scheduled, so all we need to do * is choose which hardware endpoint to put it on ... * * REVISIT what we really want here is a regular schedule tree * like e.g. OHCI uses. */ best_diff = 4096; best_end = -1; for (epnum = 1, hw_ep = musb->endpoints + 1; epnum < musb->nr_endpoints; epnum++, hw_ep++) { int diff; if (musb_ep_get_qh(hw_ep, is_in) != NULL) continue; if (hw_ep == musb->bulk_ep) continue; if (is_in) diff = hw_ep->max_packet_sz_rx; else diff = hw_ep->max_packet_sz_tx; diff -= (qh->maxpacket * qh->hb_mult); if (diff >= 0 && best_diff > diff) { /* * Mentor controller has a bug in that if we schedule * a BULK Tx transfer on an endpoint that had earlier * handled ISOC then the BULK transfer has to start on * a zero toggle. If the BULK transfer starts on a 1 * toggle then this transfer will fail as the mentor * controller starts the Bulk transfer on a 0 toggle * irrespective of the programming of the toggle bits * in the TXCSR register. Check for this condition * while allocating the EP for a Tx Bulk transfer. If * so skip this EP. 
*/ hw_ep = musb->endpoints + epnum; toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) >> 4) & 0x3; if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) continue; best_diff = diff; best_end = epnum; } } /* use bulk reserved ep1 if no other ep is free */ if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { hw_ep = musb->bulk_ep; if (is_in) head = &musb->in_bulk; else head = &musb->out_bulk; /* Enable bulk RX/TX NAK timeout scheme when bulk requests are * multiplexed. This scheme doen't work in high speed to full * speed scenario as NAK interrupts are not coming from a * full speed device connected to a high speed device. * NAK timeout interval is 8 (128 uframe or 16ms) for HS and * 4 (8 frame or 8ms) for FS device. */ if (qh->dev) qh->intv_reg = (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; goto success; } else if (best_end < 0) { return -ENOSPC; } idle = 1; qh->mux = 0; hw_ep = musb->endpoints + best_end; dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end); success: if (head) { idle = list_empty(head); list_add_tail(&qh->ring, head); qh->mux = 1; } qh->hw_ep = hw_ep; qh->hep->hcpriv = qh; if (idle) musb_start_urb(musb, is_in, qh); return 0; } static int musb_urb_enqueue( struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { unsigned long flags; struct musb *musb = hcd_to_musb(hcd); struct usb_host_endpoint *hep = urb->ep; struct musb_qh *qh; struct usb_endpoint_descriptor *epd = &hep->desc; int ret; unsigned type_reg; unsigned interval; /* host role must be active */ if (!is_host_active(musb) || !musb->is_active) return -ENODEV; spin_lock_irqsave(&musb->lock, flags); ret = usb_hcd_link_urb_to_ep(hcd, urb); qh = ret ? NULL : hep->hcpriv; if (qh) urb->hcpriv = qh; spin_unlock_irqrestore(&musb->lock, flags); /* DMA mapping was already done, if needed, and this urb is on * hep->urb_list now ... 
so we're done, unless hep wasn't yet * scheduled onto a live qh. * * REVISIT best to keep hep->hcpriv valid until the endpoint gets * disabled, testing for empty qh->ring and avoiding qh setup costs * except for the first urb queued after a config change. */ if (qh || ret) return ret; /* Allocate and initialize qh, minimizing the work done each time * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. * * REVISIT consider a dedicated qh kmem_cache, so it's harder * for bugs in other kernel code to break this driver... */ qh = kzalloc(sizeof *qh, mem_flags); if (!qh) { spin_lock_irqsave(&musb->lock, flags); usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock_irqrestore(&musb->lock, flags); return -ENOMEM; } qh->hep = hep; qh->dev = urb->dev; INIT_LIST_HEAD(&qh->ring); qh->is_ready = 1; qh->maxpacket = usb_endpoint_maxp(epd); qh->type = usb_endpoint_type(epd); /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. * Some musb cores don't support high bandwidth ISO transfers; and * we don't (yet!) support high bandwidth interrupt transfers. */ qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03); if (qh->hb_mult > 1) { int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); if (ok) ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); if (!ok) { ret = -EMSGSIZE; goto done; } qh->maxpacket &= 0x7ff; } qh->epnum = usb_endpoint_num(epd); /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ qh->addr_reg = (u8) usb_pipedevice(urb->pipe); /* precompute rxtype/txtype/type0 register */ type_reg = (qh->type << 4) | qh->epnum; switch (urb->dev->speed) { case USB_SPEED_LOW: type_reg |= 0xc0; break; case USB_SPEED_FULL: type_reg |= 0x80; break; default: type_reg |= 0x40; } qh->type_reg = type_reg; /* Precompute RXINTERVAL/TXINTERVAL register */ switch (qh->type) { case USB_ENDPOINT_XFER_INT: /* * Full/low speeds use the linear encoding, * high speed uses the logarithmic encoding. 
*/ if (urb->dev->speed <= USB_SPEED_FULL) { interval = max_t(u8, epd->bInterval, 1); break; } /* FALLTHROUGH */ case USB_ENDPOINT_XFER_ISOC: /* ISO always uses logarithmic encoding */ interval = min_t(u8, epd->bInterval, 16); break; default: /* REVISIT we actually want to use NAK limits, hinting to the * transfer scheduling logic to try some other qh, e.g. try * for 2 msec first: * * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; * * The downside of disabling this is that transfer scheduling * gets VERY unfair for nonperiodic transfers; a misbehaving * peripheral could make that hurt. That's perfectly normal * for reads from network or serial adapters ... so we have * partial NAKlimit support for bulk RX. * * The upside of disabling it is simpler transfer scheduling. */ interval = 0; } qh->intv_reg = interval; /* precompute addressing for external hub/tt ports */ if (musb->is_multipoint) { struct usb_device *parent = urb->dev->parent; if (parent != hcd->self.root_hub) { qh->h_addr_reg = (u8) parent->devnum; /* set up tt info if needed */ if (urb->dev->tt) { qh->h_port_reg = (u8) urb->dev->ttport; if (urb->dev->tt->hub) qh->h_addr_reg = (u8) urb->dev->tt->hub->devnum; if (urb->dev->tt->multi) qh->h_addr_reg |= 0x80; } } } /* invariant: hep->hcpriv is null OR the qh that's already scheduled. * until we get real dma queues (with an entry for each urb/buffer), * we only have work to do in the former case. */ spin_lock_irqsave(&musb->lock, flags); if (hep->hcpriv || !next_urb(qh)) { /* some concurrent activity submitted another urb to hep... * odd, rare, error prone, but legal. */ kfree(qh); qh = NULL; ret = 0; } else ret = musb_schedule(musb, qh, epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); if (ret == 0) { urb->hcpriv = qh; /* FIXME set urb->start_frame for iso/intr, it's tested in * musb_start_urb(), but otherwise only konicawc cares ... 
*/ } spin_unlock_irqrestore(&musb->lock, flags); done: if (ret != 0) { spin_lock_irqsave(&musb->lock, flags); usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock_irqrestore(&musb->lock, flags); kfree(qh); } return ret; } /* * abort a transfer that's at the head of a hardware queue. * called with controller locked, irqs blocked * that hardware queue advances to the next transfer, unless prevented */ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) { struct musb_hw_ep *ep = qh->hw_ep; struct musb *musb = ep->musb; void __iomem *epio = ep->regs; unsigned hw_end = ep->epnum; void __iomem *regs = ep->musb->mregs; int is_in = usb_pipein(urb->pipe); int status = 0; u16 csr; musb_ep_select(regs, hw_end); if (is_dma_capable()) { struct dma_channel *dma; dma = is_in ? ep->rx_channel : ep->tx_channel; if (dma) { status = ep->musb->dma_controller->channel_abort(dma); dev_dbg(musb->controller, "abort %cX%d DMA for urb %p --> %d\n", is_in ? 'R' : 'T', ep->epnum, urb, status); urb->actual_length += dma->actual_len; } } /* turn off DMA requests, discard state, stop polling ... */ if (ep->epnum && is_in) { /* giveback saves bulk toggle */ csr = musb_h_flush_rxfifo(ep, 0); /* REVISIT we still get an irq; should likely clear the * endpoint's irq status here to avoid bogus irqs. * clearing that status is platform-specific... */ } else if (ep->epnum) { musb_h_tx_flush_fifo(ep); csr = musb_readw(epio, MUSB_TXCSR); csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_RXSTALL | MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_TXPKTRDY); musb_writew(epio, MUSB_TXCSR, csr); /* REVISIT may need to clear FLUSHFIFO ... 
*/ musb_writew(epio, MUSB_TXCSR, csr); /* flush cpu writebuffer */ csr = musb_readw(epio, MUSB_TXCSR); } else { musb_h_ep0_flush_fifo(ep); } if (status == 0) musb_advance_schedule(ep->musb, urb, ep, is_in); return status; } static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct musb *musb = hcd_to_musb(hcd); struct musb_qh *qh; unsigned long flags; int is_in = usb_pipein(urb->pipe); int ret; dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), is_in ? "in" : "out"); spin_lock_irqsave(&musb->lock, flags); ret = usb_hcd_check_unlink_urb(hcd, urb, status); if (ret) goto done; qh = urb->hcpriv; if (!qh) goto done; /* * Any URB not actively programmed into endpoint hardware can be * immediately given back; that's any URB not at the head of an * endpoint queue, unless someday we get real DMA queues. And even * if it's at the head, it might not be known to the hardware... * * Otherwise abort current transfer, pending DMA, etc.; urb->status * has already been updated. This is a synchronous abort; it'd be * OK to hold off until after some IRQ, though. * * NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list || musb_ep_get_qh(qh->hw_ep, is_in) != qh) { int ready = qh->is_ready; qh->is_ready = 0; musb_giveback(musb, urb, 0); qh->is_ready = ready; /* If nothing else (usually musb_giveback) is using it * and its URB list has emptied, recycle this qh. 
*/ if (ready && list_empty(&qh->hep->urb_list)) { qh->hep->hcpriv = NULL; list_del(&qh->ring); kfree(qh); } } else ret = musb_cleanup_urb(urb, qh); done: spin_unlock_irqrestore(&musb->lock, flags); return ret; } /* disable an endpoint */ static void musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) { u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; unsigned long flags; struct musb *musb = hcd_to_musb(hcd); struct musb_qh *qh; struct urb *urb; spin_lock_irqsave(&musb->lock, flags); qh = hep->hcpriv; if (qh == NULL) goto exit; /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ /* Kick the first URB off the hardware, if needed */ qh->is_ready = 0; if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) { urb = next_urb(qh); /* make software (then hardware) stop ASAP */ if (!urb->unlinked) urb->status = -ESHUTDOWN; /* cleanup */ musb_cleanup_urb(urb, qh); /* Then nuke all the others ... and advance the * queue on hw_ep (e.g. bulk ring) when we're done. */ while (!list_empty(&hep->urb_list)) { urb = next_urb(qh); urb->status = -ESHUTDOWN; musb_advance_schedule(musb, urb, qh->hw_ep, is_in); } } else { /* Just empty the queue; the hardware is busy with * other transfers, and since !qh->is_ready nothing * will activate any of these as it advances. */ while (!list_empty(&hep->urb_list)) musb_giveback(musb, next_urb(qh), -ESHUTDOWN); hep->hcpriv = NULL; list_del(&qh->ring); kfree(qh); } exit: spin_unlock_irqrestore(&musb->lock, flags); } static int musb_h_get_frame_number(struct usb_hcd *hcd) { struct musb *musb = hcd_to_musb(hcd); return musb_readw(musb->mregs, MUSB_FRAME); } static int musb_h_start(struct usb_hcd *hcd) { struct musb *musb = hcd_to_musb(hcd); /* NOTE: musb_start() is called when the hub driver turns * on port power, or when (OTG) peripheral starts. 
*/ hcd->state = HC_STATE_RUNNING; musb->port1_status = 0; return 0; } static void musb_h_stop(struct usb_hcd *hcd) { musb_stop(hcd_to_musb(hcd)); hcd->state = HC_STATE_HALT; } static int musb_bus_suspend(struct usb_hcd *hcd) { struct musb *musb = hcd_to_musb(hcd); u8 devctl; if (!is_host_active(musb)) return 0; switch (musb->xceiv->state) { case OTG_STATE_A_SUSPEND: return 0; case OTG_STATE_A_WAIT_VRISE: /* ID could be grounded even if there's no device * on the other end of the cable. NOTE that the * A_WAIT_VRISE timers are messy with MUSB... */ devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) musb->xceiv->state = OTG_STATE_A_WAIT_BCON; break; default: break; } if (musb->is_active) { WARNING("trying to suspend as %s while active\n", usb_otg_state_string(musb->xceiv->state)); return -EBUSY; } else return 0; } static int musb_bus_resume(struct usb_hcd *hcd) { /* resuming child port does the work */ return 0; } #ifndef CONFIG_MUSB_PIO_ONLY #define MUSB_USB_DMA_ALIGN 4 struct musb_temp_buffer { void *kmalloc_ptr; void *old_xfer_buffer; u8 data[0]; }; static void musb_free_temp_buffer(struct urb *urb) { enum dma_data_direction dir; struct musb_temp_buffer *temp; if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) return; dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; temp = container_of(urb->transfer_buffer, struct musb_temp_buffer, data); if (dir == DMA_FROM_DEVICE) { memcpy(temp->old_xfer_buffer, temp->data, urb->transfer_buffer_length); } urb->transfer_buffer = temp->old_xfer_buffer; kfree(temp->kmalloc_ptr); urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; } static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) { enum dma_data_direction dir; struct musb_temp_buffer *temp; void *kmalloc_ptr; size_t kmalloc_size; if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 || !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1))) return 0; dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; /* Allocate a buffer with enough padding for alignment */ kmalloc_size = urb->transfer_buffer_length + sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1; kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); if (!kmalloc_ptr) return -ENOMEM; /* Position our struct temp_buffer such that data is aligned */ temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN); temp->kmalloc_ptr = kmalloc_ptr; temp->old_xfer_buffer = urb->transfer_buffer; if (dir == DMA_TO_DEVICE) memcpy(temp->data, urb->transfer_buffer, urb->transfer_buffer_length); urb->transfer_buffer = temp->data; urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; return 0; } static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct musb *musb = hcd_to_musb(hcd); int ret; /* * The DMA engine in RTL1.8 and above cannot handle * DMA addresses that are not aligned to a 4 byte boundary. * For such engine implemented (un)map_urb_for_dma hooks. * Do not use these hooks for RTL<1.8 */ if (musb->hwvers < MUSB_HWVERS_1800) return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); ret = musb_alloc_temp_buffer(urb, mem_flags); if (ret) return ret; ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); if (ret) musb_free_temp_buffer(urb); return ret; } static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) { struct musb *musb = hcd_to_musb(hcd); usb_hcd_unmap_urb_for_dma(hcd, urb); /* Do not use this hook for RTL<1.8 (see description above) */ if (musb->hwvers < MUSB_HWVERS_1800) return; musb_free_temp_buffer(urb); } #endif /* !CONFIG_MUSB_PIO_ONLY */ const struct hc_driver musb_hc_driver = { .description = "musb-hcd", .product_desc = "MUSB HDRC host driver", .hcd_priv_size = sizeof(struct musb), .flags = HCD_USB2 | HCD_MEMORY, /* not using irq handler or reset hooks from usbcore, since * those must be shared with peripheral code for OTG configs */ .start = musb_h_start, .stop = musb_h_stop, .get_frame_number = musb_h_get_frame_number, .urb_enqueue = 
musb_urb_enqueue, .urb_dequeue = musb_urb_dequeue, .endpoint_disable = musb_h_disable, #ifndef CONFIG_MUSB_PIO_ONLY .map_urb_for_dma = musb_map_urb_for_dma, .unmap_urb_for_dma = musb_unmap_urb_for_dma, #endif .hub_status_data = musb_hub_status_data, .hub_control = musb_hub_control, .bus_suspend = musb_bus_suspend, .bus_resume = musb_bus_resume, /* .start_port_reset = NULL, */ /* .hub_irq_enable = NULL, */ };
gpl-2.0
dremaker/imx6ul_linux
block/cmdline-parser.c
2141
5012
/* * Parse command line, get partition information * * Written by Cai Zhiyong <caizhiyong@huawei.com> * */ #include <linux/export.h> #include <linux/cmdline-parser.h> static int parse_subpart(struct cmdline_subpart **subpart, char *partdef) { int ret = 0; struct cmdline_subpart *new_subpart; *subpart = NULL; new_subpart = kzalloc(sizeof(struct cmdline_subpart), GFP_KERNEL); if (!new_subpart) return -ENOMEM; if (*partdef == '-') { new_subpart->size = (sector_t)(~0ULL); partdef++; } else { new_subpart->size = (sector_t)memparse(partdef, &partdef); if (new_subpart->size < (sector_t)PAGE_SIZE) { pr_warn("cmdline partition size is invalid."); ret = -EINVAL; goto fail; } } if (*partdef == '@') { partdef++; new_subpart->from = (sector_t)memparse(partdef, &partdef); } else { new_subpart->from = (sector_t)(~0ULL); } if (*partdef == '(') { int length; char *next = strchr(++partdef, ')'); if (!next) { pr_warn("cmdline partition format is invalid."); ret = -EINVAL; goto fail; } length = min_t(int, next - partdef, sizeof(new_subpart->name) - 1); strncpy(new_subpart->name, partdef, length); new_subpart->name[length] = '\0'; partdef = ++next; } else new_subpart->name[0] = '\0'; new_subpart->flags = 0; if (!strncmp(partdef, "ro", 2)) { new_subpart->flags |= PF_RDONLY; partdef += 2; } if (!strncmp(partdef, "lk", 2)) { new_subpart->flags |= PF_POWERUP_LOCK; partdef += 2; } *subpart = new_subpart; return 0; fail: kfree(new_subpart); return ret; } static void free_subpart(struct cmdline_parts *parts) { struct cmdline_subpart *subpart; while (parts->subpart) { subpart = parts->subpart; parts->subpart = subpart->next_subpart; kfree(subpart); } } static int parse_parts(struct cmdline_parts **parts, const char *bdevdef) { int ret = -EINVAL; char *next; int length; struct cmdline_subpart **next_subpart; struct cmdline_parts *newparts; char buf[BDEVNAME_SIZE + 32 + 4]; *parts = NULL; newparts = kzalloc(sizeof(struct cmdline_parts), GFP_KERNEL); if (!newparts) return -ENOMEM; next = 
strchr(bdevdef, ':'); if (!next) { pr_warn("cmdline partition has no block device."); goto fail; } length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1); strncpy(newparts->name, bdevdef, length); newparts->name[length] = '\0'; newparts->nr_subparts = 0; next_subpart = &newparts->subpart; while (next && *(++next)) { bdevdef = next; next = strchr(bdevdef, ','); length = (!next) ? (sizeof(buf) - 1) : min_t(int, next - bdevdef, sizeof(buf) - 1); strncpy(buf, bdevdef, length); buf[length] = '\0'; ret = parse_subpart(next_subpart, buf); if (ret) goto fail; newparts->nr_subparts++; next_subpart = &(*next_subpart)->next_subpart; } if (!newparts->subpart) { pr_warn("cmdline partition has no valid partition."); ret = -EINVAL; goto fail; } *parts = newparts; return 0; fail: free_subpart(newparts); kfree(newparts); return ret; } void cmdline_parts_free(struct cmdline_parts **parts) { struct cmdline_parts *next_parts; while (*parts) { next_parts = (*parts)->next_parts; free_subpart(*parts); kfree(*parts); *parts = next_parts; } } EXPORT_SYMBOL(cmdline_parts_free); int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline) { int ret; char *buf; char *pbuf; char *next; struct cmdline_parts **next_parts; *parts = NULL; next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL); if (!buf) return -ENOMEM; next_parts = parts; while (next && *pbuf) { next = strchr(pbuf, ';'); if (next) *next = '\0'; ret = parse_parts(next_parts, pbuf); if (ret) goto fail; if (next) pbuf = ++next; next_parts = &(*next_parts)->next_parts; } if (!*parts) { pr_warn("cmdline partition has no valid partition."); ret = -EINVAL; goto fail; } ret = 0; done: kfree(buf); return ret; fail: cmdline_parts_free(parts); goto done; } EXPORT_SYMBOL(cmdline_parts_parse); struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, const char *bdev) { while (parts && strncmp(bdev, parts->name, sizeof(parts->name))) parts = parts->next_parts; return parts; } EXPORT_SYMBOL(cmdline_parts_find); /* * 
add_part() * 0 success. * 1 can not add so many partitions. */ int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, int slot, int (*add_part)(int, struct cmdline_subpart *, void *), void *param) { sector_t from = 0; struct cmdline_subpart *subpart; for (subpart = parts->subpart; subpart; subpart = subpart->next_subpart, slot++) { if (subpart->from == (sector_t)(~0ULL)) subpart->from = from; else from = subpart->from; if (from >= disk_size) break; if (subpart->size > (disk_size - from)) subpart->size = disk_size - from; from += subpart->size; if (add_part(slot, subpart, param)) break; } return slot; } EXPORT_SYMBOL(cmdline_parts_set);
gpl-2.0
tbalden/One_X-2.6.39.4
arch/alpha/kernel/signal.c
3165
17804
/* * linux/arch/alpha/kernel/signal.c * * Copyright (C) 1995 Linus Torvalds * * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/bitops.h> #include <linux/syscalls.h> #include <linux/tracehook.h> #include <asm/uaccess.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> #include "proto.h" #define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) asmlinkage void ret_from_sys_call(void); static void do_signal(struct pt_regs *, struct switch_stack *, unsigned long, unsigned long); /* * The OSF/1 sigprocmask calling sequence is different from the * C sigprocmask() sequence.. */ SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) { sigset_t oldmask; sigset_t mask; unsigned long res; siginitset(&mask, newmask & _BLOCKABLE); res = sigprocmask(how, &mask, &oldmask); if (!res) { force_successful_syscall_return(); res = oldmask.sig[0]; } return res; } SYSCALL_DEFINE3(osf_sigaction, int, sig, const struct osf_sigaction __user *, act, struct osf_sigaction __user *, oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); new_ka.ka_restorer = NULL; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, struct sigaction __user *, oact, size_t, sigsetsize, void __user *, restorer) { struct k_sigaction new_ka, old_ka; int ret; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (act) { new_ka.ka_restorer = restorer; if (copy_from_user(&new_ka.sa, act, sizeof(*act))) return -EFAULT; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) return -EFAULT; } return ret; } /* * Atomically swap in the new signal mask, and wait for a signal. */ SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) { mask &= _BLOCKABLE; spin_lock_irq(&current->sighand->siglock); current->saved_sigmask = current->blocked; siginitset(&current->blocked, mask); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); current->state = TASK_INTERRUPTIBLE; schedule(); set_thread_flag(TIF_RESTORE_SIGMASK); return -ERESTARTNOHAND; } asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) { return do_sigaltstack(uss, uoss, rdusp()); } /* * Do a signal return; undo the signal stack. */ #if _NSIG_WORDS > 1 # error "Non SA_SIGINFO frame needs rearranging" #endif struct sigframe { struct sigcontext sc; unsigned int retcode[3]; }; struct rt_sigframe { struct siginfo info; struct ucontext uc; unsigned int retcode[3]; }; /* If this changes, userland unwinders that Know Things about our signal frame will break. Do not undertake lightly. It also implies an ABI change wrt the size of siginfo_t, which may cause some pain. 
*/ extern char compile_time_assert [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1]; #define INSN_MOV_R30_R16 0x47fe0410 #define INSN_LDI_R0 0x201f0000 #define INSN_CALLSYS 0x00000083 static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, struct switch_stack *sw) { unsigned long usp; long i, err = __get_user(regs->pc, &sc->sc_pc); current_thread_info()->restart_block.fn = do_no_restart_syscall; sw->r26 = (unsigned long) ret_from_sys_call; err |= __get_user(regs->r0, sc->sc_regs+0); err |= __get_user(regs->r1, sc->sc_regs+1); err |= __get_user(regs->r2, sc->sc_regs+2); err |= __get_user(regs->r3, sc->sc_regs+3); err |= __get_user(regs->r4, sc->sc_regs+4); err |= __get_user(regs->r5, sc->sc_regs+5); err |= __get_user(regs->r6, sc->sc_regs+6); err |= __get_user(regs->r7, sc->sc_regs+7); err |= __get_user(regs->r8, sc->sc_regs+8); err |= __get_user(sw->r9, sc->sc_regs+9); err |= __get_user(sw->r10, sc->sc_regs+10); err |= __get_user(sw->r11, sc->sc_regs+11); err |= __get_user(sw->r12, sc->sc_regs+12); err |= __get_user(sw->r13, sc->sc_regs+13); err |= __get_user(sw->r14, sc->sc_regs+14); err |= __get_user(sw->r15, sc->sc_regs+15); err |= __get_user(regs->r16, sc->sc_regs+16); err |= __get_user(regs->r17, sc->sc_regs+17); err |= __get_user(regs->r18, sc->sc_regs+18); err |= __get_user(regs->r19, sc->sc_regs+19); err |= __get_user(regs->r20, sc->sc_regs+20); err |= __get_user(regs->r21, sc->sc_regs+21); err |= __get_user(regs->r22, sc->sc_regs+22); err |= __get_user(regs->r23, sc->sc_regs+23); err |= __get_user(regs->r24, sc->sc_regs+24); err |= __get_user(regs->r25, sc->sc_regs+25); err |= __get_user(regs->r26, sc->sc_regs+26); err |= __get_user(regs->r27, sc->sc_regs+27); err |= __get_user(regs->r28, sc->sc_regs+28); err |= __get_user(regs->gp, sc->sc_regs+29); err |= __get_user(usp, sc->sc_regs+30); wrusp(usp); for (i = 0; i < 31; i++) err |= __get_user(sw->fp[i], sc->sc_fpregs+i); err |= __get_user(sw->fp[31], &sc->sc_fpcr); 
return err; } /* Note that this syscall is also used by setcontext(3) to install a given sigcontext. This because it's impossible to set *all* registers and transfer control from userland. */ asmlinkage void do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs, struct switch_stack *sw) { sigset_t set; /* Verify that it's a good sigcontext before using it */ if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) goto give_sigsegv; if (__get_user(set.sig[0], &sc->sc_mask)) goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(sc, regs, sw)) goto give_sigsegv; /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { siginfo_t info; info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; info.si_addr = (void __user *) regs->pc; info.si_trapno = 0; send_sig_info(SIGTRAP, &info, current); } return; give_sigsegv: force_sig(SIGSEGV, current); } asmlinkage void do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs, struct switch_stack *sw) { sigset_t set; /* Verify that it's a good ucontext_t before using it */ if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc))) goto give_sigsegv; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto give_sigsegv; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw)) goto give_sigsegv; /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { siginfo_t info; info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; info.si_addr = (void __user *) regs->pc; info.si_trapno = 0; send_sig_info(SIGTRAP, &info, current); } return; give_sigsegv: force_sig(SIGSEGV, current); } /* * Set up a signal frame. 
*/ static inline void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) { if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) sp = current->sas_ss_sp + current->sas_ss_size; return (void __user *)((sp - frame_size) & -32ul); } static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, struct switch_stack *sw, unsigned long mask, unsigned long sp) { long i, err = 0; err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); err |= __put_user(mask, &sc->sc_mask); err |= __put_user(regs->pc, &sc->sc_pc); err |= __put_user(8, &sc->sc_ps); err |= __put_user(regs->r0 , sc->sc_regs+0); err |= __put_user(regs->r1 , sc->sc_regs+1); err |= __put_user(regs->r2 , sc->sc_regs+2); err |= __put_user(regs->r3 , sc->sc_regs+3); err |= __put_user(regs->r4 , sc->sc_regs+4); err |= __put_user(regs->r5 , sc->sc_regs+5); err |= __put_user(regs->r6 , sc->sc_regs+6); err |= __put_user(regs->r7 , sc->sc_regs+7); err |= __put_user(regs->r8 , sc->sc_regs+8); err |= __put_user(sw->r9 , sc->sc_regs+9); err |= __put_user(sw->r10 , sc->sc_regs+10); err |= __put_user(sw->r11 , sc->sc_regs+11); err |= __put_user(sw->r12 , sc->sc_regs+12); err |= __put_user(sw->r13 , sc->sc_regs+13); err |= __put_user(sw->r14 , sc->sc_regs+14); err |= __put_user(sw->r15 , sc->sc_regs+15); err |= __put_user(regs->r16, sc->sc_regs+16); err |= __put_user(regs->r17, sc->sc_regs+17); err |= __put_user(regs->r18, sc->sc_regs+18); err |= __put_user(regs->r19, sc->sc_regs+19); err |= __put_user(regs->r20, sc->sc_regs+20); err |= __put_user(regs->r21, sc->sc_regs+21); err |= __put_user(regs->r22, sc->sc_regs+22); err |= __put_user(regs->r23, sc->sc_regs+23); err |= __put_user(regs->r24, sc->sc_regs+24); err |= __put_user(regs->r25, sc->sc_regs+25); err |= __put_user(regs->r26, sc->sc_regs+26); err |= __put_user(regs->r27, sc->sc_regs+27); err |= __put_user(regs->r28, sc->sc_regs+28); err |= __put_user(regs->gp , sc->sc_regs+29); err |= 
__put_user(sp, sc->sc_regs+30); err |= __put_user(0, sc->sc_regs+31); for (i = 0; i < 31; i++) err |= __put_user(sw->fp[i], sc->sc_fpregs+i); err |= __put_user(0, sc->sc_fpregs+31); err |= __put_user(sw->fp[31], &sc->sc_fpcr); err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0); err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1); err |= __put_user(regs->trap_a2, &sc->sc_traparg_a2); return err; } static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs, struct switch_stack * sw) { unsigned long oldsp, r26, err = 0; struct sigframe __user *frame; oldsp = rdusp(); frame = get_sigframe(ka, oldsp, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= setup_sigcontext(&frame->sc, regs, sw, set->sig[0], oldsp); if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->ka_restorer) { r26 = (unsigned long) ka->ka_restorer; } else { err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); err |= __put_user(INSN_LDI_R0+__NR_sigreturn, frame->retcode+1); err |= __put_user(INSN_CALLSYS, frame->retcode+2); imb(); r26 = (unsigned long) frame->retcode; } /* Check that everything was written properly. 
*/ if (err) goto give_sigsegv; /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler; regs->r16 = sig; /* a0: signal number */ regs->r17 = 0; /* a1: exception code */ regs->r18 = (unsigned long) &frame->sc; /* a2: sigcontext pointer */ wrusp((unsigned long) frame); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->pc, regs->r26); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs, struct switch_stack * sw) { unsigned long oldsp, r26, err = 0; struct rt_sigframe __user *frame; oldsp = rdusp(); frame = get_sigframe(ka, oldsp, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= copy_siginfo_to_user(&frame->info, info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(oldsp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, sw, set->sig[0], oldsp); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ if (ka->ka_restorer) { r26 = (unsigned long) ka->ka_restorer; } else { err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); err |= __put_user(INSN_LDI_R0+__NR_rt_sigreturn, frame->retcode+1); err |= __put_user(INSN_CALLSYS, frame->retcode+2); imb(); r26 = (unsigned long) frame->retcode; } if (err) goto give_sigsegv; /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ka->sa.sa_handler; regs->r16 = sig; /* a0: signal number */ regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */ regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */ wrusp((unsigned long) frame); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->pc, regs->r26); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* * OK, we're invoking a handler. */ static inline int handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, struct pt_regs * regs, struct switch_stack *sw) { int ret; if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs, sw); else ret = setup_frame(sig, ka, oldset, regs, sw); if (ret == 0) { spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); if (!(ka->sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked,sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } return ret; } static inline void syscall_restart(unsigned long r0, unsigned long r19, struct pt_regs *regs, struct k_sigaction *ka) { switch (regs->r0) { case ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { case ERESTARTNOHAND: regs->r0 = EINTR; break; } /* fallthrough */ case ERESTARTNOINTR: regs->r0 = r0; /* reset v0 and a3 and replay syscall */ regs->r19 = r19; regs->pc -= 4; break; case ERESTART_RESTARTBLOCK: regs->r0 = EINTR; break; } } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. 
Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. * * "r0" and "r19" are the registers we need to restore for system call * restart. "r0" is also used as an indicator whether we can restart at * all (if we get here from anything but a syscall return, it will be 0) */ static void do_signal(struct pt_regs * regs, struct switch_stack * sw, unsigned long r0, unsigned long r19) { siginfo_t info; int signr; unsigned long single_stepping = ptrace_cancel_bpt(current); struct k_sigaction ka; sigset_t *oldset; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; /* This lets the debugger run, ... */ signr = get_signal_to_deliver(&info, &ka, regs, NULL); /* ... so re-check the single stepping. */ single_stepping |= ptrace_cancel_bpt(current); if (signr > 0) { /* Whee! Actually deliver the signal. */ if (r0) syscall_restart(r0, r19, regs, &ka); if (handle_signal(signr, &ka, &info, oldset, regs, sw) == 0) { /* A signal was successfully delivered, and the saved sigmask was stored on the signal frame, and will be restored by sigreturn. So we can simply clear the restore sigmask flag. */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); } if (single_stepping) ptrace_set_bpt(current); /* re-set bpt */ return; } if (r0) { switch (regs->r0) { case ERESTARTNOHAND: case ERESTARTSYS: case ERESTARTNOINTR: /* Reset v0 and a3 and replay syscall. */ regs->r0 = r0; regs->r19 = r19; regs->pc -= 4; break; case ERESTART_RESTARTBLOCK: /* Force v0 to the restart syscall and reply. */ regs->r0 = __NR_restart_syscall; regs->pc -= 4; break; } } /* If there's no signal to deliver, we just restore the saved mask. 
*/ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } if (single_stepping) ptrace_set_bpt(current); /* re-set breakpoint */ } void do_notify_resume(struct pt_regs *regs, struct switch_stack *sw, unsigned long thread_info_flags, unsigned long r0, unsigned long r19) { if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs, sw, r0, r19); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } }
gpl-2.0
user98/android_kernel_samsung_grandneove3g
arch/arm/mach-imx/mmdc.c
3165
1673
/* * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> #define MMDC_MAPSR 0x404 #define BP_MMDC_MAPSR_PSD 0 #define BP_MMDC_MAPSR_PSS 4 static int imx_mmdc_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; void __iomem *mmdc_base, *reg; u32 val; int timeout = 0x400; mmdc_base = of_iomap(np, 0); WARN_ON(!mmdc_base); reg = mmdc_base + MMDC_MAPSR; /* Enable automatic power saving */ val = readl_relaxed(reg); val &= ~(1 << BP_MMDC_MAPSR_PSD); writel_relaxed(val, reg); /* Ensure it's successfully enabled */ while (!(readl_relaxed(reg) & 1 << BP_MMDC_MAPSR_PSS) && --timeout) cpu_relax(); if (unlikely(!timeout)) { pr_warn("%s: failed to enable automatic power saving\n", __func__); return -EBUSY; } return 0; } static struct of_device_id imx_mmdc_dt_ids[] = { { .compatible = "fsl,imx6q-mmdc", }, { /* sentinel */ } }; static struct platform_driver imx_mmdc_driver = { .driver = { .name = "imx-mmdc", .owner = THIS_MODULE, .of_match_table = imx_mmdc_dt_ids, }, .probe = imx_mmdc_probe, }; static int __init imx_mmdc_init(void) { return platform_driver_register(&imx_mmdc_driver); } postcore_initcall(imx_mmdc_init);
gpl-2.0
ulrikdb/linux
arch/mips/lib/dump_tlb.c
3421
2721
/* * Dump R4x00 TLB for debugging purposes. * * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle. * Copyright (C) 1999 by Silicon Graphics, Inc. */ #include <linux/kernel.h> #include <linux/mm.h> #include <asm/mipsregs.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlbdebug.h> static inline const char *msk2str(unsigned int mask) { switch (mask) { case PM_4K: return "4kb"; case PM_16K: return "16kb"; case PM_64K: return "64kb"; case PM_256K: return "256kb"; #ifdef CONFIG_CPU_CAVIUM_OCTEON case PM_8K: return "8kb"; case PM_32K: return "32kb"; case PM_128K: return "128kb"; case PM_512K: return "512kb"; case PM_2M: return "2Mb"; case PM_8M: return "8Mb"; case PM_32M: return "32Mb"; #endif #ifndef CONFIG_CPU_VR41XX case PM_1M: return "1Mb"; case PM_4M: return "4Mb"; case PM_16M: return "16Mb"; case PM_64M: return "64Mb"; case PM_256M: return "256Mb"; case PM_1G: return "1Gb"; #endif } return ""; } #define BARRIER() \ __asm__ __volatile__( \ ".set\tnoreorder\n\t" \ "nop;nop;nop;nop;nop;nop;nop\n\t" \ ".set\treorder"); static void dump_tlb(int first, int last) { unsigned long s_entryhi, entryhi, asid; unsigned long long entrylo0, entrylo1; unsigned int s_index, s_pagemask, pagemask, c0, c1, i; s_pagemask = read_c0_pagemask(); s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); asid = s_entryhi & 0xff; for (i = first; i <= last; i++) { write_c0_index(i); BARRIER(); tlb_read(); BARRIER(); pagemask = read_c0_pagemask(); entryhi = read_c0_entryhi(); entrylo0 = read_c0_entrylo0(); entrylo1 = read_c0_entrylo1(); /* Unused entries have a virtual address of CKSEG0. 
*/ if ((entryhi & ~0x1ffffUL) != CKSEG0 && (entryhi & 0xff) == asid) { #ifdef CONFIG_32BIT int width = 8; #else int width = 11; #endif /* * Only print entries in use */ printk("Index: %2d pgmask=%s ", i, msk2str(pagemask)); c0 = (entrylo0 >> 3) & 7; c1 = (entrylo1 >> 3) & 7; printk("va=%0*lx asid=%02lx\n", width, (entryhi & ~0x1fffUL), entryhi & 0xff); printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", width, (entrylo0 << 6) & PAGE_MASK, c0, (entrylo0 & 4) ? 1 : 0, (entrylo0 & 2) ? 1 : 0, (entrylo0 & 1) ? 1 : 0); printk("[pa=%0*llx c=%d d=%d v=%d g=%d]\n", width, (entrylo1 << 6) & PAGE_MASK, c1, (entrylo1 & 4) ? 1 : 0, (entrylo1 & 2) ? 1 : 0, (entrylo1 & 1) ? 1 : 0); } } printk("\n"); write_c0_entryhi(s_entryhi); write_c0_index(s_index); write_c0_pagemask(s_pagemask); } void dump_tlb_all(void) { dump_tlb(0, current_cpu_data.tlbsize - 1); }
gpl-2.0
caoxin1988/linux-3.0.86
drivers/gpu/drm/i915/intel_bios.c
3677
20378
/* * Copyright © 2006 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* * Authors: * Eric Anholt <eric@anholt.net> * */ #include <linux/dmi.h> #include <drm/drm_dp_helper.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_bios.h" #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 static int panel_type; static void * find_section(struct bdb_header *bdb, int section_id) { u8 *base = (u8 *)bdb; int index = 0; u16 total, current_size; u8 current_id; /* skip to first section */ index += bdb->header_size; total = bdb->bdb_size; /* walk the sections looking for section_id */ while (index < total) { current_id = *(base + index); index++; current_size = *((u16 *)(base + index)); index += 2; if (current_id == section_id) return base + index; index += current_size; } return NULL; } static u16 get_blocksize(void *p) { u16 *block_ptr, block_size; block_ptr = (u16 *)((char *)p - 2); block_size = *block_ptr; return block_size; } static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, const struct lvds_dvo_timing *dvo_timing) { panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | dvo_timing->hactive_lo; panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay + ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo); panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start + dvo_timing->hsync_pulse_width; panel_fixed_mode->htotal = panel_fixed_mode->hdisplay + ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo); panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | dvo_timing->vactive_lo; panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay + dvo_timing->vsync_off; panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start + dvo_timing->vsync_pulse_width; panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay + ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo); panel_fixed_mode->clock = dvo_timing->clock * 10; panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; if (dvo_timing->hsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PHSYNC; else 
panel_fixed_mode->flags |= DRM_MODE_FLAG_NHSYNC; if (dvo_timing->vsync_positive) panel_fixed_mode->flags |= DRM_MODE_FLAG_PVSYNC; else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal) panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1; drm_mode_set_name(panel_fixed_mode); } static bool lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a, const struct lvds_dvo_timing *b) { if (a->hactive_hi != b->hactive_hi || a->hactive_lo != b->hactive_lo) return false; if (a->hsync_off_hi != b->hsync_off_hi || a->hsync_off_lo != b->hsync_off_lo) return false; if (a->hsync_pulse_width != b->hsync_pulse_width) return false; if (a->hblank_hi != b->hblank_hi || a->hblank_lo != b->hblank_lo) return false; if (a->vactive_hi != b->vactive_hi || a->vactive_lo != b->vactive_lo) return false; if (a->vsync_off != b->vsync_off) return false; if (a->vsync_pulse_width != b->vsync_pulse_width) return false; if (a->vblank_hi != b->vblank_hi || a->vblank_lo != b->vblank_lo) return false; return true; } static const struct lvds_dvo_timing * get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs, int index) { /* * the size of fp_timing varies on the different platform. 
* So calculate the DVO timing relative offset in LVDS data * entry to get the DVO timing entry */ int lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset - lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset; int dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset - lvds_lfp_data_ptrs->ptr[0].fp_timing_offset; char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index; return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); } /* Try to find integrated panel data */ static void parse_lfp_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { const struct bdb_lvds_options *lvds_options; const struct bdb_lvds_lfp_data *lvds_lfp_data; const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; const struct lvds_dvo_timing *panel_dvo_timing; struct drm_display_mode *panel_fixed_mode; int i, downclock; lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); if (!lvds_options) return; dev_priv->lvds_dither = lvds_options->pixel_dither; if (lvds_options->panel_type == 0xff) return; panel_type = lvds_options->panel_type; lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); if (!lvds_lfp_data) return; lvds_lfp_data_ptrs = find_section(bdb, BDB_LVDS_LFP_DATA_PTRS); if (!lvds_lfp_data_ptrs) return; dev_priv->lvds_vbt = 1; panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, lvds_options->panel_type); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing); dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); /* * Iterate over the LVDS panel timing info to find the lowest clock * for the native resolution. 
*/ downclock = panel_dvo_timing->clock; for (i = 0; i < 16; i++) { const struct lvds_dvo_timing *dvo_timing; dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, i); if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) && dvo_timing->clock < downclock) downclock = dvo_timing->clock; } if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { dev_priv->lvds_downclock_avail = 1; dev_priv->lvds_downclock = downclock * 10; DRM_DEBUG_KMS("LVDS downclock is found in VBT. " "Normal Clock %dKHz, downclock %dKHz\n", panel_fixed_mode->clock, 10*downclock); } } /* Try to find sdvo panel data */ static void parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct lvds_dvo_timing *dvo_timing; struct drm_display_mode *panel_fixed_mode; int index; index = i915_vbt_sdvo_panel_type; if (index == -1) { struct bdb_sdvo_lvds_options *sdvo_lvds_options; sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; index = sdvo_lvds_options->panel_type; } dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); if (!dvo_timing) return; panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode; DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); } static int intel_bios_ssc_frequency(struct drm_device *dev, bool alternate) { switch (INTEL_INFO(dev)->gen) { case 2: return alternate ? 66 : 48; case 3: case 4: return alternate ? 100 : 96; default: return alternate ? 
100 : 120; } } static void parse_general_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct drm_device *dev = dev_priv->dev; struct bdb_general_features *general; general = find_section(bdb, BDB_GENERAL_FEATURES); if (general) { dev_priv->int_tv_support = general->int_tv_support; dev_priv->int_crt_support = general->int_crt_support; dev_priv->lvds_use_ssc = general->enable_ssc; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, general->ssc_freq); dev_priv->display_clock_mode = general->display_clock_mode; DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", dev_priv->int_tv_support, dev_priv->int_crt_support, dev_priv->lvds_use_ssc, dev_priv->lvds_ssc_freq, dev_priv->display_clock_mode); } } static void parse_general_definitions(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *general; general = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (general) { u16 block_size = get_blocksize(general); if (block_size >= sizeof(*general)) { int bus_pin = general->crt_ddc_gmbus_pin; DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); if (bus_pin >= 1 && bus_pin <= 6) dev_priv->crt_ddc_pin = bus_pin; } else { DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", block_size); } } } static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct sdvo_device_mapping *p_mapping; struct bdb_general_definitions *p_defs; struct child_device_config *p_child; int i, child_device_num, count; u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); return; } /* judge whether the size of child device meets the requirements. 
* If the child device size obtained from general definition block * is different with sizeof(struct child_device_config), skip the * parsing of sdvo device info */ if (p_defs->child_dev_size != sizeof(*p_child)) { /* different child dev size . Ignore it */ DRM_DEBUG_KMS("different child size is found. Invalid.\n"); return; } /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ child_device_num = (block_size - sizeof(*p_defs)) / sizeof(*p_child); count = 0; for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } if (p_child->slave_addr != SLAVE_ADDR1 && p_child->slave_addr != SLAVE_ADDR2) { /* * If the slave address is neither 0x70 nor 0x72, * it is not a SDVO device. Skip it. */ continue; } if (p_child->dvo_port != DEVICE_PORT_DVOB && p_child->dvo_port != DEVICE_PORT_DVOC) { /* skip the incorrect SDVO port */ DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); continue; } DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" " %s port\n", p_child->slave_addr, (p_child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); if (!p_mapping->initialized) { p_mapping->dvo_port = p_child->dvo_port; p_mapping->slave_addr = p_child->slave_addr; p_mapping->dvo_wiring = p_child->dvo_wiring; p_mapping->ddc_pin = p_child->ddc_pin; p_mapping->i2c_pin = p_child->i2c_pin; p_mapping->initialized = 1; DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", p_mapping->dvo_port, p_mapping->slave_addr, p_mapping->dvo_wiring, p_mapping->ddc_pin, p_mapping->i2c_pin); } else { DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); } if (p_child->slave2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ DRM_DEBUG_KMS("there exists the slave2_addr. 
Maybe this" " is a SDVO device with multiple inputs.\n"); } count++; } if (!count) { /* No SDVO device info is found */ DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); } return; } static void parse_driver_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct drm_device *dev = dev_priv->dev; struct bdb_driver_features *driver; driver = find_section(bdb, BDB_DRIVER_FEATURES); if (!driver) return; if (SUPPORTS_EDP(dev) && driver->lvds_config == BDB_DRIVER_FEATURE_EDP) dev_priv->edp.support = 1; if (driver->dual_frequency) dev_priv->render_reclock_avail = true; } static void parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; struct edp_power_seq *edp_pps; struct edp_link_params *edp_link_params; edp = find_section(bdb, BDB_EDP); if (!edp) { if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { DRM_DEBUG_KMS("No eDP BDB found but eDP panel " "supported, assume %dbpp panel color " "depth.\n", dev_priv->edp.bpp); } return; } switch ((edp->color_depth >> (panel_type * 2)) & 3) { case EDP_18BPP: dev_priv->edp.bpp = 18; break; case EDP_24BPP: dev_priv->edp.bpp = 24; break; case EDP_30BPP: dev_priv->edp.bpp = 30; break; } /* Get the eDP sequencing and link info */ edp_pps = &edp->power_seqs[panel_type]; edp_link_params = &edp->link_params[panel_type]; dev_priv->edp.pps = *edp_pps; dev_priv->edp.rate = edp_link_params->rate ? 
DP_LINK_BW_2_7 : DP_LINK_BW_1_62; switch (edp_link_params->lanes) { case 0: dev_priv->edp.lanes = 1; break; case 1: dev_priv->edp.lanes = 2; break; case 3: default: dev_priv->edp.lanes = 4; break; } switch (edp_link_params->preemphasis) { case 0: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; break; case 1: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; break; case 2: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; break; case 3: dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; break; } switch (edp_link_params->vswing) { case 0: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; break; case 1: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; break; case 2: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; break; case 3: dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; break; } } static void parse_device_mapping(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_general_definitions *p_defs; struct child_device_config *p_child, *child_dev_ptr; int i, child_device_num, count; u16 block_size; p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!p_defs) { DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } /* judge whether the size of child device meets the requirements. * If the child device size obtained from general definition block * is different with sizeof(struct child_device_config), skip the * parsing of sdvo device info */ if (p_defs->child_dev_size != sizeof(*p_child)) { /* different child dev size . Ignore it */ DRM_DEBUG_KMS("different child size is found. 
Invalid.\n"); return; } /* get the block size of general definitions */ block_size = get_blocksize(p_defs); /* get the number of child device */ child_device_num = (block_size - sizeof(*p_defs)) / sizeof(*p_child); count = 0; /* get the number of child device that is present */ for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } count++; } if (!count) { DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); return; } dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL); if (!dev_priv->child_dev) { DRM_DEBUG_KMS("No memory space for child device\n"); return; } dev_priv->child_dev_num = count; count = 0; for (i = 0; i < child_device_num; i++) { p_child = &(p_defs->devices[i]); if (!p_child->device_type) { /* skip the device block if device type is invalid */ continue; } child_dev_ptr = dev_priv->child_dev + count; count++; memcpy((void *)child_dev_ptr, (void *)p_child, sizeof(*p_child)); } return; } static void init_vbt_defaults(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC; /* LFP panel data */ dev_priv->lvds_dither = 1; dev_priv->lvds_vbt = 0; /* SDVO panel data */ dev_priv->sdvo_lvds_vbt_mode = NULL; /* general features */ dev_priv->int_tv_support = 1; dev_priv->int_crt_support = 1; /* Default to using SSC */ dev_priv->lvds_use_ssc = 1; dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); /* eDP data */ dev_priv->edp.bpp = 18; } static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { DRM_DEBUG_KMS("Falling back to manually reading VBT from " "VBIOS ROM for %s\n", id->ident); return 1; } static const struct dmi_system_id intel_no_opregion_vbt[] = { { .callback = intel_no_opregion_vbt_callback, .ident = "ThinkCentre A57", .matches = { DMI_MATCH(DMI_SYS_VENDOR, 
"LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"), }, }, { } }; /** * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device * * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * to appropriate values. * * Returns 0 on success, nonzero on failure. */ bool intel_parse_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev->pdev; struct bdb_header *bdb = NULL; u8 __iomem *bios = NULL; init_vbt_defaults(dev_priv); /* XXX Should this validation be moved to intel_opregion.c? */ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) { struct vbt_header *vbt = dev_priv->opregion.vbt; if (memcmp(vbt->signature, "$VBT", 4) == 0) { DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", vbt->signature); bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); } else dev_priv->opregion.vbt = NULL; } if (bdb == NULL) { struct vbt_header *vbt = NULL; size_t size; int i; bios = pci_map_rom(pdev, &size); if (!bios) return -1; /* Scour memory looking for the VBT signature */ for (i = 0; i + 4 < size; i++) { if (!memcmp(bios + i, "$VBT", 4)) { vbt = (struct vbt_header *)(bios + i); break; } } if (!vbt) { DRM_DEBUG_DRIVER("VBT signature missing\n"); pci_unmap_rom(pdev, bios); return -1; } bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset); } /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); parse_general_definitions(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); parse_sdvo_device_mapping(dev_priv, bdb); parse_device_mapping(dev_priv, bdb); parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); if (bios) pci_unmap_rom(pdev, bios); return 0; } /* Ensure that vital registers have been initialised, even if the BIOS * is absent or just failing to do its job. 
*/ void intel_setup_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; /* Set the Panel Power On/Off timings if uninitialized. */ if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { /* Set T2 to 40ms and T5 to 200ms */ I915_WRITE(PP_ON_DELAYS, 0x019007d0); /* Set T3 to 35ms and Tx to 200ms */ I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); } }
gpl-2.0
mcrosson/samsung_kernel_comanche
drivers/pci/hotplug/ibmphp_core.c
4189
35110
/* * IBM Hot Plug Controller Driver * * Written By: Chuck Cole, Jyoti Shah, Tong Yu, Irene Zubarev, IBM Corporation * * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001-2003 IBM Corp. * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <gregkh@us.ibm.com> * */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> #include "../pci.h" #include <asm/pci_x86.h> /* for struct irq_routing_table */ #include "ibmphp.h" #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) #define attn_off(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNOFF) #define attn_LED_blink(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_BLINKLED) #define get_ctrl_revision(sl, rev) ibmphp_hpc_readslot (sl, READ_REVLEVEL, rev) #define get_hpc_options(sl, opt) ibmphp_hpc_readslot (sl, READ_HPCOPTIONS, opt) #define DRIVER_VERSION "0.6" #define DRIVER_DESC "IBM Hot Plug PCI Controller Driver" int ibmphp_debug; static int debug; module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC (debug, "Debugging mode enabled or not"); MODULE_LICENSE ("GPL"); MODULE_DESCRIPTION (DRIVER_DESC); struct pci_bus *ibmphp_pci_bus; static int max_slots; static int 
irqs[16]; /* PIC mode IRQ's we're using so far (in case MPS * tables don't provide default info for empty slots */ static int init_flag; /* static int get_max_adapter_speed_1 (struct hotplug_slot *, u8 *, u8); static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value) { return get_max_adapter_speed_1 (hs, value, 1); } */ static inline int get_cur_bus_info(struct slot **sl) { int rc = 1; struct slot * slot_cur = *sl; debug("options = %x\n", slot_cur->ctrl->options); debug("revision = %x\n", slot_cur->ctrl->revision); if (READ_BUS_STATUS(slot_cur->ctrl)) rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL); if (rc) return rc; slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus); if (READ_BUS_MODE(slot_cur->ctrl)) slot_cur->bus_on->current_bus_mode = CURRENT_BUS_MODE(slot_cur->busstatus); else slot_cur->bus_on->current_bus_mode = 0xFF; debug("busstatus = %x, bus_speed = %x, bus_mode = %x\n", slot_cur->busstatus, slot_cur->bus_on->current_speed, slot_cur->bus_on->current_bus_mode); *sl = slot_cur; return 0; } static inline int slot_update(struct slot **sl) { int rc; rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); if (rc) return rc; if (!init_flag) rc = get_cur_bus_info(sl); return rc; } static int __init get_max_slots (void) { struct slot * slot_cur; struct list_head * tmp; u8 slot_count = 0; list_for_each(tmp, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); /* sometimes the hot-pluggable slots start with 4 (not always from 1) */ slot_count = max(slot_count, slot_cur->number); } return slot_count; } /* This routine will put the correct slot->device information per slot. It's * called from initialization of the slot structures. It will also assign * interrupt numbers per each slot. 
* Parameters: struct slot * Returns 0 or errors */ int ibmphp_init_devno(struct slot **cur_slot) { struct irq_routing_table *rtable; int len; int loop; int i; rtable = pcibios_get_irq_routing_table(); if (!rtable) { err("no BIOS routing table...\n"); return -ENOMEM; } len = (rtable->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); if (!len) { kfree(rtable); return -1; } for (loop = 0; loop < len; loop++) { if ((*cur_slot)->number == rtable->slots[loop].slot && (*cur_slot)->bus == rtable->slots[loop].bus) { struct io_apic_irq_attr irq_attr; (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); for (i = 0; i < 4; i++) (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, (int) (*cur_slot)->device, i, &irq_attr); debug("(*cur_slot)->irq[0] = %x\n", (*cur_slot)->irq[0]); debug("(*cur_slot)->irq[1] = %x\n", (*cur_slot)->irq[1]); debug("(*cur_slot)->irq[2] = %x\n", (*cur_slot)->irq[2]); debug("(*cur_slot)->irq[3] = %x\n", (*cur_slot)->irq[3]); debug("rtable->exlusive_irqs = %x\n", rtable->exclusive_irqs); debug("rtable->slots[loop].irq[0].bitmap = %x\n", rtable->slots[loop].irq[0].bitmap); debug("rtable->slots[loop].irq[1].bitmap = %x\n", rtable->slots[loop].irq[1].bitmap); debug("rtable->slots[loop].irq[2].bitmap = %x\n", rtable->slots[loop].irq[2].bitmap); debug("rtable->slots[loop].irq[3].bitmap = %x\n", rtable->slots[loop].irq[3].bitmap); debug("rtable->slots[loop].irq[0].link = %x\n", rtable->slots[loop].irq[0].link); debug("rtable->slots[loop].irq[1].link = %x\n", rtable->slots[loop].irq[1].link); debug("rtable->slots[loop].irq[2].link = %x\n", rtable->slots[loop].irq[2].link); debug("rtable->slots[loop].irq[3].link = %x\n", rtable->slots[loop].irq[3].link); debug("end of init_devno\n"); kfree(rtable); return 0; } } kfree(rtable); return -1; } static inline int power_on(struct slot *slot_cur) { u8 cmd = HPC_SLOT_ON; int retval; retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("power on failed\n"); return 
retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in power_on\n"); return -EIO; } msleep(3000); /* For ServeRAID cards, and some 66 PCI */ return 0; } static inline int power_off(struct slot *slot_cur) { u8 cmd = HPC_SLOT_OFF; int retval; retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("power off failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in power_off\n"); retval = -EIO; } return retval; } static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value) { int rc = 0; struct slot *pslot; u8 cmd = 0x00; /* avoid compiler warning */ debug("set_attention_status - Entry hotplug_slot[%lx] value[%x]\n", (ulong) hotplug_slot, value); ibmphp_lock_operations(); if (hotplug_slot) { switch (value) { case HPC_SLOT_ATTN_OFF: cmd = HPC_SLOT_ATTNOFF; break; case HPC_SLOT_ATTN_ON: cmd = HPC_SLOT_ATTNON; break; case HPC_SLOT_ATTN_BLINK: cmd = HPC_SLOT_BLINKLED; break; default: rc = -ENODEV; err("set_attention_status - Error : invalid input [%x]\n", value); break; } if (rc == 0) { pslot = hotplug_slot->private; if (pslot) rc = ibmphp_hpc_writeslot(pslot, cmd); else rc = -ENODEV; } } else rc = -ENODEV; ibmphp_unlock_operations(); debug("set_attention_status - Exit rc[%d]\n", rc); return rc; } static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS, &(myslot.ext_status)); if (!rc) *value = SLOT_ATTN(myslot.status, myslot.ext_status); } } ibmphp_unlock_operations(); debug("get_attention_status - Exit 
rc[%d] value[%x]\n", rc, *value); return rc; } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_latch_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) *value = SLOT_LATCH(myslot.status); } } ibmphp_unlock_operations(); debug("get_latch_status - Exit rc[%d] rc[%x] value[%x]\n", rc, rc, *value); return rc; } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_power_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) *value = SLOT_PWRGD(myslot.status); } } ibmphp_unlock_operations(); debug("get_power_status - Exit rc[%d] rc[%x] value[%x]\n", rc, rc, *value); return rc; } static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value) { int rc = -ENODEV; struct slot *pslot; u8 present; struct slot myslot; debug("get_adapter_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!rc) { present = SLOT_PRESENT(myslot.status); if (present == HPC_SLOT_EMPTY) *value = 0; else *value = 1; } } } ibmphp_unlock_operations(); debug("get_adapter_present - Exit rc[%d] value[%x]\n", rc, *value); return rc; } static int get_max_bus_speed(struct slot *slot) { 
int rc; u8 mode = 0; enum pci_bus_speed speed; struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus; debug("%s - Entry slot[%p]\n", __func__, slot); ibmphp_lock_operations(); mode = slot->supported_bus_mode; speed = slot->supported_speed; ibmphp_unlock_operations(); switch (speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: if (mode == BUS_MODE_PCIX) speed += 0x01; break; case BUS_SPEED_100: case BUS_SPEED_133: speed += 0x01; break; default: /* Note (will need to change): there would be soon 256, 512 also */ rc = -ENODEV; } if (!rc) bus->max_bus_speed = speed; debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed); return rc; } /* static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 * value, u8 flag) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_max_adapter_speed_1 - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong)hotplug_slot, (ulong) value); if (flag) ibmphp_lock_operations(); if (hotplug_slot && value) { pslot = hotplug_slot->private; if (pslot) { memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &(myslot.status)); if (!(SLOT_LATCH (myslot.status)) && (SLOT_PRESENT (myslot.status))) { rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS, &(myslot.ext_status)); if (!rc) *value = SLOT_SPEED(myslot.ext_status); } else *value = MAX_ADAPTER_NONE; } } if (flag) ibmphp_unlock_operations(); debug("get_max_adapter_speed_1 - Exit rc[%d] value[%x]\n", rc, *value); return rc; } static int get_bus_name(struct hotplug_slot *hotplug_slot, char * value) { int rc = -ENODEV; struct slot *pslot = NULL; debug("get_bus_name - Entry hotplug_slot[%lx]\n", (ulong)hotplug_slot); ibmphp_lock_operations(); if (hotplug_slot) { pslot = hotplug_slot->private; if (pslot) { rc = 0; snprintf(value, 100, "Bus %x", pslot->bus); } } else rc = -ENODEV; ibmphp_unlock_operations(); debug("get_bus_name - Exit rc[%d] value[%x]\n", rc, *value); return rc; } */ 
/**************************************************************************** * This routine will initialize the ops data structure used in the validate * function. It will also power off empty slots that are powered on since BIOS * leaves those on, albeit disconnected ****************************************************************************/ static int __init init_ops(void) { struct slot *slot_cur; struct list_head *tmp; int retval; int rc; list_for_each(tmp, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); if (!slot_cur) return -ENODEV; debug("BEFORE GETTING SLOT STATUS, slot # %x\n", slot_cur->number); if (slot_cur->ctrl->revision == 0xFF) if (get_ctrl_revision(slot_cur, &slot_cur->ctrl->revision)) return -1; if (slot_cur->bus_on->current_speed == 0xFF) if (get_cur_bus_info(&slot_cur)) return -1; get_max_bus_speed(slot_cur); if (slot_cur->ctrl->options == 0xFF) if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) return -1; retval = slot_update(&slot_cur); if (retval) return retval; debug("status = %x\n", slot_cur->status); debug("ext_status = %x\n", slot_cur->ext_status); debug("SLOT_POWER = %x\n", SLOT_POWER(slot_cur->status)); debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status)); debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status)); if ((SLOT_PWRGD(slot_cur->status)) && !(SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) { debug("BEFORE POWER OFF COMMAND\n"); rc = power_off(slot_cur); if (rc) return rc; /* retval = slot_update(&slot_cur); * if (retval) * return retval; * ibmphp_update_slot_info(slot_cur); */ } } init_flag = 0; return 0; } /* This operation will check whether the slot is within the bounds and * the operation is valid to perform on that slot * Parameters: slot, operation * Returns: 0 or error codes */ static int validate(struct slot *slot_cur, int opn) { int number; int retval; if (!slot_cur) return -ENODEV; number = slot_cur->number; if ((number > max_slots) || (number < 0)) 
return -EBADSLT; debug("slot_number in validate is %d\n", slot_cur->number); retval = slot_update(&slot_cur); if (retval) return retval; switch (opn) { case ENABLE: if (!(SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; case DISABLE: if ((SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; default: break; } err("validate failed....\n"); return -EINVAL; } /**************************************************************************** * This routine is for updating the data structures in the hotplug core * Parameters: struct slot * Returns: 0 or error ****************************************************************************/ int ibmphp_update_slot_info(struct slot *slot_cur) { struct hotplug_slot_info *info; struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus; int rc; u8 bus_speed; u8 mode; info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!info) { err("out of system memory\n"); return -ENOMEM; } info->power_status = SLOT_PWRGD(slot_cur->status); info->attention_status = SLOT_ATTN(slot_cur->status, slot_cur->ext_status); info->latch_status = SLOT_LATCH(slot_cur->status); if (!SLOT_PRESENT(slot_cur->status)) { info->adapter_status = 0; /* info->max_adapter_speed_status = MAX_ADAPTER_NONE; */ } else { info->adapter_status = 1; /* get_max_adapter_speed_1(slot_cur->hotplug_slot, &info->max_adapter_speed_status, 0); */ } bus_speed = slot_cur->bus_on->current_speed; mode = slot_cur->bus_on->current_bus_mode; switch (bus_speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: if (mode == BUS_MODE_PCIX) bus_speed += 0x01; else if (mode == BUS_MODE_PCI) ; else bus_speed = PCI_SPEED_UNKNOWN; break; case BUS_SPEED_100: case BUS_SPEED_133: bus_speed += 0x01; break; default: bus_speed = PCI_SPEED_UNKNOWN; } bus->cur_bus_speed = bus_speed; // To do: bus_names rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info); 
kfree(info); return rc; } /****************************************************************************** * This function will return the pci_func, given bus and devfunc, or NULL. It * is called from visit routines ******************************************************************************/ static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function) { struct pci_func *func_cur; struct slot *slot_cur; struct list_head * tmp; list_for_each(tmp, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); if (slot_cur->func) { func_cur = slot_cur->func; while (func_cur) { if ((func_cur->busno == busno) && (func_cur->device == device) && (func_cur->function == function)) return func_cur; func_cur = func_cur->next; } } } return NULL; } /************************************************************* * This routine frees up memory used by struct slot, including * the pointers to pci_func, bus, hotplug_slot, controller, * and deregistering from the hotplug core *************************************************************/ static void free_slots(void) { struct slot *slot_cur; struct list_head * tmp; struct list_head * next; debug("%s -- enter\n", __func__); list_for_each_safe(tmp, next, &ibmphp_slot_head) { slot_cur = list_entry(tmp, struct slot, ibm_slot_list); pci_hp_deregister(slot_cur->hotplug_slot); } debug("%s -- exit\n", __func__); } static void ibm_unconfigure_device(struct pci_func *func) { struct pci_dev *temp; u8 j; debug("inside %s\n", __func__); debug("func->device = %x, func->function = %x\n", func->device, func->function); debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0); for (j = 0; j < 0x08; j++) { temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j); if (temp) { pci_remove_bus_device(temp); pci_dev_put(temp); } } pci_dev_put(func->dev); } /* * The following function is to fix kernel bug regarding * getting bus entries, here we manually add those primary * bus entries to kernel bus structure 
whenever apply */ static u8 bus_structure_fixup(u8 busno) { struct pci_bus *bus; struct pci_dev *dev; u16 l; if (pci_find_bus(0, busno) || !(ibmphp_find_same_bus_num(busno))) return 1; bus = kmalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { err("%s - out of memory\n", __func__); return 1; } dev = kmalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { kfree(bus); err("%s - out of memory\n", __func__); return 1; } bus->number = busno; bus->ops = ibmphp_pci_bus->ops; dev->bus = bus; for (dev->devfn = 0; dev->devfn < 256; dev->devfn += 8) { if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) && (l != 0x0000) && (l != 0xffff)) { debug("%s - Inside bus_struture_fixup()\n", __func__); pci_scan_bus(busno, ibmphp_pci_bus->ops, NULL); break; } } kfree(dev); kfree(bus); return 0; } static int ibm_configure_device(struct pci_func *func) { unsigned char bus; struct pci_bus *child; int num; int flag = 0; /* this is to make sure we don't double scan the bus, for bridged devices primarily */ if (!(bus_structure_fixup(func->busno))) flag = 1; if (func->dev == NULL) func->dev = pci_get_bus_and_slot(func->busno, PCI_DEVFN(func->device, func->function)); if (func->dev == NULL) { struct pci_bus *bus = pci_find_bus(0, func->busno); if (!bus) return 0; num = pci_scan_slot(bus, PCI_DEVFN(func->device, func->function)); if (num) pci_bus_add_devices(bus); func->dev = pci_get_bus_and_slot(func->busno, PCI_DEVFN(func->device, func->function)); if (func->dev == NULL) { err("ERROR... 
: pci_dev still NULL\n"); return 0; } } if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) { pci_read_config_byte(func->dev, PCI_SECONDARY_BUS, &bus); child = pci_add_new_bus(func->dev->bus, func->dev, bus); pci_do_scan_bus(child); } return 0; } /******************************************************* * Returns whether the bus is empty or not *******************************************************/ static int is_bus_empty(struct slot * slot_cur) { int rc; struct slot * tmp_slot; u8 i = slot_cur->bus_on->slot_min; while (i <= slot_cur->bus_on->slot_max) { if (i == slot_cur->number) { i++; continue; } tmp_slot = ibmphp_get_slot_from_physical_num(i); if (!tmp_slot) return 0; rc = slot_update(&tmp_slot); if (rc) return 0; if (SLOT_PRESENT(tmp_slot->status) && SLOT_PWRGD(tmp_slot->status)) return 0; i++; } return 1; } /*********************************************************** * If the HPC permits and the bus currently empty, tries to set the * bus speed and mode at the maximum card and bus capability * Parameters: slot * Returns: bus is set (0) or error code ***********************************************************/ static int set_bus(struct slot * slot_cur) { int rc; u8 speed; u8 cmd = 0x0; int retval; static struct pci_device_id ciobx[] = { { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) }, { }, }; debug("%s - entry slot # %d\n", __func__, slot_cur->number); if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) { rc = slot_update(&slot_cur); if (rc) return rc; speed = SLOT_SPEED(slot_cur->ext_status); debug("ext_status = %x, speed = %x\n", slot_cur->ext_status, speed); switch (speed) { case HPC_SLOT_SPEED_33: cmd = HPC_BUS_33CONVMODE; break; case HPC_SLOT_SPEED_66: if (SLOT_PCIX(slot_cur->ext_status)) { if ((slot_cur->supported_speed >= BUS_SPEED_66) && (slot_cur->supported_bus_mode == BUS_MODE_PCIX)) cmd = HPC_BUS_66PCIXMODE; else if (!SLOT_BUS_MODE(slot_cur->ext_status)) /* if max slot/bus capability is 66 pci and there's no bus mode 
mismatch, then the adapter supports 66 pci */ cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; } else { if (slot_cur->supported_speed >= BUS_SPEED_66) cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; } break; case HPC_SLOT_SPEED_133: switch (slot_cur->supported_speed) { case BUS_SPEED_33: cmd = HPC_BUS_33CONVMODE; break; case BUS_SPEED_66: if (slot_cur->supported_bus_mode == BUS_MODE_PCIX) cmd = HPC_BUS_66PCIXMODE; else cmd = HPC_BUS_66CONVMODE; break; case BUS_SPEED_100: cmd = HPC_BUS_100PCIXMODE; break; case BUS_SPEED_133: /* This is to take care of the bug in CIOBX chip */ if (pci_dev_present(ciobx)) ibmphp_hpc_writeslot(slot_cur, HPC_BUS_100PCIXMODE); cmd = HPC_BUS_133PCIXMODE; break; default: err("Wrong bus speed\n"); return -ENODEV; } break; default: err("wrong slot speed\n"); return -ENODEV; } debug("setting bus speed for slot %d, cmd %x\n", slot_cur->number, cmd); retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("setting bus speed failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in set_bus\n"); return -EIO; } } /* This is for x440, once Brandon fixes the firmware, will not need this delay */ msleep(1000); debug("%s -Exit\n", __func__); return 0; } /* This routine checks the bus limitations that the slot is on from the BIOS. * This is used in deciding whether or not to power up the slot. * (electrical/spec limitations. 
For example, >1 133 MHz or >2 66 PCI cards on * same bus) * Parameters: slot * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus */ static int check_limitations(struct slot *slot_cur) { u8 i; struct slot * tmp_slot; u8 count = 0; u8 limitation = 0; for (i = slot_cur->bus_on->slot_min; i <= slot_cur->bus_on->slot_max; i++) { tmp_slot = ibmphp_get_slot_from_physical_num(i); if (!tmp_slot) return -ENODEV; if ((SLOT_PWRGD(tmp_slot->status)) && !(SLOT_CONNECT(tmp_slot->status))) count++; } get_cur_bus_info(&slot_cur); switch (slot_cur->bus_on->current_speed) { case BUS_SPEED_33: limitation = slot_cur->bus_on->slots_at_33_conv; break; case BUS_SPEED_66: if (slot_cur->bus_on->current_bus_mode == BUS_MODE_PCIX) limitation = slot_cur->bus_on->slots_at_66_pcix; else limitation = slot_cur->bus_on->slots_at_66_conv; break; case BUS_SPEED_100: limitation = slot_cur->bus_on->slots_at_100_pcix; break; case BUS_SPEED_133: limitation = slot_cur->bus_on->slots_at_133_pcix; break; } if ((count + 1) > limitation) return -EINVAL; return 0; } static inline void print_card_capability(struct slot *slot_cur) { info("capability of the card is "); if ((slot_cur->ext_status & CARD_INFO) == PCIX133) info(" 133 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCIX66) info(" 66 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCI66) info(" 66 MHz PCI\n"); else info(" 33 MHz PCI\n"); } /* This routine will power on the slot, configure the device(s) and find the * drivers for them. 
* Parameters: hotplug_slot * Returns: 0 or failure codes */ static int enable_slot(struct hotplug_slot *hs) { int rc, i, rcpr; struct slot *slot_cur; u8 function; struct pci_func *tmp_func; ibmphp_lock_operations(); debug("ENABLING SLOT........\n"); slot_cur = hs->private; if ((rc = validate(slot_cur, ENABLE))) { err("validate function failed\n"); goto error_nopower; } attn_LED_blink(slot_cur); rc = set_bus(slot_cur); if (rc) { err("was not able to set the bus\n"); goto error_nopower; } /*-----------------debugging------------------------------*/ get_cur_bus_info(&slot_cur); debug("the current bus speed right after set_bus = %x\n", slot_cur->bus_on->current_speed); /*----------------------------------------------------------*/ rc = check_limitations(slot_cur); if (rc) { err("Adding this card exceeds the limitations of this bus.\n"); err("(i.e., >1 133MHz cards running on same bus, or " ">2 66 PCI cards running on same bus.\n"); err("Try hot-adding into another bus\n"); rc = -EINVAL; goto error_nopower; } rc = power_on(slot_cur); if (rc) { err("something wrong when powering up... please see below for details\n"); /* need to turn off before on, otherwise, blinking overwrites */ attn_off(slot_cur); attn_on(slot_cur); if (slot_update(&slot_cur)) { attn_off(slot_cur); attn_on(slot_cur); rc = -ENODEV; goto exit; } /* Check to see the error of why it failed */ if ((SLOT_POWER(slot_cur->status)) && !(SLOT_PWRGD(slot_cur->status))) err("power fault occurred trying to power up\n"); else if (SLOT_BUS_SPEED(slot_cur->status)) { err("bus speed mismatch occurred. please check " "current bus speed and card capability\n"); print_card_capability(slot_cur); } else if (SLOT_BUS_MODE(slot_cur->ext_status)) { err("bus mode mismatch occurred. 
please check " "current bus mode and card capability\n"); print_card_capability(slot_cur); } ibmphp_update_slot_info(slot_cur); goto exit; } debug("after power_on\n"); /*-----------------------debugging---------------------------*/ get_cur_bus_info(&slot_cur); debug("the current bus speed right after power_on = %x\n", slot_cur->bus_on->current_speed); /*----------------------------------------------------------*/ rc = slot_update(&slot_cur); if (rc) goto error_power; rc = -EINVAL; if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) { err("power fault occurred trying to power up...\n"); goto error_power; } if (SLOT_POWER(slot_cur->status) && (SLOT_BUS_SPEED(slot_cur->status))) { err("bus speed mismatch occurred. please check current bus " "speed and card capability\n"); print_card_capability(slot_cur); goto error_power; } /* Don't think this case will happen after above checks... * but just in case, for paranoia sake */ if (!(SLOT_POWER(slot_cur->status))) { err("power on failed...\n"); goto error_power; } slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { /* We cannot do update_slot_info here, since no memory for * kmalloc n.e.ways, and update_slot_info allocates some */ err("out of system memory\n"); rc = -ENOMEM; goto error_power; } slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; for (i = 0; i < 4; i++) slot_cur->func->irq[i] = slot_cur->irq[i]; debug("b4 configure_card, slot_cur->bus = %x, slot_cur->device = %x\n", slot_cur->bus, slot_cur->device); if (ibmphp_configure_card(slot_cur->func, slot_cur->number)) { err("configure_card was unsuccessful...\n"); /* true because don't need to actually deallocate resources, * just remove references */ ibmphp_unconfigure_card(&slot_cur, 1); debug("after unconfigure_card\n"); slot_cur->func = NULL; rc = -ENOMEM; goto error_power; } function = 0x00; do { tmp_func = ibm_slot_find(slot_cur->bus, slot_cur->func->device, function++); if 
(tmp_func && !(tmp_func->dev)) ibm_configure_device(tmp_func); } while (tmp_func); attn_off(slot_cur); if (slot_update(&slot_cur)) { rc = -EFAULT; goto exit; } ibmphp_print_test(); rc = ibmphp_update_slot_info(slot_cur); exit: ibmphp_unlock_operations(); return rc; error_nopower: attn_off(slot_cur); /* need to turn off if was blinking b4 */ attn_on(slot_cur); error_cont: rcpr = slot_update(&slot_cur); if (rcpr) { rc = rcpr; goto exit; } ibmphp_update_slot_info(slot_cur); goto exit; error_power: attn_off(slot_cur); /* need to turn off if was blinking b4 */ attn_on(slot_cur); rcpr = power_off(slot_cur); if (rcpr) { rc = rcpr; goto exit; } goto error_cont; } /************************************************************** * HOT REMOVING ADAPTER CARD * * INPUT: POINTER TO THE HOTPLUG SLOT STRUCTURE * * OUTPUT: SUCCESS 0 ; FAILURE: UNCONFIGURE , VALIDATE * DISABLE POWER , * **************************************************************/ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; int rc; ibmphp_lock_operations(); rc = ibmphp_do_disable_slot(slot); ibmphp_unlock_operations(); return rc; } int ibmphp_do_disable_slot(struct slot *slot_cur) { int rc; u8 flag; debug("DISABLING SLOT...\n"); if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) { return -ENODEV; } flag = slot_cur->flag; slot_cur->flag = 1; if (flag == 1) { rc = validate(slot_cur, DISABLE); /* checking if powered off already & valid slot # */ if (rc) goto error; } attn_LED_blink(slot_cur); if (slot_cur->func == NULL) { /* We need this for fncs's that were there on bootup */ slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { err("out of system memory\n"); rc = -ENOMEM; goto error; } slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; } ibm_unconfigure_device(slot_cur->func); /* If we got here from latch suddenly opening on operating card or a power fault, there's no power to the card, 
so cannot read from it to determine what resources it occupied. This operation is forbidden anyhow. The best we can do is remove it from kernel lists at least */ if (!flag) { attn_off(slot_cur); return 0; } rc = ibmphp_unconfigure_card(&slot_cur, 0); slot_cur->func = NULL; debug("in disable_slot. after unconfigure_card\n"); if (rc) { err("could not unconfigure card.\n"); goto error; } rc = ibmphp_hpc_writeslot(slot_cur, HPC_SLOT_OFF); if (rc) goto error; attn_off(slot_cur); rc = slot_update(&slot_cur); if (rc) goto exit; rc = ibmphp_update_slot_info(slot_cur); ibmphp_print_test(); exit: return rc; error: /* Need to turn off if was blinking b4 */ attn_off(slot_cur); attn_on(slot_cur); if (slot_update(&slot_cur)) { rc = -EFAULT; goto exit; } if (flag) ibmphp_update_slot_info(slot_cur); goto exit; } struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { .set_attention_status = set_attention_status, .enable_slot = enable_slot, .disable_slot = ibmphp_disable_slot, .hardware_test = NULL, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_present, /* .get_max_adapter_speed = get_max_adapter_speed, .get_bus_name_status = get_bus_name, */ }; static void ibmphp_unload(void) { free_slots(); debug("after slots\n"); ibmphp_free_resources(); debug("after resources\n"); ibmphp_free_bus_info_queue(); debug("after bus info\n"); ibmphp_free_ebda_hpc_queue(); debug("after ebda hpc\n"); ibmphp_free_ebda_pci_rsrc_queue(); debug("after ebda pci rsrc\n"); kfree(ibmphp_pci_bus); } static int __init ibmphp_init(void) { struct pci_bus *bus; int i = 0; int rc = 0; init_flag = 1; info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); ibmphp_pci_bus = kmalloc(sizeof(*ibmphp_pci_bus), GFP_KERNEL); if (!ibmphp_pci_bus) { err("out of memory\n"); rc = -ENOMEM; goto exit; } bus = pci_find_bus(0, 0); if (!bus) { err("Can't find the root pci bus, can not continue\n"); rc = -ENODEV; goto error; } 
memcpy(ibmphp_pci_bus, bus, sizeof(*ibmphp_pci_bus)); ibmphp_debug = debug; ibmphp_hpc_initvars(); for (i = 0; i < 16; i++) irqs[i] = 0; if ((rc = ibmphp_access_ebda())) goto error; debug("after ibmphp_access_ebda()\n"); if ((rc = ibmphp_rsrc_init())) goto error; debug("AFTER Resource & EBDA INITIALIZATIONS\n"); max_slots = get_max_slots(); if ((rc = ibmphp_register_pci())) goto error; if (init_ops()) { rc = -ENODEV; goto error; } ibmphp_print_test(); if ((rc = ibmphp_hpc_start_poll_thread())) { goto error; } exit: return rc; error: ibmphp_unload(); goto exit; } static void __exit ibmphp_exit(void) { ibmphp_hpc_stop_poll_thread(); debug("after polling\n"); ibmphp_unload(); debug("done\n"); } module_init(ibmphp_init); module_exit(ibmphp_exit);
gpl-2.0
muftiarfan/Sony_xperia_m_ktt
security/selinux/nlmsgtab.c
4189
6554
/*
 * Netlink message type permission tables, for user generated messages.
 *
 * Author: James Morris <jmorris@redhat.com>
 *
 * Copyright (C) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/inet_diag.h>
#include <linux/xfrm.h>
#include <linux/audit.h>

#include "flask.h"
#include "av_permissions.h"
#include "security.h"

/* One (netlink message type -> required SELinux permission) mapping. */
struct nlmsg_perm {
	u16	nlmsg_type;
	u32	perm;
};

/* NETLINK_ROUTE: RTM_GET* need nlmsg_read, everything else nlmsg_write. */
static struct nlmsg_perm nlmsg_route_perms[] =
{
	{ RTM_NEWLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETLINK,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_SETLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_NEWADDR,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELADDR,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETADDR,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETROUTE,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETNEIGH,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWRULE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELRULE,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETRULE,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETQDISC,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETTCLASS,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETTFILTER,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWACTION,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELACTION,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETACTION,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_NEWPREFIX,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETMULTICAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_GETANYCAST,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_GETNEIGHTBL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_SETNEIGHTBL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_NEWADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_DELADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
	{ RTM_GETADDRLABEL,	NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_GETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_READ  },
	{ RTM_SETDCB,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
};

/* ip_queue verdict/config messages are all writes. */
static struct nlmsg_perm nlmsg_firewall_perms[] =
{
	{ IPQM_MODE,		NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
	{ IPQM_VERDICT,		NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
};

/* Socket-diag dump requests are reads. */
static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
{
	{ TCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
	{ DCCPDIAG_GETSOCK,	NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
};

/* IPsec (xfrm) messages: GETs are reads, everything else writes. */
static struct nlmsg_perm nlmsg_xfrm_perms[] =
{
	{ XFRM_MSG_NEWSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_DELSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_GETSA,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
	{ XFRM_MSG_NEWPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_DELPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_GETPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
	{ XFRM_MSG_ALLOCSPI,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_ACQUIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_EXPIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_UPDPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_UPDSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_POLEXPIRE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_FLUSHSA,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_FLUSHPOLICY,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_NEWAE,	NETLINK_XFRM_SOCKET__NLMSG_WRITE },
	{ XFRM_MSG_GETAE,	NETLINK_XFRM_SOCKET__NLMSG_READ  },
};

/*
 * Audit control messages; note the distinct readpriv (rule listing),
 * relay (userspace record injection) and tty_audit permissions.
 */
static struct nlmsg_perm nlmsg_audit_perms[] =
{
	{ AUDIT_GET,		NETLINK_AUDIT_SOCKET__NLMSG_READ     },
	{ AUDIT_SET,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_LIST,		NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
	{ AUDIT_ADD,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_DEL,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_LIST_RULES,	NETLINK_AUDIT_SOCKET__NLMSG_READPRIV },
	{ AUDIT_ADD_RULE,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_DEL_RULE,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_USER,		NETLINK_AUDIT_SOCKET__NLMSG_RELAY    },
	{ AUDIT_SIGNAL_INFO,	NETLINK_AUDIT_SOCKET__NLMSG_READ     },
	{ AUDIT_TRIM,		NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_MAKE_EQUIV,	NETLINK_AUDIT_SOCKET__NLMSG_WRITE    },
	{ AUDIT_TTY_GET,	NETLINK_AUDIT_SOCKET__NLMSG_READ     },
	{ AUDIT_TTY_SET,	NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
};

/*
 * Linear-scan a permission table for @nlmsg_type.  On a hit, store the
 * required permission in *@perm and return 0; otherwise return -EINVAL.
 * @tabsize is the table size in bytes (callers pass sizeof(table)).
 */
static int nlmsg_perm(u16 nlmsg_type, u32 *perm, struct nlmsg_perm *tab, size_t tabsize)
{
	int i, err = -EINVAL;

	for (i = 0; i < tabsize/sizeof(struct nlmsg_perm); i++)
		if (nlmsg_type == tab[i].nlmsg_type) {
			*perm = tab[i].perm;
			err = 0;
			break;
		}
	return err;
}

/*
 * Map a (socket security class, netlink message type) pair to the
 * SELinux permission required to send that message.  Returns 0 and
 * fills *@perm on success, -EINVAL for an unknown message type in a
 * handled class, or -ENOENT for a class with no userspace messaging.
 */
int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
{
	int err = 0;

	switch (sclass) {
	case SECCLASS_NETLINK_ROUTE_SOCKET:
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
				 sizeof(nlmsg_route_perms));
		break;

	case SECCLASS_NETLINK_FIREWALL_SOCKET:
	case SECCLASS_NETLINK_IP6FW_SOCKET:
		/* IPv4 and IPv6 firewall queues share one table. */
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_firewall_perms,
				 sizeof(nlmsg_firewall_perms));
		break;

	case SECCLASS_NETLINK_TCPDIAG_SOCKET:
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms,
				 sizeof(nlmsg_tcpdiag_perms));
		break;

	case SECCLASS_NETLINK_XFRM_SOCKET:
		err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
				 sizeof(nlmsg_xfrm_perms));
		break;

	case SECCLASS_NETLINK_AUDIT_SOCKET:
		/*
		 * Both user-message ranges map straight to nlmsg_relay
		 * without a table lookup; only control messages go
		 * through nlmsg_audit_perms.
		 */
		if ((nlmsg_type >= AUDIT_FIRST_USER_MSG &&
		     nlmsg_type <= AUDIT_LAST_USER_MSG) ||
		    (nlmsg_type >= AUDIT_FIRST_USER_MSG2 &&
		     nlmsg_type <= AUDIT_LAST_USER_MSG2)) {
			*perm = NETLINK_AUDIT_SOCKET__NLMSG_RELAY;
		} else {
			err = nlmsg_perm(nlmsg_type, perm, nlmsg_audit_perms,
					 sizeof(nlmsg_audit_perms));
		}
		break;

	/* No messaging from userspace, or class unknown/unhandled */
	default:
		err = -ENOENT;
		break;
	}

	return err;
}
gpl-2.0
jawad6233/Lenovo_A820_kernel_kk
kernel/arch/arm/mach-pxa/am300epd.c
4957
6586
/*
 * am300epd.c -- Platform device for AM300 EPD kit
 *
 * Copyright (C) 2008, Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 *
 * This work was made possible by help and equipment support from E-Ink
 * Corporation. http://support.eink.com/community
 *
 * This driver is written to be used with the Broadsheet display controller.
 * on the AM300 EPD prototype kit/development kit with an E-Ink 800x600
 * Vizplex EPD on a Gumstix board using the Broadsheet interface board.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/gpio.h>

#include <mach/gumstix.h>
#include <mach/mfp-pxa25x.h>
#include <mach/pxafb.h>

#include "generic.h"

#include <video/broadsheetfb.h>

/* Panel type reported to broadsheetfb; overridable via module parameter. */
static unsigned int panel_type = 6;
static struct platform_device *am300_device;
static struct broadsheet_board am300_board;

/* MFP setup: control lines plus the 16-bit host data bus, all as GPIOs. */
static unsigned long am300_pin_config[] __initdata = {
	GPIO16_GPIO,
	GPIO17_GPIO,
	GPIO32_GPIO,
	GPIO48_GPIO,
	GPIO49_GPIO,
	GPIO51_GPIO,
	GPIO74_GPIO,
	GPIO75_GPIO,
	GPIO76_GPIO,
	GPIO77_GPIO,

	/* this is the 16-bit hdb bus 58-73 */
	GPIO58_GPIO,
	GPIO59_GPIO,
	GPIO60_GPIO,
	GPIO61_GPIO,

	GPIO62_GPIO,
	GPIO63_GPIO,
	GPIO64_GPIO,
	GPIO65_GPIO,

	GPIO66_GPIO,
	GPIO67_GPIO,
	GPIO68_GPIO,
	GPIO69_GPIO,

	GPIO70_GPIO,
	GPIO71_GPIO,
	GPIO72_GPIO,
	GPIO73_GPIO,
};

/* register offsets for gpio control */
#define PWR_GPIO_PIN	16
#define CFG_GPIO_PIN	17
#define RDY_GPIO_PIN	32
#define DC_GPIO_PIN	48
#define RST_GPIO_PIN	49
#define LED_GPIO_PIN	51
#define RD_GPIO_PIN	74
#define WR_GPIO_PIN	75
#define CS_GPIO_PIN	76
#define IRQ_GPIO_PIN	77

/* hdb bus */
#define DB0_GPIO_PIN	58
#define DB15_GPIO_PIN	73

/* Control GPIOs, index-aligned with gpio_names[] below. */
static int gpios[] = { PWR_GPIO_PIN, CFG_GPIO_PIN, RDY_GPIO_PIN, DC_GPIO_PIN,
			RST_GPIO_PIN, RD_GPIO_PIN, WR_GPIO_PIN, CS_GPIO_PIN,
			IRQ_GPIO_PIN, LED_GPIO_PIN };
static char *gpio_names[] = { "PWR", "CFG", "RDY", "DC", "RST", "RD", "WR",
				"CS", "IRQ", "LED" };

/* Block (uninterruptibly) until the controller raises its RDY line. */
static int am300_wait_event(struct broadsheetfb_par *par)
{
	/* todo: improve err recovery */
	wait_event(par->waitq, gpio_get_value(RDY_GPIO_PIN));
	return 0;
}

/*
 * Claim and configure every GPIO the board uses, then run the reset
 * sequence to put the controller into command mode.  On failure all
 * GPIOs acquired so far are released before returning the error.
 */
static int am300_init_gpio_regs(struct broadsheetfb_par *par)
{
	int i;
	int err;
	char dbname[8];

	for (i = 0; i < ARRAY_SIZE(gpios); i++) {
		err = gpio_request(gpios[i], gpio_names[i]);
		if (err) {
			dev_err(&am300_device->dev, "failed requesting "
				"gpio %s, err=%d\n", gpio_names[i], err);
			goto err_req_gpio;
		}
	}

	/* we also need to take care of the hdb bus */
	for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++) {
		sprintf(dbname, "DB%d", i);
		err = gpio_request(i, dbname);
		if (err) {
			dev_err(&am300_device->dev, "failed requesting "
				"gpio %d, err=%d\n", i, err);
			goto err_req_gpio2;
		}
	}

	/* setup the outputs and init values */
	gpio_direction_output(PWR_GPIO_PIN, 0);
	gpio_direction_output(CFG_GPIO_PIN, 1);
	gpio_direction_output(DC_GPIO_PIN, 0);
	gpio_direction_output(RD_GPIO_PIN, 1);
	gpio_direction_output(WR_GPIO_PIN, 1);
	gpio_direction_output(CS_GPIO_PIN, 1);
	gpio_direction_output(RST_GPIO_PIN, 0);

	/* setup the inputs */
	gpio_direction_input(RDY_GPIO_PIN);
	gpio_direction_input(IRQ_GPIO_PIN);

	/*
	 * Initialize the hdb bus driven low.
	 * NOTE(review): original comment said "start the hdb bus as an
	 * input" but the code configures the pins as outputs -- confirm
	 * which is intended before relying on this.
	 */
	for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++)
		gpio_direction_output(i, 0);

	/* go into command mode */
	gpio_set_value(CFG_GPIO_PIN, 1);
	gpio_set_value(RST_GPIO_PIN, 0);
	msleep(10);
	gpio_set_value(RST_GPIO_PIN, 1);
	msleep(10);
	am300_wait_event(par);

	return 0;

err_req_gpio2:
	/* unwind the hdb pins requested so far, then fall through */
	while (--i >= DB0_GPIO_PIN)
		gpio_free(i);
	i = ARRAY_SIZE(gpios);
err_req_gpio:
	while (--i >= 0)
		gpio_free(gpios[i]);

	return err;
}

/* broadsheet_board .init hook. */
static int am300_init_board(struct broadsheetfb_par *par)
{
	return am300_init_gpio_regs(par);
}

/* broadsheet_board .cleanup hook: release the IRQ and every GPIO. */
static void am300_cleanup(struct broadsheetfb_par *par)
{
	int i;

	free_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), par);

	for (i = 0; i < ARRAY_SIZE(gpios); i++)
		gpio_free(gpios[i]);

	for (i = DB0_GPIO_PIN; i <= DB15_GPIO_PIN; i++)
		gpio_free(i);

}

/* Read the 16-bit host data bus, one GPIO per bit (DB0 = bit 0). */
static u16 am300_get_hdb(struct broadsheetfb_par *par)
{
	u16 res = 0;
	int i;

	for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++)
		res |= (gpio_get_value(DB0_GPIO_PIN + i)) ? (1 << i) : 0;

	return res;
}

/* Drive @data onto the 16-bit host data bus, one GPIO per bit. */
static void am300_set_hdb(struct broadsheetfb_par *par, u16 data)
{
	int i;

	for (i = 0; i <= (DB15_GPIO_PIN - DB0_GPIO_PIN) ; i++)
		gpio_set_value(DB0_GPIO_PIN + i, (data >> i) & 0x01);
}

/* Set one of the Broadsheet control lines (CS/DC/WR) to @state. */
static void am300_set_ctl(struct broadsheetfb_par *par, unsigned char bit,
				u8 state)
{
	switch (bit) {
	case BS_CS:
		gpio_set_value(CS_GPIO_PIN, state);
		break;
	case BS_DC:
		gpio_set_value(DC_GPIO_PIN, state);
		break;
	case BS_WR:
		gpio_set_value(WR_GPIO_PIN, state);
		break;
	}
}

static int am300_get_panel_type(void)
{
	return panel_type;
}

/* RDY rising-edge handler: wake anyone sleeping in am300_wait_event(). */
static irqreturn_t am300_handle_irq(int irq, void *dev_id)
{
	struct broadsheetfb_par *par = dev_id;

	wake_up(&par->waitq);
	return IRQ_HANDLED;
}

/* broadsheet_board .setup_irq hook: hook the RDY line's GPIO IRQ. */
static int am300_setup_irq(struct fb_info *info)
{
	int ret;
	struct broadsheetfb_par *par = info->par;

	ret = request_irq(PXA_GPIO_TO_IRQ(RDY_GPIO_PIN), am300_handle_irq,
				IRQF_DISABLED|IRQF_TRIGGER_RISING,
				"AM300", par);
	if (ret)
		dev_err(&am300_device->dev, "request_irq failed: %d\n", ret);

	return ret;
}

static struct broadsheet_board am300_board = {
	.owner			= THIS_MODULE,
	.init			= am300_init_board,
	.cleanup		= am300_cleanup,
	.set_hdb		= am300_set_hdb,
	.get_hdb		= am300_get_hdb,
	.set_ctl		= am300_set_ctl,
	.wait_for_rdy		= am300_wait_event,
	.get_panel_type		= am300_get_panel_type,
	.setup_irq		= am300_setup_irq,
};

/* Board init: configure pins and register the broadsheetfb platform device. */
int __init am300_init(void)
{
	int ret;

	pxa2xx_mfp_config(ARRAY_AND_SIZE(am300_pin_config));

	/* request our platform independent driver */
	request_module("broadsheetfb");

	am300_device = platform_device_alloc("broadsheetfb", -1);
	if (!am300_device)
		return -ENOMEM;

	/* the am300_board that will be seen by broadsheetfb is a copy */
	platform_device_add_data(am300_device, &am300_board,
					sizeof(am300_board));

	ret = platform_device_add(am300_device);

	if (ret) {
		platform_device_put(am300_device);
		return ret;
	}

	return 0;
}

module_param(panel_type, uint, 0);
MODULE_PARM_DESC(panel_type, "Select the panel type: 37, 6, 97");

MODULE_DESCRIPTION("board driver for am300 epd kit");
MODULE_AUTHOR("Jaya Kumar");
MODULE_LICENSE("GPL");
gpl-2.0
Fusion-Devices/android_kernel_oneplus_msm8974
drivers/dca/dca-core.c
5725
11284
/*
 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports an interface for DCA clients and providers to meet.
 */

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DCA_VERSION "1.12.1"

MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

/* Protects dca_domains, each domain's provider list, and the blocked flag. */
static DEFINE_RAW_SPINLOCK(dca_lock);
static LIST_HEAD(dca_domains);

static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);

/* Set once an ioatdma v3.0 provider registers in a multi-domain system. */
static int dca_providers_blocked;

/* Walk up from @dev's PCI bus to its root-complex bus (domain key). */
static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pci_bus *bus = pdev->bus;

	while (bus->parent)
		bus = bus->parent;

	return bus;
}

/*
 * Allocate a domain keyed by root-complex bus @rc.  GFP_NOWAIT because
 * the caller may be holding dca_lock; returns NULL on allocation failure.
 */
static struct dca_domain *dca_allocate_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_NOWAIT);
	if (!domain)
		return NULL;

	INIT_LIST_HEAD(&domain->dca_providers);
	domain->pci_rc = rc;

	return domain;
}

/* Unlink and free a domain; caller holds dca_lock. */
static void dca_free_domain(struct dca_domain *domain)
{
	list_del(&domain->node);
	kfree(domain);
}

/* True if @dev is one of the Intel ioatdma v3.0 (TBG) devices. */
static int dca_provider_ioat_ver_3_0(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
		((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
		(pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
}

/*
 * Tear down every provider.  Providers are moved to a private list under
 * dca_lock, then sysfs removal is done after the lock is dropped (sysfs
 * can sleep; dca_lock is a raw spinlock).
 */
static void unregister_dca_providers(void)
{
	struct dca_provider *dca, *_dca;
	struct list_head unregistered_providers;
	struct dca_domain *domain;
	unsigned long flags;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	INIT_LIST_HEAD(&unregistered_providers);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	/* at this point only one domain in the list is expected */
	domain = list_first_entry(&dca_domains, struct dca_domain, node);

	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
		list_move(&dca->node, &unregistered_providers);

	dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
		dca_sysfs_remove_provider(dca);
		list_del(&dca->node);
	}
}

/* Find the domain for root-complex @rc, or NULL; caller holds dca_lock. */
static struct dca_domain *dca_find_domain(struct pci_bus *rc)
{
	struct dca_domain *domain;

	list_for_each_entry(domain, &dca_domains, node)
		if (domain->pci_rc == rc)
			return domain;

	return NULL;
}

/*
 * Look up @dev's domain; caller holds dca_lock.  If none exists and the
 * device is an ioat v3.0 provider while other domains are already live,
 * flag all providers as blocked (that combination is unsupported).
 * Returns NULL when the domain does not exist; the caller allocates it.
 */
static struct dca_domain *dca_get_domain(struct device *dev)
{
	struct pci_bus *rc;
	struct dca_domain *domain;

	rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(rc);

	if (!domain) {
		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
			dca_providers_blocked = 1;
	}

	return domain;
}

/*
 * Find the provider managing @dev, or with a NULL @dev the first provider
 * of the first domain.  Caller holds dca_lock.
 */
static struct dca_provider *dca_find_provider_by_dev(struct device *dev)
{
	struct dca_provider *dca;
	struct pci_bus *rc;
	struct dca_domain *domain;

	if (dev) {
		rc = dca_pci_rc_from_dev(dev);
		domain = dca_find_domain(rc);
		if (!domain)
			return NULL;
	} else {
		if (!list_empty(&dca_domains))
			domain = list_first_entry(&dca_domains,
						  struct dca_domain,
						  node);
		else
			return NULL;
	}

	list_for_each_entry(dca, &domain->dca_providers, node)
		if ((!dev) || (dca->ops->dev_managed(dca, dev)))
			return dca;

	return NULL;
}

/**
 * dca_add_requester - add a dca client to the list
 * @dev - the device that wants dca service
 */
int dca_add_requester(struct device *dev)
{
	struct dca_provider *dca;
	int err, slot = -ENODEV;
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);

	/* check if the requester has not been added already */
	dca = dca_find_provider_by_dev(dev);
	if (dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -EEXIST;
	}

	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (!domain) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}

	/* first provider in the domain that accepts @dev wins */
	list_for_each_entry(dca, &domain->dca_providers, node) {
		slot = dca->ops->add_requester(dca, dev);
		if (slot >= 0)
			break;
	}

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	err = dca_sysfs_add_req(dca, dev, slot);
	if (err) {
		/*
		 * Roll back, but only if the same provider still manages
		 * @dev (guards against a concurrent unregister).
		 */
		raw_spin_lock_irqsave(&dca_lock, flags);
		if (dca == dca_find_provider_by_dev(dev))
			dca->ops->remove_requester(dca, dev);
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dca_add_requester);

/**
 * dca_remove_requester - remove a dca client from the list
 * @dev - the device that wants dca service
 */
int dca_remove_requester(struct device *dev)
{
	struct dca_provider *dca;
	int slot;
	unsigned long flags;

	if (!dev)
		return -EFAULT;

	raw_spin_lock_irqsave(&dca_lock, flags);
	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	slot = dca->ops->remove_requester(dca, dev);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	if (slot < 0)
		return slot;

	dca_sysfs_remove_req(dca, slot);

	return 0;
}
EXPORT_SYMBOL_GPL(dca_remove_requester);

/**
 * dca_common_get_tag - return the dca tag (serves both new and old api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 *
 * NOTE(review): the -ENODEV error is returned through a u8, so callers
 * see it truncated; confirm callers treat any value as "best effort".
 */
u8 dca_common_get_tag(struct device *dev, int cpu)
{
	struct dca_provider *dca;
	u8 tag;
	unsigned long flags;

	raw_spin_lock_irqsave(&dca_lock, flags);

	dca = dca_find_provider_by_dev(dev);
	if (!dca) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	tag = dca->ops->get_tag(dca, dev, cpu);

	raw_spin_unlock_irqrestore(&dca_lock, flags);
	return tag;
}

/**
 * dca3_get_tag - return the dca tag to the requester device
 *                for the given cpu (new api)
 * @dev - the device that wants dca service
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca3_get_tag(struct device *dev, int cpu)
{
	if (!dev)
		return -EFAULT;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca3_get_tag);

/**
 * dca_get_tag - return the dca tag for the given cpu (old api)
 * @cpu - the cpuid as returned by get_cpu()
 */
u8 dca_get_tag(int cpu)
{
	struct device *dev = NULL;

	return dca_common_get_tag(dev, cpu);
}
EXPORT_SYMBOL_GPL(dca_get_tag);

/**
 * alloc_dca_provider - get data struct for describing a dca provider
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size)
{
	struct dca_provider *dca;
	int alloc_size;

	/* provider struct and its private area in one allocation */
	alloc_size = (sizeof(*dca) + priv_size);
	dca = kzalloc(alloc_size, GFP_KERNEL);
	if (!dca)
		return NULL;
	dca->ops = ops;

	return dca;
}
EXPORT_SYMBOL_GPL(alloc_dca_provider);

/**
 * free_dca_provider - release the dca provider data struct
 * @ops - pointer to struct of dca operation function pointers
 * @priv_size - size of extra mem to be added for provider's needs
 */
void free_dca_provider(struct dca_provider *dca)
{
	kfree(dca);
}
EXPORT_SYMBOL_GPL(free_dca_provider);

/**
 * register_dca_provider - register a dca provider
 * @dca - struct created by alloc_dca_provider()
 * @dev - device providing dca services
 *
 * The domain allocation happens with dca_lock dropped (kzalloc), then
 * the lookup is rechecked under the lock; a losing racer's allocation
 * is freed at the end via the kfree(newdomain) no-op/cleanup.
 */
int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
	int err;
	unsigned long flags;
	struct dca_domain *domain, *newdomain = NULL;

	raw_spin_lock_irqsave(&dca_lock, flags);
	if (dca_providers_blocked) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return -ENODEV;
	}
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	err = dca_sysfs_add_provider(dca, dev);
	if (err)
		return err;

	raw_spin_lock_irqsave(&dca_lock, flags);
	domain = dca_get_domain(dev);
	if (!domain) {
		struct pci_bus *rc;

		if (dca_providers_blocked) {
			/* unsupported mix detected: tear everything down */
			raw_spin_unlock_irqrestore(&dca_lock, flags);
			dca_sysfs_remove_provider(dca);
			unregister_dca_providers();
			return -ENODEV;
		}

		raw_spin_unlock_irqrestore(&dca_lock, flags);
		rc = dca_pci_rc_from_dev(dev);
		newdomain = dca_allocate_domain(rc);
		if (!newdomain)
			return -ENODEV;
		raw_spin_lock_irqsave(&dca_lock, flags);
		/* Recheck, we might have raced after dropping the lock */
		domain = dca_get_domain(dev);
		if (!domain) {
			domain = newdomain;
			newdomain = NULL;
			list_add(&domain->node, &dca_domains);
		}
	}
	list_add(&dca->node, &domain->dca_providers);
	raw_spin_unlock_irqrestore(&dca_lock, flags);

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_ADD, NULL);
	kfree(newdomain);
	return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);

/**
 * unregister_dca_provider - remove a dca provider
 * @dca - struct created by alloc_dca_provider()
 */
void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
{
	unsigned long flags;
	struct pci_bus *pci_rc;
	struct dca_domain *domain;

	blocking_notifier_call_chain(&dca_provider_chain,
				     DCA_PROVIDER_REMOVE, NULL);

	raw_spin_lock_irqsave(&dca_lock, flags);

	if (list_empty(&dca_domains)) {
		raw_spin_unlock_irqrestore(&dca_lock, flags);
		return;
	}

	list_del(&dca->node);

	/* drop the domain itself once its last provider is gone */
	pci_rc = dca_pci_rc_from_dev(dev);
	domain = dca_find_domain(pci_rc);
	if (list_empty(&domain->dca_providers))
		dca_free_domain(domain);

	raw_spin_unlock_irqrestore(&dca_lock, flags);

	dca_sysfs_remove_provider(dca);
}
EXPORT_SYMBOL_GPL(unregister_dca_provider);

/**
 * dca_register_notify - register a client's notifier callback
 */
void dca_register_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_register(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_register_notify);

/**
 * dca_unregister_notify - remove a client's notifier callback
 */
void dca_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&dca_provider_chain, nb);
}
EXPORT_SYMBOL_GPL(dca_unregister_notify);

static int __init dca_init(void)
{
	pr_info("dca service started, version %s\n", DCA_VERSION);
	return dca_sysfs_init();
}

static void __exit dca_exit(void)
{
	dca_sysfs_exit();
}

arch_initcall(dca_init);
module_exit(dca_exit);
gpl-2.0
SebastianFM/SebastianFM-kernel
drivers/usb/misc/adutux.c
6493
24631
/* * adutux - driver for ADU devices from Ontrak Control Systems * This is an experimental driver. Use at your own risk. * This driver is not supported by Ontrak Control Systems. * * Copyright (c) 2003 John Homppi (SCO, leave this notice here) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * derived from the Lego USB Tower driver 0.56: * Copyright (c) 2003 David Glance <davidgsf@sourceforge.net> * 2001 Juergen Stuber <stuber@loria.fr> * that was derived from USB Skeleton driver - 0.5 * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com) * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <asm/uaccess.h> #ifdef CONFIG_USB_DEBUG static int debug = 5; #else static int debug = 1; #endif /* Use our own dbg macro */ #undef dbg #define dbg(lvl, format, arg...) 
\ do { \ if (debug >= lvl) \ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \ } while (0) /* Version Information */ #define DRIVER_VERSION "v0.0.13" #define DRIVER_AUTHOR "John Homppi" #define DRIVER_DESC "adutux (see www.ontrak.net)" /* Module parameters */ module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not"); /* Define these values to match your device */ #define ADU_VENDOR_ID 0x0a07 #define ADU_PRODUCT_ID 0x0064 /* table of devices that work with this driver */ static const struct usb_device_id device_table[] = { { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+100) }, /* ADU200 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+108) }, /* ADU208 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+118) }, /* ADU218 */ { }/* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, device_table); #ifdef CONFIG_USB_DYNAMIC_MINORS #define ADU_MINOR_BASE 0 #else #define ADU_MINOR_BASE 67 #endif /* we can have up to this number of device plugged in at once */ #define MAX_DEVICES 16 #define COMMAND_TIMEOUT (2*HZ) /* 60 second timeout for a command */ /* * The locking scheme is a vanilla 3-lock: * adu_device.buflock: A spinlock, covers what IRQs touch. * adutux_mutex: A Static lock to cover open_count. It would also cover * any globals, but we don't have them in 2.6. * adu_device.mtx: A mutex to hold across sleepers like copy_from_user. * It covers all of adu_device, except the open_count * and what .buflock covers. 
*/ /* Structure to hold all of our device specific stuff */ struct adu_device { struct mutex mtx; struct usb_device* udev; /* save off the usb device pointer */ struct usb_interface* interface; unsigned int minor; /* the starting minor number for this device */ char serial_number[8]; int open_count; /* number of times this port has been opened */ char* read_buffer_primary; int read_buffer_length; char* read_buffer_secondary; int secondary_head; int secondary_tail; spinlock_t buflock; wait_queue_head_t read_wait; wait_queue_head_t write_wait; char* interrupt_in_buffer; struct usb_endpoint_descriptor* interrupt_in_endpoint; struct urb* interrupt_in_urb; int read_urb_finished; char* interrupt_out_buffer; struct usb_endpoint_descriptor* interrupt_out_endpoint; struct urb* interrupt_out_urb; int out_urb_finished; }; static DEFINE_MUTEX(adutux_mutex); static struct usb_driver adu_driver; static void adu_debug_data(int level, const char *function, int size, const unsigned char *data) { int i; if (debug < level) return; printk(KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size); for (i = 0; i < size; ++i) printk("%.2x ", data[i]); printk("\n"); } /** * adu_abort_transfers * aborts transfers and frees associated data structures */ static void adu_abort_transfers(struct adu_device *dev) { unsigned long flags; dbg(2," %s : enter", __func__); if (dev->udev == NULL) { dbg(1," %s : udev is null", __func__); goto exit; } /* shutdown transfer */ /* XXX Anchor these instead */ spin_lock_irqsave(&dev->buflock, flags); if (!dev->read_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); usb_kill_urb(dev->interrupt_in_urb); } else spin_unlock_irqrestore(&dev->buflock, flags); spin_lock_irqsave(&dev->buflock, flags); if (!dev->out_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); usb_kill_urb(dev->interrupt_out_urb); } else spin_unlock_irqrestore(&dev->buflock, flags); exit: dbg(2," %s : leave", __func__); } static void adu_delete(struct adu_device 
*dev) { dbg(2, "%s enter", __func__); /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree(dev->read_buffer_primary); kfree(dev->read_buffer_secondary); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); kfree(dev); dbg(2, "%s : leave", __func__); } static void adu_interrupt_in_callback(struct urb *urb) { struct adu_device *dev = urb->context; int status = urb->status; dbg(4," %s : enter, status %d", __func__, status); adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); spin_lock(&dev->buflock); if (status != 0) { if ((status != -ENOENT) && (status != -ECONNRESET) && (status != -ESHUTDOWN)) { dbg(1," %s : nonzero status received: %d", __func__, status); } goto exit; } if (urb->actual_length > 0 && dev->interrupt_in_buffer[0] != 0x00) { if (dev->read_buffer_length < (4 * usb_endpoint_maxp(dev->interrupt_in_endpoint)) - (urb->actual_length)) { memcpy (dev->read_buffer_primary + dev->read_buffer_length, dev->interrupt_in_buffer, urb->actual_length); dev->read_buffer_length += urb->actual_length; dbg(2," %s reading %d ", __func__, urb->actual_length); } else { dbg(1," %s : read_buffer overflow", __func__); } } exit: dev->read_urb_finished = 1; spin_unlock(&dev->buflock); /* always wake up so we recover from errors */ wake_up_interruptible(&dev->read_wait); adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); dbg(4," %s : leave, status %d", __func__, status); } static void adu_interrupt_out_callback(struct urb *urb) { struct adu_device *dev = urb->context; int status = urb->status; dbg(4," %s : enter, status %d", __func__, status); adu_debug_data(5,__func__, urb->actual_length, urb->transfer_buffer); if (status != 0) { if ((status != -ENOENT) && (status != -ECONNRESET)) { dbg(1, " %s :nonzero status received: %d", __func__, status); } goto exit; } spin_lock(&dev->buflock); dev->out_urb_finished = 1; wake_up(&dev->write_wait); spin_unlock(&dev->buflock); exit: 
adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer); dbg(4," %s : leave, status %d", __func__, status); } static int adu_open(struct inode *inode, struct file *file) { struct adu_device *dev = NULL; struct usb_interface *interface; int subminor; int retval; dbg(2,"%s : enter", __func__); subminor = iminor(inode); if ((retval = mutex_lock_interruptible(&adutux_mutex))) { dbg(2, "%s : mutex lock failed", __func__); goto exit_no_lock; } interface = usb_find_interface(&adu_driver, subminor); if (!interface) { printk(KERN_ERR "adutux: %s - error, can't find device for " "minor %d\n", __func__, subminor); retval = -ENODEV; goto exit_no_device; } dev = usb_get_intfdata(interface); if (!dev || !dev->udev) { retval = -ENODEV; goto exit_no_device; } /* check that nobody else is using the device */ if (dev->open_count) { retval = -EBUSY; goto exit_no_device; } ++dev->open_count; dbg(2,"%s : open count %d", __func__, dev->open_count); /* save device in the file's private structure */ file->private_data = dev; /* initialize in direction */ dev->read_buffer_length = 0; /* fixup first read by having urb waiting for it */ usb_fill_int_urb(dev->interrupt_in_urb,dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); dev->read_urb_finished = 0; if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL)) dev->read_urb_finished = 1; /* we ignore failure */ /* end of fixup for first read */ /* initialize out direction */ dev->out_urb_finished = 1; retval = 0; exit_no_device: mutex_unlock(&adutux_mutex); exit_no_lock: dbg(2,"%s : leave, return value %d ", __func__, retval); return retval; } static void adu_release_internal(struct adu_device *dev) { dbg(2," %s : enter", __func__); /* decrement our usage count for the device */ --dev->open_count; dbg(2," %s : open count %d", __func__, dev->open_count); if 
(dev->open_count <= 0) { adu_abort_transfers(dev); dev->open_count = 0; } dbg(2," %s : leave", __func__); } static int adu_release(struct inode *inode, struct file *file) { struct adu_device *dev; int retval = 0; dbg(2," %s : enter", __func__); if (file == NULL) { dbg(1," %s : file is NULL", __func__); retval = -ENODEV; goto exit; } dev = file->private_data; if (dev == NULL) { dbg(1," %s : object is NULL", __func__); retval = -ENODEV; goto exit; } mutex_lock(&adutux_mutex); /* not interruptible */ if (dev->open_count <= 0) { dbg(1," %s : device not opened", __func__); retval = -ENODEV; goto unlock; } adu_release_internal(dev); if (dev->udev == NULL) { /* the device was unplugged before the file was released */ if (!dev->open_count) /* ... and we're the last user */ adu_delete(dev); } unlock: mutex_unlock(&adutux_mutex); exit: dbg(2," %s : leave, return value %d", __func__, retval); return retval; } static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, loff_t *ppos) { struct adu_device *dev; size_t bytes_read = 0; size_t bytes_to_read = count; int i; int retval = 0; int timeout = 0; int should_submit = 0; unsigned long flags; DECLARE_WAITQUEUE(wait, current); dbg(2," %s : enter, count = %Zd, file=%p", __func__, count, file); dev = file->private_data; dbg(2," %s : dev=%p", __func__, dev); if (mutex_lock_interruptible(&dev->mtx)) return -ERESTARTSYS; /* verify that the device wasn't unplugged */ if (dev->udev == NULL) { retval = -ENODEV; printk(KERN_ERR "adutux: No device or device unplugged %d\n", retval); goto exit; } /* verify that some data was requested */ if (count == 0) { dbg(1," %s : read request of 0 bytes", __func__); goto exit; } timeout = COMMAND_TIMEOUT; dbg(2," %s : about to start looping", __func__); while (bytes_to_read) { int data_in_secondary = dev->secondary_tail - dev->secondary_head; dbg(2," %s : while, data_in_secondary=%d, status=%d", __func__, data_in_secondary, dev->interrupt_in_urb->status); if (data_in_secondary) { /* 
drain secondary buffer */ int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary; i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount); if (i) { retval = -EFAULT; goto exit; } dev->secondary_head += (amount - i); bytes_read += (amount - i); bytes_to_read -= (amount - i); if (i) { retval = bytes_read ? bytes_read : -EFAULT; goto exit; } } else { /* we check the primary buffer */ spin_lock_irqsave (&dev->buflock, flags); if (dev->read_buffer_length) { /* we secure access to the primary */ char *tmp; dbg(2," %s : swap, read_buffer_length = %d", __func__, dev->read_buffer_length); tmp = dev->read_buffer_secondary; dev->read_buffer_secondary = dev->read_buffer_primary; dev->read_buffer_primary = tmp; dev->secondary_head = 0; dev->secondary_tail = dev->read_buffer_length; dev->read_buffer_length = 0; spin_unlock_irqrestore(&dev->buflock, flags); /* we have a free buffer so use it */ should_submit = 1; } else { /* even the primary was empty - we may need to do IO */ if (!dev->read_urb_finished) { /* somebody is doing IO */ spin_unlock_irqrestore(&dev->buflock, flags); dbg(2," %s : submitted already", __func__); } else { /* we must initiate input */ dbg(2," %s : initiate input", __func__); dev->read_urb_finished = 0; spin_unlock_irqrestore(&dev->buflock, flags); usb_fill_int_urb(dev->interrupt_in_urb,dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL); if (retval) { dev->read_urb_finished = 1; if (retval == -ENOMEM) { retval = bytes_read ? 
bytes_read : -ENOMEM; } dbg(2," %s : submit failed", __func__); goto exit; } } /* we wait for I/O to complete */ set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&dev->read_wait, &wait); spin_lock_irqsave(&dev->buflock, flags); if (!dev->read_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); timeout = schedule_timeout(COMMAND_TIMEOUT); } else { spin_unlock_irqrestore(&dev->buflock, flags); set_current_state(TASK_RUNNING); } remove_wait_queue(&dev->read_wait, &wait); if (timeout <= 0) { dbg(2," %s : timeout", __func__); retval = bytes_read ? bytes_read : -ETIMEDOUT; goto exit; } if (signal_pending(current)) { dbg(2," %s : signal pending", __func__); retval = bytes_read ? bytes_read : -EINTR; goto exit; } } } } retval = bytes_read; /* if the primary buffer is empty then use it */ spin_lock_irqsave(&dev->buflock, flags); if (should_submit && dev->read_urb_finished) { dev->read_urb_finished = 0; spin_unlock_irqrestore(&dev->buflock, flags); usb_fill_int_urb(dev->interrupt_in_urb,dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL) != 0) dev->read_urb_finished = 1; /* we ignore failure */ } else { spin_unlock_irqrestore(&dev->buflock, flags); } exit: /* unlock the device */ mutex_unlock(&dev->mtx); dbg(2," %s : leave, return value %d", __func__, retval); return retval; } static ssize_t adu_write(struct file *file, const __user char *buffer, size_t count, loff_t *ppos) { DECLARE_WAITQUEUE(waita, current); struct adu_device *dev; size_t bytes_written = 0; size_t bytes_to_write; size_t buffer_size; unsigned long flags; int retval; dbg(2," %s : enter, count = %Zd", __func__, count); dev = file->private_data; retval = mutex_lock_interruptible(&dev->mtx); if (retval) goto exit_nolock; /* verify that the device wasn't unplugged */ if 
(dev->udev == NULL) { retval = -ENODEV; printk(KERN_ERR "adutux: No device or device unplugged %d\n", retval); goto exit; } /* verify that we actually have some data to write */ if (count == 0) { dbg(1," %s : write request of 0 bytes", __func__); goto exit; } while (count > 0) { add_wait_queue(&dev->write_wait, &waita); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&dev->buflock, flags); if (!dev->out_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); mutex_unlock(&dev->mtx); if (signal_pending(current)) { dbg(1," %s : interrupted", __func__); set_current_state(TASK_RUNNING); retval = -EINTR; goto exit_onqueue; } if (schedule_timeout(COMMAND_TIMEOUT) == 0) { dbg(1, "%s - command timed out.", __func__); retval = -ETIMEDOUT; goto exit_onqueue; } remove_wait_queue(&dev->write_wait, &waita); retval = mutex_lock_interruptible(&dev->mtx); if (retval) { retval = bytes_written ? bytes_written : retval; goto exit_nolock; } dbg(4," %s : in progress, count = %Zd", __func__, count); } else { spin_unlock_irqrestore(&dev->buflock, flags); set_current_state(TASK_RUNNING); remove_wait_queue(&dev->write_wait, &waita); dbg(4," %s : sending, count = %Zd", __func__, count); /* write the data into interrupt_out_buffer from userspace */ buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint); bytes_to_write = count > buffer_size ? 
buffer_size : count; dbg(4," %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd", __func__, buffer_size, count, bytes_to_write); if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) { retval = -EFAULT; goto exit; } /* send off the urb */ usb_fill_int_urb( dev->interrupt_out_urb, dev->udev, usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress), dev->interrupt_out_buffer, bytes_to_write, adu_interrupt_out_callback, dev, dev->interrupt_out_endpoint->bInterval); dev->interrupt_out_urb->actual_length = bytes_to_write; dev->out_urb_finished = 0; retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL); if (retval < 0) { dev->out_urb_finished = 1; dev_err(&dev->udev->dev, "Couldn't submit " "interrupt_out_urb %d\n", retval); goto exit; } buffer += bytes_to_write; count -= bytes_to_write; bytes_written += bytes_to_write; } } mutex_unlock(&dev->mtx); return bytes_written; exit: mutex_unlock(&dev->mtx); exit_nolock: dbg(2," %s : leave, return value %d", __func__, retval); return retval; exit_onqueue: remove_wait_queue(&dev->write_wait, &waita); return retval; } /* file operations needed when we register this driver */ static const struct file_operations adu_fops = { .owner = THIS_MODULE, .read = adu_read, .write = adu_write, .open = adu_open, .release = adu_release, .llseek = noop_llseek, }; /* * usb class driver info in order to get a minor number from the usb core, * and to have the device registered with devfs and the driver core */ static struct usb_class_driver adu_class = { .name = "usb/adutux%d", .fops = &adu_fops, .minor_base = ADU_MINOR_BASE, }; /** * adu_probe * * Called by the usb core when a new device is connected that it thinks * this driver might be interested in. 
*/ static int adu_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct adu_device *dev = NULL; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int retval = -ENODEV; int in_end_size; int out_end_size; int i; dbg(2," %s : enter", __func__); if (udev == NULL) { dev_err(&interface->dev, "udev is NULL.\n"); goto exit; } /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL); if (dev == NULL) { dev_err(&interface->dev, "Out of memory\n"); retval = -ENOMEM; goto exit; } mutex_init(&dev->mtx); spin_lock_init(&dev->buflock); dev->udev = udev; init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); iface_desc = &interface->altsetting[0]; /* set up the endpoint information */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) dev->interrupt_in_endpoint = endpoint; if (usb_endpoint_is_int_out(endpoint)) dev->interrupt_out_endpoint = endpoint; } if (dev->interrupt_in_endpoint == NULL) { dev_err(&interface->dev, "interrupt in endpoint not found\n"); goto error; } if (dev->interrupt_out_endpoint == NULL) { dev_err(&interface->dev, "interrupt out endpoint not found\n"); goto error; } in_end_size = usb_endpoint_maxp(dev->interrupt_in_endpoint); out_end_size = usb_endpoint_maxp(dev->interrupt_out_endpoint); dev->read_buffer_primary = kmalloc((4 * in_end_size), GFP_KERNEL); if (!dev->read_buffer_primary) { dev_err(&interface->dev, "Couldn't allocate read_buffer_primary\n"); retval = -ENOMEM; goto error; } /* debug code prime the buffer */ memset(dev->read_buffer_primary, 'a', in_end_size); memset(dev->read_buffer_primary + in_end_size, 'b', in_end_size); memset(dev->read_buffer_primary + (2 * in_end_size), 'c', in_end_size); memset(dev->read_buffer_primary + (3 * in_end_size), 'd', in_end_size); 
dev->read_buffer_secondary = kmalloc((4 * in_end_size), GFP_KERNEL); if (!dev->read_buffer_secondary) { dev_err(&interface->dev, "Couldn't allocate read_buffer_secondary\n"); retval = -ENOMEM; goto error; } /* debug code prime the buffer */ memset(dev->read_buffer_secondary, 'e', in_end_size); memset(dev->read_buffer_secondary + in_end_size, 'f', in_end_size); memset(dev->read_buffer_secondary + (2 * in_end_size), 'g', in_end_size); memset(dev->read_buffer_secondary + (3 * in_end_size), 'h', in_end_size); dev->interrupt_in_buffer = kmalloc(in_end_size, GFP_KERNEL); if (!dev->interrupt_in_buffer) { dev_err(&interface->dev, "Couldn't allocate interrupt_in_buffer\n"); goto error; } /* debug code prime the buffer */ memset(dev->interrupt_in_buffer, 'i', in_end_size); dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_in_urb) { dev_err(&interface->dev, "Couldn't allocate interrupt_in_urb\n"); goto error; } dev->interrupt_out_buffer = kmalloc(out_end_size, GFP_KERNEL); if (!dev->interrupt_out_buffer) { dev_err(&interface->dev, "Couldn't allocate interrupt_out_buffer\n"); goto error; } dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_out_urb) { dev_err(&interface->dev, "Couldn't allocate interrupt_out_urb\n"); goto error; } if (!usb_string(udev, udev->descriptor.iSerialNumber, dev->serial_number, sizeof(dev->serial_number))) { dev_err(&interface->dev, "Could not retrieve serial number\n"); goto error; } dbg(2," %s : serial_number=%s", __func__, dev->serial_number); /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); retval = usb_register_dev(interface, &adu_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } dev->minor = interface->minor; /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "ADU%d %s now 
attached to /dev/usb/adutux%d\n", udev->descriptor.idProduct, dev->serial_number, (dev->minor - ADU_MINOR_BASE)); exit: dbg(2," %s : leave, return value %p (dev)", __func__, dev); return retval; error: adu_delete(dev); return retval; } /** * adu_disconnect * * Called by the usb core when the device is removed from the system. */ static void adu_disconnect(struct usb_interface *interface) { struct adu_device *dev; int minor; dbg(2," %s : enter", __func__); dev = usb_get_intfdata(interface); mutex_lock(&dev->mtx); /* not interruptible */ dev->udev = NULL; /* poison */ minor = dev->minor; usb_deregister_dev(interface, &adu_class); mutex_unlock(&dev->mtx); mutex_lock(&adutux_mutex); usb_set_intfdata(interface, NULL); /* if the device is not opened, then we clean up right now */ dbg(2," %s : open count %d", __func__, dev->open_count); if (!dev->open_count) adu_delete(dev); mutex_unlock(&adutux_mutex); dev_info(&interface->dev, "ADU device adutux%d now disconnected\n", (minor - ADU_MINOR_BASE)); dbg(2," %s : leave", __func__); } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver adu_driver = { .name = "adutux", .probe = adu_probe, .disconnect = adu_disconnect, .id_table = device_table, }; module_usb_driver(adu_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
rocky-wang/smart210_linux
drivers/ide/rapide.c
9309
2079
/* * Copyright (c) 1996-2002 Russell King. */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/errno.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/ecard.h> static const struct ide_port_info rapide_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, .chipset = ide_generic, }; static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq) { unsigned long port = (unsigned long)base; int i; for (i = 0; i <= 7; i++) { hw->io_ports_array[i] = port; port += sz; } hw->io_ports.ctl_addr = (unsigned long)ctrl; hw->irq = irq; } static int __devinit rapide_probe(struct expansion_card *ec, const struct ecard_id *id) { void __iomem *base; struct ide_host *host; int ret; struct ide_hw hw, *hws[] = { &hw }; ret = ecard_request_resources(ec); if (ret) goto out; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); if (!base) { ret = -ENOMEM; goto release; } memset(&hw, 0, sizeof(hw)); rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); hw.dev = &ec->dev; ret = ide_host_add(&rapide_port_info, hws, 1, &host); if (ret) goto release; ecard_set_drvdata(ec, host); goto out; release: ecard_release_resources(ec); out: return ret; } static void __devexit rapide_remove(struct expansion_card *ec) { struct ide_host *host = ecard_get_drvdata(ec); ecard_set_drvdata(ec, NULL); ide_host_remove(host); ecard_release_resources(ec); } static struct ecard_id rapide_ids[] = { { MANU_YELLOWSTONE, PROD_YELLOWSTONE_RAPIDE32 }, { 0xffff, 0xffff } }; static struct ecard_driver rapide_driver = { .probe = rapide_probe, .remove = __devexit_p(rapide_remove), .id_table = rapide_ids, .drv = { .name = "rapide", }, }; static int __init rapide_init(void) { return ecard_register_driver(&rapide_driver); } static void __exit rapide_exit(void) { ecard_remove_driver(&rapide_driver); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Yellowstone RAPIDE driver"); module_init(rapide_init); module_exit(rapide_exit);
gpl-2.0
Entropy512/I9300_N8013_Changes
drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
10589
13383
/* ZD1211 USB-WLAN driver for Linux * * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include "zd_rf.h" #include "zd_usb.h" #include "zd_chip.h" static const u32 chan_rv[][2] = { RF_CHANNEL( 1) = { 0x09ec00, 0x8cccc8 }, RF_CHANNEL( 2) = { 0x09ec00, 0x8cccd8 }, RF_CHANNEL( 3) = { 0x09ec00, 0x8cccc0 }, RF_CHANNEL( 4) = { 0x09ec00, 0x8cccd0 }, RF_CHANNEL( 5) = { 0x05ec00, 0x8cccc8 }, RF_CHANNEL( 6) = { 0x05ec00, 0x8cccd8 }, RF_CHANNEL( 7) = { 0x05ec00, 0x8cccc0 }, RF_CHANNEL( 8) = { 0x05ec00, 0x8cccd0 }, RF_CHANNEL( 9) = { 0x0dec00, 0x8cccc8 }, RF_CHANNEL(10) = { 0x0dec00, 0x8cccd8 }, RF_CHANNEL(11) = { 0x0dec00, 0x8cccc0 }, RF_CHANNEL(12) = { 0x0dec00, 0x8cccd0 }, RF_CHANNEL(13) = { 0x03ec00, 0x8cccc8 }, RF_CHANNEL(14) = { 0x03ec00, 0x866660 }, }; static const u32 std_rv[] = { 0x4ff821, 0xc5fbfc, 0x21ebfe, 0xafd401, /* freq shift 0xaad401 */ 0x6cf56a, 0xe04073, 0x193d76, 0x9dd844, 0x500007, 0xd8c010, }; static const u32 rv_init1[] = { 0x3c9000, 0xbfffff, 0x700000, 0xf15d58, }; static const u32 rv_init2[] = { 0xf15d59, 0xf15d5c, 0xf15d58, }; static const struct zd_ioreq16 ioreqs_sw[] = { { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 }, { ZD_CR38, 0x38 }, { ZD_CR136, 0xdf }, 
}; static int zd1211b_al7230b_finalize(struct zd_chip *chip) { int r; static const struct zd_ioreq16 ioreqs[] = { { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR79, 0x58 }, { ZD_CR12, 0xf0 }, { ZD_CR77, 0x1b }, { ZD_CR78, 0x58 }, { ZD_CR203, 0x04 }, { }, { ZD_CR240, 0x80 }, }; r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); if (r) return r; if (chip->new_phy_layout) { /* antenna selection? */ r = zd_iowrite16_locked(chip, 0xe5, ZD_CR9); if (r) return r; } return zd_iowrite16_locked(chip, 0x04, ZD_CR203); } static int zd1211_al7230b_init_hw(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); /* All of these writes are identical to AL2230 unless otherwise * specified */ static const struct zd_ioreq16 ioreqs_1[] = { /* This one is 7230-specific, and happens before the rest */ { ZD_CR240, 0x57 }, { }, { ZD_CR15, 0x20 }, { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x11 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 }, { ZD_CR44, 0x33 }, /* This value is different for 7230 (was: 0x2a) */ { ZD_CR106, 0x22 }, { ZD_CR107, 0x1a }, { ZD_CR109, 0x09 }, { ZD_CR110, 0x27 }, { ZD_CR111, 0x2b }, { ZD_CR112, 0x2b }, { ZD_CR119, 0x0a }, /* This happened further down in AL2230, * and the value changed (was: 0xe0) */ { ZD_CR122, 0xfc }, { ZD_CR10, 0x89 }, /* for newest (3rd cut) AL2300 */ { ZD_CR17, 0x28 }, { ZD_CR26, 0x93 }, { ZD_CR34, 0x30 }, /* for newest (3rd cut) AL2300 */ { ZD_CR35, 0x3e }, { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 }, /* for newest (3rd cut) AL2300 */ { ZD_CR46, 0x96 }, { ZD_CR47, 0x1e }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR87, 0x0a }, { ZD_CR89, 0x04 }, { ZD_CR92, 0x0a }, { ZD_CR99, 0x28 }, /* This value is different for 7230 (was: 0x00) */ { ZD_CR100, 0x02 }, { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, /* This value is different for 7230 (was: 0x24) */ { ZD_CR106, 0x22 }, /* This value is different for 7230 (was: 0x2a) */ { ZD_CR107, 0x3f }, { ZD_CR109, 0x09 }, /* This value is different for 7230 (was: 0x13) */ { 
ZD_CR110, 0x1f }, { ZD_CR111, 0x1f }, { ZD_CR112, 0x1f }, { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, /* for newest (3rd cut) AL2300 */ { ZD_CR115, 0x24 }, /* This value is different for 7230 (was: 0x24) */ { ZD_CR116, 0x3f }, /* This value is different for 7230 (was: 0xf4) */ { ZD_CR117, 0xfa }, { ZD_CR118, 0xfc }, { ZD_CR119, 0x10 }, { ZD_CR120, 0x4f }, { ZD_CR121, 0x77 }, { ZD_CR137, 0x88 }, /* This one is 7230-specific */ { ZD_CR138, 0xa8 }, /* This value is different for 7230 (was: 0xff) */ { ZD_CR252, 0x34 }, /* This value is different for 7230 (was: 0xff) */ { ZD_CR253, 0x34 }, /* PLL_OFF */ { ZD_CR251, 0x2f }, }; static const struct zd_ioreq16 ioreqs_2[] = { { ZD_CR251, 0x3f }, /* PLL_ON */ { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 }, { ZD_CR38, 0x38 }, { ZD_CR136, 0xdf }, }; r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1)); if (r) return r; r = zd_rfwritev_cr_locked(chip, chan_rv[0], ARRAY_SIZE(chan_rv[0])); if (r) return r; r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv_init1, ARRAY_SIZE(rv_init1)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv_init2, ARRAY_SIZE(rv_init2)); if (r) return r; r = zd_iowrite16_locked(chip, 0x06, ZD_CR203); if (r) return r; r = zd_iowrite16_locked(chip, 0x80, ZD_CR240); if (r) return r; return 0; } static int zd1211b_al7230b_init_hw(struct zd_rf *rf) { int r; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs_1[] = { { ZD_CR240, 0x57 }, { ZD_CR9, 0x9 }, { }, { ZD_CR10, 0x8b }, { ZD_CR15, 0x20 }, { ZD_CR17, 0x2B }, /* for newest (3rd cut) AL2230 */ { ZD_CR20, 0x10 }, /* 4N25->Stone Request */ { ZD_CR23, 0x40 }, { ZD_CR24, 0x20 }, { ZD_CR26, 0x93 }, { ZD_CR28, 0x3e }, { ZD_CR29, 0x00 }, { ZD_CR33, 0x28 }, /* 5613 */ { ZD_CR34, 0x30 }, { ZD_CR35, 0x3e }, /* for newest (3rd cut) AL2230 */ { ZD_CR41, 0x24 }, { ZD_CR44, 0x32 }, 
{ ZD_CR46, 0x99 }, /* for newest (3rd cut) AL2230 */ { ZD_CR47, 0x1e }, /* ZD1215 5610 */ { ZD_CR48, 0x00 }, { ZD_CR49, 0x00 }, { ZD_CR51, 0x01 }, { ZD_CR52, 0x80 }, { ZD_CR53, 0x7e }, { ZD_CR65, 0x00 }, { ZD_CR66, 0x00 }, { ZD_CR67, 0x00 }, { ZD_CR68, 0x00 }, { ZD_CR69, 0x28 }, { ZD_CR79, 0x58 }, { ZD_CR80, 0x30 }, { ZD_CR81, 0x30 }, { ZD_CR87, 0x0A }, { ZD_CR89, 0x04 }, { ZD_CR90, 0x58 }, /* 5112 */ { ZD_CR91, 0x00 }, /* 5613 */ { ZD_CR92, 0x0a }, { ZD_CR98, 0x8d }, /* 4804, for 1212 new algorithm */ { ZD_CR99, 0x00 }, { ZD_CR100, 0x02 }, { ZD_CR101, 0x13 }, { ZD_CR102, 0x27 }, { ZD_CR106, 0x20 }, /* change to 0x24 for AL7230B */ { ZD_CR109, 0x13 }, /* 4804, for 1212 new algorithm */ { ZD_CR112, 0x1f }, }; static const struct zd_ioreq16 ioreqs_new_phy[] = { { ZD_CR107, 0x28 }, { ZD_CR110, 0x1f }, /* 5127, 0x13->0x1f */ { ZD_CR111, 0x1f }, /* 0x13 to 0x1f for AL7230B */ { ZD_CR116, 0x2a }, { ZD_CR118, 0xfa }, { ZD_CR119, 0x12 }, { ZD_CR121, 0x6c }, /* 5613 */ }; static const struct zd_ioreq16 ioreqs_old_phy[] = { { ZD_CR107, 0x24 }, { ZD_CR110, 0x13 }, /* 5127, 0x13->0x1f */ { ZD_CR111, 0x13 }, /* 0x13 to 0x1f for AL7230B */ { ZD_CR116, 0x24 }, { ZD_CR118, 0xfc }, { ZD_CR119, 0x11 }, { ZD_CR121, 0x6a }, /* 5613 */ }; static const struct zd_ioreq16 ioreqs_2[] = { { ZD_CR113, 0x27 }, { ZD_CR114, 0x27 }, { ZD_CR115, 0x24 }, { ZD_CR117, 0xfa }, { ZD_CR120, 0x4f }, { ZD_CR122, 0xfc }, /* E0->FCh at 4901 */ { ZD_CR123, 0x57 }, /* 5613 */ { ZD_CR125, 0xad }, /* 4804, for 1212 new algorithm */ { ZD_CR126, 0x6c }, /* 5613 */ { ZD_CR127, 0x03 }, /* 4804, for 1212 new algorithm */ { ZD_CR130, 0x10 }, { ZD_CR131, 0x00 }, /* 5112 */ { ZD_CR137, 0x50 }, /* 5613 */ { ZD_CR138, 0xa8 }, /* 5112 */ { ZD_CR144, 0xac }, /* 5613 */ { ZD_CR148, 0x40 }, /* 5112 */ { ZD_CR149, 0x40 }, /* 4O07, 50->40 */ { ZD_CR150, 0x1a }, /* 5112, 0C->1A */ { ZD_CR252, 0x34 }, { ZD_CR253, 0x34 }, { ZD_CR251, 0x2f }, /* PLL_OFF */ }; static const struct zd_ioreq16 ioreqs_3[] = { { ZD_CR251, 0x7f }, /* 
PLL_ON */ { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, { ZD_CR130, 0x10 }, { ZD_CR38, 0x38 }, { ZD_CR136, 0xdf }, }; r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1)); if (r) return r; if (chip->new_phy_layout) r = zd_iowrite16a_locked(chip, ioreqs_new_phy, ARRAY_SIZE(ioreqs_new_phy)); else r = zd_iowrite16a_locked(chip, ioreqs_old_phy, ARRAY_SIZE(ioreqs_old_phy)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2)); if (r) return r; r = zd_rfwritev_cr_locked(chip, chan_rv[0], ARRAY_SIZE(chan_rv[0])); if (r) return r; r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv_init1, ARRAY_SIZE(rv_init1)); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs_3, ARRAY_SIZE(ioreqs_3)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv_init2, ARRAY_SIZE(rv_init2)); if (r) return r; return zd1211b_al7230b_finalize(chip); } static int zd1211_al7230b_set_channel(struct zd_rf *rf, u8 channel) { int r; const u32 *rv = chan_rv[channel-1]; struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { /* PLL_ON */ { ZD_CR251, 0x3f }, { ZD_CR203, 0x06 }, { ZD_CR240, 0x08 }, }; r = zd_iowrite16_locked(chip, 0x57, ZD_CR240); if (r) return r; /* PLL_OFF */ r = zd_iowrite16_locked(chip, 0x2f, ZD_CR251); if (r) return r; r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); if (r) return r; r = zd_rfwrite_cr_locked(chip, 0x3c9000); if (r) return r; r = zd_rfwrite_cr_locked(chip, 0xf15d58); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs_sw, ARRAY_SIZE(ioreqs_sw)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv, 2); if (r) return r; r = zd_rfwrite_cr_locked(chip, 0x3c9000); if (r) return r; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int zd1211b_al7230b_set_channel(struct zd_rf *rf, u8 channel) { int r; const u32 *rv = chan_rv[channel-1]; struct zd_chip *chip = zd_rf_to_chip(rf); r = zd_iowrite16_locked(chip, 0x57, 
ZD_CR240); if (r) return r; r = zd_iowrite16_locked(chip, 0xe4, ZD_CR9); if (r) return r; /* PLL_OFF */ r = zd_iowrite16_locked(chip, 0x2f, ZD_CR251); if (r) return r; r = zd_rfwritev_cr_locked(chip, std_rv, ARRAY_SIZE(std_rv)); if (r) return r; r = zd_rfwrite_cr_locked(chip, 0x3c9000); if (r) return r; r = zd_rfwrite_cr_locked(chip, 0xf15d58); if (r) return r; r = zd_iowrite16a_locked(chip, ioreqs_sw, ARRAY_SIZE(ioreqs_sw)); if (r) return r; r = zd_rfwritev_cr_locked(chip, rv, 2); if (r) return r; r = zd_rfwrite_cr_locked(chip, 0x3c9000); if (r) return r; r = zd_iowrite16_locked(chip, 0x7f, ZD_CR251); if (r) return r; return zd1211b_al7230b_finalize(chip); } static int zd1211_al7230b_switch_radio_on(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x00 }, { ZD_CR251, 0x3f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int zd1211b_al7230b_switch_radio_on(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x00 }, { ZD_CR251, 0x7f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } static int al7230b_switch_radio_off(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); static const struct zd_ioreq16 ioreqs[] = { { ZD_CR11, 0x04 }, { ZD_CR251, 0x2f }, }; return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } /* ZD1211B+AL7230B 6m band edge patching differs slightly from other * configurations */ static int zd1211b_al7230b_patch_6m(struct zd_rf *rf, u8 channel) { struct zd_chip *chip = zd_rf_to_chip(rf); struct zd_ioreq16 ioreqs[] = { { ZD_CR128, 0x14 }, { ZD_CR129, 0x12 }, }; /* FIXME: Channel 11 is not the edge for all regulatory domains. 
*/ if (channel == 1) { ioreqs[0].value = 0x0e; ioreqs[1].value = 0x10; } else if (channel == 11) { ioreqs[0].value = 0x10; ioreqs[1].value = 0x10; } dev_dbg_f(zd_chip_dev(chip), "patching for channel %d\n", channel); return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } int zd_rf_init_al7230b(struct zd_rf *rf) { struct zd_chip *chip = zd_rf_to_chip(rf); if (zd_chip_is_zd1211b(chip)) { rf->init_hw = zd1211b_al7230b_init_hw; rf->switch_radio_on = zd1211b_al7230b_switch_radio_on; rf->set_channel = zd1211b_al7230b_set_channel; rf->patch_6m_band_edge = zd1211b_al7230b_patch_6m; } else { rf->init_hw = zd1211_al7230b_init_hw; rf->switch_radio_on = zd1211_al7230b_switch_radio_on; rf->set_channel = zd1211_al7230b_set_channel; rf->patch_6m_band_edge = zd_rf_generic_patch_6m; rf->patch_cck_gain = 1; } rf->switch_radio_off = al7230b_switch_radio_off; return 0; }
gpl-2.0
andyjhf/mini2440-linux-2.6.32.2
arch/arm/mach-iop13xx/tpmi.c
12381
7214
/*
 * iop13xx tpmi device resources
 * Copyright (c) 2005-2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/sizes.h>

/* assumes CONTROLLER_ONLY# is never asserted in the ESSR register */

/* Per-unit physical address generators: each TPMI unit's register
 * windows are spaced at fixed power-of-two strides from a common base. */
#define IOP13XX_TPMI_MMR(dev) IOP13XX_REG_ADDR32_PHYS(0x48000 + (dev << 12))
#define IOP13XX_TPMI_MEM(dev) IOP13XX_REG_ADDR32_PHYS(0x60000 + (dev << 13))
#define IOP13XX_TPMI_CTRL(dev) IOP13XX_REG_ADDR32_PHYS(0x50000 + (dev << 10))
#define IOP13XX_TPMI_IOP_CTRL(dev) (IOP13XX_TPMI_CTRL(dev) + 0x2000)
/* Window lengths, expressed as (size - 1) for resource .end arithmetic */
#define IOP13XX_TPMI_MMR_SIZE (SZ_4K - 1)
#define IOP13XX_TPMI_MEM_SIZE (255)
#define IOP13XX_TPMI_MEM_CTRL (SZ_1K - 1)
/* Indices into each iop13xx_tpmi_N_resources[] array below */
#define IOP13XX_TPMI_RESOURCE_MMR 0
#define IOP13XX_TPMI_RESOURCE_MEM 1
#define IOP13XX_TPMI_RESOURCE_CTRL 2
#define IOP13XX_TPMI_RESOURCE_IOP_CTRL 3
#define IOP13XX_TPMI_RESOURCE_IRQ 4

/* Resources for TPMI unit 0.
 * NOTE(review): the MMR window uses dev == 4 (per the inline comment)
 * while units 1-3 below use dev == 1..3 — presumably this matches the
 * hardware decode; confirm against the IOP13xx developer's manual. */
static struct resource iop13xx_tpmi_0_resources[] = {
	[IOP13XX_TPMI_RESOURCE_MMR] = {
		.start = IOP13XX_TPMI_MMR(4), /* tpmi0 starts at dev == 4 */
		.end = IOP13XX_TPMI_MMR(4) + IOP13XX_TPMI_MMR_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_MEM] = {
		.start = IOP13XX_TPMI_MEM(0),
		.end = IOP13XX_TPMI_MEM(0) + IOP13XX_TPMI_MEM_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_CTRL] = {
		.start = IOP13XX_TPMI_CTRL(0),
		.end = IOP13XX_TPMI_CTRL(0) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IOP_CTRL] = {
		.start = IOP13XX_TPMI_IOP_CTRL(0),
		.end = IOP13XX_TPMI_IOP_CTRL(0) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IRQ] = {
		.start = IRQ_IOP13XX_TPMI0_OUT,
		.end = IRQ_IOP13XX_TPMI0_OUT,
		.flags = IORESOURCE_IRQ
	}
};

/* Resources for TPMI unit 1 (same layout as unit 0, dev index 1). */
static struct resource iop13xx_tpmi_1_resources[] = {
	[IOP13XX_TPMI_RESOURCE_MMR] = {
		.start = IOP13XX_TPMI_MMR(1),
		.end = IOP13XX_TPMI_MMR(1) + IOP13XX_TPMI_MMR_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_MEM] = {
		.start = IOP13XX_TPMI_MEM(1),
		.end = IOP13XX_TPMI_MEM(1) + IOP13XX_TPMI_MEM_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_CTRL] = {
		.start = IOP13XX_TPMI_CTRL(1),
		.end = IOP13XX_TPMI_CTRL(1) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IOP_CTRL] = {
		.start = IOP13XX_TPMI_IOP_CTRL(1),
		.end = IOP13XX_TPMI_IOP_CTRL(1) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IRQ] = {
		.start = IRQ_IOP13XX_TPMI1_OUT,
		.end = IRQ_IOP13XX_TPMI1_OUT,
		.flags = IORESOURCE_IRQ
	}
};

/* Resources for TPMI unit 2. */
static struct resource iop13xx_tpmi_2_resources[] = {
	[IOP13XX_TPMI_RESOURCE_MMR] = {
		.start = IOP13XX_TPMI_MMR(2),
		.end = IOP13XX_TPMI_MMR(2) + IOP13XX_TPMI_MMR_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_MEM] = {
		.start = IOP13XX_TPMI_MEM(2),
		.end = IOP13XX_TPMI_MEM(2) + IOP13XX_TPMI_MEM_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_CTRL] = {
		.start = IOP13XX_TPMI_CTRL(2),
		.end = IOP13XX_TPMI_CTRL(2) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IOP_CTRL] = {
		.start = IOP13XX_TPMI_IOP_CTRL(2),
		.end = IOP13XX_TPMI_IOP_CTRL(2) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IRQ] = {
		.start = IRQ_IOP13XX_TPMI2_OUT,
		.end = IRQ_IOP13XX_TPMI2_OUT,
		.flags = IORESOURCE_IRQ
	}
};

/* Resources for TPMI unit 3. */
static struct resource iop13xx_tpmi_3_resources[] = {
	[IOP13XX_TPMI_RESOURCE_MMR] = {
		.start = IOP13XX_TPMI_MMR(3),
		.end = IOP13XX_TPMI_MMR(3) + IOP13XX_TPMI_MMR_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_MEM] = {
		.start = IOP13XX_TPMI_MEM(3),
		.end = IOP13XX_TPMI_MEM(3) + IOP13XX_TPMI_MEM_SIZE,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_CTRL] = {
		.start = IOP13XX_TPMI_CTRL(3),
		.end = IOP13XX_TPMI_CTRL(3) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IOP_CTRL] = {
		.start = IOP13XX_TPMI_IOP_CTRL(3),
		.end = IOP13XX_TPMI_IOP_CTRL(3) + IOP13XX_TPMI_MEM_CTRL,
		.flags = IORESOURCE_MEM,
	},
	[IOP13XX_TPMI_RESOURCE_IRQ] = {
		.start = IRQ_IOP13XX_TPMI3_OUT,
		.end = IRQ_IOP13XX_TPMI3_OUT,
		.flags = IORESOURCE_IRQ
	}
};

/* Shared 64-bit DMA mask for all four TPMI platform devices. */
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);

static struct platform_device iop13xx_tpmi_0_device = {
	.name = "iop-tpmi",
	.id = 0,
	.num_resources = ARRAY_SIZE(iop13xx_tpmi_0_resources),
	.resource = iop13xx_tpmi_0_resources,
	.dev = {
		.dma_mask = &iop13xx_tpmi_mask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
	},
};

static struct platform_device iop13xx_tpmi_1_device = {
	.name = "iop-tpmi",
	.id = 1,
	.num_resources = ARRAY_SIZE(iop13xx_tpmi_1_resources),
	.resource = iop13xx_tpmi_1_resources,
	.dev = {
		.dma_mask = &iop13xx_tpmi_mask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
	},
};

static struct platform_device iop13xx_tpmi_2_device = {
	.name = "iop-tpmi",
	.id = 2,
	.num_resources = ARRAY_SIZE(iop13xx_tpmi_2_resources),
	.resource = iop13xx_tpmi_2_resources,
	.dev = {
		.dma_mask = &iop13xx_tpmi_mask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
	},
};

static struct platform_device iop13xx_tpmi_3_device = {
	.name = "iop-tpmi",
	.id = 3,
	.num_resources = ARRAY_SIZE(iop13xx_tpmi_3_resources),
	.resource = iop13xx_tpmi_3_resources,
	.dev = {
		.dma_mask = &iop13xx_tpmi_mask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
	},
};

/*
 * Register the TPMI platform devices that exist on this particular
 * IOP13xx variant.  The variant is identified by reading the device ID
 * of whichever ATU (ATUE for PCIe mode, ATUX for PCI-X mode) is active
 * per the ESSR interface-select bit; the switch below maps the ID onto
 * how many TPMI units the silicon actually has (0, 1, or 4).
 */
__init void iop13xx_add_tpmi_devices(void)
{
	unsigned short device_id;

	/* tpmi's not present on iop341 or iop342 */
	if (__raw_readl(IOP13XX_ESSR0) & IOP13XX_INTERFACE_SEL_PCIX)
		/* ATUE must be present */
		device_id = __raw_readw(IOP13XX_ATUE_DID);
	else
		/* ATUX must be present */
		device_id = __raw_readw(IOP13XX_ATUX_DID);

	switch (device_id) {
	/* iop34[1|2] 0-tpmi */
	case 0x3380:
	case 0x3384:
	case 0x3388:
	case 0x338c:
	case 0x3382:
	case 0x3386:
	case 0x338a:
	case 0x338e:
		return;
	/* iop348 1-tpmi */
	case 0x3310:
	case 0x3312:
	case 0x3314:
	case 0x3318:
	case 0x331a:
	case 0x331c:
	case 0x33c0:
	case 0x33c2:
	case 0x33c4:
	case 0x33c8:
	case 0x33ca:
	case 0x33cc:
	case 0x33b0:
	case 0x33b2:
	case 0x33b4:
	case 0x33b8:
	case 0x33ba:
	case 0x33bc:
	case 0x3320:
	case 0x3322:
	case 0x3324:
	case 0x3328:
	case 0x332a:
	case 0x332c:
		platform_device_register(&iop13xx_tpmi_0_device);
		return;
	default:
		/* unrecognized ID: assume a fully-populated part (4 units) */
		platform_device_register(&iop13xx_tpmi_0_device);
		platform_device_register(&iop13xx_tpmi_1_device);
		platform_device_register(&iop13xx_tpmi_2_device);
		platform_device_register(&iop13xx_tpmi_3_device);
		return;
	}
}
gpl-2.0
alban/linux
arch/arm/mach-footbridge/isa-rtc.c
13661
1229
/* * arch/arm/mach-footbridge/isa-rtc.c * * Copyright (C) 1998 Russell King. * Copyright (C) 1998 Phil Blundell * * CATS has a real-time clock, though the evaluation board doesn't. * * Changelog: * 21-Mar-1998 RMK Created * 27-Aug-1998 PJB CATS support * 28-Dec-1998 APH Made leds optional * 20-Jan-1999 RMK Started merge of EBSA285, CATS and NetWinder * 16-Mar-1999 RMK More support for EBSA285-like machines with RTCs in */ #define RTC_PORT(x) (0x70+(x)) #define RTC_ALWAYS_BCD 0 #include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/bcd.h> #include <linux/io.h> #include "common.h" void __init isa_rtc_init(void) { int reg_d, reg_b; /* * Probe for the RTC. */ reg_d = CMOS_READ(RTC_REG_D); /* * make sure the divider is set */ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_REG_A); /* * Set control reg B * (24 hour mode, update enabled) */ reg_b = CMOS_READ(RTC_REG_B) & 0x7f; reg_b |= 2; CMOS_WRITE(reg_b, RTC_REG_B); if ((CMOS_READ(RTC_REG_A) & 0x7f) == RTC_REF_CLCK_32KHZ && CMOS_READ(RTC_REG_B) == reg_b) { /* * We have a RTC. Check the battery */ if ((reg_d & 0x80) == 0) printk(KERN_WARNING "RTC: *** warning: CMOS battery bad\n"); } }
gpl-2.0
siminles/hw01e_cm10_kernel
arch/arm/mach-msm/rpm.c
94
24775
/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/bug.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/platform_device.h> #include <asm/hardware/gic.h> #include <mach/msm_iomap.h> #include <mach/rpm.h> #include <mach/socinfo.h> /****************************************************************************** * Data type and structure definitions *****************************************************************************/ struct msm_rpm_request { struct msm_rpm_iv_pair *req; int count; uint32_t *ctx_mask_ack; uint32_t *sel_masks_ack; struct completion *done; }; struct msm_rpm_notif_config { struct msm_rpm_iv_pair iv[MSM_RPM_SEL_MASK_SIZE * 2]; }; #define configured_iv(notif_cfg) ((notif_cfg)->iv) #define registered_iv(notif_cfg) ((notif_cfg)->iv + MSM_RPM_SEL_MASK_SIZE) static struct msm_rpm_platform_data *msm_rpm_platform; static uint32_t msm_rpm_map[MSM_RPM_ID_LAST + 1]; static DEFINE_MUTEX(msm_rpm_mutex); static DEFINE_SPINLOCK(msm_rpm_lock); static DEFINE_SPINLOCK(msm_rpm_irq_lock); static struct msm_rpm_request *msm_rpm_request; static struct msm_rpm_request msm_rpm_request_irq_mode; static struct msm_rpm_request msm_rpm_request_poll_mode; static 
LIST_HEAD(msm_rpm_notifications); static struct msm_rpm_notif_config msm_rpm_notif_cfgs[MSM_RPM_CTX_SET_COUNT]; static bool msm_rpm_init_notif_done; /****************************************************************************** * Internal functions *****************************************************************************/ static inline uint32_t msm_rpm_read(unsigned int page, unsigned int reg) { return __raw_readl(msm_rpm_platform->reg_base_addrs[page] + reg * 4); } static inline void msm_rpm_write( unsigned int page, unsigned int reg, uint32_t value) { __raw_writel(value, msm_rpm_platform->reg_base_addrs[page] + reg * 4); } static inline void msm_rpm_read_contiguous( unsigned int page, unsigned int reg, uint32_t *values, int count) { int i; for (i = 0; i < count; i++) values[i] = msm_rpm_read(page, reg + i); } static inline void msm_rpm_write_contiguous( unsigned int page, unsigned int reg, uint32_t *values, int count) { int i; for (i = 0; i < count; i++) msm_rpm_write(page, reg + i, values[i]); } static inline void msm_rpm_write_contiguous_zeros( unsigned int page, unsigned int reg, int count) { int i; for (i = 0; i < count; i++) msm_rpm_write(page, reg + i, 0); } static inline uint32_t msm_rpm_map_id_to_sel(uint32_t id) { return (id > MSM_RPM_ID_LAST) ? MSM_RPM_SEL_LAST + 1 : msm_rpm_map[id]; } /* * Note: the function does not clear the masks before filling them. * * Return value: * 0: success * -EINVAL: invalid id in <req> array */ static int msm_rpm_fill_sel_masks( uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count) { uint32_t sel; int i; for (i = 0; i < count; i++) { sel = msm_rpm_map_id_to_sel(req[i].id); if (sel > MSM_RPM_SEL_LAST) return -EINVAL; sel_masks[msm_rpm_get_sel_mask_reg(sel)] |= msm_rpm_get_sel_mask(sel); } return 0; } static inline void msm_rpm_send_req_interrupt(void) { __raw_writel(msm_rpm_platform->msm_apps_ipc_rpm_val, msm_rpm_platform->msm_apps_ipc_rpm_reg); } /* * Note: assumes caller has acquired <msm_rpm_irq_lock>. 
* * Return value: * 0: request acknowledgement * 1: notification * 2: spurious interrupt */ static int msm_rpm_process_ack_interrupt(void) { uint32_t ctx_mask_ack; uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE]; ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0); msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_SEL_0, sel_masks_ack, MSM_RPM_SEL_MASK_SIZE); if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) { struct msm_rpm_notification *n; int i; list_for_each_entry(n, &msm_rpm_notifications, list) for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) if (sel_masks_ack[i] & n->sel_masks[i]) { up(&n->sem); break; } msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0); /* Ensure the write is complete before return */ mb(); return 1; } if (msm_rpm_request) { int i; *(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack; memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack, sizeof(sel_masks_ack)); for (i = 0; i < msm_rpm_request->count; i++) msm_rpm_request->req[i].value = msm_rpm_read(MSM_RPM_PAGE_ACK, msm_rpm_request->req[i].id); msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_SEL_0, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_ACK_CTX_0, 0); /* Ensure the write is complete before return */ mb(); if (msm_rpm_request->done) complete_all(msm_rpm_request->done); msm_rpm_request = NULL; return 0; } return 2; } static irqreturn_t msm_rpm_ack_interrupt(int irq, void *dev_id) { unsigned long flags; int rc; if (dev_id != &msm_rpm_ack_interrupt) return IRQ_NONE; spin_lock_irqsave(&msm_rpm_irq_lock, flags); rc = msm_rpm_process_ack_interrupt(); spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); return IRQ_HANDLED; } /* * Note: assumes caller has acquired <msm_rpm_irq_lock>. 
*/ static void msm_rpm_busy_wait_for_request_completion( bool allow_async_completion) { int rc; do { while (!gic_is_spi_pending(msm_rpm_platform->irq_ack) && msm_rpm_request) { if (allow_async_completion) spin_unlock(&msm_rpm_irq_lock); udelay(1); if (allow_async_completion) spin_lock(&msm_rpm_irq_lock); } if (!msm_rpm_request) break; rc = msm_rpm_process_ack_interrupt(); gic_clear_spi_pending(msm_rpm_platform->irq_ack); } while (rc); } /* Upon return, the <req> array will contain values from the ack page. * * Note: assumes caller has acquired <msm_rpm_mutex>. * * Return value: * 0: success * -ENOSPC: request rejected */ static int msm_rpm_set_exclusive(int ctx, uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count) { DECLARE_COMPLETION_ONSTACK(ack); unsigned long flags; uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx); uint32_t ctx_mask_ack = 0; uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE]; int i; msm_rpm_request_irq_mode.req = req; msm_rpm_request_irq_mode.count = count; msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack; msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack; msm_rpm_request_irq_mode.done = &ack; spin_lock_irqsave(&msm_rpm_lock, flags); spin_lock(&msm_rpm_irq_lock); BUG_ON(msm_rpm_request); msm_rpm_request = &msm_rpm_request_irq_mode; for (i = 0; i < count; i++) { BUG_ON(req[i].id > MSM_RPM_ID_LAST); msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value); } msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask); /* Ensure RPM data is written before sending the interrupt */ mb(); msm_rpm_send_req_interrupt(); spin_unlock(&msm_rpm_irq_lock); spin_unlock_irqrestore(&msm_rpm_lock, flags); wait_for_completion(&ack); BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))) != ctx_mask); BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack))); return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) 
? -ENOSPC : 0; } /* Upon return, the <req> array will contain values from the ack page. * * Note: assumes caller has acquired <msm_rpm_lock>. * * Return value: * 0: success * -ENOSPC: request rejected */ static int msm_rpm_set_exclusive_noirq(int ctx, uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count) { unsigned int irq = msm_rpm_platform->irq_ack; unsigned long flags; uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx); uint32_t ctx_mask_ack = 0; uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE]; struct irq_chip *irq_chip = NULL; int i; msm_rpm_request_poll_mode.req = req; msm_rpm_request_poll_mode.count = count; msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack; msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack; msm_rpm_request_poll_mode.done = NULL; spin_lock_irqsave(&msm_rpm_irq_lock, flags); irq_chip = irq_get_chip(irq); if (!irq_chip) { spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); return -ENOSPC; } irq_chip->irq_mask(irq_get_irq_data(irq)); if (msm_rpm_request) { msm_rpm_busy_wait_for_request_completion(true); BUG_ON(msm_rpm_request); } msm_rpm_request = &msm_rpm_request_poll_mode; for (i = 0; i < count; i++) { BUG_ON(req[i].id > MSM_RPM_ID_LAST); msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value); } msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask); /* Ensure RPM data is written before sending the interrupt */ mb(); msm_rpm_send_req_interrupt(); msm_rpm_busy_wait_for_request_completion(false); BUG_ON(msm_rpm_request); irq_chip->irq_unmask(irq_get_irq_data(irq)); spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))) != ctx_mask); BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack))); return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) ? -ENOSPC : 0; } /* Upon return, the <req> array will contain values from the ack page. 
* * Return value: * 0: success * -EINVAL: invalid <ctx> or invalid id in <req> array * -ENOSPC: request rejected * -ENODEV: RPM driver not initialized */ static int msm_rpm_set_common( int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq) { uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {}; int rc; if (!msm_rpm_platform) { if (cpu_is_apq8064()) return 0; else return -ENODEV; } if (ctx >= MSM_RPM_CTX_SET_COUNT) { rc = -EINVAL; goto set_common_exit; } rc = msm_rpm_fill_sel_masks(sel_masks, req, count); if (rc) goto set_common_exit; if (noirq) { unsigned long flags; spin_lock_irqsave(&msm_rpm_lock, flags); rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, req, count); spin_unlock_irqrestore(&msm_rpm_lock, flags); } else { mutex_lock(&msm_rpm_mutex); rc = msm_rpm_set_exclusive(ctx, sel_masks, req, count); mutex_unlock(&msm_rpm_mutex); } set_common_exit: return rc; } /* * Return value: * 0: success * -EINVAL: invalid <ctx> or invalid id in <req> array * -ENODEV: RPM driver not initialized. 
*/ static int msm_rpm_clear_common( int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq) { uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {}; struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE]; int rc; int i; if (!msm_rpm_platform) { if (cpu_is_apq8064()) return 0; else return -ENODEV; } if (ctx >= MSM_RPM_CTX_SET_COUNT) { rc = -EINVAL; goto clear_common_exit; } rc = msm_rpm_fill_sel_masks(sel_masks, req, count); if (rc) goto clear_common_exit; for (i = 0; i < ARRAY_SIZE(r); i++) { r[i].id = MSM_RPM_ID_INVALIDATE_0 + i; r[i].value = sel_masks[i]; } memset(sel_masks, 0, sizeof(sel_masks)); sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |= msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE); if (noirq) { unsigned long flags; spin_lock_irqsave(&msm_rpm_lock, flags); rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r, ARRAY_SIZE(r)); spin_unlock_irqrestore(&msm_rpm_lock, flags); BUG_ON(rc); } else { mutex_lock(&msm_rpm_mutex); rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r)); mutex_unlock(&msm_rpm_mutex); BUG_ON(rc); } clear_common_exit: return rc; } /* * Note: assumes caller has acquired <msm_rpm_mutex>. */ static void msm_rpm_update_notification(uint32_t ctx, struct msm_rpm_notif_config *curr_cfg, struct msm_rpm_notif_config *new_cfg) { if (memcmp(curr_cfg, new_cfg, sizeof(*new_cfg))) { uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {}; int rc; sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_NOTIFICATION)] |= msm_rpm_get_sel_mask(MSM_RPM_SEL_NOTIFICATION); rc = msm_rpm_set_exclusive(ctx, sel_masks, new_cfg->iv, ARRAY_SIZE(new_cfg->iv)); BUG_ON(rc); memcpy(curr_cfg, new_cfg, sizeof(*new_cfg)); } } /* * Note: assumes caller has acquired <msm_rpm_mutex>. 
*/ static void msm_rpm_initialize_notification(void) { struct msm_rpm_notif_config cfg; unsigned int ctx; int i; for (ctx = MSM_RPM_CTX_SET_0; ctx <= MSM_RPM_CTX_SET_SLEEP; ctx++) { cfg = msm_rpm_notif_cfgs[ctx]; for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) { configured_iv(&cfg)[i].id = MSM_RPM_ID_NOTIFICATION_CONFIGURED_0 + i; configured_iv(&cfg)[i].value = ~0UL; registered_iv(&cfg)[i].id = MSM_RPM_ID_NOTIFICATION_REGISTERED_0 + i; registered_iv(&cfg)[i].value = 0; } msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg); } } /****************************************************************************** * Public functions *****************************************************************************/ int msm_rpm_local_request_is_outstanding(void) { unsigned long flags; int outstanding = 0; if (!spin_trylock_irqsave(&msm_rpm_lock, flags)) goto local_request_is_outstanding_exit; if (!spin_trylock(&msm_rpm_irq_lock)) goto local_request_is_outstanding_unlock; outstanding = (msm_rpm_request != NULL); spin_unlock(&msm_rpm_irq_lock); local_request_is_outstanding_unlock: spin_unlock_irqrestore(&msm_rpm_lock, flags); local_request_is_outstanding_exit: return outstanding; } /* * Read the specified status registers and return their values. * * status: array of id-value pairs. Each <id> specifies a status register, * i.e, one of MSM_RPM_STATUS_ID_xxxx. Upon return, each <value> will * contain the value of the status register. 
* count: number of id-value pairs in the array * * Return value: * 0: success * -EBUSY: RPM is updating the status page; values across different registers * may not be consistent * -EINVAL: invalid id in <status> array * -ENODEV: RPM driver not initialized */ int msm_rpm_get_status(struct msm_rpm_iv_pair *status, int count) { uint32_t seq_begin; uint32_t seq_end; int rc; int i; if (!msm_rpm_platform) { if (cpu_is_apq8064()) return 0; else return -ENODEV; } seq_begin = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_SEQUENCE); for (i = 0; i < count; i++) { if (status[i].id > MSM_RPM_STATUS_ID_LAST) { rc = -EINVAL; goto get_status_exit; } status[i].value = msm_rpm_read(MSM_RPM_PAGE_STATUS, status[i].id); } seq_end = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_SEQUENCE); rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0; get_status_exit: return rc; } EXPORT_SYMBOL(msm_rpm_get_status); /* * Issue a resource request to RPM to set resource values. * * Note: the function may sleep and must be called in a task context. * * ctx: the request's context. * There two contexts that a RPM driver client can use: * MSM_RPM_CTX_SET_0 and MSM_RPM_CTX_SET_SLEEP. For resource values * that are intended to take effect when the CPU is active, * MSM_RPM_CTX_SET_0 should be used. For resource values that are * intended to take effect when the CPU is not active, * MSM_RPM_CTX_SET_SLEEP should be used. * req: array of id-value pairs. Each <id> specifies a RPM resource, * i.e, one of MSM_RPM_ID_xxxx. Each <value> specifies the requested * resource value. * count: number of id-value pairs in the array * * Return value: * 0: success * -EINVAL: invalid <ctx> or invalid id in <req> array * -ENOSPC: request rejected * -ENODEV: RPM driver not initialized */ int msm_rpm_set(int ctx, struct msm_rpm_iv_pair *req, int count) { return msm_rpm_set_common(ctx, req, count, false); } EXPORT_SYMBOL(msm_rpm_set); /* * Issue a resource request to RPM to set resource values. 
* * Note: the function is similar to msm_rpm_set() except that it must be * called with interrupts masked. If possible, use msm_rpm_set() * instead, to maximize CPU throughput. */ int msm_rpm_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count) { WARN(!irqs_disabled(), "msm_rpm_set_noirq can only be called " "safely when local irqs are disabled. Consider using " "msm_rpm_set or msm_rpm_set_nosleep instead."); return msm_rpm_set_common(ctx, req, count, true); } EXPORT_SYMBOL(msm_rpm_set_noirq); /* * Issue a resource request to RPM to clear resource values. Once the * values are cleared, the resources revert back to their default values * for this RPM master. * * Note: the function may sleep and must be called in a task context. * * ctx: the request's context. * req: array of id-value pairs. Each <id> specifies a RPM resource, * i.e, one of MSM_RPM_ID_xxxx. <value>'s are ignored. * count: number of id-value pairs in the array * * Return value: * 0: success * -EINVAL: invalid <ctx> or invalid id in <req> array */ int msm_rpm_clear(int ctx, struct msm_rpm_iv_pair *req, int count) { return msm_rpm_clear_common(ctx, req, count, false); } EXPORT_SYMBOL(msm_rpm_clear); /* * Issue a resource request to RPM to clear resource values. * * Note: the function is similar to msm_rpm_clear() except that it must be * called with interrupts masked. If possible, use msm_rpm_clear() * instead, to maximize CPU throughput. */ int msm_rpm_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count) { WARN(!irqs_disabled(), "msm_rpm_clear_noirq can only be called " "safely when local irqs are disabled. Consider using " "msm_rpm_clear or msm_rpm_clear_nosleep instead."); return msm_rpm_clear_common(ctx, req, count, true); } EXPORT_SYMBOL(msm_rpm_clear_noirq); /* * Register for RPM notification. When the specified resources * change their status on RPM, RPM sends out notifications and the * driver will "up" the semaphore in struct msm_rpm_notification. 
* * Note: the function may sleep and must be called in a task context. * * Memory for <n> must not be freed until the notification is * unregistered. Memory for <req> can be freed after this * function returns. * * n: the notifcation object. Caller should initialize only the * semaphore field. When a notification arrives later, the * semaphore will be "up"ed. * req: array of id-value pairs. Each <id> specifies a status register, * i.e, one of MSM_RPM_STATUS_ID_xxxx. <value>'s are ignored. * count: number of id-value pairs in the array * * Return value: * 0: success * -EINVAL: invalid id in <req> array * -ENODEV: RPM driver not initialized */ int msm_rpm_register_notification(struct msm_rpm_notification *n, struct msm_rpm_iv_pair *req, int count) { unsigned long flags; unsigned int ctx; struct msm_rpm_notif_config cfg; int rc; int i; if (!msm_rpm_platform) { if (cpu_is_apq8064()) return 0; else return -ENODEV; } INIT_LIST_HEAD(&n->list); rc = msm_rpm_fill_sel_masks(n->sel_masks, req, count); if (rc) goto register_notification_exit; mutex_lock(&msm_rpm_mutex); if (!msm_rpm_init_notif_done) { msm_rpm_initialize_notification(); msm_rpm_init_notif_done = true; } spin_lock_irqsave(&msm_rpm_irq_lock, flags); list_add(&n->list, &msm_rpm_notifications); spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); ctx = MSM_RPM_CTX_SET_0; cfg = msm_rpm_notif_cfgs[ctx]; for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) registered_iv(&cfg)[i].value |= n->sel_masks[i]; msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg); mutex_unlock(&msm_rpm_mutex); register_notification_exit: return rc; } EXPORT_SYMBOL(msm_rpm_register_notification); /* * Unregister a notification. * * Note: the function may sleep and must be called in a task context. * * n: the notifcation object that was registered previously. 
* * Return value: * 0: success * -ENODEV: RPM driver not initialized */ int msm_rpm_unregister_notification(struct msm_rpm_notification *n) { unsigned long flags; unsigned int ctx; struct msm_rpm_notif_config cfg; int rc = 0; int i; if (!msm_rpm_platform) { if (cpu_is_apq8064()) return 0; else return -ENODEV; } mutex_lock(&msm_rpm_mutex); ctx = MSM_RPM_CTX_SET_0; cfg = msm_rpm_notif_cfgs[ctx]; for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) registered_iv(&cfg)[i].value = 0; spin_lock_irqsave(&msm_rpm_irq_lock, flags); list_del(&n->list); list_for_each_entry(n, &msm_rpm_notifications, list) for (i = 0; i < MSM_RPM_SEL_MASK_SIZE; i++) registered_iv(&cfg)[i].value |= n->sel_masks[i]; spin_unlock_irqrestore(&msm_rpm_irq_lock, flags); msm_rpm_update_notification(ctx, &msm_rpm_notif_cfgs[ctx], &cfg); mutex_unlock(&msm_rpm_mutex); return rc; } EXPORT_SYMBOL(msm_rpm_unregister_notification); static uint32_t fw_major, fw_minor, fw_build; static ssize_t driver_version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n", RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER); } static ssize_t fw_version_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%u.%u.%u\n", fw_major, fw_minor, fw_build); } static struct kobj_attribute driver_version_attr = __ATTR_RO(driver_version); static struct kobj_attribute fw_version_attr = __ATTR_RO(fw_version); static struct attribute *driver_attributes[] = { &driver_version_attr.attr, &fw_version_attr.attr, NULL }; static struct attribute_group driver_attr_group = { .attrs = driver_attributes, }; static int __devinit msm_rpm_probe(struct platform_device *pdev) { return sysfs_create_group(&pdev->dev.kobj, &driver_attr_group); } static int __devexit msm_rpm_remove(struct platform_device *pdev) { sysfs_remove_group(&pdev->dev.kobj, &driver_attr_group); return 0; } static struct platform_driver msm_rpm_platform_driver = { .probe = msm_rpm_probe, 
.remove = __devexit_p(msm_rpm_remove), .driver = { .name = "msm_rpm", .owner = THIS_MODULE, }, }; static void __init msm_rpm_populate_map(void) { int i, k; for (i = 0; i < ARRAY_SIZE(msm_rpm_map); i++) msm_rpm_map[i] = MSM_RPM_SEL_LAST + 1; for (i = 0; i < rpm_map_data_size; i++) { struct msm_rpm_map_data *raw_data = &rpm_map_data[i]; for (k = 0; k < raw_data->count; k++) msm_rpm_map[raw_data->id + k] = raw_data->sel; } } int __init msm_rpm_init(struct msm_rpm_platform_data *data) { unsigned int irq; int rc; if (cpu_is_apq8064()) return 0; msm_rpm_platform = data; fw_major = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_VERSION_MAJOR); fw_minor = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_VERSION_MINOR); fw_build = msm_rpm_read(MSM_RPM_PAGE_STATUS, MSM_RPM_STATUS_ID_VERSION_BUILD); pr_info("%s: RPM firmware %u.%u.%u\n", __func__, fw_major, fw_minor, fw_build); if (fw_major != RPM_MAJOR_VER) { pr_err("%s: RPM version %u.%u.%u incompatible with " "this driver version %u.%u.%u\n", __func__, fw_major, fw_minor, fw_build, RPM_MAJOR_VER, RPM_MINOR_VER, RPM_BUILD_VER); return -EFAULT; } msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MAJOR, RPM_MAJOR_VER); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_MINOR, RPM_MINOR_VER); msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_VERSION_BUILD, RPM_BUILD_VER); irq = msm_rpm_platform->irq_ack; rc = request_irq(irq, msm_rpm_ack_interrupt, IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, "rpm_drv", msm_rpm_ack_interrupt); if (rc) { pr_err("%s: failed to request irq %d: %d\n", __func__, irq, rc); return rc; } rc = irq_set_irq_wake(irq, 1); if (rc) { pr_err("%s: failed to set wakeup irq %u: %d\n", __func__, irq, rc); return rc; } msm_rpm_populate_map(); return platform_driver_register(&msm_rpm_platform_driver); }
gpl-2.0
hvaibhav/am335x-linux
drivers/net/wireless/ath/carl9170/fw.c
94
12083
/* * Atheros CARL9170 driver * * firmware parser * * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, see * http://www.gnu.org/licenses/. */ #include <linux/kernel.h> #include <linux/firmware.h> #include <linux/crc32.h> #include <linux/module.h> #include "carl9170.h" #include "fwcmd.h" #include "version.h" #define MAKE_STR(symbol) #symbol #define TO_STR(symbol) MAKE_STR(symbol) #define CARL9170FW_API_VER_STR TO_STR(CARL9170FW_API_MAX_VER) MODULE_VERSION(CARL9170FW_API_VER_STR ":" CARL9170FW_VERSION_GIT); static const u8 otus_magic[4] = { OTUS_MAGIC }; static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4], const unsigned int len, const u8 compatible_revision) { const struct carl9170fw_desc_head *iter; carl9170fw_for_each_hdr(iter, ar->fw.desc) { if (carl9170fw_desc_cmp(iter, descid, len, compatible_revision)) return (void *)iter; } /* needed to find the LAST desc */ if (carl9170fw_desc_cmp(iter, descid, len, compatible_revision)) return (void *)iter; return NULL; } static int carl9170_fw_verify_descs(struct ar9170 *ar, const struct carl9170fw_desc_head *head, unsigned int max_len) { const struct carl9170fw_desc_head *pos; unsigned long pos_addr, end_addr; unsigned int pos_length; if (max_len < sizeof(*pos)) return -ENODATA; max_len = min_t(unsigned int, CARL9170FW_DESC_MAX_LENGTH, max_len); pos = head; pos_addr = 
(unsigned long) pos; end_addr = pos_addr + max_len; while (pos_addr < end_addr) { if (pos_addr + sizeof(*head) > end_addr) return -E2BIG; pos_length = le16_to_cpu(pos->length); if (pos_length < sizeof(*head)) return -EBADMSG; if (pos_length > max_len) return -EOVERFLOW; if (pos_addr + pos_length > end_addr) return -EMSGSIZE; if (carl9170fw_desc_cmp(pos, LAST_MAGIC, CARL9170FW_LAST_DESC_SIZE, CARL9170FW_LAST_DESC_CUR_VER)) return 0; pos_addr += pos_length; pos = (void *)pos_addr; max_len -= pos_length; } return -EINVAL; } static void carl9170_fw_info(struct ar9170 *ar) { const struct carl9170fw_motd_desc *motd_desc; unsigned int str_ver_len; u32 fw_date; dev_info(&ar->udev->dev, "driver API: %s 2%03d-%02d-%02d [%d-%d]\n", CARL9170FW_VERSION_GIT, CARL9170FW_VERSION_YEAR, CARL9170FW_VERSION_MONTH, CARL9170FW_VERSION_DAY, CARL9170FW_API_MIN_VER, CARL9170FW_API_MAX_VER); motd_desc = carl9170_fw_find_desc(ar, MOTD_MAGIC, sizeof(*motd_desc), CARL9170FW_MOTD_DESC_CUR_VER); if (motd_desc) { str_ver_len = strnlen(motd_desc->release, CARL9170FW_MOTD_RELEASE_LEN); fw_date = le32_to_cpu(motd_desc->fw_year_month_day); dev_info(&ar->udev->dev, "firmware API: %.*s 2%03d-%02d-%02d\n", str_ver_len, motd_desc->release, CARL9170FW_GET_YEAR(fw_date), CARL9170FW_GET_MONTH(fw_date), CARL9170FW_GET_DAY(fw_date)); strlcpy(ar->hw->wiphy->fw_version, motd_desc->release, sizeof(ar->hw->wiphy->fw_version)); } } static bool valid_dma_addr(const u32 address) { if (address >= AR9170_SRAM_OFFSET && address < (AR9170_SRAM_OFFSET + AR9170_SRAM_SIZE)) return true; return false; } static bool valid_cpu_addr(const u32 address) { if (valid_dma_addr(address) || (address >= AR9170_PRAM_OFFSET && address < (AR9170_PRAM_OFFSET + AR9170_PRAM_SIZE))) return true; return false; } static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data, size_t len) { const struct carl9170fw_otus_desc *otus_desc; const struct carl9170fw_last_desc *last_desc; const struct carl9170fw_chk_desc *chk_desc; unsigned long 
fin, diff; unsigned int dsc_len; u32 crc32; last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); if (!last_desc) return -EINVAL; otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER); if (!otus_desc) { dev_err(&ar->udev->dev, "failed to find compatible firmware " "descriptor.\n"); return -ENODATA; } chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC, sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER); if (!chk_desc) { dev_warn(&ar->udev->dev, "Unprotected firmware image.\n"); return 0; } dsc_len = min_t(unsigned int, len, (unsigned long)chk_desc - (unsigned long)otus_desc); fin = (unsigned long) last_desc + sizeof(*last_desc); diff = fin - (unsigned long) otus_desc; if (diff < len) len -= diff; if (len < 256) return -EIO; crc32 = crc32_le(~0, data, len); if (cpu_to_le32(crc32) != chk_desc->fw_crc32) { dev_err(&ar->udev->dev, "fw checksum test failed.\n"); return -ENOEXEC; } crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len); if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) { dev_err(&ar->udev->dev, "descriptor check failed.\n"); return -EINVAL; } return 0; } static int carl9170_fw_tx_sequence(struct ar9170 *ar) { const struct carl9170fw_txsq_desc *txsq_desc; txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); if (txsq_desc) { ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); if (!valid_cpu_addr(ar->fw.tx_seq_table)) return -EINVAL; } else { ar->fw.tx_seq_table = 0; } return 0; } static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) { const struct carl9170fw_otus_desc *otus_desc; int err; u16 if_comb_types; err = carl9170_fw_checksum(ar, data, len); if (err) return err; otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC, sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER); if (!otus_desc) { return -ENODATA; } #define SUPP(feat) \ (carl9170fw_supports(otus_desc->feature_set, feat)) if 
(!SUPP(CARL9170FW_DUMMY_FEATURE)) { dev_err(&ar->udev->dev, "invalid firmware descriptor " "format detected.\n"); return -EINVAL; } ar->fw.api_version = otus_desc->api_ver; if (ar->fw.api_version < CARL9170FW_API_MIN_VER || ar->fw.api_version > CARL9170FW_API_MAX_VER) { dev_err(&ar->udev->dev, "unsupported firmware api version.\n"); return -EINVAL; } if (!SUPP(CARL9170FW_COMMAND_PHY) || SUPP(CARL9170FW_UNUSABLE) || !SUPP(CARL9170FW_HANDLE_BACK_REQ)) { dev_err(&ar->udev->dev, "firmware does support " "mandatory features.\n"); return -ECANCELED; } if (ilog2(le32_to_cpu(otus_desc->feature_set)) >= __CARL9170FW_FEATURE_NUM) { dev_warn(&ar->udev->dev, "driver does not support all " "firmware features.\n"); } if (!SUPP(CARL9170FW_COMMAND_CAM)) { dev_info(&ar->udev->dev, "crypto offloading is disabled " "by firmware.\n"); ar->disable_offload = true; } if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM)) ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS; if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) { dev_err(&ar->udev->dev, "firmware does not provide " "mandatory interfaces.\n"); return -EINVAL; } if (SUPP(CARL9170FW_MINIBOOT)) ar->fw.offset = le16_to_cpu(otus_desc->miniboot_size); else ar->fw.offset = 0; if (SUPP(CARL9170FW_USB_DOWN_STREAM)) { ar->hw->extra_tx_headroom += sizeof(struct ar9170_stream); ar->fw.tx_stream = true; } if (SUPP(CARL9170FW_USB_UP_STREAM)) ar->fw.rx_stream = true; if (SUPP(CARL9170FW_RX_FILTER)) { ar->fw.rx_filter = true; ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL | FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS; } if (SUPP(CARL9170FW_HW_COUNTERS)) ar->fw.hw_counters = true; if (SUPP(CARL9170FW_WOL)) device_set_wakeup_enable(&ar->udev->dev, true); if_comb_types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT); ar->fw.vif_num = otus_desc->vif_num; ar->fw.cmd_bufs = otus_desc->cmd_bufs; ar->fw.address = le32_to_cpu(otus_desc->fw_address); ar->fw.rx_size = le16_to_cpu(otus_desc->rx_max_frame_len); ar->fw.mem_blocks = 
min_t(unsigned int, otus_desc->tx_descs, 0xfe); atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks); ar->fw.mem_block_size = le16_to_cpu(otus_desc->tx_frag_len); if (ar->fw.vif_num >= AR9170_MAX_VIRTUAL_MAC || !ar->fw.vif_num || ar->fw.mem_blocks < 16 || !ar->fw.cmd_bufs || ar->fw.mem_block_size < 64 || ar->fw.mem_block_size > 512 || ar->fw.rx_size > 32768 || ar->fw.rx_size < 4096 || !valid_cpu_addr(ar->fw.address)) { dev_err(&ar->udev->dev, "firmware shows obvious signs of " "malicious tampering.\n"); return -EINVAL; } ar->fw.beacon_addr = le32_to_cpu(otus_desc->bcn_addr); ar->fw.beacon_max_len = le16_to_cpu(otus_desc->bcn_len); if (valid_dma_addr(ar->fw.beacon_addr) && ar->fw.beacon_max_len >= AR9170_MAC_BCN_LENGTH_MAX) { ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); if (SUPP(CARL9170FW_WLANTX_CAB)) { if_comb_types |= BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO); } } ar->if_comb_limits[0].max = ar->fw.vif_num; ar->if_comb_limits[0].types = if_comb_types; ar->if_combs[0].num_different_channels = 1; ar->if_combs[0].max_interfaces = ar->fw.vif_num; ar->if_combs[0].limits = ar->if_comb_limits; ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits); ar->hw->wiphy->iface_combinations = ar->if_combs; ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs); ar->hw->wiphy->interface_modes |= if_comb_types; #undef SUPPORTED return carl9170_fw_tx_sequence(ar); } static struct carl9170fw_desc_head * carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len) { int scan = 0, found = 0; if (!carl9170fw_size_check(len)) { dev_err(&ar->udev->dev, "firmware size is out of bound.\n"); return NULL; } while (scan < len - sizeof(struct carl9170fw_desc_head)) { if (fw_data[scan++] == otus_magic[found]) found++; else found = 0; if (scan >= len) break; if (found == sizeof(otus_magic)) break; } if (found != sizeof(otus_magic)) return NULL; return (void *)&fw_data[scan - found]; } int carl9170_fw_fix_eeprom(struct ar9170 *ar) { const 
struct carl9170fw_fix_desc *fix_desc = NULL; unsigned int i, n, off; u32 *data = (void *)&ar->eeprom; fix_desc = carl9170_fw_find_desc(ar, FIX_MAGIC, sizeof(*fix_desc), CARL9170FW_FIX_DESC_CUR_VER); if (!fix_desc) return 0; n = (le16_to_cpu(fix_desc->head.length) - sizeof(*fix_desc)) / sizeof(struct carl9170fw_fix_entry); for (i = 0; i < n; i++) { off = le32_to_cpu(fix_desc->data[i].address) - AR9170_EEPROM_START; if (off >= sizeof(struct ar9170_eeprom) || (off & 3)) { dev_err(&ar->udev->dev, "Skip invalid entry %d\n", i); continue; } data[off / sizeof(*data)] &= le32_to_cpu(fix_desc->data[i].mask); data[off / sizeof(*data)] |= le32_to_cpu(fix_desc->data[i].value); } return 0; } int carl9170_parse_firmware(struct ar9170 *ar) { const struct carl9170fw_desc_head *fw_desc = NULL; const struct firmware *fw = ar->fw.fw; unsigned long header_offset = 0; int err; if (WARN_ON(!fw)) return -EINVAL; fw_desc = carl9170_find_fw_desc(ar, fw->data, fw->size); if (!fw_desc) { dev_err(&ar->udev->dev, "unsupported firmware.\n"); return -ENODATA; } header_offset = (unsigned long)fw_desc - (unsigned long)fw->data; err = carl9170_fw_verify_descs(ar, fw_desc, fw->size - header_offset); if (err) { dev_err(&ar->udev->dev, "damaged firmware (%d).\n", err); return err; } ar->fw.desc = fw_desc; carl9170_fw_info(ar); err = carl9170_fw(ar, fw->data, fw->size); if (err) { dev_err(&ar->udev->dev, "failed to parse firmware (%d).\n", err); return err; } return 0; }
gpl-2.0
vidonme/xbmc
xbmc/visualizations/XBMCProjectM/libprojectM/image_DXT.c
94
16540
/*
	Jonathan Dummer
	2007-07-31-10.32

	simple DXT compression / decompression code

	public domain
*/

#include "image_DXT.h"
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/*	set this =1 if you want to use the covariance matrix method...
	which is better than my method of using standard deviations
	overall, except on the infintesimal chance that the power
	method fails for finding the largest eigenvector	*/
#define USE_COV_MAT 1

/********* Function Prototypes *********/
/*
	Takes a 4x4 block of pixels and compresses it into 8 bytes
	in DXT1 format (color only, no alpha).  Speed is valued
	over prettyness, at least for now.
*/
void compress_DDS_color_block(
		int channels,
		const unsigned char *const uncompressed,
		unsigned char compressed[8]);
/*
	Takes a 4x4 block of pixels and compresses the alpha
	component it into 8 bytes for use in DXT5 DDS files.
	Speed is valued over prettyness, at least for now.
*/
void compress_DDS_alpha_block(
		const unsigned char *const uncompressed,
		unsigned char compressed[8]);

/********* Actual Exposed Functions *********/

/*
	Compresses `data` (width x height, `channels` components) to DXT1
	(odd channel counts: no alpha) or DXT5 (even counts: has alpha)
	and writes it out as a .dds file.
	Returns 1 on success, 0 on any failure (bad args, OOM, I/O error).
*/
int save_image_as_DDS(
		const char *filename,
		int width, int height, int channels,
		const unsigned char *const data)
{
	/* variables */
	FILE *fout;
	unsigned char *DDS_data;
	DDS_header header;
	int DDS_size;
	/* error check */
	if ((NULL == filename) ||
	    (width < 1) || (height < 1) ||
	    (channels < 1) || (channels > 4) ||
	    (data == NULL)) {
		return 0;
	}
	/* Convert the image */
	if ((channels & 1) == 1) {
		/* no alpha, just use DXT1 */
		DDS_data = convert_image_to_DXT1(data, width, height, channels, &DDS_size);
	} else {
		/* has alpha, so use DXT5 */
		DDS_data = convert_image_to_DXT5(data, width, height, channels, &DDS_size);
	}
	/* FIX: the conversion can fail (out of memory); writing a NULL
	   buffer below would crash */
	if (NULL == DDS_data) {
		return 0;
	}
	/* build the header */
	memset(&header, 0, sizeof(DDS_header));
	header.dwMagic = ('D' << 0) | ('D' << 8) | ('S' << 16) | (' ' << 24);
	header.dwSize = 124;
	header.dwFlags = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH |
			 DDSD_PIXELFORMAT | DDSD_LINEARSIZE;
	header.dwWidth = width;
	header.dwHeight = height;
	header.dwPitchOrLinearSize = DDS_size;
	header.sPixelFormat.dwSize = 32;
	header.sPixelFormat.dwFlags = DDPF_FOURCC;
	if ((channels & 1) == 1) {
		header.sPixelFormat.dwFourCC =
			('D' << 0) | ('X' << 8) | ('T' << 16) | ('1' << 24);
	} else {
		header.sPixelFormat.dwFourCC =
			('D' << 0) | ('X' << 8) | ('T' << 16) | ('5' << 24);
	}
	header.sCaps.dwCaps1 = DDSCAPS_TEXTURE;
	/* write it out */
	fout = fopen(filename, "wb");
	/* FIX: fopen was unchecked; an unwritable path crashed in fwrite
	   and leaked DDS_data */
	if (NULL == fout) {
		free(DDS_data);
		return 0;
	}
	fwrite(&header, sizeof(DDS_header), 1, fout);
	fwrite(DDS_data, 1, DDS_size, fout);
	fclose(fout);
	/* done */
	free(DDS_data);
	return 1;
}

/*
	Compresses `uncompressed` to a freshly malloc'd DXT1 buffer
	(8 bytes per 4x4 block); *out_size receives the byte count.
	Returns NULL (with *out_size == 0) on bad arguments or OOM.
	Caller owns and must free() the returned buffer.
*/
unsigned char *convert_image_to_DXT1(
		const unsigned char *const uncompressed,
		int width, int height, int channels,
		int *out_size)
{
	unsigned char *compressed;
	int i, j, x, y;
	unsigned char ublock[16 * 3];
	unsigned char cblock[8];
	int index = 0, chan_step = 1;
	int block_count = 0;
	/* error check */
	*out_size = 0;
	if ((width < 1) || (height < 1) ||
	    (NULL == uncompressed) ||
	    (channels < 1) || (channels > 4)) {
		return NULL;
	}
	/* for channels == 1 or 2, I do not step forward for R,G,B values */
	if (channels < 3) {
		chan_step = 0;
	}
	/* get the RAM for the compressed image
	   (8 bytes per 4x4 pixel block) */
	*out_size = ((width + 3) >> 2) * ((height + 3) >> 2) * 8;
	compressed = (unsigned char *)malloc(*out_size);
	/* FIX: malloc was unchecked */
	if (NULL == compressed) {
		*out_size = 0;
		return NULL;
	}
	/* go through each block */
	for (j = 0; j < height; j += 4) {
		for (i = 0; i < width; i += 4) {
			/* copy this block into a new one */
			int idx = 0;
			int mx = 4, my = 4;
			if (j + 4 >= height) {
				my = height - j;
			}
			if (i + 4 >= width) {
				mx = width - i;
			}
			for (y = 0; y < my; ++y) {
				for (x = 0; x < mx; ++x) {
					ublock[idx++] = uncompressed[(j + y) * width * channels + (i + x) * channels];
					ublock[idx++] = uncompressed[(j + y) * width * channels + (i + x) * channels + chan_step];
					ublock[idx++] = uncompressed[(j + y) * width * channels + (i + x) * channels + chan_step + chan_step];
				}
				/* pad edge blocks by replicating the first pixel */
				for (x = mx; x < 4; ++x) {
					ublock[idx++] = ublock[0];
					ublock[idx++] = ublock[1];
					ublock[idx++] = ublock[2];
				}
			}
			for (y = my; y < 4; ++y) {
				for (x = 0; x < 4; ++x) {
					ublock[idx++] = ublock[0];
					ublock[idx++] = ublock[1];
					ublock[idx++] = ublock[2];
				}
			}
			/* compress the block */
			++block_count;
			compress_DDS_color_block(3, ublock, cblock);
			/* copy the data from the block into the main block */
			for (x = 0; x < 8; ++x) {
				compressed[index++] = cblock[x];
			}
		}
	}
	return compressed;
}

/*
	Compresses `uncompressed` to a freshly malloc'd DXT5 buffer
	(8 alpha + 8 color bytes per 4x4 block); *out_size receives the
	byte count.  Returns NULL (with *out_size == 0) on bad arguments
	or OOM.  Caller owns and must free() the returned buffer.
*/
unsigned char *convert_image_to_DXT5(
		const unsigned char *const uncompressed,
		int width, int height, int channels,
		int *out_size)
{
	unsigned char *compressed;
	int i, j, x, y;
	unsigned char ublock[16 * 4];
	unsigned char cblock[8];
	int index = 0, chan_step = 1;
	int block_count = 0, has_alpha;
	/* error check */
	*out_size = 0;
	if ((width < 1) || (height < 1) ||
	    (NULL == uncompressed) ||
	    (channels < 1) || (channels > 4)) {
		return NULL;
	}
	/* for channels == 1 or 2, I do not step forward for R,G,B vales */
	if (channels < 3) {
		chan_step = 0;
	}
	/* # channels = 1 or 3 have no alpha, 2 & 4 do have alpha */
	has_alpha = 1 - (channels & 1);
	/* get the RAM for the compressed image
	   (16 bytes per 4x4 pixel block) */
	*out_size = ((width + 3) >> 2) * ((height + 3) >> 2) * 16;
	compressed = (unsigned char *)malloc(*out_size);
	/* FIX: malloc was unchecked */
	if (NULL == compressed) {
		*out_size = 0;
		return NULL;
	}
	/* go through each block */
	for (j = 0; j < height; j += 4) {
		for (i = 0; i < width; i += 4) {
			/* local variables, and my block counter */
			int idx = 0;
			int mx = 4, my = 4;
			if (j + 4 >= height) {
				my = height - j;
			}
			if (i + 4 >= width) {
				mx = width - i;
			}
			for (y = 0; y < my; ++y) {
				for (x = 0; x < mx; ++x) {
					ublock[idx++] = uncompressed[(j + y) * width * channels + (i + x) * channels];
					ublock[idx++] = uncompressed[(j + y) * width * channels + (i + x) * channels + chan_step];
					ublock[idx++] = uncompressed[(j + y) * width * channels + (i + x) * channels + chan_step + chan_step];
					/* opaque (255) when the source has no alpha channel */
					ublock[idx++] = has_alpha * uncompressed[(j + y) * width * channels + (i + x) * channels + channels - 1]
							+ (1 - has_alpha) * 255;
				}
				/* pad edge blocks by replicating the first pixel */
				for (x = mx; x < 4; ++x) {
					ublock[idx++] = ublock[0];
					ublock[idx++] = ublock[1];
					ublock[idx++] = ublock[2];
					ublock[idx++] = ublock[3];
				}
			}
			for (y = my; y < 4; ++y) {
				for (x = 0; x < 4; ++x) {
					ublock[idx++] = ublock[0];
					ublock[idx++] = ublock[1];
					ublock[idx++] = ublock[2];
					ublock[idx++] = ublock[3];
				}
			}
			/* now compress the alpha block */
			compress_DDS_alpha_block(ublock, cblock);
			/* copy the data from the compressed alpha block
			   into the main buffer */
			for (x = 0; x < 8; ++x) {
				compressed[index++] = cblock[x];
			}
			/* then compress the color block */
			++block_count;
			compress_DDS_color_block(4, ublock, cblock);
			/* copy the data from the compressed color block
			   into the main buffer */
			for (x = 0; x < 8; ++x) {
				compressed[index++] = cblock[x];
			}
		}
	}
	return compressed;
}

/********* Helper Functions *********/

/*
	Re-scales the from_bits-wide value c into to_bits bits,
	with rounding.
*/
int convert_bit_range(int c, int from_bits, int to_bits)
{
	int b = (1 << (from_bits - 1)) + c * ((1 << to_bits) - 1);
	return (b + (b >> from_bits)) >> from_bits;
}

/* Packs 8-bit R,G,B into a 16-bit 5:6:5 value.
   (shift counts were octal 05/00 before; same values, now decimal) */
int rgb_to_565(int r, int g, int b)
{
	return (convert_bit_range(r, 8, 5) << 11) |
	       (convert_bit_range(g, 8, 6) << 5) |
	       (convert_bit_range(b, 8, 5) << 0);
}

/* Unpacks a 16-bit 5:6:5 value back to 8-bit R,G,B. */
void rgb_888_from_565(unsigned int c, int *r, int *g, int *b)
{
	*r = convert_bit_range((c >> 11) & 31, 5, 8);
	*g = convert_bit_range((c >> 5) & 63, 6, 8);
	*b = convert_bit_range((c >> 0) & 31, 5, 8);
}

/*
	Fits a line through the 16 RGB samples of a block:
	point[] gets the mean color, direction[] the dominant axis
	(covariance power-iteration, or std-dev fallback).
*/
void compute_color_line_STDEV(
		const unsigned char *const uncompressed,
		int channels,
		float point[3], float direction[3])
{
	const float inv_16 = 1.0f / 16.0f;
	int i;
	float sum_r = 0.0f, sum_g = 0.0f, sum_b = 0.0f;
	float sum_rr = 0.0f, sum_gg = 0.0f, sum_bb = 0.0f;
	float sum_rg = 0.0f, sum_rb = 0.0f, sum_gb = 0.0f;
	/* calculate all data needed for the covariance matrix
	   ( to compare with _rygdxt code) */
	for (i = 0; i < 16 * channels; i += channels) {
		sum_r += uncompressed[i + 0];
		sum_rr += uncompressed[i + 0] * uncompressed[i + 0];
		sum_g += uncompressed[i + 1];
		sum_gg += uncompressed[i + 1] * uncompressed[i + 1];
		sum_b += uncompressed[i + 2];
		sum_bb += uncompressed[i + 2] * uncompressed[i + 2];
		sum_rg += uncompressed[i + 0] * uncompressed[i + 1];
		sum_rb += uncompressed[i + 0] * uncompressed[i + 2];
		sum_gb += uncompressed[i + 1] * uncompressed[i + 2];
	}
	/* convert the sums to averages */
	sum_r *= inv_16;
	sum_g *= inv_16;
	sum_b *= inv_16;
	/* and convert the squares to the squares of the value - avg_value */
	sum_rr -= 16.0f * sum_r * sum_r;
	sum_gg -= 16.0f * sum_g * sum_g;
	sum_bb -= 16.0f * sum_b * sum_b;
	sum_rg -= 16.0f * sum_r * sum_g;
	sum_rb -= 16.0f * sum_r * sum_b;
	sum_gb -= 16.0f * sum_g * sum_b;
	/* the point on the color line is the average */
	point[0] = sum_r;
	point[1] = sum_g;
	point[2] = sum_b;
#if USE_COV_MAT
	/*
		The following idea was from ryg.
		(https://mollyrocket.com/forums/viewtopic.php?t=392)
		The method worked great (less RMSE than mine) most of
		the time, but had some issues handling some simple
		boundary cases, like full green next to full red,
		which would generate a covariance matrix like this:

		| 1  -1  0 |
		| -1  1  0 |
		| 0   0  0 |

		For a given starting vector, the power method can
		generate all zeros!  So no starting with {1,1,1}
		as I was doing!  This kind of error is still a
		slight posibillity, but will be very rare.
	*/
	/* use the covariance matrix directly
	   (1st iteration, don't use all 1.0 values!) */
	sum_r = 1.0f;
	sum_g = 2.718281828f;
	sum_b = 3.141592654f;
	direction[0] = sum_r * sum_rr + sum_g * sum_rg + sum_b * sum_rb;
	direction[1] = sum_r * sum_rg + sum_g * sum_gg + sum_b * sum_gb;
	direction[2] = sum_r * sum_rb + sum_g * sum_gb + sum_b * sum_bb;
	/* 2nd iteration, use results from the 1st guy */
	sum_r = direction[0];
	sum_g = direction[1];
	sum_b = direction[2];
	direction[0] = sum_r * sum_rr + sum_g * sum_rg + sum_b * sum_rb;
	direction[1] = sum_r * sum_rg + sum_g * sum_gg + sum_b * sum_gb;
	direction[2] = sum_r * sum_rb + sum_g * sum_gb + sum_b * sum_bb;
	/* 3rd iteration, use results from the 2nd guy */
	sum_r = direction[0];
	sum_g = direction[1];
	sum_b = direction[2];
	direction[0] = sum_r * sum_rr + sum_g * sum_rg + sum_b * sum_rb;
	direction[1] = sum_r * sum_rg + sum_g * sum_gg + sum_b * sum_gb;
	direction[2] = sum_r * sum_rb + sum_g * sum_gb + sum_b * sum_bb;
#else
	/* use my standard deviation method
	   (very robust, a tiny bit slower and less accurate) */
	direction[0] = sqrt(sum_rr);
	direction[1] = sqrt(sum_gg);
	direction[2] = sqrt(sum_bb);
	/* which has a greater component */
	if (sum_gg > sum_rr) {
		/* green has greater component, so base the other signs
		   off of green */
		if (sum_rg < 0.0f) {
			direction[0] = -direction[0];
		}
		if (sum_gb < 0.0f) {
			direction[2] = -direction[2];
		}
	} else {
		/* red has a greater component */
		if (sum_rg < 0.0f) {
			direction[1] = -direction[1];
		}
		if (sum_rb < 0.0f) {
			direction[2] = -direction[2];
		}
	}
#endif
}

/*
	Projects the block's 16 colors onto the fitted color line and
	returns the two extreme endpoints as packed 565 values,
	ordered so that *cmax > *cmin (the DXT1 4-color mode).
*/
void LSE_master_colors_max_min(
		int *cmax, int *cmin,
		int channels,
		const unsigned char *const uncompressed)
{
	int i, j;
	/* the master colors */
	int c0[3], c1[3];
	/* used for fitting the line */
	float sum_x[] = { 0.0f, 0.0f, 0.0f };
	float sum_x2[] = { 0.0f, 0.0f, 0.0f };
	float dot_max = 1.0f, dot_min = -1.0f;
	float vec_len2 = 0.0f;
	float dot;
	/* error check */
	if ((channels < 3) || (channels > 4)) {
		return;
	}
	compute_color_line_STDEV(uncompressed, channels, sum_x, sum_x2);
	/* epsilon keeps a degenerate (zero-length) line from dividing by 0 */
	vec_len2 = 1.0f / (0.00001f +
			   sum_x2[0] * sum_x2[0] +
			   sum_x2[1] * sum_x2[1] +
			   sum_x2[2] * sum_x2[2]);
	/* finding the max and min vector values */
	dot_max = (sum_x2[0] * uncompressed[0] +
		   sum_x2[1] * uncompressed[1] +
		   sum_x2[2] * uncompressed[2]);
	dot_min = dot_max;
	for (i = 1; i < 16; ++i) {
		dot = (sum_x2[0] * uncompressed[i * channels + 0] +
		       sum_x2[1] * uncompressed[i * channels + 1] +
		       sum_x2[2] * uncompressed[i * channels + 2]);
		if (dot < dot_min) {
			dot_min = dot;
		} else if (dot > dot_max) {
			dot_max = dot;
		}
	}
	/* and the offset (from the average location) */
	dot = sum_x2[0] * sum_x[0] + sum_x2[1] * sum_x[1] + sum_x2[2] * sum_x[2];
	dot_min -= dot;
	dot_max -= dot;
	/* post multiply by the scaling factor */
	dot_min *= vec_len2;
	dot_max *= vec_len2;
	/* OK, build the master colors */
	for (i = 0; i < 3; ++i) {
		/* color 0 */
		c0[i] = (int)(0.5f + sum_x[i] + dot_max * sum_x2[i]);
		if (c0[i] < 0) {
			c0[i] = 0;
		} else if (c0[i] > 255) {
			c0[i] = 255;
		}
		/* color 1 */
		c1[i] = (int)(0.5f + sum_x[i] + dot_min * sum_x2[i]);
		if (c1[i] < 0) {
			c1[i] = 0;
		} else if (c1[i] > 255) {
			c1[i] = 255;
		}
	}
	/* down_sample (with rounding?) */
	i = rgb_to_565(c0[0], c0[1], c0[2]);
	j = rgb_to_565(c1[0], c1[1], c1[2]);
	if (i > j) {
		*cmax = i;
		*cmin = j;
	} else {
		*cmax = j;
		*cmin = i;
	}
}

void compress_DDS_color_block(
		int channels,
		const unsigned char *const uncompressed,
		unsigned char compressed[8])
{
	/* variables */
	int i;
	int next_bit;
	int enc_c0, enc_c1;
	int c0[4], c1[4];
	float color_line[] = { 0.0f, 0.0f, 0.0f, 0.0f };
	float vec_len2 = 0.0f, dot_offset = 0.0f;
	/* stupid order: maps the linear index [0..3] onto the DXT1
	   code order c0, 2/3, 1/3, c1 */
	int swizzle4[] = { 0, 2, 3, 1 };
	/* get the master colors */
	LSE_master_colors_max_min(&enc_c0, &enc_c1, channels, uncompressed);
	/* store the 565 color 0 and color 1 */
	compressed[0] = (enc_c0 >> 0) & 255;
	compressed[1] = (enc_c0 >> 8) & 255;
	compressed[2] = (enc_c1 >> 0) & 255;
	compressed[3] = (enc_c1 >> 8) & 255;
	/* zero out the compressed data */
	compressed[4] = 0;
	compressed[5] = 0;
	compressed[6] = 0;
	compressed[7] = 0;
	/* reconstitute the master color vectors */
	rgb_888_from_565(enc_c0, &c0[0], &c0[1], &c0[2]);
	rgb_888_from_565(enc_c1, &c1[0], &c1[1], &c1[2]);
	/* the new vector */
	vec_len2 = 0.0f;
	for (i = 0; i < 3; ++i) {
		color_line[i] = c1[i] - c0[i];
		vec_len2 += color_line[i] * color_line[i];
	}
	if (vec_len2 > 0.0f) {
		vec_len2 = 1.0f / vec_len2;
	}
	/* pre-proform the scaling */
	color_line[0] *= vec_len2;
	color_line[1] *= vec_len2;
	color_line[2] *= vec_len2;
	/* compute the offset (constant) portion of the dot product */
	dot_offset = color_line[0] * c0[0] +
		     color_line[1] * c0[1] +
		     color_line[2] * c0[2];
	/* store the rest of the bits */
	next_bit = 8 * 4;
	for (i = 0; i < 16; ++i) {
		/* find the dot product of this color, to place it on the line
		   (should be [-1,1]) */
		int next_value = 0;
		float dot_product = color_line[0] * uncompressed[i * channels + 0] +
				    color_line[1] * uncompressed[i * channels + 1] +
				    color_line[2] * uncompressed[i * channels + 2] -
				    dot_offset;
		/* map to [0,3] */
		next_value = (int)(dot_product * 3.0f + 0.5f);
		if (next_value > 3) {
			next_value = 3;
		} else if (next_value < 0) {
			next_value = 0;
		}
		/* OK, store this value */
		compressed[next_bit >> 3] |= swizzle4[next_value] << (next_bit & 7);
		next_bit += 2;
	}
	/* done compressing to DXT1 */
}

void compress_DDS_alpha_block(
		const unsigned char *const uncompressed,
		unsigned char compressed[8])
{
	/* variables */
	int i;
	int next_bit;
	int a0, a1;
	float scale_me;
	/* stupid order: maps the linear index [0..7] onto the DXT5
	   8-alpha code order */
	int swizzle8[] = { 1, 7, 6, 5, 4, 3, 2, 0 };
	/* get the alpha limits (a0 > a1) */
	a0 = a1 = uncompressed[3];
	for (i = 4 + 3; i < 16 * 4; i += 4) {
		if (uncompressed[i] > a0) {
			a0 = uncompressed[i];
		} else if (uncompressed[i] < a1) {
			a1 = uncompressed[i];
		}
	}
	/* store those limits, and zero the rest of the compressed dataset */
	compressed[0] = a0;
	compressed[1] = a1;
	/* zero out the compressed data */
	compressed[2] = 0;
	compressed[3] = 0;
	compressed[4] = 0;
	compressed[5] = 0;
	compressed[6] = 0;
	compressed[7] = 0;
	/* store the all of the alpha values */
	next_bit = 8 * 2;
	/* FIX: a uniform-alpha block (a0 == a1) used to divide by zero,
	   producing inf/NaN and an undefined float->int cast.  A scale of
	   0 maps every pixel to linear value 0 -> code 1 (== a1 == a0),
	   which is exact. */
	scale_me = (a0 > a1) ? 7.9999f / (float)(a0 - a1) : 0.0f;
	for (i = 3; i < 16 * 4; i += 4) {
		/* convert this alpha value to a 3 bit number */
		int svalue;
		int value = (int)((uncompressed[i] - a1) * scale_me);
		svalue = swizzle8[value & 7];
		/* OK, store this value, start with the 1st byte */
		compressed[next_bit >> 3] |= svalue << (next_bit & 7);
		if ((next_bit & 7) > 5) {
			/* spans 2 bytes, fill in the start of the 2nd byte */
			compressed[1 + (next_bit >> 3)] |= svalue >> (8 - (next_bit & 7));
		}
		next_bit += 3;
	}
	/* done compressing to DXT5 alpha */
}
gpl-2.0
schlund-deprecated/kernel-for-mega
drivers/acpi/acpica/exstoren.c
606
9591
/******************************************************************************
 *
 * Module Name: exstoren - AML Interpreter object store support,
 *              Store to Node (namespace object)
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2008, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exstoren")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_resolve_object
 *
 * PARAMETERS:  source_desc_ptr     - Pointer to the source object
 *              target_type         - Current type of the target
 *              walk_state          - Current walk state
 *
 * RETURN:      Status, resolved object in source_desc_ptr.
 *
 * DESCRIPTION: Resolve an object.  If the object is a reference, dereference
 *              it and return the actual object in the source_desc_ptr.
 *
 ******************************************************************************/
acpi_status
acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
		       acpi_object_type target_type,
		       struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *source_desc = *source_desc_ptr;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_resolve_object);

	/* Ensure we have a Target that can be stored to */

	switch (target_type) {
	case ACPI_TYPE_BUFFER_FIELD:
	case ACPI_TYPE_LOCAL_REGION_FIELD:
	case ACPI_TYPE_LOCAL_BANK_FIELD:
	case ACPI_TYPE_LOCAL_INDEX_FIELD:
		/*
		 * These cases all require only Integers or values that
		 * can be converted to Integers (Strings or Buffers)
		 */
	case ACPI_TYPE_INTEGER:
	case ACPI_TYPE_STRING:
	case ACPI_TYPE_BUFFER:
		/*
		 * Stores into a Field/Region or into a Integer/Buffer/String
		 * are all essentially the same.  This case handles the
		 * "interchangeable" types Integer, String, and Buffer.
		 */
		if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {

			/* Resolve a reference object first */

			status =
			    acpi_ex_resolve_to_value(source_desc_ptr,
						     walk_state);
			if (ACPI_FAILURE(status)) {
				break;
			}
		}

		/* For copy_object, no further validation necessary */

		if (walk_state->opcode == AML_COPY_OP) {
			break;
		}

		/*
		 * Must have a Integer, Buffer, or String
		 *
		 * NOTE(review): source_desc is not re-read from
		 * *source_desc_ptr after the resolve above, so this check
		 * appears to inspect the pre-resolution object when a
		 * reference was dereferenced — confirm against ACPICA
		 * upstream before changing.
		 */
		if ((source_desc->common.type != ACPI_TYPE_INTEGER) &&
		    (source_desc->common.type != ACPI_TYPE_BUFFER) &&
		    (source_desc->common.type != ACPI_TYPE_STRING) &&
		    !((source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) &&
		      (source_desc->reference.class == ACPI_REFCLASS_TABLE))) {

			/* Conversion successful but still not a valid type */

			ACPI_ERROR((AE_INFO,
				    "Cannot assign type %s to %s (must be type Int/Str/Buf)",
				    acpi_ut_get_object_type_name(source_desc),
				    acpi_ut_get_type_name(target_type)));
			status = AE_AML_OPERAND_TYPE;
		}
		break;

	case ACPI_TYPE_LOCAL_ALIAS:
	case ACPI_TYPE_LOCAL_METHOD_ALIAS:
		/*
		 * All aliases should have been resolved earlier, during the
		 * operand resolution phase.
		 */
		ACPI_ERROR((AE_INFO, "Store into an unresolved Alias object"));
		status = AE_AML_INTERNAL;
		break;

	case ACPI_TYPE_PACKAGE:
	default:
		/*
		 * All other types than Alias and the various Fields come here,
		 * including the untyped case - ACPI_TYPE_ANY.
		 */
		break;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_store_object_to_object
 *
 * PARAMETERS:  source_desc         - Object to store
 *              dest_desc           - Object to receive a copy of the source
 *              new_desc            - New object if dest_desc is obsoleted
 *              walk_state          - Current walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: "Store" an object to another object.  This may include
 *              converting the source type to the target type (implicit
 *              conversion), and a copy of the value of the source to
 *              the target.
 *
 *              The Assignment of an object to another (not named) object
 *              is handled here.
 *              The Source passed in will replace the current value (if any)
 *              with the input value.
 *
 *              When storing into an object the data is converted to the
 *              target object type then stored in the object.  This means
 *              that the target object type (for an initialized target) will
 *              not be changed by a store operation.
 *
 *              This module allows destination types of Number, String,
 *              Buffer, and Package.
 *
 *              Assumes parameters are already validated.  NOTE: source_desc
 *              resolution (from a reference object) must be performed by
 *              the caller if necessary.
 *
 ******************************************************************************/
acpi_status
acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
			       union acpi_operand_object *dest_desc,
			       union acpi_operand_object **new_desc,
			       struct acpi_walk_state *walk_state)
{
	union acpi_operand_object *actual_src_desc;
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);

	actual_src_desc = source_desc;
	if (!dest_desc) {
		/*
		 * There is no destination object (An uninitialized node or
		 * package element), so we can simply copy the source object
		 * creating a new destination object
		 */
		status =
		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, new_desc,
						    walk_state);
		return_ACPI_STATUS(status);
	}

	if (source_desc->common.type != dest_desc->common.type) {
		/*
		 * The source type does not match the type of the destination.
		 * Perform the "implicit conversion" of the source to the current type
		 * of the target as per the ACPI specification.
		 *
		 * If no conversion performed, actual_src_desc = source_desc.
		 * Otherwise, actual_src_desc is a temporary object to hold the
		 * converted object.
		 */
		status = acpi_ex_convert_to_target_type(dest_desc->common.type,
							source_desc,
							&actual_src_desc,
							walk_state);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		if (source_desc == actual_src_desc) {
			/*
			 * No conversion was performed.  Return the source_desc as the
			 * new object.
			 */
			*new_desc = source_desc;
			return_ACPI_STATUS(AE_OK);
		}
	}

	/*
	 * We now have two objects of identical types, and we can perform a
	 * copy of the *value* of the source object.
	 */
	switch (dest_desc->common.type) {
	case ACPI_TYPE_INTEGER:

		dest_desc->integer.value = actual_src_desc->integer.value;

		/* Truncate value if we are executing from a 32-bit ACPI table */

		acpi_ex_truncate_for32bit_table(dest_desc);
		break;

	case ACPI_TYPE_STRING:

		status =
		    acpi_ex_store_string_to_string(actual_src_desc, dest_desc);
		break;

	case ACPI_TYPE_BUFFER:

		status =
		    acpi_ex_store_buffer_to_buffer(actual_src_desc, dest_desc);
		break;

	case ACPI_TYPE_PACKAGE:

		/* Deep-copies the package; dest_desc itself is reused */

		status =
		    acpi_ut_copy_iobject_to_iobject(actual_src_desc, &dest_desc,
						    walk_state);
		break;

	default:
		/*
		 * All other types come here.
		 */
		ACPI_WARNING((AE_INFO, "Store into type %s not implemented",
			      acpi_ut_get_object_type_name(dest_desc)));

		status = AE_NOT_IMPLEMENTED;
		break;
	}

	if (actual_src_desc != source_desc) {

		/* Delete the intermediate (temporary) source object */

		acpi_ut_remove_reference(actual_src_desc);
	}

	*new_desc = dest_desc;
	return_ACPI_STATUS(status);
}
gpl-2.0
codefarmer-cyk/linux
drivers/usb/gadget/function/f_ncm.c
606
43995
/* * f_ncm.c -- USB CDC Network (NCM) link function driver * * Copyright (C) 2010 Nokia Corporation * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com> * * The driver borrows from f_ecm.c which is: * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/crc32.h> #include <linux/usb/cdc.h> #include "u_ether.h" #include "u_ether_configfs.h" #include "u_ncm.h" /* * This function is a "CDC Network Control Model" (CDC NCM) Ethernet link. * NCM is intended to be used with high-speed network attachments. * * Note that NCM requires the use of "alternate settings" for its data * interface. This means that the set_alt() method has real work to do, * and also means that a get_alt() method is required. 
*/ /* to trigger crc/non-crc ndp signature */ #define NCM_NDP_HDR_CRC_MASK 0x01000000 #define NCM_NDP_HDR_CRC 0x01000000 #define NCM_NDP_HDR_NOCRC 0x00000000 enum ncm_notify_state { NCM_NOTIFY_NONE, /* don't notify */ NCM_NOTIFY_CONNECT, /* issue CONNECT next */ NCM_NOTIFY_SPEED, /* issue SPEED_CHANGE next */ }; struct f_ncm { struct gether port; u8 ctrl_id, data_id; char ethaddr[14]; struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; bool is_open; const struct ndp_parser_opts *parser_opts; bool is_crc; u32 ndp_sign; /* * for notification, it is accessed from both * callback and ethernet open/close */ spinlock_t lock; struct net_device *netdev; /* For multi-frame NDP TX */ struct sk_buff *skb_tx_data; struct sk_buff *skb_tx_ndp; u16 ndp_dgram_count; bool timer_force_tx; struct tasklet_struct tx_tasklet; struct hrtimer task_timer; bool timer_stopping; }; static inline struct f_ncm *func_to_ncm(struct usb_function *f) { return container_of(f, struct f_ncm, port.func); } /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ncm_bitrate(struct usb_gadget *g) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else return 19 * 64 * 1 * 1000 * 8; } /*-------------------------------------------------------------------------*/ /* * We cannot group frames so use just the minimal size which ok to put * one max-size ethernet frame. * If the host can group frames, allow it to do that, 16K is selected, * because it's used by default by the current linux host driver */ #define NTB_DEFAULT_IN_SIZE 16384 #define NTB_OUT_SIZE 16384 /* Allocation for storing the NDP, 32 should suffice for a * 16k packet. This allows a maximum of 32 * 507 Byte packets to * be transmitted in a single 16kB skb, though when sending full size * packets this limit will be plenty. * Smaller packets are not likely to be trying to maximize the * throughput and will be mstly sending smaller infrequent frames. 
 */

/* Maximum number of datagram pointer entries packed into one NTB's NDP. */
#define TX_MAX_NUM_DPE	32

/* Delay for the transmit to wait before sending an unfilled NTB frame. */
#define TX_TIMEOUT_NSECS	300000

#define FORMATS_SUPPORTED	(USB_CDC_NCM_NTB16_SUPPORTED |	\
				 USB_CDC_NCM_NTB32_SUPPORTED)

/*
 * NTB geometry advertised to the host via GET_NTB_PARAMETERS.
 * Both NTB16 and NTB32 formats are offered (FORMATS_SUPPORTED);
 * divisor/remainder/alignment of 4 keep datagrams 32-bit aligned.
 */
static struct usb_cdc_ncm_ntb_parameters ntb_parameters = {
	.wLength = cpu_to_le16(sizeof(ntb_parameters)),
	.bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED),
	.dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE),
	.wNdpInDivisor = cpu_to_le16(4),
	.wNdpInPayloadRemainder = cpu_to_le16(0),
	.wNdpInAlignment = cpu_to_le16(4),

	.dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE),
	.wNdpOutDivisor = cpu_to_le16(4),
	.wNdpOutPayloadRemainder = cpu_to_le16(0),
	.wNdpOutAlignment = cpu_to_le16(4),
};

/*
 * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
 * packet, to simplify cancellation; and a big transfer interval, to
 * waste less bandwidth.
 */
#define NCM_STATUS_INTERVAL_MS		32
#define NCM_STATUS_BYTECOUNT		16	/* 8 byte header + data */

/* IAD binds the control + data interfaces into one NCM function. */
static struct usb_interface_assoc_descriptor ncm_iad_desc = {
	.bLength =		sizeof ncm_iad_desc,
	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,

	/* .bFirstInterface =	DYNAMIC, */
	.bInterfaceCount =	2,	/* control + data */
	.bFunctionClass =	USB_CLASS_COMM,
	.bFunctionSubClass =	USB_CDC_SUBCLASS_NCM,
	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
	/* .iFunction =		DYNAMIC */
};

/* interface descriptor: */
static struct usb_interface_descriptor ncm_control_intf = {
	.bLength =		sizeof ncm_control_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_NCM,
	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc ncm_header_desc = {
	.bLength =		sizeof ncm_header_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_union_desc ncm_union_desc = {
	.bLength =		sizeof(ncm_union_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 =	DYNAMIC */
	/* .bSlaveInterface0 =	DYNAMIC */
};

static struct usb_cdc_ether_desc ecm_desc = {
	.bLength =		sizeof ecm_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,

	/* this descriptor actually adds value, surprise! */
	/* .iMACAddress = DYNAMIC */
	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
	.wNumberMCFilters =	cpu_to_le16(0),
	.bNumberPowerFilters =	0,
};

#define NCAPS	(USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE)

static struct usb_cdc_ncm_desc ncm_desc = {
	.bLength =		sizeof ncm_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_NCM_TYPE,

	.bcdNcmVersion =	cpu_to_le16(0x0100),
	/* can process SetEthernetPacketFilter */
	.bmNetworkCapabilities = NCAPS,
};

/* the default data interface has no endpoints ... */
static struct usb_interface_descriptor ncm_data_nop_intf = {
	.bLength =		sizeof ncm_data_nop_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	.bInterfaceNumber =	1,
	.bAlternateSetting =	0,
	.bNumEndpoints =	0,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
	/* .iInterface = DYNAMIC */
};

/* ... but the "real" data interface has two bulk endpoints */
static struct usb_interface_descriptor ncm_data_intf = {
	.bLength =		sizeof ncm_data_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	.bInterfaceNumber =	1,
	.bAlternateSetting =	1,
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	USB_CDC_NCM_PROTO_NTB,
	/* .iInterface = DYNAMIC */
};

/* full speed support: */
static struct usb_endpoint_descriptor fs_ncm_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
	.bInterval =		NCM_STATUS_INTERVAL_MS,
};

static struct usb_endpoint_descriptor fs_ncm_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_ncm_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *ncm_fs_function[] = {
	(struct usb_descriptor_header *) &ncm_iad_desc,
	/* CDC NCM control descriptors */
	(struct usb_descriptor_header *) &ncm_control_intf,
	(struct usb_descriptor_header *) &ncm_header_desc,
	(struct usb_descriptor_header *) &ncm_union_desc,
	(struct usb_descriptor_header *) &ecm_desc,
	(struct usb_descriptor_header *) &ncm_desc,
	(struct usb_descriptor_header *) &fs_ncm_notify_desc,
	/* data interface, altsettings 0 and 1 */
	(struct usb_descriptor_header *) &ncm_data_nop_intf,
	(struct usb_descriptor_header *) &ncm_data_intf,
	(struct usb_descriptor_header *) &fs_ncm_in_desc,
	(struct usb_descriptor_header *) &fs_ncm_out_desc,
	NULL,
};

/* high speed support: */
static struct usb_endpoint_descriptor hs_ncm_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(NCM_STATUS_BYTECOUNT),
	.bInterval =		USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
};

static struct usb_endpoint_descriptor hs_ncm_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_ncm_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *ncm_hs_function[] = {
	(struct usb_descriptor_header *) &ncm_iad_desc,
	/* CDC NCM control descriptors */
	(struct usb_descriptor_header *) &ncm_control_intf,
	(struct usb_descriptor_header *) &ncm_header_desc,
	(struct usb_descriptor_header *) &ncm_union_desc,
	(struct usb_descriptor_header *) &ecm_desc,
	(struct usb_descriptor_header *) &ncm_desc,
	(struct usb_descriptor_header *) &hs_ncm_notify_desc,
	/* data interface, altsettings 0 and 1 */
	(struct usb_descriptor_header *) &ncm_data_nop_intf,
	(struct usb_descriptor_header *) &ncm_data_intf,
	(struct usb_descriptor_header *) &hs_ncm_in_desc,
	(struct usb_descriptor_header *) &hs_ncm_out_desc,
	NULL,
};

/* string descriptors: */

#define STRING_CTRL_IDX	0
#define STRING_MAC_IDX	1
#define STRING_DATA_IDX	2
#define STRING_IAD_IDX	3

static struct usb_string ncm_string_defs[] = {
	[STRING_CTRL_IDX].s = "CDC Network Control Model (NCM)",
	[STRING_MAC_IDX].s = "",
	[STRING_DATA_IDX].s = "CDC Network Data",
	[STRING_IAD_IDX].s = "CDC NCM",
	{  } /* end of list */
};

static struct usb_gadget_strings ncm_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		ncm_string_defs,
};

static struct usb_gadget_strings *ncm_strings[] = {
	&ncm_string_table,
	NULL,
};

/*
 * Here are options for NCM Datagram Pointer table (NDP) parser.
 * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3),
 * in NDP16 offsets and sizes fields are 1 16bit word wide,
 * in NDP32 -- 2 16bit words wide. Also signatures are different.
 * To make the parser code the same, put the differences in the structure,
 * and switch pointers to the structures when the format is changed.
 */

struct ndp_parser_opts {
	u32		nth_sign;	/* expected NTH dwSignature */
	u32		ndp_sign;	/* expected NDP dwSignature (no-CRC variant) */
	unsigned	nth_size;	/* sizeof the NTH header struct */
	unsigned	ndp_size;	/* sizeof the NDP header struct */
	unsigned	dpe_size;	/* sizeof one datagram pointer entry */
	unsigned	ndplen_align;	/* required alignment of NDP wLength */
	/* sizes in u16 units */
	unsigned	dgram_item_len; /* index or length */
	unsigned	block_length;
	unsigned	ndp_index;
	unsigned	reserved1;
	unsigned	reserved2;
	unsigned	next_ndp_index;
};

#define INIT_NDP16_OPTS {					\
		.nth_sign = USB_CDC_NCM_NTH16_SIGN,		\
		.ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN,	\
		.nth_size = sizeof(struct usb_cdc_ncm_nth16),	\
		.ndp_size = sizeof(struct usb_cdc_ncm_ndp16),	\
		.dpe_size = sizeof(struct usb_cdc_ncm_dpe16),	\
		.ndplen_align = 4,				\
		.dgram_item_len = 1,				\
		.block_length = 1,				\
		.ndp_index = 1,					\
		.reserved1 = 0,					\
		.reserved2 = 0,					\
		.next_ndp_index = 1,				\
	}


#define INIT_NDP32_OPTS {					\
		.nth_sign = USB_CDC_NCM_NTH32_SIGN,		\
		.ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN,	\
		.nth_size = sizeof(struct usb_cdc_ncm_nth32),	\
		.ndp_size = sizeof(struct usb_cdc_ncm_ndp32),	\
		.dpe_size = sizeof(struct usb_cdc_ncm_dpe32),	\
		.ndplen_align = 8,				\
		.dgram_item_len = 2,				\
		.block_length = 2,				\
		.ndp_index = 2,					\
		.reserved1 = 1,					\
		.reserved2 = 2,					\
		.next_ndp_index = 2,				\
	}

static const struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
static const struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;

/*
 * Store a 16- or 32-bit little-endian value at *p and advance the
 * cursor; 'size' is in u16 units (1 == le16, 2 == le32), matching the
 * field-width members of ndp_parser_opts.
 */
static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
{
	switch (size) {
	case 1:
		put_unaligned_le16((u16)val, *p);
		break;
	case 2:
		put_unaligned_le32((u32)val, *p);
		break;
	default:
		BUG();
	}

	*p += size;
}

/*
 * Read a 16- or 32-bit little-endian value at *p and advance the
 * cursor; 'size' is in u16 units, the mirror image of put_ncm().
 */
static inline unsigned get_ncm(__le16 **p, unsigned size)
{
	unsigned tmp;

	switch (size) {
	case 1:
		tmp = get_unaligned_le16(*p);
		break;
	case 2:
		tmp = get_unaligned_le32(*p);
		break;
	default:
		BUG();
	}

	*p
 += size;
	return tmp;
}

/*-------------------------------------------------------------------------*/

/* Restore the function's default NTB state (NDP16, no CRC, default
 * filter and fixed NTB sizes); used at alloc time and on data-intf reset.
 */
static inline void ncm_reset_values(struct f_ncm *ncm)
{
	ncm->parser_opts = &ndp16_opts;
	ncm->is_crc = false;
	ncm->port.cdc_filter = DEFAULT_FILTER;

	/* doesn't make sense for ncm, fixed size used */
	ncm->port.header_len = 0;

	ncm->port.fixed_out_len = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
	ncm->port.fixed_in_len = NTB_DEFAULT_IN_SIZE;
}

/*
 * Send the next pending notification (connect or speed-change) on the
 * interrupt endpoint, driven by the ncm->notify_state state machine.
 * Context: ncm->lock held
 */
static void ncm_do_notify(struct f_ncm *ncm)
{
	struct usb_request		*req = ncm->notify_req;
	struct usb_cdc_notification	*event;
	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
	__le32				*data;
	int				status;

	/* notification already in flight? */
	if (!req)
		return;

	event = req->buf;
	switch (ncm->notify_state) {
	case NCM_NOTIFY_NONE:
		return;

	case NCM_NOTIFY_CONNECT:
		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
		if (ncm->is_open)
			event->wValue = cpu_to_le16(1);
		else
			event->wValue = cpu_to_le16(0);
		event->wLength = 0;
		req->length = sizeof *event;

		DBG(cdev, "notify connect %s\n",
				ncm->is_open ? "true" : "false");
		ncm->notify_state = NCM_NOTIFY_NONE;
		break;

	case NCM_NOTIFY_SPEED:
		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
		event->wValue = cpu_to_le16(0);
		event->wLength = cpu_to_le16(8);
		req->length = NCM_STATUS_BYTECOUNT;

		/* SPEED_CHANGE data is up/down speeds in bits/sec */
		data = req->buf + sizeof *event;
		data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
		data[1] = data[0];

		DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget));
		ncm->notify_state = NCM_NOTIFY_CONNECT;
		break;
	}
	event->bmRequestType = 0xA1;
	event->wIndex = cpu_to_le16(ncm->ctrl_id);

	/* Claim the request before queuing; the completion handler gives
	 * it back (and re-enters this function) under ncm->lock.
	 */
	ncm->notify_req = NULL;
	/*
	 * In double buffering if there is a space in FIFO,
	 * completion callback can be called right after the call,
	 * so unlocking
	 */
	spin_unlock(&ncm->lock);
	status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC);
	spin_lock(&ncm->lock);
	if (status < 0) {
		ncm->notify_req = req;
		DBG(cdev, "notify --> %d\n", status);
	}
}

/*
 * Kick the notification state machine from its initial SPEED state;
 * ncm_do_notify()/ncm_notify_complete() then walk SPEED -> CONNECT.
 * Context: ncm->lock held
 */
static void ncm_notify(struct f_ncm *ncm)
{
	/*
	 * NOTE on most versions of Linux, host side cdc-ethernet
	 * won't listen for notifications until its netdevice opens.
	 * The first notification then sits in the FIFO for a long
	 * time, and the second one is queued.
	 *
	 * If ncm_notify() is called before the second (CONNECT)
	 * notification is sent, then it will reset to send the SPEED
	 * notificaion again (and again, and again), but it's not a problem
	 */
	ncm->notify_state = NCM_NOTIFY_SPEED;
	ncm_do_notify(ncm);
}

/* Completion handler for the interrupt-IN notify request: return the
 * request to the function and send the next queued notification.
 */
static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_ncm			*ncm = req->context;
	struct usb_composite_dev	*cdev = ncm->port.func.config->cdev;
	struct usb_cdc_notification	*event = req->buf;

	spin_lock(&ncm->lock);
	switch (req->status) {
	case 0:
		VDBG(cdev, "Notification %02x sent\n",
		     event->bNotificationType);
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* endpoint is being torn down; stop the state machine */
		ncm->notify_state = NCM_NOTIFY_NONE;
		break;
	default:
		DBG(cdev, "event %02x --> %d\n",
			event->bNotificationType, req->status);
		break;
	}
	ncm->notify_req = req;
	ncm_do_notify(ncm);
	spin_unlock(&ncm->lock);
}

/* ep0 OUT data-stage completion: consume the 4-byte payload of
 * SET_NTB_INPUT_SIZE, validating it against the advertised bounds.
 */
static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* now for SET_NTB_INPUT_SIZE only */
	unsigned		in_size;
	struct usb_function	*f = req->context;
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = ep->driver_data;

	req->context = NULL;
	if (req->status || req->actual != req->length) {
		DBG(cdev, "Bad control-OUT transfer\n");
		goto invalid;
	}

	in_size = get_unaligned_le32(req->buf);
	if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE ||
	    in_size > le32_to_cpu(ntb_parameters.dwNtbInMaxSize)) {
		DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", in_size);
		goto invalid;
	}

	ncm->port.fixed_in_len = in_size;
	VDBG(cdev, "Set NTB INPUT SIZE %d\n", in_size);
	return;

invalid:
	usb_ep_set_halt(ep);
	return;
}

/* Handle NCM-specific ep0 class requests (NTB parameters/format/size,
 * CRC mode, packet filter); everything else is left to the composite core.
 */
static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/*
	 * composite driver infrastructure handles everything except
	 * CDC
 class messages; interface activation uses set_alt().
	 */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
		/*
		 * see 6.2.30: no data, wIndex = interface,
		 * wValue = packet filter bitmap
		 */
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		DBG(cdev, "packet filter %02x\n", w_value);
		/*
		 * REVISIT locking of cdc_filter.  This assumes the UDC
		 * driver won't have a concurrent packet TX irq running on
		 * another CPU; or that if it does, this write is atomic...
		 */
		ncm->port.cdc_filter = w_value;
		value = 0;
		break;
	/*
	 * and optionally:
	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
	 * case USB_CDC_GET_ETHERNET_STATISTIC:
	 */

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_PARAMETERS:

		if (w_length == 0 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* clamp to whichever is smaller: host's buffer or ours */
		value = w_length > sizeof ntb_parameters ?
			sizeof ntb_parameters : w_length;
		memcpy(req->buf, &ntb_parameters, value);
		VDBG(cdev, "Host asked NTB parameters\n");
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_INPUT_SIZE:

		if (w_length < 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		put_unaligned_le32(ncm->port.fixed_in_len, req->buf);
		value = 4;
		VDBG(cdev, "Host asked INPUT SIZE, sending %d\n",
		     ncm->port.fixed_in_len);
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_NTB_INPUT_SIZE:
	{
		if (w_length != 4 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		/* payload arrives in the data stage; validated there */
		req->complete = ncm_ep0out_complete;
		req->length = w_length;
		req->context = f;

		value = req->length;
		break;
	}

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_NTB_FORMAT:
	{
		uint16_t format;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		format = (ncm->parser_opts == &ndp16_opts) ? 0x0000 : 0x0001;
		put_unaligned_le16(format, req->buf);
		value = 2;
		VDBG(cdev, "Host asked NTB FORMAT, sending %d\n", format);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_NTB_FORMAT:
	{
		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		switch (w_value) {
		case 0x0000:
			ncm->parser_opts = &ndp16_opts;
			DBG(cdev, "NCM16 selected\n");
			break;
		case 0x0001:
			ncm->parser_opts = &ndp32_opts;
			DBG(cdev, "NCM32 selected\n");
			break;
		default:
			goto invalid;
		}
		value = 0;
		break;
	}
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_GET_CRC_MODE:
	{
		uint16_t is_crc;

		if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		is_crc = ncm->is_crc ? 0x0001 : 0x0000;
		put_unaligned_le16(is_crc, req->buf);
		value = 2;
		VDBG(cdev, "Host asked CRC MODE, sending %d\n", is_crc);
		break;
	}

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
		| USB_CDC_SET_CRC_MODE:
	{
		int ndp_hdr_crc = 0;

		if (w_length != 0 || w_index != ncm->ctrl_id)
			goto invalid;
		switch (w_value) {
		case 0x0000:
			ncm->is_crc = false;
			ndp_hdr_crc = NCM_NDP_HDR_NOCRC;
			DBG(cdev, "non-CRC mode selected\n");
			break;
		case 0x0001:
			ncm->is_crc = true;
			ndp_hdr_crc = NCM_NDP_HDR_CRC;
			DBG(cdev, "CRC mode selected\n");
			break;
		default:
			goto invalid;
		}
		/* NDP signature encodes the CRC mode */
		ncm->ndp_sign = ncm->parser_opts->ndp_sign | ndp_hdr_crc;
		value = 0;
		break;
	}

	/* and disabled in ncm descriptor: */
	/* case USB_CDC_GET_NET_ADDRESS: */
	/* case USB_CDC_SET_NET_ADDRESS: */
	/* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */
	/* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */

	default:
invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase?
 */
	if (value >= 0) {
		DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = 0;
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "ncm req %02x.%02x response err %d\n",
					ctrl->bRequestType, ctrl->bRequest,
					value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}

/* Activate/deactivate the control (alt 0 only) and data (alt 0/1)
 * interfaces; data alt 1 brings up the ethernet link via gether_connect().
 */
static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* Control interface has only altsetting 0 */
	if (intf == ncm->ctrl_id) {
		if (alt != 0)
			goto fail;

		if (ncm->notify->driver_data) {
			DBG(cdev, "reset ncm control %d\n", intf);
			usb_ep_disable(ncm->notify);
		}

		if (!(ncm->notify->desc)) {
			DBG(cdev, "init ncm ctrl %d\n", intf);
			if (config_ep_by_speed(cdev->gadget, f, ncm->notify))
				goto fail;
		}
		/* NOTE(review): usb_ep_enable() return value is ignored
		 * here — confirm whether a failed enable should fail
		 * the whole set_alt.
		 */
		usb_ep_enable(ncm->notify);
		ncm->notify->driver_data = ncm;

	/* Data interface has two altsettings, 0 and 1 */
	} else if (intf == ncm->data_id) {
		if (alt > 1)
			goto fail;

		if (ncm->port.in_ep->driver_data) {
			DBG(cdev, "reset ncm\n");
			/* stop the TX aggregation timer before tearing down */
			ncm->timer_stopping = true;
			ncm->netdev = NULL;
			gether_disconnect(&ncm->port);
			ncm_reset_values(ncm);
		}

		/*
		 * CDC Network only sends data in non-default altsettings.
		 * Changing altsettings resets filters, statistics, etc.
		 */
		if (alt == 1) {
			struct net_device	*net;

			if (!ncm->port.in_ep->desc ||
			    !ncm->port.out_ep->desc) {
				DBG(cdev, "init ncm\n");
				if (config_ep_by_speed(cdev->gadget, f,
						       ncm->port.in_ep) ||
				    config_ep_by_speed(cdev->gadget, f,
						       ncm->port.out_ep)) {
					ncm->port.in_ep->desc = NULL;
					ncm->port.out_ep->desc = NULL;
					goto fail;
				}
			}

			/* TODO */
			/* Enable zlps by default for NCM conformance;
			 * override for musb_hdrc (avoids txdma ovhead)
			 */
			ncm->port.is_zlp_ok = !(
				gadget_is_musbhdrc(cdev->gadget)
				);
			ncm->port.cdc_filter = DEFAULT_FILTER;
			DBG(cdev, "activate ncm\n");
			net = gether_connect(&ncm->port);
			if (IS_ERR(net))
				return PTR_ERR(net);
			ncm->netdev = net;
			ncm->timer_stopping = false;
		}

		spin_lock(&ncm->lock);
		ncm_notify(ncm);
		spin_unlock(&ncm->lock);
	} else
		goto fail;

	return 0;
fail:
	return -EINVAL;
}

/*
 * Because the data interface supports multiple altsettings,
 * this NCM function *MUST* implement a get_alt() method.
 */
static int ncm_get_alt(struct usb_function *f, unsigned intf)
{
	struct f_ncm		*ncm = func_to_ncm(f);

	if (intf == ncm->ctrl_id)
		return 0;
	/* data intf is "up" (alt 1) exactly when its IN ep is claimed */
	return ncm->port.in_ep->driver_data ?
 1 : 0;
}

/*
 * Finalize the NTB being aggregated: patch the block length and NDP
 * index into the NTH, patch the NDP wLength, then append padding, the
 * NDP itself and the terminating zero entry to the data skb.  Returns
 * the completed skb ready for USB transmission (consumes skb_tx_data
 * and skb_tx_ndp).
 */
static struct sk_buff *package_for_tx(struct f_ncm *ncm)
{
	__le16		*ntb_iter;
	struct sk_buff	*skb2 = NULL;
	unsigned	ndp_pad;
	unsigned	ndp_index;
	unsigned	new_len;

	const struct ndp_parser_opts *opts = ncm->parser_opts;
	const int ndp_align = le16_to_cpu(ntb_parameters.wNdpInAlignment);
	const int dgram_idx_len = 2 * 2 * opts->dgram_item_len;

	/* Stop the timer */
	hrtimer_try_to_cancel(&ncm->task_timer);

	ndp_pad = ALIGN(ncm->skb_tx_data->len, ndp_align) -
			ncm->skb_tx_data->len;
	ndp_index = ncm->skb_tx_data->len + ndp_pad;
	new_len = ndp_index + dgram_idx_len + ncm->skb_tx_ndp->len;

	/* Set the final BlockLength and wNdpIndex */
	ntb_iter = (void *) ncm->skb_tx_data->data;
	/* Increment pointer to BlockLength */
	ntb_iter += 2 + 1 + 1;
	put_ncm(&ntb_iter, opts->block_length, new_len);
	put_ncm(&ntb_iter, opts->ndp_index, ndp_index);

	/* Set the final NDP wLength */
	new_len = opts->ndp_size +
			(ncm->ndp_dgram_count * dgram_idx_len);
	ncm->ndp_dgram_count = 0;
	/* Increment from start to wLength */
	ntb_iter = (void *) ncm->skb_tx_ndp->data;
	ntb_iter += 2;
	put_unaligned_le16(new_len, ntb_iter);

	/* Merge the skbs */
	swap(skb2, ncm->skb_tx_data);
	if (ncm->skb_tx_data) {
		dev_kfree_skb_any(ncm->skb_tx_data);
		ncm->skb_tx_data = NULL;
	}

	/* Insert NDP alignment. */
	ntb_iter = (void *) skb_put(skb2, ndp_pad);
	memset(ntb_iter, 0, ndp_pad);

	/* Copy NTB across. */
	ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len);
	memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
	dev_kfree_skb_any(ncm->skb_tx_ndp);
	ncm->skb_tx_ndp = NULL;

	/* Insert zero'd datagram.
*/ ntb_iter = (void *) skb_put(skb2, dgram_idx_len); memset(ntb_iter, 0, dgram_idx_len); return skb2; } static struct sk_buff *ncm_wrap_ntb(struct gether *port, struct sk_buff *skb) { struct f_ncm *ncm = func_to_ncm(&port->func); struct sk_buff *skb2 = NULL; int ncb_len = 0; __le16 *ntb_data; __le16 *ntb_ndp; int dgram_pad; unsigned max_size = ncm->port.fixed_in_len; const struct ndp_parser_opts *opts = ncm->parser_opts; const int ndp_align = le16_to_cpu(ntb_parameters.wNdpInAlignment); const int div = le16_to_cpu(ntb_parameters.wNdpInDivisor); const int rem = le16_to_cpu(ntb_parameters.wNdpInPayloadRemainder); const int dgram_idx_len = 2 * 2 * opts->dgram_item_len; if (!skb && !ncm->skb_tx_data) return NULL; if (skb) { /* Add the CRC if required up front */ if (ncm->is_crc) { uint32_t crc; __le16 *crc_pos; crc = ~crc32_le(~0, skb->data, skb->len); crc_pos = (void *) skb_put(skb, sizeof(uint32_t)); put_unaligned_le32(crc, crc_pos); } /* If the new skb is too big for the current NCM NTB then * set the current stored skb to be sent now and clear it * ready for new data. * NOTE: Assume maximum align for speed of calculation. */ if (ncm->skb_tx_data && (ncm->ndp_dgram_count >= TX_MAX_NUM_DPE || (ncm->skb_tx_data->len + div + rem + skb->len + ncm->skb_tx_ndp->len + ndp_align + (2 * dgram_idx_len)) > max_size)) { skb2 = package_for_tx(ncm); if (!skb2) goto err; } if (!ncm->skb_tx_data) { ncb_len = opts->nth_size; dgram_pad = ALIGN(ncb_len, div) + rem - ncb_len; ncb_len += dgram_pad; /* Create a new skb for the NTH and datagrams. */ ncm->skb_tx_data = alloc_skb(max_size, GFP_ATOMIC); if (!ncm->skb_tx_data) goto err; ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len); memset(ntb_data, 0, ncb_len); /* dwSignature */ put_unaligned_le32(opts->nth_sign, ntb_data); ntb_data += 2; /* wHeaderLength */ put_unaligned_le16(opts->nth_size, ntb_data++); /* Allocate an skb for storing the NDP, * TX_MAX_NUM_DPE should easily suffice for a * 16k packet. 
*/ ncm->skb_tx_ndp = alloc_skb((int)(opts->ndp_size + opts->dpe_size * TX_MAX_NUM_DPE), GFP_ATOMIC); if (!ncm->skb_tx_ndp) goto err; ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp, opts->ndp_size); memset(ntb_ndp, 0, ncb_len); /* dwSignature */ put_unaligned_le32(ncm->ndp_sign, ntb_ndp); ntb_ndp += 2; /* There is always a zeroed entry */ ncm->ndp_dgram_count = 1; /* Note: we skip opts->next_ndp_index */ } /* Delay the timer. */ hrtimer_start(&ncm->task_timer, ktime_set(0, TX_TIMEOUT_NSECS), HRTIMER_MODE_REL); /* Add the datagram position entries */ ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp, dgram_idx_len); memset(ntb_ndp, 0, dgram_idx_len); ncb_len = ncm->skb_tx_data->len; dgram_pad = ALIGN(ncb_len, div) + rem - ncb_len; ncb_len += dgram_pad; /* (d)wDatagramIndex */ put_ncm(&ntb_ndp, opts->dgram_item_len, ncb_len); /* (d)wDatagramLength */ put_ncm(&ntb_ndp, opts->dgram_item_len, skb->len); ncm->ndp_dgram_count++; /* Add the new data to the skb */ ntb_data = (void *) skb_put(ncm->skb_tx_data, dgram_pad); memset(ntb_data, 0, dgram_pad); ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len); memcpy(ntb_data, skb->data, skb->len); dev_kfree_skb_any(skb); skb = NULL; } else if (ncm->skb_tx_data && ncm->timer_force_tx) { /* If the tx was requested because of a timeout then send */ skb2 = package_for_tx(ncm); if (!skb2) goto err; } return skb2; err: ncm->netdev->stats.tx_dropped++; if (skb) dev_kfree_skb_any(skb); if (ncm->skb_tx_data) dev_kfree_skb_any(ncm->skb_tx_data); if (ncm->skb_tx_ndp) dev_kfree_skb_any(ncm->skb_tx_ndp); return NULL; } /* * This transmits the NTB if there are frames waiting. */ static void ncm_tx_tasklet(unsigned long data) { struct f_ncm *ncm = (void *)data; if (ncm->timer_stopping) return; /* Only send if data is available. */ if (ncm->skb_tx_data) { ncm->timer_force_tx = true; /* XXX This allowance of a NULL skb argument to ndo_start_xmit * XXX is not sane. 
The gadget layer should be redesigned so
		 * XXX that the dev->wrap() invocations to build SKBs is transparent
		 * XXX and performed in some way outside of the ndo_start_xmit
		 * XXX interface.
		 */
		ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev);

		ncm->timer_force_tx = false;
	}
}

/*
 * The transmit should only be run if no skb data has been sent
 * for a certain duration.
 */
static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
	struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
	/* defer the actual transmit to tasklet (softirq) context */
	tasklet_schedule(&ncm->tx_tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Parse one received NTB from the host into individual ethernet frames,
 * queueing each (CRC stripped and verified when CRC mode is on) onto
 * 'list'.  Walks every NDP in the NTB and every datagram entry in each
 * NDP.  Consumes 'skb'; returns 0 on success or -EINVAL after purging
 * 'list' on a malformed NTB.
 *
 * NOTE(review): ndp_index and the per-datagram index/dg_len values come
 * from the (untrusted) host and are not bounds-checked against skb->len
 * here — confirm against a hardened version before trusting this parser
 * with hostile input.
 */
static int ncm_unwrap_ntb(struct gether *port,
			  struct sk_buff *skb,
			  struct sk_buff_head *list)
{
	struct f_ncm	*ncm = func_to_ncm(&port->func);
	__le16		*tmp = (void *) skb->data;
	unsigned	index, index2;
	int		ndp_index;
	unsigned	dg_len, dg_len2;
	unsigned	ndp_len;
	struct sk_buff	*skb2;
	int		ret = -EINVAL;
	unsigned	max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
	const struct ndp_parser_opts *opts = ncm->parser_opts;
	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
	int		dgram_counter;

	/* dwSignature */
	if (get_unaligned_le32(tmp) != opts->nth_sign) {
		INFO(port->func.config->cdev, "Wrong NTH SIGN, skblen %d\n",
			skb->len);
		print_hex_dump(KERN_INFO, "HEAD:", DUMP_PREFIX_ADDRESS, 32, 1,
			       skb->data, 32, false);

		goto err;
	}
	tmp += 2;
	/* wHeaderLength */
	if (get_unaligned_le16(tmp++) != opts->nth_size) {
		INFO(port->func.config->cdev, "Wrong NTB headersize\n");
		goto err;
	}
	tmp++; /* skip wSequence */

	/* (d)wBlockLength */
	if (get_ncm(&tmp, opts->block_length) > max_size) {
		INFO(port->func.config->cdev, "OUT size exceeded\n");
		goto err;
	}

	ndp_index = get_ncm(&tmp, opts->ndp_index);

	/* Run through all the NDP's in the NTB */
	do {
		/* NCM 3.2 */
		if (((ndp_index % 4) != 0) &&
				(ndp_index < opts->nth_size)) {
			INFO(port->func.config->cdev, "Bad index: %#X\n",
			     ndp_index);
			goto err;
		}

		/* walk through NDP */
		tmp = (void *)(skb->data + ndp_index);
		if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
			INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
			goto err;
		}
		tmp += 2;

		ndp_len = get_unaligned_le16(tmp++);
		/*
		 * NCM 3.3.1
		 * entry is 2 items
		 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
		 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
		 * Each entry is a dgram index and a dgram length.
		 */
		if ((ndp_len < opts->ndp_size
				+ 2 * 2 * (opts->dgram_item_len * 2))
				|| (ndp_len % opts->ndplen_align != 0)) {
			INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
			     ndp_len);
			goto err;
		}
		tmp += opts->reserved1;
		/* Check for another NDP (d)wNextNdpIndex */
		ndp_index = get_ncm(&tmp, opts->next_ndp_index);
		tmp += opts->reserved2;

		ndp_len -= opts->ndp_size;
		index2 = get_ncm(&tmp, opts->dgram_item_len);
		dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
		dgram_counter = 0;

		do {
			index = index2;
			dg_len = dg_len2;
			if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */
				INFO(port->func.config->cdev,
				     "Bad dgram length: %#X\n", dg_len);
				goto err;
			}
			if (ncm->is_crc) {
				uint32_t crc, crc2;

				crc = get_unaligned_le32(skb->data +
							 index + dg_len -
							 crc_len);
				crc2 = ~crc32_le(~0,
						 skb->data + index,
						 dg_len - crc_len);
				if (crc != crc2) {
					INFO(port->func.config->cdev,
					     "Bad CRC\n");
					goto err;
				}
			}

			/* fetch the next entry before consuming this one */
			index2 = get_ncm(&tmp, opts->dgram_item_len);
			dg_len2 = get_ncm(&tmp, opts->dgram_item_len);

			/*
			 * Copy the data into a new skb.
			 * This ensures the truesize is correct
			 */
			skb2 = netdev_alloc_skb_ip_align(ncm->netdev,
							 dg_len - crc_len);
			if (skb2 == NULL)
				goto err;
			memcpy(skb_put(skb2, dg_len - crc_len),
			       skb->data + index, dg_len - crc_len);

			skb_queue_tail(list, skb2);

			ndp_len -= 2 * (opts->dgram_item_len * 2);

			dgram_counter++;

			/* a zero entry terminates the datagram list */
			if (index2 == 0 || dg_len2 == 0)
				break;
		} while (ndp_len > 2 * (opts->dgram_item_len * 2));
	} while (ndp_index);

	dev_kfree_skb_any(skb);

	VDBG(port->func.config->cdev,
	     "Parsed NTB with %d frames\n", dgram_counter);
	return 0;
err:
	skb_queue_purge(list);
	dev_kfree_skb_any(skb);
	return ret;
}

/* Tear down both data and control sides when the host deconfigures us. */
static void ncm_disable(struct usb_function *f)
{
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	DBG(cdev, "ncm deactivated\n");

	if (ncm->port.in_ep->driver_data) {
		ncm->timer_stopping = true;
		ncm->netdev = NULL;
		gether_disconnect(&ncm->port);
	}

	if (ncm->notify->driver_data) {
		usb_ep_disable(ncm->notify);
		ncm->notify->driver_data = NULL;
		ncm->notify->desc = NULL;
	}
}

/*-------------------------------------------------------------------------*/

/*
 * Callbacks let us notify the host about connect/disconnect when the
 * net device is opened or closed.
 *
 * For testing, note that link states on this side include both opened
 * and closed variants of:
 *
 *   - disconnected/unconfigured
 *   - configured but inactive (data alt 0)
 *   - configured and active (data alt 1)
 *
 * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
 * SET_INTERFACE (altsetting).  Remember also that "configured" doesn't
 * imply the host is actually polling the notification endpoint, and
 * likewise that "active" doesn't imply it's actually using the data
 * endpoints for traffic.
 */

/* netdevice opened: tell the host the link is up. */
static void ncm_open(struct gether *geth)
{
	struct f_ncm		*ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	spin_lock(&ncm->lock);
	ncm->is_open = true;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}

/* netdevice closed: tell the host the link is down. */
static void ncm_close(struct gether *geth)
{
	struct f_ncm		*ncm = func_to_ncm(&geth->func);

	DBG(ncm->port.func.config->cdev, "%s\n", __func__);

	spin_lock(&ncm->lock);
	ncm->is_open = false;
	ncm_notify(ncm);
	spin_unlock(&ncm->lock);
}

/*-------------------------------------------------------------------------*/

/* ethernet function driver setup/binding */

/*
 * Bind the NCM function into a configuration: register the netdev (once
 * per instance), attach strings, allocate interface IDs and endpoints,
 * set up the notify request and descriptor tables, and initialize the
 * TX aggregation timer/tasklet.
 */
static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_ncm		*ncm = func_to_ncm(f);
	struct usb_string	*us;
	int			status;
	struct usb_ep		*ep;
	struct f_ncm_opts	*ncm_opts;

	if (!can_support_ecm(cdev->gadget))
		return -EINVAL;

	ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
	/*
	 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
	 * configurations are bound in sequence with list_for_each_entry,
	 * in each configuration its functions are bound in sequence
	 * with list_for_each_entry, so we assume no race condition
	 * with regard to ncm_opts->bound access
	 */
	if (!ncm_opts->bound) {
		mutex_lock(&ncm_opts->lock);
		gether_set_gadget(ncm_opts->net, cdev->gadget);
		status = gether_register_netdev(ncm_opts->net);
		mutex_unlock(&ncm_opts->lock);
		if (status)
			return status;
		ncm_opts->bound = true;
	}
	us = usb_gstrings_attach(cdev, ncm_strings,
				 ARRAY_SIZE(ncm_string_defs));
	if (IS_ERR(us))
		return PTR_ERR(us);
	ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
	ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
	ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
	ecm_desc.iMACAddress = us[STRING_MAC_IDX].id;
	ncm_iad_desc.iFunction = us[STRING_IAD_IDX].id;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ncm->ctrl_id = status;
	ncm_iad_desc.bFirstInterface = status;

	ncm_control_intf.bInterfaceNumber = status;
	ncm_union_desc.bMasterInterface0 = status;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	ncm->data_id = status;

	ncm_data_nop_intf.bInterfaceNumber = status;
	ncm_data_intf.bInterfaceNumber = status;
	ncm_union_desc.bSlaveInterface0 = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc);
	if (!ep)
		goto fail;
	ncm->port.in_ep = ep;
	ep->driver_data = cdev;	/* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc);
	if (!ep)
		goto fail;
	ncm->port.out_ep = ep;
	ep->driver_data = cdev;	/* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc);
	if (!ep)
		goto fail;
	ncm->notify = ep;
	ep->driver_data = cdev;	/* claim */

	status = -ENOMEM;

	/* allocate notification request and buffer */
	ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!ncm->notify_req)
		goto fail;
	ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL);
	if (!ncm->notify_req->buf)
		goto fail;
	ncm->notify_req->context = ncm;
	ncm->notify_req->complete = ncm_notify_complete;

	/*
	 * support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	hs_ncm_in_desc.bEndpointAddress = fs_ncm_in_desc.bEndpointAddress;
	hs_ncm_out_desc.bEndpointAddress = fs_ncm_out_desc.bEndpointAddress;
	hs_ncm_notify_desc.bEndpointAddress =
		fs_ncm_notify_desc.bEndpointAddress;

	status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
			NULL);
	if (status)
		goto fail;

	/*
	 * NOTE:  all that is done without knowing or caring about
	 * the network link ... which is unavailable to this code
	 * until we're activated via set_alt().
	 */

	ncm->port.open = ncm_open;
	ncm->port.close = ncm_close;

	tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet,
		     (unsigned long) ncm);
	hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ncm->task_timer.function = ncm_tx_timeout;

	DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			ncm->port.in_ep->name, ncm->port.out_ep->name,
			ncm->notify->name);
	return 0;

fail:
	if (ncm->notify_req) {
		kfree(ncm->notify_req->buf);
		usb_ep_free_request(ncm->notify, ncm->notify_req);
	}

	/* we might as well release our claims on endpoints */
	if (ncm->notify)
		ncm->notify->driver_data = NULL;
	if (ncm->port.out_ep)
		ncm->port.out_ep->driver_data = NULL;
	if (ncm->port.in_ep)
		ncm->port.in_ep->driver_data = NULL;

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}

static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_ncm_opts,
			    func_inst.group);
}

/* f_ncm_item_ops */
USB_ETHERNET_CONFIGFS_ITEM(ncm);

/* f_ncm_opts_dev_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm);

/* f_ncm_opts_host_addr */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm);

/* f_ncm_opts_qmult */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);

/* f_ncm_opts_ifname */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);

static struct configfs_attribute *ncm_attrs[] = {
	&f_ncm_opts_dev_addr.attr,
	&f_ncm_opts_host_addr.attr,
	&f_ncm_opts_qmult.attr,
	&f_ncm_opts_ifname.attr,
	NULL,
};

static struct config_item_type ncm_func_type = {
	.ct_item_ops	= &ncm_item_ops,
	.ct_attrs	= ncm_attrs,
	.ct_owner	= THIS_MODULE,
};

/* Release an instance's options; the netdev is either a registered one
 * (cleaned up via gether) or a never-registered one (plain free).
 */
static void ncm_free_inst(struct usb_function_instance *f)
{
	struct f_ncm_opts *opts;

	opts = container_of(f, struct f_ncm_opts, func_inst);
	if (opts->bound)
		gether_cleanup(netdev_priv(opts->net));
	else
		free_netdev(opts->net);
	kfree(opts);
}

/* Allocate a configfs function instance with its backing net device. */
static struct usb_function_instance *ncm_alloc_inst(void)
{
	struct f_ncm_opts *opts;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if
(!opts) return ERR_PTR(-ENOMEM); mutex_init(&opts->lock); opts->func_inst.free_func_inst = ncm_free_inst; opts->net = gether_setup_default(); if (IS_ERR(opts->net)) { struct net_device *net = opts->net; kfree(opts); return ERR_CAST(net); } config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type); return &opts->func_inst; } static void ncm_free(struct usb_function *f) { struct f_ncm *ncm; struct f_ncm_opts *opts; ncm = func_to_ncm(f); opts = container_of(f->fi, struct f_ncm_opts, func_inst); kfree(ncm); mutex_lock(&opts->lock); opts->refcnt--; mutex_unlock(&opts->lock); } static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_ncm *ncm = func_to_ncm(f); DBG(c->cdev, "ncm unbind\n"); hrtimer_cancel(&ncm->task_timer); tasklet_kill(&ncm->tx_tasklet); ncm_string_defs[0].id = 0; usb_free_all_descriptors(f); kfree(ncm->notify_req->buf); usb_ep_free_request(ncm->notify, ncm->notify_req); } static struct usb_function *ncm_alloc(struct usb_function_instance *fi) { struct f_ncm *ncm; struct f_ncm_opts *opts; int status; /* allocate and initialize one new instance */ ncm = kzalloc(sizeof(*ncm), GFP_KERNEL); if (!ncm) return ERR_PTR(-ENOMEM); opts = container_of(fi, struct f_ncm_opts, func_inst); mutex_lock(&opts->lock); opts->refcnt++; /* export host's Ethernet address in CDC format */ status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr, sizeof(ncm->ethaddr)); if (status < 12) { /* strlen("01234567890a") */ kfree(ncm); mutex_unlock(&opts->lock); return ERR_PTR(-EINVAL); } ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr; spin_lock_init(&ncm->lock); ncm_reset_values(ncm); ncm->port.ioport = netdev_priv(opts->net); mutex_unlock(&opts->lock); ncm->port.is_fixed = true; ncm->port.supports_multi_frame = true; ncm->port.func.name = "cdc_network"; /* descriptors are per-instance copies */ ncm->port.func.bind = ncm_bind; ncm->port.func.unbind = ncm_unbind; ncm->port.func.set_alt = ncm_set_alt; ncm->port.func.get_alt = ncm_get_alt; 
ncm->port.func.setup = ncm_setup; ncm->port.func.disable = ncm_disable; ncm->port.func.free_func = ncm_free; ncm->port.wrap = ncm_wrap_ntb; ncm->port.unwrap = ncm_unwrap_ntb; return &ncm->port.func; } DECLARE_USB_FUNCTION_INIT(ncm, ncm_alloc_inst, ncm_alloc); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Yauheni Kaliuta");
gpl-2.0
Soaa-/-lightspeed-vision
drivers/char/hvsi.c
606
32449
/* * Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS * and the service processor on IBM pSeries servers. On these servers, there * are no serial ports under the OS's control, and sometimes there is no other * console available either. However, the service processor has two standard * serial ports, so this over-complicated protocol allows the OS to control * those ports by proxy. * * Besides data, the procotol supports the reading/writing of the serial * port's DTR line, and the reading of the CD line. This is to allow the OS to * control a modem attached to the service processor's serial port. Note that * the OS cannot change the speed of the port through this protocol. 
*/ #undef DEBUG #include <linux/console.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/major.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/sysrq.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <asm/hvcall.h> #include <asm/hvconsole.h> #include <asm/prom.h> #include <asm/uaccess.h> #include <asm/vio.h> #include <asm/param.h> #define HVSI_MAJOR 229 #define HVSI_MINOR 128 #define MAX_NR_HVSI_CONSOLES 4 #define HVSI_TIMEOUT (5*HZ) #define HVSI_VERSION 1 #define HVSI_MAX_PACKET 256 #define HVSI_MAX_READ 16 #define HVSI_MAX_OUTGOING_DATA 12 #define N_OUTBUF 12 /* * we pass data via two 8-byte registers, so we would like our char arrays * properly aligned for those loads. */ #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) struct hvsi_struct { struct delayed_work writer; struct work_struct handshaker; wait_queue_head_t emptyq; /* woken when outbuf is emptied */ wait_queue_head_t stateq; /* woken when HVSI state changes */ spinlock_t lock; int index; struct tty_struct *tty; int count; uint8_t throttle_buf[128]; uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */ /* inbuf is for packet reassembly. leave a little room for leftovers. 
*/ uint8_t inbuf[HVSI_MAX_PACKET + HVSI_MAX_READ]; uint8_t *inbuf_end; int n_throttle; int n_outbuf; uint32_t vtermno; uint32_t virq; atomic_t seqno; /* HVSI packet sequence number */ uint16_t mctrl; uint8_t state; /* HVSI protocol state */ uint8_t flags; #ifdef CONFIG_MAGIC_SYSRQ uint8_t sysrq; #endif /* CONFIG_MAGIC_SYSRQ */ }; static struct hvsi_struct hvsi_ports[MAX_NR_HVSI_CONSOLES]; static struct tty_driver *hvsi_driver; static int hvsi_count; static int (*hvsi_wait)(struct hvsi_struct *hp, int state); enum HVSI_PROTOCOL_STATE { HVSI_CLOSED, HVSI_WAIT_FOR_VER_RESPONSE, HVSI_WAIT_FOR_VER_QUERY, HVSI_OPEN, HVSI_WAIT_FOR_MCTRL_RESPONSE, HVSI_FSP_DIED, }; #define HVSI_CONSOLE 0x1 #define VS_DATA_PACKET_HEADER 0xff #define VS_CONTROL_PACKET_HEADER 0xfe #define VS_QUERY_PACKET_HEADER 0xfd #define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc /* control verbs */ #define VSV_SET_MODEM_CTL 1 /* to service processor only */ #define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */ #define VSV_CLOSE_PROTOCOL 3 /* query verbs */ #define VSV_SEND_VERSION_NUMBER 1 #define VSV_SEND_MODEM_CTL_STATUS 2 /* yes, these masks are not consecutive. 
*/ #define HVSI_TSDTR 0x01 #define HVSI_TSCD 0x20 struct hvsi_header { uint8_t type; uint8_t len; uint16_t seqno; } __attribute__((packed)); struct hvsi_data { uint8_t type; uint8_t len; uint16_t seqno; uint8_t data[HVSI_MAX_OUTGOING_DATA]; } __attribute__((packed)); struct hvsi_control { uint8_t type; uint8_t len; uint16_t seqno; uint16_t verb; /* optional depending on verb: */ uint32_t word; uint32_t mask; } __attribute__((packed)); struct hvsi_query { uint8_t type; uint8_t len; uint16_t seqno; uint16_t verb; } __attribute__((packed)); struct hvsi_query_response { uint8_t type; uint8_t len; uint16_t seqno; uint16_t verb; uint16_t query_seqno; union { uint8_t version; uint32_t mctrl_word; } u; } __attribute__((packed)); static inline int is_console(struct hvsi_struct *hp) { return hp->flags & HVSI_CONSOLE; } static inline int is_open(struct hvsi_struct *hp) { /* if we're waiting for an mctrl then we're already open */ return (hp->state == HVSI_OPEN) || (hp->state == HVSI_WAIT_FOR_MCTRL_RESPONSE); } static inline void print_state(struct hvsi_struct *hp) { #ifdef DEBUG static const char *state_names[] = { "HVSI_CLOSED", "HVSI_WAIT_FOR_VER_RESPONSE", "HVSI_WAIT_FOR_VER_QUERY", "HVSI_OPEN", "HVSI_WAIT_FOR_MCTRL_RESPONSE", "HVSI_FSP_DIED", }; const char *name = state_names[hp->state]; if (hp->state > ARRAY_SIZE(state_names)) name = "UNKNOWN"; pr_debug("hvsi%i: state = %s\n", hp->index, name); #endif /* DEBUG */ } static inline void __set_state(struct hvsi_struct *hp, int state) { hp->state = state; print_state(hp); wake_up_all(&hp->stateq); } static inline void set_state(struct hvsi_struct *hp, int state) { unsigned long flags; spin_lock_irqsave(&hp->lock, flags); __set_state(hp, state); spin_unlock_irqrestore(&hp->lock, flags); } static inline int len_packet(const uint8_t *packet) { return (int)((struct hvsi_header *)packet)->len; } static inline int is_header(const uint8_t *packet) { struct hvsi_header *header = (struct hvsi_header *)packet; return header->type >= 
VS_QUERY_RESPONSE_PACKET_HEADER; } static inline int got_packet(const struct hvsi_struct *hp, uint8_t *packet) { if (hp->inbuf_end < packet + sizeof(struct hvsi_header)) return 0; /* don't even have the packet header */ if (hp->inbuf_end < (packet + len_packet(packet))) return 0; /* don't have the rest of the packet */ return 1; } /* shift remaining bytes in packetbuf down */ static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to) { int remaining = (int)(hp->inbuf_end - read_to); pr_debug("%s: %i chars remain\n", __func__, remaining); if (read_to != hp->inbuf) memmove(hp->inbuf, read_to, remaining); hp->inbuf_end = hp->inbuf + remaining; } #ifdef DEBUG #define dbg_dump_packet(packet) dump_packet(packet) #define dbg_dump_hex(data, len) dump_hex(data, len) #else #define dbg_dump_packet(packet) do { } while (0) #define dbg_dump_hex(data, len) do { } while (0) #endif static void dump_hex(const uint8_t *data, int len) { int i; printk(" "); for (i=0; i < len; i++) printk("%.2x", data[i]); printk("\n "); for (i=0; i < len; i++) { if (isprint(data[i])) printk("%c", data[i]); else printk("."); } printk("\n"); } static void dump_packet(uint8_t *packet) { struct hvsi_header *header = (struct hvsi_header *)packet; printk("type 0x%x, len %i, seqno %i:\n", header->type, header->len, header->seqno); dump_hex(packet, header->len); } static int hvsi_read(struct hvsi_struct *hp, char *buf, int count) { unsigned long got; got = hvc_get_chars(hp->vtermno, buf, count); return got; } static void hvsi_recv_control(struct hvsi_struct *hp, uint8_t *packet, struct tty_struct **to_hangup, struct hvsi_struct **to_handshake) { struct hvsi_control *header = (struct hvsi_control *)packet; switch (header->verb) { case VSV_MODEM_CTL_UPDATE: if ((header->word & HVSI_TSCD) == 0) { /* CD went away; no more connection */ pr_debug("hvsi%i: CD dropped\n", hp->index); hp->mctrl &= TIOCM_CD; /* If userland hasn't done an open(2) yet, hp->tty is NULL. 
*/ if (hp->tty && !(hp->tty->flags & CLOCAL)) *to_hangup = hp->tty; } break; case VSV_CLOSE_PROTOCOL: pr_debug("hvsi%i: service processor came back\n", hp->index); if (hp->state != HVSI_CLOSED) { *to_handshake = hp; } break; default: printk(KERN_WARNING "hvsi%i: unknown HVSI control packet: ", hp->index); dump_packet(packet); break; } } static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet) { struct hvsi_query_response *resp = (struct hvsi_query_response *)packet; switch (hp->state) { case HVSI_WAIT_FOR_VER_RESPONSE: __set_state(hp, HVSI_WAIT_FOR_VER_QUERY); break; case HVSI_WAIT_FOR_MCTRL_RESPONSE: hp->mctrl = 0; if (resp->u.mctrl_word & HVSI_TSDTR) hp->mctrl |= TIOCM_DTR; if (resp->u.mctrl_word & HVSI_TSCD) hp->mctrl |= TIOCM_CD; __set_state(hp, HVSI_OPEN); break; default: printk(KERN_ERR "hvsi%i: unexpected query response: ", hp->index); dump_packet(packet); break; } } /* respond to service processor's version query */ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno) { struct hvsi_query_response packet __ALIGNED__; int wrote; packet.type = VS_QUERY_RESPONSE_PACKET_HEADER; packet.len = sizeof(struct hvsi_query_response); packet.seqno = atomic_inc_return(&hp->seqno); packet.verb = VSV_SEND_VERSION_NUMBER; packet.u.version = HVSI_VERSION; packet.query_seqno = query_seqno+1; pr_debug("%s: sending %i bytes\n", __func__, packet.len); dbg_dump_hex((uint8_t*)&packet, packet.len); wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); if (wrote != packet.len) { printk(KERN_ERR "hvsi%i: couldn't send query response!\n", hp->index); return -EIO; } return 0; } static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet) { struct hvsi_query *query = (struct hvsi_query *)packet; switch (hp->state) { case HVSI_WAIT_FOR_VER_QUERY: hvsi_version_respond(hp, query->seqno); __set_state(hp, HVSI_OPEN); break; default: printk(KERN_ERR "hvsi%i: unexpected query: ", hp->index); dump_packet(packet); break; } } static void 
hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len) { int i; for (i=0; i < len; i++) { char c = buf[i]; #ifdef CONFIG_MAGIC_SYSRQ if (c == '\0') { hp->sysrq = 1; continue; } else if (hp->sysrq) { handle_sysrq(c, hp->tty); hp->sysrq = 0; continue; } #endif /* CONFIG_MAGIC_SYSRQ */ tty_insert_flip_char(hp->tty, c, 0); } } /* * We could get 252 bytes of data at once here. But the tty layer only * throttles us at TTY_THRESHOLD_THROTTLE (128) bytes, so we could overflow * it. Accordingly we won't send more than 128 bytes at a time to the flip * buffer, which will give the tty buffer a chance to throttle us. Should the * value of TTY_THRESHOLD_THROTTLE change in n_tty.c, this code should be * revisited. */ #define TTY_THRESHOLD_THROTTLE 128 static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp, const uint8_t *packet) { const struct hvsi_header *header = (const struct hvsi_header *)packet; const uint8_t *data = packet + sizeof(struct hvsi_header); int datalen = header->len - sizeof(struct hvsi_header); int overflow = datalen - TTY_THRESHOLD_THROTTLE; pr_debug("queueing %i chars '%.*s'\n", datalen, datalen, data); if (datalen == 0) return NULL; if (overflow > 0) { pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__); datalen = TTY_THRESHOLD_THROTTLE; } hvsi_insert_chars(hp, data, datalen); if (overflow > 0) { /* * we still have more data to deliver, so we need to save off the * overflow and send it later */ pr_debug("%s: deferring overflow\n", __func__); memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow); hp->n_throttle = overflow; } return hp->tty; } /* * Returns true/false indicating data successfully read from hypervisor. * Used both to get packets for tty connections and to advance the state * machine during console handshaking (in which case tty = NULL and we ignore * incoming data). 
*/ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip, struct tty_struct **hangup, struct hvsi_struct **handshake) { uint8_t *packet = hp->inbuf; int chunklen; *flip = NULL; *hangup = NULL; *handshake = NULL; chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ); if (chunklen == 0) { pr_debug("%s: 0-length read\n", __func__); return 0; } pr_debug("%s: got %i bytes\n", __func__, chunklen); dbg_dump_hex(hp->inbuf_end, chunklen); hp->inbuf_end += chunklen; /* handle all completed packets */ while ((packet < hp->inbuf_end) && got_packet(hp, packet)) { struct hvsi_header *header = (struct hvsi_header *)packet; if (!is_header(packet)) { printk(KERN_ERR "hvsi%i: got malformed packet\n", hp->index); /* skip bytes until we find a header or run out of data */ while ((packet < hp->inbuf_end) && (!is_header(packet))) packet++; continue; } pr_debug("%s: handling %i-byte packet\n", __func__, len_packet(packet)); dbg_dump_packet(packet); switch (header->type) { case VS_DATA_PACKET_HEADER: if (!is_open(hp)) break; if (hp->tty == NULL) break; /* no tty buffer to put data in */ *flip = hvsi_recv_data(hp, packet); break; case VS_CONTROL_PACKET_HEADER: hvsi_recv_control(hp, packet, hangup, handshake); break; case VS_QUERY_RESPONSE_PACKET_HEADER: hvsi_recv_response(hp, packet); break; case VS_QUERY_PACKET_HEADER: hvsi_recv_query(hp, packet); break; default: printk(KERN_ERR "hvsi%i: unknown HVSI packet type 0x%x\n", hp->index, header->type); dump_packet(packet); break; } packet += len_packet(packet); if (*hangup || *handshake) { pr_debug("%s: hangup or handshake\n", __func__); /* * we need to send the hangup now before receiving any more data. * If we get "data, hangup, data", we can't deliver the second * data before the hangup. 
*/ break; } } compact_inbuf(hp, packet); return 1; } static void hvsi_send_overflow(struct hvsi_struct *hp) { pr_debug("%s: delivering %i bytes overflow\n", __func__, hp->n_throttle); hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle); hp->n_throttle = 0; } /* * must get all pending data because we only get an irq on empty->non-empty * transition */ static irqreturn_t hvsi_interrupt(int irq, void *arg) { struct hvsi_struct *hp = (struct hvsi_struct *)arg; struct tty_struct *flip; struct tty_struct *hangup; struct hvsi_struct *handshake; unsigned long flags; int again = 1; pr_debug("%s\n", __func__); while (again) { spin_lock_irqsave(&hp->lock, flags); again = hvsi_load_chunk(hp, &flip, &hangup, &handshake); spin_unlock_irqrestore(&hp->lock, flags); /* * we have to call tty_flip_buffer_push() and tty_hangup() outside our * spinlock. But we also have to keep going until we've read all the * available data. */ if (flip) { /* there was data put in the tty flip buffer */ tty_flip_buffer_push(flip); flip = NULL; } if (hangup) { tty_hangup(hangup); } if (handshake) { pr_debug("hvsi%i: attempting re-handshake\n", handshake->index); schedule_work(&handshake->handshaker); } } spin_lock_irqsave(&hp->lock, flags); if (hp->tty && hp->n_throttle && (!test_bit(TTY_THROTTLED, &hp->tty->flags))) { /* we weren't hung up and we weren't throttled, so we can deliver the * rest now */ flip = hp->tty; hvsi_send_overflow(hp); } spin_unlock_irqrestore(&hp->lock, flags); if (flip) { tty_flip_buffer_push(flip); } return IRQ_HANDLED; } /* for boot console, before the irq handler is running */ static int __init poll_for_state(struct hvsi_struct *hp, int state) { unsigned long end_jiffies = jiffies + HVSI_TIMEOUT; for (;;) { hvsi_interrupt(hp->virq, (void *)hp); /* get pending data */ if (hp->state == state) return 0; mdelay(5); if (time_after(jiffies, end_jiffies)) return -EIO; } } /* wait for irq handler to change our state */ static int wait_for_state(struct hvsi_struct *hp, int state) 
{ int ret = 0; if (!wait_event_timeout(hp->stateq, (hp->state == state), HVSI_TIMEOUT)) ret = -EIO; return ret; } static int hvsi_query(struct hvsi_struct *hp, uint16_t verb) { struct hvsi_query packet __ALIGNED__; int wrote; packet.type = VS_QUERY_PACKET_HEADER; packet.len = sizeof(struct hvsi_query); packet.seqno = atomic_inc_return(&hp->seqno); packet.verb = verb; pr_debug("%s: sending %i bytes\n", __func__, packet.len); dbg_dump_hex((uint8_t*)&packet, packet.len); wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); if (wrote != packet.len) { printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index, wrote); return -EIO; } return 0; } static int hvsi_get_mctrl(struct hvsi_struct *hp) { int ret; set_state(hp, HVSI_WAIT_FOR_MCTRL_RESPONSE); hvsi_query(hp, VSV_SEND_MODEM_CTL_STATUS); ret = hvsi_wait(hp, HVSI_OPEN); if (ret < 0) { printk(KERN_ERR "hvsi%i: didn't get modem flags\n", hp->index); set_state(hp, HVSI_OPEN); return ret; } pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl); return 0; } /* note that we can only set DTR */ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl) { struct hvsi_control packet __ALIGNED__; int wrote; packet.type = VS_CONTROL_PACKET_HEADER, packet.seqno = atomic_inc_return(&hp->seqno); packet.len = sizeof(struct hvsi_control); packet.verb = VSV_SET_MODEM_CTL; packet.mask = HVSI_TSDTR; if (mctrl & TIOCM_DTR) packet.word = HVSI_TSDTR; pr_debug("%s: sending %i bytes\n", __func__, packet.len); dbg_dump_hex((uint8_t*)&packet, packet.len); wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); if (wrote != packet.len) { printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index); return -EIO; } return 0; } static void hvsi_drain_input(struct hvsi_struct *hp) { uint8_t buf[HVSI_MAX_READ] __ALIGNED__; unsigned long end_jiffies = jiffies + HVSI_TIMEOUT; while (time_before(end_jiffies, jiffies)) if (0 == hvsi_read(hp, buf, HVSI_MAX_READ)) break; } static int hvsi_handshake(struct hvsi_struct *hp) { int 
ret; /* * We could have a CLOSE or other data waiting for us before we even try * to open; try to throw it all away so we don't get confused. (CLOSE * is the first message sent up the pipe when the FSP comes online. We * need to distinguish between "it came up a while ago and we're the first * user" and "it was just reset before it saw our handshake packet".) */ hvsi_drain_input(hp); set_state(hp, HVSI_WAIT_FOR_VER_RESPONSE); ret = hvsi_query(hp, VSV_SEND_VERSION_NUMBER); if (ret < 0) { printk(KERN_ERR "hvsi%i: couldn't send version query\n", hp->index); return ret; } ret = hvsi_wait(hp, HVSI_OPEN); if (ret < 0) return ret; return 0; } static void hvsi_handshaker(struct work_struct *work) { struct hvsi_struct *hp = container_of(work, struct hvsi_struct, handshaker); if (hvsi_handshake(hp) >= 0) return; printk(KERN_ERR "hvsi%i: re-handshaking failed\n", hp->index); if (is_console(hp)) { /* * ttys will re-attempt the handshake via hvsi_open, but * the console will not. */ printk(KERN_ERR "hvsi%i: lost console!\n", hp->index); } } static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count) { struct hvsi_data packet __ALIGNED__; int ret; BUG_ON(count > HVSI_MAX_OUTGOING_DATA); packet.type = VS_DATA_PACKET_HEADER; packet.seqno = atomic_inc_return(&hp->seqno); packet.len = count + sizeof(struct hvsi_header); memcpy(&packet.data, buf, count); ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); if (ret == packet.len) { /* return the number of chars written, not the packet length */ return count; } return ret; /* return any errors */ } static void hvsi_close_protocol(struct hvsi_struct *hp) { struct hvsi_control packet __ALIGNED__; packet.type = VS_CONTROL_PACKET_HEADER; packet.seqno = atomic_inc_return(&hp->seqno); packet.len = 6; packet.verb = VSV_CLOSE_PROTOCOL; pr_debug("%s: sending %i bytes\n", __func__, packet.len); dbg_dump_hex((uint8_t*)&packet, packet.len); hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); } static int 
hvsi_open(struct tty_struct *tty, struct file *filp) { struct hvsi_struct *hp; unsigned long flags; int line = tty->index; int ret; pr_debug("%s\n", __func__); if (line < 0 || line >= hvsi_count) return -ENODEV; hp = &hvsi_ports[line]; tty->driver_data = hp; mb(); if (hp->state == HVSI_FSP_DIED) return -EIO; spin_lock_irqsave(&hp->lock, flags); hp->tty = tty; hp->count++; atomic_set(&hp->seqno, 0); h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE); spin_unlock_irqrestore(&hp->lock, flags); if (is_console(hp)) return 0; /* this has already been handshaked as the console */ ret = hvsi_handshake(hp); if (ret < 0) { printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name); return ret; } ret = hvsi_get_mctrl(hp); if (ret < 0) { printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name); return ret; } ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR); if (ret < 0) { printk(KERN_ERR "%s: couldn't set DTR\n", tty->name); return ret; } return 0; } /* wait for hvsi_write_worker to empty hp->outbuf */ static void hvsi_flush_output(struct hvsi_struct *hp) { wait_event_timeout(hp->emptyq, (hp->n_outbuf <= 0), HVSI_TIMEOUT); /* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */ cancel_delayed_work(&hp->writer); flush_scheduled_work(); /* * it's also possible that our timeout expired and hvsi_write_worker * didn't manage to push outbuf. poof. 
*/ hp->n_outbuf = 0; } static void hvsi_close(struct tty_struct *tty, struct file *filp) { struct hvsi_struct *hp = tty->driver_data; unsigned long flags; pr_debug("%s\n", __func__); if (tty_hung_up_p(filp)) return; spin_lock_irqsave(&hp->lock, flags); if (--hp->count == 0) { hp->tty = NULL; hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */ /* only close down connection if it is not the console */ if (!is_console(hp)) { h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); /* no more irqs */ __set_state(hp, HVSI_CLOSED); /* * any data delivered to the tty layer after this will be * discarded (except for XON/XOFF) */ tty->closing = 1; spin_unlock_irqrestore(&hp->lock, flags); /* let any existing irq handlers finish. no more will start. */ synchronize_irq(hp->virq); /* hvsi_write_worker will re-schedule until outbuf is empty. */ hvsi_flush_output(hp); /* tell FSP to stop sending data */ hvsi_close_protocol(hp); /* * drain anything FSP is still in the middle of sending, and let * hvsi_handshake drain the rest on the next open. 
*/ hvsi_drain_input(hp); spin_lock_irqsave(&hp->lock, flags); } } else if (hp->count < 0) printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n", hp - hvsi_ports, hp->count); spin_unlock_irqrestore(&hp->lock, flags); } static void hvsi_hangup(struct tty_struct *tty) { struct hvsi_struct *hp = tty->driver_data; unsigned long flags; pr_debug("%s\n", __func__); spin_lock_irqsave(&hp->lock, flags); hp->count = 0; hp->n_outbuf = 0; hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); } /* called with hp->lock held */ static void hvsi_push(struct hvsi_struct *hp) { int n; if (hp->n_outbuf <= 0) return; n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf); if (n > 0) { /* success */ pr_debug("%s: wrote %i chars\n", __func__, n); hp->n_outbuf = 0; } else if (n == -EIO) { __set_state(hp, HVSI_FSP_DIED); printk(KERN_ERR "hvsi%i: service processor died\n", hp->index); } } /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ static void hvsi_write_worker(struct work_struct *work) { struct hvsi_struct *hp = container_of(work, struct hvsi_struct, writer.work); unsigned long flags; #ifdef DEBUG static long start_j = 0; if (start_j == 0) start_j = jiffies; #endif /* DEBUG */ spin_lock_irqsave(&hp->lock, flags); pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf); if (!is_open(hp)) { /* * We could have a non-open connection if the service processor died * while we were busily scheduling ourselves. In that case, it could * be minutes before the service processor comes back, so only try * again once a second. 
*/ schedule_delayed_work(&hp->writer, HZ); goto out; } hvsi_push(hp); if (hp->n_outbuf > 0) schedule_delayed_work(&hp->writer, 10); else { #ifdef DEBUG pr_debug("%s: outbuf emptied after %li jiffies\n", __func__, jiffies - start_j); start_j = 0; #endif /* DEBUG */ wake_up_all(&hp->emptyq); tty_wakeup(hp->tty); } out: spin_unlock_irqrestore(&hp->lock, flags); } static int hvsi_write_room(struct tty_struct *tty) { struct hvsi_struct *hp = tty->driver_data; return N_OUTBUF - hp->n_outbuf; } static int hvsi_chars_in_buffer(struct tty_struct *tty) { struct hvsi_struct *hp = tty->driver_data; return hp->n_outbuf; } static int hvsi_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct hvsi_struct *hp = tty->driver_data; const char *source = buf; unsigned long flags; int total = 0; int origcount = count; spin_lock_irqsave(&hp->lock, flags); pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf); if (!is_open(hp)) { /* we're either closing or not yet open; don't accept data */ pr_debug("%s: not open\n", __func__); goto out; } /* * when the hypervisor buffer (16K) fills, data will stay in hp->outbuf * and hvsi_write_worker will be scheduled. subsequent hvsi_write() calls * will see there is no room in outbuf and return. */ while ((count > 0) && (hvsi_write_room(hp->tty) > 0)) { int chunksize = min(count, hvsi_write_room(hp->tty)); BUG_ON(hp->n_outbuf < 0); memcpy(hp->outbuf + hp->n_outbuf, source, chunksize); hp->n_outbuf += chunksize; total += chunksize; source += chunksize; count -= chunksize; hvsi_push(hp); } if (hp->n_outbuf > 0) { /* * we weren't able to write it all to the hypervisor. * schedule another push attempt. */ schedule_delayed_work(&hp->writer, 10); } out: spin_unlock_irqrestore(&hp->lock, flags); if (total != origcount) pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount, total); return total; } /* * I have never seen throttle or unthrottle called, so this little throttle * buffering scheme may or may not work. 
*/ static void hvsi_throttle(struct tty_struct *tty) { struct hvsi_struct *hp = tty->driver_data; pr_debug("%s\n", __func__); h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); } static void hvsi_unthrottle(struct tty_struct *tty) { struct hvsi_struct *hp = tty->driver_data; unsigned long flags; int shouldflip = 0; pr_debug("%s\n", __func__); spin_lock_irqsave(&hp->lock, flags); if (hp->n_throttle) { hvsi_send_overflow(hp); shouldflip = 1; } spin_unlock_irqrestore(&hp->lock, flags); if (shouldflip) tty_flip_buffer_push(hp->tty); h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE); } static int hvsi_tiocmget(struct tty_struct *tty, struct file *file) { struct hvsi_struct *hp = tty->driver_data; hvsi_get_mctrl(hp); return hp->mctrl; } static int hvsi_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct hvsi_struct *hp = tty->driver_data; unsigned long flags; uint16_t new_mctrl; /* we can only alter DTR */ clear &= TIOCM_DTR; set &= TIOCM_DTR; spin_lock_irqsave(&hp->lock, flags); new_mctrl = (hp->mctrl & ~clear) | set; if (hp->mctrl != new_mctrl) { hvsi_set_mctrl(hp, new_mctrl); hp->mctrl = new_mctrl; } spin_unlock_irqrestore(&hp->lock, flags); return 0; } static const struct tty_operations hvsi_ops = { .open = hvsi_open, .close = hvsi_close, .write = hvsi_write, .hangup = hvsi_hangup, .write_room = hvsi_write_room, .chars_in_buffer = hvsi_chars_in_buffer, .throttle = hvsi_throttle, .unthrottle = hvsi_unthrottle, .tiocmget = hvsi_tiocmget, .tiocmset = hvsi_tiocmset, }; static int __init hvsi_init(void) { int i; hvsi_driver = alloc_tty_driver(hvsi_count); if (!hvsi_driver) return -ENOMEM; hvsi_driver->owner = THIS_MODULE; hvsi_driver->driver_name = "hvsi"; hvsi_driver->name = "hvsi"; hvsi_driver->major = HVSI_MAJOR; hvsi_driver->minor_start = HVSI_MINOR; hvsi_driver->type = TTY_DRIVER_TYPE_SYSTEM; hvsi_driver->init_termios = tty_std_termios; hvsi_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL; hvsi_driver->init_termios.c_ispeed 
= 9600; hvsi_driver->init_termios.c_ospeed = 9600; hvsi_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(hvsi_driver, &hvsi_ops); for (i=0; i < hvsi_count; i++) { struct hvsi_struct *hp = &hvsi_ports[i]; int ret = 1; ret = request_irq(hp->virq, hvsi_interrupt, IRQF_DISABLED, "hvsi", hp); if (ret) printk(KERN_ERR "HVSI: couldn't reserve irq 0x%x (error %i)\n", hp->virq, ret); } hvsi_wait = wait_for_state; /* irqs active now */ if (tty_register_driver(hvsi_driver)) panic("Couldn't register hvsi console driver\n"); printk(KERN_DEBUG "HVSI: registered %i devices\n", hvsi_count); return 0; } device_initcall(hvsi_init); /***** console (not tty) code: *****/ static void hvsi_console_print(struct console *console, const char *buf, unsigned int count) { struct hvsi_struct *hp = &hvsi_ports[console->index]; char c[HVSI_MAX_OUTGOING_DATA] __ALIGNED__; unsigned int i = 0, n = 0; int ret, donecr = 0; mb(); if (!is_open(hp)) return; /* * ugh, we have to translate LF -> CRLF ourselves, in place. 
* NOTE(review): this chunk begins mid-way through hvsi_console_print(); the
* write loop below mirrors the hvc_console.c loop: '\n' is expanded to
* "\r\n" (donecr tracks the inserted '\r'), and the staging buffer c[] is
* flushed through hvsi_put_chars() once full or once input is exhausted.
* NOTE(review): in the flush branch, "if (ret < 0) i = 0; i -= ret;" sets
* i to -ret (a positive value) after an error instead of discarding the
* buffered chars, which looks like it can spin the outer loop; upstream
* hvsi.c guards the "i -= ret" with an else - TODO confirm against the
* full function, whose start is outside this chunk.
* copied from hvc_console.c: */ while (count > 0 || i > 0) { if (count > 0 && i < sizeof(c)) { if (buf[n] == '\n' && !donecr) { c[i++] = '\r'; donecr = 1; } else { c[i++] = buf[n++]; donecr = 0; --count; } } else { ret = hvsi_put_chars(hp, c, i); if (ret < 0) i = 0; i -= ret; } } } static struct tty_driver *hvsi_console_device(struct console *console, int *index) { *index = console->index; return hvsi_driver; } static int __init hvsi_console_setup(struct console *console, char *options) { struct hvsi_struct *hp; int ret; if (console->index < 0 || console->index >= hvsi_count) return -1; hp = &hvsi_ports[console->index]; /* give the FSP a chance to change the baud rate when we re-open */ hvsi_close_protocol(hp); ret = hvsi_handshake(hp); if (ret < 0) return ret; ret = hvsi_get_mctrl(hp); if (ret < 0) return ret; ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR); if (ret < 0) return ret; hp->flags |= HVSI_CONSOLE; return 0; } static struct console hvsi_con_driver = { .name = "hvsi", .write = hvsi_console_print, .device = hvsi_console_device, .setup = hvsi_console_setup, .flags = CON_PRINTBUFFER, .index = -1, }; static int __init hvsi_console_init(void) { struct device_node *vty; hvsi_wait = poll_for_state; /* no irqs yet; must poll */ /* search device tree for vty nodes */ for (vty = of_find_compatible_node(NULL, "serial", "hvterm-protocol"); vty != NULL; vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) { struct hvsi_struct *hp; const uint32_t *vtermno, *irq; vtermno = of_get_property(vty, "reg", NULL); irq = of_get_property(vty, "interrupts", NULL); if (!vtermno || !irq) continue; if (hvsi_count >= MAX_NR_HVSI_CONSOLES) { of_node_put(vty); break; } hp = &hvsi_ports[hvsi_count]; INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker); INIT_WORK(&hp->handshaker, hvsi_handshaker); init_waitqueue_head(&hp->emptyq); init_waitqueue_head(&hp->stateq); spin_lock_init(&hp->lock); hp->index = hvsi_count; hp->inbuf_end = hp->inbuf; hp->state = HVSI_CLOSED; hp->vtermno
/* statement continues: hp->vtermno = *vtermno ("reg" property holds the vterm number) */
 = *vtermno; hp->virq = irq_create_mapping(NULL, irq[0]); if (hp->virq == NO_IRQ) { printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", __func__, irq[0]); continue; } hvsi_count++; } if (hvsi_count) register_console(&hvsi_con_driver); return 0; } console_initcall(hvsi_console_init);
gpl-2.0
tb-303/GFRG110
sound/pci/trident/trident.c
606
5885
/*
 * NOTE(review): complete ALSA glue for the Trident 4DWave DX/NX and
 * SiS SI7018 PCI cards: module parameter declarations, the PCI ID table,
 * snd_trident_probe() (creates the card, PCM/foldback/S-PDIF devices,
 * MPU-401 UART for non-SI7018 chips, and a gameport, freeing the card on
 * any failure), snd_trident_remove(), and pci_driver registration.
 * NOTE(review): "enable" is declared as int[] but registered with
 * module_param_array(enable, bool, ...) - accepted on this kernel
 * vintage; later kernels require bool storage. Verify before forward-porting.
 */
/* * Driver for Trident 4DWave DX/NX & SiS SI7018 Audio PCI soundcard * * Driver was originated by Trident <audio@tridentmicro.com> * Fri Feb 19 15:55:28 MST 1999 * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/trident.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, <audio@tridentmicro.com>"); MODULE_DESCRIPTION("Trident 4D-WaveDX/NX & SiS SI7018"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Trident,4DWave DX}," "{Trident,4DWave NX}," "{SiS,SI7018 PCI Audio}," "{Best Union,Miss Melody 4DWave PCI}," "{HIS,4DWave PCI}," "{Warpspeed,ONSpeed 4DWave PCI}," "{Aztech Systems,PCI 64-Q3D}," "{Addonics,SV 750}," "{CHIC,True Sound 4Dwave}," "{Shark,Predator4D-PCI}," "{Jaton,SonicWave 4D}," "{Hoontech,SoundTrack Digital 4DWave NX}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static int pcm_channels[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 32}; static int wavetable_size[SNDRV_CARDS] = {[0 ... 
/* initializer continues: default wavetable_size is 8192 kB per card */
(SNDRV_CARDS - 1)] = 8192}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Trident 4DWave PCI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Trident 4DWave PCI soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Trident 4DWave PCI soundcard."); module_param_array(pcm_channels, int, NULL, 0444); MODULE_PARM_DESC(pcm_channels, "Number of hardware channels assigned for PCM."); module_param_array(wavetable_size, int, NULL, 0444); MODULE_PARM_DESC(wavetable_size, "Maximum memory size in kB for wavetable synth."); static struct pci_device_id snd_trident_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_DX), PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0}, {PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_TRIDENT_4DWAVE_NX), 0, 0, 0}, {PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7018), 0, 0, 0}, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_trident_ids); static int __devinit snd_trident_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_trident *trident; const char *str; int err, pcm_dev = 0; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; if ((err = snd_trident_create(card, pci, pcm_channels[dev], ((pci->vendor << 16) | pci->device) == TRIDENT_DEVICE_ID_SI7018 ? 
/* ternary continues: 1 for SI7018, 2 otherwise (argument to snd_trident_create) */
1 : 2, wavetable_size[dev], &trident)) < 0) { snd_card_free(card); return err; } card->private_data = trident; switch (trident->device) { case TRIDENT_DEVICE_ID_DX: str = "TRID4DWAVEDX"; break; case TRIDENT_DEVICE_ID_NX: str = "TRID4DWAVENX"; break; case TRIDENT_DEVICE_ID_SI7018: str = "SI7018"; break; default: str = "Unknown"; } strcpy(card->driver, str); if (trident->device == TRIDENT_DEVICE_ID_SI7018) { strcpy(card->shortname, "SiS "); } else { strcpy(card->shortname, "Trident "); } strcat(card->shortname, card->driver); sprintf(card->longname, "%s PCI Audio at 0x%lx, irq %d", card->shortname, trident->port, trident->irq); if ((err = snd_trident_pcm(trident, pcm_dev++, NULL)) < 0) { snd_card_free(card); return err; } switch (trident->device) { case TRIDENT_DEVICE_ID_DX: case TRIDENT_DEVICE_ID_NX: if ((err = snd_trident_foldback_pcm(trident, pcm_dev++, NULL)) < 0) { snd_card_free(card); return err; } break; } if (trident->device == TRIDENT_DEVICE_ID_NX || trident->device == TRIDENT_DEVICE_ID_SI7018) { if ((err = snd_trident_spdif_pcm(trident, pcm_dev++, NULL)) < 0) { snd_card_free(card); return err; } } if (trident->device != TRIDENT_DEVICE_ID_SI7018 && (err = snd_mpu401_uart_new(card, 0, MPU401_HW_TRID4DWAVE, trident->midi_port, MPU401_INFO_INTEGRATED, trident->irq, 0, &trident->rmidi)) < 0) { snd_card_free(card); return err; } snd_trident_create_gameport(trident); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_trident_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = "Trident4DWaveAudio", .id_table = snd_trident_ids, .probe = snd_trident_probe, .remove = __devexit_p(snd_trident_remove), #ifdef CONFIG_PM .suspend = snd_trident_suspend, .resume = snd_trident_resume, #endif }; static int __init alsa_card_trident_init(void) { return pci_register_driver(&driver); } static 
/* module exit: unregister the PCI driver */
void __exit alsa_card_trident_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_trident_init) module_exit(alsa_card_trident_exit)
gpl-2.0
MadRocker/jet-2.6.39.5
drivers/acpi/processor_thermal.c
1118
6326
/*
 * NOTE(review): complete passive-cooling submodule of the ACPI processor
 * driver. Under CONFIG_CPU_FREQ, each of the reduction steps 0..3 clamps a
 * CPU's max_freq by an extra 20% via a cpufreq policy notifier
 * (CPUFREQ_ADJUST -> cpufreq_verify_within_limits); steps beyond the
 * cpufreq range fall through to ACPI T-state throttling. The
 * processor_get/set_* callbacks below are exported through
 * processor_cooling_ops for the thermal framework.
 * NOTE(review): the processor_*_state callbacks call
 * acpi_driver_data(device) BEFORE the "if (!device || !pr)" NULL check, so
 * a NULL cdev->devdata would be dereferenced before being caught - TODO
 * confirm whether devdata can ever be NULL here.
 */
/* * processor_thermal.c - Passive cooling submodule of the ACPI processor driver * * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - Added processor hotplug support * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/sysdev.h> #include <asm/uaccess.h> #include <acpi/acpi_bus.h> #include <acpi/processor.h> #include <acpi/acpi_drivers.h> #define PREFIX "ACPI: " #define ACPI_PROCESSOR_CLASS "processor" #define _COMPONENT ACPI_PROCESSOR_COMPONENT ACPI_MODULE_NAME("processor_thermal"); #ifdef CONFIG_CPU_FREQ /* If a passive cooling situation is detected, primarily CPUfreq is used, as it * offers (in most cases) voltage scaling in addition to frequency scaling, and * thus a cubic (instead of linear) reduction of energy. Also, we allow for * _any_ cpufreq driver and not only the acpi-cpufreq driver. 
* (comment continues; the reduction-step #defines and per-cpu state follow)
*/ #define CPUFREQ_THERMAL_MIN_STEP 0 #define CPUFREQ_THERMAL_MAX_STEP 3 static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg); static unsigned int acpi_thermal_cpufreq_is_init = 0; static int cpu_has_cpufreq(unsigned int cpu) { struct cpufreq_policy policy; if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) return 0; return 1; } static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb, unsigned long event, void *data) { struct cpufreq_policy *policy = data; unsigned long max_freq = 0; if (event != CPUFREQ_ADJUST) goto out; max_freq = ( policy->cpuinfo.max_freq * (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20) ) / 100; cpufreq_verify_within_limits(policy, 0, max_freq); out: return 0; } static struct notifier_block acpi_thermal_cpufreq_notifier_block = { .notifier_call = acpi_thermal_cpufreq_notifier, }; static int cpufreq_get_max_state(unsigned int cpu) { if (!cpu_has_cpufreq(cpu)) return 0; return CPUFREQ_THERMAL_MAX_STEP; } static int cpufreq_get_cur_state(unsigned int cpu) { if (!cpu_has_cpufreq(cpu)) return 0; return per_cpu(cpufreq_thermal_reduction_pctg, cpu); } static int cpufreq_set_cur_state(unsigned int cpu, int state) { if (!cpu_has_cpufreq(cpu)) return 0; per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state; cpufreq_update_policy(cpu); return 0; } void acpi_thermal_cpufreq_init(void) { int i; for (i = 0; i < nr_cpu_ids; i++) if (cpu_present(i)) per_cpu(cpufreq_thermal_reduction_pctg, i) = 0; i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); if (!i) acpi_thermal_cpufreq_is_init = 1; } void acpi_thermal_cpufreq_exit(void) { if (acpi_thermal_cpufreq_is_init) cpufreq_unregister_notifier (&acpi_thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); acpi_thermal_cpufreq_is_init = 0; } #else /* ! 
CONFIG_CPU_FREQ */ static int cpufreq_get_max_state(unsigned int cpu) { return 0; } static int cpufreq_get_cur_state(unsigned int cpu) { return 0; } static int cpufreq_set_cur_state(unsigned int cpu, int state) { return 0; } #endif int acpi_processor_get_limit_info(struct acpi_processor *pr) { if (!pr) return -EINVAL; if (pr->flags.throttling) pr->flags.limit = 1; return 0; } /* thermal cooling device callbacks */ static int acpi_processor_max_state(struct acpi_processor *pr) { int max_state = 0; /* * There exist four states according to * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3 */ max_state += cpufreq_get_max_state(pr->id); if (pr->flags.throttling) max_state += (pr->throttling.state_count -1); return max_state; } static int processor_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct acpi_device *device = cdev->devdata; struct acpi_processor *pr = acpi_driver_data(device); if (!device || !pr) return -EINVAL; *state = acpi_processor_max_state(pr); return 0; } static int processor_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *cur_state) { struct acpi_device *device = cdev->devdata; struct acpi_processor *pr = acpi_driver_data(device); if (!device || !pr) return -EINVAL; *cur_state = cpufreq_get_cur_state(pr->id); if (pr->flags.throttling) *cur_state += pr->throttling.state; return 0; } static int processor_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { struct acpi_device *device = cdev->devdata; struct acpi_processor *pr = acpi_driver_data(device); int result = 0; int max_pstate; if (!device || !pr) return -EINVAL; max_pstate = cpufreq_get_max_state(pr->id); if (state > acpi_processor_max_state(pr)) return -EINVAL; if (state <= max_pstate) { if (pr->flags.throttling && pr->throttling.state) result = acpi_processor_set_throttling(pr, 0, false); cpufreq_set_cur_state(pr->id, state); } else { cpufreq_set_cur_state(pr->id, max_pstate); result = acpi_processor_set_throttling(pr, state - 
/* expression continues: steps above the cpufreq range become T-state throttling */
max_pstate, false); } return result; } struct thermal_cooling_device_ops processor_cooling_ops = { .get_max_state = processor_get_max_state, .get_cur_state = processor_get_cur_state, .set_cur_state = processor_set_cur_state, };
gpl-2.0