repo_name
string
path
string
copies
string
size
string
content
string
license
string
hujiafu/lpc1788_uclinux
cortexm_uclinux-master/kernel/linux-2.6.33/arch/xtensa/lib/pci-auto.c
9563
9268
/*
 * arch/xtensa/lib/pci-auto.c
 *
 * PCI autoconfiguration library
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <zankel@tensilica.com, cez@zankel.net>
 *
 * Based on work from Matt Porter <mporter@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>

#include <asm/pci-bridge.h>


/*
 *
 * Setting up a PCI
 *
 * pci_ctrl->first_busno = <first bus number (0)>
 * pci_ctrl->last_busno = <last bus number (0xff)>
 * pci_ctrl->ops = <PCI config operations>
 * pci_ctrl->map_irq = <function to return the interrupt number for a device>
 *
 * pci_ctrl->io_space.start = <IO space start address (PCI view)>
 * pci_ctrl->io_space.end = <IO space end address (PCI view)>
 * pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
 * pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
 * pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
 * pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
 *
 * pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
 * 			 <IO space end>, IORESOURCE_IO, "PCI host bridge");
 * pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
 * 			 <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
 *
 * pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
 *
 * int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
 *
 */

/* define DEBUG to print some debugging messages. */

#undef DEBUG

#ifdef DEBUG
# define DBG(x...) printk(x)
#else
# define DBG(x...)
#endif

/*
 * Allocators run top-down: each holds the first address ABOVE the space
 * still available, and is decremented as BARs/windows are assigned.
 * Note: single static pci_dev/pci_bus scratch objects are reused for every
 * probe, so this code is inherently single-threaded early-init only.
 */
static int pciauto_upper_iospc;
static int pciauto_upper_memspc;

static struct pci_dev pciauto_dev;
static struct pci_bus pciauto_bus;

/*
 * Helper functions
 */

/*
 * Initialize the bars of a PCI device.
 *
 * Sizes each BAR from PCI_BASE_ADDRESS_0 up to @bar_limit by the standard
 * write-all-ones/read-back probe, then assigns it a naturally aligned
 * address carved from the top of the matching (I/O or memory) allocator.
 */
static void __init pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
{
	int bar_size;
	int bar, bar_nr;
	int *upper_limit;
	int found_mem64 = 0;

	for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
	     bar <= bar_limit;
	     bar += 4, bar_nr++) {
		/* Tickle the BAR and get the size */
		pci_write_config_dword(dev, bar, 0xffffffff);
		pci_read_config_dword(dev, bar, &bar_size);

		/* If BAR is not implemented go to the next BAR */
		if (!bar_size)
			continue;

		/* Check the BAR type and set our address mask */
		if (bar_size & PCI_BASE_ADDRESS_SPACE_IO) {
			bar_size &= PCI_BASE_ADDRESS_IO_MASK;
			upper_limit = &pciauto_upper_iospc;
			DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
		} else {
			if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
			    PCI_BASE_ADDRESS_MEM_TYPE_64)
				found_mem64 = 1;

			bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
			upper_limit = &pciauto_upper_memspc;
			DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
		}

		/* Allocate a base address (bar_size is negative!) */
		*upper_limit = (*upper_limit + bar_size) & bar_size;

		/* Write it out and update our limit */
		pci_write_config_dword(dev, bar, *upper_limit);

		/*
		 * If we are a 64-bit decoder then increment to the
		 * upper 32 bits of the bar and force it to locate
		 * in the lower 4GB of memory.
		 */
		if (found_mem64)
			pci_write_config_dword(dev, (bar += 4), 0x00000000);

		DBG("size=0x%x, address=0x%x\n",
		    ~bar_size + 1, *upper_limit);
	}
}

/*
 * Initialize the interrupt number.
 *
 * Reads the device's interrupt pin, maps it through the controller's
 * map_irq hook (if any), and programs PCI_INTERRUPT_LINE.
 */
static void __init
pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
{
	u8 pin;
	int irq = 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);

	/* Fix illegal pin numbers. */
	if (pin == 0 || pin > 4)
		pin = 1;

	if (pci_ctrl->map_irq)
		irq = pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin);

	/* map_irq returning -1 means "no interrupt"; store 0 instead */
	if (irq == -1)
		irq = 0;

	DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);

	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

/*
 * Prepare a P2P bridge before scanning behind it: program the bus number
 * registers (subordinate temporarily wide-open at 0xff), align both
 * allocators, remember their values in *iosave/*memsave, and set the
 * bridge's memory/I/O window LIMIT registers from the current allocator
 * tops (the BASE registers are programmed in the postscan pass).
 */
static void __init
pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
			     int *iosave, int *memsave)
{
	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
	pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);

	/* Round memory allocator to 1MB boundary */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	*memsave = pciauto_upper_memspc;

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	*iosave = pciauto_upper_iospc;

	/* Set up memory and I/O filter limits, assume 32-bit I/O space */
	pci_write_config_word(dev, PCI_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
	pci_write_config_byte(dev, PCI_IO_LIMIT,
			      ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
			      ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
}

/*
 * Finish a P2P bridge after the buses behind it have been scanned:
 * fix the subordinate bus number, program the window BASE registers
 * from the (now lower) allocator values, and enable the bridge.
 */
static void __init
pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
			      int *iosave, int *memsave)
{
	int cmdstat;

	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);

	/*
	 * Round memory allocator to 1MB boundary.
	 * If no space used, allocate minimum.
	 */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	if (*memsave == pciauto_upper_memspc)
		pciauto_upper_memspc -= 0x00100000;

	pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);

	/* Allocate 1MB for pre-fetch */
	pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);

	pciauto_upper_memspc -= 0x100000;

	pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
			      pciauto_upper_memspc >> 16);

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	if (*iosave == pciauto_upper_iospc)
		pciauto_upper_iospc -= 0x1000;

	pci_write_config_byte(dev, PCI_IO_BASE,
			      (pciauto_upper_iospc & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
			      pciauto_upper_iospc >> 16);

	/* Enable memory and I/O accesses, enable bus master */
	pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
	pci_write_config_dword(dev, PCI_COMMAND,
			       cmdstat |
			       PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER);
}

/*
 * Scan the current PCI bus.
 *
 * Walks every devfn on @current_bus, assigns BARs and IRQ lines to
 * ordinary devices, and recurses through P2P bridges.
 * Returns the highest (subordinate) bus number found.
 */
int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
{
	int sub_bus, pci_devfn, pci_class, cmdstat, found_multi = 0;
	unsigned short vid;
	unsigned char header_type;
	struct pci_dev *dev = &pciauto_dev;

	/* Re-aim the shared scratch dev/bus objects at this controller */
	pciauto_dev.bus = &pciauto_bus;
	pciauto_dev.sysdata = pci_ctrl;
	pciauto_bus.ops = pci_ctrl->ops;

	/*
	 * Fetch our I/O and memory space upper boundaries used
	 * to allocate base addresses on this pci_controller.
	 */
	if (current_bus == pci_ctrl->first_busno) {
		pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
		pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
	}

	sub_bus = current_bus;

	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
		/* Skip our host bridge */
		if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
			continue;

		/* Only probe functions >0 on multi-function devices */
		if (PCI_FUNC(pci_devfn) && !found_multi)
			continue;

		pciauto_bus.number = current_bus;
		pciauto_dev.devfn = pci_devfn;

		/* If config space read fails from this device, move on */
		if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
			continue;

		if (!PCI_FUNC(pci_devfn))
			found_multi = header_type & 0x80;
		pci_read_config_word(dev, PCI_VENDOR_ID, &vid);

		/* All-ones / all-zeros vendor ID means no device present */
		if (vid == 0xffff || vid == 0x0000) {
			found_multi = 0;
			continue;
		}

		pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);

		if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {

			int iosave, memsave;

			DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
			    PCI_SLOT(pci_devfn));

			/* Allocate PCI I/O and/or memory space */
			pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);

			pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
						     &iosave, &memsave);
			sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus + 1);
			pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
						      &iosave, &memsave);
			/* recursion clobbered the scratch bus number */
			pciauto_bus.number = current_bus;

			continue;

		}

#if 0
		/* Skip legacy mode IDE controller */

		if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {

			unsigned char prg_iface;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);

			if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
				DBG("PCI Autoconfig: Skipping legacy mode "
				    "IDE controller\n");
				continue;
			}
		}
#endif

		/*
		 * Found a peripheral, enable some standard
		 * settings
		 */
		pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
		pci_write_config_dword(dev, PCI_COMMAND,
				       cmdstat |
				       PCI_COMMAND_IO |
				       PCI_COMMAND_MEMORY |
				       PCI_COMMAND_MASTER);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);

		/* Allocate PCI I/O and/or memory space */
		DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
		    current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );

		pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
		pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
	}
	return sub_bus;
}
gpl-2.0
anbulang/sctp-cmt
arch/arm/mach-s3c24xx/setup-sdhci-gpio.c
10587
1043
/* linux/arch/arm/plat-s3c2416/setup-sdhci-gpio.c * * Copyright 2010 Promwad Innovation Company * Yauhen Kharuzhy <yauhen.kharuzhy@promwad.com> * * S3C2416 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC) * * Based on mach-s3c64xx/setup-sdhci-gpio.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <mach/regs-gpio.h> #include <plat/gpio-cfg.h> void s3c2416_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width) { s3c_gpio_cfgrange_nopull(S3C2410_GPE(5), 2 + width, S3C_GPIO_SFN(2)); } void s3c2416_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width) { s3c_gpio_cfgrange_nopull(S3C2410_GPL(0), width, S3C_GPIO_SFN(2)); s3c_gpio_cfgrange_nopull(S3C2410_GPL(8), 2, S3C_GPIO_SFN(2)); }
gpl-2.0
drewx2/caf-kernel
net/ceph/ceph_fs.c
12379
1726
/* * Some non-inline ceph helpers */ #include <linux/module.h> #include <linux/ceph/types.h> /* * return true if @layout appears to be valid */ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout) { __u32 su = le32_to_cpu(layout->fl_stripe_unit); __u32 sc = le32_to_cpu(layout->fl_stripe_count); __u32 os = le32_to_cpu(layout->fl_object_size); /* stripe unit, object size must be non-zero, 64k increment */ if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1))) return 0; if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1))) return 0; /* object size must be a multiple of stripe unit */ if (os < su || os % su) return 0; /* stripe count must be non-zero */ if (!sc) return 0; return 1; } int ceph_flags_to_mode(int flags) { int mode; #ifdef O_DIRECTORY /* fixme */ if ((flags & O_DIRECTORY) == O_DIRECTORY) return CEPH_FILE_MODE_PIN; #endif switch (flags & O_ACCMODE) { case O_WRONLY: mode = CEPH_FILE_MODE_WR; break; case O_RDONLY: mode = CEPH_FILE_MODE_RD; break; case O_RDWR: case O_ACCMODE: /* this is what the VFS does */ mode = CEPH_FILE_MODE_RDWR; break; } #ifdef O_LAZY if (flags & O_LAZY) mode |= CEPH_FILE_MODE_LAZY; #endif return mode; } EXPORT_SYMBOL(ceph_flags_to_mode); int ceph_caps_for_mode(int mode) { int caps = CEPH_CAP_PIN; if (mode & CEPH_FILE_MODE_RD) caps |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE; if (mode & CEPH_FILE_MODE_WR) caps |= CEPH_CAP_FILE_EXCL | CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL | CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL; if (mode & CEPH_FILE_MODE_LAZY) caps |= CEPH_CAP_FILE_LAZYIO; return caps; } EXPORT_SYMBOL(ceph_caps_for_mode);
gpl-2.0
varund7726/OwnKernel-bacon
kernel/backtracetest.c
12891
2135
/*
 * Simple stack backtrace regression test module
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Dump a backtrace from ordinary process context. */
static void backtrace_test_normal(void)
{
	printk("Testing a backtrace from process context.\n");
	printk("The following trace is a kernel self test and not a bug!\n");

	dump_stack();
}

static DECLARE_COMPLETION(backtrace_work);

/* Tasklet body: dump a trace from softirq context, then signal done. */
static void backtrace_test_irq_callback(unsigned long data)
{
	dump_stack();
	complete(&backtrace_work);
}

static DECLARE_TASKLET(backtrace_tasklet, &backtrace_test_irq_callback, 0);

/*
 * Dump a backtrace from (soft)irq context by scheduling a tasklet and
 * blocking until its completion fires.
 */
static void backtrace_test_irq(void)
{
	printk("Testing a backtrace from irq context.\n");
	printk("The following trace is a kernel self test and not a bug!\n");

	init_completion(&backtrace_work);
	tasklet_schedule(&backtrace_tasklet);
	wait_for_completion(&backtrace_work);
}

#ifdef CONFIG_STACKTRACE
/* Exercise save_stack_trace()/print_stack_trace() with a small buffer. */
static void backtrace_test_saved(void)
{
	struct stack_trace trace;
	unsigned long entries[8];

	printk("Testing a saved backtrace.\n");
	printk("The following trace is a kernel self test and not a bug!\n");

	trace.nr_entries = 0;
	trace.max_entries = ARRAY_SIZE(entries);
	trace.entries = entries;
	trace.skip = 0;

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
#else
/* Stub when CONFIG_STACKTRACE is off. */
static void backtrace_test_saved(void)
{
	printk("Saved backtrace test skipped.\n");
}
#endif

/* Module entry point: run all three backtrace self tests once at load. */
static int backtrace_regression_test(void)
{
	printk("====[ backtrace testing ]===========\n");

	backtrace_test_normal();
	backtrace_test_irq();
	backtrace_test_saved();

	printk("====[ end of backtrace testing ]====\n");
	return 0;
}

static void exitf(void)
{
}

module_init(backtrace_regression_test);
module_exit(exitf);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
gpl-2.0
broodplank/samsung-kernel-jfltexx
drivers/ide/ide-iops.c
13659
13870
/*
 * Copyright (C) 2000-2002	   Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003		   Red Hat
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/nmi.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* Invoke the host port's maskproc hook, if it provides one. */
void SELECT_MASK(ide_drive_t *drive, int mask)
{
	const struct ide_port_ops *port_ops = drive->hwif->port_ops;

	if (port_ops && port_ops->maskproc)
		port_ops->maskproc(drive, mask);
}

/* Read the taskfile error register of @drive. */
u8 ide_read_error(ide_drive_t *drive)
{
	struct ide_taskfile tf;

	drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR);

	return tf.error;
}
EXPORT_SYMBOL_GPL(ide_read_error);

/* Byte-swap a raw 256-word IDENTIFY buffer on big-endian hosts. */
void ide_fix_driveid(u16 *id)
{
#ifndef __LITTLE_ENDIAN
# ifdef __BIG_ENDIAN
	int i;

	for (i = 0; i < 256; i++)
		id[i] = __le16_to_cpu(id[i]);
# else
#  error "Please fix <asm/byteorder.h>"
# endif
#endif
}

/*
 * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
 * removing leading/trailing blanks and compressing internal blanks.
 * It is primarily used to tidy up the model name/number fields as
 * returned by the ATA_CMD_ID_ATA[PI] commands.
 */

void ide_fixstring(u8 *s, const int bytecount, const int byteswap)
{
	u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */

	if (byteswap) {
		/* convert from big-endian to host byte order */
		for (p = s ; p != end ; p += 2)
			be16_to_cpus((u16 *) p);
	}

	/* strip leading blanks */
	p = s;
	while (s != end && *s == ' ')
		++s;
	/* compress internal blanks and strip trailing blanks */
	while (s != end && *s) {
		if (*s++ != ' ' || (s != end && *s && *s != ' '))
			*p++ = *(s-1);
	}
	/* wipe out trailing garbage */
	while (p != end)
		*p++ = '\0';
}
EXPORT_SYMBOL(ide_fixstring);

/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0.  All other
 * cases return error -- caller may then invoke ide_error().
 *
 * This routine should get fixed to not hog the cpu during extra long waits..
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half second intervals thereafter,
 * until timeout is achieved, before timing out.
 */
int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
		    unsigned long timeout, u8 *rstat)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	unsigned long flags;
	int i;
	u8 stat;

	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */
	stat = tp_ops->read_status(hwif);

	if (stat & ATA_BUSY) {
		/* poll with interrupts enabled so we don't stall the box */
		local_save_flags(flags);
		local_irq_enable_in_hardirq();
		timeout += jiffies;
		while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
			if (time_after(jiffies, timeout)) {
				/*
				 * One last read after the timeout in case
				 * heavy interrupt load made us not make any
				 * progress during the timeout..
				 */
				stat = tp_ops->read_status(hwif);
				if ((stat & ATA_BUSY) == 0)
					break;

				local_irq_restore(flags);
				*rstat = stat;
				return -EBUSY;
			}
		}
		local_irq_restore(flags);
	}
	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		stat = tp_ops->read_status(hwif);

		if (OK_STAT(stat, good, bad)) {
			*rstat = stat;
			return 0;
		}
	}
	*rstat = stat;
	return -EFAULT;
}

/*
 * In case of error returns error value after doing "*startstop = ide_error()".
 * The caller should return the updated value of "startstop" in this case,
 * "startstop" is unchanged when the function returns 0.
 */
int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good,
		  u8 bad, unsigned long timeout)
{
	int err;
	u8 stat;

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		*startstop = ide_stopped;
		return 1;
	}

	err = __ide_wait_stat(drive, good, bad, timeout, &stat);

	if (err) {
		char *s = (err == -EBUSY) ? "status timeout" : "status error";
		*startstop = ide_error(drive, s, stat);
	}

	return err;
}
EXPORT_SYMBOL(ide_wait_stat);

/**
 *	ide_in_drive_list	-	look for drive in black/white list
 *	@id: drive identifier
 *	@table: list to inspect
 *
 *	Look for a drive in the blacklist and the whitelist tables
 *	Returns 1 if the drive is found in the table.
 */

int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
{
	for ( ; table->id_model; table++)
		if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) &&
		    (!table->id_firmware ||
		     strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware)))
			return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(ide_in_drive_list);

/*
 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
 * Some optical devices with the buggy firmwares have the same problem.
 */
static const struct drive_list_entry ivb_list[] = {
	{ "QUANTUM FIREBALLlct10 05"	, "A03.0900"	},
	{ "QUANTUM FIREBALLlct20 30"	, "APL.0900"	},
	{ "TSSTcorp CDDVDW SH-S202J"	, "SB00"	},
	{ "TSSTcorp CDDVDW SH-S202J"	, "SB01"	},
	{ "TSSTcorp CDDVDW SH-S202N"	, "SB00"	},
	{ "TSSTcorp CDDVDW SH-S202N"	, "SB01"	},
	{ "TSSTcorp CDDVDW SH-S202H"	, "SB00"	},
	{ "TSSTcorp CDDVDW SH-S202H"	, "SB01"	},
	{ "SAMSUNG SP0822N"		, "WA100-10"	},
	{ NULL				, NULL		}
};

/*
 *  All hosts that use the 80c ribbon must use!
 *  The name is derived from upper byte of word 93 and the 80c ribbon.
 *
 *  Returns 1 if an 80-wire cable (or SATA) was detected, 0 otherwise;
 *  on failure it also warns once and limits the drive to UDMA33.
 */
u8 eighty_ninty_three(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;
	int ivb = ide_in_drive_list(id, ivb_list);

	if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
		return 1;

	if (ivb)
		printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
				  drive->name);

	if (ata_id_is_sata(id) && !ivb)
		return 1;

	if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
		goto no_80w;

	/*
	 * FIXME:
	 * - change master/slave IDENTIFY order
	 * - force bit13 (80c cable present) check also for !ivb devices
	 *   (unless the slave device is pre-ATA3)
	 */
	if (id[ATA_ID_HW_CONFIG] & 0x4000)
		return 1;

	if (ivb) {
		const char *model = (char *)&id[ATA_ID_PROD];

		if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
			/*
			 * These ATAPI devices always report 80c cable
			 * so we have to depend on the host in this case.
			 */
			if (hwif->cbl == ATA_CBL_PATA80)
				return 1;
		} else {
			/* Depend on the device side cable detection. */
			if (id[ATA_ID_HW_CONFIG] & 0x2000)
				return 1;
		}
	}

no_80w:
	if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
		return 0;

	printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
			    "limiting max speed to UDMA33\n",
			    drive->name,
			    hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");

	drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;

	return 0;
}

/* Drives whose nIEN (interrupt-disable) bit must be handled specially. */
static const char *nien_quirk_list[] = {
	"QUANTUM FIREBALLlct08 08",
	"QUANTUM FIREBALLP KA6.4",
	"QUANTUM FIREBALLP KA9.1",
	"QUANTUM FIREBALLP KX13.6",
	"QUANTUM FIREBALLP KX20.5",
	"QUANTUM FIREBALLP KX27.3",
	"QUANTUM FIREBALLP LM20.4",
	"QUANTUM FIREBALLP LM20.5",
	"FUJITSU MHZ2160BH G2",
	NULL
};

/* Flag @drive with IDE_DFLAG_NIEN_QUIRK if its model is on the list. */
void ide_check_nien_quirk_list(ide_drive_t *drive)
{
	const char **list, *m = (char *)&drive->id[ATA_ID_PROD];

	for (list = nien_quirk_list; *list != NULL; list++)
		if (strstr(m, *list) != NULL) {
			drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
			return;
		}
}

/*
 * Re-issue IDENTIFY and refresh the cached transfer-mode words.
 * Returns 1 on success, 0 on failure.
 */
int ide_driveid_update(ide_drive_t *drive)
{
	u16 *id;
	int rc;

	id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
	if (id == NULL)
		return 0;

	SELECT_MASK(drive, 1);
	rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id, 1);
	SELECT_MASK(drive, 0);

	if (rc)
		goto out_err;

	drive->id[ATA_ID_UDMA_MODES]  = id[ATA_ID_UDMA_MODES];
	drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
	drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
	drive->id[ATA_ID_CFA_MODES]   = id[ATA_ID_CFA_MODES];
	/* anything more ? */

	kfree(id);

	return 1;
out_err:
	if (rc == 2)
		printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
	kfree(id);
	return 0;
}

/*
 * Program a new transfer mode into the drive with SET FEATURES / XFER,
 * then update the cached IDENTIFY mode words to match.
 * Returns 0 on success or the __ide_wait_stat() error.
 */
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	struct ide_taskfile tf;
	u16 *id = drive->id, i;
	int error = 0;
	u8 stat;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->dma_ops)	/* check if host supports DMA */
		hwif->dma_ops->dma_host_set(drive, 0);
#endif

	/* Skip setting PIO flow-control modes on pre-EIDE drives */
	if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
		goto skip;

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
	udelay(1);
	tp_ops->dev_select(drive);
	SELECT_MASK(drive, 1);
	udelay(1);
	tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);

	memset(&tf, 0, sizeof(tf));
	tf.feature = SETFEATURES_XFER;
	tf.nsect   = speed;

	tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);

	tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);

	if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

	error = __ide_wait_stat(drive, drive->ready_stat,
				ATA_BUSY | ATA_DRQ | ATA_ERR,
				WAIT_CMD, &stat);

	SELECT_MASK(drive, 0);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	/* clear the old enabled-mode bits before setting the new one */
	if (speed >= XFER_SW_DMA_0) {
		id[ATA_ID_UDMA_MODES]  &= ~0xFF00;
		id[ATA_ID_MWDMA_MODES] &= ~0x0700;
		id[ATA_ID_SWDMA_MODES] &= ~0x0700;
		if (ata_id_is_cfa(id))
			id[ATA_ID_CFA_MODES] &= ~0x0E00;
	} else	if (ata_id_is_cfa(id))
		id[ATA_ID_CFA_MODES] &= ~0x01C0;

 skip:
#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
		hwif->dma_ops->dma_host_set(drive, 1);
	else if (hwif->dma_ops)	/* check if host supports DMA */
		ide_dma_off_quietly(drive);
#endif

	if (speed >= XFER_UDMA_0) {
		i = 1 << (speed - XFER_UDMA_0);
		id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_MW_DMA_3) {
		i = speed - XFER_MW_DMA_2;
		id[ATA_ID_CFA_MODES] |= i << 9;
	} else if (speed >= XFER_MW_DMA_0) {
		i = 1 << (speed - XFER_MW_DMA_0);
		id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
	} else if (speed >= XFER_SW_DMA_0) {
		i = 1 << (speed - XFER_SW_DMA_0);
		id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_PIO_5) {
		i = speed - XFER_PIO_4;
		id[ATA_ID_CFA_MODES] |= i << 6;
	}

	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}

/*
 * This should get invoked any time we exit the driver to
 * wait for an interrupt response from a drive.  handler() points
 * at the appropriate code to handle the next interrupt, and a
 * timer is started to prevent us from waiting forever in case
 * something goes wrong (see the ide_timer_expiry() handler later on).
 *
 * See also ide_execute_command
 */
void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
		       unsigned int timeout)
{
	ide_hwif_t *hwif = drive->hwif;

	/* caller must hold hwif->lock; a handler must not already be armed */
	BUG_ON(hwif->handler);
	hwif->handler		= handler;
	hwif->timer.expires	= jiffies + timeout;
	hwif->req_gen_timer	= hwif->req_gen;
	add_timer(&hwif->timer);
}

/* Locked wrapper around __ide_set_handler(). */
void ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
		     unsigned int timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned long flags;

	spin_lock_irqsave(&hwif->lock, flags);
	__ide_set_handler(drive, handler, timeout);
	spin_unlock_irqrestore(&hwif->lock, flags);
}
EXPORT_SYMBOL(ide_set_handler);

/**
 *	ide_execute_command	-	execute an IDE command
 *	@drive: IDE drive to issue the command against
 *	@cmd: command
 *	@handler: handler for next phase
 *	@timeout: timeout for command
 *
 *	Helper function to issue an IDE command. This handles the
 *	atomicity requirements, command timing and ensures that the
 *	handler and IRQ setup do not race. All IDE command kick off
 *	should go via this function or do equivalent locking.
 */

void ide_execute_command(ide_drive_t *drive, struct ide_cmd *cmd,
			 ide_handler_t *handler, unsigned timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned long flags;

	spin_lock_irqsave(&hwif->lock, flags);
	if ((cmd->protocol != ATAPI_PROT_DMA &&
	     cmd->protocol != ATAPI_PROT_PIO) ||
	    (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT))
		__ide_set_handler(drive, handler, timeout);
	hwif->tp_ops->exec_command(hwif, cmd->tf.command);
	/*
	 * Drive takes 400nS to respond, we must avoid the IRQ being
	 * serviced before that.
	 *
	 * FIXME: we could skip this delay with care on non shared devices
	 */
	ndelay(400);
	spin_unlock_irqrestore(&hwif->lock, flags);
}

/*
 * ide_wait_not_busy() waits for the currently selected device on the hwif
 * to report a non-busy status, see comments in ide_probe_port().
 */
int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
{
	u8 stat = 0;

	while (timeout--) {
		/*
		 * Turn this into a schedule() sleep once I'm sure
		 * about locking issues (2.5 work ?).
		 */
		mdelay(1);
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0)
			return 0;
		/*
		 * Assume a value of 0xff means nothing is connected to
		 * the interface and it doesn't implement the pull-down
		 * resistor on D7.
		 */
		if (stat == 0xff)
			return -ENODEV;
		touch_softlockup_watchdog();
		touch_nmi_watchdog();
	}
	return -EBUSY;
}
gpl-2.0
ricardon/intel-audio
drivers/media/usb/au0828/au0828-cards.c
92
10503
/*
 * Driver for the Auvitek USB bridge
 *
 *  Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "au0828.h"
#include "au0828-cards.h"
#include "au8522.h"
#include "media/tuner.h"
#include "media/v4l2-common.h"

/*
 * Gate the CS5340 audio ADC on/off via GPIO bit 0x10 of REG_000.
 * Because the HVR-950q shares an i2s bus between the cs5340 and the
 * au8522, we need to hold cs5340 in reset when using the au8522.
 */
static void hvr950q_cs5340_audio(void *priv, int enable)
{
	/* Because the HVR-950q shares an i2s bus between the cs5340 and the
	   au8522, we need to hold cs5340 in reset when using the au8522 */
	struct au0828_dev *dev = priv;
	if (enable == 1)
		au0828_set(dev, REG_000, 0x10);
	else
		au0828_clear(dev, REG_000, 0x10);
}

/*
 * Static per-board configuration table, indexed by AU0828_BOARD_*.
 * Entries without .input have no analog video inputs.
 */
struct au0828_board au0828_boards[] = {
	[AU0828_BOARD_UNKNOWN] = {
		.name	= "Unknown board",
		.tuner_type = UNSET,
		.tuner_addr = ADDR_UNSET,
	},
	[AU0828_BOARD_HAUPPAUGE_HVR850] = {
		.name	= "Hauppauge HVR850",
		.tuner_type = TUNER_XC5000,
		.tuner_addr = 0x61,
		.has_ir_i2c = 1,
		.has_analog = 1,
		.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
		.input = {
			{
				.type = AU0828_VMUX_TELEVISION,
				.vmux = AU8522_COMPOSITE_CH4_SIF,
				.amux = AU8522_AUDIO_SIF,
			},
			{
				.type = AU0828_VMUX_COMPOSITE,
				.vmux = AU8522_COMPOSITE_CH1,
				.amux = AU8522_AUDIO_NONE,
				.audio_setup = hvr950q_cs5340_audio,
			},
			{
				.type = AU0828_VMUX_SVIDEO,
				.vmux = AU8522_SVIDEO_CH13,
				.amux = AU8522_AUDIO_NONE,
				.audio_setup = hvr950q_cs5340_audio,
			},
		},
	},
	[AU0828_BOARD_HAUPPAUGE_HVR950Q] = {
		.name	= "Hauppauge HVR950Q",
		.tuner_type = TUNER_XC5000,
		.tuner_addr = 0x61,
		.has_ir_i2c = 1,
		.has_analog = 1,
		.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
		.input = {
			{
				.type = AU0828_VMUX_TELEVISION,
				.vmux = AU8522_COMPOSITE_CH4_SIF,
				.amux = AU8522_AUDIO_SIF,
			},
			{
				.type = AU0828_VMUX_COMPOSITE,
				.vmux = AU8522_COMPOSITE_CH1,
				.amux = AU8522_AUDIO_NONE,
				.audio_setup = hvr950q_cs5340_audio,
			},
			{
				.type = AU0828_VMUX_SVIDEO,
				.vmux = AU8522_SVIDEO_CH13,
				.amux = AU8522_AUDIO_NONE,
				.audio_setup = hvr950q_cs5340_audio,
			},
		},
	},
	[AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL] = {
		.name	= "Hauppauge HVR950Q rev xxF8",
		.tuner_type = TUNER_XC5000,
		.tuner_addr = 0x61,
		.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
	},
	[AU0828_BOARD_DVICO_FUSIONHDTV7] = {
		.name	= "DViCO FusionHDTV USB",
		.tuner_type = TUNER_XC5000,
		.tuner_addr = 0x61,
		.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
	},
	[AU0828_BOARD_HAUPPAUGE_WOODBURY] = {
		.name = "Hauppauge Woodbury",
		.tuner_type = TUNER_NXP_TDA18271,
		.tuner_addr = 0x60,
		.i2c_clk_divider = AU0828_I2C_CLK_250KHZ,
	},
};

/* Tuner callback function for au0828 boards. Currently only needed
 * for HVR1500Q, which has an xc5000 tuner.
 */
int au0828_tuner_callback(void *priv, int component, int command, int arg)
{
	struct au0828_dev *dev = priv;

	dprintk(1, "%s()\n", __func__);

	switch (dev->boardnr) {
	case AU0828_BOARD_HAUPPAUGE_HVR850:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
	case AU0828_BOARD_DVICO_FUSIONHDTV7:
		if (command == 0) {
			/* Tuner Reset Command from xc5000 */
			/* Drive the tuner into reset and out */
			au0828_clear(dev, REG_001, 2);
			mdelay(10);
			au0828_set(dev, REG_001, 2);
			mdelay(10);
			return 0;
		} else {
			pr_err("%s(): Unknown command.\n", __func__);
			return -EINVAL;
		}
		break;
	}

	return 0; /* Should never be here */
}

/*
 * Parse the Hauppauge analog EEPROM: pick up the tuner type and sanity
 * check the model number against the list of boards this driver knows.
 * Unknown models only warn — the board is still used as configured.
 */
static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
{
	struct tveeprom tv;

	tveeprom_hauppauge_analog(&dev->i2c_client, &tv, eeprom_data);
	dev->board.tuner_type = tv.tuner_type;

	/* Make sure we support the board model */
	switch (tv.model) {
	case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
	case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
	case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
	case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
	case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
	case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
	case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
	case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
	case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
	case 72261: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
	case 72271: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
	case 72281: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
	case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
	case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
		break;
	default:
		pr_warn("%s: warning: unknown hauppauge model #%d\n",
			__func__, tv.model);
		break;
	}

	pr_info("%s: hauppauge eeprom: model=%d\n",
		__func__, tv.model);
}
void au0828_card_analog_fe_setup(struct au0828_dev *dev);

/*
 * au0828_card_setup() - per-board initialisation run at probe time.
 * @dev: device being initialised; dev->boardnr selects the board profile
 *
 * Copies the static board description into the device, reads the
 * on-board EEPROM over I2C when the I2C bus came up cleanly
 * (i2c_rc == 0), lets hauppauge_eeprom() decode it for Hauppauge
 * boards (the tveeprom block starts at offset 0xa0), then hands off
 * to the analog front-end setup.
 */
void au0828_card_setup(struct au0828_dev *dev)
{
	/* static: 256-byte scratch buffer shared across probes; probe is
	 * serialised by USB core, so this avoids a per-call allocation */
	static u8 eeprom[256];

	dprintk(1, "%s()\n", __func__);

	dev->board = au0828_boards[dev->boardnr];

	if (dev->i2c_rc == 0) {
		/* 8-bit EEPROM address 0xa0 -> 7-bit I2C address */
		dev->i2c_client.addr = 0xa0 >> 1;
		tveeprom_read(&dev->i2c_client, eeprom, sizeof(eeprom));
	}

	switch (dev->boardnr) {
	case AU0828_BOARD_HAUPPAUGE_HVR850:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
	case AU0828_BOARD_HAUPPAUGE_WOODBURY:
		if (dev->i2c_rc == 0)
			hauppauge_eeprom(dev, eeprom+0xa0);
		break;
	}

	au0828_card_analog_fe_setup(dev);
}

/*
 * au0828_card_analog_fe_setup() - register the analog demodulator and
 * tuner v4l2 subdevices.
 * @dev: device whose analog front end is being configured
 *
 * Compiled out entirely when V4L2 support is disabled.  Registers the
 * au8522 demod subdev if the board declares any analog input, then, if
 * the board has a tuner, loads the tuner subdev and pushes type/address
 * configuration (including our GPIO reset callback) to it via
 * s_type_addr.  Registration failures are logged but not fatal.
 */
void au0828_card_analog_fe_setup(struct au0828_dev *dev)
{
#ifdef CONFIG_VIDEO_AU0828_V4L2
	struct tuner_setup tun_setup;
	struct v4l2_subdev *sd;
	unsigned int mode_mask = T_ANALOG_TV;

	if (AUVI_INPUT(0).type != AU0828_VMUX_UNDEFINED) {
		/* Load the analog demodulator driver (note this would need to
		   be abstracted out if we ever need to support a different
		   demod) */
		/* 0x8e is the 8-bit au8522 address; shift to 7-bit */
		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
				"au8522", 0x8e >> 1, NULL);
		if (sd == NULL)
			pr_err("analog subdev registration failed\n");
	}

	/* Setup tuners */
	if (dev->board.tuner_type != TUNER_ABSENT && dev->board.has_analog) {
		/* Load the tuner module, which does the attach */
		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
				"tuner", dev->board.tuner_addr, NULL);
		if (sd == NULL)
			pr_err("tuner subdev registration fail\n");

		tun_setup.mode_mask      = mode_mask;
		tun_setup.type           = dev->board.tuner_type;
		tun_setup.addr           = dev->board.tuner_addr;
		tun_setup.tuner_callback = au0828_tuner_callback;
		v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr,
				     &tun_setup);
	}
#endif
}

/*
 * The bridge has between 8 and 12 gpios.
 * Regs 1 and 0 deal with output enables.
 * Regs 3 and 2 deal with direction.
 */

/*
 * au0828_gpio_setup() - board-specific GPIO power-up / reset sequence.
 * @dev: device whose bridge GPIOs are programmed
 *
 * NOTE: the register write order and msleep() delays below form a
 * hardware bring-up sequence (power supply first, then tuner/demod out
 * of reset) — do not reorder them.
 */
void au0828_gpio_setup(struct au0828_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	switch (dev->boardnr) {
	case AU0828_BOARD_HAUPPAUGE_HVR850:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
	case AU0828_BOARD_HAUPPAUGE_WOODBURY:
		/* GPIO's
		 * 4 - CS5340
		 * 5 - AU8522 Demodulator
		 * 6 - eeprom W/P
		 * 7 - power supply
		 * 9 - XC5000 Tuner
		 */

		/* Set relevant GPIOs as outputs (leave the EEPROM W/P
		   as an input since we will never touch it and it has
		   a pullup) */
		au0828_write(dev, REG_003, 0x02);
		au0828_write(dev, REG_002, 0x80 | 0x20 | 0x10);

		/* Into reset */
		au0828_write(dev, REG_001, 0x0);
		au0828_write(dev, REG_000, 0x0);
		msleep(50);

		/* Bring power supply out of reset */
		au0828_write(dev, REG_000, 0x80);
		msleep(50);

		/* Bring xc5000 and au8522 out of reset (leave the
		   cs5340 in reset until needed) */
		au0828_write(dev, REG_001, 0x02); /* xc5000 */
		au0828_write(dev, REG_000, 0x80 | 0x20); /* PS + au8522 */

		msleep(250);
		break;
	case AU0828_BOARD_DVICO_FUSIONHDTV7:
		/* GPIO's
		 * 6 - ?
		 * 8 - AU8522 Demodulator
		 * 9 - XC5000 Tuner
		 */

		/* Into reset */
		au0828_write(dev, REG_003, 0x02);
		au0828_write(dev, REG_002, 0xa0);
		au0828_write(dev, REG_001, 0x0);
		au0828_write(dev, REG_000, 0x0);
		msleep(100);

		/* Out of reset */
		au0828_write(dev, REG_003, 0x02);
		au0828_write(dev, REG_002, 0xa0);
		au0828_write(dev, REG_001, 0x02);
		au0828_write(dev, REG_000, 0xa0);
		msleep(250);
		break;
	}
}

/* table of devices that work with this driver */
struct usb_device_id au0828_usb_id_table[] = {
	{ USB_DEVICE(0x2040, 0x7200),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x7240),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR850 },
	{ USB_DEVICE(0x0fe9, 0xd620),
		.driver_info = AU0828_BOARD_DVICO_FUSIONHDTV7 },
	{ USB_DEVICE(0x2040, 0x7210),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x7217),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x721b),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x721e),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x721f),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x7280),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x0fd9, 0x0008),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x7201),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
	{ USB_DEVICE(0x2040, 0x7211),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
	{ USB_DEVICE(0x2040, 0x7281),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
	{ USB_DEVICE(0x05e1, 0x0480),
		.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
	{ USB_DEVICE(0x2040, 0x8200),
		.driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
	{ USB_DEVICE(0x2040, 0x7260),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x7213),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ USB_DEVICE(0x2040, 0x7270),
		.driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
	{ }, /* terminating entry */
};

MODULE_DEVICE_TABLE(usb, au0828_usb_id_table);
gpl-2.0
sztupy/universal_lagfix_kernel
drivers/usb/atm/ueagle-atm.c
348
68679
/*- * Copyright (c) 2003, 2004 * Damien Bergamini <damien.bergamini@free.fr>. All rights reserved. * * Copyright (c) 2005-2007 Matthieu Castet <castet.matthieu@free.fr> * Copyright (c) 2005-2007 Stanislaw Gruszka <stf_xl@wp.pl> * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * GPL license : * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * HISTORY : some part of the code was base on ueagle 1.3 BSD driver, * Damien Bergamini agree to put his code under a DUAL GPL/BSD license. * * The rest of the code was was rewritten from scratch. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/usb.h> #include <linux/firmware.h> #include <linux/ctype.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/freezer.h> #include <asm/unaligned.h> #include "usbatm.h" #define EAGLEUSBVERSION "ueagle 1.4" /* * Debug macros */ #define uea_dbg(usb_dev, format, args...) \ do { \ if (debug >= 1) \ dev_dbg(&(usb_dev)->dev, \ "[ueagle-atm dbg] %s: " format, \ __func__, ##args); \ } while (0) #define uea_vdbg(usb_dev, format, args...) \ do { \ if (debug >= 2) \ dev_dbg(&(usb_dev)->dev, \ "[ueagle-atm vdbg] " format, ##args); \ } while (0) #define uea_enters(usb_dev) \ uea_vdbg(usb_dev, "entering %s\n", __func__) #define uea_leaves(usb_dev) \ uea_vdbg(usb_dev, "leaving %s\n", __func__) #define uea_err(usb_dev, format,args...) \ dev_err(&(usb_dev)->dev ,"[UEAGLE-ATM] " format , ##args) #define uea_warn(usb_dev, format,args...) 
\ dev_warn(&(usb_dev)->dev ,"[Ueagle-atm] " format, ##args) #define uea_info(usb_dev, format,args...) \ dev_info(&(usb_dev)->dev ,"[ueagle-atm] " format, ##args) struct intr_pkt; /* cmv's from firmware */ struct uea_cmvs_v1 { u32 address; u16 offset; u32 data; } __attribute__ ((packed)); struct uea_cmvs_v2 { u32 group; u32 address; u32 offset; u32 data; } __attribute__ ((packed)); /* information about currently processed cmv */ struct cmv_dsc_e1 { u8 function; u16 idx; u32 address; u16 offset; }; struct cmv_dsc_e4 { u16 function; u16 offset; u16 address; u16 group; }; union cmv_dsc { struct cmv_dsc_e1 e1; struct cmv_dsc_e4 e4; }; struct uea_softc { struct usb_device *usb_dev; struct usbatm_data *usbatm; int modem_index; unsigned int driver_info; int annex; #define ANNEXA 0 #define ANNEXB 1 int booting; int reset; wait_queue_head_t sync_q; struct task_struct *kthread; u32 data; u32 data1; int cmv_ack; union cmv_dsc cmv_dsc; struct work_struct task; struct workqueue_struct *work_q; u16 pageno; u16 ovl; const struct firmware *dsp_firm; struct urb *urb_int; void (*dispatch_cmv) (struct uea_softc *, struct intr_pkt *); void (*schedule_load_page) (struct uea_softc *, struct intr_pkt *); int (*stat) (struct uea_softc *); int (*send_cmvs) (struct uea_softc *); /* keep in sync with eaglectl */ struct uea_stats { struct { u32 state; u32 flags; u32 mflags; u32 vidcpe; u32 vidco; u32 dsrate; u32 usrate; u32 dsunc; u32 usunc; u32 dscorr; u32 uscorr; u32 txflow; u32 rxflow; u32 usattenuation; u32 dsattenuation; u32 dsmargin; u32 usmargin; u32 firmid; } phy; } stats; }; /* * Elsa IDs */ #define ELSA_VID 0x05CC #define ELSA_PID_PSTFIRM 0x3350 #define ELSA_PID_PREFIRM 0x3351 #define ELSA_PID_A_PREFIRM 0x3352 #define ELSA_PID_A_PSTFIRM 0x3353 #define ELSA_PID_B_PREFIRM 0x3362 #define ELSA_PID_B_PSTFIRM 0x3363 /* * Devolo IDs : pots if (pid & 0x10) */ #define DEVOLO_VID 0x1039 #define DEVOLO_EAGLE_I_A_PID_PSTFIRM 0x2110 #define DEVOLO_EAGLE_I_A_PID_PREFIRM 0x2111 #define 
DEVOLO_EAGLE_I_B_PID_PSTFIRM 0x2100 #define DEVOLO_EAGLE_I_B_PID_PREFIRM 0x2101 #define DEVOLO_EAGLE_II_A_PID_PSTFIRM 0x2130 #define DEVOLO_EAGLE_II_A_PID_PREFIRM 0x2131 #define DEVOLO_EAGLE_II_B_PID_PSTFIRM 0x2120 #define DEVOLO_EAGLE_II_B_PID_PREFIRM 0x2121 /* * Reference design USB IDs */ #define ANALOG_VID 0x1110 #define ADI930_PID_PREFIRM 0x9001 #define ADI930_PID_PSTFIRM 0x9000 #define EAGLE_I_PID_PREFIRM 0x9010 /* Eagle I */ #define EAGLE_I_PID_PSTFIRM 0x900F /* Eagle I */ #define EAGLE_IIC_PID_PREFIRM 0x9024 /* Eagle IIC */ #define EAGLE_IIC_PID_PSTFIRM 0x9023 /* Eagle IIC */ #define EAGLE_II_PID_PREFIRM 0x9022 /* Eagle II */ #define EAGLE_II_PID_PSTFIRM 0x9021 /* Eagle II */ #define EAGLE_III_PID_PREFIRM 0x9032 /* Eagle III */ #define EAGLE_III_PID_PSTFIRM 0x9031 /* Eagle III */ #define EAGLE_IV_PID_PREFIRM 0x9042 /* Eagle IV */ #define EAGLE_IV_PID_PSTFIRM 0x9041 /* Eagle IV */ /* * USR USB IDs */ #define USR_VID 0x0BAF #define MILLER_A_PID_PREFIRM 0x00F2 #define MILLER_A_PID_PSTFIRM 0x00F1 #define MILLER_B_PID_PREFIRM 0x00FA #define MILLER_B_PID_PSTFIRM 0x00F9 #define HEINEKEN_A_PID_PREFIRM 0x00F6 #define HEINEKEN_A_PID_PSTFIRM 0x00F5 #define HEINEKEN_B_PID_PREFIRM 0x00F8 #define HEINEKEN_B_PID_PSTFIRM 0x00F7 #define PREFIRM 0 #define PSTFIRM (1<<7) #define AUTO_ANNEX_A (1<<8) #define AUTO_ANNEX_B (1<<9) enum { ADI930 = 0, EAGLE_I, EAGLE_II, EAGLE_III, EAGLE_IV }; /* macros for both struct usb_device_id and struct uea_softc */ #define UEA_IS_PREFIRM(x) \ (!((x)->driver_info & PSTFIRM)) #define UEA_CHIP_VERSION(x) \ ((x)->driver_info & 0xf) #define IS_ISDN(x) \ ((x)->annex & ANNEXB) #define INS_TO_USBDEV(ins) ins->usb_dev #define GET_STATUS(data) \ ((data >> 8) & 0xf) #define IS_OPERATIONAL(sc) \ ((UEA_CHIP_VERSION(sc) != EAGLE_IV) ? \ (GET_STATUS(sc->stats.phy.state) == 2) : \ (sc->stats.phy.state == 7)) /* * Set of macros to handle unaligned data in the firmware blob. * The FW_GET_BYTE() macro is provided only for consistency. 
*/ #define FW_GET_BYTE(p) *((__u8 *) (p)) #define FW_DIR "ueagle-atm/" #define UEA_FW_NAME_MAX 30 #define NB_MODEM 4 #define BULK_TIMEOUT 300 #define CTRL_TIMEOUT 1000 #define ACK_TIMEOUT msecs_to_jiffies(3000) #define UEA_INTR_IFACE_NO 0 #define UEA_US_IFACE_NO 1 #define UEA_DS_IFACE_NO 2 #define FASTEST_ISO_INTF 8 #define UEA_BULK_DATA_PIPE 0x02 #define UEA_IDMA_PIPE 0x04 #define UEA_INTR_PIPE 0x04 #define UEA_ISO_DATA_PIPE 0x08 #define UEA_E1_SET_BLOCK 0x0001 #define UEA_E4_SET_BLOCK 0x002c #define UEA_SET_MODE 0x0003 #define UEA_SET_2183_DATA 0x0004 #define UEA_SET_TIMEOUT 0x0011 #define UEA_LOOPBACK_OFF 0x0002 #define UEA_LOOPBACK_ON 0x0003 #define UEA_BOOT_IDMA 0x0006 #define UEA_START_RESET 0x0007 #define UEA_END_RESET 0x0008 #define UEA_SWAP_MAILBOX (0x3fcd | 0x4000) #define UEA_MPTX_START (0x3fce | 0x4000) #define UEA_MPTX_MAILBOX (0x3fd6 | 0x4000) #define UEA_MPRX_MAILBOX (0x3fdf | 0x4000) /* block information in eagle4 dsp firmware */ struct block_index { __le32 PageOffset; __le32 NotLastBlock; __le32 dummy; __le32 PageSize; __le32 PageAddress; __le16 dummy1; __le16 PageNumber; } __attribute__ ((packed)); #define E4_IS_BOOT_PAGE(PageSize) ((le32_to_cpu(PageSize)) & 0x80000000) #define E4_PAGE_BYTES(PageSize) ((le32_to_cpu(PageSize) & 0x7fffffff) * 4) #define E4_L1_STRING_HEADER 0x10 #define E4_MAX_PAGE_NUMBER 0x58 #define E4_NO_SWAPPAGE_HEADERS 0x31 /* l1_code is eagle4 dsp firmware format */ struct l1_code { u8 string_header[E4_L1_STRING_HEADER]; u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER]; struct block_index page_header[E4_NO_SWAPPAGE_HEADERS]; u8 code [0]; } __attribute__ ((packed)); /* structures describing a block within a DSP page */ struct block_info_e1 { __le16 wHdr; __le16 wAddress; __le16 wSize; __le16 wOvlOffset; __le16 wOvl; /* overlay */ __le16 wLast; } __attribute__ ((packed)); #define E1_BLOCK_INFO_SIZE 12 struct block_info_e4 { __be16 wHdr; __u8 bBootPage; __u8 bPageNumber; __be32 dwSize; __be32 dwAddress; __be16 wReserved; } 
__attribute__ ((packed)); #define E4_BLOCK_INFO_SIZE 14 #define UEA_BIHDR 0xabcd #define UEA_RESERVED 0xffff /* constants describing cmv type */ #define E1_PREAMBLE 0x535c #define E1_MODEMTOHOST 0x01 #define E1_HOSTTOMODEM 0x10 #define E1_MEMACCESS 0x1 #define E1_ADSLDIRECTIVE 0x7 #define E1_FUNCTION_TYPE(f) ((f) >> 4) #define E1_FUNCTION_SUBTYPE(f) ((f) & 0x0f) #define E4_MEMACCESS 0 #define E4_ADSLDIRECTIVE 0xf #define E4_FUNCTION_TYPE(f) ((f) >> 8) #define E4_FUNCTION_SIZE(f) ((f) & 0x0f) #define E4_FUNCTION_SUBTYPE(f) (((f) >> 4) & 0x0f) /* for MEMACCESS */ #define E1_REQUESTREAD 0x0 #define E1_REQUESTWRITE 0x1 #define E1_REPLYREAD 0x2 #define E1_REPLYWRITE 0x3 #define E4_REQUESTREAD 0x0 #define E4_REQUESTWRITE 0x4 #define E4_REPLYREAD (E4_REQUESTREAD | 1) #define E4_REPLYWRITE (E4_REQUESTWRITE | 1) /* for ADSLDIRECTIVE */ #define E1_KERNELREADY 0x0 #define E1_MODEMREADY 0x1 #define E4_KERNELREADY 0x0 #define E4_MODEMREADY 0x1 #define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf)) #define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | ((st) & 0xf) << 4 | ((s) & 0xf)) #define E1_MAKESA(a, b, c, d) \ (((c) & 0xff) << 24 | \ ((d) & 0xff) << 16 | \ ((a) & 0xff) << 8 | \ ((b) & 0xff)) #define E1_GETSA1(a) ((a >> 8) & 0xff) #define E1_GETSA2(a) (a & 0xff) #define E1_GETSA3(a) ((a >> 24) & 0xff) #define E1_GETSA4(a) ((a >> 16) & 0xff) #define E1_SA_CNTL E1_MAKESA('C', 'N', 'T', 'L') #define E1_SA_DIAG E1_MAKESA('D', 'I', 'A', 'G') #define E1_SA_INFO E1_MAKESA('I', 'N', 'F', 'O') #define E1_SA_OPTN E1_MAKESA('O', 'P', 'T', 'N') #define E1_SA_RATE E1_MAKESA('R', 'A', 'T', 'E') #define E1_SA_STAT E1_MAKESA('S', 'T', 'A', 'T') #define E4_SA_CNTL 1 #define E4_SA_STAT 2 #define E4_SA_INFO 3 #define E4_SA_TEST 4 #define E4_SA_OPTN 5 #define E4_SA_RATE 6 #define E4_SA_DIAG 7 #define E4_SA_CNFG 8 /* structures representing a CMV (Configuration and Management Variable) */ struct cmv_e1 { __le16 wPreamble; __u8 bDirection; __u8 bFunction; __le16 wIndex; __le32 
dwSymbolicAddress; __le16 wOffsetAddress; __le32 dwData; } __attribute__ ((packed)); struct cmv_e4 { __be16 wGroup; __be16 wFunction; __be16 wOffset; __be16 wAddress; __be32 dwData [6]; } __attribute__ ((packed)); /* structures representing swap information */ struct swap_info_e1 { __u8 bSwapPageNo; __u8 bOvl; /* overlay */ } __attribute__ ((packed)); struct swap_info_e4 { __u8 bSwapPageNo; } __attribute__ ((packed)); /* structures representing interrupt data */ #define e1_bSwapPageNo u.e1.s1.swapinfo.bSwapPageNo #define e1_bOvl u.e1.s1.swapinfo.bOvl #define e4_bSwapPageNo u.e4.s1.swapinfo.bSwapPageNo #define INT_LOADSWAPPAGE 0x0001 #define INT_INCOMINGCMV 0x0002 union intr_data_e1 { struct { struct swap_info_e1 swapinfo; __le16 wDataSize; } __attribute__ ((packed)) s1; struct { struct cmv_e1 cmv; __le16 wDataSize; } __attribute__ ((packed)) s2; } __attribute__ ((packed)); union intr_data_e4 { struct { struct swap_info_e4 swapinfo; __le16 wDataSize; } __attribute__ ((packed)) s1; struct { struct cmv_e4 cmv; __le16 wDataSize; } __attribute__ ((packed)) s2; } __attribute__ ((packed)); struct intr_pkt { __u8 bType; __u8 bNotification; __le16 wValue; __le16 wIndex; __le16 wLength; __le16 wInterrupt; union { union intr_data_e1 e1; union intr_data_e4 e4; } u; } __attribute__ ((packed)); #define E1_INTR_PKT_SIZE 28 #define E4_INTR_PKT_SIZE 64 static struct usb_driver uea_driver; static DEFINE_MUTEX(uea_mutex); static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"}; static int modem_index; static unsigned int debug; static unsigned int altsetting[NB_MODEM] = {[0 ... 
(NB_MODEM - 1)] = FASTEST_ISO_INTF}; static int sync_wait[NB_MODEM]; static char *cmv_file[NB_MODEM]; static int annex[NB_MODEM]; module_param(debug, uint, 0644); MODULE_PARM_DESC(debug, "module debug level (0=off,1=on,2=verbose)"); module_param_array(altsetting, uint, NULL, 0644); MODULE_PARM_DESC(altsetting, "alternate setting for incoming traffic: 0=bulk, " "1=isoc slowest, ... , 8=isoc fastest (default)"); module_param_array(sync_wait, bool, NULL, 0644); MODULE_PARM_DESC(sync_wait, "wait the synchronisation before starting ATM"); module_param_array(cmv_file, charp, NULL, 0644); MODULE_PARM_DESC(cmv_file, "file name with configuration and management variables"); module_param_array(annex, uint, NULL, 0644); MODULE_PARM_DESC(annex, "manually set annex a/b (0=auto, 1=annex a, 2=annex b)"); #define uea_wait(sc, cond, timeo) \ ({ \ int _r = wait_event_interruptible_timeout(sc->sync_q, \ (cond) || kthread_should_stop(), timeo); \ if (kthread_should_stop()) \ _r = -ENODEV; \ _r; \ }) #define UPDATE_ATM_STAT(type, val) \ do { \ if (sc->usbatm->atm_dev) \ sc->usbatm->atm_dev->type = val; \ } while (0) /* Firmware loading */ #define LOAD_INTERNAL 0xA0 #define F8051_USBCS 0x7f92 /** * uea_send_modem_cmd - Send a command for pre-firmware devices. */ static int uea_send_modem_cmd(struct usb_device *usb, u16 addr, u16 size, const u8 *buff) { int ret = -ENOMEM; u8 *xfer_buff; xfer_buff = kmemdup(buff, size, GFP_KERNEL); if (xfer_buff) { ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), LOAD_INTERNAL, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, addr, 0, xfer_buff, size, CTRL_TIMEOUT); kfree(xfer_buff); } if (ret < 0) return ret; return (ret == size) ? 
0 : -EIO; } static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *context) { struct usb_device *usb = context; const u8 *pfw; u8 value; u32 crc = 0; int ret, size; uea_enters(usb); if (!fw_entry) { uea_err(usb, "firmware is not available\n"); goto err; } pfw = fw_entry->data; size = fw_entry->size; if (size < 4) goto err_fw_corrupted; crc = get_unaligned_le32(pfw); pfw += 4; size -= 4; if (crc32_be(0, pfw, size) != crc) goto err_fw_corrupted; /* * Start to upload firmware : send reset */ value = 1; ret = uea_send_modem_cmd(usb, F8051_USBCS, sizeof(value), &value); if (ret < 0) { uea_err(usb, "modem reset failed with error %d\n", ret); goto err; } while (size > 3) { u8 len = FW_GET_BYTE(pfw); u16 add = get_unaligned_le16(pfw + 1); size -= len + 3; if (size < 0) goto err_fw_corrupted; ret = uea_send_modem_cmd(usb, add, len, pfw + 3); if (ret < 0) { uea_err(usb, "uploading firmware data failed " "with error %d\n", ret); goto err; } pfw += len + 3; } if (size != 0) goto err_fw_corrupted; /* * Tell the modem we finish : de-assert reset */ value = 0; ret = uea_send_modem_cmd(usb, F8051_USBCS, 1, &value); if (ret < 0) uea_err(usb, "modem de-assert failed with error %d\n", ret); else uea_info(usb, "firmware uploaded\n"); uea_leaves(usb); return; err_fw_corrupted: uea_err(usb, "firmware is corrupted\n"); err: uea_leaves(usb); } /** * uea_load_firmware - Load usb firmware for pre-firmware devices. 
*/ static int uea_load_firmware(struct usb_device *usb, unsigned int ver) { int ret; char *fw_name = FW_DIR "eagle.fw"; uea_enters(usb); uea_info(usb, "pre-firmware device, uploading firmware\n"); switch (ver) { case ADI930: fw_name = FW_DIR "adi930.fw"; break; case EAGLE_I: fw_name = FW_DIR "eagleI.fw"; break; case EAGLE_II: fw_name = FW_DIR "eagleII.fw"; break; case EAGLE_III: fw_name = FW_DIR "eagleIII.fw"; break; case EAGLE_IV: fw_name = FW_DIR "eagleIV.fw"; break; } ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev, usb, uea_upload_pre_firmware); if (ret) uea_err(usb, "firmware %s is not available\n", fw_name); else uea_info(usb, "loading firmware %s\n", fw_name); uea_leaves(usb); return ret; } /* modem management : dsp firmware, send/read CMV, monitoring statistic */ /* * Make sure that the DSP code provided is safe to use. */ static int check_dsp_e1(const u8 *dsp, unsigned int len) { u8 pagecount, blockcount; u16 blocksize; u32 pageoffset; unsigned int i, j, p, pp; pagecount = FW_GET_BYTE(dsp); p = 1; /* enough space for page offsets? */ if (p + 4 * pagecount > len) return 1; for (i = 0; i < pagecount; i++) { pageoffset = get_unaligned_le32(dsp + p); p += 4; if (pageoffset == 0) continue; /* enough space for blockcount? */ if (pageoffset >= len) return 1; pp = pageoffset; blockcount = FW_GET_BYTE(dsp + pp); pp += 1; for (j = 0; j < blockcount; j++) { /* enough space for block header? */ if (pp + 4 > len) return 1; pp += 2; /* skip blockaddr */ blocksize = get_unaligned_le16(dsp + pp); pp += 2; /* enough space for block data? 
*/ if (pp + blocksize > len) return 1; pp += blocksize; } } return 0; } static int check_dsp_e4(const u8 *dsp, int len) { int i; struct l1_code *p = (struct l1_code *) dsp; unsigned int sum = p->code - dsp; if (len < sum) return 1; if (strcmp("STRATIPHY ANEXA", p->string_header) != 0 && strcmp("STRATIPHY ANEXB", p->string_header) != 0) return 1; for (i = 0; i < E4_MAX_PAGE_NUMBER; i++) { struct block_index *blockidx; u8 blockno = p->page_number_to_block_index[i]; if (blockno >= E4_NO_SWAPPAGE_HEADERS) continue; do { u64 l; if (blockno >= E4_NO_SWAPPAGE_HEADERS) return 1; blockidx = &p->page_header[blockno++]; if ((u8 *)(blockidx + 1) - dsp >= len) return 1; if (le16_to_cpu(blockidx->PageNumber) != i) return 1; l = E4_PAGE_BYTES(blockidx->PageSize); sum += l; l += le32_to_cpu(blockidx->PageOffset); if (l > len) return 1; /* zero is zero regardless endianes */ } while (blockidx->NotLastBlock); } return (sum == len) ? 0 : 1; } /* * send data to the idma pipe * */ static int uea_idma_write(struct uea_softc *sc, const void *data, u32 size) { int ret = -ENOMEM; u8 *xfer_buff; int bytes_read; xfer_buff = kmemdup(data, size, GFP_KERNEL); if (!xfer_buff) { uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n"); return ret; } ret = usb_bulk_msg(sc->usb_dev, usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE), xfer_buff, size, &bytes_read, BULK_TIMEOUT); kfree(xfer_buff); if (ret < 0) return ret; if (size != bytes_read) { uea_err(INS_TO_USBDEV(sc), "size != bytes_read %d %d\n", size, bytes_read); return -EIO; } return 0; } static int request_dsp(struct uea_softc *sc) { int ret; char *dsp_name; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { if (IS_ISDN(sc)) dsp_name = FW_DIR "DSP4i.bin"; else dsp_name = FW_DIR "DSP4p.bin"; } else if (UEA_CHIP_VERSION(sc) == ADI930) { if (IS_ISDN(sc)) dsp_name = FW_DIR "DSP9i.bin"; else dsp_name = FW_DIR "DSP9p.bin"; } else { if (IS_ISDN(sc)) dsp_name = FW_DIR "DSPei.bin"; else dsp_name = FW_DIR "DSPep.bin"; } ret = request_firmware(&sc->dsp_firm, dsp_name, 
&sc->usb_dev->dev); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "requesting firmware %s failed with error %d\n", dsp_name, ret); return ret; } if (UEA_CHIP_VERSION(sc) == EAGLE_IV) ret = check_dsp_e4(sc->dsp_firm->data, sc->dsp_firm->size); else ret = check_dsp_e1(sc->dsp_firm->data, sc->dsp_firm->size); if (ret) { uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", dsp_name); release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; return -EILSEQ; } return 0; } /* * The uea_load_page() function must be called within a process context */ static void uea_load_page_e1(struct work_struct *work) { struct uea_softc *sc = container_of(work, struct uea_softc, task); u16 pageno = sc->pageno; u16 ovl = sc->ovl; struct block_info_e1 bi; const u8 *p; u8 pagecount, blockcount; u16 blockaddr, blocksize; u32 pageoffset; int i; /* reload firmware when reboot start and it's loaded already */ if (ovl == 0 && pageno == 0 && sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } if (sc->dsp_firm == NULL && request_dsp(sc) < 0) return; p = sc->dsp_firm->data; pagecount = FW_GET_BYTE(p); p += 1; if (pageno >= pagecount) goto bad1; p += 4 * pageno; pageoffset = get_unaligned_le32(p); if (pageoffset == 0) goto bad1; p = sc->dsp_firm->data + pageoffset; blockcount = FW_GET_BYTE(p); p += 1; uea_dbg(INS_TO_USBDEV(sc), "sending %u blocks for DSP page %u\n", blockcount, pageno); bi.wHdr = cpu_to_le16(UEA_BIHDR); bi.wOvl = cpu_to_le16(ovl); bi.wOvlOffset = cpu_to_le16(ovl | 0x8000); for (i = 0; i < blockcount; i++) { blockaddr = get_unaligned_le16(p); p += 2; blocksize = get_unaligned_le16(p); p += 2; bi.wSize = cpu_to_le16(blocksize); bi.wAddress = cpu_to_le16(blockaddr); bi.wLast = cpu_to_le16((i == blockcount - 1) ? 
1 : 0); /* send block info through the IDMA pipe */ if (uea_idma_write(sc, &bi, E1_BLOCK_INFO_SIZE)) goto bad2; /* send block data through the IDMA pipe */ if (uea_idma_write(sc, p, blocksize)) goto bad2; p += blocksize; } return; bad2: uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", i); return; bad1: uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno); } static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot) { struct block_info_e4 bi; struct block_index *blockidx; struct l1_code *p = (struct l1_code *) sc->dsp_firm->data; u8 blockno = p->page_number_to_block_index[pageno]; bi.wHdr = cpu_to_be16(UEA_BIHDR); bi.bBootPage = boot; bi.bPageNumber = pageno; bi.wReserved = cpu_to_be16(UEA_RESERVED); do { const u8 *blockoffset; unsigned int blocksize; blockidx = &p->page_header[blockno]; blocksize = E4_PAGE_BYTES(blockidx->PageSize); blockoffset = sc->dsp_firm->data + le32_to_cpu(blockidx->PageOffset); bi.dwSize = cpu_to_be32(blocksize); bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress)); uea_dbg(INS_TO_USBDEV(sc), "sending block %u for DSP page %u size %u address %x\n", blockno, pageno, blocksize, le32_to_cpu(blockidx->PageAddress)); /* send block info through the IDMA pipe */ if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE)) goto bad; /* send block data through the IDMA pipe */ if (uea_idma_write(sc, blockoffset, blocksize)) goto bad; blockno++; } while (blockidx->NotLastBlock); return; bad: uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", blockno); return; } static void uea_load_page_e4(struct work_struct *work) { struct uea_softc *sc = container_of(work, struct uea_softc, task); u8 pageno = sc->pageno; int i; struct block_info_e4 bi; struct l1_code *p; uea_dbg(INS_TO_USBDEV(sc), "sending DSP page %u\n", pageno); /* reload firmware when reboot start and it's loaded already */ if (pageno == 0 && sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } if (sc->dsp_firm == NULL && 
request_dsp(sc) < 0) return; p = (struct l1_code *) sc->dsp_firm->data; if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) { uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno); return; } if (pageno != 0) { __uea_load_page_e4(sc, pageno, 0); return; } uea_dbg(INS_TO_USBDEV(sc), "sending Main DSP page %u\n", p->page_header[0].PageNumber); for (i = 0; i < le16_to_cpu(p->page_header[0].PageNumber); i++) { if (E4_IS_BOOT_PAGE(p->page_header[i].PageSize)) __uea_load_page_e4(sc, i, 1); } uea_dbg(INS_TO_USBDEV(sc),"sending start bi\n"); bi.wHdr = cpu_to_be16(UEA_BIHDR); bi.bBootPage = 0; bi.bPageNumber = 0xff; bi.wReserved = cpu_to_be16(UEA_RESERVED); bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize)); bi.dwAddress = cpu_to_be32(le32_to_cpu(p->page_header[0].PageAddress)); /* send block info through the IDMA pipe */ if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE)) uea_err(INS_TO_USBDEV(sc), "sending DSP start bi failed\n"); } static inline void wake_up_cmv_ack(struct uea_softc *sc) { BUG_ON(sc->cmv_ack); sc->cmv_ack = 1; wake_up(&sc->sync_q); } static inline int wait_cmv_ack(struct uea_softc *sc) { int ret = uea_wait(sc, sc->cmv_ack , ACK_TIMEOUT); sc->cmv_ack = 0; uea_dbg(INS_TO_USBDEV(sc), "wait_event_timeout : %d ms\n", jiffies_to_msecs(ret)); if (ret < 0) return ret; return (ret == 0) ? 
-ETIMEDOUT : 0; } #define UCDC_SEND_ENCAPSULATED_COMMAND 0x00 static int uea_request(struct uea_softc *sc, u16 value, u16 index, u16 size, const void *data) { u8 *xfer_buff; int ret = -ENOMEM; xfer_buff = kmemdup(data, size, GFP_KERNEL); if (!xfer_buff) { uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n"); return ret; } ret = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0), UCDC_SEND_ENCAPSULATED_COMMAND, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, xfer_buff, size, CTRL_TIMEOUT); kfree(xfer_buff); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "usb_control_msg error %d\n", ret); return ret; } if (ret != size) { uea_err(INS_TO_USBDEV(sc), "usb_control_msg send only %d bytes (instead of %d)\n", ret, size); return -EIO; } return 0; } static int uea_cmv_e1(struct uea_softc *sc, u8 function, u32 address, u16 offset, u32 data) { struct cmv_e1 cmv; int ret; uea_enters(INS_TO_USBDEV(sc)); uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, " "offset : 0x%04x, data : 0x%08x\n", E1_FUNCTION_TYPE(function), E1_FUNCTION_SUBTYPE(function), E1_GETSA1(address), E1_GETSA2(address), E1_GETSA3(address), E1_GETSA4(address), offset, data); /* we send a request, but we expect a reply */ sc->cmv_dsc.e1.function = function | 0x2; sc->cmv_dsc.e1.idx++; sc->cmv_dsc.e1.address = address; sc->cmv_dsc.e1.offset = offset; cmv.wPreamble = cpu_to_le16(E1_PREAMBLE); cmv.bDirection = E1_HOSTTOMODEM; cmv.bFunction = function; cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx); put_unaligned_le32(address, &cmv.dwSymbolicAddress); cmv.wOffsetAddress = cpu_to_le16(offset); put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData); ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); if (ret < 0) return ret; ret = wait_cmv_ack(sc); uea_leaves(INS_TO_USBDEV(sc)); return ret; } static int uea_cmv_e4(struct uea_softc *sc, u16 function, u16 group, u16 address, u16 offset, u32 data) { struct cmv_e4 cmv; int ret; uea_enters(INS_TO_USBDEV(sc)); 
memset(&cmv, 0, sizeof(cmv)); uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Group : 0x%04x, " "Address : 0x%04x, offset : 0x%04x, data : 0x%08x\n", E4_FUNCTION_TYPE(function), E4_FUNCTION_SUBTYPE(function), group, address, offset, data); /* we send a request, but we expect a reply */ sc->cmv_dsc.e4.function = function | (0x1 << 4); sc->cmv_dsc.e4.offset = offset; sc->cmv_dsc.e4.address = address; sc->cmv_dsc.e4.group = group; cmv.wFunction = cpu_to_be16(function); cmv.wGroup = cpu_to_be16(group); cmv.wAddress = cpu_to_be16(address); cmv.wOffset = cpu_to_be16(offset); cmv.dwData[0] = cpu_to_be32(data); ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); if (ret < 0) return ret; ret = wait_cmv_ack(sc); uea_leaves(INS_TO_USBDEV(sc)); return ret; } static inline int uea_read_cmv_e1(struct uea_softc *sc, u32 address, u16 offset, u32 *data) { int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTREAD), address, offset, 0); if (ret < 0) uea_err(INS_TO_USBDEV(sc), "reading cmv failed with error %d\n", ret); else *data = sc->data; return ret; } static inline int uea_read_cmv_e4(struct uea_softc *sc, u8 size, u16 group, u16 address, u16 offset, u32 *data) { int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTREAD, size), group, address, offset, 0); if (ret < 0) uea_err(INS_TO_USBDEV(sc), "reading cmv failed with error %d\n", ret); else { *data = sc->data; /* size is in 16-bit word quantities */ if (size > 2) *(data + 1) = sc->data1; } return ret; } static inline int uea_write_cmv_e1(struct uea_softc *sc, u32 address, u16 offset, u32 data) { int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTWRITE), address, offset, data); if (ret < 0) uea_err(INS_TO_USBDEV(sc), "writing cmv failed with error %d\n", ret); return ret; } static inline int uea_write_cmv_e4(struct uea_softc *sc, u8 size, u16 group, u16 address, u16 offset, u32 data) { int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTWRITE, size), 
		group, address, offset, data);
	if (ret < 0)
		uea_err(INS_TO_USBDEV(sc),
			"writing cmv failed with error %d\n", ret);
	return ret;
}

/*
 * Adjust the modem's bulk-transfer timeout according to the negotiated
 * downstream rate.
 *
 * In bulk mode the modem has problems at high rates; changing the
 * internal timing could improve things, but the value is mysterious.
 * The ADI930 doesn't support it (-EPIPE error), so it is skipped there,
 * as well as when an alternate setting is in use or when the rate has
 * not changed since the last call.
 */
static void uea_set_bulk_timeout(struct uea_softc *sc, u32 dsrate)
{
	int ret;
	u16 timeout;

	if (UEA_CHIP_VERSION(sc) == ADI930 ||
			altsetting[sc->modem_index] > 0 ||
			sc->stats.phy.dsrate == dsrate)
		return;

	/* Original timing (1 Mbit/s) from ADI (used in the Windows driver) */
	timeout = (dsrate <= 1024*1024) ? 0 : 1;
	ret = uea_request(sc, UEA_SET_TIMEOUT, timeout, 0, NULL);
	uea_info(INS_TO_USBDEV(sc), "setting new timeout %d%s\n",
			timeout, ret < 0 ? " failed" : "");
}

/*
 * Monitor the modem and update the stat
 * return 0 if everything is ok
 * return < 0 if an error occurs (-EAGAIN reboot needed)
 */
static int uea_stat_e1(struct uea_softc *sc)
{
	u32 data;
	int ret;

	uea_enters(INS_TO_USBDEV(sc));
	/* remember the previous state so the "just became operational"
	 * transition can be detected further down */
	data = sc->stats.phy.state;

	ret = uea_read_cmv_e1(sc, E1_SA_STAT, 0, &sc->stats.phy.state);
	if (ret < 0)
		return ret;

	switch (GET_STATUS(sc->stats.phy.state)) {
	case 0:		/* not yet synchronized */
		uea_dbg(INS_TO_USBDEV(sc),
		       "modem not yet synchronized\n");
		return 0;

	case 1:		/* initialization */
		uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
		return 0;

	case 2:		/* operational */
		uea_vdbg(INS_TO_USBDEV(sc), "modem operational\n");
		break;

	case 3:		/* fail ... */
		uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
			" (may be try other cmv/dsp)\n");
		return -EAGAIN;

	case 4 ... 6:	/* test state */
		uea_warn(INS_TO_USBDEV(sc),
				"modem in test mode - not supported\n");
		return -EAGAIN;

	case 7:		/* fast-retain ...
*/ uea_info(INS_TO_USBDEV(sc), "modem in fast-retain mode\n"); return 0; default: uea_err(INS_TO_USBDEV(sc), "modem invalid SW mode %d\n", GET_STATUS(sc->stats.phy.state)); return -EAGAIN; } if (GET_STATUS(data) != 2) { uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL); uea_info(INS_TO_USBDEV(sc), "modem operational\n"); /* release the dsp firmware as it is not needed until * the next failure */ if (sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } } /* always update it as atm layer could not be init when we switch to * operational state */ UPDATE_ATM_STAT(signal, ATM_PHY_SIG_FOUND); /* wake up processes waiting for synchronization */ wake_up(&sc->sync_q); ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 2, &sc->stats.phy.flags); if (ret < 0) return ret; sc->stats.phy.mflags |= sc->stats.phy.flags; /* in case of a flags ( for example delineation LOSS (& 0x10)), * we check the status again in order to detect the failure earlier */ if (sc->stats.phy.flags) { uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n", sc->stats.phy.flags); return 0; } ret = uea_read_cmv_e1(sc, E1_SA_RATE, 0, &data); if (ret < 0) return ret; uea_set_bulk_timeout(sc, (data >> 16) * 32); sc->stats.phy.dsrate = (data >> 16) * 32; sc->stats.phy.usrate = (data & 0xffff) * 32; UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424); ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 23, &data); if (ret < 0) return ret; sc->stats.phy.dsattenuation = (data & 0xff) / 2; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 47, &data); if (ret < 0) return ret; sc->stats.phy.usattenuation = (data & 0xff) / 2; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 25, &sc->stats.phy.dsmargin); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 49, &sc->stats.phy.usmargin); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 51, &sc->stats.phy.rxflow); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 52, &sc->stats.phy.txflow); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 54, 
&sc->stats.phy.dsunc); if (ret < 0) return ret; /* only for atu-c */ ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 58, &sc->stats.phy.usunc); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 53, &sc->stats.phy.dscorr); if (ret < 0) return ret; /* only for atu-c */ ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 57, &sc->stats.phy.uscorr); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_INFO, 8, &sc->stats.phy.vidco); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_INFO, 13, &sc->stats.phy.vidcpe); if (ret < 0) return ret; return 0; } static int uea_stat_e4(struct uea_softc *sc) { u32 data; u32 tmp_arr[2]; int ret; uea_enters(INS_TO_USBDEV(sc)); data = sc->stats.phy.state; /* XXX only need to be done before operationnal... */ ret = uea_read_cmv_e4(sc, 1, E4_SA_STAT, 0, 0, &sc->stats.phy.state); if (ret < 0) return ret; switch (sc->stats.phy.state) { case 0x0: /* not yet synchronized */ case 0x1: case 0x3: case 0x4: uea_dbg(INS_TO_USBDEV(sc), "modem not yet synchronized\n"); return 0; case 0x5: /* initialization */ case 0x6: case 0x9: case 0xa: uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n"); return 0; case 0x2: /* fail ... 
*/ uea_info(INS_TO_USBDEV(sc), "modem synchronization failed" " (may be try other cmv/dsp)\n"); return -EAGAIN; case 0x7: /* operational */ break; default: uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n", sc->stats.phy.state); return 0; } if (data != 7) { uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL); uea_info(INS_TO_USBDEV(sc), "modem operational\n"); /* release the dsp firmware as it is not needed until * the next failure */ if (sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } } /* always update it as atm layer could not be init when we switch to * operational state */ UPDATE_ATM_STAT(signal, ATM_PHY_SIG_FOUND); /* wake up processes waiting for synchronization */ wake_up(&sc->sync_q); /* TODO improve this state machine : * we need some CMV info : what they do and their unit * we should find the equivalent of eagle3- CMV */ /* check flags */ ret = uea_read_cmv_e4(sc, 1, E4_SA_DIAG, 0, 0, &sc->stats.phy.flags); if (ret < 0) return ret; sc->stats.phy.mflags |= sc->stats.phy.flags; /* in case of a flags ( for example delineation LOSS (& 0x10)), * we check the status again in order to detect the failure earlier */ if (sc->stats.phy.flags) { uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n", sc->stats.phy.flags); if (sc->stats.phy.flags & 1) //delineation LOSS return -EAGAIN; if (sc->stats.phy.flags & 0x4000) //Reset Flag return -EAGAIN; return 0; } /* rate data may be in upper or lower half of 64 bit word, strange */ ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 0, 0, tmp_arr); if (ret < 0) return ret; data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1]; sc->stats.phy.usrate = data / 1000; ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 1, 0, tmp_arr); if (ret < 0) return ret; data = (tmp_arr[0]) ? 
tmp_arr[0] : tmp_arr[1]; uea_set_bulk_timeout(sc, data / 1000); sc->stats.phy.dsrate = data / 1000; UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424); ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 1, &data); if (ret < 0) return ret; sc->stats.phy.dsattenuation = data / 10; ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 1, &data); if (ret < 0) return ret; sc->stats.phy.usattenuation = data / 10; ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 3, &data); if (ret < 0) return ret; sc->stats.phy.dsmargin = data / 2; ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 3, &data); if (ret < 0) return ret; sc->stats.phy.usmargin = data / 10; return 0; } static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver) { char file_arr[] = "CMVxy.bin"; char *file; /* set proper name corresponding modem version and line type */ if (cmv_file[sc->modem_index] == NULL) { if (UEA_CHIP_VERSION(sc) == ADI930) file_arr[3] = '9'; else if (UEA_CHIP_VERSION(sc) == EAGLE_IV) file_arr[3] = '4'; else file_arr[3] = 'e'; file_arr[4] = IS_ISDN(sc) ? 
'i' : 'p'; file = file_arr; } else file = cmv_file[sc->modem_index]; strcpy(cmv_name, FW_DIR); strlcat(cmv_name, file, UEA_FW_NAME_MAX); if (ver == 2) strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX); } static int request_cmvs_old(struct uea_softc *sc, void **cmvs, const struct firmware **fw) { int ret, size; u8 *data; char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */ cmvs_file_name(sc, cmv_name, 1); ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "requesting firmware %s failed with error %d\n", cmv_name, ret); return ret; } data = (u8 *) (*fw)->data; size = (*fw)->size; if (size < 1) goto err_fw_corrupted; if (size != *data * sizeof(struct uea_cmvs_v1) + 1) goto err_fw_corrupted; *cmvs = (void *)(data + 1); return *data; err_fw_corrupted: uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name); release_firmware(*fw); return -EILSEQ; } static int request_cmvs(struct uea_softc *sc, void **cmvs, const struct firmware **fw, int *ver) { int ret, size; u32 crc; u8 *data; char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */ cmvs_file_name(sc, cmv_name, 2); ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev); if (ret < 0) { /* if caller can handle old version, try to provide it */ if (*ver == 1) { uea_warn(INS_TO_USBDEV(sc), "requesting firmware %s failed, " "try to get older cmvs\n", cmv_name); return request_cmvs_old(sc, cmvs, fw); } uea_err(INS_TO_USBDEV(sc), "requesting firmware %s failed with error %d\n", cmv_name, ret); return ret; } size = (*fw)->size; data = (u8 *) (*fw)->data; if (size < 4 || strncmp(data, "cmv2", 4) != 0) { if (*ver == 1) { uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted, " "try to get older cmvs\n", cmv_name); release_firmware(*fw); return request_cmvs_old(sc, cmvs, fw); } goto err_fw_corrupted; } *ver = 2; data += 4; size -= 4; if (size < 5) goto err_fw_corrupted; crc = get_unaligned_le32(data); data += 4; size -= 4; if (crc32_be(0, data, size) != crc) goto 
err_fw_corrupted; if (size != *data * sizeof(struct uea_cmvs_v2) + 1) goto err_fw_corrupted; *cmvs = (void *) (data + 1); return *data; err_fw_corrupted: uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name); release_firmware(*fw); return -EILSEQ; } static int uea_send_cmvs_e1(struct uea_softc *sc) { int i, ret, len; void *cmvs_ptr; const struct firmware *cmvs_fw; int ver = 1; // we can handle v1 cmv firmware version; /* Enter in R-IDLE (cmv) until instructed otherwise */ ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1); if (ret < 0) return ret; /* Dump firmware version */ ret = uea_read_cmv_e1(sc, E1_SA_INFO, 10, &sc->stats.phy.firmid); if (ret < 0) return ret; uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n", sc->stats.phy.firmid); /* get options */ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver); if (ret < 0) return ret; /* send options */ if (ver == 1) { struct uea_cmvs_v1 *cmvs_v1 = cmvs_ptr; uea_warn(INS_TO_USBDEV(sc), "use deprecated cmvs version, " "please update your firmware\n"); for (i = 0; i < len; i++) { ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address), get_unaligned_le16(&cmvs_v1[i].offset), get_unaligned_le32(&cmvs_v1[i].data)); if (ret < 0) goto out; } } else if (ver == 2) { struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; for (i = 0; i < len; i++) { ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address), (u16) get_unaligned_le32(&cmvs_v2[i].offset), get_unaligned_le32(&cmvs_v2[i].data)); if (ret < 0) goto out; } } else { /* This realy should not happen */ uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver); goto out; } /* Enter in R-ACT-REQ */ ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2); uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n"); uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n"); out: release_firmware(cmvs_fw); return ret; } static int uea_send_cmvs_e4(struct uea_softc *sc) { int i, ret, len; void *cmvs_ptr; const struct firmware *cmvs_fw; int ver = 
2; // we can only handle v2 cmv firmware version; /* Enter in R-IDLE (cmv) until instructed otherwise */ ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1); if (ret < 0) return ret; /* Dump firmware version */ /* XXX don't read the 3th byte as it is always 6 */ ret = uea_read_cmv_e4(sc, 2, E4_SA_INFO, 55, 0, &sc->stats.phy.firmid); if (ret < 0) return ret; uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n", sc->stats.phy.firmid); /* get options */ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver); if (ret < 0) return ret; /* send options */ if (ver == 2) { struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; for (i = 0; i < len; i++) { ret = uea_write_cmv_e4(sc, 1, get_unaligned_le32(&cmvs_v2[i].group), get_unaligned_le32(&cmvs_v2[i].address), get_unaligned_le32(&cmvs_v2[i].offset), get_unaligned_le32(&cmvs_v2[i].data)); if (ret < 0) goto out; } } else { /* This realy should not happen */ uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver); goto out; } /* Enter in R-ACT-REQ */ ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2); uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n"); uea_info(INS_TO_USBDEV(sc), "modem started, waiting synchronization...\n"); out: release_firmware(cmvs_fw); return ret; } /* Start boot post firmware modem: * - send reset commands through usb control pipe * - start workqueue for DSP loading * - send CMV options to modem */ static int uea_start_reset(struct uea_softc *sc) { u16 zero = 0; /* ;-) */ int ret; uea_enters(INS_TO_USBDEV(sc)); uea_info(INS_TO_USBDEV(sc), "(re)booting started\n"); /* mask interrupt */ sc->booting = 1; /* We need to set this here because, a ack timeout could have occured, * but before we start the reboot, the ack occurs and set this to 1. * So we will failed to wait Ready CMV. 
*/ sc->cmv_ack = 0; UPDATE_ATM_STAT(signal, ATM_PHY_SIG_LOST); /* reset statistics */ memset(&sc->stats, 0, sizeof(struct uea_stats)); /* tell the modem that we want to boot in IDMA mode */ uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL); uea_request(sc, UEA_SET_MODE, UEA_BOOT_IDMA, 0, NULL); /* enter reset mode */ uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL); /* original driver use 200ms, but windows driver use 100ms */ ret = uea_wait(sc, 0, msecs_to_jiffies(100)); if (ret < 0) return ret; /* leave reset mode */ uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL); if (UEA_CHIP_VERSION(sc) != EAGLE_IV) { /* clear tx and rx mailboxes */ uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero); uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero); uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero); } ret = uea_wait(sc, 0, msecs_to_jiffies(1000)); if (ret < 0) return ret; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1); else sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY); /* demask interrupt */ sc->booting = 0; /* start loading DSP */ sc->pageno = 0; sc->ovl = 0; queue_work(sc->work_q, &sc->task); /* wait for modem ready CMV */ ret = wait_cmv_ack(sc); if (ret < 0) return ret; uea_vdbg(INS_TO_USBDEV(sc), "Ready CMV received\n"); ret = sc->send_cmvs(sc); if (ret < 0) return ret; sc->reset = 0; uea_leaves(INS_TO_USBDEV(sc)); return ret; } /* * In case of an error wait 1s before rebooting the modem * if the modem don't request reboot (-EAGAIN). * Monitor the modem every 1s. 
*/ static int uea_kthread(void *data) { struct uea_softc *sc = data; int ret = -EAGAIN; set_freezable(); uea_enters(INS_TO_USBDEV(sc)); while (!kthread_should_stop()) { if (ret < 0 || sc->reset) ret = uea_start_reset(sc); if (!ret) ret = sc->stat(sc); if (ret != -EAGAIN) uea_wait(sc, 0, msecs_to_jiffies(1000)); try_to_freeze(); } uea_leaves(INS_TO_USBDEV(sc)); return ret; } /* Load second usb firmware for ADI930 chip */ static int load_XILINX_firmware(struct uea_softc *sc) { const struct firmware *fw_entry; int ret, size, u, ln; const u8 *pfw; u8 value; char *fw_name = FW_DIR "930-fpga.bin"; uea_enters(INS_TO_USBDEV(sc)); ret = request_firmware(&fw_entry, fw_name, &sc->usb_dev->dev); if (ret) { uea_err(INS_TO_USBDEV(sc), "firmware %s is not available\n", fw_name); goto err0; } pfw = fw_entry->data; size = fw_entry->size; if (size != 0x577B) { uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", fw_name); ret = -EILSEQ; goto err1; } for (u = 0; u < size; u += ln) { ln = min(size - u, 64); ret = uea_request(sc, 0xe, 0, ln, pfw + u); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "elsa download data failed (%d)\n", ret); goto err1; } } /* finish to send the fpga */ ret = uea_request(sc, 0xe, 1, 0, NULL); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "elsa download data failed (%d)\n", ret); goto err1; } /* Tell the modem we finish : de-assert reset */ value = 0; ret = uea_send_modem_cmd(sc->usb_dev, 0xe, 1, &value); if (ret < 0) uea_err(sc->usb_dev, "elsa de-assert failed with error %d\n", ret); err1: release_firmware(fw_entry); err0: uea_leaves(INS_TO_USBDEV(sc)); return ret; } /* The modem send us an ack. 
First we check that it is the one we expect. */
/*
 * Interrupt-context handler for an incoming E1 CMV packet: validate the
 * preamble and direction, match the packet against the outstanding
 * request recorded in sc->cmv_dsc.e1, store the returned data in
 * sc->data and wake the waiter blocked in wait_cmv_ack().
 */
static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
{
	struct cmv_dsc_e1 *dsc = &sc->cmv_dsc.e1;
	struct cmv_e1 *cmv = &intr->u.e1.s2.cmv;

	uea_enters(INS_TO_USBDEV(sc));
	if (le16_to_cpu(cmv->wPreamble) != E1_PREAMBLE)
		goto bad1;

	if (cmv->bDirection != E1_MODEMTOHOST)
		goto bad1;

	/* FIXME : ADI930 reply wrong preambule (func = 2, sub = 2) to
	 * the first MEMACESS cmv. Ignore it...
	 */
	if (cmv->bFunction != dsc->function) {
		if (UEA_CHIP_VERSION(sc) == ADI930
				&& cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
			/* patch the reply so it matches what we asked for */
			cmv->wIndex = cpu_to_le16(dsc->idx);
			put_unaligned_le32(dsc->address,
						&cmv->dwSymbolicAddress);
			cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
		} else
			goto bad2;
	}

	/* the MODEMREADY directive carries no data; just signal the ack */
	if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
						E1_MODEMREADY)) {
		wake_up_cmv_ack(sc);
		uea_leaves(INS_TO_USBDEV(sc));
		return;
	}

	/* in case of MEMACCESS */
	if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
	    get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
	    le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
		goto bad2;

	sc->data = get_unaligned_le32(&cmv->dwData);
	/* undo the 16-bit half-word swap applied by uea_cmv_e1() on send */
	sc->data = sc->data << 16 | sc->data >> 16;

	wake_up_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad2:
	uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
			"Function : %d, Subfunction : %d\n",
			E1_FUNCTION_TYPE(cmv->bFunction),
			E1_FUNCTION_SUBTYPE(cmv->bFunction));
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad1:
	uea_err(INS_TO_USBDEV(sc), "invalid cmv received, "
			"wPreamble %d, bDirection %d\n",
			le16_to_cpu(cmv->wPreamble), cmv->bDirection);
	uea_leaves(INS_TO_USBDEV(sc));
}

/* The modem send us an ack.
First we check that it is the one we expect. */
/*
 * Interrupt-context handler for an incoming E4 CMV packet: match it
 * against the outstanding request recorded in sc->cmv_dsc.e4, store
 * the two returned 32-bit words in sc->data / sc->data1 and wake the
 * waiter blocked in wait_cmv_ack().
 */
static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
{
	struct cmv_dsc_e4 *dsc = &sc->cmv_dsc.e4;
	struct cmv_e4 *cmv = &intr->u.e4.s2.cmv;

	uea_enters(INS_TO_USBDEV(sc));
	uea_dbg(INS_TO_USBDEV(sc), "cmv %x %x %x %x %x %x\n",
		be16_to_cpu(cmv->wGroup), be16_to_cpu(cmv->wFunction),
		be16_to_cpu(cmv->wOffset), be16_to_cpu(cmv->wAddress),
		be32_to_cpu(cmv->dwData[0]), be32_to_cpu(cmv->dwData[1]));

	if (be16_to_cpu(cmv->wFunction) != dsc->function)
		goto bad2;

	/* the MODEMREADY directive carries no data; just signal the ack */
	if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
							E4_MODEMREADY, 1)) {
		wake_up_cmv_ack(sc);
		uea_leaves(INS_TO_USBDEV(sc));
		return;
	}

	/* in case of MEMACCESS */
	if (be16_to_cpu(cmv->wOffset) != dsc->offset ||
	    be16_to_cpu(cmv->wGroup) != dsc->group ||
	    be16_to_cpu(cmv->wAddress) != dsc->address)
		goto bad2;

	sc->data = be32_to_cpu(cmv->dwData[0]);
	sc->data1 = be32_to_cpu(cmv->dwData[1]);
	wake_up_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad2:
	uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
			"Function : %d, Subfunction : %d\n",
			E4_FUNCTION_TYPE(cmv->wFunction),
			E4_FUNCTION_SUBTYPE(cmv->wFunction));
	uea_leaves(INS_TO_USBDEV(sc));
	return;
}

/* queue a work item to load E1 DSP page intr->e1_bSwapPageNo;
 * sc->ovl stores e1_bOvl with its two nibbles swapped */
static void uea_schedule_load_page_e1(struct uea_softc *sc,
				struct intr_pkt *intr)
{
	sc->pageno = intr->e1_bSwapPageNo;
	sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
	queue_work(sc->work_q, &sc->task);
}

/* queue a work item to load E4 DSP page intr->e4_bSwapPageNo */
static void uea_schedule_load_page_e4(struct uea_softc *sc,
				struct intr_pkt *intr)
{
	sc->pageno = intr->e4_bSwapPageNo;
	queue_work(sc->work_q, &sc->task);
}

/*
 * interrupt handler
 *
 * Completion callback for the interrupt URB: dispatches DSP page-swap
 * requests and incoming CMVs, then resubmits the URB.  Interrupts are
 * ignored while sc->booting is set.
 */
static void uea_intr(struct urb *urb)
{
	struct uea_softc *sc = urb->context;
	struct intr_pkt *intr = urb->transfer_buffer;
	int status = urb->status;

	uea_enters(INS_TO_USBDEV(sc));

	/* a failed URB is not resubmitted */
	if (unlikely(status < 0)) {
		uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n",
			status);
		return;
	}

	/* device-to-host interrupt */
	if (intr->bType != 0x08 || sc->booting) {
		uea_err(INS_TO_USBDEV(sc), "wrong interrupt\n");
		goto resubmit;
	}
switch (le16_to_cpu(intr->wInterrupt)) { case INT_LOADSWAPPAGE: sc->schedule_load_page(sc, intr); break; case INT_INCOMINGCMV: sc->dispatch_cmv(sc, intr); break; default: uea_err(INS_TO_USBDEV(sc), "unknown interrupt %u\n", le16_to_cpu(intr->wInterrupt)); } resubmit: usb_submit_urb(sc->urb_int, GFP_ATOMIC); } /* * Start the modem : init the data and start kernel thread */ static int uea_boot(struct uea_softc *sc) { int ret, size; struct intr_pkt *intr; uea_enters(INS_TO_USBDEV(sc)); if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { size = E4_INTR_PKT_SIZE; sc->dispatch_cmv = uea_dispatch_cmv_e4; sc->schedule_load_page = uea_schedule_load_page_e4; sc->stat = uea_stat_e4; sc->send_cmvs = uea_send_cmvs_e4; INIT_WORK(&sc->task, uea_load_page_e4); } else { size = E1_INTR_PKT_SIZE; sc->dispatch_cmv = uea_dispatch_cmv_e1; sc->schedule_load_page = uea_schedule_load_page_e1; sc->stat = uea_stat_e1; sc->send_cmvs = uea_send_cmvs_e1; INIT_WORK(&sc->task, uea_load_page_e1); } init_waitqueue_head(&sc->sync_q); sc->work_q = create_workqueue("ueagle-dsp"); if (!sc->work_q) { uea_err(INS_TO_USBDEV(sc), "cannot allocate workqueue\n"); uea_leaves(INS_TO_USBDEV(sc)); return -ENOMEM; } if (UEA_CHIP_VERSION(sc) == ADI930) load_XILINX_firmware(sc); intr = kmalloc(size, GFP_KERNEL); if (!intr) { uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt package\n"); goto err0; } sc->urb_int = usb_alloc_urb(0, GFP_KERNEL); if (!sc->urb_int) { uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt URB\n"); goto err1; } usb_fill_int_urb(sc->urb_int, sc->usb_dev, usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE), intr, size, uea_intr, sc, sc->usb_dev->actconfig->interface[0]->altsetting[0]. 
endpoint[0].desc.bInterval); ret = usb_submit_urb(sc->urb_int, GFP_KERNEL); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "urb submition failed with error %d\n", ret); goto err1; } sc->kthread = kthread_run(uea_kthread, sc, "ueagle-atm"); if (sc->kthread == ERR_PTR(-ENOMEM)) { uea_err(INS_TO_USBDEV(sc), "failed to create thread\n"); goto err2; } uea_leaves(INS_TO_USBDEV(sc)); return 0; err2: usb_kill_urb(sc->urb_int); err1: usb_free_urb(sc->urb_int); sc->urb_int = NULL; kfree(intr); err0: destroy_workqueue(sc->work_q); uea_leaves(INS_TO_USBDEV(sc)); return -ENOMEM; } /* * Stop the modem : kill kernel thread and free data */ static void uea_stop(struct uea_softc *sc) { int ret; uea_enters(INS_TO_USBDEV(sc)); ret = kthread_stop(sc->kthread); uea_dbg(INS_TO_USBDEV(sc), "kthread finish with status %d\n", ret); uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL); usb_kill_urb(sc->urb_int); kfree(sc->urb_int->transfer_buffer); usb_free_urb(sc->urb_int); /* stop any pending boot process, when no one can schedule work */ destroy_workqueue(sc->work_q); if (sc->dsp_firm) release_firmware(sc->dsp_firm); uea_leaves(INS_TO_USBDEV(sc)); } /* syfs interface */ static struct uea_softc *dev_to_uea(struct device *dev) { struct usb_interface *intf; struct usbatm_data *usbatm; intf = to_usb_interface(dev); if (!intf) return NULL; usbatm = usb_get_intfdata(intf); if (!usbatm) return NULL; return usbatm->driver_data; } static ssize_t read_status(struct device *dev, struct device_attribute *attr, char *buf) { int ret = -ENODEV; struct uea_softc *sc; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.state); out: mutex_unlock(&uea_mutex); return ret; } static ssize_t reboot(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = -ENODEV; struct uea_softc *sc; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; sc->reset = 1; ret = count; out: mutex_unlock(&uea_mutex); return 
ret; } static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot); static ssize_t read_human_status(struct device *dev, struct device_attribute *attr, char *buf) { int ret = -ENODEV; int modem_state; struct uea_softc *sc; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { switch (sc->stats.phy.state) { case 0x0: /* not yet synchronized */ case 0x1: case 0x3: case 0x4: modem_state = 0; break; case 0x5: /* initialization */ case 0x6: case 0x9: case 0xa: modem_state = 1; break; case 0x7: /* operational */ modem_state = 2; break; case 0x2: /* fail ... */ modem_state = 3; break; default: /* unknown */ modem_state = 4; break; } } else modem_state = GET_STATUS(sc->stats.phy.state); switch (modem_state) { case 0: ret = sprintf(buf, "Modem is booting\n"); break; case 1: ret = sprintf(buf, "Modem is initializing\n"); break; case 2: ret = sprintf(buf, "Modem is operational\n"); break; case 3: ret = sprintf(buf, "Modem synchronization failed\n"); break; default: ret = sprintf(buf, "Modem state is unknown\n"); break; } out: mutex_unlock(&uea_mutex); return ret; } static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, read_human_status, NULL); static ssize_t read_delin(struct device *dev, struct device_attribute *attr, char *buf) { int ret = -ENODEV; struct uea_softc *sc; char *delin = "GOOD"; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { if (sc->stats.phy.flags & 0x4000) delin = "RESET"; else if (sc->stats.phy.flags & 0x0001) delin = "LOSS"; } else { if (sc->stats.phy.flags & 0x0C00) delin = "ERROR"; else if (sc->stats.phy.flags & 0x0030) delin = "LOSS"; } ret = sprintf(buf, "%s\n", delin); out: mutex_unlock(&uea_mutex); return ret; } static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL); #define UEA_ATTR(name, reset) \ \ static ssize_t read_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ int ret = -ENODEV; \ 
struct uea_softc *sc; \ \ mutex_lock(&uea_mutex); \ sc = dev_to_uea(dev); \ if (!sc) \ goto out; \ ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name); \ if (reset) \ sc->stats.phy.name = 0; \ out: \ mutex_unlock(&uea_mutex); \ return ret; \ } \ \ static DEVICE_ATTR(stat_##name, S_IRUGO, read_##name, NULL) UEA_ATTR(mflags, 1); UEA_ATTR(vidcpe, 0); UEA_ATTR(usrate, 0); UEA_ATTR(dsrate, 0); UEA_ATTR(usattenuation, 0); UEA_ATTR(dsattenuation, 0); UEA_ATTR(usmargin, 0); UEA_ATTR(dsmargin, 0); UEA_ATTR(txflow, 0); UEA_ATTR(rxflow, 0); UEA_ATTR(uscorr, 0); UEA_ATTR(dscorr, 0); UEA_ATTR(usunc, 0); UEA_ATTR(dsunc, 0); UEA_ATTR(firmid, 0); /* Retrieve the device End System Identifier (MAC) */ #define htoi(x) (isdigit(x) ? x-'0' : toupper(x)-'A'+10) static int uea_getesi(struct uea_softc *sc, u_char * esi) { unsigned char mac_str[2 * ETH_ALEN + 1]; int i; if (usb_string (sc->usb_dev, sc->usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) != 2 * ETH_ALEN) return 1; for (i = 0; i < ETH_ALEN; i++) esi[i] = htoi(mac_str[2 * i]) * 16 + htoi(mac_str[2 * i + 1]); return 0; } /* ATM stuff */ static int uea_atm_open(struct usbatm_data *usbatm, struct atm_dev *atm_dev) { struct uea_softc *sc = usbatm->driver_data; return uea_getesi(sc, atm_dev->esi); } static int uea_heavy(struct usbatm_data *usbatm, struct usb_interface *intf) { struct uea_softc *sc = usbatm->driver_data; wait_event_interruptible(sc->sync_q, IS_OPERATIONAL(sc)); return 0; } static int claim_interface(struct usb_device *usb_dev, struct usbatm_data *usbatm, int ifnum) { int ret; struct usb_interface *intf = usb_ifnum_to_if(usb_dev, ifnum); if (!intf) { uea_err(usb_dev, "interface %d not found\n", ifnum); return -ENODEV; } ret = usb_driver_claim_interface(&uea_driver, intf, usbatm); if (ret != 0) uea_err(usb_dev, "can't claim interface %d, error %d\n", ifnum, ret); return ret; } static struct attribute *attrs[] = { &dev_attr_stat_status.attr, &dev_attr_stat_mflags.attr, &dev_attr_stat_human_status.attr, 
&dev_attr_stat_delin.attr, &dev_attr_stat_vidcpe.attr, &dev_attr_stat_usrate.attr, &dev_attr_stat_dsrate.attr, &dev_attr_stat_usattenuation.attr, &dev_attr_stat_dsattenuation.attr, &dev_attr_stat_usmargin.attr, &dev_attr_stat_dsmargin.attr, &dev_attr_stat_txflow.attr, &dev_attr_stat_rxflow.attr, &dev_attr_stat_uscorr.attr, &dev_attr_stat_dscorr.attr, &dev_attr_stat_usunc.attr, &dev_attr_stat_dsunc.attr, &dev_attr_stat_firmid.attr, NULL, }; static struct attribute_group attr_grp = { .attrs = attrs, }; static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb = interface_to_usbdev(intf); struct uea_softc *sc; int ret, ifnum = intf->altsetting->desc.bInterfaceNumber; unsigned int alt; uea_enters(usb); /* interface 0 is for firmware/monitoring */ if (ifnum != UEA_INTR_IFACE_NO) return -ENODEV; usbatm->flags = (sync_wait[modem_index] ? 0 : UDSL_SKIP_HEAVY_INIT); /* interface 1 is for outbound traffic */ ret = claim_interface(usb, usbatm, UEA_US_IFACE_NO); if (ret < 0) return ret; /* ADI930 has only 2 interfaces and inbound traffic is on interface 1 */ if (UEA_CHIP_VERSION(id) != ADI930) { /* interface 2 is for inbound traffic */ ret = claim_interface(usb, usbatm, UEA_DS_IFACE_NO); if (ret < 0) return ret; } sc = kzalloc(sizeof(struct uea_softc), GFP_KERNEL); if (!sc) { uea_err(usb, "uea_init: not enough memory !\n"); return -ENOMEM; } sc->usb_dev = usb; usbatm->driver_data = sc; sc->usbatm = usbatm; sc->modem_index = (modem_index < NB_MODEM) ? 
modem_index++ : 0; sc->driver_info = id->driver_info; /* first try to use module parameter */ if (annex[sc->modem_index] == 1) sc->annex = ANNEXA; else if (annex[sc->modem_index] == 2) sc->annex = ANNEXB; /* try to autodetect annex */ else if (sc->driver_info & AUTO_ANNEX_A) sc->annex = ANNEXA; else if (sc->driver_info & AUTO_ANNEX_B) sc->annex = ANNEXB; else sc->annex = (le16_to_cpu(sc->usb_dev->descriptor.bcdDevice) & 0x80)?ANNEXB:ANNEXA; alt = altsetting[sc->modem_index]; /* ADI930 don't support iso */ if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) { if (alt <= 8 && usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) { uea_dbg(usb, "set alternate %u for 2 interface\n", alt); uea_info(usb, "using iso mode\n"); usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ; } else { uea_err(usb, "setting alternate %u failed for " "2 interface, using bulk mode\n", alt); } } ret = sysfs_create_group(&intf->dev.kobj, &attr_grp); if (ret < 0) goto error; ret = uea_boot(sc); if (ret < 0) goto error_rm_grp; return 0; error_rm_grp: sysfs_remove_group(&intf->dev.kobj, &attr_grp); error: kfree(sc); return ret; } static void uea_unbind(struct usbatm_data *usbatm, struct usb_interface *intf) { struct uea_softc *sc = usbatm->driver_data; sysfs_remove_group(&intf->dev.kobj, &attr_grp); uea_stop(sc); kfree(sc); } static struct usbatm_driver uea_usbatm_driver = { .driver_name = "ueagle-atm", .bind = uea_bind, .atm_start = uea_atm_open, .unbind = uea_unbind, .heavy_init = uea_heavy, .bulk_in = UEA_BULK_DATA_PIPE, .bulk_out = UEA_BULK_DATA_PIPE, .isoc_in = UEA_ISO_DATA_PIPE, }; static int uea_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb = interface_to_usbdev(intf); uea_enters(usb); uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) Rev (%#X): %s\n", le16_to_cpu(usb->descriptor.idVendor), le16_to_cpu(usb->descriptor.idProduct), le16_to_cpu(usb->descriptor.bcdDevice), chip_name[UEA_CHIP_VERSION(id)]); usb_reset_device(usb); if 
(UEA_IS_PREFIRM(id)) return uea_load_firmware(usb, UEA_CHIP_VERSION(id)); return usbatm_usb_probe(intf, id, &uea_usbatm_driver); } static void uea_disconnect(struct usb_interface *intf) { struct usb_device *usb = interface_to_usbdev(intf); int ifnum = intf->altsetting->desc.bInterfaceNumber; uea_enters(usb); /* ADI930 has 2 interfaces and eagle 3 interfaces. * Pre-firmware device has one interface */ if (usb->config->desc.bNumInterfaces != 1 && ifnum == 0) { mutex_lock(&uea_mutex); usbatm_usb_disconnect(intf); mutex_unlock(&uea_mutex); uea_info(usb, "ADSL device removed\n"); } uea_leaves(usb); } /* * List of supported VID/PID */ static const struct usb_device_id uea_ids[] = { {USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM), .driver_info = EAGLE_III | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM), .driver_info = EAGLE_III | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM), .driver_info = EAGLE_IV | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM), .driver_info = EAGLE_IV | PSTFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(DEVOLO_VID, 
DEVOLO_EAGLE_I_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),.driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B}, {} }; /* * USB driver descriptor */ static struct usb_driver uea_driver = { .name = "ueagle-atm", .id_table = uea_ids, .probe = uea_probe, .disconnect = uea_disconnect, }; MODULE_DEVICE_TABLE(usb, uea_ids); /** * uea_init - Initialize the module. 
* Register to USB subsystem */
static int __init uea_init(void)
{
	int ret;

	/* Propagate usb_register() failures to the module loader; the
	 * original code ignored the return value and always returned 0,
	 * reporting success even when registration failed. */
	ret = usb_register(&uea_driver);
	if (ret)
		return ret;

	printk(KERN_INFO "[ueagle-atm] driver " EAGLEUSBVERSION " loaded\n");
	return 0;
}

module_init(uea_init);

/**
 * uea_exit - Destroy module
 * Deregister with USB subsystem
 */
static void __exit uea_exit(void)
{
	/*
	 * This calls automatically the uea_disconnect method if necessary:
	 */
	usb_deregister(&uea_driver);

	printk(KERN_INFO "[ueagle-atm] driver unloaded\n");
}

module_exit(uea_exit);

MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka");
MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver");
MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
armani-dev/kernel_test
fs/namei.c
348
86657
/* * linux/fs/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Some corrections by tytso. */ /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname * lookup logic. */ /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. */ #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/ima.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <linux/posix_acl.h> #include <asm/uaccess.h> #include "internal.h" #include "mount.h" /* [Feb-1997 T. Schoebel-Theuer] * Fundamental changes in the pathname lookup mechanisms (namei) * were necessary because of omirr. The reason is that omirr needs * to know the _real_ pathname, not the user-supplied one, in case * of symlinks (and also when transname replacements occur). * * The new code replaces the old recursive symlink resolution with * an iterative one (in case of non-nested symlink chains). It does * this with calls to <fs>_follow_link(). * As a side effect, dir_namei(), _namei() and follow_link() are now * replaced with a single function lookup_dentry() that can handle all * the special cases of the former code. * * With the new dcache, the pathname is stored at each inode, at least as * long as the refcount of the inode is positive. As a side effect, the * size of the dcache depends on the inode cache and thus is dynamic. * * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink * resolution to correspond with current state of the code. * * Note that the symlink resolution is not *completely* iterative. * There is still a significant amount of tail- and mid- recursion in * the algorithm. 
Also, note that <fs>_readlink() is not used in * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() * may return different results than <fs>_follow_link(). Many virtual * filesystems (including /proc) exhibit this behavior. */ /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL * and the name already exists in form of a symlink, try to create the new * name indicated by the symlink. The old code always complained that the * name already exists, due to not following the symlink even if its target * is nonexistent. The new semantics affects also mknod() and link() when * the name is a symlink pointing to a non-existent name. * * I don't know which semantics is the right one, since I have no access * to standards. But I found by trial that HP-UX 9.0 has the full "new" * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the * "old" one. Personally, I think the new semantics is much more logical. * Note that "ln old new" where "new" is a symlink pointing to a non-existing * file does succeed in both HP-UX and SunOs, but not in Solaris * and in the old Linux semantics. */ /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink * semantics. See the comments in "open_namei" and "do_link" below. * * [10-Sep-98 Alan Modra] Another symlink change. */ /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: * inside the path - always follow. * in the last component in creation/removal/renaming - never follow. * if LOOKUP_FOLLOW passed - follow. * if the pathname has trailing slashes - follow. * otherwise - don't follow. * (applied in that order). * * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT * restored for 2.4. This is the last surviving part of old 4.2BSD bug. * During the 2.4 we need to fix the userland stuff depending on it - * hopefully we will be able to get rid of that wart in 2.5. 
So far only * XEmacs seems to be relying on it... */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ /* In order to reduce some races, while at the same time doing additional * checking and hopefully speeding things up, we copy filenames to the * kernel data space before using them.. * * POSIX.1 2.4: an empty pathname is invalid (ENOENT). * PATH_MAX includes the nul terminator --RR. */ static int do_getname(const char __user *filename, char *page) { int retval; unsigned long len = PATH_MAX; if (!segment_eq(get_fs(), KERNEL_DS)) { if ((unsigned long) filename >= TASK_SIZE) return -EFAULT; if (TASK_SIZE - (unsigned long) filename < PATH_MAX) len = TASK_SIZE - (unsigned long) filename; } retval = strncpy_from_user(page, filename, len); if (retval > 0) { if (retval < len) return 0; return -ENAMETOOLONG; } else if (!retval) retval = -ENOENT; return retval; } static char *getname_flags(const char __user *filename, int flags, int *empty) { char *result = __getname(); int retval; if (!result) return ERR_PTR(-ENOMEM); retval = do_getname(filename, result); if (retval < 0) { if (retval == -ENOENT && empty) *empty = 1; if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) { __putname(result); return ERR_PTR(retval); } } audit_getname(result); return result; } char *getname(const char __user * filename) { return getname_flags(filename, 0, NULL); } #ifdef CONFIG_AUDITSYSCALL void putname(const char *name) { if (unlikely(!audit_dummy_context())) audit_putname(name); else __putname(name); } EXPORT_SYMBOL(putname); #endif static int check_acl(struct inode *inode, int mask) { #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *acl; if (mask & MAY_NOT_BLOCK) { acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS); if (!acl) return -EAGAIN; /* no ->get_acl() calls in RCU mode... 
*/ if (acl == ACL_NOT_CACHED) return -ECHILD; return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); } acl = get_cached_acl(inode, ACL_TYPE_ACCESS); /* * A filesystem can force a ACL callback by just never filling the * ACL cache. But normally you'd fill the cache either at inode * instantiation time, or on the first ->get_acl call. * * If the filesystem doesn't have a get_acl() function at all, we'll * just create the negative cache entry. */ if (acl == ACL_NOT_CACHED) { if (inode->i_op->get_acl) { acl = inode->i_op->get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); } else { set_cached_acl(inode, ACL_TYPE_ACCESS, NULL); return -EAGAIN; } } if (acl) { int error = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); return error; } #endif return -EAGAIN; } /* * This does the basic permission checking */ static int acl_permission_check(struct inode *inode, int mask) { unsigned int mode = inode->i_mode; if (current_user_ns() != inode_userns(inode)) goto other_perms; if (likely(current_fsuid() == inode->i_uid)) mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG)) { int error = check_acl(inode, mask); if (error != -EAGAIN) return error; } if (in_group_p(inode->i_gid)) mode >>= 3; } other_perms: /* * If the DACs are ok we don't need any capability check. */ if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) return 0; return -EACCES; } /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. * * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk * request cannot be satisfied (eg. requires blocking or too much complexity). 
* It would then be called again in ref-walk mode. */ int generic_permission(struct inode *inode, int mask) { int ret; /* * Do the basic permission checks. */ ret = acl_permission_check(inode, mask); if (ret != -EACCES) return ret; if (S_ISDIR(inode->i_mode)) { /* DACs are overridable for directories */ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) return 0; if (!(mask & MAY_WRITE)) if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } /* * Read/write DACs are always overridable. * Executable DACs are overridable when there is * at least one exec bit set. */ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) return 0; /* * Searching includes executable on directories, else just read. */ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ) if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } /* * We _really_ want to just do "generic_permission()" without * even looking at the inode->i_op values. So we keep a cache * flag in inode->i_opflags, that says "this has not special * permission function, use the fast case". */ static inline int do_inode_permission(struct inode *inode, int mask) { if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) { if (likely(inode->i_op->permission)) return inode->i_op->permission(inode, mask); /* This gets set once for the inode lifetime */ spin_lock(&inode->i_lock); inode->i_opflags |= IOP_FASTPERM; spin_unlock(&inode->i_lock); } return generic_permission(inode, mask); } /** * inode_permission - check for access rights to a given inode * @inode: inode to check permission on * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) * * Used to check for read/write/execute permissions on an inode. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. 
*
 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
 */
int inode_permission(struct inode *inode, int mask)
{
	int retval;

	/* Cheap, fs-independent vetoes first: read-only filesystems and
	 * immutable inodes refuse writes before any ->permission() call. */
	if (unlikely(mask & MAY_WRITE)) {
		umode_t mode = inode->i_mode;

		/*
		 * Nobody gets write access to a read-only fs.
		 */
		if (IS_RDONLY(inode) &&
		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			return -EROFS;

		/*
		 * Nobody gets write access to an immutable file.
		 */
		if (IS_IMMUTABLE(inode))
			return -EACCES;
	}

	/* Filesystem / generic DAC check, then device cgroup, then LSM;
	 * the first non-zero result wins. */
	retval = do_inode_permission(inode, mask);
	if (retval)
		return retval;

	retval = devcgroup_inode_permission(inode, mask);
	if (retval)
		return retval;

	return security_inode_permission(inode, mask);
}

/**
 * path_get - get a reference to a path
 * @path: path to get the reference to
 *
 * Given a path increment the reference count to the dentry and the vfsmount.
 */
void path_get(struct path *path)
{
	mntget(path->mnt);
	dget(path->dentry);
}
EXPORT_SYMBOL(path_get);

/**
 * path_put - put a reference to a path
 * @path: path to put the reference to
 *
 * Given a path decrement the reference count to the dentry and the vfsmount.
 */
void path_put(struct path *path)
{
	dput(path->dentry);
	mntput(path->mnt);
}
EXPORT_SYMBOL(path_put);

/*
 * Path walking has 2 modes, rcu-walk and ref-walk (see
 * Documentation/filesystems/path-lookup.txt).  In situations when we can't
 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
 * normal reference counts on dentries and vfsmounts to transition to rcu-walk
 * mode.  Refcounts are grabbed at the last known good point before rcu-walk
 * got stuck, so ref-walk may continue from there. If this is not successful
 * (eg. a seqcount has changed), then failure is returned and it's up to caller
 * to restart the path walk from the beginning in ref-walk mode.
 */

/**
 * unlazy_walk - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data * @dentry: child of nd->path.dentry or NULL * Returns: 0 on success, -ECHILD on failure * * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry * for ref-walk mode. @dentry must be a path found by a do_lookup call on * @nd or NULL. Must be called from rcu-walk context. */ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) { struct fs_struct *fs = current->fs; struct dentry *parent = nd->path.dentry; int want_root = 0; BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { want_root = 1; spin_lock(&fs->lock); if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry) goto err_root; } spin_lock(&parent->d_lock); if (!dentry) { if (!__d_rcu_to_refcount(parent, nd->seq)) goto err_parent; BUG_ON(nd->inode != parent->d_inode); } else { if (dentry->d_parent != parent) goto err_parent; spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); if (!__d_rcu_to_refcount(dentry, nd->seq)) goto err_child; /* * If the sequence check on the child dentry passed, then * the child has not been removed from its parent. This * means the parent dentry must be valid and able to take * a reference at this point. 
*/ BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); BUG_ON(!parent->d_count); parent->d_count++; spin_unlock(&dentry->d_lock); } spin_unlock(&parent->d_lock); if (want_root) { path_get(&nd->root); spin_unlock(&fs->lock); } mntget(nd->path.mnt); rcu_read_unlock(); br_read_unlock(vfsmount_lock); nd->flags &= ~LOOKUP_RCU; return 0; err_child: spin_unlock(&dentry->d_lock); err_parent: spin_unlock(&parent->d_lock); err_root: if (want_root) spin_unlock(&fs->lock); return -ECHILD; } /** * release_open_intent - free up open intent resources * @nd: pointer to nameidata */ void release_open_intent(struct nameidata *nd) { struct file *file = nd->intent.open.file; if (file && !IS_ERR(file)) { if (file->f_path.dentry == NULL) put_filp(file); else fput(file); } } static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) { return dentry->d_op->d_revalidate(dentry, nd); } /** * complete_walk - successful completion of path walk * @nd: pointer nameidata * * If we had been in RCU mode, drop out of it and legitimize nd->path. * Revalidate the final result, unless we'd already done that during * the path walk or the filesystem doesn't ask for it. Return 0 on * success, -error on failure. In case of failure caller does not * need to drop nd->path. 
*/ static int complete_walk(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; int status; if (nd->flags & LOOKUP_RCU) { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; spin_lock(&dentry->d_lock); if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { spin_unlock(&dentry->d_lock); rcu_read_unlock(); br_read_unlock(vfsmount_lock); return -ECHILD; } BUG_ON(nd->inode != dentry->d_inode); spin_unlock(&dentry->d_lock); mntget(nd->path.mnt); rcu_read_unlock(); br_read_unlock(vfsmount_lock); } if (likely(!(nd->flags & LOOKUP_JUMPED))) return 0; if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) return 0; if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT))) return 0; /* Note: we do not d_invalidate() */ status = d_revalidate(dentry, nd); if (status > 0) return 0; if (!status) status = -ESTALE; path_put(&nd->path); return status; } static __always_inline void set_root(struct nameidata *nd) { if (!nd->root.mnt) get_fs_root(current->fs, &nd->root); } static int link_path_walk(const char *, struct nameidata *); static __always_inline void set_root_rcu(struct nameidata *nd) { if (!nd->root.mnt) { struct fs_struct *fs = current->fs; unsigned seq; do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } } static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) { int ret; if (IS_ERR(link)) goto fail; if (*link == '/') { set_root(nd); path_put(&nd->path); nd->path = nd->root; path_get(&nd->root); nd->flags |= LOOKUP_JUMPED; } nd->inode = nd->path.dentry->d_inode; ret = link_path_walk(link, nd); return ret; fail: path_put(&nd->path); return PTR_ERR(link); } static void path_put_conditional(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->path.mnt) mntput(path->mnt); } static inline void path_to_nameidata(const struct path *path, struct nameidata *nd) { 
if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); if (nd->path.mnt != path->mnt) mntput(nd->path.mnt); } nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) { struct inode *inode = link->dentry->d_inode; if (!IS_ERR(cookie) && inode->i_op->put_link) inode->i_op->put_link(link->dentry, nd, cookie); path_put(link); } static __always_inline int follow_link(struct path *link, struct nameidata *nd, void **p) { int error; struct dentry *dentry = link->dentry; BUG_ON(nd->flags & LOOKUP_RCU); if (link->mnt == nd->path.mnt) mntget(link->mnt); if (unlikely(current->total_link_count >= 40)) { *p = ERR_PTR(-ELOOP); /* no ->put_link(), please */ path_put(&nd->path); return -ELOOP; } cond_resched(); current->total_link_count++; touch_atime(link); nd_set_link(nd, NULL); error = security_inode_follow_link(link->dentry, nd); if (error) { *p = ERR_PTR(error); /* no ->put_link(), please */ path_put(&nd->path); return error; } nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(*p); if (!IS_ERR(*p)) { char *s = nd_get_link(nd); error = 0; if (s) error = __vfs_follow_link(nd, s); else if (nd->last_type == LAST_BIND) { nd->flags |= LOOKUP_JUMPED; nd->inode = nd->path.dentry->d_inode; if (nd->inode->i_op->follow_link) { /* stepped on a _really_ weird one */ path_put(&nd->path); error = -ELOOP; } } } return error; } static int follow_up_rcu(struct path *path) { struct mount *mnt = real_mount(path->mnt); struct mount *parent; struct dentry *mountpoint; parent = mnt->mnt_parent; if (&parent->mnt == path->mnt) return 0; mountpoint = mnt->mnt_mountpoint; path->dentry = mountpoint; path->mnt = &parent->mnt; return 1; } int follow_up(struct path *path) { struct mount *mnt = real_mount(path->mnt); struct mount *parent; struct dentry *mountpoint; br_read_lock(vfsmount_lock); parent = mnt->mnt_parent; if (&parent->mnt == path->mnt) { 
br_read_unlock(vfsmount_lock); return 0; } mntget(&parent->mnt); mountpoint = dget(mnt->mnt_mountpoint); br_read_unlock(vfsmount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); path->mnt = &parent->mnt; return 1; } /* * Perform an automount * - return -EISDIR to tell follow_managed() to stop and return the path we * were called with. */ static int follow_automount(struct path *path, unsigned flags, bool *need_mntput) { struct vfsmount *mnt; int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; /* We don't want to mount if someone's just doing a stat - * unless they're stat'ing a directory and appended a '/' to * the name. * * We do, however, want to mount if someone wants to open or * create a file of any type under the mountpoint, wants to * traverse through the mountpoint or wants to open the * mounted directory. Also, autofs may mark negative dentries * as being automount points. These will need the attentions * of the daemon to instantiate them before they can be used. */ if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && path->dentry->d_inode) return -EISDIR; current->total_link_count++; if (current->total_link_count >= 40) return -ELOOP; mnt = path->dentry->d_op->d_automount(path); if (IS_ERR(mnt)) { /* * The filesystem is allowed to return -EISDIR here to indicate * it doesn't want to automount. For instance, autofs would do * this so that its userspace daemon can mount on this dentry. * * However, we can only permit this if it's a terminal point in * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. 
 */
		if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
			return -EREMOTE;
		return PTR_ERR(mnt);
	}

	if (!mnt) /* mount collision */
		return 0;

	if (!*need_mntput) {
		/* lock_mount() may release path->mnt on error */
		mntget(path->mnt);
		*need_mntput = true;
	}
	err = finish_automount(mnt, path);
	switch (err) {
	case -EBUSY:
		/* Someone else made a mount here whilst we were busy */
		return 0;
	case 0:
		path_put(path);
		path->mnt = mnt;
		path->dentry = dget(mnt->mnt_root);
		return 0;
	default:
		return err;
	}

}

/*
 * Handle a dentry that is managed in some way.
 * - Flagged for transit management (autofs)
 * - Flagged as mountpoint
 * - Flagged as automount point
 *
 * This may only be called in refwalk mode.
 *
 * Serialization is taken care of in namespace.c
 *
 * Returns <0 on error, 0 if the path was left alone, or 1 (the value of
 * need_mntput) if a new vfsmount reference is now held in *path.
 */
static int follow_managed(struct path *path, unsigned flags)
{
	struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
	unsigned managed;
	bool need_mntput = false;
	int ret = 0;

	/* Given that we're not holding a lock here, we retain the value in a
	 * local variable for each dentry as we look at it so that we don't see
	 * the components of that value change under us */
	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       managed &= DCACHE_MANAGED_DENTRY,
	       unlikely(managed != 0)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held. */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(path->dentry, false);
			if (ret < 0)
				break;
		}

		/* Transit to a mounted filesystem.
 */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (mounted) {
				/* Step onto the root of what is mounted here;
				 * the first hop keeps the caller's mnt ref. */
				dput(path->dentry);
				if (need_mntput)
					mntput(path->mnt);
				path->mnt = mounted;
				path->dentry = dget(mounted->mnt_root);
				need_mntput = true;
				continue;
			}

			/* Something is mounted on this dentry in another
			 * namespace and/or whatever was mounted there in this
			 * namespace got unmounted before we managed to get the
			 * vfsmount_lock */
		}

		/* Handle an automount point */
		if (managed & DCACHE_NEED_AUTOMOUNT) {
			ret = follow_automount(path, flags, &need_mntput);
			if (ret < 0)
				break;
			continue;
		}

		/* We didn't change the current path point */
		break;
	}

	if (need_mntput && path->mnt == mnt)
		mntput(path->mnt);
	if (ret == -EISDIR)
		ret = 0;
	return ret < 0 ? ret : need_mntput;
}

/* Step onto the vfsmount covering *path, if any; returns 1 if it moved. */
int follow_down_one(struct path *path)
{
	struct vfsmount *mounted;

	mounted = lookup_mnt(path);
	if (mounted) {
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
		return 1;
	}
	return 0;
}

/* Would d_manage() want us to block here?  (rcu-walk may not block.) */
static inline bool managed_dentry_might_block(struct dentry *dentry)
{
	return (dentry->d_flags & DCACHE_MANAGE_TRANSIT &&
		dentry->d_op->d_manage(dentry, true) < 0);
}

/*
 * Try to skip to top of mountpoint pile in rcuwalk mode.  Fail if
 * we meet a managed dentry that would need blocking.
 */
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
			       struct inode **inode)
{
	for (;;) {
		struct mount *mounted;
		/*
		 * Don't forget we might have a non-mountpoint managed dentry
		 * that wants to block transit.
		 */
		if (unlikely(managed_dentry_might_block(path->dentry)))
			return false;

		if (!d_mountpoint(path->dentry))
			break;

		mounted = __lookup_mnt(path->mnt, path->dentry, 1);
		if (!mounted)
			break;
		path->mnt = &mounted->mnt;
		path->dentry = mounted->mnt.mnt_root;
		nd->flags |= LOOKUP_JUMPED;
		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
		/*
		 * Update the inode too. We don't need to re-check the
		 * dentry sequence number here after this d_inode read,
		 * because a mount-point is always pinned.
 */
		*inode = path->dentry->d_inode;
	}
	return true;
}

/* rcu-walk analogue of follow_mount(): hop to the top of the mount pile. */
static void follow_mount_rcu(struct nameidata *nd)
{
	while (d_mountpoint(nd->path.dentry)) {
		struct mount *mounted;
		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1);
		if (!mounted)
			break;
		nd->path.mnt = &mounted->mnt;
		nd->path.dentry = mounted->mnt.mnt_root;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
}

/* Handle ".." under rcu-walk; returns -ECHILD (after dropping out of
 * rcu mode) if a seqcount check fails and ref-walk must be retried. */
static int follow_dotdot_rcu(struct nameidata *nd)
{
	set_root_rcu(nd);

	while (1) {
		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			struct dentry *old = nd->path.dentry;
			struct dentry *parent = old->d_parent;
			unsigned seq;

			/* validate old against nd->seq before trusting parent */
			seq = read_seqcount_begin(&parent->d_seq);
			if (read_seqcount_retry(&old->d_seq, nd->seq))
				goto failed;
			nd->path.dentry = parent;
			nd->seq = seq;
			break;
		}
		if (!follow_up_rcu(&nd->path))
			break;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
	follow_mount_rcu(nd);
	nd->inode = nd->path.dentry->d_inode;
	return 0;

failed:
	nd->flags &= ~LOOKUP_RCU;
	if (!(nd->flags & LOOKUP_ROOT))
		nd->root.mnt = NULL;
	rcu_read_unlock();
	br_read_unlock(vfsmount_lock);
	return -ECHILD;
}

/*
 * Follow down to the covering mount currently visible to userspace.  At each
 * point, the filesystem owning that dentry may be queried as to whether the
 * caller is permitted to proceed or not.
 */
int follow_down(struct path *path)
{
	unsigned managed;
	int ret;

	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held.
		 *
		 * We indicate to the filesystem if someone is trying to mount
		 * something here.  This gives autofs the chance to deny anyone
		 * other than its daemon the right to mount on its
		 * superstructure.
		 *
		 * The filesystem may sleep at this point.
 */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(
				path->dentry, false);
			if (ret < 0)
				return ret == -EISDIR ? 0 : ret;
		}

		/* Transit to a mounted filesystem. */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (!mounted)
				break;
			dput(path->dentry);
			mntput(path->mnt);
			path->mnt = mounted;
			path->dentry = dget(mounted->mnt_root);
			continue;
		}

		/* Don't handle automount points here */
		break;
	}
	return 0;
}

/*
 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
 */
static void follow_mount(struct path *path)
{
	while (d_mountpoint(path->dentry)) {
		struct vfsmount *mounted = lookup_mnt(path);
		if (!mounted)
			break;
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
	}
}

/* Handle ".." in refwalk mode: step to parent, or up through mounts,
 * never escaping past nd->root. */
static void follow_dotdot(struct nameidata *nd)
{
	set_root(nd);

	while(1) {
		struct dentry *old = nd->path.dentry;

		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* rare case of legitimate dget_parent()... */
			nd->path.dentry = dget_parent(nd->path.dentry);
			dput(old);
			break;
		}
		if (!follow_up(&nd->path))
			break;
	}
	follow_mount(&nd->path);
	nd->inode = nd->path.dentry->d_inode;
}

/*
 * This looks up the name in dcache, possibly revalidates the old dentry and
 * allocates a new one if not found or not valid.  In the need_lookup argument
 * returns whether i_op->lookup is necessary.
 *
 * dir->d_inode->i_mutex must be held
 */
static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
				    struct nameidata *nd, bool *need_lookup)
{
	struct dentry *dentry;
	int error;

	*need_lookup = false;
	dentry = d_lookup(dir, name);
	if (dentry) {
		if (d_need_lookup(dentry)) {
			/* hashed but never looked up - caller must do it */
			*need_lookup = true;
		} else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
			error = d_revalidate(dentry, nd);
			if (unlikely(error <= 0)) {
				if (error < 0) {
					dput(dentry);
					return ERR_PTR(error);
				} else if (!d_invalidate(dentry)) {
					/* stale; drop it and allocate afresh */
					dput(dentry);
					dentry = NULL;
				}
			}
		}
	}

	if (!dentry) {
		dentry = d_alloc(dir, name);
		if (unlikely(!dentry))
			return ERR_PTR(-ENOMEM);

		*need_lookup = true;
	}
	return dentry;
}

/*
 * Call i_op->lookup on the dentry.  The dentry must be negative but may be
 * hashed if it was populated with DCACHE_NEED_LOOKUP.
 *
 * dir->d_inode->i_mutex must be held
 */
static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct dentry *old;

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(dir))) {
		dput(dentry);
		return ERR_PTR(-ENOENT);
	}

	/* ->lookup may return a different (e.g. disconnected) dentry */
	old = dir->i_op->lookup(dir, dentry, nd);
	if (unlikely(old)) {
		dput(dentry);
		dentry = old;
	}
	return dentry;
}

/* Combined dcache + on-disk lookup of one component under base.
 * dir->d_inode->i_mutex must be held by the caller. */
static struct dentry *__lookup_hash(struct qstr *name,
		struct dentry *base, struct nameidata *nd)
{
	bool need_lookup;
	struct dentry *dentry;

	dentry = lookup_dcache(name, base, nd, &need_lookup);
	if (!need_lookup)
		return dentry;

	return lookup_real(base->d_inode, dentry, nd);
}

/*
 * It's more convoluted than I'd like it to be, but... it's still fairly
 * small and for now I'd prefer to have fast path as straight as possible.
 * It _is_ time-critical.
 */
static int do_lookup(struct nameidata *nd, struct qstr *name,
			struct path *path, struct inode **inode)
{
	struct vfsmount *mnt = nd->path.mnt;
	struct dentry *dentry, *parent = nd->path.dentry;
	int need_reval = 1;
	int status = 1;
	int err;

	/*
	 * Rename seqlock is not required here because in the off chance
	 * of a false negative due to a concurrent rename, we're going to
	 * do the non-racy lookup, below.
	 */
	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;
		*inode = nd->inode;
		dentry = __d_lookup_rcu(parent, name, &seq, inode);
		if (!dentry)
			goto unlazy;

		/* Memory barrier in read_seqcount_begin of child is enough */
		if (__read_seqcount_retry(&parent->d_seq, nd->seq))
			return -ECHILD;
		nd->seq = seq;

		if (unlikely(d_need_lookup(dentry)))
			goto unlazy;
		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
			status = d_revalidate(dentry, nd);
			if (unlikely(status <= 0)) {
				/* revalidation already done (unless -ECHILD);
				 * don't repeat it after dropping to ref-walk */
				if (status != -ECHILD)
					need_reval = 0;
				goto unlazy;
			}
		}
		path->mnt = mnt;
		path->dentry = dentry;
		if (unlikely(!__follow_mount_rcu(nd, path, inode)))
			goto unlazy;
		if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
			goto unlazy;
		return 0;
unlazy:
		/* leave rcu-walk, keeping a ref on dentry if possible */
		if (unlazy_walk(nd, dentry))
			return -ECHILD;
	} else {
		dentry = __d_lookup(parent, name);
	}

	if (unlikely(!dentry))
		goto need_lookup;

	if (unlikely(d_need_lookup(dentry))) {
		dput(dentry);
		goto need_lookup;
	}

	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
		status = d_revalidate(dentry, nd);
	if (unlikely(status <= 0)) {
		if (status < 0) {
			dput(dentry);
			return status;
		}
		if (!d_invalidate(dentry)) {
			dput(dentry);
			goto need_lookup;
		}
	}
done:
	path->mnt = mnt;
	path->dentry = dentry;
	err = follow_managed(path, nd->flags);
	if (unlikely(err < 0)) {
		path_put_conditional(path, nd);
		return err;
	}
	if (err)
		nd->flags |= LOOKUP_JUMPED;
	*inode = path->dentry->d_inode;
	return 0;

need_lookup:
	/* slow path: allocate/instantiate under the parent's i_mutex */
	BUG_ON(nd->inode != parent->d_inode);

	mutex_lock(&parent->d_inode->i_mutex);
	dentry = __lookup_hash(name, parent, nd);
	mutex_unlock(&parent->d_inode->i_mutex);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	goto done;
}

/* MAY_EXEC check on the current directory; in rcu-walk, retry in
 * ref-walk if the permission check would need to block. */
static inline int may_lookup(struct nameidata *nd)
{
	if (nd->flags & LOOKUP_RCU) {
		int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
		if (err != -ECHILD)
			return err;
		if (unlazy_walk(nd, NULL))
			return -ECHILD;
	}
	return inode_permission(nd->inode, MAY_EXEC);
}

/* Handle "." (no-op) and ".." components in either walk mode. */
static inline int handle_dots(struct nameidata *nd, int type)
{
	if (type == LAST_DOTDOT) {
		if (nd->flags & LOOKUP_RCU) {
			if (follow_dotdot_rcu(nd))
				return -ECHILD;
		} else
			follow_dotdot(nd);
	}
	return 0;
}

/* Abandon the walk, releasing whatever refs/locks the mode holds. */
static void terminate_walk(struct nameidata *nd)
{
	if (!(nd->flags & LOOKUP_RCU)) {
		path_put(&nd->path);
	} else {
		nd->flags &= ~LOOKUP_RCU;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		rcu_read_unlock();
		br_read_unlock(vfsmount_lock);
	}
}

/*
 * Do we need to follow links? We _really_ want to be able
 * to do this check without having to look at inode->i_op,
 * so we keep a cache of "no, this doesn't need follow_link"
 * for the common case.
 */
static inline int should_follow_link(struct inode *inode, int follow)
{
	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (likely(inode->i_op->follow_link))
			return follow;

		/* This gets set once for the inode lifetime */
		spin_lock(&inode->i_lock);
		inode->i_opflags |= IOP_NOFOLLOW;
		spin_unlock(&inode->i_lock);
	}
	return 0;
}

/* Walk one component: returns 0 on success, 1 if a symlink must be
 * followed, or a negative error (walk already terminated). */
static inline int walk_component(struct nameidata *nd, struct path *path,
		struct qstr *name, int type, int follow)
{
	struct inode *inode;
	int err;
	/*
	 * "." and ".." are special - ".." especially so because it has
	 * to be able to know about the current root directory and
	 * parent relationships.
 */
	if (unlikely(type != LAST_NORM))
		return handle_dots(nd, type);
	err = do_lookup(nd, name, path, &inode);
	if (unlikely(err)) {
		terminate_walk(nd);
		return err;
	}
	if (!inode) {
		/* negative dentry */
		path_to_nameidata(path, nd);
		terminate_walk(nd);
		return -ENOENT;
	}
	if (should_follow_link(inode, follow)) {
		if (nd->flags & LOOKUP_RCU) {
			if (unlikely(unlazy_walk(nd, path->dentry))) {
				terminate_walk(nd);
				return -ECHILD;
			}
		}
		BUG_ON(inode != path->dentry->d_inode);
		return 1;
	}
	path_to_nameidata(path, nd);
	nd->inode = inode;
	return 0;
}

/*
 * This limits recursive symlink follows to 8, while
 * limiting consecutive symlinks to 40.
 *
 * Without that kind of total limit, nasty chains of consecutive
 * symlinks can cause almost arbitrarily long lookups.
 */
static inline int nested_symlink(struct path *path, struct nameidata *nd)
{
	int res;

	if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
		path_put_conditional(path, nd);
		path_put(&nd->path);
		return -ELOOP;
	}
	BUG_ON(nd->depth >= MAX_NESTED_LINKS);

	nd->depth++;
	current->link_count++;

	do {
		struct path link = *path;
		void *cookie;

		res = follow_link(&link, nd, &cookie);
		if (!res)
			res = walk_component(nd, path, &nd->last,
					     nd->last_type, LOOKUP_FOLLOW);
		put_link(nd, &link, cookie);
	} while (res > 0);

	current->link_count--;
	nd->depth--;
	return res;
}

/*
 * We really don't want to look at inode->i_op->lookup
 * when we don't have to. So we keep a cache bit in
 * the inode ->i_opflags field that says "yes, we can
 * do lookup on this inode".
 */
static inline int can_lookup(struct inode *inode)
{
	if (likely(inode->i_opflags & IOP_LOOKUP))
		return 1;
	if (likely(!inode->i_op->lookup))
		return 0;

	/* We do this once for the lifetime of the inode */
	spin_lock(&inode->i_lock);
	inode->i_opflags |= IOP_LOOKUP;
	spin_unlock(&inode->i_lock);
	return 1;
}

/*
 * We can do the critical dentry name comparison and hashing
 * operations one word at a time, but we are limited to:
 *
 * - Architectures with fast unaligned word accesses.
We could * do a "get_unaligned()" if this helps and is sufficiently * fast. * * - Little-endian machines (so that we can generate the mask * of low bytes efficiently). Again, we *could* do a byte * swapping load on big-endian architectures if that is not * expensive enough to make the optimization worthless. * * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we * do not trap on the (extremely unlikely) case of a page * crossing operation. * * - Furthermore, we need an efficient 64-bit compile for the * 64-bit case in order to generate the "number of bytes in * the final mask". Again, that could be replaced with a * efficient population count instruction or similar. */ #ifdef CONFIG_DCACHE_WORD_ACCESS #include <asm/word-at-a-time.h> #ifdef CONFIG_64BIT static inline unsigned int fold_hash(unsigned long hash) { hash += hash >> (8*sizeof(int)); return hash; } #else /* 32-bit case */ #define fold_hash(x) (x) #endif unsigned int full_name_hash(const unsigned char *name, unsigned int len) { unsigned long a, mask; unsigned long hash = 0; for (;;) { a = load_unaligned_zeropad(name); if (len < sizeof(unsigned long)) break; hash += a; hash *= 9; name += sizeof(unsigned long); len -= sizeof(unsigned long); if (!len) goto done; } mask = ~(~0ul << len*8); hash += mask & a; done: return fold_hash(hash); } EXPORT_SYMBOL(full_name_hash); /* * Calculate the length and hash of the path component, and * return the length of the component; */ static inline unsigned long hash_name(const char *name, unsigned int *hashp) { unsigned long a, mask, hash, len; hash = a = 0; len = -sizeof(unsigned long); do { hash = (hash + a) * 9; len += sizeof(unsigned long); a = load_unaligned_zeropad(name+len); /* Do we have any NUL or '/' bytes in this word? 
*/ mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/')); } while (!mask); /* The mask *below* the first high bit set */ mask = (mask - 1) & ~mask; mask >>= 7; hash += a & mask; *hashp = fold_hash(hash); return len + count_masked_bytes(mask); } #else unsigned int full_name_hash(const unsigned char *name, unsigned int len) { unsigned long hash = init_name_hash(); while (len--) hash = partial_name_hash(*name++, hash); return end_name_hash(hash); } EXPORT_SYMBOL(full_name_hash); /* * We know there's a real path component here of at least * one character. */ static inline unsigned long hash_name(const char *name, unsigned int *hashp) { unsigned long hash = init_name_hash(); unsigned long len = 0, c; c = (unsigned char)*name; do { len++; hash = partial_name_hash(c, hash); c = (unsigned char)name[len]; } while (c && c != '/'); *hashp = end_name_hash(hash); return len; } #endif /* * Name resolution. * This is the basic name resolution function, turning a pathname into * the final dentry. We expect 'base' to be positive and a directory. * * Returns 0 and nd will have valid dentry and mnt on success. * Returns error and drops reference to input namei data on failure. */ static int link_path_walk(const char *name, struct nameidata *nd) { struct path next; int err; while (*name=='/') name++; if (!*name) return 0; /* At this point we know we have a real path component. 
*/ for(;;) { struct qstr this; long len; int type; err = may_lookup(nd); if (err) break; len = hash_name(name, &this.hash); this.name = name; this.len = len; type = LAST_NORM; if (name[0] == '.') switch (len) { case 2: if (name[1] == '.') { type = LAST_DOTDOT; nd->flags |= LOOKUP_JUMPED; } break; case 1: type = LAST_DOT; } if (likely(type == LAST_NORM)) { struct dentry *parent = nd->path.dentry; nd->flags &= ~LOOKUP_JUMPED; if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { err = parent->d_op->d_hash(parent, nd->inode, &this); if (err < 0) break; } } if (!name[len]) goto last_component; /* * If it wasn't NUL, we know it was '/'. Skip that * slash, and continue until no more slashes. */ do { len++; } while (unlikely(name[len] == '/')); if (!name[len]) goto last_component; name += len; err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW); if (err < 0) return err; if (err) { err = nested_symlink(&next, nd); if (err) return err; } if (can_lookup(nd->inode)) continue; err = -ENOTDIR; break; /* here ends the main loop */ last_component: nd->last = this; nd->last_type = type; return 0; } terminate_walk(nd); return err; } static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd, struct file **fp) { int retval = 0; int fput_needed; struct file *file; nd->last_type = LAST_ROOT; /* if there are only slashes... 
*/ nd->flags = flags | LOOKUP_JUMPED; nd->depth = 0; if (flags & LOOKUP_ROOT) { struct inode *inode = nd->root.dentry->d_inode; if (*name) { if (!inode->i_op->lookup) return -ENOTDIR; retval = inode_permission(inode, MAY_EXEC); if (retval) return retval; } nd->path = nd->root; nd->inode = inode; if (flags & LOOKUP_RCU) { br_read_lock(vfsmount_lock); rcu_read_lock(); nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); } else { path_get(&nd->path); } return 0; } nd->root.mnt = NULL; if (*name=='/') { if (flags & LOOKUP_RCU) { br_read_lock(vfsmount_lock); rcu_read_lock(); set_root_rcu(nd); } else { set_root(nd); path_get(&nd->root); } nd->path = nd->root; } else if (dfd == AT_FDCWD) { if (flags & LOOKUP_RCU) { struct fs_struct *fs = current->fs; unsigned seq; br_read_lock(vfsmount_lock); rcu_read_lock(); do { seq = read_seqcount_begin(&fs->seq); nd->path = fs->pwd; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } else { get_fs_pwd(current->fs, &nd->path); } } else { struct dentry *dentry; file = fget_raw_light(dfd, &fput_needed); retval = -EBADF; if (!file) goto out_fail; dentry = file->f_path.dentry; if (*name) { retval = -ENOTDIR; if (!S_ISDIR(dentry->d_inode->i_mode)) goto fput_fail; retval = inode_permission(dentry->d_inode, MAY_EXEC); if (retval) goto fput_fail; } nd->path = file->f_path; if (flags & LOOKUP_RCU) { if (fput_needed) *fp = file; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); br_read_lock(vfsmount_lock); rcu_read_lock(); } else { path_get(&file->f_path); fput_light(file, fput_needed); } } nd->inode = nd->path.dentry->d_inode; return 0; fput_fail: fput_light(file, fput_needed); out_fail: return retval; } static inline int lookup_last(struct nameidata *nd, struct path *path) { if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; nd->flags &= ~LOOKUP_PARENT; return walk_component(nd, path, &nd->last, nd->last_type, nd->flags 
& LOOKUP_FOLLOW); } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */ static int path_lookupat(int dfd, const char *name, unsigned int flags, struct nameidata *nd) { struct file *base = NULL; struct path path; int err; /* * Path walking is largely split up into 2 different synchronisation * schemes, rcu-walk and ref-walk (explained in * Documentation/filesystems/path-lookup.txt). These share much of the * path walk code, but some things particularly setup, cleanup, and * following mounts are sufficiently divergent that functions are * duplicated. Typically there is a function foo(), and its RCU * analogue, foo_rcu(). * * -ECHILD is the error number of choice (just to avoid clashes) that * is returned if some aspect of an rcu-walk fails. Such an error must * be handled by restarting a traditional ref-walk (which will always * be able to complete). */ err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base); if (unlikely(err)) return err; current->total_link_count = 0; err = link_path_walk(name, nd); if (!err && !(flags & LOOKUP_PARENT)) { err = lookup_last(nd, &path); while (err > 0) { void *cookie; struct path link = path; nd->flags |= LOOKUP_PARENT; err = follow_link(&link, nd, &cookie); if (!err) err = lookup_last(nd, &path); put_link(nd, &link, cookie); } } if (!err) err = complete_walk(nd); if (!err && nd->flags & LOOKUP_DIRECTORY) { if (!nd->inode->i_op->lookup) { path_put(&nd->path); err = -ENOTDIR; } } if (base) fput(base); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { path_put(&nd->root); nd->root.mnt = NULL; } return err; } static int do_path_lookup(int dfd, const char *name, unsigned int flags, struct nameidata *nd) { int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd); if (unlikely(retval == -ECHILD)) retval = path_lookupat(dfd, name, flags, nd); if (unlikely(retval == -ESTALE)) retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd); if (likely(!retval)) { if (unlikely(!audit_dummy_context())) { if 
(nd->path.dentry && nd->inode) audit_inode(name, nd->path.dentry); } } return retval; } int kern_path_parent(const char *name, struct nameidata *nd) { return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd); } int kern_path(const char *name, unsigned int flags, struct path *path) { struct nameidata nd; int res = do_path_lookup(AT_FDCWD, name, flags, &nd); if (!res) *path = nd.path; return res; } /** * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair * @dentry: pointer to dentry of the base directory * @mnt: pointer to vfs mount of the base directory * @name: pointer to file name * @flags: lookup flags * @path: pointer to struct path to fill */ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, const char *name, unsigned int flags, struct path *path) { struct nameidata nd; int err; nd.root.dentry = dentry; nd.root.mnt = mnt; BUG_ON(flags & LOOKUP_PARENT); /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */ err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd); if (!err) *path = nd.path; return err; } /* * Restricted form of lookup. Doesn't follow links, single-component only, * needs parent already locked. Doesn't follow mounts. * SMP-safe. */ static struct dentry *lookup_hash(struct nameidata *nd) { return __lookup_hash(&nd->last, nd->path.dentry, nd); } /** * lookup_one_len - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. Also note that by using this function the * nameidata argument is passed to the filesystem methods and a filesystem * using this helper needs to be prepared for that. 
*/ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) { struct qstr this; unsigned int c; int err; WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); this.name = name; this.len = len; this.hash = full_name_hash(name, len); if (!len) return ERR_PTR(-EACCES); while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') return ERR_PTR(-EACCES); } /* * See if the low-level filesystem might want * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { int err = base->d_op->d_hash(base, base->d_inode, &this); if (err < 0) return ERR_PTR(err); } err = inode_permission(base->d_inode, MAY_EXEC); if (err) return ERR_PTR(err); return __lookup_hash(&this, base, NULL); } int user_path_at_empty(int dfd, const char __user *name, unsigned flags, struct path *path, int *empty) { struct nameidata nd; char *tmp = getname_flags(name, flags, empty); int err = PTR_ERR(tmp); if (!IS_ERR(tmp)) { BUG_ON(flags & LOOKUP_PARENT); err = do_path_lookup(dfd, tmp, flags, &nd); putname(tmp); if (!err) *path = nd.path; } return err; } int user_path_at(int dfd, const char __user *name, unsigned flags, struct path *path) { return user_path_at_empty(dfd, name, flags, path, NULL); } static int user_path_parent(int dfd, const char __user *path, struct nameidata *nd, char **name) { char *s = getname(path); int error; if (IS_ERR(s)) return PTR_ERR(s); error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd); if (error) putname(s); else *name = s; return error; } /* * It's inline, so penalty for filesystems that don't use sticky bit is * minimal. 
 */
/* Sticky-directory deletion check: returns 0 if fsuid owns the victim
 * inode or the directory (same user namespace), nonzero if CAP_FOWNER
 * would be required and is not held. */
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
	uid_t fsuid = current_fsuid();

	if (!(dir->i_mode & S_ISVTX))
		return 0;
	if (current_user_ns() != inode_userns(inode))
		goto other_userns;
	if (inode->i_uid == fsuid)
		return 0;
	if (dir->i_uid == fsuid)
		return 0;

other_userns:
	return !ns_capable(inode_userns(inode), CAP_FOWNER);
}

/*
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
{
	int error;

	if (!victim->d_inode)
		return -ENOENT;

	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(victim, dir);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
		return -EPERM;
	if (isdir) {
		if (!S_ISDIR(victim->d_inode->i_mode))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (S_ISDIR(victim->d_inode->i_mode))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/*	Check whether we can create an object with dentry child in directory
 *  dir.
 *  1. We can't do it if child already exists (open has special treatment for
 *     this case, but since we are inlined it's OK)
 *  2. We can't do it if dir is read-only (done in permission())
 *  3. We should have write and exec permissions on dir
 *  4. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * p1 and p2 should be directories on the same fs.
*/ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) { struct dentry *p; if (p1 == p2) { mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); return NULL; } mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); p = d_ancestor(p2, p1); if (p) { mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD); return p; } p = d_ancestor(p1, p2); if (p) { mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); return p; } mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); return NULL; } void unlock_rename(struct dentry *p1, struct dentry *p2) { mutex_unlock(&p1->d_inode->i_mutex); if (p1 != p2) { mutex_unlock(&p2->d_inode->i_mutex); mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); } } int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->create) return -EACCES; /* shouldn't it be ENOSYS? */ mode &= S_IALLUGO; mode |= S_IFREG; error = security_inode_create(dir, dentry, mode); if (error) return error; error = dir->i_op->create(dir, dentry, mode, nd); if (error) return error; error = security_inode_post_create(dir, dentry, mode); if (error) return error; if (!error) fsnotify_create(dir, dentry); return error; } static int may_open(struct path *path, int acc_mode, int flag) { struct dentry *dentry = path->dentry; struct inode *inode = dentry->d_inode; int error; /* O_PATH? 
*/ if (!acc_mode) return 0; if (!inode) return -ENOENT; switch (inode->i_mode & S_IFMT) { case S_IFLNK: return -ELOOP; case S_IFDIR: if (acc_mode & MAY_WRITE) return -EISDIR; break; case S_IFBLK: case S_IFCHR: if (path->mnt->mnt_flags & MNT_NODEV) return -EACCES; /*FALLTHRU*/ case S_IFIFO: case S_IFSOCK: flag &= ~O_TRUNC; break; } error = inode_permission(inode, acc_mode); if (error) return error; /* * An append-only file must be opened in append mode for writing. */ if (IS_APPEND(inode)) { if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND)) return -EPERM; if (flag & O_TRUNC) return -EPERM; } /* O_NOATIME can only be set by the owner or superuser */ if (flag & O_NOATIME && !inode_owner_or_capable(inode)) return -EPERM; return 0; } static int handle_truncate(struct file *filp) { struct path *path = &filp->f_path; struct inode *inode = path->dentry->d_inode; int error = get_write_access(inode); if (error) return error; /* * Refuse to truncate files with mandatory locks held on them. 
*/ error = locks_verify_locked(inode); if (!error) error = security_path_truncate(path); if (!error) { error = do_truncate(path->dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, filp); } put_write_access(inode); return error; } static inline int open_to_namei_flags(int flag) { if ((flag & O_ACCMODE) == 3) flag--; return flag; } /* * Handle the last step of open() */ static struct file *do_last(struct nameidata *nd, struct path *path, const struct open_flags *op, const char *pathname) { struct dentry *dir = nd->path.dentry; struct dentry *dentry; int open_flag = op->open_flag; int will_truncate = open_flag & O_TRUNC; int want_write = 0; int acc_mode = op->acc_mode; struct file *filp; int error; nd->flags &= ~LOOKUP_PARENT; nd->flags |= op->intent; switch (nd->last_type) { case LAST_DOTDOT: case LAST_DOT: error = handle_dots(nd, nd->last_type); if (error) return ERR_PTR(error); /* fallthrough */ case LAST_ROOT: error = complete_walk(nd); if (error) return ERR_PTR(error); audit_inode(pathname, nd->path.dentry); if (open_flag & O_CREAT) { error = -EISDIR; goto exit; } goto ok; case LAST_BIND: error = complete_walk(nd); if (error) return ERR_PTR(error); audit_inode(pathname, dir); goto ok; } if (!(open_flag & O_CREAT)) { int symlink_ok = 0; if (nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW)) symlink_ok = 1; /* we _can_ be in RCU mode here */ error = walk_component(nd, path, &nd->last, LAST_NORM, !symlink_ok); if (error < 0) return ERR_PTR(error); if (error) /* symlink */ return NULL; /* sayonara */ error = complete_walk(nd); if (error) return ERR_PTR(error); error = -ENOTDIR; if (nd->flags & LOOKUP_DIRECTORY) { if (!nd->inode->i_op->lookup) goto exit; } audit_inode(pathname, nd->path.dentry); goto ok; } /* create side of things */ /* * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been * cleared when we got to the last component we are about to look up */ error = 
/*
 * NOTE(review): this span begins INSIDE do_last() -- the function head and
 * the declarations of nd, path, op, pathname, dir, dentry, filp, error,
 * want_write, will_truncate, acc_mode and open_flag are above this chunk.
 * do_last() performs the last component of an open(2)-style lookup: it
 * either creates the file (O_CREAT on a negative dentry) or opens the
 * existing one, returning NULL when a trailing symlink must be followed
 * by the caller.
 */
complete_walk(nd);
	if (error)
		return ERR_PTR(error);

	audit_inode(pathname, dir);

	error = -EISDIR;
	/* trailing slashes? */
	if (nd->last.name[nd->last.len])
		goto exit;

	mutex_lock(&dir->d_inode->i_mutex);

	dentry = lookup_hash(nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry)) {
		mutex_unlock(&dir->d_inode->i_mutex);
		goto exit;
	}

	path->dentry = dentry;
	path->mnt = nd->path.mnt;

	/* Negative dentry, just create the file */
	if (!dentry->d_inode) {
		umode_t mode = op->mode;
		if (!IS_POSIXACL(dir->d_inode))
			mode &= ~current_umask();
		/*
		 * This write is needed to ensure that a
		 * rw->ro transition does not occur between
		 * the time when the file is created and when
		 * a permanent write count is taken through
		 * the 'struct file' in nameidata_to_filp().
		 */
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit_mutex_unlock;
		want_write = 1;
		/* Don't check for write permission, don't truncate */
		open_flag &= ~O_TRUNC;
		will_truncate = 0;
		acc_mode = MAY_OPEN;
		error = security_path_mknod(&nd->path, dentry, mode, 0);
		if (error)
			goto exit_mutex_unlock;
		error = vfs_create(dir->d_inode, dentry, mode, nd);
		if (error)
			goto exit_mutex_unlock;
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(nd->path.dentry);
		nd->path.dentry = dentry;
		goto common;
	}

	/*
	 * It already exists.
	 */
	mutex_unlock(&dir->d_inode->i_mutex);
	audit_inode(pathname, path->dentry);

	error = -EEXIST;
	if (open_flag & O_EXCL)
		goto exit_dput;

	error = follow_managed(path, nd->flags);
	if (error < 0)
		goto exit_dput;

	if (error)
		nd->flags |= LOOKUP_JUMPED;

	error = -ENOENT;
	if (!path->dentry->d_inode)
		goto exit_dput;

	/* NULL tells the caller "trailing symlink -- follow it and retry" */
	if (path->dentry->d_inode->i_op->follow_link)
		return NULL;

	path_to_nameidata(path, nd);
	nd->inode = path->dentry->d_inode;
	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED...
	 */
	error = complete_walk(nd);
	if (error)
		return ERR_PTR(error);
	error = -EISDIR;
	if (S_ISDIR(nd->inode->i_mode))
		goto exit;
ok:
	/* truncation only makes sense for regular files */
	if (!S_ISREG(nd->inode->i_mode))
		will_truncate = 0;

	if (will_truncate) {
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit;
		want_write = 1;
	}
common:
	error = may_open(&nd->path, acc_mode, open_flag);
	if (error)
		goto exit;
	filp = nameidata_to_filp(nd);
	if (!IS_ERR(filp)) {
		error = ima_file_check(filp, op->acc_mode);
		if (error) {
			fput(filp);
			filp = ERR_PTR(error);
		}
	}
	if (!IS_ERR(filp)) {
		if (will_truncate) {
			error = handle_truncate(filp);
			if (error) {
				fput(filp);
				filp = ERR_PTR(error);
			}
		}
	}
out:
	if (want_write)
		mnt_drop_write(nd->path.mnt);
	path_put(&nd->path);
	return filp;

exit_mutex_unlock:
	mutex_unlock(&dir->d_inode->i_mutex);
exit_dput:
	path_put_conditional(path, nd);
exit:
	filp = ERR_PTR(error);
	goto out;
}

/*
 * path_openat - core of do_filp_open(): walk the path, then loop on
 * do_last(), following trailing symlinks (do_last() returns NULL for those)
 * until a struct file or an error comes back.
 */
static struct file *path_openat(int dfd, const char *pathname,
		struct nameidata *nd, const struct open_flags *op, int flags)
{
	struct file *base = NULL;
	struct file *filp;
	struct path path;
	int error;

	filp = get_empty_filp();
	if (!filp)
		return ERR_PTR(-ENFILE);

	filp->f_flags = op->open_flag;
	nd->intent.open.file = filp;
	nd->intent.open.flags = open_to_namei_flags(op->open_flag);
	nd->intent.open.create_mode = op->mode;

	error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
	if (unlikely(error))
		goto out_filp;

	current->total_link_count = 0;
	error = link_path_walk(pathname, nd);
	if (unlikely(error))
		goto out_filp;

	filp = do_last(nd, &path, op, pathname);
	while (unlikely(!filp)) { /* trailing symlink */
		struct path link = path;
		void *cookie;
		if (!(nd->flags & LOOKUP_FOLLOW)) {
			/* O_NOFOLLOW (or similar): refuse the symlink */
			path_put_conditional(&path, nd);
			path_put(&nd->path);
			filp = ERR_PTR(-ELOOP);
			break;
		}
		nd->flags |= LOOKUP_PARENT;
		nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
		error = follow_link(&link, nd, &cookie);
		if (unlikely(error))
			filp = ERR_PTR(error);
		else
			filp = do_last(nd, &path, op, pathname);
		put_link(nd, &link, cookie);
	}
out:
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
		path_put(&nd->root);
	if (base)
		fput(base);
	release_open_intent(nd);
	return filp;

out_filp:
	filp = ERR_PTR(error);
	goto out;
}

/*
 * do_filp_open - open a file relative to dfd.  Tries the lockless RCU walk
 * first, falls back to ref-walk on -ECHILD, and forces revalidation
 * (LOOKUP_REVAL) on -ESTALE.
 */
struct file *do_filp_open(int dfd, const char *pathname,
		const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *filp;

	filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		filp = path_openat(dfd, pathname, &nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
	return filp;
}

/*
 * do_file_open_root - like do_filp_open() but the lookup is rooted at the
 * given dentry/mnt pair instead of the task's root.  Same RCU/refwalk/
 * reval retry ladder.
 */
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *file;

	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	flags |= LOOKUP_ROOT;

	if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);

	file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(-1, name, &nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
	return file;
}

/*
 * kern_path_create - look up the parent of @pathname and return a negative
 * dentry for the last component, ready for a create-type operation.
 * On success the parent's i_mutex is held and *path is a counted reference:
 * the caller must unlock and path_put() when done.
 */
struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);
	struct nameidata nd;
	int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
	if (error)
		return ERR_PTR(error);

	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (nd.last_type != LAST_NORM)
		goto out;
	nd.flags &= ~LOOKUP_PARENT;
	nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	nd.intent.open.flags = O_EXCL;

	/*
	 * Do the final lookup.
	 */
	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	if (IS_ERR(dentry))
		goto fail;

	if (dentry->d_inode)
		goto eexist;

	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
		dput(dentry);
		dentry = ERR_PTR(-ENOENT);
		goto fail;
	}
	*path = nd.path;
	return dentry;
eexist:
	dput(dentry);
	dentry = ERR_PTR(-EEXIST);
fail:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
out:
	path_put(&nd.path);
	return dentry;
}
EXPORT_SYMBOL(kern_path_create);

/*
 * user_path_create - userspace-pathname wrapper around kern_path_create().
 */
struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
{
	char *tmp = getname(pathname);
	struct dentry *res;
	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	res = kern_path_create(dfd, tmp, path, is_dir);
	putname(tmp);
	return res;
}
EXPORT_SYMBOL(user_path_create);

/*
 * vfs_mknod - create a special file (or regular file/FIFO/socket) after
 * permission, capability, device-cgroup and LSM checks.
 */
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	/* device nodes need CAP_MKNOD in the directory's user namespace */
	if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
	    !ns_capable(inode_userns(dir), CAP_MKNOD))
		return -EPERM;

	if (!dir->i_op->mknod)
		return -EPERM;

	error = devcgroup_inode_mknod(mode, dev);
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = security_inode_post_create(dir, dentry, mode);
	if (error)
		return error;

	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}

/* which file types may mknod(2) create?  directories never, dubious never */
static int may_mknod(umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
	case 0: /* zero mode translates to S_IFREG */
		return 0;
	case S_IFDIR:
		return -EPERM;
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	struct dentry *dentry;
	struct path path;
	int error;

	if (S_ISDIR(mode))
		return -EPERM;

	dentry = user_path_create(dfd, filename, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = may_mknod(mode);
	/* NOTE(review): statement continues in the next chunk of the file */
	if
/* NOTE(review): continuation of sys_mknodat() from the previous chunk */
(error)
		goto out_dput;
	error = mnt_want_write(path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mknod(&path, dentry, mode, dev);
	if (error)
		goto out_drop_write;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(path.dentry->d_inode,dentry,mode,NULL);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
			break;
	}
out_drop_write:
	mnt_drop_write(path.mnt);
out_dput:
	dput(dentry);
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);

	return error;
}

SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}

/*
 * vfs_mkdir - create a directory after permission/LSM checks; enforces the
 * filesystem's per-directory link limit (s_max_links).
 */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error = may_create(dir, dentry);
	unsigned max_links = dir->i_sb->s_max_links;

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	if (max_links && dir->i_nlink >= max_links)
		return -EMLINK;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry);
	return error;
}

SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
	struct dentry *dentry;
	struct path path;
	int error;

	dentry = user_path_create(dfd, pathname, &path, 1);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = mnt_want_write(path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mkdir(&path, dentry, mode);
	if (error)
		goto out_drop_write;
	error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
out_drop_write:
	mnt_drop_write(path.mnt);
out_dput:
	dput(dentry);
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);
	return error;
}

SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}

/*
 * The dentry_unhash() helper will try to drop the dentry early: we
 * should have a usage count of 1 if we're the only user of this
 * dentry, and if that is true (possibly after pruning the dcache),
 * then we drop the dentry now.
 *
 * A low-level filesystem can, if it choses, legally
 * do a
 *
 *	if (!d_unhashed(dentry))
 *		return -EBUSY;
 *
 * if it cannot handle the case of removing a directory
 * that is still in use by something else..
 */
void dentry_unhash(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	spin_lock(&dentry->d_lock);
	if (dentry->d_count == 1)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}

/*
 * vfs_rmdir - remove a directory: permission/LSM checks, refuses a
 * mountpoint, marks the victim S_DEAD so new users can't appear.
 */
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 1);

	if (error)
		return error;

	if (!dir->i_op->rmdir)
		return -EPERM;

	dget(dentry);
	mutex_lock(&dentry->d_inode->i_mutex);

	error = -EBUSY;
	if (d_mountpoint(dentry))
		goto out;

	error = security_inode_rmdir(dir, dentry);
	if (error)
		goto out;

	shrink_dcache_parent(dentry);
	error = dir->i_op->rmdir(dir, dentry);
	if (error)
		goto out;

	dentry->d_inode->i_flags |= S_DEAD;
	dont_mount(dentry);

out:
	mutex_unlock(&dentry->d_inode->i_mutex);
	dput(dentry);
	if (!error)
		d_delete(dentry);
	return error;
}

/*
 * do_rmdir - rmdir(2) guts: parent lookup, last-component sanity checks,
 * then vfs_rmdir() under the parent's i_mutex with write access held.
 */
static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	char * name;
	struct dentry *dentry;
	struct nameidata nd;

	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;

	switch(nd.last_type) {
	case LAST_DOTDOT:
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:
		error = -EBUSY;
		goto exit1;
	}

	nd.flags &= ~LOOKUP_PARENT;

	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		error = -ENOENT;
		goto exit3;
	}
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto exit3;
	error = security_path_rmdir(&nd.path, dentry);
	if (error)
		goto exit4;
	error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
exit4:
	mnt_drop_write(nd.path.mnt);
exit3:
	dput(dentry);
exit2:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
exit1:
	path_put(&nd.path);
	putname(name);
	return error;
}

SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}

/*
 * vfs_unlink - remove a non-directory entry; mountpoints are refused,
 * NFS silly-renamed files are not d_delete()d (they still exist).
 */
int vfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 0);

	if (error)
		return error;

	if (!dir->i_op->unlink)
		return -EPERM;

	mutex_lock(&dentry->d_inode->i_mutex);
	if (d_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
			error = dir->i_op->unlink(dir, dentry);
			if (!error)
				dont_mount(dentry);
		}
	}
	mutex_unlock(&dentry->d_inode->i_mutex);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(dentry->d_inode);
		d_delete(dentry);
	}

	return error;
}

/*
 * Make sure that the actual truncation of the file will occur outside its
 * directory's i_mutex.  Truncate can take a long time if there is a lot of
 * writeout happening, and we don't want to prevent access to the directory
 * while waiting on the I/O.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error;
	char *name;
	struct dentry *dentry;
	struct nameidata nd;
	struct inode *inode = NULL;

	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;

	error = -EISDIR;
	if (nd.last_type != LAST_NORM)
		goto exit1;

	nd.flags &= ~LOOKUP_PARENT;

	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want correct error value */
		if (nd.last.name[nd.last.len])
			goto slashes;
		inode = dentry->d_inode;
		if (!inode)
			goto slashes;
		/* hold the inode so truncation happens after i_mutex drop */
		ihold(inode);
		error = mnt_want_write(nd.path.mnt);
		if (error)
			goto exit2;
		error = security_path_unlink(&nd.path, dentry);
		if (error)
			goto exit3;
		error = vfs_unlink(nd.path.dentry->d_inode, dentry);
exit3:
		mnt_drop_write(nd.path.mnt);
	exit2:
		dput(dentry);
	}
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	if (inode)
		iput(inode);	/* truncate the inode here */
exit1:
	path_put(&nd.path);
	putname(name);
	return error;

slashes:
	error = !dentry->d_inode ? -ENOENT :
		S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit2;
}

SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
	if ((flag & ~AT_REMOVEDIR) != 0)
		return -EINVAL;

	if (flag & AT_REMOVEDIR)
		return do_rmdir(dfd, pathname);

	return do_unlinkat(dfd, pathname);
}

SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
	return do_unlinkat(AT_FDCWD, pathname);
}

/* vfs_symlink - create a symlink @oldname at @dentry after LSM check */
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->symlink)
		return -EPERM;

	error = security_inode_symlink(dir, dentry, oldname);
	if (error)
		return error;

	error = dir->i_op->symlink(dir, dentry, oldname);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}

SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	int error;
	char *from;
	struct dentry *dentry;
	struct path path;

	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);

	dentry = user_path_create(newdfd, newname, &path, 0);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_putname;

	error = mnt_want_write(path.mnt);
	if (error)
		goto out_dput;
	error = security_path_symlink(&path, dentry, from);
	if (error)
		goto out_drop_write;
	error = vfs_symlink(path.dentry->d_inode, dentry, from);
out_drop_write:
	mnt_drop_write(path.mnt);
out_dput:
	dput(dentry);
	/* NOTE(review): tail of sys_symlinkat() begun in the previous chunk */
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);
out_putname:
	putname(from);
	return error;
}

SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
	return sys_symlinkat(oldname, AT_FDCWD, newname);
}

/*
 * vfs_link - create a hard link; rejects cross-superblock links,
 * append-only/immutable targets, directories, and (under i_mutex on the
 * inode) racing unlink (i_nlink == 0) or link-count overflow.
 */
int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	unsigned max_links = dir->i_sb->s_max_links;
	int error;

	if (!inode)
		return -ENOENT;

	error = may_create(dir, new_dentry);
	if (error)
		return error;

	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A link to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;
	if (!dir->i_op->link)
		return -EPERM;
	if (S_ISDIR(inode->i_mode))
		return -EPERM;

	error = security_inode_link(old_dentry, dir, new_dentry);
	if (error)
		return error;

	mutex_lock(&inode->i_mutex);
	/* Make sure we don't allow creating hardlink to an unlinked file */
	if (inode->i_nlink == 0)
		error = -ENOENT;
	else if (max_links && inode->i_nlink >= max_links)
		error = -EMLINK;
	else
		error = dir->i_op->link(old_dentry, dir, new_dentry);
	mutex_unlock(&inode->i_mutex);
	if (!error)
		fsnotify_link(dir, inode, new_dentry);
	return error;
}

/*
 * Hardlinks are often used in delicate situations.  We avoid
 * security-related surprises by not following symlinks on the
 * newname.  --KAB
 *
 * We don't follow them on the oldname either to be compatible
 * with linux 2.0, and to avoid hard-linking to directories
 * and other special files.  --ADM
 */
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, int, flags)
{
	struct dentry *new_dentry;
	struct path old_path, new_path;
	int how = 0;
	int error;

	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;
	/*
	 * To use null names we require CAP_DAC_READ_SEARCH
	 * This ensures that not everyone will be able to create
	 * handlink using the passed filedescriptor.
	 */
	if (flags & AT_EMPTY_PATH) {
		if (!capable(CAP_DAC_READ_SEARCH))
			return -ENOENT;
		how = LOOKUP_EMPTY;
	}

	if (flags & AT_SYMLINK_FOLLOW)
		how |= LOOKUP_FOLLOW;

	error = user_path_at(olddfd, oldname, how, &old_path);
	if (error)
		return error;

	new_dentry = user_path_create(newdfd, newname, &new_path, 0);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto out;

	error = -EXDEV;
	if (old_path.mnt != new_path.mnt)
		goto out_dput;
	error = mnt_want_write(new_path.mnt);
	if (error)
		goto out_dput;
	error = security_path_link(old_path.dentry, &new_path, new_dentry);
	if (error)
		goto out_drop_write;
	error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
out_drop_write:
	mnt_drop_write(new_path.mnt);
out_dput:
	dput(new_dentry);
	mutex_unlock(&new_path.dentry->d_inode->i_mutex);
	path_put(&new_path);
out:
	path_put(&old_path);

	return error;
}

SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
	return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}

/*
 * The worst of all namespace operations - renaming directory. "Perverted"
 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
 * Problems:
 *	a) we can get into loop creation. Check is done in is_subdir().
 *	b) race potential - two innocent renames can create a loop together.
 *	   That's where 4.4 screws up. Current fix: serialization on
 *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
 *	   story.
 *	c) we have to lock _three_ objects - parents and victim (if it exists).
 *	   And that - after we got ->i_mutex on parents (until then we don't know
 *	   whether the target exists).  Solution: try to be smart with locking
 *	   order for inodes.  We rely on the fact that tree topology may change
 *	   only under ->s_vfs_rename_mutex _and_ that parent of the object we
 *	   move will be locked.  Thus we can rank directories by the tree
 *	   (ancestors first) and rank all non-directories after them.
 *	   That works since everybody except rename does "lock parent, lookup,
 *	   lock child" and rename is under ->s_vfs_rename_mutex.
 *	   HOWEVER, it relies on the assumption that any object with ->lookup()
 *	   has no more than 1 dentry.  If "hybrid" objects will ever appear,
 *	   we'd better make sure that there's no link(2) for them.
 *	d) conversion from fhandle to dentry may come in the wrong moment - when
 *	   we are removing the target. Solution: we will have to grab ->i_mutex
 *	   in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
 *	   ->i_mutex on parents, which works but leads to some truly excessive
 *	   locking].
 */
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int error = 0;
	struct inode *target = new_dentry->d_inode;
	unsigned max_links = new_dir->i_sb->s_max_links;

	/*
	 * If we are going to change the parent - check write permissions,
	 * we'll need to flip '..'.
	 */
	if (new_dir != old_dir) {
		error = inode_permission(old_dentry->d_inode, MAY_WRITE);
		if (error)
			return error;
	}

	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;

	dget(new_dentry);
	if (target)
		mutex_lock(&target->i_mutex);

	error = -EBUSY;
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
		goto out;

	error = -EMLINK;
	if (max_links && !target && new_dir != old_dir &&
	    new_dir->i_nlink >= max_links)
		goto out;

	if (target)
		shrink_dcache_parent(new_dentry);
	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		goto out;

	if (target) {
		target->i_flags |= S_DEAD;
		dont_mount(new_dentry);
	}
out:
	if (target)
		mutex_unlock(&target->i_mutex);
	dput(new_dentry);
	if (!error)
		if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
			d_move(old_dentry,new_dentry);
	return error;
}

/* non-directory counterpart of vfs_rename_dir(): simpler locking, no '..' */
static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
			    struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *target = new_dentry->d_inode;
	int error;

	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;

	dget(new_dentry);
	if (target)
		mutex_lock(&target->i_mutex);

	error = -EBUSY;
	if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
		goto out;

	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		goto out;

	if (target)
		dont_mount(new_dentry);
	if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
		d_move(old_dentry, new_dentry);
out:
	if (target)
		mutex_unlock(&target->i_mutex);
	dput(new_dentry);
	return error;
}

/*
 * vfs_rename - dispatch to the directory/non-directory rename helper after
 * the may_delete()/may_create() checks, and emit the fsnotify move event.
 */
int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	       struct inode *new_dir, struct dentry *new_dentry)
{
	int error;
	int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
	const unsigned char *old_name;

	if (old_dentry->d_inode == new_dentry->d_inode)
		return 0;

	error = may_delete(old_dir, old_dentry, is_dir);
	if (error)
		return error;

	if (!new_dentry->d_inode)
		error = may_create(new_dir, new_dentry);
	else
		error = may_delete(new_dir, new_dentry, is_dir);
	if (error)
		return error;

	if (!old_dir->i_op->rename)
		return -EPERM;

	old_name = fsnotify_oldname_init(old_dentry->d_name.name);

	if (is_dir)
		error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
	else
		error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
	if (!error)
		fsnotify_move(old_dir, new_dir, old_name, is_dir,
			      new_dentry->d_inode, old_dentry);
	fsnotify_oldname_free(old_name);

	return error;
}

SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct nameidata oldnd, newnd;
	char *from;
	char *to;
	int error;

	error = user_path_parent(olddfd, oldname, &oldnd, &from);
	if (error)
		goto exit;

	error = user_path_parent(newdfd, newname, &newnd, &to);
	if (error)
		goto exit1;

	error = -EXDEV;
	if (oldnd.path.mnt != newnd.path.mnt)
		goto exit2;

	old_dir = oldnd.path.dentry;
	error = -EBUSY;
	/* NOTE(review): condition continues in the next chunk of the file */
	if (oldnd.last_type !=
/* NOTE(review): continuation of sys_renameat() from the previous chunk */
LAST_NORM)
		goto exit2;

	new_dir = newnd.path.dentry;
	if (newnd.last_type != LAST_NORM)
		goto exit2;

	oldnd.flags &= ~LOOKUP_PARENT;
	newnd.flags &= ~LOOKUP_PARENT;
	newnd.flags |= LOOKUP_RENAME_TARGET;

	trap = lock_rename(new_dir, old_dir);

	old_dentry = lookup_hash(&oldnd);
	error = PTR_ERR(old_dentry);
	if (IS_ERR(old_dentry))
		goto exit3;
	/* source must exist */
	error = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;
	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		error = -ENOTDIR;
		if (oldnd.last.name[oldnd.last.len])
			goto exit4;
		if (newnd.last.name[newnd.last.len])
			goto exit4;
	}
	/* source should not be ancestor of target */
	error = -EINVAL;
	if (old_dentry == trap)
		goto exit4;
	new_dentry = lookup_hash(&newnd);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto exit4;
	/* target should not be an ancestor of source */
	error = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit5;

	error = mnt_want_write(oldnd.path.mnt);
	if (error)
		goto exit5;
	error = security_path_rename(&oldnd.path, old_dentry,
				     &newnd.path, new_dentry);
	if (error)
		goto exit6;
	error = vfs_rename(old_dir->d_inode, old_dentry,
				   new_dir->d_inode, new_dentry);
exit6:
	mnt_drop_write(oldnd.path.mnt);
exit5:
	dput(new_dentry);
exit4:
	dput(old_dentry);
exit3:
	unlock_rename(new_dir, old_dir);
exit2:
	path_put(&newnd.path);
	putname(to);
exit1:
	path_put(&oldnd.path);
	putname(from);
exit:
	return error;
}

SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
	return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
}

/*
 * vfs_readlink - copy an already-resolved link body @link (possibly an
 * ERR_PTR) to userspace, truncating to @buflen.  Returns length or -errno.
 */
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
	int len;

	len = PTR_ERR(link);
	if (IS_ERR(link))
		goto out;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		len = -EFAULT;
out:
	return len;
}

/*
 * A helper for ->readlink().  This should be used *ONLY* for symlinks that
 * have ->follow_link() touching nd only in nd_set_link().  Using (or not
 * using) it for any given inode is up to filesystem.
 */
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct nameidata nd;
	void *cookie;
	int res;

	nd.depth = 0;
	cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
	if (IS_ERR(cookie))
		return PTR_ERR(cookie);

	res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
	if (dentry->d_inode->i_op->put_link)
		dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
	return res;
}

int vfs_follow_link(struct nameidata *nd, const char *link)
{
	return __vfs_follow_link(nd, link);
}

/* get the link contents into pagecache */
static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
	char *kaddr;
	struct page *page;
	struct address_space *mapping = dentry->d_inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		return (char*)page;
	*ppage = page;
	kaddr = kmap(page);
	nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
	return kaddr;
}

int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct page *page = NULL;
	char *s = page_getlink(dentry, &page);
	int res = vfs_readlink(dentry,buffer,buflen,s);
	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
	return res;
}

void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	nd_set_link(nd, page_getlink(dentry, &page));
	return page;	/* cookie for page_put_link() */
}

void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	struct page *page = cookie;

	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
}

/*
 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
 */
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;
	char *kaddr;
	unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
	if (nofs)
		flags |= AOP_FLAG_NOFS;

retry:
	err = pagecache_write_begin(NULL, mapping, 0, len-1,
				flags, &page, &fsdata);
	if (err)
		goto fail;

	kaddr = kmap_atomic(page);
	memcpy(kaddr, symname, len-1);
	kunmap_atomic(kaddr);

	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
							page, fsdata);
	if (err < 0)
		goto fail;
	if (err < len-1)
		goto retry;	/* short write: try again */

	mark_inode_dirty(inode);
	return 0;
fail:
	return err;
}

int page_symlink(struct inode *inode, const char *symname, int len)
{
	return __page_symlink(inode, symname, len,
			!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
}

const struct inode_operations page_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
};

EXPORT_SYMBOL(user_path_at);
EXPORT_SYMBOL(follow_down_one);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
EXPORT_SYMBOL(page_put_link);
EXPORT_SYMBOL(page_readlink);
EXPORT_SYMBOL(__page_symlink);
EXPORT_SYMBOL(page_symlink);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(kern_path);
EXPORT_SYMBOL(vfs_path_lookup);
EXPORT_SYMBOL(inode_permission);
EXPORT_SYMBOL(unlock_rename);
EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_follow_link);
EXPORT_SYMBOL(vfs_link);
EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod);
EXPORT_SYMBOL(generic_permission);
EXPORT_SYMBOL(vfs_readlink);
EXPORT_SYMBOL(vfs_rename);
EXPORT_SYMBOL(vfs_rmdir);
EXPORT_SYMBOL(vfs_symlink);
EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(dentry_unhash);
EXPORT_SYMBOL(generic_readlink);
gpl-2.0
ratnamanoj/kernel-4.0.4
drivers/usb/gadget/udc/s3c-hsudc.c
604
36310
/* linux/drivers/usb/gadget/s3c-hsudc.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * S3C24XX USB 2.0 High-speed USB controller gadget driver
 *
 * The S3C24XX USB 2.0 high-speed USB controller supports upto 9 endpoints.
 * Each endpoint can be configured as either in or out endpoint. Endpoints
 * can be configured for Bulk or Interrupt transfer mode.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/prefetch.h>
#include <linux/platform_data/s3c-hsudc.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>

#include <mach/regs-s3c2443-clock.h>

#define S3C_HSUDC_REG(x)	(x)

/* Non-Indexed Registers */
#define S3C_IR				S3C_HSUDC_REG(0x00) /* Index Register */
#define S3C_EIR				S3C_HSUDC_REG(0x04) /* EP Intr Status */
#define S3C_EIR_EP0			(1<<0)
#define S3C_EIER			S3C_HSUDC_REG(0x08) /* EP Intr Enable */
#define S3C_FAR				S3C_HSUDC_REG(0x0c) /* Gadget Address */
#define S3C_FNR				S3C_HSUDC_REG(0x10) /* Frame Number */
#define S3C_EDR				S3C_HSUDC_REG(0x14) /* EP Direction */
#define S3C_TR				S3C_HSUDC_REG(0x18) /* Test Register */
#define S3C_SSR				S3C_HSUDC_REG(0x1c) /* System Status */
#define S3C_SSR_DTZIEN_EN		(0xff8f)
#define S3C_SSR_ERR			(0xff80)
#define S3C_SSR_VBUSON			(1 << 8)
#define S3C_SSR_HSP			(1 << 4)
#define S3C_SSR_SDE			(1 << 3)
#define S3C_SSR_RESUME			(1 << 2)
#define S3C_SSR_SUSPEND			(1 << 1)
#define S3C_SSR_RESET			(1 << 0)
#define S3C_SCR				S3C_HSUDC_REG(0x20) /* System Control */
#define S3C_SCR_DTZIEN_EN		(1 << 14)
#define S3C_SCR_RRD_EN			(1 << 5)
#define S3C_SCR_SUS_EN			(1 << 1)
#define S3C_SCR_RST_EN			(1 << 0)
#define S3C_EP0SR			S3C_HSUDC_REG(0x24) /* EP0 Status */
#define S3C_EP0SR_EP0_LWO		(1 << 6)
#define S3C_EP0SR_STALL			(1 << 4)
#define S3C_EP0SR_TX_SUCCESS		(1 << 1)
#define S3C_EP0SR_RX_SUCCESS		(1 << 0)
#define S3C_EP0CR			S3C_HSUDC_REG(0x28) /* EP0 Control */
#define S3C_BR(_x)			S3C_HSUDC_REG(0x60 + (_x * 4))

/* Indexed Registers (selected through S3C_IR) */
#define S3C_ESR				S3C_HSUDC_REG(0x2c) /* EPn Status */
#define S3C_ESR_FLUSH			(1 << 6)
#define S3C_ESR_STALL			(1 << 5)
#define S3C_ESR_LWO			(1 << 4)
#define S3C_ESR_PSIF_ONE		(1 << 2)
#define S3C_ESR_PSIF_TWO		(2 << 2)
#define S3C_ESR_TX_SUCCESS		(1 << 1)
#define S3C_ESR_RX_SUCCESS		(1 << 0)
#define S3C_ECR				S3C_HSUDC_REG(0x30) /* EPn Control */
#define S3C_ECR_DUEN			(1 << 7)
#define S3C_ECR_FLUSH			(1 << 6)
#define S3C_ECR_STALL			(1 << 1)
#define S3C_ECR_IEMS			(1 << 0)
#define S3C_BRCR			S3C_HSUDC_REG(0x34) /* Read Count */
#define S3C_BWCR			S3C_HSUDC_REG(0x38) /* Write Count */
#define S3C_MPR				S3C_HSUDC_REG(0x3c) /* Max Pkt Size */

/* EP0 state machine states */
#define WAIT_FOR_SETUP			(0)
#define DATA_STATE_XMIT			(1)
#define DATA_STATE_RECV			(2)

static const char * const s3c_hsudc_supply_names[] = {
	"vdda",		/* analog phy supply, 3.3V */
	"vddi",		/* digital phy supply, 1.2V */
	"vddosc",	/* oscillator supply, 1.8V - 3.3V */
};

/**
 * struct s3c_hsudc_ep - Endpoint representation used by driver.
 * @ep: USB gadget layer representation of device endpoint.
 * @name: Endpoint name (as required by ep autoconfiguration).
 * @dev: Reference to the device controller to which this EP belongs.
 * @desc: Endpoint descriptor obtained from the gadget driver.
 * @queue: Transfer request queue for the endpoint.
 * @stopped: Maintains state of endpoint, set if EP is halted.
 * @bEndpointAddress: EP address (including direction bit).
 * @fifo: Base address of EP FIFO.
 */
struct s3c_hsudc_ep {
	struct usb_ep ep;
	char name[20];
	struct s3c_hsudc *dev;
	struct list_head queue;
	u8 stopped;
	u8 wedge;	/* set by usb_ep_set_wedge(); not in kernel-doc above */
	u8 bEndpointAddress;
	void __iomem *fifo;
};

/**
 * struct s3c_hsudc_req - Driver encapsulation of USB gadget transfer request.
 * @req: Reference to USB gadget transfer request.
 * @queue: Used for inserting this request to the endpoint request queue.
 */
struct s3c_hsudc_req {
	struct usb_request req;
	struct list_head queue;
};

/**
 * struct s3c_hsudc - Driver's abstraction of the device controller.
 * @gadget: Instance of usb_gadget which is referenced by gadget driver.
 * @driver: Reference to currenty active gadget driver.
 * @dev: The device reference used by probe function.
 * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
 * @regs: Remapped base address of controller's register space.
 * irq: IRQ number used by the controller.
 * uclk: Reference to the controller clock.
 * ep0state: Current state of EP0.
 * ep: List of endpoints supported by the controller.
 */
struct s3c_hsudc {
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct device *dev;
	struct s3c24xx_hsudc_platdata *pd;
	struct usb_phy *transceiver;
	struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)];
	spinlock_t lock;
	void __iomem *regs;
	int irq;
	struct clk *uclk;
	int ep0state;
	struct s3c_hsudc_ep ep[];	/* flexible array, sized by pd->epnum */
};

#define ep_maxpacket(_ep)	((_ep)->ep.maxpacket)
#define ep_is_in(_ep)		((_ep)->bEndpointAddress & USB_DIR_IN)
#define ep_index(_ep)		((_ep)->bEndpointAddress & \
					USB_ENDPOINT_NUMBER_MASK)

static const char driver_name[] = "s3c-udc";
static const char ep0name[] = "ep0-control";

static inline struct s3c_hsudc_req *our_req(struct usb_request *req)
{
	return container_of(req, struct s3c_hsudc_req, req);
}

static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep)
{
	return container_of(ep, struct s3c_hsudc_ep, ep);
}

static inline struct s3c_hsudc *to_hsudc(struct usb_gadget *gadget)
{
	return container_of(gadget, struct s3c_hsudc, gadget);
}

/* select an endpoint in the indexed-register window via S3C_IR */
static inline void set_index(struct s3c_hsudc *hsudc, int ep_addr)
{
	ep_addr &= USB_ENDPOINT_NUMBER_MASK;
	writel(ep_addr, hsudc->regs + S3C_IR);
}

/* read-modify-write helper: OR @val into the register at @ptr */
static inline void __orr32(void __iomem *ptr, u32 val)
{
	writel(readl(ptr) | val, ptr);
}

/* power up and reset the S3C2443 USB PHY, then enable its clocks */
static void s3c_hsudc_init_phy(void)
{
	u32 cfg;

	cfg = readl(S3C2443_PWRCFG) | S3C2443_PWRCFG_USBPHY;
	writel(cfg, S3C2443_PWRCFG);

	cfg = readl(S3C2443_URSTCON);
	cfg |= (S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
	writel(cfg, S3C2443_URSTCON);
	mdelay(1);

	cfg = readl(S3C2443_URSTCON);
	cfg &= ~(S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST);
	writel(cfg, S3C2443_URSTCON);

	cfg = readl(S3C2443_PHYCTRL);
	cfg &= ~(S3C2443_PHYCTRL_CLKSEL | S3C2443_PHYCTRL_DSPORT);
	cfg |= (S3C2443_PHYCTRL_EXTCLK | S3C2443_PHYCTRL_PLLSEL);
	writel(cfg, S3C2443_PHYCTRL);

	cfg = readl(S3C2443_PHYPWR);
	cfg &= ~(S3C2443_PHYPWR_FSUSPEND | S3C2443_PHYPWR_PLL_PWRDN |
		S3C2443_PHYPWR_XO_ON | S3C2443_PHYPWR_PLL_REFCLK |
		S3C2443_PHYPWR_ANALOG_PD);
	cfg |= S3C2443_PHYPWR_COMMON_ON;
	writel(cfg,
	/* NOTE(review): tail of s3c_hsudc_init_phy() begun in previous chunk */
	S3C2443_PHYPWR);

	cfg = readl(S3C2443_UCLKCON);
	cfg |= (S3C2443_UCLKCON_DETECT_VBUS | S3C2443_UCLKCON_FUNC_CLKEN |
		S3C2443_UCLKCON_TCLKEN);
	writel(cfg, S3C2443_UCLKCON);
}

/* suspend the PHY and gate its function clock (inverse of init_phy) */
static void s3c_hsudc_uninit_phy(void)
{
	u32 cfg;

	cfg = readl(S3C2443_PWRCFG) & ~S3C2443_PWRCFG_USBPHY;
	writel(cfg, S3C2443_PWRCFG);

	writel(S3C2443_PHYPWR_FSUSPEND, S3C2443_PHYPWR);

	cfg = readl(S3C2443_UCLKCON) & ~S3C2443_UCLKCON_FUNC_CLKEN;
	writel(cfg, S3C2443_UCLKCON);
}

/**
 * s3c_hsudc_complete_request - Complete a transfer request.
 * @hsep: Endpoint to which the request belongs.
 * @hsreq: Transfer request to be completed.
 * @status: Transfer completion status for the transfer request.
 */
static void s3c_hsudc_complete_request(struct s3c_hsudc_ep *hsep,
				struct s3c_hsudc_req *hsreq, int status)
{
	unsigned int stopped = hsep->stopped;
	struct s3c_hsudc *hsudc = hsep->dev;

	list_del_init(&hsreq->queue);
	hsreq->req.status = status;

	if (!ep_index(hsep)) {
		/* EP0 request finished: rearm for the next SETUP */
		hsudc->ep0state = WAIT_FOR_SETUP;
		hsep->bEndpointAddress &= ~USB_DIR_IN;
	}

	hsep->stopped = 1;
	/* drop the lock around the gadget driver's completion callback */
	spin_unlock(&hsudc->lock);
	usb_gadget_giveback_request(&hsep->ep, &hsreq->req);
	spin_lock(&hsudc->lock);
	hsep->stopped = stopped;
}

/**
 * s3c_hsudc_nuke_ep - Terminate all requests queued for a endpoint.
 * @hsep: Endpoint for which queued requests have to be terminated.
 * @status: Transfer completion status for the transfer request.
 */
static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
{
	struct s3c_hsudc_req *hsreq;

	while (!list_empty(&hsep->queue)) {
		hsreq = list_entry(hsep->queue.next,
				struct s3c_hsudc_req, queue);
		s3c_hsudc_complete_request(hsep, hsreq, status);
	}
}

/**
 * s3c_hsudc_stop_activity - Stop activity on all endpoints.
 * @hsudc: Device controller for which EP activity is to be stopped.
 *
 * All the endpoints are stopped and any pending transfer requests if any on
 * the endpoint are terminated.
 */
static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
{
	struct s3c_hsudc_ep *hsep;
	int epnum;

	hsudc->gadget.speed = USB_SPEED_UNKNOWN;

	for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) {
		hsep = &hsudc->ep[epnum];
		hsep->stopped = 1;
		s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
	}
}

/**
 * s3c_hsudc_read_setup_pkt - Read the received setup packet from EP0 fifo.
 * @hsudc: Device controller from which setup packet is to be read.
 * @buf: The buffer into which the setup packet is read.
 *
 * The setup packet received in the EP0 fifo is read and stored into a
 * given buffer address.
 */
static void s3c_hsudc_read_setup_pkt(struct s3c_hsudc *hsudc, u16 *buf)
{
	int count;

	count = readl(hsudc->regs + S3C_BRCR);
	while (count--)
		*buf++ = (u16)readl(hsudc->regs + S3C_BR(0));

	writel(S3C_EP0SR_RX_SUCCESS, hsudc->regs + S3C_EP0SR);
}

/**
 * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo.
 * @hsep: Endpoint to which the data is to be written.
 * @hsreq: Transfer request from which the next chunk of data is written.
 *
 * Write the next chunk of data from a transfer request to the endpoint FIFO.
 * If the transfer request completes, 1 is returned, otherwise 0 is returned.
 */
static int s3c_hsudc_write_fifo(struct s3c_hsudc_ep *hsep,
				struct s3c_hsudc_req *hsreq)
{
	u16 *buf;
	u32 max = ep_maxpacket(hsep);
	u32 count, length;
	bool is_last;
	void __iomem *fifo = hsep->fifo;

	buf = hsreq->req.buf + hsreq->req.actual;
	prefetch(buf);

	length = hsreq->req.length - hsreq->req.actual;
	length = min(length, max);
	hsreq->req.actual += length;

	writel(length, hsep->dev->regs + S3C_BWCR);
	for (count = 0; count < length; count += 2)
		writel(*buf++, fifo);

	if (count != max) {
		/* short packet terminates the transfer */
		is_last = true;
	} else {
		if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero)
			is_last = false;
		else
			is_last = true;
	}

	if (is_last) {
		s3c_hsudc_complete_request(hsep, hsreq, 0);
		return 1;
	}

	return 0;
}

/**
 * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo.
 * @hsep: Endpoint from which the data is to be read.
 * @hsreq: Transfer request to which the next chunk of data read is written.
 *
 * Read the next chunk of data from the endpoint FIFO and a write it to the
 * transfer request buffer. If the transfer request completes, 1 is returned,
 * otherwise 0 is returned.
 */
static int s3c_hsudc_read_fifo(struct s3c_hsudc_ep *hsep,
				struct s3c_hsudc_req *hsreq)
{
	struct s3c_hsudc *hsudc = hsep->dev;
	u32 csr, offset;
	u16 *buf, word;
	u32 buflen, rcnt, rlen;
	void __iomem *fifo = hsep->fifo;
	u32 is_short = 0;

	offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
	csr = readl(hsudc->regs + offset);
	if (!(csr & S3C_ESR_RX_SUCCESS))
		return -EINVAL;

	buf = hsreq->req.buf + hsreq->req.actual;
	prefetchw(buf);
	buflen = hsreq->req.length - hsreq->req.actual;

	rcnt = readl(hsudc->regs + S3C_BRCR);
	/* LWO flag: last word of the packet holds only one valid byte */
	rlen = (csr & S3C_ESR_LWO) ? (rcnt * 2 - 1) : (rcnt * 2);

	hsreq->req.actual += min(rlen, buflen);
	is_short = (rlen < hsep->ep.maxpacket);

	while (rcnt-- != 0) {
		word = (u16)readl(fifo);
		if (buflen) {
			*buf++ = word;
			buflen--;
		} else {
			/* more data in FIFO than the request can hold */
			hsreq->req.status = -EOVERFLOW;
		}
	}

	writel(S3C_ESR_RX_SUCCESS, hsudc->regs + offset);

	if (is_short || hsreq->req.actual == hsreq->req.length) {
		s3c_hsudc_complete_request(hsep, hsreq, 0);
		return 1;
	}

	return 0;
}

/**
 * s3c_hsudc_epin_intr - Handle in-endpoint interrupt.
 * @hsudc - Device controller for which the interrupt is to be handled.
 * @ep_idx - Endpoint number on which an interrupt is pending.
 *
 * Handles interrupt for a in-endpoint. The interrupts that are handled are
 * stall and data transmit complete interrupt.
*/ static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx) { struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx]; struct s3c_hsudc_req *hsreq; u32 csr; csr = readl(hsudc->regs + S3C_ESR); if (csr & S3C_ESR_STALL) { writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR); return; } if (csr & S3C_ESR_TX_SUCCESS) { writel(S3C_ESR_TX_SUCCESS, hsudc->regs + S3C_ESR); if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); if ((s3c_hsudc_write_fifo(hsep, hsreq) == 0) && (csr & S3C_ESR_PSIF_TWO)) s3c_hsudc_write_fifo(hsep, hsreq); } } /** * s3c_hsudc_epout_intr - Handle out-endpoint interrupt. * @hsudc - Device controller for which the interrupt is to be handled. * @ep_idx - Endpoint number on which an interrupt is pending. * * Handles interrupt for a out-endpoint. The interrupts that are handled are * stall, flush and data ready interrupt. */ static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx) { struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx]; struct s3c_hsudc_req *hsreq; u32 csr; csr = readl(hsudc->regs + S3C_ESR); if (csr & S3C_ESR_STALL) { writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR); return; } if (csr & S3C_ESR_FLUSH) { __orr32(hsudc->regs + S3C_ECR, S3C_ECR_FLUSH); return; } if (csr & S3C_ESR_RX_SUCCESS) { if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); if (((s3c_hsudc_read_fifo(hsep, hsreq)) == 0) && (csr & S3C_ESR_PSIF_TWO)) s3c_hsudc_read_fifo(hsep, hsreq); } } /** s3c_hsudc_set_halt - Set or clear a endpoint halt. * @_ep: Endpoint on which halt has to be set or cleared. * @value: 1 for setting halt on endpoint, 0 to clear halt. * * Set or clear endpoint halt. If halt is set, the endpoint is stopped. * If halt is cleared, for in-endpoints, if there are any pending * transfer requests, transfers are started. 
*/ static int s3c_hsudc_set_halt(struct usb_ep *_ep, int value) { struct s3c_hsudc_ep *hsep = our_ep(_ep); struct s3c_hsudc *hsudc = hsep->dev; struct s3c_hsudc_req *hsreq; unsigned long irqflags; u32 ecr; u32 offset; if (value && ep_is_in(hsep) && !list_empty(&hsep->queue)) return -EAGAIN; spin_lock_irqsave(&hsudc->lock, irqflags); set_index(hsudc, ep_index(hsep)); offset = (ep_index(hsep)) ? S3C_ECR : S3C_EP0CR; ecr = readl(hsudc->regs + offset); if (value) { ecr |= S3C_ECR_STALL; if (ep_index(hsep)) ecr |= S3C_ECR_FLUSH; hsep->stopped = 1; } else { ecr &= ~S3C_ECR_STALL; hsep->stopped = hsep->wedge = 0; } writel(ecr, hsudc->regs + offset); if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) { hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); if (hsreq) s3c_hsudc_write_fifo(hsep, hsreq); } spin_unlock_irqrestore(&hsudc->lock, irqflags); return 0; } /** s3c_hsudc_set_wedge - Sets the halt feature with the clear requests ignored * @_ep: Endpoint on which wedge has to be set. * * Sets the halt feature with the clear requests ignored. */ static int s3c_hsudc_set_wedge(struct usb_ep *_ep) { struct s3c_hsudc_ep *hsep = our_ep(_ep); if (!hsep) return -EINVAL; hsep->wedge = 1; return usb_ep_set_halt(_ep); } /** s3c_hsudc_handle_reqfeat - Handle set feature or clear feature requests. * @_ep: Device controller on which the set/clear feature needs to be handled. * @ctrl: Control request as received on the endpoint 0. * * Handle set feature or clear feature control requests on the control endpoint. 
*/ static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc, struct usb_ctrlrequest *ctrl) { struct s3c_hsudc_ep *hsep; bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE); u8 ep_num = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK; if (ctrl->bRequestType == USB_RECIP_ENDPOINT) { hsep = &hsudc->ep[ep_num]; switch (le16_to_cpu(ctrl->wValue)) { case USB_ENDPOINT_HALT: if (set || (!set && !hsep->wedge)) s3c_hsudc_set_halt(&hsep->ep, set); return 0; } } return -ENOENT; } /** * s3c_hsudc_process_req_status - Handle get status control request. * @hsudc: Device controller on which get status request has be handled. * @ctrl: Control request as received on the endpoint 0. * * Handle get status control request received on control endpoint. */ static void s3c_hsudc_process_req_status(struct s3c_hsudc *hsudc, struct usb_ctrlrequest *ctrl) { struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0]; struct s3c_hsudc_req hsreq; struct s3c_hsudc_ep *hsep; __le16 reply; u8 epnum; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: reply = cpu_to_le16(0); break; case USB_RECIP_INTERFACE: reply = cpu_to_le16(0); break; case USB_RECIP_ENDPOINT: epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; hsep = &hsudc->ep[epnum]; reply = cpu_to_le16(hsep->stopped ? 1 : 0); break; } INIT_LIST_HEAD(&hsreq.queue); hsreq.req.length = 2; hsreq.req.buf = &reply; hsreq.req.actual = 0; hsreq.req.complete = NULL; s3c_hsudc_write_fifo(hsep0, &hsreq); } /** * s3c_hsudc_process_setup - Process control request received on endpoint 0. * @hsudc: Device controller on which control request has been received. * * Read the control request received on endpoint 0, decode it and handle * the request. 
*/ static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc) { struct s3c_hsudc_ep *hsep = &hsudc->ep[0]; struct usb_ctrlrequest ctrl = {0}; int ret; s3c_hsudc_nuke_ep(hsep, -EPROTO); s3c_hsudc_read_setup_pkt(hsudc, (u16 *)&ctrl); if (ctrl.bRequestType & USB_DIR_IN) { hsep->bEndpointAddress |= USB_DIR_IN; hsudc->ep0state = DATA_STATE_XMIT; } else { hsep->bEndpointAddress &= ~USB_DIR_IN; hsudc->ep0state = DATA_STATE_RECV; } switch (ctrl.bRequest) { case USB_REQ_SET_ADDRESS: if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) break; hsudc->ep0state = WAIT_FOR_SETUP; return; case USB_REQ_GET_STATUS: if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) break; s3c_hsudc_process_req_status(hsudc, &ctrl); return; case USB_REQ_SET_FEATURE: case USB_REQ_CLEAR_FEATURE: if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) break; s3c_hsudc_handle_reqfeat(hsudc, &ctrl); hsudc->ep0state = WAIT_FOR_SETUP; return; } if (hsudc->driver) { spin_unlock(&hsudc->lock); ret = hsudc->driver->setup(&hsudc->gadget, &ctrl); spin_lock(&hsudc->lock); if (ctrl.bRequest == USB_REQ_SET_CONFIGURATION) { hsep->bEndpointAddress &= ~USB_DIR_IN; hsudc->ep0state = WAIT_FOR_SETUP; } if (ret < 0) { dev_err(hsudc->dev, "setup failed, returned %d\n", ret); s3c_hsudc_set_halt(&hsep->ep, 1); hsudc->ep0state = WAIT_FOR_SETUP; hsep->bEndpointAddress &= ~USB_DIR_IN; } } } /** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt. * @hsudc: Device controller on which endpoint 0 interrupt has occured. * * Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur * when a stall handshake is sent to host or data is sent/received on * endpoint 0. 
*/ static void s3c_hsudc_handle_ep0_intr(struct s3c_hsudc *hsudc) { struct s3c_hsudc_ep *hsep = &hsudc->ep[0]; struct s3c_hsudc_req *hsreq; u32 csr = readl(hsudc->regs + S3C_EP0SR); u32 ecr; if (csr & S3C_EP0SR_STALL) { ecr = readl(hsudc->regs + S3C_EP0CR); ecr &= ~(S3C_ECR_STALL | S3C_ECR_FLUSH); writel(ecr, hsudc->regs + S3C_EP0CR); writel(S3C_EP0SR_STALL, hsudc->regs + S3C_EP0SR); hsep->stopped = 0; s3c_hsudc_nuke_ep(hsep, -ECONNABORTED); hsudc->ep0state = WAIT_FOR_SETUP; hsep->bEndpointAddress &= ~USB_DIR_IN; return; } if (csr & S3C_EP0SR_TX_SUCCESS) { writel(S3C_EP0SR_TX_SUCCESS, hsudc->regs + S3C_EP0SR); if (ep_is_in(hsep)) { if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); s3c_hsudc_write_fifo(hsep, hsreq); } } if (csr & S3C_EP0SR_RX_SUCCESS) { if (hsudc->ep0state == WAIT_FOR_SETUP) s3c_hsudc_process_setup(hsudc); else { if (!ep_is_in(hsep)) { if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); s3c_hsudc_read_fifo(hsep, hsreq); } } } } /** * s3c_hsudc_ep_enable - Enable a endpoint. * @_ep: The endpoint to be enabled. * @desc: Endpoint descriptor. * * Enables a endpoint when called from the gadget driver. Endpoint stall if * any is cleared, transfer type is configured and endpoint interrupt is * enabled. 
*/ static int s3c_hsudc_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct s3c_hsudc_ep *hsep; struct s3c_hsudc *hsudc; unsigned long flags; u32 ecr = 0; hsep = our_ep(_ep); if (!_ep || !desc || _ep->name == ep0name || desc->bDescriptorType != USB_DT_ENDPOINT || hsep->bEndpointAddress != desc->bEndpointAddress || ep_maxpacket(hsep) < usb_endpoint_maxp(desc)) return -EINVAL; if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK && usb_endpoint_maxp(desc) != ep_maxpacket(hsep)) || !desc->wMaxPacketSize) return -ERANGE; hsudc = hsep->dev; if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&hsudc->lock, flags); set_index(hsudc, hsep->bEndpointAddress); ecr |= ((usb_endpoint_xfer_int(desc)) ? S3C_ECR_IEMS : S3C_ECR_DUEN); writel(ecr, hsudc->regs + S3C_ECR); hsep->stopped = hsep->wedge = 0; hsep->ep.desc = desc; hsep->ep.maxpacket = usb_endpoint_maxp(desc); s3c_hsudc_set_halt(_ep, 0); __set_bit(ep_index(hsep), hsudc->regs + S3C_EIER); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } /** * s3c_hsudc_ep_disable - Disable a endpoint. * @_ep: The endpoint to be disabled. * @desc: Endpoint descriptor. * * Disables a endpoint when called from the gadget driver. */ static int s3c_hsudc_ep_disable(struct usb_ep *_ep) { struct s3c_hsudc_ep *hsep = our_ep(_ep); struct s3c_hsudc *hsudc = hsep->dev; unsigned long flags; if (!_ep || !hsep->ep.desc) return -EINVAL; spin_lock_irqsave(&hsudc->lock, flags); set_index(hsudc, hsep->bEndpointAddress); __clear_bit(ep_index(hsep), hsudc->regs + S3C_EIER); s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN); hsep->ep.desc = NULL; hsep->stopped = 1; spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } /** * s3c_hsudc_alloc_request - Allocate a new request. * @_ep: Endpoint for which request is allocated (not used). * @gfp_flags: Flags used for the allocation. * * Allocates a single transfer request structure when called from gadget driver. 
 */
static struct usb_request *s3c_hsudc_alloc_request(struct usb_ep *_ep,
						gfp_t gfp_flags)
{
	struct s3c_hsudc_req *hsreq;

	hsreq = kzalloc(sizeof(*hsreq), gfp_flags);
	if (!hsreq)
		return NULL;

	INIT_LIST_HEAD(&hsreq->queue);
	return &hsreq->req;
}

/**
 * s3c_hsudc_free_request - Deallocate a request.
 * @ep: Endpoint for which request is deallocated (not used).
 * @_req: Request to be deallocated.
 *
 * Frees a transfer request previously allocated with
 * s3c_hsudc_alloc_request(); the request must not be queued.
 */
static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req)
{
	struct s3c_hsudc_req *hsreq;

	hsreq = our_req(_req);
	WARN_ON(!list_empty(&hsreq->queue));
	kfree(hsreq);
}

/**
 * s3c_hsudc_queue - Queue a transfer request for the endpoint.
 * @_ep: Endpoint for which the request is queued.
 * @_req: Request to be queued.
 * @gfp_flags: Not used.
 *
 * Start or enqueue a request for a endpoint when called from gadget driver.
 * If the endpoint is idle, the transfer is started immediately from here;
 * otherwise it is appended and started from the endpoint interrupt.
 */
static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct s3c_hsudc_req *hsreq;
	struct s3c_hsudc_ep *hsep;
	struct s3c_hsudc *hsudc;
	unsigned long flags;
	u32 offset;
	u32 csr;

	hsreq = our_req(_req);
	if ((!_req || !_req->complete || !_req->buf ||
		!list_empty(&hsreq->queue)))
		return -EINVAL;

	hsep = our_ep(_ep);
	hsudc = hsep->dev;
	if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&hsudc->lock, flags);
	set_index(hsudc, hsep->bEndpointAddress);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* Zero-length ep0 request: status stage, complete right away. */
	if (!ep_index(hsep) && _req->length == 0) {
		hsudc->ep0state = WAIT_FOR_SETUP;
		s3c_hsudc_complete_request(hsep, hsreq, 0);
		spin_unlock_irqrestore(&hsudc->lock, flags);
		return 0;
	}

	/* Endpoint idle: try to run the transfer synchronously. */
	if (list_empty(&hsep->queue) && !hsep->stopped) {
		offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
		if (ep_is_in(hsep)) {
			csr = readl(hsudc->regs + offset);
			if (!(csr & S3C_ESR_TX_SUCCESS) &&
				(s3c_hsudc_write_fifo(hsep, hsreq) == 1))
				hsreq = NULL;
		} else {
			csr = readl(hsudc->regs + offset);
			if ((csr & S3C_ESR_RX_SUCCESS) &&
				(s3c_hsudc_read_fifo(hsep, hsreq) == 1))
				hsreq = NULL;
		}
	}

	/* hsreq is NULL if the transfer already completed above. */
	if (hsreq)
		list_add_tail(&hsreq->queue, &hsep->queue);

	spin_unlock_irqrestore(&hsudc->lock, flags);
	return 0;
}

/**
 * s3c_hsudc_dequeue - Dequeue a transfer request from an endpoint.
 * @_ep: Endpoint from which the request is dequeued.
 * @_req: Request to be dequeued.
 *
 * Dequeue a request from a endpoint when called from gadget driver;
 * the request is completed with -ECONNRESET if found.
 */
static int s3c_hsudc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct s3c_hsudc_ep *hsep = our_ep(_ep);
	struct s3c_hsudc *hsudc = hsep->dev;
	struct s3c_hsudc_req *hsreq;
	unsigned long flags;

	hsep = our_ep(_ep);
	/*
	 * NOTE(review): ep.name is set to hsep->name in s3c_hsudc_initep(),
	 * so this pointer comparison against ep0name looks like it can
	 * never match — verify whether ep0 dequeue should be rejected.
	 */
	if (!_ep || hsep->ep.name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&hsudc->lock, flags);

	/* Standard "find the request on this endpoint's queue" idiom. */
	list_for_each_entry(hsreq, &hsep->queue, queue) {
		if (&hsreq->req == _req)
			break;
	}
	if (&hsreq->req != _req) {
		spin_unlock_irqrestore(&hsudc->lock, flags);
		return -EINVAL;
	}

	set_index(hsudc, hsep->bEndpointAddress);
	s3c_hsudc_complete_request(hsep, hsreq, -ECONNRESET);

	spin_unlock_irqrestore(&hsudc->lock, flags);
	return 0;
}

/* Endpoint operations exposed to the gadget core. */
static struct usb_ep_ops s3c_hsudc_ep_ops = {
	.enable = s3c_hsudc_ep_enable,
	.disable = s3c_hsudc_ep_disable,
	.alloc_request = s3c_hsudc_alloc_request,
	.free_request = s3c_hsudc_free_request,
	.queue = s3c_hsudc_queue,
	.dequeue = s3c_hsudc_dequeue,
	.set_halt = s3c_hsudc_set_halt,
	.set_wedge = s3c_hsudc_set_wedge,
};

/**
 * s3c_hsudc_initep - Initialize a endpoint to default state.
 * @hsudc - Reference to the device controller.
 * @hsep - Endpoint to be initialized.
 * @epnum - Address to be assigned to the endpoint.
 *
 * Initialize a endpoint with default configuration.
*/ static void s3c_hsudc_initep(struct s3c_hsudc *hsudc, struct s3c_hsudc_ep *hsep, int epnum) { char *dir; if ((epnum % 2) == 0) { dir = "out"; } else { dir = "in"; hsep->bEndpointAddress = USB_DIR_IN; } hsep->bEndpointAddress |= epnum; if (epnum) snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir); else snprintf(hsep->name, sizeof(hsep->name), "%s", ep0name); INIT_LIST_HEAD(&hsep->queue); INIT_LIST_HEAD(&hsep->ep.ep_list); if (epnum) list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list); hsep->dev = hsudc; hsep->ep.name = hsep->name; usb_ep_set_maxpacket_limit(&hsep->ep, epnum ? 512 : 64); hsep->ep.ops = &s3c_hsudc_ep_ops; hsep->fifo = hsudc->regs + S3C_BR(epnum); hsep->ep.desc = NULL; hsep->stopped = 0; hsep->wedge = 0; set_index(hsudc, epnum); writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR); } /** * s3c_hsudc_setup_ep - Configure all endpoints to default state. * @hsudc: Reference to device controller. * * Configures all endpoints to default state. */ static void s3c_hsudc_setup_ep(struct s3c_hsudc *hsudc) { int epnum; hsudc->ep0state = WAIT_FOR_SETUP; INIT_LIST_HEAD(&hsudc->gadget.ep_list); for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum); } /** * s3c_hsudc_reconfig - Reconfigure the device controller to default state. * @hsudc: Reference to device controller. * * Reconfigures the device controller registers to a default state. */ static void s3c_hsudc_reconfig(struct s3c_hsudc *hsudc) { writel(0xAA, hsudc->regs + S3C_EDR); writel(1, hsudc->regs + S3C_EIER); writel(0, hsudc->regs + S3C_TR); writel(S3C_SCR_DTZIEN_EN | S3C_SCR_RRD_EN | S3C_SCR_SUS_EN | S3C_SCR_RST_EN, hsudc->regs + S3C_SCR); writel(0, hsudc->regs + S3C_EP0CR); s3c_hsudc_setup_ep(hsudc); } /** * s3c_hsudc_irq - Interrupt handler for device controller. * @irq: Not used. * @_dev: Reference to the device controller. * * Interrupt handler for the device controller. This handler handles controller * interrupts and endpoint interrupts. 
*/ static irqreturn_t s3c_hsudc_irq(int irq, void *_dev) { struct s3c_hsudc *hsudc = _dev; struct s3c_hsudc_ep *hsep; u32 ep_intr; u32 sys_status; u32 ep_idx; spin_lock(&hsudc->lock); sys_status = readl(hsudc->regs + S3C_SSR); ep_intr = readl(hsudc->regs + S3C_EIR) & 0x3FF; if (!ep_intr && !(sys_status & S3C_SSR_DTZIEN_EN)) { spin_unlock(&hsudc->lock); return IRQ_HANDLED; } if (sys_status) { if (sys_status & S3C_SSR_VBUSON) writel(S3C_SSR_VBUSON, hsudc->regs + S3C_SSR); if (sys_status & S3C_SSR_ERR) writel(S3C_SSR_ERR, hsudc->regs + S3C_SSR); if (sys_status & S3C_SSR_SDE) { writel(S3C_SSR_SDE, hsudc->regs + S3C_SSR); hsudc->gadget.speed = (sys_status & S3C_SSR_HSP) ? USB_SPEED_HIGH : USB_SPEED_FULL; } if (sys_status & S3C_SSR_SUSPEND) { writel(S3C_SSR_SUSPEND, hsudc->regs + S3C_SSR); if (hsudc->gadget.speed != USB_SPEED_UNKNOWN && hsudc->driver && hsudc->driver->suspend) hsudc->driver->suspend(&hsudc->gadget); } if (sys_status & S3C_SSR_RESUME) { writel(S3C_SSR_RESUME, hsudc->regs + S3C_SSR); if (hsudc->gadget.speed != USB_SPEED_UNKNOWN && hsudc->driver && hsudc->driver->resume) hsudc->driver->resume(&hsudc->gadget); } if (sys_status & S3C_SSR_RESET) { writel(S3C_SSR_RESET, hsudc->regs + S3C_SSR); for (ep_idx = 0; ep_idx < hsudc->pd->epnum; ep_idx++) { hsep = &hsudc->ep[ep_idx]; hsep->stopped = 1; s3c_hsudc_nuke_ep(hsep, -ECONNRESET); } s3c_hsudc_reconfig(hsudc); hsudc->ep0state = WAIT_FOR_SETUP; } } if (ep_intr & S3C_EIR_EP0) { writel(S3C_EIR_EP0, hsudc->regs + S3C_EIR); set_index(hsudc, 0); s3c_hsudc_handle_ep0_intr(hsudc); } ep_intr >>= 1; ep_idx = 1; while (ep_intr) { if (ep_intr & 1) { hsep = &hsudc->ep[ep_idx]; set_index(hsudc, ep_idx); writel(1 << ep_idx, hsudc->regs + S3C_EIR); if (ep_is_in(hsep)) s3c_hsudc_epin_intr(hsudc, ep_idx); else s3c_hsudc_epout_intr(hsudc, ep_idx); } ep_intr >>= 1; ep_idx++; } spin_unlock(&hsudc->lock); return IRQ_HANDLED; } static int s3c_hsudc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct s3c_hsudc 
*hsudc = to_hsudc(gadget); int ret; if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) return -EINVAL; if (!hsudc) return -ENODEV; if (hsudc->driver) return -EBUSY; hsudc->driver = driver; ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies); if (ret != 0) { dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret); goto err_supplies; } /* connect to bus through transceiver */ if (!IS_ERR_OR_NULL(hsudc->transceiver)) { ret = otg_set_peripheral(hsudc->transceiver->otg, &hsudc->gadget); if (ret) { dev_err(hsudc->dev, "%s: can't bind to transceiver\n", hsudc->gadget.name); goto err_otg; } } enable_irq(hsudc->irq); s3c_hsudc_reconfig(hsudc); pm_runtime_get_sync(hsudc->dev); s3c_hsudc_init_phy(); if (hsudc->pd->gpio_init) hsudc->pd->gpio_init(); return 0; err_otg: regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies); err_supplies: hsudc->driver = NULL; return ret; } static int s3c_hsudc_stop(struct usb_gadget *gadget) { struct s3c_hsudc *hsudc = to_hsudc(gadget); unsigned long flags; if (!hsudc) return -ENODEV; spin_lock_irqsave(&hsudc->lock, flags); hsudc->gadget.speed = USB_SPEED_UNKNOWN; s3c_hsudc_uninit_phy(); pm_runtime_put(hsudc->dev); if (hsudc->pd->gpio_uninit) hsudc->pd->gpio_uninit(); s3c_hsudc_stop_activity(hsudc); spin_unlock_irqrestore(&hsudc->lock, flags); if (!IS_ERR_OR_NULL(hsudc->transceiver)) (void) otg_set_peripheral(hsudc->transceiver->otg, NULL); disable_irq(hsudc->irq); regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies); hsudc->driver = NULL; return 0; } static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc) { return readl(hsudc->regs + S3C_FNR) & 0x3FF; } static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget) { return s3c_hsudc_read_frameno(to_hsudc(gadget)); } static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA) { struct s3c_hsudc *hsudc = to_hsudc(gadget); if (!hsudc) return -ENODEV; if (!IS_ERR_OR_NULL(hsudc->transceiver)) return 
usb_phy_set_power(hsudc->transceiver, mA); return -EOPNOTSUPP; } static const struct usb_gadget_ops s3c_hsudc_gadget_ops = { .get_frame = s3c_hsudc_gadget_getframe, .udc_start = s3c_hsudc_start, .udc_stop = s3c_hsudc_stop, .vbus_draw = s3c_hsudc_vbus_draw, }; static int s3c_hsudc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct s3c_hsudc *hsudc; struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev); int ret, i; hsudc = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsudc) + sizeof(struct s3c_hsudc_ep) * pd->epnum, GFP_KERNEL); if (!hsudc) return -ENOMEM; platform_set_drvdata(pdev, dev); hsudc->dev = dev; hsudc->pd = dev_get_platdata(&pdev->dev); hsudc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2); for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++) hsudc->supplies[i].supply = s3c_hsudc_supply_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies), hsudc->supplies); if (ret != 0) { dev_err(dev, "failed to request supplies: %d\n", ret); goto err_supplies; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hsudc->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hsudc->regs)) { ret = PTR_ERR(hsudc->regs); goto err_res; } spin_lock_init(&hsudc->lock); hsudc->gadget.max_speed = USB_SPEED_HIGH; hsudc->gadget.ops = &s3c_hsudc_gadget_ops; hsudc->gadget.name = dev_name(dev); hsudc->gadget.ep0 = &hsudc->ep[0].ep; hsudc->gadget.is_otg = 0; hsudc->gadget.is_a_peripheral = 0; hsudc->gadget.speed = USB_SPEED_UNKNOWN; s3c_hsudc_setup_ep(hsudc); ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(dev, "unable to obtain IRQ number\n"); goto err_res; } hsudc->irq = ret; ret = devm_request_irq(&pdev->dev, hsudc->irq, s3c_hsudc_irq, 0, driver_name, hsudc); if (ret < 0) { dev_err(dev, "irq request failed\n"); goto err_res; } hsudc->uclk = devm_clk_get(&pdev->dev, "usb-device"); if (IS_ERR(hsudc->uclk)) { dev_err(dev, "failed to find usb-device clock source\n"); ret = PTR_ERR(hsudc->uclk); goto 
err_res; } clk_enable(hsudc->uclk); local_irq_disable(); disable_irq(hsudc->irq); local_irq_enable(); ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget); if (ret) goto err_add_udc; pm_runtime_enable(dev); return 0; err_add_udc: clk_disable(hsudc->uclk); err_res: if (!IS_ERR_OR_NULL(hsudc->transceiver)) usb_put_phy(hsudc->transceiver); err_supplies: return ret; } static struct platform_driver s3c_hsudc_driver = { .driver = { .name = "s3c-hsudc", }, .probe = s3c_hsudc_probe, }; module_platform_driver(s3c_hsudc_driver); MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver"); MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:s3c-hsudc");
gpl-2.0
smipi1/elce2015-tiny-linux
drivers/input/misc/bma150.c
1116
16323
/* * Copyright (c) 2011 Bosch Sensortec GmbH * Copyright (c) 2011 Unixphere * * This driver adds support for Bosch Sensortec's digital acceleration * sensors BMA150 and SMB380. * The SMB380 is fully compatible with BMA150 and only differs in packaging. * * The datasheet for the BMA150 chip can be found here: * http://www.bosch-sensortec.com/content/language1/downloads/BST-BMA150-DS000-07.pdf * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/input-polldev.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/bma150.h> #define ABSMAX_ACC_VAL 0x01FF #define ABSMIN_ACC_VAL -(ABSMAX_ACC_VAL) /* Each axis is represented by a 2-byte data word */ #define BMA150_XYZ_DATA_SIZE 6 /* Input poll interval in milliseconds */ #define BMA150_POLL_INTERVAL 10 #define BMA150_POLL_MAX 200 #define BMA150_POLL_MIN 0 #define BMA150_MODE_NORMAL 0 #define BMA150_MODE_SLEEP 2 #define BMA150_MODE_WAKE_UP 3 /* Data register addresses */ #define BMA150_DATA_0_REG 0x00 #define BMA150_DATA_1_REG 0x01 #define BMA150_DATA_2_REG 0x02 /* Control register addresses */ #define BMA150_CTRL_0_REG 0x0A #define BMA150_CTRL_1_REG 0x0B #define BMA150_CTRL_2_REG 0x14 #define BMA150_CTRL_3_REG 0x15 /* Configuration/Setting register addresses */ #define BMA150_CFG_0_REG 0x0C #define BMA150_CFG_1_REG 0x0D #define BMA150_CFG_2_REG 0x0E #define BMA150_CFG_3_REG 0x0F #define BMA150_CFG_4_REG 0x10 #define BMA150_CFG_5_REG 0x11 #define BMA150_CHIP_ID 2 #define BMA180_CHIP_ID 3 #define BMA150_CHIP_ID_REG BMA150_DATA_0_REG #define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG #define BMA150_SLEEP_POS 0 #define BMA150_SLEEP_MSK 0x01 #define BMA150_SLEEP_REG BMA150_CTRL_0_REG #define BMA150_BANDWIDTH_POS 0 #define BMA150_BANDWIDTH_MSK 0x07 #define BMA150_BANDWIDTH_REG BMA150_CTRL_2_REG #define BMA150_RANGE_POS 3 #define BMA150_RANGE_MSK 0x18 #define BMA150_RANGE_REG BMA150_CTRL_2_REG #define BMA150_WAKE_UP_POS 0 #define BMA150_WAKE_UP_MSK 0x01 #define BMA150_WAKE_UP_REG BMA150_CTRL_3_REG #define BMA150_SW_RES_POS 1 #define BMA150_SW_RES_MSK 0x02 #define BMA150_SW_RES_REG BMA150_CTRL_0_REG /* Any-motion interrupt register fields */ #define BMA150_ANY_MOTION_EN_POS 6 #define BMA150_ANY_MOTION_EN_MSK 0x40 #define BMA150_ANY_MOTION_EN_REG BMA150_CTRL_1_REG 
#define BMA150_ANY_MOTION_DUR_POS 6 #define BMA150_ANY_MOTION_DUR_MSK 0xC0 #define BMA150_ANY_MOTION_DUR_REG BMA150_CFG_5_REG #define BMA150_ANY_MOTION_THRES_REG BMA150_CFG_4_REG /* Advanced interrupt register fields */ #define BMA150_ADV_INT_EN_POS 6 #define BMA150_ADV_INT_EN_MSK 0x40 #define BMA150_ADV_INT_EN_REG BMA150_CTRL_3_REG /* High-G interrupt register fields */ #define BMA150_HIGH_G_EN_POS 1 #define BMA150_HIGH_G_EN_MSK 0x02 #define BMA150_HIGH_G_EN_REG BMA150_CTRL_1_REG #define BMA150_HIGH_G_HYST_POS 3 #define BMA150_HIGH_G_HYST_MSK 0x38 #define BMA150_HIGH_G_HYST_REG BMA150_CFG_5_REG #define BMA150_HIGH_G_DUR_REG BMA150_CFG_3_REG #define BMA150_HIGH_G_THRES_REG BMA150_CFG_2_REG /* Low-G interrupt register fields */ #define BMA150_LOW_G_EN_POS 0 #define BMA150_LOW_G_EN_MSK 0x01 #define BMA150_LOW_G_EN_REG BMA150_CTRL_1_REG #define BMA150_LOW_G_HYST_POS 0 #define BMA150_LOW_G_HYST_MSK 0x07 #define BMA150_LOW_G_HYST_REG BMA150_CFG_5_REG #define BMA150_LOW_G_DUR_REG BMA150_CFG_1_REG #define BMA150_LOW_G_THRES_REG BMA150_CFG_0_REG struct bma150_data { struct i2c_client *client; struct input_polled_dev *input_polled; struct input_dev *input; u8 mode; }; /* * The settings for the given range, bandwidth and interrupt features * are stated and verified by Bosch Sensortec where they are configured * to provide a generic sensitivity performance. 
*/ static struct bma150_cfg default_cfg = { .any_motion_int = 1, .hg_int = 1, .lg_int = 1, .any_motion_dur = 0, .any_motion_thres = 0, .hg_hyst = 0, .hg_dur = 150, .hg_thres = 160, .lg_hyst = 0, .lg_dur = 150, .lg_thres = 20, .range = BMA150_RANGE_2G, .bandwidth = BMA150_BW_50HZ }; static int bma150_write_byte(struct i2c_client *client, u8 reg, u8 val) { s32 ret; /* As per specification, disable irq in between register writes */ if (client->irq) disable_irq_nosync(client->irq); ret = i2c_smbus_write_byte_data(client, reg, val); if (client->irq) enable_irq(client->irq); return ret; } static int bma150_set_reg_bits(struct i2c_client *client, int val, int shift, u8 mask, u8 reg) { int data; data = i2c_smbus_read_byte_data(client, reg); if (data < 0) return data; data = (data & ~mask) | ((val << shift) & mask); return bma150_write_byte(client, reg, data); } static int bma150_set_mode(struct bma150_data *bma150, u8 mode) { int error; error = bma150_set_reg_bits(bma150->client, mode, BMA150_WAKE_UP_POS, BMA150_WAKE_UP_MSK, BMA150_WAKE_UP_REG); if (error) return error; error = bma150_set_reg_bits(bma150->client, mode, BMA150_SLEEP_POS, BMA150_SLEEP_MSK, BMA150_SLEEP_REG); if (error) return error; if (mode == BMA150_MODE_NORMAL) msleep(2); bma150->mode = mode; return 0; } static int bma150_soft_reset(struct bma150_data *bma150) { int error; error = bma150_set_reg_bits(bma150->client, 1, BMA150_SW_RES_POS, BMA150_SW_RES_MSK, BMA150_SW_RES_REG); if (error) return error; msleep(2); return 0; } static int bma150_set_range(struct bma150_data *bma150, u8 range) { return bma150_set_reg_bits(bma150->client, range, BMA150_RANGE_POS, BMA150_RANGE_MSK, BMA150_RANGE_REG); } static int bma150_set_bandwidth(struct bma150_data *bma150, u8 bw) { return bma150_set_reg_bits(bma150->client, bw, BMA150_BANDWIDTH_POS, BMA150_BANDWIDTH_MSK, BMA150_BANDWIDTH_REG); } static int bma150_set_low_g_interrupt(struct bma150_data *bma150, u8 enable, u8 hyst, u8 dur, u8 thres) { int error; error = 
bma150_set_reg_bits(bma150->client, hyst, BMA150_LOW_G_HYST_POS, BMA150_LOW_G_HYST_MSK, BMA150_LOW_G_HYST_REG); if (error) return error; error = bma150_write_byte(bma150->client, BMA150_LOW_G_DUR_REG, dur); if (error) return error; error = bma150_write_byte(bma150->client, BMA150_LOW_G_THRES_REG, thres); if (error) return error; return bma150_set_reg_bits(bma150->client, !!enable, BMA150_LOW_G_EN_POS, BMA150_LOW_G_EN_MSK, BMA150_LOW_G_EN_REG); } static int bma150_set_high_g_interrupt(struct bma150_data *bma150, u8 enable, u8 hyst, u8 dur, u8 thres) { int error; error = bma150_set_reg_bits(bma150->client, hyst, BMA150_HIGH_G_HYST_POS, BMA150_HIGH_G_HYST_MSK, BMA150_HIGH_G_HYST_REG); if (error) return error; error = bma150_write_byte(bma150->client, BMA150_HIGH_G_DUR_REG, dur); if (error) return error; error = bma150_write_byte(bma150->client, BMA150_HIGH_G_THRES_REG, thres); if (error) return error; return bma150_set_reg_bits(bma150->client, !!enable, BMA150_HIGH_G_EN_POS, BMA150_HIGH_G_EN_MSK, BMA150_HIGH_G_EN_REG); } static int bma150_set_any_motion_interrupt(struct bma150_data *bma150, u8 enable, u8 dur, u8 thres) { int error; error = bma150_set_reg_bits(bma150->client, dur, BMA150_ANY_MOTION_DUR_POS, BMA150_ANY_MOTION_DUR_MSK, BMA150_ANY_MOTION_DUR_REG); if (error) return error; error = bma150_write_byte(bma150->client, BMA150_ANY_MOTION_THRES_REG, thres); if (error) return error; error = bma150_set_reg_bits(bma150->client, !!enable, BMA150_ADV_INT_EN_POS, BMA150_ADV_INT_EN_MSK, BMA150_ADV_INT_EN_REG); if (error) return error; return bma150_set_reg_bits(bma150->client, !!enable, BMA150_ANY_MOTION_EN_POS, BMA150_ANY_MOTION_EN_MSK, BMA150_ANY_MOTION_EN_REG); } static void bma150_report_xyz(struct bma150_data *bma150) { u8 data[BMA150_XYZ_DATA_SIZE]; s16 x, y, z; s32 ret; ret = i2c_smbus_read_i2c_block_data(bma150->client, BMA150_ACC_X_LSB_REG, BMA150_XYZ_DATA_SIZE, data); if (ret != BMA150_XYZ_DATA_SIZE) return; x = ((0xc0 & data[0]) >> 6) | (data[1] << 2); y = 
((0xc0 & data[2]) >> 6) | (data[3] << 2); z = ((0xc0 & data[4]) >> 6) | (data[5] << 2); /* sign extension */ x = (s16) (x << 6) >> 6; y = (s16) (y << 6) >> 6; z = (s16) (z << 6) >> 6; input_report_abs(bma150->input, ABS_X, x); input_report_abs(bma150->input, ABS_Y, y); input_report_abs(bma150->input, ABS_Z, z); input_sync(bma150->input); } static irqreturn_t bma150_irq_thread(int irq, void *dev) { bma150_report_xyz(dev); return IRQ_HANDLED; } static void bma150_poll(struct input_polled_dev *dev) { bma150_report_xyz(dev->private); } static int bma150_open(struct bma150_data *bma150) { int error; error = pm_runtime_get_sync(&bma150->client->dev); if (error < 0 && error != -ENOSYS) return error; /* * See if runtime PM woke up the device. If runtime PM * is disabled we need to do it ourselves. */ if (bma150->mode != BMA150_MODE_NORMAL) { error = bma150_set_mode(bma150, BMA150_MODE_NORMAL); if (error) return error; } return 0; } static void bma150_close(struct bma150_data *bma150) { pm_runtime_put_sync(&bma150->client->dev); if (bma150->mode != BMA150_MODE_SLEEP) bma150_set_mode(bma150, BMA150_MODE_SLEEP); } static int bma150_irq_open(struct input_dev *input) { struct bma150_data *bma150 = input_get_drvdata(input); return bma150_open(bma150); } static void bma150_irq_close(struct input_dev *input) { struct bma150_data *bma150 = input_get_drvdata(input); bma150_close(bma150); } static void bma150_poll_open(struct input_polled_dev *ipoll_dev) { struct bma150_data *bma150 = ipoll_dev->private; bma150_open(bma150); } static void bma150_poll_close(struct input_polled_dev *ipoll_dev) { struct bma150_data *bma150 = ipoll_dev->private; bma150_close(bma150); } static int bma150_initialize(struct bma150_data *bma150, const struct bma150_cfg *cfg) { int error; error = bma150_soft_reset(bma150); if (error) return error; error = bma150_set_bandwidth(bma150, cfg->bandwidth); if (error) return error; error = bma150_set_range(bma150, cfg->range); if (error) return error; if 
(bma150->client->irq) { error = bma150_set_any_motion_interrupt(bma150, cfg->any_motion_int, cfg->any_motion_dur, cfg->any_motion_thres); if (error) return error; error = bma150_set_high_g_interrupt(bma150, cfg->hg_int, cfg->hg_hyst, cfg->hg_dur, cfg->hg_thres); if (error) return error; error = bma150_set_low_g_interrupt(bma150, cfg->lg_int, cfg->lg_hyst, cfg->lg_dur, cfg->lg_thres); if (error) return error; } return bma150_set_mode(bma150, BMA150_MODE_SLEEP); } static void bma150_init_input_device(struct bma150_data *bma150, struct input_dev *idev) { idev->name = BMA150_DRIVER; idev->phys = BMA150_DRIVER "/input0"; idev->id.bustype = BUS_I2C; idev->dev.parent = &bma150->client->dev; idev->evbit[0] = BIT_MASK(EV_ABS); input_set_abs_params(idev, ABS_X, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0); input_set_abs_params(idev, ABS_Y, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0); input_set_abs_params(idev, ABS_Z, ABSMIN_ACC_VAL, ABSMAX_ACC_VAL, 0, 0); } static int bma150_register_input_device(struct bma150_data *bma150) { struct input_dev *idev; int error; idev = input_allocate_device(); if (!idev) return -ENOMEM; bma150_init_input_device(bma150, idev); idev->open = bma150_irq_open; idev->close = bma150_irq_close; input_set_drvdata(idev, bma150); error = input_register_device(idev); if (error) { input_free_device(idev); return error; } bma150->input = idev; return 0; } static int bma150_register_polled_device(struct bma150_data *bma150) { struct input_polled_dev *ipoll_dev; int error; ipoll_dev = input_allocate_polled_device(); if (!ipoll_dev) return -ENOMEM; ipoll_dev->private = bma150; ipoll_dev->open = bma150_poll_open; ipoll_dev->close = bma150_poll_close; ipoll_dev->poll = bma150_poll; ipoll_dev->poll_interval = BMA150_POLL_INTERVAL; ipoll_dev->poll_interval_min = BMA150_POLL_MIN; ipoll_dev->poll_interval_max = BMA150_POLL_MAX; bma150_init_input_device(bma150, ipoll_dev->input); error = input_register_polled_device(ipoll_dev); if (error) { input_free_polled_device(ipoll_dev); 
return error; } bma150->input_polled = ipoll_dev; bma150->input = ipoll_dev->input; return 0; } static int bma150_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct bma150_platform_data *pdata = dev_get_platdata(&client->dev); const struct bma150_cfg *cfg; struct bma150_data *bma150; int chip_id; int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "i2c_check_functionality error\n"); return -EIO; } chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG); if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) { dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id); return -EINVAL; } bma150 = kzalloc(sizeof(struct bma150_data), GFP_KERNEL); if (!bma150) return -ENOMEM; bma150->client = client; if (pdata) { if (pdata->irq_gpio_cfg) { error = pdata->irq_gpio_cfg(); if (error) { dev_err(&client->dev, "IRQ GPIO conf. error %d, error %d\n", client->irq, error); goto err_free_mem; } } cfg = &pdata->cfg; } else { cfg = &default_cfg; } error = bma150_initialize(bma150, cfg); if (error) goto err_free_mem; if (client->irq > 0) { error = bma150_register_input_device(bma150); if (error) goto err_free_mem; error = request_threaded_irq(client->irq, NULL, bma150_irq_thread, IRQF_TRIGGER_RISING | IRQF_ONESHOT, BMA150_DRIVER, bma150); if (error) { dev_err(&client->dev, "irq request failed %d, error %d\n", client->irq, error); input_unregister_device(bma150->input); goto err_free_mem; } } else { error = bma150_register_polled_device(bma150); if (error) goto err_free_mem; } i2c_set_clientdata(client, bma150); pm_runtime_enable(&client->dev); return 0; err_free_mem: kfree(bma150); return error; } static int bma150_remove(struct i2c_client *client) { struct bma150_data *bma150 = i2c_get_clientdata(client); pm_runtime_disable(&client->dev); if (client->irq > 0) { free_irq(client->irq, bma150); input_unregister_device(bma150->input); } else { input_unregister_polled_device(bma150->input_polled); 
input_free_polled_device(bma150->input_polled); } kfree(bma150); return 0; } #ifdef CONFIG_PM static int bma150_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct bma150_data *bma150 = i2c_get_clientdata(client); return bma150_set_mode(bma150, BMA150_MODE_SLEEP); } static int bma150_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct bma150_data *bma150 = i2c_get_clientdata(client); return bma150_set_mode(bma150, BMA150_MODE_NORMAL); } #endif static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL); static const struct i2c_device_id bma150_id[] = { { "bma150", 0 }, { "bma180", 0 }, { "smb380", 0 }, { "bma023", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, bma150_id); static struct i2c_driver bma150_driver = { .driver = { .owner = THIS_MODULE, .name = BMA150_DRIVER, .pm = &bma150_pm, }, .class = I2C_CLASS_HWMON, .id_table = bma150_id, .probe = bma150_probe, .remove = bma150_remove, }; module_i2c_driver(bma150_driver); MODULE_AUTHOR("Albert Zhang <xu.zhang@bosch-sensortec.com>"); MODULE_DESCRIPTION("BMA150 driver"); MODULE_LICENSE("GPL");
gpl-2.0
darshan1205/yu_kernel
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2140
98605
/******************************************************************************* Intel 82599 Virtual Function driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code ******************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/bitops.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/string.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/sctp.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> #include <linux/prefetch.h> #include "ixgbevf.h" const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; #define DRV_VERSION "2.7.12-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2012 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, [board_X540_vf] = &ixgbevf_X540_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table * * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, * Class, Class Mask, private data (not used) } */ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); /* forward decls */ static void 
ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, struct ixgbevf_ring *rx_ring, u32 val) { /* * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); } /** * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue */ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); ivar &= ~0xFF; ivar |= msix_vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { /* tx or rx causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); ivar &= ~(0xFF << index); ivar |= (msix_vector << index); IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); } } static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *tx_buffer_info) { if (tx_buffer_info->dma) { if (tx_buffer_info->mapped_as_page) dma_unmap_page(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); else dma_unmap_single(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); tx_buffer_info->dma = 0; } if (tx_buffer_info->skb) { dev_kfree_skb_any(tx_buffer_info->skb); tx_buffer_info->skb = NULL; } tx_buffer_info->time_stamp = 0; /* tx_buffer_info must be completely set up in the transmit 
path */ } #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) static void ixgbevf_tx_timeout(struct net_device *netdev); /** * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: board private structure * @tx_ring: tx ring to clean **/ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *tx_ring) { struct ixgbevf_adapter *adapter = q_vector->adapter; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int i, count = 0; unsigned int total_bytes = 0, total_packets = 0; if (test_bit(__IXGBEVF_DOWN, &adapter->state)) return true; i = tx_ring->next_to_clean; tx_buffer_info = &tx_ring->tx_buffer_info[i]; eop_desc = tx_buffer_info->next_to_watch; do { bool cleaned = false; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; /* prevent any other reads prior to eop_desc */ read_barrier_depends(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break; /* clear next_to_watch to prevent false hangs */ tx_buffer_info->next_to_watch = NULL; for ( ; !cleaned; count++) { struct sk_buff *skb; tx_desc = IXGBEVF_TX_DESC(tx_ring, i); cleaned = (tx_desc == eop_desc); skb = tx_buffer_info->skb; if (cleaned && skb) { unsigned int segs, bytecount; /* gso_segs is currently only valid for tcp */ segs = skb_shinfo(skb)->gso_segs ?: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; total_packets += segs; total_bytes += bytecount; } ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); tx_desc->wb.status = 0; i++; if (i == tx_ring->count) i = 0; tx_buffer_info = &tx_ring->tx_buffer_info[i]; } eop_desc = tx_buffer_info->next_to_watch; } while 
(count < tx_ring->count); tx_ring->next_to_clean = i; #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && !test_bit(__IXGBEVF_DOWN, &adapter->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++adapter->restart_queue; } } u64_stats_update_begin(&tx_ring->syncp); tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; return count < tx_ring->count; } /** * ixgbevf_receive_skb - Send a completed packet up the stack * @q_vector: structure containing interrupt and ring information * @skb: packet to send up * @status: hardware indication of status of receive * @rx_desc: rx descriptor **/ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb, u8 status, union ixgbe_adv_rx_desc *rx_desc) { struct ixgbevf_adapter *adapter = q_vector->adapter; bool is_vlan = (status & IXGBE_RXD_STAT_VP); u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) napi_gro_receive(&q_vector->napi, skb); else netif_rx(skb); } /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: pointer to Rx descriptor ring structure * @status_err: hardware indication of status of receive * @skb: skb currently being received and modified **/ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); /* Rx csum disabled */ if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* if IP 
and error */ if ((status_err & IXGBE_RXD_STAT_IPCS) && (status_err & IXGBE_RXDADV_ERR_IPE)) { ring->hw_csum_rx_error++; return; } if (!(status_err & IXGBE_RXD_STAT_L4CS)) return; if (status_err & IXGBE_RXDADV_ERR_TCPE) { ring->hw_csum_rx_error++; return; } /* It must be a TCP or UDP packet with a valid checksum */ skb->ip_summed = CHECKSUM_UNNECESSARY; ring->hw_csum_rx_good++; } /** * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split * @adapter: address of board private structure **/ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *rx_ring, int cleaned_count) { struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc; struct ixgbevf_rx_buffer *bi; unsigned int i = rx_ring->next_to_use; bi = &rx_ring->rx_buffer_info[i]; while (cleaned_count--) { rx_desc = IXGBEVF_RX_DESC(rx_ring, i); if (!bi->skb) { struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(rx_ring->netdev, rx_ring->rx_buf_len); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } bi->skb = skb; bi->dma = dma_map_single(&pdev->dev, skb->data, rx_ring->rx_buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, bi->dma)) { dev_kfree_skb(skb); bi->skb = NULL; dev_err(&pdev->dev, "RX DMA map failed\n"); break; } } rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) i = 0; bi = &rx_ring->rx_buffer_info[i]; } no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); } } static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, u32 qmask) { struct ixgbe_hw *hw = &adapter->hw; IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); } static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *rx_ring, int budget) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc, *next_rxd; struct ixgbevf_rx_buffer *rx_buffer_info, 
*next_buffer; struct sk_buff *skb; unsigned int i; u32 len, staterr; int cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; rx_desc = IXGBEVF_RX_DESC(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); rx_buffer_info = &rx_ring->rx_buffer_info[i]; while (staterr & IXGBE_RXD_STAT_DD) { if (!budget) break; budget--; rmb(); /* read descriptor and rx_buffer_info after status DD */ len = le16_to_cpu(rx_desc->wb.upper.length); skb = rx_buffer_info->skb; prefetch(skb->data - NET_IP_ALIGN); rx_buffer_info->skb = NULL; if (rx_buffer_info->dma) { dma_unmap_single(&pdev->dev, rx_buffer_info->dma, rx_ring->rx_buf_len, DMA_FROM_DEVICE); rx_buffer_info->dma = 0; skb_put(skb, len); } i++; if (i == rx_ring->count) i = 0; next_rxd = IXGBEVF_RX_DESC(rx_ring, i); prefetch(next_rxd); cleaned_count++; next_buffer = &rx_ring->rx_buffer_info[i]; if (!(staterr & IXGBE_RXD_STAT_EOP)) { skb->next = next_buffer->skb; IXGBE_CB(skb->next)->prev = skb; adapter->non_eop_descs++; goto next_desc; } /* we should not be chaining buffers, if we did drop the skb */ if (IXGBE_CB(skb)->prev) { do { struct sk_buff *this = skb; skb = IXGBE_CB(skb)->prev; dev_kfree_skb(this); } while (skb); goto next_desc; } /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { dev_kfree_skb_irq(skb); goto next_desc; } ixgbevf_rx_checksum(rx_ring, staterr, skb); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; /* * Work around issue of some types of VM to VM loop back * packets not getting split correctly */ if (staterr & IXGBE_RXD_STAT_LB) { u32 header_fixup_len = skb_headlen(skb); if (header_fixup_len < 14) skb_push(skb, header_fixup_len); } skb->protocol = eth_type_trans(skb, rx_ring->netdev); /* Workaround hardware that can't do proper VEPA multicast * source pruning. 
*/ if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && !(compare_ether_addr(adapter->netdev->dev_addr, eth_hdr(skb)->h_source))) { dev_kfree_skb_irq(skb); goto next_desc; } ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); next_desc: rx_desc->wb.upper.status_error = 0; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); cleaned_count = 0; } /* use prefetched values */ rx_desc = next_rxd; rx_buffer_info = &rx_ring->rx_buffer_info[i]; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } rx_ring->next_to_clean = i; cleaned_count = IXGBE_DESC_UNUSED(rx_ring); if (cleaned_count) ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); u64_stats_update_begin(&rx_ring->syncp); rx_ring->total_packets += total_rx_packets; rx_ring->total_bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; return !!budget; } /** * ixgbevf_poll - NAPI polling calback * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * * This function will clean more than one or more rings associated with a * q_vector. 
**/ static int ixgbevf_poll(struct napi_struct *napi, int budget) { struct ixgbevf_q_vector *q_vector = container_of(napi, struct ixgbevf_q_vector, napi); struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_ring *ring; int per_ring_budget; bool clean_complete = true; ixgbevf_for_each_ring(ring, q_vector->tx) clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ if (q_vector->rx.count > 1) per_ring_budget = max(budget/q_vector->rx.count, 1); else per_ring_budget = budget; adapter->flags |= IXGBE_FLAG_IN_NETPOLL; ixgbevf_for_each_ring(ring, q_vector->rx) clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, per_ring_budget); adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; /* all work done, exit the polling mode */ napi_complete(napi); if (adapter->rx_itr_setting & 1) ixgbevf_set_itr(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) ixgbevf_irq_enable_queues(adapter, 1 << q_vector->v_idx); return 0; } /** * ixgbevf_write_eitr - write VTEITR register in hardware specific way * @q_vector: structure containing interrupt and ring information */ static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; int v_idx = q_vector->v_idx; u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt */ itr_reg |= IXGBE_EITR_CNT_WDIS; IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } /** * ixgbevf_configure_msix - Configure MSI-X hardware * @adapter: board private structure * * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X * interrupts. 
**/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	/* v_idx now indexes the "other" (mailbox/link) vector */
	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	/* NOTE(review): assumes q_vector->itr >= 4 so timepassed_us != 0;
	 * the ITR constants used elsewhere satisfy this - confirm no caller
	 * can leave itr below 4 (division by zero otherwise).
	 */
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

/* Recompute and, if changed, program the EITR value for a vector based on
 * the latency class of its Rx and Tx containers (the busier one wins).
 */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

/* MSI-X "other" vector handler: services PF mailbox traffic (link status
 * change / control messages / NACKs) and re-enables the other-cause
 * interrupt.  Runs in hard-irq context; mailbox ops here are the VF side.
 */
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msg;
	bool got_ack = false;

	hw->mac.get_link_status = 1;
	if (!hw->mbx.ops.check_for_ack(hw))
		got_ack = true;

	if (!hw->mbx.ops.check_for_msg(hw)) {
		hw->mbx.ops.read(hw, &msg, 1);

		/* PF control message: link may have changed - kick the
		 * watchdog and assume link down until it re-checks.
		 */
		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 1));
			adapter->link_up = false;
		}

		if (msg & IXGBE_VT_MSGTYPE_NACK)
			dev_info(&pdev->dev,
				 "Last Request of type %2.2x to PF Nacked\n",
				 msg & 0xFF);

		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
	}

	/* checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/* Link Rx ring r_idx onto vector v_idx's Rx ring list (singly linked via
 * ring->next; most recently mapped ring becomes the list head).
 */
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

/* Tx counterpart of map_vector_to_rxq. */
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		/* NOTE(review): sizeof(name) - 1 leaves the last byte of the
		 * name buffer unused; snprintf already NUL-terminates within
		 * the given size, so plain sizeof() would suffice.
		 */
		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	/* the last vector (index q_vectors) is the mailbox/other vector */
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	/* unwind only the queue irqs that were actually requested */
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

/* Detach all rings from their vectors and zero the per-vector counts,
 * so the ring<->vector mapping can be rebuilt from scratch.
 */
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	/* the VF only supports MSI-X; there is no legacy/MSI fallback */
	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

/* Free all requested irqs (the "other" vector first, then the queue
 * vectors that were actually mapped) and reset the ring mapping.
 * NOTE(review): no guard for num_msix_vectors == 0 / a failed
 * request_msix_irqs; confirm callers never reach here in that state.
 */
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* clear auto-mask, mask all causes, disable auto-clear */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	/* wait for any in-flight handlers on every vector to finish */
	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

/* Program the split-receive control register for one Rx queue:
 * advanced one-buffer descriptors, drop-enable, and the buffer size
 * (rounded up to 1 KB granularity as the SRRCTL field requires).
 */
static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

/* Pick an Rx buffer size for the current MTU, tell the PF what frame
 * size we intend to use, and apply the size to every Rx ring.
 */
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		ixgbevf_configure_srrctl(adapter, j);
	}
}

/* ndo_vlan_rx_add_vid: ask the PF (via mailbox, under mbx_lock) to add
 * @vid to the VLAN filter table, then record it in active_vlans.
 */
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

/* ndo_vlan_rx_kill_vid: remove @vid from the PF's VLAN filter table.
 * NOTE(review): the -EOPNOTSUPP initializer is dead (err is always
 * overwritten), and unlike the add path the mailbox error codes are
 * returned untranslated - verify callers tolerate IXGBE_ERR_* values.
 */
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

/* Re-program every VLAN id recorded in active_vlans (used after reset). */
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

/* Push the netdev's unicast address list to the PF.  Returns the number
 * of addresses programmed, or -ENOSPC when the list exceeds the limit.
 * NOTE(review): the limit of 10 filters is hard-coded - confirm it
 * matches what the PF actually grants this VF.
 */
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

/* Enable NAPI on every queue vector. */
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

/* Disable NAPI on every queue vector (waits for in-progress polls). */
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/* Full device configuration: rx-mode, VLANs, Tx/Rx rings, and initial
 * Rx buffer allocation for each ring.
 */
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10

/* Poll until the hardware reports RXDCTL.ENABLE for ring @rxr, then
 * bump the ring tail so the device can start filling buffers.
 */
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
				adapter->rx_ring[rxr].count - 1);
}

/* Accumulate the deltas since the last reset into the saved_reset_*
 * counters, so statistics survive a hardware reset.
 */
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

/* Snapshot the hardware counters as the new last_*/base_* values; the
 * 36-bit octet counters are read as LSB + MSB register pairs.
 */
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

/* Negotiate the highest mailbox API version the PF supports, trying
 * newest first.  NOTE(review): the final err is discarded; on total
 * failure the driver silently continues with the legacy API - confirm
 * that is the intended fallback.
 */
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

/* Bring the device fully up: enable Tx/Rx queues in hardware, program
 * MSI-X, set the MAC filter, start NAPI/transmit and kick the watchdog.
 */
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			/* X540 VF programs the max frame length per ring */
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

/* Fetch the queue configuration from the PF and, if the number of Rx
 * queues changed (DCB), reallocate the Rx rings and rebuild the
 * ring-to-vector mapping.  Returns 0 or a negative errno.
 */
static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;

		/* allocate resources on the ring */
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			/* unwind rings allocated so far */
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
			}
			kfree(rx_ring);
			return err;
		}
	}

	/* free the existing rings and queues */
	ixgbevf_free_all_rx_resources(adapter);
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	/* reset ring to vector mapping */
	ixgbevf_reset_q_vectors(adapter);
	ixgbevf_map_rings_to_vectors(adapter);

	return 0;
}

/* Public "up" entry point: negotiate the mailbox API, (re)build queues,
 * configure the device, complete bring-up and enable interrupts.
 */
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_reset_queues(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			/* walk the merged-skb chain via IXGBE_CB()->prev */
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/* Tear the device down: stop Tx, mask interrupts, stop NAPI and the
 * watchdog, disable hardware Tx queues, then reset and clean the rings.
 * Ordering here matters for quiescing in-flight work.
 */
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* stop all Tx queues and let in-flight transmits drain */
	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}

/* Restart the device, serialized against concurrent resets via the
 * __IXGBEVF_RESETTING state bit.  Must not be called from irq context.
 */
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

/* Reset the hardware via the PF and refresh the netdev MAC address if
 * the PF assigned a valid one.
 */
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}

/* Acquire up to @vectors MSI-X vectors, retrying with however many the
 * kernel says are available, down to the minimum the VF can run with.
 * On failure the msix_entries table is freed and -ENOMEM returned.
 */
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int err = 0;
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err || err < 0) /* Success or a nasty failure. */
			break;
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold)
		err = -ENOMEM;

	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}

	return err;
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		/* reg_idx may be remapped later by DCB config */
		adapter->tx_ring[i].reg_idx = i;
		adapter->tx_ring[i].dev = &adapter->pdev->dev;
		adapter->tx_ring[i].netdev = adapter->netdev;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
		adapter->rx_ring[i].dev = &adapter->pdev->dev;
		adapter->rx_ring[i].netdev = adapter->netdev;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	/* unwind the vectors allocated so far */
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
		         "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

/* 32-bit hardware counter accumulation with wrap detection: the low
 * 32 bits mirror the register, the high bits count wraps.
 * NOTE(review): these multi-statement macros are bare { } blocks, not
 * do { } while (0) - unsafe after a braceless if/else.  They also
 * evaluate last_counter/counter more than once; only pass plain lvalues.
 */
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

/* 36-bit (LSB+MSB register pair) variant of the accumulation macro. */
#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* counters are only meaningful while the link is up */
	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	/* accumulate and clear per-ring checksum counters */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i].hw_csum_rx_error;
		adapter->hw_csum_rx_good +=
			adapter->rx_ring[i].hw_csum_rx_good;
		adapter->rx_ring[i].hw_csum_rx_error = 0;
		adapter->rx_ring[i].hw_csum_rx_good = 0;
	}
}

/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	/* heavy lifting happens in ixgbevf_watchdog_task() */
	schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

/* Work item scheduled by ixgbevf_tx_timeout()/watchdog: reinitialize the
 * device unless it is already going down or mid-reset. */
static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		/* PF reset detected: stop traffic and request a full reset */
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if
(!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}
	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}

/* Query the PF for the queue/TC layout and, if it changed, replace the Rx
 * ring array to match (one Rx queue per traffic class when DCB is on). */
static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err, i;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* nothing to do if we have the correct number of queues */
	if (adapter->num_rx_queues == num_rx_queues)
		return 0;

	/* allocate new rings */
	rx_ring = kcalloc(num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring)
		return -ENOMEM;

	/* setup ring fields */
	for (i = 0; i < num_rx_queues; i++) {
		rx_ring[i].count = adapter->rx_ring_count;
		rx_ring[i].queue_index = i;
		rx_ring[i].reg_idx = i;
		rx_ring[i].dev = &adapter->pdev->dev;
		rx_ring[i].netdev = adapter->netdev;
	}

	/* free the existing ring and queues */
	adapter->num_rx_queues = 0;
	kfree(adapter->rx_ring);

	/* move new rings into position on the adapter struct */
	adapter->rx_ring = rx_ring;
	adapter->num_rx_queues = num_rx_queues;

	return 0;
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	ixgbevf_negotiate_api(adapter);

	/* setup queue reg_idx and Rx queue count */
	err = ixgbevf_setup_queues(adapter);
	if (err)
		goto err_setup_queues;

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

	/* unwind: each label frees what was set up before its goto */
err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
err_setup_queues:
	ixgbevf_reset(adapter);
err_setup_reset:

	return err;
}

/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
**/ static int ixgbevf_close(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); return 0; } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, u32 vlan_macip_lens, u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); i++; tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; /* set bits to identify this as an advanced context descriptor */ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->seqnum_seed = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) { u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; if (!skb_is_gso(skb)) return 0; if (skb_header_cloned(skb)) { int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; if (skb->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } /* compute header lengths */ l4len = tcp_hdrlen(skb); *hdr_len += l4len; *hdr_len = skb_transport_offset(skb) + l4len; /* mss_l4len_id: use 1 as index for TSO */ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= 1 << 
IXGBE_ADVTXD_IDX_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_network_header_len(skb); vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return 1; } static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags) { u32 vlan_macip_lens = 0; u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; switch (skb->protocol) { case __constant_htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; l4_hdr = ip_hdr(skb)->protocol; break; case __constant_htons(ETH_P_IPV6): vlan_macip_lens |= skb_network_header_len(skb); l4_hdr = ipv6_hdr(skb)->nexthdr; break; default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, "partial checksum but proto=%x!\n", skb->protocol); } break; } switch (l4_hdr) { case IPPROTO_TCP: type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; mss_l4len_idx = tcp_hdrlen(skb) << IXGBE_ADVTXD_L4LEN_SHIFT; break; case IPPROTO_SCTP: type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; mss_l4len_idx = sizeof(struct sctphdr) << IXGBE_ADVTXD_L4LEN_SHIFT; break; case IPPROTO_UDP: mss_l4len_idx = sizeof(struct udphdr) << IXGBE_ADVTXD_L4LEN_SHIFT; break; default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, "partial checksum but l4 proto=%x!\n", l4_hdr); } break; } } /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); return (skb->ip_summed == CHECKSUM_PARTIAL); } static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags) { struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; unsigned int total = skb->len; unsigned int offset = 0, size; int 
count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	/* map the linear (head) portion, splitting at the hardware's
	 * per-descriptor data limit */
	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
			goto dma_error;
		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	/* map each paged fragment the same way */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(tx_ring->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(tx_ring->dev,
					      tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->mapped_as_page = true;
			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	/* record the skb on the last descriptor used */
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;

	return count;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return count;
}

/* Fill @count data descriptors (previously mapped by ixgbevf_tx_map) and
 * hand them to the hardware; the last one carries EOP/RS. */
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, unsigned int first, u32 paylen,
			     u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
	}

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= IXGBE_ADVTXD_CC;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	/* last descriptor of the packet gets EOP/RS */
	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	tx_ring->tx_buffer_info[first].time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
	tx_ring->next_to_use = i;
}

/* Slow path of ixgbevf_maybe_stop_tx(): stop the subqueue, re-check
 * available descriptors under the memory barrier, and restart if room
 * appeared in the meantime.  Returns -EBUSY if the queue stays stopped. */
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

/* Stop the queue if fewer than @size descriptors remain; 0 if transmit
 * may proceed, -EBUSY otherwise. */
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

/* .ndo_start_xmit entry point: validate the skb, reserve descriptors,
 * queue any needed context descriptor (TSO/csum), map and post the data
 * descriptors, then ring the tail doorbell. */
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
	/* VFs may not send link-local (e.g. LLDP) frames */
	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = &adapter->tx_ring[r_idx];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
	else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;
ixgbevf_tx_queue(tx_ring, tx_flags, ixgbevf_tx_map(tx_ring, skb, tx_flags), first, skb->len, hdr_len); writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); return NETDEV_TX_OK; } /** * ixgbevf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure * * Returns 0 on success, negative on failure **/ static int ixgbevf_set_mac(struct net_device *netdev, void *p) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); spin_lock_bh(&adapter->mbx_lock); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); spin_unlock_bh(&adapter->mbx_lock); return 0; } /** * ixgbevf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size * * Returns 0 on success, negative on failure **/ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; default: if (adapter->hw.mac.type == ixgbe_mac_X540_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; } /* MTU < 68 is an error and causes problems on some kernels */ if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); return 0; } static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t 
state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); #ifdef CONFIG_PM int retval = 0; #endif netif_device_detach(netdev); if (netif_running(netdev)) { rtnl_lock(); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); rtnl_unlock(); } ixgbevf_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) return retval; #endif pci_disable_device(pdev); return 0; } #ifdef CONFIG_PM static int ixgbevf_resume(struct pci_dev *pdev) { struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* * pci_restore_state clears dev->state_saved so call * pci_save_state to restore it. */ pci_save_state(pdev); err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); rtnl_lock(); err = ixgbevf_init_interrupt_scheme(adapter); rtnl_unlock(); if (err) { dev_err(&pdev->dev, "Cannot initialize interrupts\n"); return err; } ixgbevf_reset(adapter); if (netif_running(netdev)) { err = ixgbevf_open(netdev); if (err) return err; } netif_device_attach(netdev); return err; } #endif /* CONFIG_PM */ static void ixgbevf_shutdown(struct pci_dev *pdev) { ixgbevf_suspend(pdev, PMSG_SUSPEND); } static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); unsigned int start; u64 bytes, packets; const struct ixgbevf_ring *ring; int i; ixgbevf_update_stats(adapter); stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; for (i = 0; i < adapter->num_rx_queues; i++) { ring = &adapter->rx_ring[i]; do { start = u64_stats_fetch_begin_bh(&ring->syncp); bytes = ring->total_bytes; packets = ring->total_packets; } while 
(u64_stats_fetch_retry_bh(&ring->syncp, start)); stats->rx_bytes += bytes; stats->rx_packets += packets; } for (i = 0; i < adapter->num_tx_queues; i++) { ring = &adapter->tx_ring[i]; do { start = u64_stats_fetch_begin_bh(&ring->syncp); bytes = ring->total_bytes; packets = ring->total_packets; } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); stats->tx_bytes += bytes; stats->tx_packets += packets; } return stats; } static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, .ndo_start_xmit = ixgbevf_xmit_frame, .ndo_set_rx_mode = ixgbevf_set_rx_mode, .ndo_get_stats64 = ixgbevf_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbevf_set_mac, .ndo_change_mtu = ixgbevf_change_mtu, .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) { dev->netdev_ops = &ixgbevf_netdev_ops; ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; } /** * ixgbevf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbevf_pci_tbl * * Returns 0 on success, negative on failure * * ixgbevf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
**/ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbevf_adapter *adapter = NULL; struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; static int cards_found; int err, pci_using_dac; err = pci_enable_device(pdev); if (err) return err; if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA " "configuration, aborting\n"); goto err_dma; } } pci_using_dac = 0; } err = pci_request_regions(pdev, ixgbevf_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); goto err_pci_reg; } pci_set_master(pdev); netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), MAX_TX_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; hw = &adapter->hw; hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); /* * call save state here in standalone driver because it relies on * adapter struct to exist, and needs to call netdev_priv */ pci_save_state(pdev); hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!hw->hw_addr) { err = -EIO; goto err_ioremap; } ixgbevf_assign_netdev_ops(netdev); adapter->bd_number = cards_found; /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, sizeof(struct ixgbe_mbx_operations)); /* setup the private structure */ err = ixgbevf_sw_init(adapter); if (err) goto err_sw_init; /* The HW MAC address was set and/or determined in sw_init */ if (!is_valid_ether_addr(netdev->dev_addr)) { 
pr_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; } netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_IP_CSUM; netdev->vlan_features |= NETIF_F_IPV6_CSUM; netdev->vlan_features |= NETIF_F_SG; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = ixgbevf_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); err = ixgbevf_init_interrupt_scheme(adapter); if (err) goto err_sw_init; strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_register; netif_carrier_off(netdev); ixgbevf_init_last_counter_stats(adapter); /* print the MAC address */ hw_dbg(hw, "%pM\n", netdev->dev_addr); hw_dbg(hw, "MAC: %d\n", hw->mac.type); hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); cards_found++; return 0; err_register: ixgbevf_clear_interrupt_scheme(adapter); err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); return err; } /** * ixgbevf_remove - Device Removal Routine * @pdev: PCI device information struct * * ixgbevf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. The could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Mark the adapter DOWN first so the watchdog and reset paths see the
	 * flag and stop rearming themselves while we tear everything down. */
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	/* Stop the deferred-work machinery before freeing anything it uses. */
	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	/* Only unregister if probe got far enough to register the netdev. */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* Permanent failure: nothing can be recovered, tell the core to
	 * disconnect the device. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 */
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 */
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks (legacy PCI PM callbacks) */
	.suspend  = ixgbevf_suspend,
	.resume   = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
**/
static int __init ixgbevf_init_module(void)
{
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	/* Registration is the only fallible step; return the PCI core's
	 * result directly instead of going through a pointless temporary. */
	return pci_register_driver(&ixgbevf_driver);
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * @hw: hardware structure whose backing adapter's netdev name is wanted
 *
 * Used by the hardware layer to print debugging information.
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}
#endif

module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */
gpl-2.0
daishi4u/J7_Afterburner
drivers/input/joystick/warrior.c
2652
6142
/* * Copyright (c) 1999-2001 Vojtech Pavlik */ /* * Logitech WingMan Warrior joystick driver for Linux */ /* * This program is free warftware; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "Logitech WingMan Warrior joystick driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Constants. */ #define WARRIOR_MAX_LENGTH 16 static char warrior_lengths[] = { 0, 4, 12, 3, 4, 4, 0, 0 }; /* * Per-Warrior data. */ struct warrior { struct input_dev *dev; int idx, len; unsigned char data[WARRIOR_MAX_LENGTH]; char phys[32]; }; /* * warrior_process_packet() decodes packets the driver receives from the * Warrior. It updates the data accordingly. 
*/ static void warrior_process_packet(struct warrior *warrior) { struct input_dev *dev = warrior->dev; unsigned char *data = warrior->data; if (!warrior->idx) return; switch ((data[0] >> 4) & 7) { case 1: /* Button data */ input_report_key(dev, BTN_TRIGGER, data[3] & 1); input_report_key(dev, BTN_THUMB, (data[3] >> 1) & 1); input_report_key(dev, BTN_TOP, (data[3] >> 2) & 1); input_report_key(dev, BTN_TOP2, (data[3] >> 3) & 1); break; case 3: /* XY-axis info->data */ input_report_abs(dev, ABS_X, ((data[0] & 8) << 5) - (data[2] | ((data[0] & 4) << 5))); input_report_abs(dev, ABS_Y, (data[1] | ((data[0] & 1) << 7)) - ((data[0] & 2) << 7)); break; case 5: /* Throttle, spinner, hat info->data */ input_report_abs(dev, ABS_THROTTLE, (data[1] | ((data[0] & 1) << 7)) - ((data[0] & 2) << 7)); input_report_abs(dev, ABS_HAT0X, (data[3] & 2 ? 1 : 0) - (data[3] & 1 ? 1 : 0)); input_report_abs(dev, ABS_HAT0Y, (data[3] & 8 ? 1 : 0) - (data[3] & 4 ? 1 : 0)); input_report_rel(dev, REL_DIAL, (data[2] | ((data[0] & 4) << 5)) - ((data[0] & 8) << 5)); break; } input_sync(dev); } /* * warrior_interrupt() is called by the low level driver when characters * are ready for us. We then buffer them for further processing, or call the * packet processing routine. 
*/ static irqreturn_t warrior_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct warrior *warrior = serio_get_drvdata(serio); if (data & 0x80) { if (warrior->idx) warrior_process_packet(warrior); warrior->idx = 0; warrior->len = warrior_lengths[(data >> 4) & 7]; } if (warrior->idx < warrior->len) warrior->data[warrior->idx++] = data; if (warrior->idx == warrior->len) { if (warrior->idx) warrior_process_packet(warrior); warrior->idx = 0; warrior->len = 0; } return IRQ_HANDLED; } /* * warrior_disconnect() is the opposite of warrior_connect() */ static void warrior_disconnect(struct serio *serio) { struct warrior *warrior = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(warrior->dev); kfree(warrior); } /* * warrior_connect() is the routine that is called when someone adds a * new serio device. It looks for the Warrior, and if found, registers * it as an input device. */ static int warrior_connect(struct serio *serio, struct serio_driver *drv) { struct warrior *warrior; struct input_dev *input_dev; int err = -ENOMEM; warrior = kzalloc(sizeof(struct warrior), GFP_KERNEL); input_dev = input_allocate_device(); if (!warrior || !input_dev) goto fail1; warrior->dev = input_dev; snprintf(warrior->phys, sizeof(warrior->phys), "%s/input0", serio->phys); input_dev->name = "Logitech WingMan Warrior"; input_dev->phys = warrior->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_WARRIOR; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TRIGGER)] = BIT_MASK(BTN_TRIGGER) | BIT_MASK(BTN_THUMB) | BIT_MASK(BTN_TOP) | BIT_MASK(BTN_TOP2); input_dev->relbit[0] = BIT_MASK(REL_DIAL); input_set_abs_params(input_dev, ABS_X, -64, 64, 0, 8); input_set_abs_params(input_dev, ABS_Y, -64, 64, 0, 8); input_set_abs_params(input_dev, 
ABS_THROTTLE, -112, 112, 0, 0); input_set_abs_params(input_dev, ABS_HAT0X, -1, 1, 0, 0); input_set_abs_params(input_dev, ABS_HAT0Y, -1, 1, 0, 0); serio_set_drvdata(serio, warrior); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(warrior->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(warrior); return err; } /* * The serio driver structure. */ static struct serio_device_id warrior_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_WARRIOR, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, warrior_serio_ids); static struct serio_driver warrior_drv = { .driver = { .name = "warrior", }, .description = DRIVER_DESC, .id_table = warrior_serio_ids, .interrupt = warrior_interrupt, .connect = warrior_connect, .disconnect = warrior_disconnect, }; module_serio_driver(warrior_drv);
gpl-2.0
ashishtanwer/NFTable-porting-on-Android-Goldfish
drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
2908
11352
/* * Copyright(c) 2007 Atheros Corporation. All rights reserved. * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/slab.h> #include "atl1e.h" static int atl1e_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP); if (hw->nic_type == athr_l1e) ecmd->supported |= SUPPORTED_1000baseT_Full; ecmd->advertising = ADVERTISED_TP; ecmd->advertising |= ADVERTISED_Autoneg; ecmd->advertising |= hw->autoneg_advertised; ecmd->port = PORT_TP; ecmd->phy_address = 0; ecmd->transceiver = XCVR_INTERNAL; if (adapter->link_speed != SPEED_0) { ethtool_cmd_speed_set(ecmd, adapter->link_speed); if (adapter->link_duplex == FULL_DUPLEX) ecmd->duplex = DUPLEX_FULL; else ecmd->duplex = DUPLEX_HALF; } else { ethtool_cmd_speed_set(ecmd, -1); ecmd->duplex = -1; } ecmd->autoneg = AUTONEG_ENABLE; return 0; } static int atl1e_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct atl1e_adapter *adapter = 
netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) msleep(1); if (ecmd->autoneg == AUTONEG_ENABLE) { u16 adv4, adv9; if ((ecmd->advertising&ADVERTISE_1000_FULL)) { if (hw->nic_type == athr_l1e) { hw->autoneg_advertised = ecmd->advertising & AT_ADV_MASK; } else { clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } } else if (ecmd->advertising&ADVERTISE_1000_HALF) { clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } else { hw->autoneg_advertised = ecmd->advertising & AT_ADV_MASK; } ecmd->advertising = hw->autoneg_advertised | ADVERTISED_TP | ADVERTISED_Autoneg; adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL; adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; if (hw->autoneg_advertised & ADVERTISE_10_HALF) adv4 |= ADVERTISE_10HALF; if (hw->autoneg_advertised & ADVERTISE_10_FULL) adv4 |= ADVERTISE_10FULL; if (hw->autoneg_advertised & ADVERTISE_100_HALF) adv4 |= ADVERTISE_100HALF; if (hw->autoneg_advertised & ADVERTISE_100_FULL) adv4 |= ADVERTISE_100FULL; if (hw->autoneg_advertised & ADVERTISE_1000_FULL) adv9 |= ADVERTISE_1000FULL; if (adv4 != hw->mii_autoneg_adv_reg || adv9 != hw->mii_1000t_ctrl_reg) { hw->mii_autoneg_adv_reg = adv4; hw->mii_1000t_ctrl_reg = adv9; hw->re_autoneg = true; } } else { clear_bit(__AT_RESETTING, &adapter->flags); return -EINVAL; } /* reset the link */ if (netif_running(adapter->netdev)) { atl1e_down(adapter); atl1e_up(adapter); } else atl1e_reset_hw(&adapter->hw); clear_bit(__AT_RESETTING, &adapter->flags); return 0; } static u32 atl1e_get_msglevel(struct net_device *netdev) { #ifdef DBG return 1; #else return 0; #endif } static int atl1e_get_regs_len(struct net_device *netdev) { return AT_REGS_LEN * sizeof(u32); } static void atl1e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 *regs_buff = p; u16 phy_data; memset(p, 0, 
AT_REGS_LEN * sizeof(u32)); regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP); regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG); regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL); regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL); regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT); regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL); regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER); regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS); regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL); regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK); regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL); regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG); regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR); regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4); regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE); regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4); regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); regs_buff[20] = AT_READ_REG(hw, REG_MTU); regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL); regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR); regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN); regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR); regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN); regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR); regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN); regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR); regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR); atl1e_read_phy_reg(hw, MII_BMCR, &phy_data); regs_buff[73] = (u32)phy_data; atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); regs_buff[74] = (u32)phy_data; } static int atl1e_get_eeprom_len(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); if (!atl1e_check_eeprom_exist(&adapter->hw)) return AT_EEPROM_LEN; else return 0; } static int 
atl1e_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 *eeprom_buff; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EINVAL; if (atl1e_check_eeprom_exist(hw)) /* not exist */ return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1), GFP_KERNEL); if (eeprom_buff == NULL) return -ENOMEM; for (i = first_dword; i < last_dword; i++) { if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { kfree(eeprom_buff); return -EIO; } } memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); kfree(eeprom_buff); return ret_val; } static int atl1e_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; u32 *eeprom_buff; u32 *ptr; int first_dword, last_dword; int ret_val = 0; int i; if (eeprom->len == 0) return -EOPNOTSUPP; if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) return -EINVAL; first_dword = eeprom->offset >> 2; last_dword = (eeprom->offset + eeprom->len - 1) >> 2; eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL); if (eeprom_buff == NULL) return -ENOMEM; ptr = eeprom_buff; if (eeprom->offset & 3) { /* need read/modify/write of first changed EEPROM word */ /* only the second byte of the word is being modified */ if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) { ret_val = -EIO; goto out; } ptr++; } if (((eeprom->offset + eeprom->len) & 3)) { /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ if (!atl1e_read_eeprom(hw, last_dword * 4, &(eeprom_buff[last_dword - first_dword]))) { ret_val = -EIO; goto out; } } /* Device's 
eeprom is always little-endian, word addressable */ memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_dword - first_dword + 1; i++) { if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4), eeprom_buff[i])) { ret_val = -EIO; goto out; } } out: kfree(eeprom_buff); return ret_val; } static void atl1e_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct atl1e_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, atl1e_driver_version, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = 0; drvinfo->testinfo_len = 0; drvinfo->regdump_len = atl1e_get_regs_len(netdev); drvinfo->eedump_len = atl1e_get_eeprom_len(netdev); } static void atl1e_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1e_adapter *adapter = netdev_priv(netdev); wol->supported = WAKE_MAGIC | WAKE_PHY; wol->wolopts = 0; if (adapter->wol & AT_WUFC_EX) wol->wolopts |= WAKE_UCAST; if (adapter->wol & AT_WUFC_MC) wol->wolopts |= WAKE_MCAST; if (adapter->wol & AT_WUFC_BC) wol->wolopts |= WAKE_BCAST; if (adapter->wol & AT_WUFC_MAG) wol->wolopts |= WAKE_MAGIC; if (adapter->wol & AT_WUFC_LNKC) wol->wolopts |= WAKE_PHY; } static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct atl1e_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) return -EOPNOTSUPP; /* these settings will always override what we currently have */ adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= AT_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= AT_WUFC_LNKC; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); return 0; } static int atl1e_nway_reset(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); if 
(netif_running(netdev)) atl1e_reinit_locked(adapter); return 0; } static const struct ethtool_ops atl1e_ethtool_ops = { .get_settings = atl1e_get_settings, .set_settings = atl1e_set_settings, .get_drvinfo = atl1e_get_drvinfo, .get_regs_len = atl1e_get_regs_len, .get_regs = atl1e_get_regs, .get_wol = atl1e_get_wol, .set_wol = atl1e_set_wol, .get_msglevel = atl1e_get_msglevel, .nway_reset = atl1e_nway_reset, .get_link = ethtool_op_get_link, .get_eeprom_len = atl1e_get_eeprom_len, .get_eeprom = atl1e_get_eeprom, .set_eeprom = atl1e_set_eeprom, }; void atl1e_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops); }
gpl-2.0
roguesyko/the_reaper_g3
fs/hfs/super.c
4444
12378
/*
 * linux/fs/hfs/super.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains hfs_read_super(), some of the super_ops and
 * init_hfs_fs() and exit_hfs_fs(). The remaining super_ops are in
 * inode.c since they deal with inodes.
 *
 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/nls.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vfs.h>

#include "hfs_fs.h"
#include "btree.h"

/* Slab cache for struct hfs_inode_info, created in init_hfs_fs(). */
static struct kmem_cache *hfs_inode_cachep;

MODULE_LICENSE("GPL");

/*
 * hfs_write_super()
 *
 * Description:
 *   This function is called by the VFS only. When the filesystem
 *   is mounted r/w it updates the MDB on disk.
 * Input Variable(s):
 *   struct super_block *sb: Pointer to the hfs superblock
 * Output Variable(s):
 *   NONE
 * Returns:
 *   void
 * Preconditions:
 *   'sb' points to a "valid" (struct super_block).
 * Postconditions:
 *   The MDB is marked 'unsuccessfully unmounted' by clearing bit 8 of drAtrb
 *   (hfs_put_super() must set this flag!). Some MDB fields are updated
 *   and the MDB buffer is written to disk by calling hfs_mdb_commit().
 */
static void hfs_write_super(struct super_block *sb)
{
	lock_super(sb);
	/* Clear the dirty flag before committing so a write racing in after
	 * the commit re-marks the superblock dirty rather than being lost. */
	sb->s_dirt = 0;

	/* sync everything to the buffers */
	if (!(sb->s_flags & MS_RDONLY))
		hfs_mdb_commit(sb);
	unlock_super(sb);
}

/* sync_fs entry point: unconditionally commit the MDB under the superblock
 * lock and clear the dirty flag. The @wait argument is ignored here. */
static int hfs_sync_fs(struct super_block *sb, int wait)
{
	lock_super(sb);
	hfs_mdb_commit(sb);
	sb->s_dirt = 0;
	unlock_super(sb);
	return 0;
}

/*
 * hfs_put_super()
 *
 * This is the put_super() entry in the super_operations structure for
 * HFS filesystems. The purpose is to release the resources
 * associated with the superblock sb.
*/ static void hfs_put_super(struct super_block *sb) { if (sb->s_dirt) hfs_write_super(sb); hfs_mdb_close(sb); /* release the MDB's resources */ hfs_mdb_put(sb); } /* * hfs_statfs() * * This is the statfs() entry in the super_operations structure for * HFS filesystems. The purpose is to return various data about the * filesystem. * * changed f_files/f_ffree to reflect the fs_ablock/free_ablocks. */ static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = HFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = (u32)HFS_SB(sb)->fs_ablocks * HFS_SB(sb)->fs_div; buf->f_bfree = (u32)HFS_SB(sb)->free_ablocks * HFS_SB(sb)->fs_div; buf->f_bavail = buf->f_bfree; buf->f_files = HFS_SB(sb)->fs_ablocks; buf->f_ffree = HFS_SB(sb)->free_ablocks; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); buf->f_namelen = HFS_NAMELEN; return 0; } static int hfs_remount(struct super_block *sb, int *flags, char *data) { *flags |= MS_NODIRATIME; if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) return 0; if (!(*flags & MS_RDONLY)) { if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) { printk(KERN_WARNING "hfs: filesystem was not cleanly unmounted, " "running fsck.hfs is recommended. 
leaving read-only.\n"); sb->s_flags |= MS_RDONLY; *flags |= MS_RDONLY; } else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) { printk(KERN_WARNING "hfs: filesystem is marked locked, leaving read-only.\n"); sb->s_flags |= MS_RDONLY; *flags |= MS_RDONLY; } } return 0; } static int hfs_show_options(struct seq_file *seq, struct dentry *root) { struct hfs_sb_info *sbi = HFS_SB(root->d_sb); if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f)) seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator); if (sbi->s_type != cpu_to_be32(0x3f3f3f3f)) seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type); seq_printf(seq, ",uid=%u,gid=%u", sbi->s_uid, sbi->s_gid); if (sbi->s_file_umask != 0133) seq_printf(seq, ",file_umask=%o", sbi->s_file_umask); if (sbi->s_dir_umask != 0022) seq_printf(seq, ",dir_umask=%o", sbi->s_dir_umask); if (sbi->part >= 0) seq_printf(seq, ",part=%u", sbi->part); if (sbi->session >= 0) seq_printf(seq, ",session=%u", sbi->session); if (sbi->nls_disk) seq_printf(seq, ",codepage=%s", sbi->nls_disk->charset); if (sbi->nls_io) seq_printf(seq, ",iocharset=%s", sbi->nls_io->charset); if (sbi->s_quiet) seq_printf(seq, ",quiet"); return 0; } static struct inode *hfs_alloc_inode(struct super_block *sb) { struct hfs_inode_info *i; i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL); return i ? 
&i->vfs_inode : NULL; } static void hfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(hfs_inode_cachep, HFS_I(inode)); } static void hfs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, hfs_i_callback); } static const struct super_operations hfs_super_operations = { .alloc_inode = hfs_alloc_inode, .destroy_inode = hfs_destroy_inode, .write_inode = hfs_write_inode, .evict_inode = hfs_evict_inode, .put_super = hfs_put_super, .write_super = hfs_write_super, .sync_fs = hfs_sync_fs, .statfs = hfs_statfs, .remount_fs = hfs_remount, .show_options = hfs_show_options, }; enum { opt_uid, opt_gid, opt_umask, opt_file_umask, opt_dir_umask, opt_part, opt_session, opt_type, opt_creator, opt_quiet, opt_codepage, opt_iocharset, opt_err }; static const match_table_t tokens = { { opt_uid, "uid=%u" }, { opt_gid, "gid=%u" }, { opt_umask, "umask=%o" }, { opt_file_umask, "file_umask=%o" }, { opt_dir_umask, "dir_umask=%o" }, { opt_part, "part=%u" }, { opt_session, "session=%u" }, { opt_type, "type=%s" }, { opt_creator, "creator=%s" }, { opt_quiet, "quiet" }, { opt_codepage, "codepage=%s" }, { opt_iocharset, "iocharset=%s" }, { opt_err, NULL } }; static inline int match_fourchar(substring_t *arg, u32 *result) { if (arg->to - arg->from != 4) return -EINVAL; memcpy(result, arg->from, 4); return 0; } /* * parse_options() * * adapted from linux/fs/msdos/inode.c written 1992,93 by Werner Almesberger * This function is called by hfs_read_super() to parse the mount options. */ static int parse_options(char *options, struct hfs_sb_info *hsb) { char *p; substring_t args[MAX_OPT_ARGS]; int tmp, token; /* initialize the sb with defaults */ hsb->s_uid = current_uid(); hsb->s_gid = current_gid(); hsb->s_file_umask = 0133; hsb->s_dir_umask = 0022; hsb->s_type = hsb->s_creator = cpu_to_be32(0x3f3f3f3f); /* == '????' 
*/ hsb->s_quiet = 0; hsb->part = -1; hsb->session = -1; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case opt_uid: if (match_int(&args[0], &tmp)) { printk(KERN_ERR "hfs: uid requires an argument\n"); return 0; } hsb->s_uid = (uid_t)tmp; break; case opt_gid: if (match_int(&args[0], &tmp)) { printk(KERN_ERR "hfs: gid requires an argument\n"); return 0; } hsb->s_gid = (gid_t)tmp; break; case opt_umask: if (match_octal(&args[0], &tmp)) { printk(KERN_ERR "hfs: umask requires a value\n"); return 0; } hsb->s_file_umask = (umode_t)tmp; hsb->s_dir_umask = (umode_t)tmp; break; case opt_file_umask: if (match_octal(&args[0], &tmp)) { printk(KERN_ERR "hfs: file_umask requires a value\n"); return 0; } hsb->s_file_umask = (umode_t)tmp; break; case opt_dir_umask: if (match_octal(&args[0], &tmp)) { printk(KERN_ERR "hfs: dir_umask requires a value\n"); return 0; } hsb->s_dir_umask = (umode_t)tmp; break; case opt_part: if (match_int(&args[0], &hsb->part)) { printk(KERN_ERR "hfs: part requires an argument\n"); return 0; } break; case opt_session: if (match_int(&args[0], &hsb->session)) { printk(KERN_ERR "hfs: session requires an argument\n"); return 0; } break; case opt_type: if (match_fourchar(&args[0], &hsb->s_type)) { printk(KERN_ERR "hfs: type requires a 4 character value\n"); return 0; } break; case opt_creator: if (match_fourchar(&args[0], &hsb->s_creator)) { printk(KERN_ERR "hfs: creator requires a 4 character value\n"); return 0; } break; case opt_quiet: hsb->s_quiet = 1; break; case opt_codepage: if (hsb->nls_disk) { printk(KERN_ERR "hfs: unable to change codepage\n"); return 0; } p = match_strdup(&args[0]); if (p) hsb->nls_disk = load_nls(p); if (!hsb->nls_disk) { printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p); kfree(p); return 0; } kfree(p); break; case opt_iocharset: if (hsb->nls_io) { printk(KERN_ERR "hfs: unable to change iocharset\n"); return 0; } p = 
match_strdup(&args[0]); if (p) hsb->nls_io = load_nls(p); if (!hsb->nls_io) { printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p); kfree(p); return 0; } kfree(p); break; default: return 0; } } if (hsb->nls_disk && !hsb->nls_io) { hsb->nls_io = load_nls_default(); if (!hsb->nls_io) { printk(KERN_ERR "hfs: unable to load default iocharset\n"); return 0; } } hsb->s_dir_umask &= 0777; hsb->s_file_umask &= 0577; return 1; } /* * hfs_read_super() * * This is the function that is responsible for mounting an HFS * filesystem. It performs all the tasks necessary to get enough data * from the disk to read the root inode. This includes parsing the * mount options, dealing with Macintosh partitions, reading the * superblock and the allocation bitmap blocks, calling * hfs_btree_init() to get the necessary data about the extents and * catalog B-trees and, finally, reading the root inode into memory. */ static int hfs_fill_super(struct super_block *sb, void *data, int silent) { struct hfs_sb_info *sbi; struct hfs_find_data fd; hfs_cat_rec rec; struct inode *root_inode; int res; sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; res = -EINVAL; if (!parse_options((char *)data, sbi)) { printk(KERN_ERR "hfs: unable to parse mount options.\n"); goto bail; } sb->s_op = &hfs_super_operations; sb->s_flags |= MS_NODIRATIME; mutex_init(&sbi->bitmap_lock); res = hfs_mdb_get(sb); if (res) { if (!silent) printk(KERN_WARNING "hfs: can't find a HFS filesystem on dev %s.\n", hfs_mdb_name(sb)); res = -EINVAL; goto bail; } /* try to get the root inode */ hfs_find_init(HFS_SB(sb)->cat_tree, &fd); res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); if (!res) { if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { res = -EIO; goto bail; } hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); } if (res) { hfs_find_exit(&fd); goto bail_no_root; } res = -EINVAL; root_inode = hfs_iget(sb, &fd.search_key->cat, &rec); 
hfs_find_exit(&fd); if (!root_inode) goto bail_no_root; sb->s_d_op = &hfs_dentry_operations; res = -ENOMEM; sb->s_root = d_make_root(root_inode); if (!sb->s_root) goto bail_no_root; /* everything's okay */ return 0; bail_no_root: printk(KERN_ERR "hfs: get root inode failed.\n"); bail: hfs_mdb_put(sb); return res; } static struct dentry *hfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, hfs_fill_super); } static struct file_system_type hfs_fs_type = { .owner = THIS_MODULE, .name = "hfs", .mount = hfs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static void hfs_init_once(void *p) { struct hfs_inode_info *i = p; inode_init_once(&i->vfs_inode); } static int __init init_hfs_fs(void) { int err; hfs_inode_cachep = kmem_cache_create("hfs_inode_cache", sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN, hfs_init_once); if (!hfs_inode_cachep) return -ENOMEM; err = register_filesystem(&hfs_fs_type); if (err) kmem_cache_destroy(hfs_inode_cachep); return err; } static void __exit exit_hfs_fs(void) { unregister_filesystem(&hfs_fs_type); kmem_cache_destroy(hfs_inode_cachep); } module_init(init_hfs_fs) module_exit(exit_hfs_fs)
gpl-2.0
ffosilva/android_kernel_sony_msm8974
fs/proc/meminfo.c
4700
4969
/*
 * fs/proc/meminfo.c -- implements /proc/meminfo: a one-shot,
 * grep-friendly dump of system-wide memory counters rendered
 * through the seq_file single_open() interface.
 */
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/quicklist.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/vmstat.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "internal.h"

/*
 * Weak default: an architecture may supply its own arch_report_meminfo()
 * to append arch-specific lines; this stub prints nothing.
 */
void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
{
}

/*
 * seq_file "show" callback that renders the entire /proc/meminfo text.
 *
 * @m: seq_file buffer the report is written into
 * @v: iterator cookie from single_open(); unused here
 *
 * Returns 0; output is delivered through @m.
 */
static int meminfo_proc_show(struct seq_file *m, void *v)
{
	struct sysinfo i;
	unsigned long committed;
	unsigned long allowed;
	struct vmalloc_info vmi;
	long cached;	/* deliberately signed: the computed value can dip below 0 */
	unsigned long pages[NR_LRU_LISTS];
	int lru;
/*
 * display in kilobytes.
 */
#define K(x) ((x) << (PAGE_SHIFT - 10))
	si_meminfo(&i);
	si_swapinfo(&i);
	/* Values feeding the Committed_AS / CommitLimit lines below. */
	committed = percpu_counter_read_positive(&vm_committed_as);
	allowed = ((totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100) + total_swap_pages;

	/*
	 * The counters are sampled without synchronization, so the
	 * difference can transiently go negative; clamp it to zero
	 * rather than report a bogus huge unsigned number.
	 */
	cached = global_page_state(NR_FILE_PAGES) -
			total_swapcache_pages - i.bufferram;
	if (cached < 0)
		cached = 0;

	get_vmalloc_info(&vmi);

	/* Snapshot every LRU list size in one pass. */
	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_page_state(NR_LRU_BASE + lru);

	/*
	 * Tagged format, for easy grepping and expansion.
	 *
	 * NOTE: each #ifdef block in the format string below has a
	 * matching #ifdef block in the argument list further down;
	 * the two must stay in lockstep or format specifiers and
	 * arguments will mismatch.
	 */
	seq_printf(m,
		"MemTotal: %8lu kB\n"
		"MemFree: %8lu kB\n"
		"Buffers: %8lu kB\n"
		"Cached: %8lu kB\n"
		"SwapCached: %8lu kB\n"
		"Active: %8lu kB\n"
		"Inactive: %8lu kB\n"
		"Active(anon): %8lu kB\n"
		"Inactive(anon): %8lu kB\n"
		"Active(file): %8lu kB\n"
		"Inactive(file): %8lu kB\n"
		"Unevictable: %8lu kB\n"
		"Mlocked: %8lu kB\n"
#ifdef CONFIG_HIGHMEM
		"HighTotal: %8lu kB\n"
		"HighFree: %8lu kB\n"
		"LowTotal: %8lu kB\n"
		"LowFree: %8lu kB\n"
#endif
#ifndef CONFIG_MMU
		"MmapCopy: %8lu kB\n"
#endif
		"SwapTotal: %8lu kB\n"
		"SwapFree: %8lu kB\n"
		"Dirty: %8lu kB\n"
		"Writeback: %8lu kB\n"
		"AnonPages: %8lu kB\n"
		"Mapped: %8lu kB\n"
		"Shmem: %8lu kB\n"
		"Slab: %8lu kB\n"
		"SReclaimable: %8lu kB\n"
		"SUnreclaim: %8lu kB\n"
		"KernelStack: %8lu kB\n"
		"PageTables: %8lu kB\n"
#ifdef CONFIG_QUICKLIST
		"Quicklists: %8lu kB\n"
#endif
		"NFS_Unstable: %8lu kB\n"
		"Bounce: %8lu kB\n"
		"WritebackTmp: %8lu kB\n"
		"CommitLimit: %8lu kB\n"
		"Committed_AS: %8lu kB\n"
		"VmallocTotal: %8lu kB\n"
		"VmallocUsed: %8lu kB\n"
		"VmallocChunk: %8lu kB\n"
#ifdef CONFIG_MEMORY_FAILURE
		"HardwareCorrupted: %5lu kB\n"
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		"AnonHugePages: %8lu kB\n"
#endif
		,
		K(i.totalram),
		K(i.freeram),
		K(i.bufferram),
		K(cached),
		K(total_swapcache_pages),
		K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]),
		K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]),
		K(pages[LRU_ACTIVE_ANON]),
		K(pages[LRU_INACTIVE_ANON]),
		K(pages[LRU_ACTIVE_FILE]),
		K(pages[LRU_INACTIVE_FILE]),
		K(pages[LRU_UNEVICTABLE]),
		K(global_page_state(NR_MLOCK)),
#ifdef CONFIG_HIGHMEM
		K(i.totalhigh),
		K(i.freehigh),
		K(i.totalram-i.totalhigh),
		K(i.freeram-i.freehigh),
#endif
#ifndef CONFIG_MMU
		K((unsigned long) atomic_long_read(&mmap_pages_allocated)),
#endif
		K(i.totalswap),
		K(i.freeswap),
		K(global_page_state(NR_FILE_DIRTY)),
		K(global_page_state(NR_WRITEBACK)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* huge pages are counted once; scale them to base pages */
		K(global_page_state(NR_ANON_PAGES)
		  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
		  HPAGE_PMD_NR),
#else
		K(global_page_state(NR_ANON_PAGES)),
#endif
		K(global_page_state(NR_FILE_MAPPED)),
		K(global_page_state(NR_SHMEM)),
		K(global_page_state(NR_SLAB_RECLAIMABLE) +
				global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_SLAB_RECLAIMABLE)),
		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
		/* kernel stacks are counted in stacks, not pages: convert directly to kB */
		global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
		K(global_page_state(NR_PAGETABLE)),
#ifdef CONFIG_QUICKLIST
		K(quicklist_total_size()),
#endif
		K(global_page_state(NR_UNSTABLE_NFS)),
		K(global_page_state(NR_BOUNCE)),
		K(global_page_state(NR_WRITEBACK_TEMP)),
		K(allowed),
		K(committed),
		(unsigned long)VMALLOC_TOTAL >> 10,
		vmi.used >> 10,
		vmi.largest_chunk >> 10
#ifdef CONFIG_MEMORY_FAILURE
		,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
		   HPAGE_PMD_NR)
#endif
		);

	/* Let hugetlbfs and the architecture append their own lines. */
	hugetlb_report_meminfo(m);
	arch_report_meminfo(m);
	return 0;
#undef K
}

/* open() handler: whole report produced by a single show() call. */
static int meminfo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, meminfo_proc_show, NULL);
}

static const struct file_operations meminfo_proc_fops = {
	.open		= meminfo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Register /proc/meminfo (mode 0, default root-readable semantics). */
static int __init proc_meminfo_init(void)
{
	proc_create("meminfo", 0, NULL, &meminfo_proc_fops);
	return 0;
}
module_init(proc_meminfo_init);
gpl-2.0
arrrghhh/android_kernel_samsung_mondrianwifi
fs/proc/meminfo.c
4700
4969
/*
 * fs/proc/meminfo.c -- implements /proc/meminfo: a one-shot,
 * grep-friendly dump of system-wide memory counters rendered
 * through the seq_file single_open() interface.
 */
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/quicklist.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/vmstat.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "internal.h"

/*
 * Weak default: an architecture may supply its own arch_report_meminfo()
 * to append arch-specific lines; this stub prints nothing.
 */
void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
{
}

/*
 * seq_file "show" callback that renders the entire /proc/meminfo text.
 *
 * @m: seq_file buffer the report is written into
 * @v: iterator cookie from single_open(); unused here
 *
 * Returns 0; output is delivered through @m.
 */
static int meminfo_proc_show(struct seq_file *m, void *v)
{
	struct sysinfo i;
	unsigned long committed;
	unsigned long allowed;
	struct vmalloc_info vmi;
	long cached;	/* deliberately signed: the computed value can dip below 0 */
	unsigned long pages[NR_LRU_LISTS];
	int lru;
/*
 * display in kilobytes.
 */
#define K(x) ((x) << (PAGE_SHIFT - 10))
	si_meminfo(&i);
	si_swapinfo(&i);
	/* Values feeding the Committed_AS / CommitLimit lines below. */
	committed = percpu_counter_read_positive(&vm_committed_as);
	allowed = ((totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100) + total_swap_pages;

	/*
	 * The counters are sampled without synchronization, so the
	 * difference can transiently go negative; clamp it to zero
	 * rather than report a bogus huge unsigned number.
	 */
	cached = global_page_state(NR_FILE_PAGES) -
			total_swapcache_pages - i.bufferram;
	if (cached < 0)
		cached = 0;

	get_vmalloc_info(&vmi);

	/* Snapshot every LRU list size in one pass. */
	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_page_state(NR_LRU_BASE + lru);

	/*
	 * Tagged format, for easy grepping and expansion.
	 *
	 * NOTE: each #ifdef block in the format string below has a
	 * matching #ifdef block in the argument list further down;
	 * the two must stay in lockstep or format specifiers and
	 * arguments will mismatch.
	 */
	seq_printf(m,
		"MemTotal: %8lu kB\n"
		"MemFree: %8lu kB\n"
		"Buffers: %8lu kB\n"
		"Cached: %8lu kB\n"
		"SwapCached: %8lu kB\n"
		"Active: %8lu kB\n"
		"Inactive: %8lu kB\n"
		"Active(anon): %8lu kB\n"
		"Inactive(anon): %8lu kB\n"
		"Active(file): %8lu kB\n"
		"Inactive(file): %8lu kB\n"
		"Unevictable: %8lu kB\n"
		"Mlocked: %8lu kB\n"
#ifdef CONFIG_HIGHMEM
		"HighTotal: %8lu kB\n"
		"HighFree: %8lu kB\n"
		"LowTotal: %8lu kB\n"
		"LowFree: %8lu kB\n"
#endif
#ifndef CONFIG_MMU
		"MmapCopy: %8lu kB\n"
#endif
		"SwapTotal: %8lu kB\n"
		"SwapFree: %8lu kB\n"
		"Dirty: %8lu kB\n"
		"Writeback: %8lu kB\n"
		"AnonPages: %8lu kB\n"
		"Mapped: %8lu kB\n"
		"Shmem: %8lu kB\n"
		"Slab: %8lu kB\n"
		"SReclaimable: %8lu kB\n"
		"SUnreclaim: %8lu kB\n"
		"KernelStack: %8lu kB\n"
		"PageTables: %8lu kB\n"
#ifdef CONFIG_QUICKLIST
		"Quicklists: %8lu kB\n"
#endif
		"NFS_Unstable: %8lu kB\n"
		"Bounce: %8lu kB\n"
		"WritebackTmp: %8lu kB\n"
		"CommitLimit: %8lu kB\n"
		"Committed_AS: %8lu kB\n"
		"VmallocTotal: %8lu kB\n"
		"VmallocUsed: %8lu kB\n"
		"VmallocChunk: %8lu kB\n"
#ifdef CONFIG_MEMORY_FAILURE
		"HardwareCorrupted: %5lu kB\n"
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		"AnonHugePages: %8lu kB\n"
#endif
		,
		K(i.totalram),
		K(i.freeram),
		K(i.bufferram),
		K(cached),
		K(total_swapcache_pages),
		K(pages[LRU_ACTIVE_ANON] + pages[LRU_ACTIVE_FILE]),
		K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]),
		K(pages[LRU_ACTIVE_ANON]),
		K(pages[LRU_INACTIVE_ANON]),
		K(pages[LRU_ACTIVE_FILE]),
		K(pages[LRU_INACTIVE_FILE]),
		K(pages[LRU_UNEVICTABLE]),
		K(global_page_state(NR_MLOCK)),
#ifdef CONFIG_HIGHMEM
		K(i.totalhigh),
		K(i.freehigh),
		K(i.totalram-i.totalhigh),
		K(i.freeram-i.freehigh),
#endif
#ifndef CONFIG_MMU
		K((unsigned long) atomic_long_read(&mmap_pages_allocated)),
#endif
		K(i.totalswap),
		K(i.freeswap),
		K(global_page_state(NR_FILE_DIRTY)),
		K(global_page_state(NR_WRITEBACK)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* huge pages are counted once; scale them to base pages */
		K(global_page_state(NR_ANON_PAGES)
		  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
		  HPAGE_PMD_NR),
#else
		K(global_page_state(NR_ANON_PAGES)),
#endif
		K(global_page_state(NR_FILE_MAPPED)),
		K(global_page_state(NR_SHMEM)),
		K(global_page_state(NR_SLAB_RECLAIMABLE) +
				global_page_state(NR_SLAB_UNRECLAIMABLE)),
		K(global_page_state(NR_SLAB_RECLAIMABLE)),
		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
		/* kernel stacks are counted in stacks, not pages: convert directly to kB */
		global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
		K(global_page_state(NR_PAGETABLE)),
#ifdef CONFIG_QUICKLIST
		K(quicklist_total_size()),
#endif
		K(global_page_state(NR_UNSTABLE_NFS)),
		K(global_page_state(NR_BOUNCE)),
		K(global_page_state(NR_WRITEBACK_TEMP)),
		K(allowed),
		K(committed),
		(unsigned long)VMALLOC_TOTAL >> 10,
		vmi.used >> 10,
		vmi.largest_chunk >> 10
#ifdef CONFIG_MEMORY_FAILURE
		,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
		   HPAGE_PMD_NR)
#endif
		);

	/* Let hugetlbfs and the architecture append their own lines. */
	hugetlb_report_meminfo(m);
	arch_report_meminfo(m);
	return 0;
#undef K
}

/* open() handler: whole report produced by a single show() call. */
static int meminfo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, meminfo_proc_show, NULL);
}

static const struct file_operations meminfo_proc_fops = {
	.open		= meminfo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Register /proc/meminfo (mode 0, default root-readable semantics). */
static int __init proc_meminfo_init(void)
{
	proc_create("meminfo", 0, NULL, &meminfo_proc_fops);
	return 0;
}
module_init(proc_meminfo_init);
gpl-2.0
faux123/Galaxy_Note_2
fs/ocfs2/localalloc.c
8028
34144
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * localalloc.c * * Node local data allocation * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/bitops.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "blockcheck.h" #include "dlmglue.h" #include "inode.h" #include "journal.h" #include "localalloc.h" #include "suballoc.h" #include "super.h" #include "sysfile.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" #define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab)) static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc); static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb, struct ocfs2_dinode *alloc, u32 *numbits, struct ocfs2_alloc_reservation *resv); static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc); static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, handle_t *handle, struct ocfs2_dinode *alloc, struct inode *main_bm_inode, struct buffer_head *main_bm_bh); static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb, struct ocfs2_alloc_context **ac, struct inode **bitmap_inode, struct buffer_head **bitmap_bh); static int 
ocfs2_local_alloc_new_window(struct ocfs2_super *osb, handle_t *handle, struct ocfs2_alloc_context *ac); static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, struct inode *local_alloc_inode); /* * ocfs2_la_default_mb() - determine a default size, in megabytes of * the local alloc. * * Generally, we'd like to pick as large a local alloc as * possible. Performance on large workloads tends to scale * proportionally to la size. In addition to that, the reservations * code functions more efficiently as it can reserve more windows for * write. * * Some things work against us when trying to choose a large local alloc: * * - We need to ensure our sizing is picked to leave enough space in * group descriptors for other allocations (such as block groups, * etc). Picking default sizes which are a multiple of 4 could help * - block groups are allocated in 2mb and 4mb chunks. * * - Likewise, we don't want to starve other nodes of bits on small * file systems. This can easily be taken care of by limiting our * default to a reasonable size (256M) on larger cluster sizes. * * - Some file systems can't support very large sizes - 4k and 8k in * particular are limited to less than 128 and 256 megabytes respectively. 
* * The following reference table shows group descriptor and local * alloc maximums at various cluster sizes (4k blocksize) * * csize: 4K group: 126M la: 121M * csize: 8K group: 252M la: 243M * csize: 16K group: 504M la: 486M * csize: 32K group: 1008M la: 972M * csize: 64K group: 2016M la: 1944M * csize: 128K group: 4032M la: 3888M * csize: 256K group: 8064M la: 7776M * csize: 512K group: 16128M la: 15552M * csize: 1024K group: 32256M la: 31104M */ #define OCFS2_LA_MAX_DEFAULT_MB 256 #define OCFS2_LA_OLD_DEFAULT 8 unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb) { unsigned int la_mb; unsigned int gd_mb; unsigned int la_max_mb; unsigned int megs_per_slot; struct super_block *sb = osb->sb; gd_mb = ocfs2_clusters_to_megabytes(osb->sb, 8 * ocfs2_group_bitmap_size(sb, 0, osb->s_feature_incompat)); /* * This takes care of files systems with very small group * descriptors - 512 byte blocksize at cluster sizes lower * than 16K and also 1k blocksize with 4k cluster size. */ if ((sb->s_blocksize == 512 && osb->s_clustersize <= 8192) || (sb->s_blocksize == 1024 && osb->s_clustersize == 4096)) return OCFS2_LA_OLD_DEFAULT; /* * Leave enough room for some block groups and make the final * value we work from a multiple of 4. */ gd_mb -= 16; gd_mb &= 0xFFFFFFFB; la_mb = gd_mb; /* * Keep window sizes down to a reasonable default */ if (la_mb > OCFS2_LA_MAX_DEFAULT_MB) { /* * Some clustersize / blocksize combinations will have * given us a larger than OCFS2_LA_MAX_DEFAULT_MB * default size, but get poor distribution when * limited to exactly 256 megabytes. * * As an example, 16K clustersize at 4K blocksize * gives us a cluster group size of 504M. Paring the * local alloc size down to 256 however, would give us * only one window and around 200MB left in the * cluster group. Instead, find the first size below * 256 which would give us an even distribution. 
* * Larger cluster group sizes actually work out pretty * well when pared to 256, so we don't have to do this * for any group that fits more than two * OCFS2_LA_MAX_DEFAULT_MB windows. */ if (gd_mb > (2 * OCFS2_LA_MAX_DEFAULT_MB)) la_mb = 256; else { unsigned int gd_mult = gd_mb; while (gd_mult > 256) gd_mult = gd_mult >> 1; la_mb = gd_mult; } } megs_per_slot = osb->osb_clusters_at_boot / osb->max_slots; megs_per_slot = ocfs2_clusters_to_megabytes(osb->sb, megs_per_slot); /* Too many nodes, too few disk clusters. */ if (megs_per_slot < la_mb) la_mb = megs_per_slot; /* We can't store more bits than we can in a block. */ la_max_mb = ocfs2_clusters_to_megabytes(osb->sb, ocfs2_local_alloc_size(sb) * 8); if (la_mb > la_max_mb) la_mb = la_max_mb; return la_mb; } void ocfs2_la_set_sizes(struct ocfs2_super *osb, int requested_mb) { struct super_block *sb = osb->sb; unsigned int la_default_mb = ocfs2_la_default_mb(osb); unsigned int la_max_mb; la_max_mb = ocfs2_clusters_to_megabytes(sb, ocfs2_local_alloc_size(sb) * 8); trace_ocfs2_la_set_sizes(requested_mb, la_max_mb, la_default_mb); if (requested_mb == -1) { /* No user request - use defaults */ osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, la_default_mb); } else if (requested_mb > la_max_mb) { /* Request is too big, we give the maximum available */ osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, la_max_mb); } else { osb->local_alloc_default_bits = ocfs2_megabytes_to_clusters(sb, requested_mb); } osb->local_alloc_bits = osb->local_alloc_default_bits; } static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb) { return (osb->local_alloc_state == OCFS2_LA_THROTTLED || osb->local_alloc_state == OCFS2_LA_ENABLED); } void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb, unsigned int num_clusters) { spin_lock(&osb->osb_lock); if (osb->local_alloc_state == OCFS2_LA_DISABLED || osb->local_alloc_state == OCFS2_LA_THROTTLED) if (num_clusters >= osb->local_alloc_default_bits) { 
cancel_delayed_work(&osb->la_enable_wq); osb->local_alloc_state = OCFS2_LA_ENABLED; } spin_unlock(&osb->osb_lock); } void ocfs2_la_enable_worker(struct work_struct *work) { struct ocfs2_super *osb = container_of(work, struct ocfs2_super, la_enable_wq.work); spin_lock(&osb->osb_lock); osb->local_alloc_state = OCFS2_LA_ENABLED; spin_unlock(&osb->osb_lock); } /* * Tell us whether a given allocation should use the local alloc * file. Otherwise, it has to go to the main bitmap. * * This function does semi-dirty reads of local alloc size and state! * This is ok however, as the values are re-checked once under mutex. */ int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits) { int ret = 0; int la_bits; spin_lock(&osb->osb_lock); la_bits = osb->local_alloc_bits; if (!ocfs2_la_state_enabled(osb)) goto bail; /* la_bits should be at least twice the size (in clusters) of * a new block group. We want to be sure block group * allocations go through the local alloc, so allow an * allocation to take up to half the bitmap. */ if (bits > (la_bits / 2)) goto bail; ret = 1; bail: trace_ocfs2_alloc_should_use_local( (unsigned long long)bits, osb->local_alloc_state, la_bits, ret); spin_unlock(&osb->osb_lock); return ret; } int ocfs2_load_local_alloc(struct ocfs2_super *osb) { int status = 0; struct ocfs2_dinode *alloc = NULL; struct buffer_head *alloc_bh = NULL; u32 num_used; struct inode *inode = NULL; struct ocfs2_local_alloc *la; if (osb->local_alloc_bits == 0) goto bail; if (osb->local_alloc_bits >= osb->bitmap_cpg) { mlog(ML_NOTICE, "Requested local alloc window %d is larger " "than max possible %u. 
Using defaults.\n", osb->local_alloc_bits, (osb->bitmap_cpg - 1)); osb->local_alloc_bits = ocfs2_megabytes_to_clusters(osb->sb, ocfs2_la_default_mb(osb)); } /* read the alloc off disk */ inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE, osb->slot_num); if (!inode) { status = -EINVAL; mlog_errno(status); goto bail; } status = ocfs2_read_inode_block_full(inode, &alloc_bh, OCFS2_BH_IGNORE_CACHE); if (status < 0) { mlog_errno(status); goto bail; } alloc = (struct ocfs2_dinode *) alloc_bh->b_data; la = OCFS2_LOCAL_ALLOC(alloc); if (!(le32_to_cpu(alloc->i_flags) & (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) { mlog(ML_ERROR, "Invalid local alloc inode, %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); status = -EINVAL; goto bail; } if ((la->la_size == 0) || (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) { mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n", le16_to_cpu(la->la_size)); status = -EINVAL; goto bail; } /* do a little verification. */ num_used = ocfs2_local_alloc_count_bits(alloc); /* hopefully the local alloc has always been recovered before * we load it. */ if (num_used || alloc->id1.bitmap1.i_used || alloc->id1.bitmap1.i_total || la->la_bm_off) mlog(ML_ERROR, "Local alloc hasn't been recovered!\n" "found = %u, set = %u, taken = %u, off = %u\n", num_used, le32_to_cpu(alloc->id1.bitmap1.i_used), le32_to_cpu(alloc->id1.bitmap1.i_total), OCFS2_LOCAL_ALLOC(alloc)->la_bm_off); osb->local_alloc_bh = alloc_bh; osb->local_alloc_state = OCFS2_LA_ENABLED; bail: if (status < 0) brelse(alloc_bh); if (inode) iput(inode); trace_ocfs2_load_local_alloc(osb->local_alloc_bits); if (status) mlog_errno(status); return status; } /* * return any unused bits to the bitmap and write out a clean * local_alloc. * * local_alloc_bh is optional. If not passed, we will simply use the * one off osb. 
If you do pass it however, be warned that it *will* be * returned brelse'd and NULL'd out.*/ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) { int status; handle_t *handle; struct inode *local_alloc_inode = NULL; struct buffer_head *bh = NULL; struct buffer_head *main_bm_bh = NULL; struct inode *main_bm_inode = NULL; struct ocfs2_dinode *alloc_copy = NULL; struct ocfs2_dinode *alloc = NULL; cancel_delayed_work(&osb->la_enable_wq); flush_workqueue(ocfs2_wq); if (osb->local_alloc_state == OCFS2_LA_UNUSED) goto out; local_alloc_inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE, osb->slot_num); if (!local_alloc_inode) { status = -ENOENT; mlog_errno(status); goto out; } osb->local_alloc_state = OCFS2_LA_DISABLED; ocfs2_resmap_uninit(&osb->osb_la_resmap); main_bm_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!main_bm_inode) { status = -EINVAL; mlog_errno(status); goto out; } mutex_lock(&main_bm_inode->i_mutex); status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); if (status < 0) { mlog_errno(status); goto out_mutex; } /* WINDOW_MOVE_CREDITS is a bit heavy... 
*/ handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); if (IS_ERR(handle)) { mlog_errno(PTR_ERR(handle)); handle = NULL; goto out_unlock; } bh = osb->local_alloc_bh; alloc = (struct ocfs2_dinode *) bh->b_data; alloc_copy = kmalloc(bh->b_size, GFP_NOFS); if (!alloc_copy) { status = -ENOMEM; goto out_commit; } memcpy(alloc_copy, alloc, bh->b_size); status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_commit; } ocfs2_clear_local_alloc(alloc); ocfs2_journal_dirty(handle, bh); brelse(bh); osb->local_alloc_bh = NULL; osb->local_alloc_state = OCFS2_LA_UNUSED; status = ocfs2_sync_local_to_main(osb, handle, alloc_copy, main_bm_inode, main_bm_bh); if (status < 0) mlog_errno(status); out_commit: ocfs2_commit_trans(osb, handle); out_unlock: brelse(main_bm_bh); ocfs2_inode_unlock(main_bm_inode, 1); out_mutex: mutex_unlock(&main_bm_inode->i_mutex); iput(main_bm_inode); out: if (local_alloc_inode) iput(local_alloc_inode); if (alloc_copy) kfree(alloc_copy); } /* * We want to free the bitmap bits outside of any recovery context as * we'll need a cluster lock to do so, but we must clear the local * alloc before giving up the recovered nodes journal. 
To solve this, * we kmalloc a copy of the local alloc before it's change for the * caller to process with ocfs2_complete_local_alloc_recovery */ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb, int slot_num, struct ocfs2_dinode **alloc_copy) { int status = 0; struct buffer_head *alloc_bh = NULL; struct inode *inode = NULL; struct ocfs2_dinode *alloc; trace_ocfs2_begin_local_alloc_recovery(slot_num); *alloc_copy = NULL; inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE, slot_num); if (!inode) { status = -EINVAL; mlog_errno(status); goto bail; } mutex_lock(&inode->i_mutex); status = ocfs2_read_inode_block_full(inode, &alloc_bh, OCFS2_BH_IGNORE_CACHE); if (status < 0) { mlog_errno(status); goto bail; } *alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL); if (!(*alloc_copy)) { status = -ENOMEM; goto bail; } memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size); alloc = (struct ocfs2_dinode *) alloc_bh->b_data; ocfs2_clear_local_alloc(alloc); ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check); status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode)); if (status < 0) mlog_errno(status); bail: if ((status < 0) && (*alloc_copy)) { kfree(*alloc_copy); *alloc_copy = NULL; } brelse(alloc_bh); if (inode) { mutex_unlock(&inode->i_mutex); iput(inode); } if (status) mlog_errno(status); return status; } /* * Step 2: By now, we've completed the journal recovery, we've stamped * a clean local alloc on disk and dropped the node out of the * recovery map. Dlm locks will no longer stall, so lets clear out the * main bitmap. 
*/ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb, struct ocfs2_dinode *alloc) { int status; handle_t *handle; struct buffer_head *main_bm_bh = NULL; struct inode *main_bm_inode; main_bm_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!main_bm_inode) { status = -EINVAL; mlog_errno(status); goto out; } mutex_lock(&main_bm_inode->i_mutex); status = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); if (status < 0) { mlog_errno(status); goto out_mutex; } handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; mlog_errno(status); goto out_unlock; } /* we want the bitmap change to be recorded on disk asap */ handle->h_sync = 1; status = ocfs2_sync_local_to_main(osb, handle, alloc, main_bm_inode, main_bm_bh); if (status < 0) mlog_errno(status); ocfs2_commit_trans(osb, handle); out_unlock: ocfs2_inode_unlock(main_bm_inode, 1); out_mutex: mutex_unlock(&main_bm_inode->i_mutex); brelse(main_bm_bh); iput(main_bm_inode); out: if (!status) ocfs2_init_steal_slots(osb); if (status) mlog_errno(status); return status; } /* * make sure we've got at least bits_wanted contiguous bits in the * local alloc. You lose them when you drop i_mutex. * * We will add ourselves to the transaction passed in, but may start * our own in order to shift windows. */ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb, u32 bits_wanted, struct ocfs2_alloc_context *ac) { int status; struct ocfs2_dinode *alloc; struct inode *local_alloc_inode; unsigned int free_bits; BUG_ON(!ac); local_alloc_inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE, osb->slot_num); if (!local_alloc_inode) { status = -ENOENT; mlog_errno(status); goto bail; } mutex_lock(&local_alloc_inode->i_mutex); /* * We must double check state and allocator bits because * another process may have changed them while holding i_mutex. 
*/ spin_lock(&osb->osb_lock); if (!ocfs2_la_state_enabled(osb) || (bits_wanted > osb->local_alloc_bits)) { spin_unlock(&osb->osb_lock); status = -ENOSPC; goto bail; } spin_unlock(&osb->osb_lock); alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data; #ifdef CONFIG_OCFS2_DEBUG_FS if (le32_to_cpu(alloc->id1.bitmap1.i_used) != ocfs2_local_alloc_count_bits(alloc)) { ocfs2_error(osb->sb, "local alloc inode %llu says it has " "%u free bits, but a count shows %u", (unsigned long long)le64_to_cpu(alloc->i_blkno), le32_to_cpu(alloc->id1.bitmap1.i_used), ocfs2_local_alloc_count_bits(alloc)); status = -EIO; goto bail; } #endif free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) - le32_to_cpu(alloc->id1.bitmap1.i_used); if (bits_wanted > free_bits) { /* uhoh, window change time. */ status = ocfs2_local_alloc_slide_window(osb, local_alloc_inode); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); goto bail; } /* * Under certain conditions, the window slide code * might have reduced the number of bits available or * disabled the the local alloc entirely. Re-check * here and return -ENOSPC if necessary. 
 */
	status = -ENOSPC;
	if (!ocfs2_la_state_enabled(osb))
		goto bail;

	free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
		le32_to_cpu(alloc->id1.bitmap1.i_used);
	if (bits_wanted > free_bits)
		goto bail;
	}

	ac->ac_inode = local_alloc_inode;
	/* We should never use localalloc from another slot */
	ac->ac_alloc_slot = osb->slot_num;
	ac->ac_which = OCFS2_AC_USE_LOCAL;
	get_bh(osb->local_alloc_bh);
	ac->ac_bh = osb->local_alloc_bh;
	status = 0;
bail:
	/* On failure, drop the i_mutex taken earlier (outside this view)
	 * and the inode reference; the mutex is held on success so the
	 * caller can safely use the local alloc window. */
	if (status < 0 && local_alloc_inode) {
		mutex_unlock(&local_alloc_inode->i_mutex);
		iput(local_alloc_inode);
	}

	trace_ocfs2_reserve_local_alloc_bits(
		(unsigned long long)ac->ac_max_block,
		bits_wanted, osb->slot_num, status);

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Claim a contiguous run of bits from the local alloc bitmap for a
 * reservation previously made with OCFS2_AC_USE_LOCAL.
 *
 * On success, *bit_off is the cluster offset (la_bm_off relative) of the
 * run and *num_bits the number of bits actually claimed.  The claimed
 * bits are set in the bitmap and i_used is updated under the journal
 * transaction @handle.  Returns 0 or a negative errno (-ENOSPC when no
 * contiguous run of the requested size exists).
 */
int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
				 handle_t *handle,
				 struct ocfs2_alloc_context *ac,
				 u32 bits_wanted,
				 u32 *bit_off,
				 u32 *num_bits)
{
	int status, start;
	struct inode *local_alloc_inode;
	void *bitmap;
	struct ocfs2_dinode *alloc;
	struct ocfs2_local_alloc *la;

	BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);

	local_alloc_inode = ac->ac_inode;
	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	/* May shrink bits_wanted if the reservation system found a
	 * smaller contiguous window. */
	start = ocfs2_local_alloc_find_clear_bits(osb, alloc, &bits_wanted,
						  ac->ac_resv);
	if (start == -1) {
		/* TODO: Shouldn't we just BUG here? */
		status = -ENOSPC;
		mlog_errno(status);
		goto bail;
	}

	bitmap = la->la_bitmap;
	*bit_off = le32_to_cpu(la->la_bm_off) + start;
	*num_bits = bits_wanted;

	/* Get journal write access before dirtying the local alloc inode. */
	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Tell the reservation map which bits were consumed. */
	ocfs2_resmap_claimed_bits(&osb->osb_la_resmap, ac->ac_resv, start,
				  bits_wanted);

	while(bits_wanted--)
		ocfs2_set_bit(start++, bitmap);

	le32_add_cpu(&alloc->id1.bitmap1.i_used, *num_bits);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

bail:
	if (status)
		mlog_errno(status);
	return status;
}

/* Count the set bits in the local alloc bitmap (used-bit population). */
static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
	int i;
	u8 *buffer;
	u32 count = 0;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	buffer = la->la_bitmap;
	for (i = 0; i < le16_to_cpu(la->la_size); i++)
		count += hweight8(buffer[i]);

	trace_ocfs2_local_alloc_count_bits(count);
	return count;
}

/*
 * Find a contiguous run of up to *numbits clear bits in the local alloc
 * bitmap.  The reservation subsystem is consulted first; *numbits may be
 * reduced to what could actually be found.  Returns the starting bit
 * offset, or -1 when nothing suitable exists.
 */
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
				     struct ocfs2_dinode *alloc,
				     u32 *numbits,
				     struct ocfs2_alloc_reservation *resv)
{
	int numfound, bitoff, left, startoff, lastzero;
	int local_resv = 0;
	struct ocfs2_alloc_reservation r;
	void *bitmap = NULL;
	struct ocfs2_reservation_map *resmap = &osb->osb_la_resmap;

	if (!alloc->id1.bitmap1.i_total) {
		bitoff = -1;
		goto bail;
	}

	/* No caller reservation: use a temporary one for this search. */
	if (!resv) {
		local_resv = 1;
		ocfs2_resv_init_once(&r);
		ocfs2_resv_set_type(&r, OCFS2_RESV_FLAG_TMP);
		resv = &r;
	}

	numfound = *numbits;
	if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {
		if (numfound < *numbits)
			*numbits = numfound;
		goto bail;
	}

	/*
	 * Code error. While reservations are enabled, local
	 * allocation should _always_ go through them.
	 */
	BUG_ON(osb->osb_resv_level != 0);

	/*
	 * Reservations are disabled. Handle this the old way:
	 * linear scan for a contiguous run of zero bits.
	 */
	bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;

	numfound = bitoff = startoff = 0;
	lastzero = -1;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
	while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
		if (bitoff == left) {
			/* mlog(0, "bitoff (%d) == left", bitoff); */
			break;
		}
		/* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
		   "numfound = %d\n", bitoff, startoff, numfound);*/

		/* Ok, we found a zero bit... is it contig. or do we
		 * start over?*/
		if (bitoff == startoff) {
			/* we found a zero */
			numfound++;
			startoff++;
		} else {
			/* got a zero after some ones */
			numfound = 1;
			startoff = bitoff+1;
		}
		/* we got everything we needed */
		if (numfound == *numbits) {
			/* mlog(0, "Found it all!\n"); */
			break;
		}
	}

	trace_ocfs2_local_alloc_find_clear_bits_search_bitmap(bitoff, numfound);

	if (numfound == *numbits)
		bitoff = startoff - numfound;
	else
		bitoff = -1;

bail:
	if (local_resv)
		ocfs2_resv_discard(resmap, resv);

	trace_ocfs2_local_alloc_find_clear_bits(*numbits,
						le32_to_cpu(alloc->id1.bitmap1.i_total),
						bitoff, numfound);

	return bitoff;
}

/* Reset the local alloc descriptor: zero the counters, the window
 * offset and the entire bitmap. */
static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
{
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
	int i;

	alloc->id1.bitmap1.i_total = 0;
	alloc->id1.bitmap1.i_used = 0;
	la->la_bm_off = 0;
	for(i = 0; i < le16_to_cpu(la->la_size); i++)
		la->la_bitmap[i] = 0;
}

#if 0
/* turn this on and uncomment below to aid debugging window shifts. */
static void ocfs2_verify_zero_bits(unsigned long *bitmap,
				   unsigned int start,
				   unsigned int count)
{
	unsigned int tmp = count;

	while(tmp--) {
		if (ocfs2_test_bit(start + tmp, bitmap)) {
			printk("ocfs2_verify_zero_bits: start = %u, count = "
			       "%u\n", start, count);
			printk("ocfs2_verify_zero_bits: bit %u is set!",
			       start + tmp);
			BUG();
		}
	}
}
#endif

/*
 * sync the local alloc to main bitmap.
 *
 * assumes you've already locked the main bitmap -- the bitmap inode
 * passed is used for caching.
 */
static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
				    handle_t *handle,
				    struct ocfs2_dinode *alloc,
				    struct inode *main_bm_inode,
				    struct buffer_head *main_bm_bh)
{
	int status = 0;
	int bit_off, left, count, start;
	u64 la_start_blk;
	u64 blkno;
	void *bitmap;
	struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_sync_local_to_main(
	     le32_to_cpu(alloc->id1.bitmap1.i_total),
	     le32_to_cpu(alloc->id1.bitmap1.i_used));

	/* Empty window: nothing to return to the global bitmap. */
	if (!alloc->id1.bitmap1.i_total) {
		goto bail;
	}

	/* Fully used window: no free runs to release either. */
	if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
	    le32_to_cpu(alloc->id1.bitmap1.i_total)) {
		goto bail;
	}

	la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
						le32_to_cpu(la->la_bm_off));
	bitmap = la->la_bitmap;
	start = count = bit_off = 0;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);

	/* Walk the bitmap accumulating contiguous runs of zero (free)
	 * bits and release each run back to the main bitmap. */
	while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
	       != -1) {
		if ((bit_off < left) && (bit_off == start)) {
			/* Run continues; keep accumulating. */
			count++;
			start++;
			continue;
		}
		if (count) {
			blkno = la_start_blk +
				ocfs2_clusters_to_blocks(osb->sb,
							 start - count);

			trace_ocfs2_sync_local_to_main_free(
			     count, start - count,
			     (unsigned long long)la_start_blk,
			     (unsigned long long)blkno);

			status = ocfs2_release_clusters(handle,
							main_bm_inode,
							main_bm_bh, blkno,
							count);
			if (status < 0) {
				mlog_errno(status);
				goto bail;
			}
		}
		if (bit_off >= left)
			break;
		count = 1;
		start = bit_off + 1;
	}

bail:
	if (status)
		mlog_errno(status);
	return status;
}

enum ocfs2_la_event {
	OCFS2_LA_EVENT_SLIDE,		/* Normal window slide. */
	OCFS2_LA_EVENT_FRAGMENTED,	/* The global bitmap has
					 * enough bits theoretically
					 * free, but a contiguous
					 * allocation could not be
					 * found. */
	OCFS2_LA_EVENT_ENOSPC,		/* Global bitmap doesn't have
					 * enough bits free to satisfy
					 * our request. */
};
/* Delay before re-enabling a disabled/throttled local alloc. */
#define OCFS2_LA_ENABLE_INTERVAL (30 * HZ)

/*
 * Given an event, calculate the size of our next local alloc window.
 *
 * This should always be called under i_mutex of the local alloc inode
 * so that local alloc disabling doesn't race with processes trying to
 * use the allocator.
 *
 * Returns the state which the local alloc was left in. This value can
 * be ignored by some paths.
 */
static int ocfs2_recalc_la_window(struct ocfs2_super *osb,
				  enum ocfs2_la_event event)
{
	unsigned int bits;
	int state;

	spin_lock(&osb->osb_lock);
	if (osb->local_alloc_state == OCFS2_LA_DISABLED) {
		/* Callers shouldn't reach here once disabled; warn once. */
		WARN_ON_ONCE(osb->local_alloc_state == OCFS2_LA_DISABLED);
		goto out_unlock;
	}

	/*
	 * ENOSPC and fragmentation are treated similarly for now.
	 */
	if (event == OCFS2_LA_EVENT_ENOSPC ||
	    event == OCFS2_LA_EVENT_FRAGMENTED) {
		/*
		 * We ran out of contiguous space in the primary
		 * bitmap. Drastically reduce the number of bits used
		 * by local alloc until we have to disable it.
		 */
		bits = osb->local_alloc_bits >> 1;
		if (bits > ocfs2_megabytes_to_clusters(osb->sb, 1)) {
			/*
			 * By setting state to THROTTLED, we'll keep
			 * the number of local alloc bits used down
			 * until an event occurs which would give us
			 * reason to assume the bitmap situation might
			 * have changed.
			 */
			osb->local_alloc_state = OCFS2_LA_THROTTLED;
			osb->local_alloc_bits = bits;
		} else {
			osb->local_alloc_state = OCFS2_LA_DISABLED;
		}
		/* Schedule a deferred attempt to re-enable/grow later. */
		queue_delayed_work(ocfs2_wq, &osb->la_enable_wq,
				   OCFS2_LA_ENABLE_INTERVAL);
		goto out_unlock;
	}

	/*
	 * Don't increase the size of the local alloc window until we
	 * know we might be able to fulfill the request. Otherwise, we
	 * risk bouncing around the global bitmap during periods of
	 * low space.
	 */
	if (osb->local_alloc_state != OCFS2_LA_THROTTLED)
		osb->local_alloc_bits = osb->local_alloc_default_bits;

out_unlock:
	state = osb->local_alloc_state;
	spin_unlock(&osb->osb_lock);

	return state;
}

/*
 * Reserve bits in the cluster bitmap for a new local alloc window.  On
 * -ENOSPC the window is shrunk via ocfs2_recalc_la_window() and the
 * reservation retried until it succeeds or local alloc is disabled.
 * On success *bitmap_inode/*bitmap_bh carry extra references the caller
 * must drop.
 */
static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
						struct ocfs2_alloc_context **ac,
						struct inode **bitmap_inode,
						struct buffer_head **bitmap_bh)
{
	int status;

	*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
	if (!(*ac)) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

retry_enospc:
	(*ac)->ac_bits_wanted = osb->local_alloc_default_bits;
	status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
	if (status == -ENOSPC) {
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		/* Reset the context and retry with the (smaller) window. */
		ocfs2_free_ac_resource(*ac);
		memset(*ac, 0, sizeof(struct ocfs2_alloc_context));
		goto retry_enospc;
	}
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	*bitmap_inode = (*ac)->ac_inode;
	igrab(*bitmap_inode);
	*bitmap_bh = (*ac)->ac_bh;
	get_bh(*bitmap_bh);
	status = 0;
bail:
	if ((status < 0) && *ac) {
		ocfs2_free_alloc_context(*ac);
		*ac = NULL;
	}

	if (status)
		mlog_errno(status);
	return status;
}

/*
 * pass it the bitmap lock in lock_bh if you have it.
 */
static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
					handle_t *handle,
					struct ocfs2_alloc_context *ac)
{
	int status = 0;
	u32 cluster_off, cluster_count;
	struct ocfs2_dinode *alloc = NULL;
	struct ocfs2_local_alloc *la;

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
	la = OCFS2_LOCAL_ALLOC(alloc);

	trace_ocfs2_local_alloc_new_window(
		le32_to_cpu(alloc->id1.bitmap1.i_total),
		osb->local_alloc_bits);

	/* Instruct the allocation code to try the most recently used
	 * cluster group. We'll re-record the group used this pass
	 * below. */
	ac->ac_last_group = osb->la_last_gd;

	/* we used the generic suballoc reserve function, but we set
	 * everything up nicely, so there's no reason why we can't use
	 * the more specific cluster api to claim bits.
	 */
	status = ocfs2_claim_clusters(handle, ac, osb->local_alloc_bits,
				      &cluster_off, &cluster_count);
	if (status == -ENOSPC) {
retry_enospc:
		/*
		 * Note: We could also try syncing the journal here to
		 * allow use of any free bits which the current
		 * transaction can't give us access to. --Mark
		 */
		if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_FRAGMENTED) ==
		    OCFS2_LA_DISABLED)
			goto bail;

		ac->ac_bits_wanted = osb->local_alloc_default_bits;
		status = ocfs2_claim_clusters(handle, ac,
					      osb->local_alloc_bits,
					      &cluster_off,
					      &cluster_count);
		if (status == -ENOSPC)
			goto retry_enospc;
		/*
		 * We only shrunk the *minimum* number of bits in our
		 * request - it's entirely possible that the allocator
		 * might give us more than we asked for.
		 */
		if (status == 0) {
			spin_lock(&osb->osb_lock);
			osb->local_alloc_bits = cluster_count;
			spin_unlock(&osb->osb_lock);
		}
	}
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	/* Remember which cluster group satisfied us for next time. */
	osb->la_last_gd = ac->ac_last_group;

	la->la_bm_off = cpu_to_le32(cluster_off);
	alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
	/* just in case... In the future when we find space ourselves,
	 * we don't have to get all contiguous -- but we'll have to
	 * set all previously used bits in bitmap and update
	 * la_bits_set before setting the bits in the main bitmap. */
	alloc->id1.bitmap1.i_used = 0;
	memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
	       le16_to_cpu(la->la_size));

	ocfs2_resmap_restart(&osb->osb_la_resmap, cluster_count,
			     OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);

	trace_ocfs2_local_alloc_new_window_result(
		OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
		le32_to_cpu(alloc->id1.bitmap1.i_total));

bail:
	if (status)
		mlog_errno(status);
	return status;
}

/* Note that we do *NOT* lock the local alloc inode here as
 * it's been locked already for us.
 */
/*
 * Move the local alloc window: release the old window's unused clusters
 * back to the main bitmap, then reserve and initialize a new window.
 * All on-disk changes happen inside a single transaction so a crash
 * mid-slide cannot double-free main bitmap bits.
 */
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
					  struct inode *local_alloc_inode)
{
	int status = 0;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	handle_t *handle = NULL;
	struct ocfs2_dinode *alloc;
	struct ocfs2_dinode *alloc_copy = NULL;
	struct ocfs2_alloc_context *ac = NULL;

	ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_SLIDE);

	/* This will lock the main bitmap for us. */
	status = ocfs2_local_alloc_reserve_for_window(osb, &ac,
						      &main_bm_inode,
						      &main_bm_bh);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;

	/* We want to clear the local alloc before doing anything
	 * else, so that if we error later during this operation,
	 * local alloc shutdown won't try to double free main bitmap
	 * bits. Make a copy so the sync function knows which bits to
	 * free.
	 */
	alloc_copy = kmalloc(osb->local_alloc_bh->b_size, GFP_NOFS);
	if (!alloc_copy) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}
	memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size);

	status = ocfs2_journal_access_di(handle,
					 INODE_CACHE(local_alloc_inode),
					 osb->local_alloc_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	ocfs2_clear_local_alloc(alloc);
	ocfs2_journal_dirty(handle, osb->local_alloc_bh);

	/* Return the old window's free clusters to the main bitmap,
	 * working from the snapshot taken above. */
	status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
					  main_bm_inode, main_bm_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_local_alloc_new_window(osb, handle, ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	atomic_inc(&osb->alloc_stats.moves);

bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	brelse(main_bm_bh);

	if (main_bm_inode)
		iput(main_bm_inode);

	if (alloc_copy)
		kfree(alloc_copy);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (status)
		mlog_errno(status);
	return status;
}
gpl-2.0
Nihhaar/android_kernel_xiaomi_mocha
arch/x86/kernel/reboot_fixups_32.c
12636
2573
/*
 * Board-specific reboot fixups.
 *
 * List of supported fixups:
 * geode-gx1/cs5530a - Jaya Kumar <jayalk@intworks.biz>
 * geode-gx/lx/cs5536 - Andres Salomon <dilinger@debian.org>
 *
 */
#include <asm/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/reboot_fixups.h>
#include <asm/msr.h>
#include <linux/cs5535.h>

static void cs5530a_warm_reset(struct pci_dev *dev)
{
	/* Writing 1 to reset control register 0x44 makes the cs5530a
	 * perform a system warm reset. */
	pci_write_config_byte(dev, 0x44, 0x1);
	/* We should never get past the write; spin briefly to be safe. */
	udelay(50);
}

static void cs5536_warm_reset(struct pci_dev *dev)
{
	/* Setting the LSB of this MSR forces a hard reset. */
	wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
	/* Again, we should not return -- spin a while just in case. */
	udelay(50);
}

static void rdc321x_reset(struct pci_dev *dev)
{
	unsigned val;

	/* Voluntary reset the watchdog timer */
	outl(0x80003840, 0xCF8);
	/* Generate a CPU reset on next tick */
	val = inl(0xCFC);
	/* Use the minimum timer resolution */
	val |= 0x1600;
	outl(val, 0xCFC);
	outb(1, 0x92);
}

static void ce4100_reset(struct pci_dev *dev)
{
	int attempt;

	/* Pulse the reset control register a few times. */
	for (attempt = 0; attempt < 10; attempt++) {
		outb(0x2, 0xcf9);
		udelay(50);
	}
}

struct device_fixup {
	unsigned int vendor;
	unsigned int device;
	void (*reboot_fixup)(struct pci_dev *);
};

/*
 * PCI ids solely used for fixups_table go here
 */
#define PCI_DEVICE_ID_INTEL_CE4100	0x0708

static const struct device_fixup fixups_table[] = {
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
{ PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100, ce4100_reset },
};

/*
 * Look for a fixup matching the present hardware.  A fixup is expected
 * never to return; should it return anyway, we keep trying the rest of
 * the table and ultimately fall back to the standard mach_reboot path.
 */
void mach_reboot_fixups(void)
{
	const struct device_fixup *fixup;
	struct pci_dev *dev;
	int idx;

	/* We can be called from sysrq-B code. In such a case it is
	 * prohibited to dig PCI. */
	if (in_interrupt())
		return;

	for (idx = 0; idx < ARRAY_SIZE(fixups_table); idx++) {
		fixup = &fixups_table[idx];
		dev = pci_get_device(fixup->vendor, fixup->device, NULL);
		if (!dev)
			continue;

		fixup->reboot_fixup(dev);
		pci_dev_put(dev);
	}
}
gpl-2.0
touchpro/android_kernel_lge_msm8226_old_bad
arch/x86/kernel/reboot_fixups_32.c
12636
2573
/*
 * This is a good place to put board specific reboot fixups.
 *
 * List of supported fixups:
 * geode-gx1/cs5530a - Jaya Kumar <jayalk@intworks.biz>
 * geode-gx/lx/cs5536 - Andres Salomon <dilinger@debian.org>
 *
 */
#include <asm/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/reboot_fixups.h>
#include <asm/msr.h>
#include <linux/cs5535.h>

static void cs5530a_warm_reset(struct pci_dev *dev)
{
	/* writing 1 to the reset control register, 0x44 causes the
	cs5530a to perform a system warm reset */
	pci_write_config_byte(dev, 0x44, 0x1);
	udelay(50); /* shouldn't get here but be safe and spin-a-while */
	return;
}

static void cs5536_warm_reset(struct pci_dev *dev)
{
	/* writing 1 to the LSB of this MSR causes a hard reset */
	wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
	udelay(50); /* shouldn't get here but be safe and spin a while */
}

static void rdc321x_reset(struct pci_dev *dev)
{
	unsigned i;
	/* Voluntary reset the watchdog timer */
	outl(0x80003840, 0xCF8);
	/* Generate a CPU reset on next tick */
	i = inl(0xCFC);
	/* Use the minimum timer resolution */
	i |= 0x1600;
	outl(i, 0xCFC);
	outb(1, 0x92);
}

static void ce4100_reset(struct pci_dev *dev)
{
	int i;

	/* Pulse the reset control register (0xcf9) repeatedly. */
	for (i = 0; i < 10; i++) {
		outb(0x2, 0xcf9);
		udelay(50);
	}
}

/* One table entry: a PCI vendor/device pair and its reset routine. */
struct device_fixup {
	unsigned int vendor;
	unsigned int device;
	void (*reboot_fixup)(struct pci_dev *);
};

/*
 * PCI ids solely used for fixups_table go here
 */
#define PCI_DEVICE_ID_INTEL_CE4100	0x0708

static const struct device_fixup fixups_table[] = {
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
{ PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100, ce4100_reset },
};

/*
 * we see if any fixup is available for our current hardware. if there
 * is a fixup, we call it and we expect to never return from it. if we
 * do return, we keep looking and then eventually fall back to the
 * standard mach_reboot on return.
 */
void mach_reboot_fixups(void)
{
	const struct device_fixup *cur;
	struct pci_dev *dev;
	int i;

	/* we can be called from sysrq-B code. In such a case it is
	 * prohibited to dig PCI */
	if (in_interrupt())
		return;

	for (i=0; i < ARRAY_SIZE(fixups_table); i++) {
		cur = &(fixups_table[i]);
		/* pci_get_device takes a reference; drop it after use. */
		dev = pci_get_device(cur->vendor, cur->device, NULL);
		if (!dev)
			continue;

		cur->reboot_fixup(dev);
		pci_dev_put(dev);
	}
}
gpl-2.0
CaptainThrowback/kernel_htc_m8_Sense_4.4.4
net/wireless/sme.c
93
26048
/* * SME code for cfg80211's connect emulation. * * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2009 Intel Corporation. All rights reserved. */ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <linux/export.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> #include "nl80211.h" #include "reg.h" struct cfg80211_conn { struct cfg80211_connect_params params; enum { CFG80211_CONN_IDLE, CFG80211_CONN_SCANNING, CFG80211_CONN_SCAN_AGAIN, CFG80211_CONN_AUTHENTICATE_NEXT, CFG80211_CONN_AUTHENTICATING, CFG80211_CONN_ASSOCIATE_NEXT, CFG80211_CONN_ASSOCIATING, CFG80211_CONN_DEAUTH_ASSOC_FAIL, } state; u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; u8 *ie; size_t ie_len; bool auto_auth, prev_bssid_valid; }; static bool cfg80211_is_all_idle(void) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; bool is_all_idle = true; mutex_lock(&cfg80211_mutex); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { cfg80211_lock_rdev(rdev); list_for_each_entry(wdev, &rdev->netdev_list, list) { wdev_lock(wdev); if (wdev->sme_state != CFG80211_SME_IDLE) is_all_idle = false; wdev_unlock(wdev); } cfg80211_unlock_rdev(rdev); } mutex_unlock(&cfg80211_mutex); return is_all_idle; } static bool cfg80211_is_all_countryie_ignore(void) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; bool is_all_countryie_ignore = true; mutex_lock(&cfg80211_mutex); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { cfg80211_lock_rdev(rdev); list_for_each_entry(wdev, &rdev->netdev_list, list) { wdev_lock(wdev); if (!(wdev->wiphy->country_ie_pref & NL80211_COUNTRY_IE_IGNORE_CORE)) { is_all_countryie_ignore = false; wdev_unlock(wdev); cfg80211_unlock_rdev(rdev); goto out; } wdev_unlock(wdev); } cfg80211_unlock_rdev(rdev); } out: mutex_unlock(&cfg80211_mutex); return is_all_countryie_ignore; } static void disconnect_work(struct 
work_struct *work) { if (!cfg80211_is_all_idle()) return; if (cfg80211_is_all_countryie_ignore()) return; } static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); static int cfg80211_conn_scan(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_scan_request *request; int n_channels, err; ASSERT_RTNL(); ASSERT_RDEV_LOCK(rdev); ASSERT_WDEV_LOCK(wdev); if (rdev->scan_req) return -EBUSY; if (wdev->conn->params.channel) { n_channels = 1; } else { enum ieee80211_band band; n_channels = 0; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (!wdev->wiphy->bands[band]) continue; n_channels += wdev->wiphy->bands[band]->n_channels; } } request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) + sizeof(request->channels[0]) * n_channels, GFP_KERNEL); if (!request) return -ENOMEM; if (wdev->conn->params.channel) request->channels[0] = wdev->conn->params.channel; else { int i = 0, j; enum ieee80211_band band; struct ieee80211_supported_band *bands; struct ieee80211_channel *channel; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { bands = wdev->wiphy->bands[band]; if (!bands) continue; for (j = 0; j < bands->n_channels; j++) { channel = &bands->channels[j]; if (channel->flags & IEEE80211_CHAN_DISABLED) continue; request->channels[i++] = channel; } request->rates[band] = (1 << bands->n_bitrates) - 1; } n_channels = i; } request->n_channels = n_channels; request->ssids = (void *)&request->channels[n_channels]; request->n_ssids = 1; memcpy(request->ssids[0].ssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len); request->ssids[0].ssid_len = wdev->conn->params.ssid_len; request->dev = wdev->netdev; request->wiphy = &rdev->wiphy; rdev->scan_req = request; err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request); if (!err) { wdev->conn->state = CFG80211_CONN_SCANNING; nl80211_send_scan_start(rdev, wdev->netdev); dev_hold(wdev->netdev); } else { rdev->scan_req = NULL; kfree(request); } return 
err; } static int cfg80211_conn_do_work(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_connect_params *params; const u8 *prev_bssid = NULL; int err; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return 0; params = &wdev->conn->params; switch (wdev->conn->state) { case CFG80211_CONN_SCAN_AGAIN: return cfg80211_conn_scan(wdev); case CFG80211_CONN_AUTHENTICATE_NEXT: BUG_ON(!rdev->ops->auth); wdev->conn->state = CFG80211_CONN_AUTHENTICATING; return __cfg80211_mlme_auth(rdev, wdev->netdev, params->channel, params->auth_type, params->bssid, params->ssid, params->ssid_len, NULL, 0, params->key, params->key_len, params->key_idx); case CFG80211_CONN_ASSOCIATE_NEXT: BUG_ON(!rdev->ops->assoc); wdev->conn->state = CFG80211_CONN_ASSOCIATING; if (wdev->conn->prev_bssid_valid) prev_bssid = wdev->conn->prev_bssid; err = __cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel, params->bssid, prev_bssid, params->ssid, params->ssid_len, params->ie, params->ie_len, params->mfp != NL80211_MFP_NO, &params->crypto, params->flags, &params->ht_capa, &params->ht_capa_mask); if (err) __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); return err; case CFG80211_CONN_DEAUTH_ASSOC_FAIL: __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, WLAN_REASON_DEAUTH_LEAVING, false); return -EINVAL; default: return 0; } } void cfg80211_conn_work(struct work_struct *work) { struct cfg80211_registered_device *rdev = container_of(work, struct cfg80211_registered_device, conn_work); struct wireless_dev *wdev; u8 bssid_buf[ETH_ALEN], *bssid = NULL; rtnl_lock(); cfg80211_lock_rdev(rdev); mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->netdev_list, list) { wdev_lock(wdev); if (!netif_running(wdev->netdev)) { wdev_unlock(wdev); continue; } if (wdev->sme_state != CFG80211_SME_CONNECTING) { wdev_unlock(wdev); continue; } if (wdev->conn->params.bssid) { memcpy(bssid_buf, 
wdev->conn->params.bssid, ETH_ALEN); bssid = bssid_buf; } if (cfg80211_conn_do_work(wdev)) __cfg80211_connect_result( wdev->netdev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); wdev_unlock(wdev); } mutex_unlock(&rdev->devlist_mtx); cfg80211_unlock_rdev(rdev); rtnl_unlock(); } static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_bss *bss; u16 capa = WLAN_CAPABILITY_ESS; ASSERT_WDEV_LOCK(wdev); if (wdev->conn->params.privacy) capa |= WLAN_CAPABILITY_PRIVACY; bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel, wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY, capa); if (!bss) return NULL; memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN); wdev->conn->params.bssid = wdev->conn->bssid; wdev->conn->params.channel = bss->channel; wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); return bss; } static void __cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_bss *bss; ASSERT_WDEV_LOCK(wdev); if (wdev->sme_state != CFG80211_SME_CONNECTING) return; if (!wdev->conn) return; if (wdev->conn->state != CFG80211_CONN_SCANNING && wdev->conn->state != CFG80211_CONN_SCAN_AGAIN) return; bss = cfg80211_get_conn_bss(wdev); if (bss) { cfg80211_put_bss(bss); } else { if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) schedule_work(&rdev->conn_work); else __cfg80211_connect_result( wdev->netdev, wdev->conn->params.bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); } } void cfg80211_sme_scan_done(struct net_device *dev) { struct wireless_dev *wdev = dev->ieee80211_ptr; mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); wdev_lock(wdev); __cfg80211_sme_scan_done(dev); wdev_unlock(wdev); 
mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); } void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf; u16 status_code = le16_to_cpu(mgmt->u.auth.status_code); ASSERT_WDEV_LOCK(wdev); if (wdev->sme_state != CFG80211_SME_CONNECTING) return; if (WARN_ON(!wdev->conn)) return; if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && wdev->conn->auto_auth && wdev->conn->params.auth_type != NL80211_AUTHTYPE_NETWORK_EAP) { switch (wdev->conn->params.auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: if (wdev->connect_keys) wdev->conn->params.auth_type = NL80211_AUTHTYPE_SHARED_KEY; else wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; case NL80211_AUTHTYPE_SHARED_KEY: wdev->conn->params.auth_type = NL80211_AUTHTYPE_NETWORK_EAP; break; default: wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; break; } wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; schedule_work(&rdev->conn_work); } else if (status_code != WLAN_STATUS_SUCCESS) { __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, status_code, false, NULL); } else if (wdev->sme_state == CFG80211_SME_CONNECTING && wdev->conn->state == CFG80211_CONN_AUTHENTICATING) { wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); } } bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); if (WARN_ON(!wdev->conn)) return false; if (!wdev->conn->prev_bssid_valid) return false; wdev->conn->prev_bssid_valid = false; wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT; schedule_work(&rdev->conn_work); return true; } void cfg80211_sme_failed_assoc(struct wireless_dev *wdev) { struct wiphy *wiphy = wdev->wiphy; struct 
cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL; schedule_work(&rdev->conn_work); } void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, u16 status, bool wextev, struct cfg80211_bss *bss) { struct wireless_dev *wdev = dev->ieee80211_ptr; u8 *country_ie; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; if (wdev->sme_state != CFG80211_SME_CONNECTING) return; nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev, bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, status, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT if (wextev) { if (req_ie && status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = req_ie_len; wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie); } if (resp_ie && status == WLAN_STATUS_SUCCESS) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = resp_ie_len; wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; if (bssid && status == WLAN_STATUS_SUCCESS) { memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN); memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; } wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); } #endif if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; } if (wdev->conn) wdev->conn->state = CFG80211_CONN_IDLE; if (status != WLAN_STATUS_SUCCESS) { wdev->sme_state = CFG80211_SME_IDLE; if (wdev->conn) kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; kfree(wdev->connect_keys); wdev->connect_keys = NULL; wdev->ssid_len = 0; cfg80211_put_bss(bss); return; } if (!bss) bss = cfg80211_get_bss(wdev->wiphy, wdev->conn ? 
wdev->conn->params.channel : NULL, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (WARN_ON(!bss)) return; cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); wdev->sme_state = CFG80211_SME_CONNECTED; cfg80211_upload_connect_keys(wdev); country_ie = (u8 *) ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); if (!country_ie) return; } void cfg80211_connect_result(struct net_device *dev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, u16 status, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING); ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); if (!ev) return; ev->type = EVENT_CONNECT_RESULT; if (bssid) memcpy(ev->cr.bssid, bssid, ETH_ALEN); if (req_ie_len) { ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev); ev->cr.req_ie_len = req_ie_len; memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len); } if (resp_ie_len) { ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; ev->cr.resp_ie_len = resp_ie_len; memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len); } ev->cr.status = status; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_connect_result); void __cfg80211_roamed(struct wireless_dev *wdev, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len) { #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) goto out; if (wdev->sme_state != CFG80211_SME_CONNECTED) goto out; if (WARN_ON(!wdev->current_bss)) { goto out; } cfg80211_unhold_bss(wdev->current_bss); 
cfg80211_put_bss(&wdev->current_bss->pub); wdev->current_bss = NULL; cfg80211_hold_bss(bss_from_pub(bss)); wdev->current_bss = bss_from_pub(bss); nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bss->bssid, req_ie, req_ie_len, resp_ie, resp_ie_len, GFP_KERNEL); #ifdef CONFIG_CFG80211_WEXT if (req_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = req_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCREQIE, &wrqu, req_ie); } if (resp_ie) { memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = resp_ie_len; wireless_send_event(wdev->netdev, IWEVASSOCRESPIE, &wrqu, resp_ie); } memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN); wdev->wext.prev_bssid_valid = true; wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL); #endif return; out: cfg80211_put_bss(bss); } void cfg80211_roamed(struct net_device *dev, struct ieee80211_channel *channel, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, wdev->ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (WARN_ON(!bss)) return; cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie, resp_ie_len, gfp); } EXPORT_SYMBOL(cfg80211_roamed); void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); if (WARN_ON(!bss)) return; ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp); if (!ev) { 
cfg80211_put_bss(bss); return; } ev->type = EVENT_ROAMED; ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev); ev->rm.req_ie_len = req_ie_len; memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len); ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len; ev->rm.resp_ie_len = resp_ie_len; memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len); ev->rm.bss = bss; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_roamed_bss); void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, size_t ie_len, u16 reason, bool from_ap) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); int i; #ifdef CONFIG_CFG80211_WEXT union iwreq_data wrqu; #endif ASSERT_WDEV_LOCK(wdev); if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) return; #ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_CONNECTED) return; #endif if (wdev->current_bss) { cfg80211_unhold_bss(wdev->current_bss); cfg80211_put_bss(&wdev->current_bss->pub); } wdev->current_bss = NULL; wdev->sme_state = CFG80211_SME_IDLE; wdev->ssid_len = 0; if (wdev->conn) { kfree(wdev->conn->ie); wdev->conn->ie = NULL; kfree(wdev->conn); wdev->conn = NULL; } nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap); if (rdev->ops->del_key) for (i = 0; i < 6; i++) rdev->ops->del_key(wdev->wiphy, dev, i, false, NULL); if (rdev->ops->set_qos_map) { rdev->ops->set_qos_map(&rdev->wiphy, dev, NULL); } #ifdef CONFIG_CFG80211_WEXT memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); wdev->wext.connect.ssid_len = 0; #endif schedule_work(&cfg80211_disconnect_work); } void cfg80211_disconnected(struct net_device *dev, u16 reason, u8 *ie, size_t ie_len, gfp_t gfp) { struct wireless_dev *wdev = 
dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); struct cfg80211_event *ev; unsigned long flags; CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED); ev = kzalloc(sizeof(*ev) + ie_len, gfp); if (!ev) return; ev->type = EVENT_DISCONNECTED; ev->dc.ie = ((u8 *)ev) + sizeof(*ev); ev->dc.ie_len = ie_len; memcpy((void *)ev->dc.ie, ie, ie_len); ev->dc.reason = reason; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); spin_unlock_irqrestore(&wdev->event_lock, flags); queue_work(cfg80211_wq, &rdev->event_work); } EXPORT_SYMBOL(cfg80211_disconnected); int __cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys, const u8 *prev_bssid) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_bss *bss = NULL; int err; ASSERT_WDEV_LOCK(wdev); #ifndef CONFIG_CFG80211_ALLOW_RECONNECT if (wdev->sme_state != CFG80211_SME_IDLE) return -EALREADY; if (WARN_ON(wdev->connect_keys)) { #else if (wdev->connect_keys) { #endif kfree(wdev->connect_keys); wdev->connect_keys = NULL; } cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, rdev->wiphy.ht_capa_mod_mask); if (connkeys && connkeys->def >= 0) { int idx; u32 cipher; idx = connkeys->def; cipher = connkeys->params[idx].cipher; if (cipher == WLAN_CIPHER_SUITE_WEP40 || cipher == WLAN_CIPHER_SUITE_WEP104) { connect->key_idx = idx; connect->key = connkeys->params[idx].key; connect->key_len = connkeys->params[idx].key_len; if (connect->crypto.cipher_group == 0) connect->crypto.cipher_group = cipher; if (connect->crypto.n_ciphers_pairwise == 0) { connect->crypto.n_ciphers_pairwise = 1; connect->crypto.ciphers_pairwise[0] = cipher; } } } if (!rdev->ops->connect) { if (!rdev->ops->auth || !rdev->ops->assoc) return -EOPNOTSUPP; if (WARN_ON(wdev->conn)) return -EINPROGRESS; wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL); if (!wdev->conn) 
return -ENOMEM; memcpy(&wdev->conn->params, connect, sizeof(*connect)); if (connect->bssid) { wdev->conn->params.bssid = wdev->conn->bssid; memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN); } if (connect->ie) { wdev->conn->ie = kmemdup(connect->ie, connect->ie_len, GFP_KERNEL); wdev->conn->params.ie = wdev->conn->ie; if (!wdev->conn->ie) { kfree(wdev->conn); wdev->conn = NULL; return -ENOMEM; } } if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { wdev->conn->auto_auth = true; wdev->conn->params.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; } else { wdev->conn->auto_auth = false; } memcpy(wdev->ssid, connect->ssid, connect->ssid_len); wdev->ssid_len = connect->ssid_len; wdev->conn->params.ssid = wdev->ssid; wdev->conn->params.ssid_len = connect->ssid_len; bss = cfg80211_get_conn_bss(wdev); wdev->sme_state = CFG80211_SME_CONNECTING; wdev->connect_keys = connkeys; if (prev_bssid) { memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN); wdev->conn->prev_bssid_valid = true; } if (bss) { wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT; err = cfg80211_conn_do_work(wdev); cfg80211_put_bss(bss); } else { err = cfg80211_conn_scan(wdev); if (err == -EBUSY) { err = 0; wdev->conn->state = CFG80211_CONN_SCAN_AGAIN; } } if (err) { kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; wdev->sme_state = CFG80211_SME_IDLE; wdev->connect_keys = NULL; wdev->ssid_len = 0; } return err; } else { wdev->sme_state = CFG80211_SME_CONNECTING; wdev->connect_keys = connkeys; err = rdev->ops->connect(&rdev->wiphy, dev, connect); if (err) { wdev->connect_keys = NULL; wdev->sme_state = CFG80211_SME_IDLE; return err; } memcpy(wdev->ssid, connect->ssid, connect->ssid_len); wdev->ssid_len = connect->ssid_len; return 0; } } int cfg80211_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *connect, struct cfg80211_cached_keys *connkeys) { int err; mutex_lock(&rdev->devlist_mtx); wdev_lock(dev->ieee80211_ptr); err = 
__cfg80211_connect(rdev, dev, connect, connkeys, NULL); wdev_unlock(dev->ieee80211_ptr); mutex_unlock(&rdev->devlist_mtx); return err; } int __cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; ASSERT_WDEV_LOCK(wdev); if (wdev->sme_state == CFG80211_SME_IDLE) return -EINVAL; kfree(wdev->connect_keys); wdev->connect_keys = NULL; if (!rdev->ops->disconnect) { if (!rdev->ops->deauth) return -EOPNOTSUPP; if (!wdev->conn) { cfg80211_mlme_down(rdev, dev); return 0; } if (wdev->sme_state == CFG80211_SME_CONNECTING && (wdev->conn->state == CFG80211_CONN_SCANNING || wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) { wdev->sme_state = CFG80211_SME_IDLE; kfree(wdev->conn->ie); kfree(wdev->conn); wdev->conn = NULL; wdev->ssid_len = 0; return 0; } err = __cfg80211_mlme_deauth(rdev, dev, wdev->conn->params.bssid, NULL, 0, reason, false); if (err) return err; } else { err = rdev->ops->disconnect(&rdev->wiphy, dev, reason); if (err) return err; } if (wdev->sme_state == CFG80211_SME_CONNECTED) __cfg80211_disconnected(dev, NULL, 0, 0, false); else if (wdev->sme_state == CFG80211_SME_CONNECTING) __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, wextev, NULL); return 0; } int cfg80211_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason, bool wextev) { int err; wdev_lock(dev->ieee80211_ptr); err = __cfg80211_disconnect(rdev, dev, reason, wextev); wdev_unlock(dev->ieee80211_ptr); return err; } void cfg80211_sme_disassoc(struct net_device *dev, struct cfg80211_internal_bss *bss) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); u8 bssid[ETH_ALEN]; ASSERT_WDEV_LOCK(wdev); if (!wdev->conn) return; if (wdev->conn->state == CFG80211_CONN_IDLE) return; memcpy(bssid, bss->pub.bssid, ETH_ALEN); __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 
0, WLAN_REASON_DEAUTH_LEAVING, false); }
gpl-2.0
Garcia98/kernel-amami
drivers/usb/gadget/f_diag.c
349
23025
/* drivers/usb/gadget/f_diag.c
 * Diag Function Device - Route ARM9 and ARM11 DIAG messages
 * between HOST and DEVICE.
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/ratelimit.h>

#include <mach/usbdiag.h>

#include <linux/usb/composite.h>
#include <linux/usb/gadget.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>

/* Protects usb_diag_ch_list membership and channel lifetime. */
static DEFINE_SPINLOCK(ch_lock);
static LIST_HEAD(usb_diag_ch_list);

static struct usb_interface_descriptor intf_desc = {
	.bLength            =	sizeof intf_desc,
	.bDescriptorType    =	USB_DT_INTERFACE,
	.bNumEndpoints      =	2,
	.bInterfaceClass    =	0xFF,
	.bInterfaceSubClass =	0xFF,
	.bInterfaceProtocol =	0xFF,
};

static struct usb_endpoint_descriptor hs_bulk_in_desc = {
	.bLength          =	USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize   =	__constant_cpu_to_le16(512),
	.bInterval        =	0,
};
static struct usb_endpoint_descriptor fs_bulk_in_desc = {
	.bLength          =	USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize   =	__constant_cpu_to_le16(64),
	.bInterval        =	0,
};

static struct usb_endpoint_descriptor hs_bulk_out_desc = {
	.bLength          =	USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize   =	__constant_cpu_to_le16(512),
	.bInterval        =	0,
};

static struct usb_endpoint_descriptor fs_bulk_out_desc = {
	.bLength          =	USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize   =	__constant_cpu_to_le16(64),
	.bInterval        =	0,
};

static struct usb_endpoint_descriptor ss_bulk_in_desc = {
	.bLength          =	USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize   =	__constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ss_bulk_in_comp_desc = {
	.bLength          =	sizeof ss_bulk_in_comp_desc,
	.bDescriptorType  =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst      =	0, */
	/* .bmAttributes   =	0, */
};

static struct usb_endpoint_descriptor ss_bulk_out_desc = {
	.bLength          =	USB_DT_ENDPOINT_SIZE,
	.bDescriptorType  =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize   =	__constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ss_bulk_out_comp_desc = {
	.bLength          =	sizeof ss_bulk_out_comp_desc,
	.bDescriptorType  =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst      =	0, */
	/* .bmAttributes   =	0, */
};

static struct usb_descriptor_header *fs_diag_desc[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &fs_bulk_in_desc,
	(struct usb_descriptor_header *) &fs_bulk_out_desc,
	NULL,
};
static struct usb_descriptor_header *hs_diag_desc[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &hs_bulk_in_desc,
	(struct usb_descriptor_header *) &hs_bulk_out_desc,
	NULL,
};

static struct usb_descriptor_header *ss_diag_desc[] = {
	(struct usb_descriptor_header *) &intf_desc,
	(struct usb_descriptor_header *) &ss_bulk_in_desc,
	(struct usb_descriptor_header *) &ss_bulk_in_comp_desc,
	(struct usb_descriptor_header *) &ss_bulk_out_desc,
	(struct usb_descriptor_header *) &ss_bulk_out_comp_desc,
	NULL,
};

/**
 * struct diag_context - USB diag function driver private structure
 * @function: function structure for USB interface
 * @out: USB OUT endpoint struct
 * @in: USB IN endpoint struct
 * @read_pool: List of requests used for Rx (OUT ep)
 * @write_pool: List of requests used for Tx (IN ep)
 * @lock: Spinlock to protect read_pool, write_pool lists
 * @configured: non-zero while the interface is configured
 * @cdev: USB composite device struct
 * @update_pid_and_serial_num: callback to pass pid/serial to dload
 * @ch: USB diag channel
 * @dpkts_tolaptop: packets sent to the host
 * @dpkts_tomodem: packets received from the host
 * @dpkts_tolaptop_pending: IN transfers still in flight
 * @list_item: node inside diag_dev_list
 */
struct diag_context {
	struct usb_function function;
	struct usb_ep *out;
	struct usb_ep *in;
	struct list_head read_pool;
	struct list_head write_pool;
	spinlock_t lock;
	unsigned configured;
	struct usb_composite_dev *cdev;
	int (*update_pid_and_serial_num)(uint32_t, const char *);
	struct usb_diag_ch *ch;

	/* pkt counters */
	unsigned long dpkts_tolaptop;
	unsigned long dpkts_tomodem;
	unsigned dpkts_tolaptop_pending;

	/* A list node inside the diag_dev_list */
	struct list_head list_item;
};

static struct list_head diag_dev_list;

static inline struct diag_context *func_to_diag(struct usb_function *f)
{
	return container_of(f, struct diag_context, function);
}

/* Pass the product id and serial number on to the dload code. */
static void diag_update_pid_and_serial_num(struct diag_context *ctxt)
{
	struct usb_composite_dev *cdev = ctxt->cdev;
	struct usb_gadget_strings *table;
	struct usb_string *s;

	if (!ctxt->update_pid_and_serial_num)
		return;

	/*
	 * update pid and serial number to dload only if diag
	 * interface is zeroth interface.
	 */
	if (intf_desc.bInterfaceNumber)
		return;

	/* pass on product id and serial number to dload */
	if (!cdev->desc.iSerialNumber) {
		ctxt->update_pid_and_serial_num(
					cdev->desc.idProduct, 0);
		return;
	}

	/*
	 * Serial number is filled by the composite driver. So
	 * it is fair enough to assume that it will always be
	 * found at first table of strings.
	 */
	table = *(cdev->driver->strings);
	for (s = table->strings; s && s->s; s++)
		if (s->id == cdev->desc.iSerialNumber) {
			ctxt->update_pid_and_serial_num(
					cdev->desc.idProduct, s->s);
			break;
		}
}

/* IN (to host) completion: recycle the request and notify the channel. */
static void diag_write_complete(struct usb_ep *ep,
		struct usb_request *req)
{
	struct diag_context *ctxt = ep->driver_data;
	struct diag_request *d_req = req->context;
	unsigned long flags;

	ctxt->dpkts_tolaptop_pending--;

	if (!req->status) {
		if ((req->length >= ep->maxpacket) &&
				((req->length % ep->maxpacket) == 0)) {
			/*
			 * Transfer ended exactly on a packet boundary:
			 * queue a zero length packet to terminate it.
			 */
			ctxt->dpkts_tolaptop_pending++;
			req->length = 0;
			d_req->actual = req->actual;
			d_req->status = req->status;
			/* Queue zero length packet */
			usb_ep_queue(ctxt->in, req, GFP_ATOMIC);
			return;
		}
	}

	spin_lock_irqsave(&ctxt->lock, flags);
	list_add_tail(&req->list, &ctxt->write_pool);
	if (req->length != 0) {
		d_req->actual = req->actual;
		d_req->status = req->status;
	}
	spin_unlock_irqrestore(&ctxt->lock, flags);

	if (ctxt->ch && ctxt->ch->notify)
		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_WRITE_DONE, d_req);
}

/* OUT (from host) completion: recycle the request and notify the channel. */
static void diag_read_complete(struct usb_ep *ep,
		struct usb_request *req)
{
	struct diag_context *ctxt = ep->driver_data;
	struct diag_request *d_req = req->context;
	unsigned long flags;

	d_req->actual = req->actual;
	d_req->status = req->status;

	spin_lock_irqsave(&ctxt->lock, flags);
	list_add_tail(&req->list, &ctxt->read_pool);
	spin_unlock_irqrestore(&ctxt->lock, flags);

	ctxt->dpkts_tomodem++;

	if (ctxt->ch && ctxt->ch->notify)
		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_READ_DONE, d_req);
}

/**
 * usb_diag_open() - Open a diag channel over USB
 * @name: Name of the channel
 * @priv: Private structure pointer which will be passed in notify()
 * @notify: Callback function to receive notifications
 *
 * This function iterates over the available channels and returns
 * the channel handler if the name matches. The notify callback is called
 * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
 *
 * FIX: the channel is added to usb_diag_ch_list only when it was newly
 * allocated.  The original added it unconditionally, so opening an
 * already-registered channel linked the same list_head a second time
 * and corrupted the list.
 */
struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
		void (*notify)(void *, unsigned, struct diag_request *))
{
	struct usb_diag_ch *ch;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&ch_lock, flags);
	/* Check if we already have a channel with this name */
	list_for_each_entry(ch, &usb_diag_ch_list, list) {
		if (!strcmp(name, ch->name)) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ch_lock, flags);

	if (!found) {
		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch)
			return ERR_PTR(-ENOMEM);
	}

	ch->name = name;
	ch->priv = priv;
	ch->notify = notify;

	if (!found) {
		/* Only a newly allocated channel may be linked in. */
		spin_lock_irqsave(&ch_lock, flags);
		list_add_tail(&ch->list, &usb_diag_ch_list);
		spin_unlock_irqrestore(&ch_lock, flags);
	}

	return ch;
}
EXPORT_SYMBOL(usb_diag_open);

/**
 * usb_diag_close() - Close a diag channel over USB
 * @ch: Channel handler
 *
 * This function closes the diag channel.
 *
 */
void usb_diag_close(struct usb_diag_ch *ch)
{
	struct diag_context *dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ch_lock, flags);
	ch->priv = NULL;
	ch->notify = NULL;
	/* Free-up the resources if channel is no more active */
	list_del(&ch->list);
	list_for_each_entry(dev, &diag_dev_list, list_item)
		if (dev->ch == ch)
			dev->ch = NULL;
	kfree(ch);
	spin_unlock_irqrestore(&ch_lock, flags);
}
EXPORT_SYMBOL(usb_diag_close);

/* Release every request in both pools.  Caller holds ctxt->lock. */
static void free_reqs(struct diag_context *ctxt)
{
	struct list_head *act, *tmp;
	struct usb_request *req;

	list_for_each_safe(act, tmp, &ctxt->write_pool) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ctxt->in, req);
	}

	list_for_each_safe(act, tmp, &ctxt->read_pool) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ctxt->out, req);
	}
}

/**
 * usb_diag_alloc_req() - Allocate USB requests
 * @ch: Channel handler
 * @n_write: Number of requests for Tx
 * @n_read: Number of requests for Rx
 *
 * This function allocates read and write USB requests for the interface
 * associated with this channel. The actual buffer is not allocated.
 * The buffer is passed by diag char driver.
 *
 */
int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
{
	struct diag_context *ctxt = ch->priv_usb;
	struct usb_request *req;
	int i;
	unsigned long flags;

	if (!ctxt)
		return -ENODEV;

	spin_lock_irqsave(&ctxt->lock, flags);
	/* Free previous session's stale requests */
	free_reqs(ctxt);
	for (i = 0; i < n_write; i++) {
		req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
		if (!req)
			goto fail;
		kmemleak_not_leak(req);
		req->complete = diag_write_complete;
		list_add_tail(&req->list, &ctxt->write_pool);
	}

	for (i = 0; i < n_read; i++) {
		req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
		if (!req)
			goto fail;
		kmemleak_not_leak(req);
		req->complete = diag_read_complete;
		list_add_tail(&req->list, &ctxt->read_pool);
	}
	spin_unlock_irqrestore(&ctxt->lock, flags);
	return 0;
fail:
	/* Roll back the partial allocation. */
	free_reqs(ctxt);
	spin_unlock_irqrestore(&ctxt->lock, flags);
	return -ENOMEM;
}
EXPORT_SYMBOL(usb_diag_alloc_req);

/**
 * usb_diag_read() - Read data from USB diag channel
 * @ch: Channel handler
 * @d_req: Diag request struct
 *
 * Enqueue a request on OUT endpoint of the interface corresponding to this
 * channel. This function returns proper error code when interface is not
 * in configured state, no Rx requests available and ep queue is failed.
 *
 * This function operates asynchronously. READ_DONE event is notified after
 * completion of OUT request.
 *
 */
int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
{
	struct diag_context *ctxt = ch->priv_usb;
	unsigned long flags;
	struct usb_request *req;
	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);

	if (!ctxt)
		return -ENODEV;

	spin_lock_irqsave(&ctxt->lock, flags);

	if (!ctxt->configured) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		return -EIO;
	}

	if (list_empty(&ctxt->read_pool)) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
		return -EAGAIN;
	}

	req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&ctxt->lock, flags);

	req->buf = d_req->buf;
	req->length = d_req->length;
	req->context = d_req;
	if (usb_ep_queue(ctxt->out, req, GFP_ATOMIC)) {
		/* If error add the link to linked list again*/
		spin_lock_irqsave(&ctxt->lock, flags);
		list_add_tail(&req->list, &ctxt->read_pool);
		spin_unlock_irqrestore(&ctxt->lock, flags);
		/* 1 error message for every 10 sec */
		if (__ratelimit(&rl))
			ERROR(ctxt->cdev, "%s: cannot queue"
				" read request\n", __func__);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL(usb_diag_read);

/**
 * usb_diag_write() - Write data from USB diag channel
 * @ch: Channel handler
 * @d_req: Diag request struct
 *
 * Enqueue a request on IN endpoint of the interface corresponding to this
 * channel. This function returns proper error code when interface is not
 * in configured state, no Tx requests available and ep queue is failed.
 *
 * This function operates asynchronously. WRITE_DONE event is notified after
 * completion of IN request.
 *
 * FIX: the ratelimited error message said "read request"; this is the
 * write path, so it now says "write request".
 */
int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
{
	struct diag_context *ctxt = ch->priv_usb;
	unsigned long flags;
	struct usb_request *req = NULL;
	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);

	if (!ctxt)
		return -ENODEV;

	spin_lock_irqsave(&ctxt->lock, flags);

	if (!ctxt->configured) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		return -EIO;
	}

	if (list_empty(&ctxt->write_pool)) {
		spin_unlock_irqrestore(&ctxt->lock, flags);
		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
		return -EAGAIN;
	}

	req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&ctxt->lock, flags);

	req->buf = d_req->buf;
	req->length = d_req->length;
	req->context = d_req;
	if (usb_ep_queue(ctxt->in, req, GFP_ATOMIC)) {
		/* If error add the link to linked list again*/
		spin_lock_irqsave(&ctxt->lock, flags);
		list_add_tail(&req->list, &ctxt->write_pool);
		/* 1 error message for every 10 sec */
		if (__ratelimit(&rl))
			ERROR(ctxt->cdev, "%s: cannot queue"
				" write request\n", __func__);
		spin_unlock_irqrestore(&ctxt->lock, flags);
		return -EIO;
	}

	ctxt->dpkts_tolaptop++;
	ctxt->dpkts_tolaptop_pending++;

	return 0;
}
EXPORT_SYMBOL(usb_diag_write);

/* Composite callback: host deconfigured us / cable pulled. */
static void diag_function_disable(struct usb_function *f)
{
	struct diag_context  *dev = func_to_diag(f);
	unsigned long flags;

	DBG(dev->cdev, "diag_function_disable\n");

	spin_lock_irqsave(&dev->lock, flags);
	dev->configured = 0;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (dev->ch && dev->ch->notify)
		dev->ch->notify(dev->ch->priv, USB_DIAG_DISCONNECT, NULL);

	usb_ep_disable(dev->in);
	dev->in->driver_data = NULL;

	usb_ep_disable(dev->out);
	dev->out->driver_data = NULL;

	if (dev->ch)
		dev->ch->priv_usb = NULL;
}

/* Composite callback: host selected our (only) alt setting. */
static int diag_function_set_alt(struct usb_function *f,
		unsigned intf, unsigned alt)
{
	struct diag_context  *dev = func_to_diag(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	unsigned long flags;
	int rc = 0;

	if (config_ep_by_speed(cdev->gadget, f, dev->in) ||
	    config_ep_by_speed(cdev->gadget, f, dev->out)) {
		dev->in->desc = NULL;
		dev->out->desc = NULL;
		return -EINVAL;
	}

	if (!dev->ch)
		return -ENODEV;

	/*
	 * Indicate to the diag channel that the active diag device is dev.
	 * Since a few diag devices can point to the same channel.
	 */
	dev->ch->priv_usb = dev;

	dev->in->driver_data = dev;
	rc = usb_ep_enable(dev->in);
	if (rc) {
		ERROR(dev->cdev, "can't enable %s, result %d\n",
						dev->in->name, rc);
		return rc;
	}
	dev->out->driver_data = dev;
	rc = usb_ep_enable(dev->out);
	if (rc) {
		ERROR(dev->cdev, "can't enable %s, result %d\n",
						dev->out->name, rc);
		usb_ep_disable(dev->in);
		return rc;
	}

	dev->dpkts_tolaptop = 0;
	dev->dpkts_tomodem = 0;
	dev->dpkts_tolaptop_pending = 0;

	spin_lock_irqsave(&dev->lock, flags);
	dev->configured = 1;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (dev->ch->notify)
		dev->ch->notify(dev->ch->priv, USB_DIAG_CONNECT, NULL);

	return rc;
}

/* Composite callback: function removed from the configuration. */
static void diag_function_unbind(struct usb_configuration *c,
		struct usb_function *f)
{
	struct diag_context *ctxt = func_to_diag(f);
	unsigned long flags;

	if (gadget_is_superspeed(c->cdev->gadget))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);

	usb_free_descriptors(f->descriptors);

	/*
	 * Channel priv_usb may point to other diag function.
	 * Clear the priv_usb only if the channel is used by the
	 * diag dev we unbind here.
	 */
	if (ctxt->ch && ctxt->ch->priv_usb == ctxt)
		ctxt->ch->priv_usb = NULL;
	list_del(&ctxt->list_item);
	/* Free any pending USB requests from last session */
	spin_lock_irqsave(&ctxt->lock, flags);
	free_reqs(ctxt);
	spin_unlock_irqrestore(&ctxt->lock, flags);
	kfree(ctxt);
}

/* Composite callback: claim endpoints and build speed descriptors. */
static int diag_function_bind(struct usb_configuration *c,
		struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct diag_context *ctxt = func_to_diag(f);
	struct usb_ep *ep;
	int status = -ENODEV;

	/*
	 * NOTE(review): usb_interface_id() can return a negative errno;
	 * the original code stores it unchecked — confirm callers cannot
	 * exhaust interface ids before changing this.
	 */
	intf_desc.bInterfaceNumber =  usb_interface_id(c, f);

	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
	if (!ep)
		goto fail;
	ctxt->in = ep;
	ep->driver_data = ctxt;

	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
	if (!ep)
		goto fail;
	ctxt->out = ep;
	ep->driver_data = ctxt;

	status = -ENOMEM;
	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(fs_diag_desc);
	if (!f->descriptors)
		goto fail;

	if (gadget_is_dualspeed(c->cdev->gadget)) {
		hs_bulk_in_desc.bEndpointAddress =
				fs_bulk_in_desc.bEndpointAddress;
		hs_bulk_out_desc.bEndpointAddress =
				fs_bulk_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
		if (!f->hs_descriptors)
			goto fail;
	}

	if (gadget_is_superspeed(c->cdev->gadget)) {
		ss_bulk_in_desc.bEndpointAddress =
				fs_bulk_in_desc.bEndpointAddress;
		ss_bulk_out_desc.bEndpointAddress =
				fs_bulk_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(ss_diag_desc);
		if (!f->ss_descriptors)
			goto fail;
	}
	diag_update_pid_and_serial_num(ctxt);
	return 0;
fail:
	if (f->ss_descriptors)
		usb_free_descriptors(f->ss_descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
	if (ctxt->out)
		ctxt->out->driver_data = NULL;
	if (ctxt->in)
		ctxt->in->driver_data = NULL;
	return status;
}

/* Register one diag function instance bound to the named channel. */
int diag_function_add(struct usb_configuration *c, const char *name,
			int (*update_pid)(uint32_t, const char *))
{
	struct diag_context *dev;
	struct usb_diag_ch *_ch;
	int found = 0, ret;

	DBG(c->cdev, "diag_function_add\n");

	list_for_each_entry(_ch, &usb_diag_ch_list, list) {
		if (!strcmp(name, _ch->name)) {
			found = 1;
			break;
		}
	}
	if (!found) {
		ERROR(c->cdev, "unable to get diag usb channel\n");
		return -ENODEV;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	list_add_tail(&dev->list_item, &diag_dev_list);

	/*
	 * A few diag devices can point to the same channel, in case that
	 * the diag devices belong to different configurations, however
	 * only the active diag device will claim the channel by setting
	 * the ch->priv_usb (see diag_function_set_alt).
	 */
	dev->ch = _ch;

	dev->update_pid_and_serial_num = update_pid;
	dev->cdev = c->cdev;
	dev->function.name = _ch->name;
	dev->function.descriptors = fs_diag_desc;
	dev->function.hs_descriptors = hs_diag_desc;
	dev->function.bind = diag_function_bind;
	dev->function.unbind = diag_function_unbind;
	dev->function.set_alt = diag_function_set_alt;
	dev->function.disable = diag_function_disable;
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->read_pool);
	INIT_LIST_HEAD(&dev->write_pool);

	ret = usb_add_function(c, &dev->function);
	if (ret) {
		INFO(c->cdev, "usb_add_function failed\n");
		list_del(&dev->list_item);
		kfree(dev);
	}

	return ret;
}

#if defined(CONFIG_DEBUG_FS)
static char debug_buffer[PAGE_SIZE];

/* Dump per-channel packet counters into the shared debug buffer. */
static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	char *buf = debug_buffer;
	int temp = 0;
	struct usb_diag_ch *ch;

	list_for_each_entry(ch, &usb_diag_ch_list, list) {
		struct diag_context *ctxt = ch->priv_usb;

		if (ctxt)
			temp += scnprintf(buf + temp, PAGE_SIZE - temp,
					"---Name: %s---\n"
					"endpoints: %s, %s\n"
					"dpkts_tolaptop: %lu\n"
					"dpkts_tomodem:  %lu\n"
					"pkts_tolaptop_pending: %u\n",
					ch->name,
					ctxt->in->name, ctxt->out->name,
					ctxt->dpkts_tolaptop,
					ctxt->dpkts_tomodem,
					ctxt->dpkts_tolaptop_pending);
	}

	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
}

/* Any write to the status file clears all packet counters. */
static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct usb_diag_ch *ch;

	list_for_each_entry(ch, &usb_diag_ch_list, list) {
		struct diag_context *ctxt = ch->priv_usb;

		if (ctxt) {
			ctxt->dpkts_tolaptop = 0;
			ctxt->dpkts_tomodem = 0;
			ctxt->dpkts_tolaptop_pending = 0;
		}
	}

	return count;
}

static int debug_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations debug_fdiag_ops = {
	.open = debug_open,
	.read = debug_read_stats,
	.write = debug_reset_stats,
};

struct dentry *dent_diag;

static void fdiag_debugfs_init(void)
{
	struct dentry *dent_diag_status;

	dent_diag = debugfs_create_dir("usb_diag", 0);
	if (!dent_diag || IS_ERR(dent_diag))
		return;

	dent_diag_status = debugfs_create_file("status", 0444, dent_diag, 0,
			&debug_fdiag_ops);

	if (!dent_diag_status || IS_ERR(dent_diag_status)) {
		debugfs_remove(dent_diag);
		dent_diag = NULL;
		return;
	}
}

static void fdiag_debugfs_remove(void)
{
	debugfs_remove_recursive(dent_diag);
}
#else
static inline void fdiag_debugfs_init(void) {}
static inline void fdiag_debugfs_remove(void) {}
#endif

/* Tear down debugfs and free channels that diagchar no longer uses. */
static void diag_cleanup(void)
{
	struct list_head *act, *tmp;
	struct usb_diag_ch *_ch;
	unsigned long flags;

	fdiag_debugfs_remove();

	list_for_each_safe(act, tmp, &usb_diag_ch_list) {
		_ch = list_entry(act, struct usb_diag_ch, list);

		spin_lock_irqsave(&ch_lock, flags);
		/* Free if diagchar is not using the channel anymore */
		if (!_ch->priv) {
			list_del(&_ch->list);
			kfree(_ch);
		}
		spin_unlock_irqrestore(&ch_lock, flags);
	}
}

static int diag_setup(void)
{
	INIT_LIST_HEAD(&diag_dev_list);

	fdiag_debugfs_init();

	return 0;
}
gpl-2.0
VincenzoDo/my-kernel
arch/arm/mach-omap2/omap_hwmod_81xx_data.c
349
30435
/* * DM81xx hwmod data. * * Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/ * Copyright (C) 2013 SKTB SKiT, http://www.skitlab.ru/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/platform_data/gpio-omap.h> #include <linux/platform_data/hsmmc-omap.h> #include <linux/platform_data/spi-omap2-mcspi.h> #include <plat/dmtimer.h> #include "omap_hwmod_common_data.h" #include "cm81xx.h" #include "ti81xx.h" #include "wd_timer.h" /* * DM816X hardware modules integration data * * Note: This is incomplete and at present, not generated from h/w database. */ /* * The alwon .clkctrl_offs field is offset from the CM_ALWON, that's * TRM 18.7.17 CM_ALWON device register values minus 0x1400. 
*/
#define DM816X_DM_ALWON_BASE		0x1400
#define DM816X_CM_ALWON_MCASP0_CLKCTRL	(0x1540 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MCASP1_CLKCTRL	(0x1544 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MCASP2_CLKCTRL	(0x1548 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MCBSP_CLKCTRL	(0x154c - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_UART_0_CLKCTRL	(0x1550 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_UART_1_CLKCTRL	(0x1554 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_UART_2_CLKCTRL	(0x1558 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_GPIO_0_CLKCTRL	(0x155c - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_GPIO_1_CLKCTRL	(0x1560 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_I2C_0_CLKCTRL	(0x1564 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_I2C_1_CLKCTRL	(0x1568 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_1_CLKCTRL	(0x1570 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_2_CLKCTRL	(0x1574 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_3_CLKCTRL	(0x1578 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_4_CLKCTRL	(0x157c - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_5_CLKCTRL	(0x1580 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_6_CLKCTRL	(0x1584 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TIMER_7_CLKCTRL	(0x1588 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_WDTIMER_CLKCTRL	(0x158c - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SPI_CLKCTRL	(0x1590 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MAILBOX_CLKCTRL	(0x1594 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SPINBOX_CLKCTRL	(0x1598 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MMUDATA_CLKCTRL	(0x159c - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MMUCFG_CLKCTRL	(0x15a8 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SDIO_CLKCTRL	(0x15b0 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_OCMC_0_CLKCTRL	(0x15b4 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_OCMC_1_CLKCTRL	(0x15b8 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_CONTRL_CLKCTRL	(0x15c4 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_GPMC_CLKCTRL	(0x15d0 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_ETHERNET_0_CLKCTRL	(0x15d4 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_ETHERNET_1_CLKCTRL	(0x15d8 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_MPU_CLKCTRL	(0x15dc - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_L3_CLKCTRL	(0x15e4 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_L4HS_CLKCTRL	(0x15e8 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_L4LS_CLKCTRL	(0x15ec - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_RTC_CLKCTRL	(0x15f0 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TPCC_CLKCTRL	(0x15f4 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TPTC0_CLKCTRL	(0x15f8 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TPTC1_CLKCTRL	(0x15fc - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TPTC2_CLKCTRL	(0x1600 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_TPTC3_CLKCTRL	(0x1604 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SR_0_CLKCTRL	(0x1608 - DM816X_DM_ALWON_BASE)
#define DM816X_CM_ALWON_SR_1_CLKCTRL	(0x160c - DM816X_DM_ALWON_BASE)

/*
 * The default .clkctrl_offs field is offset from CM_DEFAULT, that's
 * TRM 18.7.6 CM_DEFAULT device register values minus 0x500
 */
#define DM816X_CM_DEFAULT_OFFSET	0x500
#define DM816X_CM_DEFAULT_USB_CLKCTRL	(0x558 - DM816X_CM_DEFAULT_OFFSET)

/* L3 Interconnect entries clocked at 125, 250 and 500MHz */
static struct omap_hwmod dm816x_alwon_l3_slow_hwmod = {
	.name		= "alwon_l3_slow",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

static struct omap_hwmod dm816x_default_l3_slow_hwmod = {
	.name		= "default_l3_slow",
	.clkdm_name	= "default_l3_slow_clkdm",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

static struct omap_hwmod dm816x_alwon_l3_med_hwmod = {
	.name		= "l3_med",
	.clkdm_name	= "alwon_l3_med_clkdm",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

static struct omap_hwmod dm816x_alwon_l3_fast_hwmod = {
	.name		= "l3_fast",
	.clkdm_name	= "alwon_l3_fast_clkdm",
	.class		= &l3_hwmod_class,
	.flags		= HWMOD_NO_IDLEST,
};

/*
 * L4 standard peripherals, see TRM table 1-12 for devices using this.
 * See TRM table 1-73 for devices using the 125MHz SYSCLK6 clock.
 */
static struct omap_hwmod dm816x_l4_ls_hwmod = {
	.name		= "l4_ls",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &l4_hwmod_class,
};

/*
 * L4 high-speed peripherals. For devices using this, please see the TRM
 * table 1-13. On dm816x, only EMAC, MDIO and SATA use this. See also TRM
 * table 1-73 for devices using 250MHz SYSCLK5 clock.
 */
static struct omap_hwmod dm816x_l4_hs_hwmod = {
	.name		= "l4_hs",
	.clkdm_name	= "alwon_l3_med_clkdm",
	.class		= &l4_hwmod_class,
};

/* L3 slow -> L4 ls peripheral interface running at 125MHz */
static struct omap_hwmod_ocp_if dm816x_alwon_l3_slow__l4_ls = {
	.master		= &dm816x_alwon_l3_slow_hwmod,
	.slave		= &dm816x_l4_ls_hwmod,
	.user		= OCP_USER_MPU,
};

/*
 * L3 med -> L4 fast peripheral interface running at 250MHz.
 * NOTE(review): the identifier says "l3_slow" but the master really is
 * the l3_med hwmod -- confirm against the TRM before renaming.
 */
static struct omap_hwmod_ocp_if dm816x_alwon_l3_slow__l4_hs = {
	.master		= &dm816x_alwon_l3_med_hwmod,
	.slave		= &dm816x_l4_hs_hwmod,
	.user		= OCP_USER_MPU,
};

/* MPU: kept from idling at init so the kernel keeps running */
static struct omap_hwmod dm816x_mpu_hwmod = {
	.name		= "mpu",
	.clkdm_name	= "alwon_mpu_clkdm",
	.class		= &mpu_hwmod_class,
	.flags		= HWMOD_INIT_NO_IDLE,
	.main_clk	= "mpu_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_MPU_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_slow = {
	.master		= &dm816x_mpu_hwmod,
	.slave		= &dm816x_alwon_l3_slow_hwmod,
	.user		= OCP_USER_MPU,
};

/* L3 med peripheral interface running at 250MHz */
static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_med = {
	.master		= &dm816x_mpu_hwmod,
	.slave		= &dm816x_alwon_l3_med_hwmod,
	.user		= OCP_USER_MPU,
};

/* UART common: one sysconfig/class shared by uart1-3 below */
static struct omap_hwmod_class_sysconfig uart_sysc = {
	.rev_offs	= 0x50,
	.sysc_offs	= 0x54,
	.syss_offs	= 0x58,
	.sysc_flags	= SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
				SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
				SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
				MSTANDBY_SMART_WKUP,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class uart_class = {
	.name = "uart",
	.sysc = &uart_sysc,
};

static struct omap_hwmod dm816x_uart1_hwmod = {
	.name		= "uart1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_UART_0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &uart_class,
	.flags		= DEBUG_TI81XXUART1_FLAGS,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__uart1 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_uart1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_uart2_hwmod = {
	.name		= "uart2",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_UART_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &uart_class,
	.flags		= DEBUG_TI81XXUART2_FLAGS,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__uart2 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_uart2_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_uart3_hwmod = {
	.name		= "uart3",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_UART_2_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &uart_class,
	.flags		= DEBUG_TI81XXUART3_FLAGS,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__uart3 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_uart3_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

/* Watchdog timer; uses the wd_timer helpers for shutdown/reset */
static struct omap_hwmod_class_sysconfig wd_timer_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x14,
	.sysc_flags	= SYSC_HAS_EMUFREE | SYSC_HAS_SOFTRESET |
				SYSS_HAS_RESET_STATUS,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class wd_timer_class = {
	.name		= "wd_timer",
	.sysc		= &wd_timer_sysc,
	.pre_shutdown	= &omap2_wd_timer_disable,
	.reset		= &omap2_wd_timer_reset,
};

static struct omap_hwmod dm816x_wd_timer_hwmod = {
	.name		= "wd_timer",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk18_ck",
	.flags		= HWMOD_NO_IDLEST,
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_WDTIMER_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &wd_timer_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__wd_timer1 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_wd_timer_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

/* I2C common */
static struct omap_hwmod_class_sysconfig i2c_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x90,
	.sysc_flags	= SYSC_HAS_SIDLEMODE |
				SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
				SYSC_HAS_AUTOIDLE,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class i2c_class = {
	.name = "i2c",
	.sysc = &i2c_sysc,
};

static struct omap_hwmod dm81xx_i2c1_hwmod = {
	.name		= "i2c1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_I2C_0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &i2c_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__i2c1 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm81xx_i2c1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_i2c2_hwmod = {
	.name		= "i2c2",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_I2C_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &i2c_class,
};

/* ELM (error location module, used with NAND ECC) sysconfig */
static struct omap_hwmod_class_sysconfig dm81xx_elm_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
				SYSC_HAS_SOFTRESET |
				SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__i2c2 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_i2c2_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_class dm81xx_elm_hwmod_class = {
	.name = "elm",
	.sysc = &dm81xx_elm_sysc,
};

/* NOTE(review): elm has no .prcm entry here -- no CM_ALWON clkctrl
 * offset is programmed for it; confirm this is intentional. */
static struct omap_hwmod dm81xx_elm_hwmod = {
	.name		= "elm",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_elm_hwmod_class,
	.main_clk	= "sysclk6_ck",
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__elm = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm81xx_elm_hwmod,
	.user		= OCP_USER_MPU,
};

/* GPIO common */
static struct omap_hwmod_class_sysconfig dm81xx_gpio_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0114,
	.sysc_flags	= SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
				SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
				SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
				SIDLE_SMART_WKUP,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm81xx_gpio_hwmod_class = {
	.name	= "gpio",
	.sysc	= &dm81xx_gpio_sysc,
	.rev	= 2,
};

static struct omap_gpio_dev_attr gpio_dev_attr = {
	.bank_width	= 32,
	.dbck_flag	= true,
};

static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
	{ .role = "dbclk", .clk = "sysclk18_ck" },
};

static struct omap_hwmod dm81xx_gpio1_hwmod = {
	.name		= "gpio1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpio_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_GPIO_0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.opt_clks	= gpio1_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio1_opt_clks),
	.dev_attr	= &gpio_dev_attr,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio1 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm81xx_gpio1_hwmod,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
	{ .role = "dbclk", .clk = "sysclk18_ck" },
};

static struct omap_hwmod dm81xx_gpio2_hwmod = {
	.name		= "gpio2",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpio_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_GPIO_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.opt_clks	= gpio2_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(gpio2_opt_clks),
	.dev_attr	= &gpio_dev_attr,
};

static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio2 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm81xx_gpio2_hwmod,
	.user		= OCP_USER_MPU,
};

/* GPMC (general-purpose memory controller) */
static struct omap_hwmod_class_sysconfig dm81xx_gpmc_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.syss_offs	= 0x14,
	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
				SYSC_HAS_AUTOIDLE |
				SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm81xx_gpmc_hwmod_class = {
	.name	= "gpmc",
	.sysc	= &dm81xx_gpmc_sysc,
};

static struct omap_hwmod dm81xx_gpmc_hwmod = {
	.name		= "gpmc",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm81xx_gpmc_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_GPMC_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

/* Non-static: also referenced from outside this file */
struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
	.master		= &dm816x_alwon_l3_slow_hwmod,
	.slave		= &dm81xx_gpmc_hwmod,
	.user		= OCP_USER_MPU,
};

/* USB OTG subsystem; note type2 sysconfig layout, unlike most above */
static struct omap_hwmod_class_sysconfig dm81xx_usbhsotg_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x10,
	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
				SYSC_HAS_SOFTRESET,
	.idlemodes	= SIDLE_SMART | MSTANDBY_FORCE | MSTANDBY_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class dm81xx_usbotg_class = {
	.name = "usbotg",
	.sysc = &dm81xx_usbhsotg_sysc,
};

/* Lives in the CM_DEFAULT domain, not CM_ALWON */
static struct omap_hwmod dm81xx_usbss_hwmod = {
	.name		= "usb_otg_hs",
	.clkdm_name	= "default_l3_slow_clkdm",
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_DEFAULT_USB_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &dm81xx_usbotg_class,
};

static struct omap_hwmod_ocp_if dm81xx_default_l3_slow__usbss = {
	.master		= &dm816x_default_l3_slow_hwmod,
	.slave		= &dm81xx_usbss_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

/* dmtimer common */
static struct omap_hwmod_class_sysconfig dm816x_timer_sysc = {
	.rev_offs	= 0x0000,
	.sysc_offs	= 0x0010,
	.syss_offs	= 0x0014,
	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
				SIDLE_SMART_WKUP,
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class dm816x_timer_hwmod_class = {
	.name = "timer",
	.sysc = &dm816x_timer_sysc,
};

/* All timers below are in the always-on power domain */
static struct omap_timer_capability_dev_attr capability_alwon_dev_attr = {
	.timer_capability	= OMAP_TIMER_ALWON,
};

static struct omap_hwmod dm816x_timer1_hwmod = {
	.name		= "timer1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "timer1_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TIMER_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.class		= &dm816x_timer_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__timer1 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_timer1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_timer2_hwmod = {
	.name		= "timer2",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "timer2_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TIMER_2_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.class		= &dm816x_timer_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__timer2 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_timer2_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_timer3_hwmod = {
	.name		= "timer3",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "timer3_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TIMER_3_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.class		= &dm816x_timer_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__timer3 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_timer3_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_timer4_hwmod = {
	.name		= "timer4",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "timer4_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TIMER_4_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.class		= &dm816x_timer_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__timer4 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_timer4_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_timer5_hwmod = {
	.name		= "timer5",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "timer5_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TIMER_5_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.class		= &dm816x_timer_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__timer5 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_timer5_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_timer6_hwmod = {
	.name		= "timer6",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "timer6_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TIMER_6_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.class		= &dm816x_timer_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__timer6 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_timer6_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_timer7_hwmod = {
	.name		= "timer7",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "timer7_fck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TIMER_7_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &capability_alwon_dev_attr,
	.class		= &dm816x_timer_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__timer7 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_timer7_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

/* EMAC Ethernet */
static struct omap_hwmod_class_sysconfig dm816x_emac_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x4,
	.sysc_flags	= SYSC_HAS_SOFTRESET,
	.sysc_fields	= &omap_hwmod_sysc_type2,
};

static struct omap_hwmod_class dm816x_emac_hwmod_class = {
	.name		= "emac",
	.sysc		= &dm816x_emac_sysc,
};

/*
 * On dm816x the MDIO is within EMAC0. As the MDIO driver is a separate
 * driver probed before EMAC0, we let MDIO do the clock idling.
 */
static struct omap_hwmod dm816x_emac0_hwmod = {
	.name		= "emac0",
	.clkdm_name	= "alwon_ethernet_clkdm",
	.class		= &dm816x_emac_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_hs__emac0 = {
	.master		= &dm816x_l4_hs_hwmod,
	.slave		= &dm816x_emac0_hwmod,
	.clk		= "sysclk5_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_class dm816x_mdio_hwmod_class = {
	.name		= "davinci_mdio",
	.sysc		= &dm816x_emac_sysc,
};

/* Non-static: also referenced from outside this file */
struct omap_hwmod dm816x_emac0_mdio_hwmod = {
	.name		= "davinci_mdio",
	.class		= &dm816x_mdio_hwmod_class,
	.clkdm_name	= "alwon_ethernet_clkdm",
	.main_clk	= "sysclk24_ck",
	.flags		= HWMOD_NO_IDLEST,
	/*
	 * REVISIT: This should be moved to the emac0_hwmod
	 * once we have a better way to handle device slaves.
	 */
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_ETHERNET_0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

/* NOTE(review): identifier says emac0__mdio but the master here is the
 * l4_hs hwmod, not emac0 -- verify before relying on the name. */
struct omap_hwmod_ocp_if dm816x_emac0__mdio = {
	.master		= &dm816x_l4_hs_hwmod,
	.slave		= &dm816x_emac0_mdio_hwmod,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod dm816x_emac1_hwmod = {
	.name		= "emac1",
	.clkdm_name	= "alwon_ethernet_clkdm",
	.main_clk	= "sysclk24_ck",
	.flags		= HWMOD_NO_IDLEST,
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_ETHERNET_1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &dm816x_emac_hwmod_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_hs__emac1 = {
	.master		= &dm816x_l4_hs_hwmod,
	.slave		= &dm816x_emac1_hwmod,
	.clk		= "sysclk5_ck",
	.user		= OCP_USER_MPU,
};

/* MMC/SD (SDIO module in CM_ALWON) */
static struct omap_hwmod_class_sysconfig dm816x_mmc_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x110,
	.syss_offs	= 0x114,
	.sysc_flags	= SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
				SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
				SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm816x_mmc_class = {
	.name = "mmc",
	.sysc = &dm816x_mmc_sysc,
};

static struct omap_hwmod_opt_clk dm816x_mmc1_opt_clks[] = {
	{ .role = "dbck", .clk = "sysclk18_ck", },
};

static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
	.flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
};

static struct omap_hwmod dm816x_mmc1_hwmod = {
	.name		= "mmc1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.opt_clks	= dm816x_mmc1_opt_clks,
	.opt_clks_cnt	= ARRAY_SIZE(dm816x_mmc1_opt_clks),
	.main_clk	= "sysclk10_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_SDIO_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.dev_attr	= &mmc1_dev_attr,
	.class		= &dm816x_mmc_class,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__mmc1 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_mmc1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
	.flags		= OMAP_FIREWALL_L4
};

/* McSPI */
static struct omap_hwmod_class_sysconfig dm816x_mcspi_sysc = {
	.rev_offs	= 0x0,
	.sysc_offs	= 0x110,
	.syss_offs	= 0x114,
	.sysc_flags	= SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
				SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
				SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm816x_mcspi_class = {
	.name = "mcspi",
	.sysc = &dm816x_mcspi_sysc,
	.rev = OMAP3_MCSPI_REV,
};

static struct omap2_mcspi_dev_attr dm816x_mcspi1_dev_attr = {
	.num_chipselect	= 4,
};

static struct omap_hwmod dm816x_mcspi1_hwmod = {
	.name		= "mcspi1",
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk10_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_SPI_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
	.class		= &dm816x_mcspi_class,
	.dev_attr	= &dm816x_mcspi1_dev_attr,
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__mcspi1 = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_mcspi1_hwmod,
	.clk		= "sysclk6_ck",
	.user		= OCP_USER_MPU,
};

/* Mailbox */
static struct omap_hwmod_class_sysconfig dm816x_mailbox_sysc = {
	.rev_offs	= 0x000,
	.sysc_offs	= 0x010,
	.syss_offs	= 0x014,
	.sysc_flags	= SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
				SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE,
	.idlemodes	= SIDLE_FORCE | SIDLE_NO | SIDLE_SMART,
	.sysc_fields	= &omap_hwmod_sysc_type1,
};

static struct omap_hwmod_class dm816x_mailbox_hwmod_class = {
	.name = "mailbox",
	.sysc = &dm816x_mailbox_sysc,
};

static struct omap_hwmod dm816x_mailbox_hwmod = {
	.name		= "mailbox",
	.clkdm_name	= "alwon_l3s_clkdm",
	.class		= &dm816x_mailbox_hwmod_class,
	.main_clk	= "sysclk6_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_MAILBOX_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

static struct omap_hwmod_ocp_if dm816x_l4_ls__mailbox = {
	.master		= &dm816x_l4_ls_hwmod,
	.slave		= &dm816x_mailbox_hwmod,
	.user		= OCP_USER_MPU,
};

/* EDMA channel controller (TPCC) and transfer controllers (TPTC0-3).
 * The hwmods and links below are non-static: shared with board/SoC code. */
static struct omap_hwmod_class dm816x_tpcc_hwmod_class = {
	.name		= "tpcc",
};

struct omap_hwmod dm816x_tpcc_hwmod = {
	.name		= "tpcc",
	.class		= &dm816x_tpcc_hwmod_class,
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk4_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TPCC_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tpcc = {
	.master		= &dm816x_alwon_l3_fast_hwmod,
	.slave		= &dm816x_tpcc_hwmod,
	.clk		= "sysclk4_ck",
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space dm816x_tptc0_addr_space[] = {
	{
		.pa_start	= 0x49800000,
		.pa_end		= 0x49800000 + SZ_8K - 1,
		.flags		= ADDR_TYPE_RT,
	},
	{ },
};

static struct omap_hwmod_class dm816x_tptc0_hwmod_class = {
	.name		= "tptc0",
};

struct omap_hwmod dm816x_tptc0_hwmod = {
	.name		= "tptc0",
	.class		= &dm816x_tptc0_hwmod_class,
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk4_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TPTC0_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc0 = {
	.master		= &dm816x_alwon_l3_fast_hwmod,
	.slave		= &dm816x_tptc0_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc0_addr_space,
	.user		= OCP_USER_MPU,
};

struct omap_hwmod_ocp_if dm816x_tptc0__alwon_l3_fast = {
	.master		= &dm816x_tptc0_hwmod,
	.slave		= &dm816x_alwon_l3_fast_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc0_addr_space,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space dm816x_tptc1_addr_space[] = {
	{
		.pa_start	= 0x49900000,
		.pa_end		= 0x49900000 + SZ_8K - 1,
		.flags		= ADDR_TYPE_RT,
	},
	{ },
};

static struct omap_hwmod_class dm816x_tptc1_hwmod_class = {
	.name		= "tptc1",
};

struct omap_hwmod dm816x_tptc1_hwmod = {
	.name		= "tptc1",
	.class		= &dm816x_tptc1_hwmod_class,
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk4_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TPTC1_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc1 = {
	.master		= &dm816x_alwon_l3_fast_hwmod,
	.slave		= &dm816x_tptc1_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc1_addr_space,
	.user		= OCP_USER_MPU,
};

struct omap_hwmod_ocp_if dm816x_tptc1__alwon_l3_fast = {
	.master		= &dm816x_tptc1_hwmod,
	.slave		= &dm816x_alwon_l3_fast_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc1_addr_space,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space dm816x_tptc2_addr_space[] = {
	{
		.pa_start	= 0x49a00000,
		.pa_end		= 0x49a00000 + SZ_8K - 1,
		.flags		= ADDR_TYPE_RT,
	},
	{ },
};

static struct omap_hwmod_class dm816x_tptc2_hwmod_class = {
	.name		= "tptc2",
};

struct omap_hwmod dm816x_tptc2_hwmod = {
	.name		= "tptc2",
	.class		= &dm816x_tptc2_hwmod_class,
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk4_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TPTC2_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc2 = {
	.master		= &dm816x_alwon_l3_fast_hwmod,
	.slave		= &dm816x_tptc2_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc2_addr_space,
	.user		= OCP_USER_MPU,
};

struct omap_hwmod_ocp_if dm816x_tptc2__alwon_l3_fast = {
	.master		= &dm816x_tptc2_hwmod,
	.slave		= &dm816x_alwon_l3_fast_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc2_addr_space,
	.user		= OCP_USER_MPU,
};

static struct omap_hwmod_addr_space dm816x_tptc3_addr_space[] = {
	{
		.pa_start	= 0x49b00000,
		.pa_end		= 0x49b00000 + SZ_8K - 1,
		.flags		= ADDR_TYPE_RT,
	},
	{ },
};

static struct omap_hwmod_class dm816x_tptc3_hwmod_class = {
	.name		= "tptc3",
};

struct omap_hwmod dm816x_tptc3_hwmod = {
	.name		= "tptc3",
	.class		= &dm816x_tptc3_hwmod_class,
	.clkdm_name	= "alwon_l3s_clkdm",
	.main_clk	= "sysclk4_ck",
	.prcm = {
		.omap4 = {
			.clkctrl_offs = DM816X_CM_ALWON_TPTC3_CLKCTRL,
			.modulemode = MODULEMODE_SWCTRL,
		},
	},
};

struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc3 = {
	.master		= &dm816x_alwon_l3_fast_hwmod,
	.slave		= &dm816x_tptc3_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc3_addr_space,
	.user		= OCP_USER_MPU,
};

struct omap_hwmod_ocp_if dm816x_tptc3__alwon_l3_fast = {
	.master		= &dm816x_tptc3_hwmod,
	.slave		= &dm816x_alwon_l3_fast_hwmod,
	.clk		= "sysclk4_ck",
	.addr		= dm816x_tptc3_addr_space,
	.user		= OCP_USER_MPU,
};

/* NULL-terminated list of all interconnect links registered at init */
static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = {
	&dm816x_mpu__alwon_l3_slow,
	&dm816x_mpu__alwon_l3_med,
	&dm816x_alwon_l3_slow__l4_ls,
	&dm816x_alwon_l3_slow__l4_hs,
	&dm816x_l4_ls__uart1,
	&dm816x_l4_ls__uart2,
	&dm816x_l4_ls__uart3,
	&dm816x_l4_ls__wd_timer1,
	&dm816x_l4_ls__i2c1,
	&dm816x_l4_ls__i2c2,
	&dm81xx_l4_ls__gpio1,
	&dm81xx_l4_ls__gpio2,
	&dm81xx_l4_ls__elm,
	&dm816x_l4_ls__mmc1,
	&dm816x_l4_ls__timer1,
	&dm816x_l4_ls__timer2,
	&dm816x_l4_ls__timer3,
	&dm816x_l4_ls__timer4,
	&dm816x_l4_ls__timer5,
	&dm816x_l4_ls__timer6,
	&dm816x_l4_ls__timer7,
	&dm816x_l4_ls__mcspi1,
	&dm816x_l4_ls__mailbox,
	&dm816x_l4_hs__emac0,
	&dm816x_emac0__mdio,
	&dm816x_l4_hs__emac1,
	&dm816x_alwon_l3_fast__tpcc,
	&dm816x_alwon_l3_fast__tptc0,
	&dm816x_alwon_l3_fast__tptc1,
	&dm816x_alwon_l3_fast__tptc2,
	&dm816x_alwon_l3_fast__tptc3,
	&dm816x_tptc0__alwon_l3_fast,
	&dm816x_tptc1__alwon_l3_fast,
	&dm816x_tptc2__alwon_l3_fast,
	&dm816x_tptc3__alwon_l3_fast,
	&dm81xx_alwon_l3_slow__gpmc,
	&dm81xx_default_l3_slow__usbss,
	NULL,
};

/*
 * ti81xx_hwmod_init - register the TI81xx hwmod interconnect data
 *
 * Initializes the hwmod code and registers all links above.
 * Returns 0 on success, or the error from omap_hwmod_register_links().
 */
int __init ti81xx_hwmod_init(void)
{
	omap_hwmod_init();
	return omap_hwmod_register_links(dm816x_hwmod_ocp_ifs);
}
gpl-2.0
KlinkOnE/android_kernel_kyleoc2
net/ipv4/ip_fragment.c
605
20364
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

/* Max "fragment distance" before a queue is considered stale; 0 disables
 * the check entirely (see ip_frag_too_far()). */
static int sysctl_ipfrag_max_dist __read_mostly = 64;

/* Per-skb reassembly state, overlaid on skb->cb via FRAG_CB() */
struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;	/* payload offset of this fragment */
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;		/* IP_DEFRAG_* caller identity */
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int             iif;		/* incoming interface index */
	unsigned int    rid;		/* "fragment distance" marker */
	struct inet_peer *peer;
};

/* RFC 3168 support :
 * We want to check ECN values of all fragments, do detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

/* Map a fragment's ECN bits to one of the IPFRAG_ECN_* flag bits */
static inline u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
static const u8 ip4_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};

static struct inet_frags ip4_frags;

/* Number of reassembly queues in this netns (for /proc reporting) */
int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

/* Memory currently consumed by fragments in this netns */
int ip_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

/* Hash the (id, saddr, daddr, protocol) tuple into the frag hash table */
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

/* inet_frags callback: rehash an existing queue */
static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

/* inet_frags callback: does queue @q match lookup key @a?
 * Note the user field must match too, so e.g. conntrack and local
 * delivery defragmentation keep separate queues. */
static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp;
	struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
			qp->saddr == arg->iph->saddr &&
			qp->daddr == arg->iph->daddr &&
			qp->protocol == arg->iph->protocol &&
			qp->user == arg->user;
}

/* Memory Tracking Functions. */
static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

/* inet_frags callback: initialize a freshly allocated queue from the
 * lookup key. May take an inet_peer reference for the distance check. */
static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
}

/* inet_frags callback: drop the inet_peer reference, if any */
static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments.  Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 * Runs as the queue's timer callback; takes q.lock and drops the
 * timer's reference via ipq_put() on exit.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/*
		 * Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_CONNTRACK_IN &&
		    skb_rtable(head)->rt_type != RTN_LOCAL)
			goto out_rcu_unlock;


		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 * Returns NULL on lookup/allocation failure (overflow is warned once).
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq?
 * Uses the per-peer "rid" counter to detect queues that have fallen
 * more than sysctl_ipfrag_max_dist fragments behind the sender.
 */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

/* Restart a stale queue: free all queued fragments and reset state.
 * Returns -ETIMEDOUT (keeping a reference) if the timer could not be
 * extended, i.e. expiry is already in progress.
 */
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(qp->q.net, fp);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue.
*/ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { struct sk_buff *prev, *next; struct net_device *dev; int flags, offset; int ihl, end; int err = -ENOENT; u8 ecn; if (qp->q.last_in & INET_FRAG_COMPLETE) goto err; if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && unlikely(ip_frag_too_far(qp)) && unlikely(err = ip_frag_reinit(qp))) { ipq_kill(qp); goto err; } ecn = ip4_frag_ecn(ip_hdr(skb)->tos); offset = ntohs(ip_hdr(skb)->frag_off); flags = offset & ~IP_OFFSET; offset &= IP_OFFSET; offset <<= 3; /* offset is in 8-byte chunks */ ihl = ip_hdrlen(skb); /* Determine the position of this fragment. */ end = offset + skb->len - ihl; err = -EINVAL; /* Is this the final fragment? */ if ((flags & IP_MF) == 0) { /* If we already have some bits beyond end * or have different end, the segment is corrrupted. */ if (end < qp->q.len || ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len)) goto err; qp->q.last_in |= INET_FRAG_LAST_IN; qp->q.len = end; } else { if (end&7) { end &= ~7; if (skb->ip_summed != CHECKSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_NONE; } if (end > qp->q.len) { /* Some bits beyond end -> corruption. */ if (qp->q.last_in & INET_FRAG_LAST_IN) goto err; qp->q.len = end; } } if (end == offset) goto err; err = -ENOMEM; if (pskb_pull(skb, ihl) == NULL) goto err; err = pskb_trim_rcsum(skb, end - offset); if (err) goto err; /* Find out which fragments are in front and at the back of us * in the chain of fragments so far. We must know where to put * this fragment, right? */ prev = qp->q.fragments_tail; if (!prev || FRAG_CB(prev)->offset < offset) { next = NULL; goto found; } prev = NULL; for (next = qp->q.fragments; next != NULL; next = next->next) { if (FRAG_CB(next)->offset >= offset) break; /* bingo! */ prev = next; } found: /* We found where to put this one. Check for overlap with * preceding fragment, and, if needed, align things so that * any overlaps are eliminated. 
*/ if (prev) { int i = (FRAG_CB(prev)->offset + prev->len) - offset; if (i > 0) { offset += i; err = -EINVAL; if (end <= offset) goto err; err = -ENOMEM; if (!pskb_pull(skb, i)) goto err; if (skb->ip_summed != CHECKSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_NONE; } } err = -ENOMEM; while (next && FRAG_CB(next)->offset < end) { int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */ if (i < next->len) { /* Eat head of the next overlapped fragment * and leave the loop. The next ones cannot overlap. */ if (!pskb_pull(next, i)) goto err; FRAG_CB(next)->offset += i; qp->q.meat -= i; if (next->ip_summed != CHECKSUM_UNNECESSARY) next->ip_summed = CHECKSUM_NONE; break; } else { struct sk_buff *free_it = next; /* Old fragment is completely overridden with * new one drop it. */ next = next->next; if (prev) prev->next = next; else qp->q.fragments = next; qp->q.meat -= free_it->len; frag_kfree_skb(qp->q.net, free_it); } } FRAG_CB(skb)->offset = offset; /* Insert this fragment in the chain of fragments. */ skb->next = next; if (!next) qp->q.fragments_tail = skb; if (prev) prev->next = skb; else qp->q.fragments = skb; dev = skb->dev; if (dev) { qp->iif = dev->ifindex; skb->dev = NULL; } qp->q.stamp = skb->tstamp; qp->q.meat += skb->len; qp->ecn |= ecn; atomic_add(skb->truesize, &qp->q.net->mem); if (offset == 0) qp->q.last_in |= INET_FRAG_FIRST_IN; if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && qp->q.meat == qp->q.len) { unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; err = ip_frag_reasm(qp, prev, dev); skb->_skb_refdst = orefdst; return err; } skb_dst_drop(skb); write_lock(&ip4_frags.lock); list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list); write_unlock(&ip4_frags.lock); return -EINPROGRESS; err: kfree_skb(skb); return err; } /* Build a new IP datagram from all its fragments. 
*/ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, struct net_device *dev) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; struct sk_buff *fp, *head = qp->q.fragments; int len; int ihlen; int err; u8 ecn; ipq_kill(qp); ecn = ip4_frag_ecn_table[qp->ecn]; if (unlikely(ecn == 0xff)) { err = -EINVAL; goto out_fail; } /* Make the one we just received the head. */ if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_nomem; fp->next = head->next; if (!fp->next) qp->q.fragments_tail = fp; prev->next = fp; skb_morph(head, qp->q.fragments); head->next = qp->q.fragments->next; kfree_skb(qp->q.fragments); qp->q.fragments = head; } WARN_ON(head == NULL); WARN_ON(FRAG_CB(head)->offset != 0); /* Allocate a new buffer for the datagram. */ ihlen = ip_hdrlen(head); len = ihlen + qp->q.len; err = -E2BIG; if (len > 65535) goto out_oversize; /* Head of list must not be cloned. */ if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) goto out_nomem; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. 
*/ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_nomem; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i=0; i<skb_shinfo(head)->nr_frags; i++) plen += skb_shinfo(head)->frags[i].size; clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; atomic_add(clone->truesize, &qp->q.net->mem); } skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } atomic_sub(head->truesize, &qp->q.net->mem); head->next = NULL; head->dev = dev; head->tstamp = qp->q.stamp; iph = ip_hdr(head); iph->frag_off = 0; iph->tot_len = htons(len); iph->tos |= ecn; IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; return 0; out_nomem: LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " "queue %p\n", qp); err = -ENOMEM; goto out_fail; out_oversize: if (net_ratelimit()) printk(KERN_INFO "Oversized IP packet from %pI4.\n", &qp->saddr); out_fail: IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); return err; } /* Process an incoming IP datagram fragment. */ int ip_defrag(struct sk_buff *skb, u32 user) { struct ipq *qp; struct net *net; net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); /* Start by cleaning up the memory. 
*/ if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) ip_evictor(net); /* Lookup (or create) queue header */ if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) { int ret; spin_lock(&qp->q.lock); ret = ip_frag_queue(qp, skb); spin_unlock(&qp->q.lock); ipq_put(qp); return ret; } IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -ENOMEM; } EXPORT_SYMBOL(ip_defrag); #ifdef CONFIG_SYSCTL static int zero; static struct ctl_table ip4_frags_ns_ctl_table[] = { { .procname = "ipfrag_high_thresh", .data = &init_net.ipv4.frags.high_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "ipfrag_low_thresh", .data = &init_net.ipv4.frags.low_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "ipfrag_time", .data = &init_net.ipv4.frags.timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; static struct ctl_table ip4_frags_ctl_table[] = { { .procname = "ipfrag_secret_interval", .data = &ip4_frags.secret_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ipfrag_max_dist", .data = &sysctl_ipfrag_max_dist, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero }, { } }; static int __net_init ip4_frags_ns_ctl_register(struct net *net) { struct ctl_table *table; struct ctl_table_header *hdr; table = ip4_frags_ns_ctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); if (table == NULL) goto err_alloc; table[0].data = &net->ipv4.frags.high_thresh; table[1].data = &net->ipv4.frags.low_thresh; table[2].data = &net->ipv4.frags.timeout; } hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); if (hdr == NULL) goto err_reg; net->ipv4.frags_hdr = hdr; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } static void __net_exit 
ip4_frags_ns_ctl_unregister(struct net *net) { struct ctl_table *table; table = net->ipv4.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.frags_hdr); kfree(table); } static void ip4_frags_ctl_register(void) { register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table); } #else static inline int ip4_frags_ns_ctl_register(struct net *net) { return 0; } static inline void ip4_frags_ns_ctl_unregister(struct net *net) { } static inline void ip4_frags_ctl_register(void) { } #endif static int __net_init ipv4_frags_init_net(struct net *net) { /* * Fragment cache limits. We will commit 256K at one time. Should we * cross that limit we will prune down to 192K. This should cope with * even the most extreme cases without allowing an attacker to * measurably harm machine performance. */ net->ipv4.frags.high_thresh = 256 * 1024; net->ipv4.frags.low_thresh = 192 * 1024; /* * Important NOTE! Fragment queue must be destroyed before MSL expires. * RFC791 is wrong proposing to prolongate timer each fragment arrival * by TTL. */ net->ipv4.frags.timeout = IP_FRAG_TIME; inet_frags_init_net(&net->ipv4.frags); return ip4_frags_ns_ctl_register(net); } static void __net_exit ipv4_frags_exit_net(struct net *net) { ip4_frags_ns_ctl_unregister(net); inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); } static struct pernet_operations ip4_frags_ops = { .init = ipv4_frags_init_net, .exit = ipv4_frags_exit_net, }; void __init ipfrag_init(void) { ip4_frags_ctl_register(); register_pernet_subsys(&ip4_frags_ops); ip4_frags.hashfn = ip4_hashfn; ip4_frags.constructor = ip4_frag_init; ip4_frags.destructor = ip4_frag_free; ip4_frags.skb_free = NULL; ip4_frags.qsize = sizeof(struct ipq); ip4_frags.match = ip4_frag_match; ip4_frags.frag_expire = ip_expire; ip4_frags.secret_interval = 10 * 60 * HZ; inet_frags_init(&ip4_frags); }
gpl-2.0
Dinjesk/android_kernel_oneplus_msm8996
drivers/net/wireless/airo.c
605
222798
/*====================================================================== Aironet driver for 4500 and 4800 series cards This code is released under both the GPL version 2 and BSD licenses. Either license may be used. The respective licenses are found at the end of this file. This code was developed by Benjamin Reed <breed@users.sourceforge.net> including portions of which come from the Aironet PC4500 Developer's Reference Manual and used with permission. Copyright (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use code in the Developer's manual was granted for this driver by Aironet. Major code contributions were received from Javier Achirica <achirica@users.sourceforge.net> and Jean Tourrilhes <jt@hpl.hp.com>. Code was also integrated from the Cisco Aironet driver for Linux. Support for MPI350 cards was added by Fabrice Bellet <fabrice@bellet.info>. ======================================================================*/ #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/bitops.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/io.h> #include <asm/unaligned.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <net/cfg80211.h> #include <net/iw_handler.h> #include "airo.h" #define DRV_NAME "airo" #ifdef CONFIG_PCI static const struct pci_device_id card_ids[] = { { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID }, { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0x0340, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0x0350, PCI_ANY_ID, 
PCI_ANY_ID, }, { 0x14b9, 0x5000, PCI_ANY_ID, PCI_ANY_ID, }, { 0x14b9, 0xa504, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; MODULE_DEVICE_TABLE(pci, card_ids); static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *); static void airo_pci_remove(struct pci_dev *); static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state); static int airo_pci_resume(struct pci_dev *pdev); static struct pci_driver airo_driver = { .name = DRV_NAME, .id_table = card_ids, .probe = airo_pci_probe, .remove = airo_pci_remove, .suspend = airo_pci_suspend, .resume = airo_pci_resume, }; #endif /* CONFIG_PCI */ /* Include Wireless Extension definition and check version - Jean II */ #include <linux/wireless.h> #define WIRELESS_SPY /* enable iwspy support */ #define CISCO_EXT /* enable Cisco extensions */ #ifdef CISCO_EXT #include <linux/delay.h> #endif /* Hack to do some power saving */ #define POWER_ON_DOWN /* As you can see this list is HUGH! I really don't know what a lot of these counts are about, but they are all here for completeness. 
If the IGNLABEL macro is put in infront of the label, that statistic will not be included in the list of statistics in the /proc filesystem */ #define IGNLABEL(comment) NULL static const char *statsLabels[] = { "RxOverrun", IGNLABEL("RxPlcpCrcErr"), IGNLABEL("RxPlcpFormatErr"), IGNLABEL("RxPlcpLengthErr"), "RxMacCrcErr", "RxMacCrcOk", "RxWepErr", "RxWepOk", "RetryLong", "RetryShort", "MaxRetries", "NoAck", "NoCts", "RxAck", "RxCts", "TxAck", "TxRts", "TxCts", "TxMc", "TxBc", "TxUcFrags", "TxUcPackets", "TxBeacon", "RxBeacon", "TxSinColl", "TxMulColl", "DefersNo", "DefersProt", "DefersEngy", "DupFram", "RxFragDisc", "TxAged", "RxAged", "LostSync-MaxRetry", "LostSync-MissedBeacons", "LostSync-ArlExceeded", "LostSync-Deauth", "LostSync-Disassoced", "LostSync-TsfTiming", "HostTxMc", "HostTxBc", "HostTxUc", "HostTxFail", "HostRxMc", "HostRxBc", "HostRxUc", "HostRxDiscard", IGNLABEL("HmacTxMc"), IGNLABEL("HmacTxBc"), IGNLABEL("HmacTxUc"), IGNLABEL("HmacTxFail"), IGNLABEL("HmacRxMc"), IGNLABEL("HmacRxBc"), IGNLABEL("HmacRxUc"), IGNLABEL("HmacRxDiscard"), IGNLABEL("HmacRxAccepted"), "SsidMismatch", "ApMismatch", "RatesMismatch", "AuthReject", "AuthTimeout", "AssocReject", "AssocTimeout", IGNLABEL("ReasonOutsideTable"), IGNLABEL("ReasonStatus1"), IGNLABEL("ReasonStatus2"), IGNLABEL("ReasonStatus3"), IGNLABEL("ReasonStatus4"), IGNLABEL("ReasonStatus5"), IGNLABEL("ReasonStatus6"), IGNLABEL("ReasonStatus7"), IGNLABEL("ReasonStatus8"), IGNLABEL("ReasonStatus9"), IGNLABEL("ReasonStatus10"), IGNLABEL("ReasonStatus11"), IGNLABEL("ReasonStatus12"), IGNLABEL("ReasonStatus13"), IGNLABEL("ReasonStatus14"), IGNLABEL("ReasonStatus15"), IGNLABEL("ReasonStatus16"), IGNLABEL("ReasonStatus17"), IGNLABEL("ReasonStatus18"), IGNLABEL("ReasonStatus19"), "RxMan", "TxMan", "RxRefresh", "TxRefresh", "RxPoll", "TxPoll", "HostRetries", "LostSync-HostReq", "HostTxBytes", "HostRxBytes", "ElapsedUsec", "ElapsedSec", "LostSyncBetterAP", "PrivacyMismatch", "Jammed", "DiscRxNotWepped", "PhyEleMismatch", 
(char*)-1 }; #ifndef RUN_AT #define RUN_AT(x) (jiffies+(x)) #endif /* These variables are for insmod, since it seems that the rates can only be set in setup_card. Rates should be a comma separated (no spaces) list of rates (up to 8). */ static int rates[8]; static char *ssids[3]; static int io[4]; static int irq[4]; static int maxencrypt /* = 0 */; /* The highest rate that the card can encrypt at. 0 means no limit. For old cards this was 4 */ static int auto_wep /* = 0 */; /* If set, it tries to figure out the wep mode */ static int aux_bap /* = 0 */; /* Checks to see if the aux ports are needed to read the bap, needed on some older cards and buses. */ static int adhoc; static int probe = 1; static kuid_t proc_kuid; static int proc_uid /* = 0 */; static kgid_t proc_kgid; static int proc_gid /* = 0 */; static int airo_perm = 0555; static int proc_perm = 0644; MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards. " "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs."); MODULE_LICENSE("Dual BSD/GPL"); MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(rates, int, NULL, 0); module_param_array(ssids, charp, NULL, 0); module_param(auto_wep, int, 0); MODULE_PARM_DESC(auto_wep, "If non-zero, the driver will keep looping through the authentication options until an association is made. " "The value of auto_wep is number of the wep keys to check. " "A value of 2 will try using the key at index 0 and index 1."); module_param(aux_bap, int, 0); MODULE_PARM_DESC(aux_bap, "If non-zero, the driver will switch into a mode that seems to work better for older cards with some older buses. " "Before switching it checks that the switch is needed."); module_param(maxencrypt, int, 0); MODULE_PARM_DESC(maxencrypt, "The maximum speed that the card can do encryption. " "Units are in 512kbs. 
" "Zero (default) means there is no limit. " "Older cards used to be limited to 2mbs (4)."); module_param(adhoc, int, 0); MODULE_PARM_DESC(adhoc, "If non-zero, the card will start in adhoc mode."); module_param(probe, int, 0); MODULE_PARM_DESC(probe, "If zero, the driver won't start the card."); module_param(proc_uid, int, 0); MODULE_PARM_DESC(proc_uid, "The uid that the /proc files will belong to."); module_param(proc_gid, int, 0); MODULE_PARM_DESC(proc_gid, "The gid that the /proc files will belong to."); module_param(airo_perm, int, 0); MODULE_PARM_DESC(airo_perm, "The permission bits of /proc/[driver/]aironet."); module_param(proc_perm, int, 0); MODULE_PARM_DESC(proc_perm, "The permission bits of the files in /proc"); /* This is a kind of sloppy hack to get this information to OUT4500 and IN4500. I would be extremely interested in the situation where this doesn't work though!!! */ static int do8bitIO /* = 0 */; /* Return codes */ #define SUCCESS 0 #define ERROR -1 #define NO_PACKET -2 /* Commands */ #define NOP2 0x0000 #define MAC_ENABLE 0x0001 #define MAC_DISABLE 0x0002 #define CMD_LOSE_SYNC 0x0003 /* Not sure what this does... 
*/ #define CMD_SOFTRESET 0x0004 #define HOSTSLEEP 0x0005 #define CMD_MAGIC_PKT 0x0006 #define CMD_SETWAKEMASK 0x0007 #define CMD_READCFG 0x0008 #define CMD_SETMODE 0x0009 #define CMD_ALLOCATETX 0x000a #define CMD_TRANSMIT 0x000b #define CMD_DEALLOCATETX 0x000c #define NOP 0x0010 #define CMD_WORKAROUND 0x0011 #define CMD_ALLOCATEAUX 0x0020 #define CMD_ACCESS 0x0021 #define CMD_PCIBAP 0x0022 #define CMD_PCIAUX 0x0023 #define CMD_ALLOCBUF 0x0028 #define CMD_GETTLV 0x0029 #define CMD_PUTTLV 0x002a #define CMD_DELTLV 0x002b #define CMD_FINDNEXTTLV 0x002c #define CMD_PSPNODES 0x0030 #define CMD_SETCW 0x0031 #define CMD_SETPCF 0x0032 #define CMD_SETPHYREG 0x003e #define CMD_TXTEST 0x003f #define MAC_ENABLETX 0x0101 #define CMD_LISTBSS 0x0103 #define CMD_SAVECFG 0x0108 #define CMD_ENABLEAUX 0x0111 #define CMD_WRITERID 0x0121 #define CMD_USEPSPNODES 0x0130 #define MAC_ENABLERX 0x0201 /* Command errors */ #define ERROR_QUALIF 0x00 #define ERROR_ILLCMD 0x01 #define ERROR_ILLFMT 0x02 #define ERROR_INVFID 0x03 #define ERROR_INVRID 0x04 #define ERROR_LARGE 0x05 #define ERROR_NDISABL 0x06 #define ERROR_ALLOCBSY 0x07 #define ERROR_NORD 0x0B #define ERROR_NOWR 0x0C #define ERROR_INVFIDTX 0x0D #define ERROR_TESTACT 0x0E #define ERROR_TAGNFND 0x12 #define ERROR_DECODE 0x20 #define ERROR_DESCUNAV 0x21 #define ERROR_BADLEN 0x22 #define ERROR_MODE 0x80 #define ERROR_HOP 0x81 #define ERROR_BINTER 0x82 #define ERROR_RXMODE 0x83 #define ERROR_MACADDR 0x84 #define ERROR_RATES 0x85 #define ERROR_ORDER 0x86 #define ERROR_SCAN 0x87 #define ERROR_AUTH 0x88 #define ERROR_PSMODE 0x89 #define ERROR_RTYPE 0x8A #define ERROR_DIVER 0x8B #define ERROR_SSID 0x8C #define ERROR_APLIST 0x8D #define ERROR_AUTOWAKE 0x8E #define ERROR_LEAP 0x8F /* Registers */ #define COMMAND 0x00 #define PARAM0 0x02 #define PARAM1 0x04 #define PARAM2 0x06 #define STATUS 0x08 #define RESP0 0x0a #define RESP1 0x0c #define RESP2 0x0e #define LINKSTAT 0x10 #define SELECT0 0x18 #define OFFSET0 0x1c #define RXFID 0x20 #define 
TXALLOCFID 0x22 #define TXCOMPLFID 0x24 #define DATA0 0x36 #define EVSTAT 0x30 #define EVINTEN 0x32 #define EVACK 0x34 #define SWS0 0x28 #define SWS1 0x2a #define SWS2 0x2c #define SWS3 0x2e #define AUXPAGE 0x3A #define AUXOFF 0x3C #define AUXDATA 0x3E #define FID_TX 1 #define FID_RX 2 /* Offset into aux memory for descriptors */ #define AUX_OFFSET 0x800 /* Size of allocated packets */ #define PKTSIZE 1840 #define RIDSIZE 2048 /* Size of the transmit queue */ #define MAXTXQ 64 /* BAP selectors */ #define BAP0 0 /* Used for receiving packets */ #define BAP1 2 /* Used for xmiting packets and working with RIDS */ /* Flags */ #define COMMAND_BUSY 0x8000 #define BAP_BUSY 0x8000 #define BAP_ERR 0x4000 #define BAP_DONE 0x2000 #define PROMISC 0xffff #define NOPROMISC 0x0000 #define EV_CMD 0x10 #define EV_CLEARCOMMANDBUSY 0x4000 #define EV_RX 0x01 #define EV_TX 0x02 #define EV_TXEXC 0x04 #define EV_ALLOC 0x08 #define EV_LINK 0x80 #define EV_AWAKE 0x100 #define EV_TXCPY 0x400 #define EV_UNKNOWN 0x800 #define EV_MIC 0x1000 /* Message Integrity Check Interrupt */ #define EV_AWAKEN 0x2000 #define STATUS_INTS (EV_AWAKE|EV_LINK|EV_TXEXC|EV_TX|EV_TXCPY|EV_RX|EV_MIC) #ifdef CHECK_UNKNOWN_INTS #define IGNORE_INTS ( EV_CMD | EV_UNKNOWN) #else #define IGNORE_INTS (~STATUS_INTS) #endif /* RID TYPES */ #define RID_RW 0x20 /* The RIDs */ #define RID_CAPABILITIES 0xFF00 #define RID_APINFO 0xFF01 #define RID_RADIOINFO 0xFF02 #define RID_UNKNOWN3 0xFF03 #define RID_RSSI 0xFF04 #define RID_CONFIG 0xFF10 #define RID_SSID 0xFF11 #define RID_APLIST 0xFF12 #define RID_DRVNAME 0xFF13 #define RID_ETHERENCAP 0xFF14 #define RID_WEP_TEMP 0xFF15 #define RID_WEP_PERM 0xFF16 #define RID_MODULATION 0xFF17 #define RID_OPTIONS 0xFF18 #define RID_ACTUALCONFIG 0xFF20 /*readonly*/ #define RID_FACTORYCONFIG 0xFF21 #define RID_UNKNOWN22 0xFF22 #define RID_LEAPUSERNAME 0xFF23 #define RID_LEAPPASSWORD 0xFF24 #define RID_STATUS 0xFF50 #define RID_BEACON_HST 0xFF51 #define RID_BUSY_HST 0xFF52 #define 
RID_RETRIES_HST 0xFF53
#define RID_UNKNOWN54  0xFF54
#define RID_UNKNOWN55  0xFF55
#define RID_UNKNOWN56  0xFF56
#define RID_MIC        0xFF57
#define RID_STATS16    0xFF60
#define RID_STATS16DELTA 0xFF61
#define RID_STATS16DELTACLEAR 0xFF62
#define RID_STATS      0xFF68
#define RID_STATSDELTA 0xFF69
#define RID_STATSDELTACLEAR 0xFF6A
#define RID_ECHOTEST_RID 0xFF70
#define RID_ECHOTEST_RESULTS 0xFF71
#define RID_BSSLISTFIRST 0xFF72
#define RID_BSSLISTNEXT  0xFF73
#define RID_WPA_BSSLISTFIRST 0xFF74
#define RID_WPA_BSSLISTNEXT  0xFF75

/* Command block written to the card: opcode plus three parameter words. */
typedef struct {
	u16 cmd;
	u16 parm0;
	u16 parm1;
	u16 parm2;
} Cmd;

/* Response block read back from the card after a command completes. */
typedef struct {
	u16 status;
	u16 rsp0;
	u16 rsp1;
	u16 rsp2;
} Resp;

/*
 * Rids and endian-ness:  The Rids will always be in cpu endian, since
 * this all the patches from the big-endian guys end up doing that.
 * so all rid access should use the read/writeXXXRid routines.
 */

/* This structure came from an email sent to me from an engineer at
   aironet for inclusion into this driver */
typedef struct WepKeyRid WepKeyRid;
struct WepKeyRid {
	__le16 len;
	__le16 kindex;		/* key index slot on the card */
	u8 mac[ETH_ALEN];
	__le16 klen;		/* key length in bytes */
	u8 key[16];
} __packed;

/* These structures are from the Aironet's PC4500 Developers Manual */
typedef struct Ssid Ssid;
struct Ssid {
	__le16 len;
	u8 ssid[32];
} __packed;

/* Up to three SSIDs the card may associate with. */
typedef struct SsidRid SsidRid;
struct SsidRid {
	__le16 len;
	Ssid ssids[3];
} __packed;

typedef struct ModulationRid ModulationRid;
struct ModulationRid {
	__le16 len;
	__le16 modulation;
#define MOD_DEFAULT cpu_to_le16(0)
#define MOD_CCK cpu_to_le16(1)
#define MOD_MOK cpu_to_le16(2)
} __packed;

/*
 * Card configuration RID.  On-wire layout; all multi-byte fields are
 * little-endian (__le16), values stored via cpu_to_le16().
 */
typedef struct ConfigRid ConfigRid;
struct ConfigRid {
	__le16 len; /* sizeof(ConfigRid) */
	__le16 opmode; /* operating mode */
#define MODE_STA_IBSS cpu_to_le16(0)
#define MODE_STA_ESS cpu_to_le16(1)
#define MODE_AP cpu_to_le16(2)
#define MODE_AP_RPTR cpu_to_le16(3)
#define MODE_CFG_MASK cpu_to_le16(0xff)
#define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */
#define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */
#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extensions */
#define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */
#define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */
#define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */
#define MODE_LEAF_NODE cpu_to_le16(1<<13) /* enable leaf node bridge */
#define MODE_CF_POLLABLE cpu_to_le16(1<<14) /* enable CF pollable */
#define MODE_MIC cpu_to_le16(1<<15) /* enable MIC */
	__le16 rmode; /* receive mode */
#define RXMODE_BC_MC_ADDR cpu_to_le16(0)
#define RXMODE_BC_ADDR cpu_to_le16(1) /* ignore multicasts */
#define RXMODE_ADDR cpu_to_le16(2) /* ignore multicast and broadcast */
#define RXMODE_RFMON cpu_to_le16(3) /* wireless monitor mode */
#define RXMODE_RFMON_ANYBSS cpu_to_le16(4)
#define RXMODE_LANMON cpu_to_le16(5) /* lan style monitor -- data packets only */
#define RXMODE_MASK cpu_to_le16(255)
#define RXMODE_DISABLE_802_3_HEADER cpu_to_le16(1<<8) /* disables 802.3 header on rx */
#define RXMODE_FULL_MASK (RXMODE_MASK | RXMODE_DISABLE_802_3_HEADER)
#define RXMODE_NORMALIZED_RSSI cpu_to_le16(1<<9) /* return normalized RSSI */
	__le16 fragThresh;
	__le16 rtsThres;
	u8 macAddr[ETH_ALEN];
	u8 rates[8];
	__le16 shortRetryLimit;
	__le16 longRetryLimit;
	__le16 txLifetime; /* in kusec */
	__le16 rxLifetime; /* in kusec */
	__le16 stationary;
	__le16 ordering;
	__le16 u16deviceType; /* for overriding device type */
	__le16 cfpRate;
	__le16 cfpDuration;
	__le16 _reserved1[3];
	/*---------- Scanning/Associating ----------*/
	__le16 scanMode;
#define SCANMODE_ACTIVE cpu_to_le16(0)
#define SCANMODE_PASSIVE cpu_to_le16(1)
#define SCANMODE_AIROSCAN cpu_to_le16(2)
	__le16 probeDelay; /* in kusec */
	__le16 probeEnergyTimeout; /* in kusec */
	__le16 probeResponseTimeout;
	__le16 beaconListenTimeout;
	__le16 joinNetTimeout;
	__le16 authTimeout;
	__le16 authType;
#define AUTH_OPEN cpu_to_le16(0x1)
#define AUTH_ENCRYPT cpu_to_le16(0x101)
#define AUTH_SHAREDKEY cpu_to_le16(0x102)
#define AUTH_ALLOW_UNENCRYPTED cpu_to_le16(0x200)
	__le16 associationTimeout;
	__le16 specifiedApTimeout;
	__le16 offlineScanInterval;
	__le16 offlineScanDuration;
	__le16 linkLossDelay;
	__le16 maxBeaconLostTime;
	__le16 refreshInterval;
#define DISABLE_REFRESH cpu_to_le16(0xFFFF)
	__le16 _reserved1a[1];
	/*---------- Power save operation ----------*/
	__le16 powerSaveMode;
#define POWERSAVE_CAM cpu_to_le16(0)
#define POWERSAVE_PSP cpu_to_le16(1)
#define POWERSAVE_PSPCAM cpu_to_le16(2)
	__le16 sleepForDtims;
	__le16 listenInterval;
	__le16 fastListenInterval;
	__le16 listenDecay;
	__le16 fastListenDelay;
	__le16 _reserved2[2];
	/*---------- Ap/Ibss config items ----------*/
	__le16 beaconPeriod;
	__le16 atimDuration;
	__le16 hopPeriod;
	__le16 channelSet;
	__le16 channel;
	__le16 dtimPeriod;
	__le16 bridgeDistance;
	__le16 radioID;
	/*---------- Radio configuration ----------*/
	__le16 radioType;
#define RADIOTYPE_DEFAULT cpu_to_le16(0)
#define RADIOTYPE_802_11 cpu_to_le16(1)
#define RADIOTYPE_LEGACY cpu_to_le16(2)
	u8 rxDiversity;
	u8 txDiversity;
	__le16 txPower;
#define TXPOWER_DEFAULT 0
	__le16 rssiThreshold;
#define RSSI_DEFAULT 0
	__le16 modulation;
#define PREAMBLE_AUTO cpu_to_le16(0)
#define PREAMBLE_LONG cpu_to_le16(1)
#define PREAMBLE_SHORT cpu_to_le16(2)
	__le16 preamble;
	__le16 homeProduct;
	__le16 radioSpecific;
	/*---------- Aironet Extensions ----------*/
	u8 nodeName[16];
	__le16 arlThreshold;
	__le16 arlDecay;
	__le16 arlDelay;
	__le16 _reserved4[1];
	/*---------- Aironet Extensions ----------*/
	u8 magicAction;
#define MAGIC_ACTION_STSCHG 1
#define MAGIC_ACTION_RESUME 2
#define MAGIC_IGNORE_MCAST (1<<8)
#define MAGIC_IGNORE_BCAST (1<<9)
#define MAGIC_SWITCH_TO_PSP (0<<10)
#define MAGIC_STAY_IN_CAM (1<<10)
	u8 magicControl;
	__le16 autoWake;
} __packed;

/* Link/association status reported by the card. */
typedef struct StatusRid StatusRid;
struct StatusRid {
	__le16 len;
	u8 mac[ETH_ALEN];
	__le16 mode;
	__le16 errorCode;
	__le16 sigQuality;
	__le16 SSIDlen;
	char SSID[32];
	char apName[16];
	u8 bssid[4][ETH_ALEN];
	__le16 beaconPeriod;
	__le16 dimPeriod;
	__le16 atimDuration;
	__le16 hopPeriod;
	__le16 channelSet;
	__le16 channel;
	__le16 hopsToBackbone;
	__le16 apTotalLoad;
	__le16 generatedLoad;
	__le16 accumulatedArl;
	__le16 signalQuality;
	__le16 currentXmitRate;
	__le16 apDevExtensions;
	__le16 normalizedSignalStrength;
	__le16 shortPreamble;
	u8 apIP[4];
	u8 noisePercent; /* Noise percent in last second */
	u8 noisedBm; /* Noise dBm in last second */
	u8 noiseAvePercent; /* Noise percent in last minute */
	u8 noiseAvedBm; /* Noise dBm in last minute */
	u8 noiseMaxPercent; /* Highest noise percent in last minute */
	u8 noiseMaxdBm; /* Highest noise dbm in last minute */
	__le16 load;
	u8 carrier[4];
	__le16 assocStatus;
#define STAT_NOPACKETS 0
#define STAT_NOCARRIERSET 10
#define STAT_GOTCARRIERSET 11
#define STAT_WRONGSSID 20
#define STAT_BADCHANNEL 25
#define STAT_BADBITRATES 30
#define STAT_BADPRIVACY 35
#define STAT_APFOUND 40
#define STAT_APREJECTED 50
#define STAT_AUTHENTICATING 60
#define STAT_DEAUTHENTICATED 61
#define STAT_AUTHTIMEOUT 62
#define STAT_ASSOCIATING 70
#define STAT_DEASSOCIATED 71
#define STAT_ASSOCTIMEOUT 72
#define STAT_NOTAIROAP 73
#define STAT_ASSOCIATED 80
#define STAT_LEAPING 90
#define STAT_LEAPFAILED 91
#define STAT_LEAPTIMEDOUT 92
#define STAT_LEAPCOMPLETE 93
} __packed;

/* Raw statistics counters; interpretation of vals[] is firmware-defined. */
typedef struct StatsRid StatsRid;
struct StatsRid {
	__le16 len;
	__le16 spacer;
	__le32 vals[100];
} __packed;

/* List of up to four preferred access-point MAC addresses. */
typedef struct APListRid APListRid;
struct APListRid {
	__le16 len;
	u8 ap[4][ETH_ALEN];
} __packed;

/* Static card/firmware capability information. */
typedef struct CapabilityRid CapabilityRid;
struct CapabilityRid {
	__le16 len;
	char oui[3];
	char zero;
	__le16 prodNum;
	char manName[32];
	char prodName[16];
	char prodVer[8];
	char factoryAddr[ETH_ALEN];
	char aironetAddr[ETH_ALEN];
	__le16 radioType;
	__le16 country;
	char callid[ETH_ALEN];
	char supportedRates[8];
	char rxDiversity;
	char txDiversity;
	__le16 txPowerLevels[8];
	__le16 hardVer;
	__le16 hardCap;
	__le16 tempRange;
	__le16 softVer;
	__le16 softSubVer;
	__le16 interfaceVer;
	__le16 softCap;
	__le16 bootBlockVer;
	__le16
requiredHard;
	__le16 extSoftCap;
} __packed;

/* Only present on firmware >= 5.30.17 */
typedef struct BSSListRidExtra BSSListRidExtra;
struct BSSListRidExtra {
	__le16 unknown[4];
	u8 fixed[12]; /* WLAN management frame */
	u8 iep[624];
} __packed;

/* One scan-result entry; fetched via RID_BSSLISTFIRST/NEXT. */
typedef struct BSSListRid BSSListRid;
struct BSSListRid {
	__le16 len;
	__le16 index; /* First is 0 and 0xffff means end of list */
#define RADIO_FH 1 /* Frequency hopping radio type */
#define RADIO_DS 2 /* Direct sequence radio type */
#define RADIO_TMA 4 /* Proprietary radio used in old cards (2500) */
	__le16 radioType;
	u8 bssid[ETH_ALEN]; /* Mac address of the BSS */
	u8 zero;
	u8 ssidLen;
	u8 ssid[32];
	__le16 dBm;
#define CAP_ESS cpu_to_le16(1<<0)
#define CAP_IBSS cpu_to_le16(1<<1)
#define CAP_PRIVACY cpu_to_le16(1<<4)
#define CAP_SHORTHDR cpu_to_le16(1<<5)
	__le16 cap;
	__le16 beaconInterval;
	u8 rates[8]; /* Same as rates for config rid */
	struct { /* For frequency hopping only */
		__le16 dwell;
		u8 hopSet;
		u8 hopPattern;
		u8 hopIndex;
		u8 fill;
	} fh;
	__le16 dsChannel;
	__le16 atimWindow;
	/* Only present on firmware >= 5.30.17 */
	BSSListRidExtra extra;
} __packed;

/* Scan result plus list linkage, for the driver's network caches. */
typedef struct {
	BSSListRid bss;
	struct list_head list;
} BSSListElement;

typedef struct tdsRssiEntry tdsRssiEntry;
struct tdsRssiEntry {
	u8 rssipct;
	u8 rssidBm;
} __packed;

/* RSSI <-> dBm conversion table, indexed by raw RSSI value (0-255). */
typedef struct tdsRssiRid tdsRssiRid;
struct tdsRssiRid {
	u16 len;
	tdsRssiEntry x[256];
} __packed;

/* MIC (Michael) key material as read from the card. */
typedef struct MICRid MICRid;
struct MICRid {
	__le16 len;
	__le16 state;
	__le16 multicastValid;
	u8 multicast[16];
	__le16 unicastValid;
	u8 unicast[16];
} __packed;

/* On-wire MIC header inserted between the 802.3 header and the payload. */
typedef struct MICBuffer MICBuffer;
struct MICBuffer {
	__be16 typelen;
	union {
		u8 snap[8];
		struct {
			u8 dsap;
			u8 ssap;
			u8 control;
			u8 orgcode[3];
			u8 fieldtype[2];
		} llc;
	} u;
	__be32 mic;
	__be32 seq;
} __packed;

/* 802.3 destination/source address pair at the head of a frame. */
typedef struct {
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
} etherHead;

#define TXCTL_TXOK (1<<1) /* report if tx is ok */
#define TXCTL_TXEX (1<<2) /* report if tx fails */
#define TXCTL_802_3 (0<<3) /* 802.3 packet */
#define TXCTL_802_11 (1<<3) /* 802.11 mac packet */
#define TXCTL_ETHERNET (0<<4) /* payload has ethertype */
#define TXCTL_LLC (1<<4) /* payload is llc */
#define TXCTL_RELEASE (0<<5) /* release after completion */
#define TXCTL_NORELEASE (1<<5) /* on completion returns to host */

#define BUSY_FID 0x10000

#ifdef CISCO_EXT
#define AIROMAGIC	0xa55a
/* Warning : SIOCDEVPRIVATE may disapear during 2.5.X - Jean II */
#ifdef SIOCIWFIRSTPRIV
#ifdef SIOCDEVPRIVATE
#define AIROOLDIOCTL	SIOCDEVPRIVATE
#define AIROOLDIDIFC 	AIROOLDIOCTL + 1
#endif /* SIOCDEVPRIVATE */
#else /* SIOCIWFIRSTPRIV */
#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
#endif /* SIOCIWFIRSTPRIV */
/* This may be wrong. When using the new SIOCIWFIRSTPRIV range, we probably
 * should use only "GET" ioctls (last bit set to 1). "SET" ioctls are root
 * only and don't return the modified struct ifreq to the application which
 * is usually a problem. - Jean II */
#define AIROIOCTL	SIOCIWFIRSTPRIV
#define AIROIDIFC 	AIROIOCTL + 1

/* Ioctl constants to be used in airo_ioctl.command */

#define	AIROGCAP  		0	// Capability rid
#define AIROGCFG		1       // USED A LOT
#define AIROGSLIST		2	// System ID list
#define AIROGVLIST		3       // List of specified AP's
#define AIROGDRVNAM		4	//  NOTUSED
#define AIROGEHTENC		5	// NOTUSED
#define AIROGWEPKTMP		6
#define AIROGWEPKNV		7
#define AIROGSTAT		8
#define AIROGSTATSC32		9
#define AIROGSTATSD32		10
#define AIROGMICRID		11
#define AIROGMICSTATS		12
#define AIROGFLAGS		13
#define AIROGID			14
#define AIRORRID		15
#define AIRORSWVERSION		17

/* Leave gap of 40 commands after AIROGSTATSD32 for future */

#define AIROPCAP               	AIROGSTATSD32 + 40
#define AIROPVLIST              AIROPCAP      + 1
#define AIROPSLIST		AIROPVLIST    + 1
#define AIROPCFG		AIROPSLIST    + 1
#define AIROPSIDS		AIROPCFG      + 1
#define AIROPAPLIST		AIROPSIDS     + 1
#define AIROPMACON		AIROPAPLIST   + 1	/* Enable mac  */
#define AIROPMACOFF		AIROPMACON    + 1 	/* Disable mac */
#define AIROPSTCLR		AIROPMACOFF   + 1
#define AIROPWEPKEY		AIROPSTCLR    + 1
#define AIROPWEPKEYNV		AIROPWEPKEY   + 1
#define AIROPLEAPPWD            AIROPWEPKEYNV + 1
#define AIROPLEAPUSR            AIROPLEAPPWD  + 1

/* Flash codes */

#define AIROFLSHRST	       AIROPWEPKEYNV  + 40
#define AIROFLSHGCHR           AIROFLSHRST    + 1
#define AIROFLSHSTFL           AIROFLSHGCHR   + 1
#define AIROFLSHPCHR           AIROFLSHSTFL   + 1
#define AIROFLPUTBUF           AIROFLSHPCHR   + 1
#define AIRORESTART            AIROFLPUTBUF   + 1

#define FLASHSIZE	32768
#define AUXMEMSIZE	(256 * 1024)

typedef struct aironet_ioctl {
	unsigned short command;		// What to do
	unsigned short len;		// Len of data
	unsigned short ridnum;		// rid number
	unsigned char __user *data;	// d-data
} aironet_ioctl;

static const char swversion[] = "2.1";
#endif /* CISCO_EXT */

#define NUM_MODULES       2
#define MIC_MSGLEN_MAX    2400
#define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX
#define AIRO_DEF_MTU      2312

/* Counters kept for the MIC (Michael) receive path. */
typedef struct {
	u32   size;            // size
	u8    enabled;         // MIC enabled or not
	u32   rxSuccess;       // successful packets received
	u32   rxIncorrectMIC;  // pkts dropped due to incorrect MIC comparison
	u32   rxNotMICed;      // pkts dropped due to not being MIC'd
	u32   rxMICPlummed;    // pkts dropped due to not having a MIC plummed
	u32   rxWrongSequence; // pkts dropped due to sequence number violation
	u32   reserve[32];
} mic_statistics;

/* Running state of one EMMH32 (MMH) MIC computation. */
typedef struct {
	u32 coeff[((EMMH32_MSGLEN_MAX)+3)>>2];
	u64 accum;	// accumulated mic, reduced to u32 in final()
	int position;	// current position (byte offset) in message
	union {
		u8  d8[4];
		__be32 d32;
	} part;	// saves partial message word across update() calls
} emmh32_context;

/* One MIC key context (unicast or multicast) with its replay window. */
typedef struct {
	emmh32_context seed;	    // Context - the seed
	u32		 rx;	    // Received sequence number
	u32		 tx;	    // Tx sequence number
	u32		 window;    // Start of window
	u8		 valid;	    // Flag to say if context is valid or not
	u8		 key[16];
} miccntx;

typedef struct {
	miccntx mCtx;		// Multicast context
	miccntx uCtx;		// Unicast context
} mic_module;

/* MPI350 host-side RID descriptor as laid out for the card (bitfields). */
typedef struct {
	unsigned int  rid: 16;
	unsigned int  len: 15;
	unsigned int  valid: 1;
	dma_addr_t host_addr;
} Rid;

/* MPI350 host-side transmit descriptor. */
typedef struct {
	unsigned int  offset: 15;
	unsigned int  eoc: 1;
	unsigned int  len: 15;
	unsigned int  valid: 1;
	dma_addr_t host_addr;
}
TxFid;

/* Leading header of a frame received via the card's receive path. */
struct rx_hdr {
	__le16 status, len;
	u8 rssi[2];
	u8 rate;
	u8 freq;
	__le16 tmp[4];
} __packed;

/* MPI350 host-side receive descriptor (bitfield layout matches the card). */
typedef struct {
	unsigned int  ctl: 15;
	unsigned int  rdy: 1;
	unsigned int  len: 15;
	unsigned int  valid: 1;
	dma_addr_t host_addr;
} RxFid;

/*
 * Host receive descriptor
 */
typedef struct {
	unsigned char __iomem *card_ram_off; /* offset into card memory of the desc */
	RxFid         rx_desc;		     /* card receive descriptor */
	char          *virtual_host_addr;    /* virtual address of host receive buffer */
	int           pending;
} HostRxDesc;

/*
 * Host transmit descriptor
 */
typedef struct {
	unsigned char __iomem *card_ram_off; /* offset into card memory of the desc */
	TxFid         tx_desc;		     /* card transmit descriptor */
	char          *virtual_host_addr;    /* virtual address of host receive buffer */
	int           pending;
} HostTxDesc;

/*
 * Host RID descriptor
 */
typedef struct {
	unsigned char __iomem *card_ram_off; /* offset into card memory of the descriptor */
	Rid           rid_desc;		     /* card RID descriptor */
	char          *virtual_host_addr;    /* virtual address of host receive buffer */
} HostRidDesc;

/* Control header prepended to host-mode 802.11 transmit frames. */
typedef struct {
	u16 sw0;
	u16 sw1;
	u16 status;
	u16 len;
#define HOST_SET (1 << 0)
#define HOST_INT_TX (1 << 1) /* Interrupt on successful TX */
#define HOST_INT_TXERR (1 << 2) /* Interrupt on unseccessful TX */
#define HOST_LCC_PAYLOAD (1 << 4) /* LLC payload, 0 = Ethertype */
#define HOST_DONT_RLSE (1 << 5) /* Don't release buffer when done */
#define HOST_DONT_RETRY (1 << 6) /* Don't retry trasmit */
#define HOST_CLR_AID (1 << 7) /* clear AID failure */
#define HOST_RTS (1 << 9) /* Force RTS use */
#define HOST_SHORT (1 << 10) /* Do short preamble */
	u16 ctl;
	u16 aid;
	u16 retries;
	u16 fill;
} TxCtlHdr;

/* 802.11 MAC header (four-address form). */
typedef struct {
        u16 ctl;
        u16 duration;
        char addr1[6];
        char addr2[6];
        char addr3[6];
        u16 seq;
        char addr4[6];
} WifiHdr;

typedef struct {
	TxCtlHdr ctlhdr;
	u16 fill1;
	u16 fill2;
	WifiHdr wifihdr;
	u16 gaplen;
	u16 status;
} WifiCtlHdr;

static WifiCtlHdr wifictlhdr8023 = {
	.ctlhdr = {
		.ctl	= HOST_DONT_RLSE,
	}
};

// A few details needed for WEP (Wireless Equivalent Privacy)
#define MAX_KEY_SIZE 13                     // 128 (?) bits
#define MIN_KEY_SIZE  5                     // 40 bits RC4 - WEP
typedef struct wep_key_t {
	u16	len;
	u8	key[16];	/* 40-bit and 104-bit keys */
} wep_key_t;

/* List of Wireless Handlers (new API) */
static const struct iw_handler_def	airo_handler_def;

static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";

struct airo_info;

/*
 * Forward declarations for the low-level card access helpers.
 * NOTE(review): in OUT4500/IN4500 below, `u16 register` parses as an
 * unnamed u16 parameter carrying the `register` storage-class keyword,
 * not as a parameter named "register" -- confirm against the definitions.
 */
static int get_dec_u16( char *buffer, int *start, int limit );
static void OUT4500( struct airo_info *, u16 register, u16 value );
static unsigned short IN4500( struct airo_info *, u16 register );
static u16 setup_card(struct airo_info*, u8 *mac, int lock);
static int enable_MAC(struct airo_info *ai, int lock);
static void disable_MAC(struct airo_info *ai, int lock);
static void enable_interrupts(struct airo_info*);
static void disable_interrupts(struct airo_info*);
static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp);
static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap);
static int aux_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen,
			int whichbap);
static int fast_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen,
			 int whichbap);
static int bap_write(struct airo_info*, const __le16 *pu16Src, int bytelen,
		     int whichbap);
static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd);
static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len, int lock);
static int PC4500_writerid(struct airo_info*, u16 rid, const void
			   *pBuf, int len, int lock);
static int do_writerid( struct airo_info*, u16 rid, const void *rid_data,
			int len, int dummy );
static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw);
static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket);
static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket);

static int mpi_send_packet (struct net_device *dev);
static void mpi_unmap_card(struct pci_dev *pci);
static void mpi_receive_802_3(struct airo_info *ai);
static
void mpi_receive_802_11(struct airo_info *ai);
static int waitbusy (struct airo_info *ai);

static irqreturn_t airo_interrupt( int irq, void* dev_id);
static int airo_thread(void *data);
static void timer_func( struct net_device *dev );
static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
static void airo_read_wireless_stats (struct airo_info *local);
#ifdef CISCO_EXT
static int readrids(struct net_device *dev, aironet_ioctl *comp);
static int writerids(struct net_device *dev, aironet_ioctl *comp);
static int flashcard(struct net_device *dev, aironet_ioctl *comp);
#endif /* CISCO_EXT */
static void micinit(struct airo_info *ai);
static int micsetup(struct airo_info *ai);
static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen);

static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi);
static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);

static void airo_networks_free(struct airo_info *ai);

/* Per-device driver state, reached through netdev->ml_priv. */
struct airo_info {
	struct net_device             *dev;
	struct list_head              dev_list;
	/* Note, we can have MAX_FIDS outstanding.  FIDs are 16-bits, so we
	   use the high bit to mark whether it is in use. */
#define MAX_FIDS 6
#define MPI_MAX_FIDS 1
	u32                           fids[MAX_FIDS];
	ConfigRid config;
	char keyindex; // Used with auto wep
	char defindex; // Used with auto wep
	struct proc_dir_entry         *proc_entry;
	spinlock_t                    aux_lock;
	/* Bit numbers used with the atomic `flags` word below. */
#define FLAG_RADIO_OFF	0	/* User disabling of MAC */
#define FLAG_RADIO_DOWN	1	/* ifup/ifdown disabling of MAC */
#define FLAG_RADIO_MASK 0x03
#define FLAG_ENABLED	2
#define FLAG_ADHOC	3	/* Needed by MIC */
#define FLAG_MIC_CAPABLE 4
#define FLAG_UPDATE_MULTI 5
#define FLAG_UPDATE_UNI 6
#define FLAG_802_11	7
#define FLAG_PROMISC	8	/* IFF_PROMISC 0x100 - include/linux/if.h */
#define FLAG_PENDING_XMIT 9
#define FLAG_PENDING_XMIT11 10
#define FLAG_MPI	11
#define FLAG_REGISTERED	12
#define FLAG_COMMIT	13
#define FLAG_RESET	14
#define FLAG_FLASHING	15
#define FLAG_WPA_CAPABLE 16
	unsigned long flags;
	/* Bit numbers used with the `jobs` word, serviced by airo_thread(). */
#define JOB_DIE	0
#define JOB_XMIT	1
#define JOB_XMIT11	2
#define JOB_STATS	3
#define JOB_PROMISC	4
#define JOB_MIC	5
#define JOB_EVENT	6
#define JOB_AUTOWEP	7
#define JOB_WSTATS	8
#define JOB_SCAN_RESULTS  9
	unsigned long jobs;
	int (*bap_read)(struct airo_info*, __le16 *pu16Dst, int bytelen,
			int whichbap);
	unsigned short *flash;
	tdsRssiEntry *rssi;
	struct task_struct *list_bss_task;
	struct task_struct *airo_thread_task;
	struct semaphore sem;
	wait_queue_head_t thr_wait;
	unsigned long expires;
	struct {
		struct sk_buff *skb;
		int fid;
	} xmit, xmit11;
	struct net_device *wifidev;
	struct iw_statistics	wstats;		// wireless stats
	unsigned long		scan_timeout;	/* Time scan should be read */
	struct iw_spy_data	spy_data;
	struct iw_public_data	wireless_data;
	/* MIC stuff */
	struct crypto_cipher	*tfm;
	mic_module		mod[2];
	mic_statistics		micstats;
	HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
	HostTxDesc txfids[MPI_MAX_FIDS];
	HostRidDesc config_desc;
	unsigned long ridbus; // phys addr of config_desc
	struct sk_buff_head txq;// tx queue used by mpi350 code
	struct pci_dev          *pci;
	unsigned char		__iomem *pcimem;
	unsigned char		__iomem *pciaux;
	unsigned char		*shared;
	dma_addr_t		shared_dma;
	pm_message_t		power;
	SsidRid			*SSID;
	APListRid		*APList;
#define	PCI_SHARED_LEN		2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
	char			proc_name[IFNAMSIZ];

	int			wep_capable;
	int			max_wep_idx;

	/* WPA-related stuff */
	unsigned int bssListFirst;
	unsigned int bssListNext;
	unsigned int bssListRidLen;

	struct list_head network_list;
	struct list_head network_free_list;
	BSSListElement *networks;
};

/* Dispatch through the per-device BAP read implementation (aux or fast). */
static inline int bap_read(struct airo_info *ai, __le16 *pu16Dst, int bytelen,
			   int whichbap)
{
	return ai->bap_read(ai, pu16Dst, bytelen, whichbap);
}

static int setup_proc_entry( struct net_device *dev,
			     struct airo_info *apriv );
static int takedown_proc_entry( struct net_device *dev,
				struct airo_info *apriv );

static int cmdreset(struct airo_info *ai);
static int setflashmode (struct airo_info *ai);
static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
static int flashputbuf(struct airo_info *ai);
static int flashrestart(struct airo_info *ai,struct net_device *dev);

#define airo_print(type, name, fmt, args...) \
	printk(type DRV_NAME "(%s): " fmt "\n", name, ##args)

#define airo_print_info(name, fmt, args...) \
	airo_print(KERN_INFO, name, fmt, ##args)

#define airo_print_dbg(name, fmt, args...) \
	airo_print(KERN_DEBUG, name, fmt, ##args)

#define airo_print_warn(name, fmt, args...) \
	airo_print(KERN_WARNING, name, fmt, ##args)

#define airo_print_err(name, fmt, args...) \
	airo_print(KERN_ERR, name, fmt, ##args)

#define AIRO_FLASH(dev) (((struct airo_info *)dev->ml_priv)->flash)

/***********************************************************************
 *                              MIC ROUTINES                           *
 ***********************************************************************
 */

static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
static void MoveWindow(miccntx *context, u32 micSeq);
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
			   struct crypto_cipher *tfm);
static void emmh32_init(emmh32_context *context);
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
static void emmh32_final(emmh32_context *context, u8 digest[4]);
static int flashpchar(struct airo_info *ai,int byte,int dwelltime);

/* Rotate in a new MIC key: save the current context into `old` and
 * re-seed `cur` with `key`, resetting its sequence/replay state. */
static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len,
			    struct crypto_cipher *tfm)
{
	/* If the current MIC context is valid and its key is the same as
	 * the MIC register, there's nothing to do. */
	if (cur->valid && (memcmp(cur->key, key, key_len) == 0))
		return;

	/* Age current mic Context */
	memcpy(old, cur, sizeof(*cur));

	/* Initialize new context */
	memcpy(cur->key, key, key_len);
	cur->window  = 33; /* Window always points to the middle */
	cur->rx      = 0;  /* Rx Sequence numbers */
	cur->tx      = 0;  /* Tx sequence numbers */
	cur->valid   = 1;  /* Key is now valid */

	/* Give key to mic seed */
	emmh32_setseed(&cur->seed, key, key_len, tfm);
}

/* micinit - Initialize mic seed */

static void micinit(struct airo_info *ai)
{
	MICRid mic_rid;

	clear_bit(JOB_MIC, &ai->jobs);
	PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
	/* NOTE(review): releases ai->sem here -- caller presumably holds it
	 * when queueing JOB_MIC; confirm against airo_thread(). */
	up(&ai->sem);

	ai->micstats.enabled = (le16_to_cpu(mic_rid.state) & 0x00FF) ? 1 : 0;
	if (!ai->micstats.enabled) {
		/* So next time we have a valid key and mic is enabled, we will
		 * update the sequence number if the key is the same as before.
*/ ai->mod[0].uCtx.valid = 0; ai->mod[0].mCtx.valid = 0; return; } if (mic_rid.multicastValid) { age_mic_context(&ai->mod[0].mCtx, &ai->mod[1].mCtx, mic_rid.multicast, sizeof(mic_rid.multicast), ai->tfm); } if (mic_rid.unicastValid) { age_mic_context(&ai->mod[0].uCtx, &ai->mod[1].uCtx, mic_rid.unicast, sizeof(mic_rid.unicast), ai->tfm); } } /* micsetup - Get ready for business */ static int micsetup(struct airo_info *ai) { int i; if (ai->tfm == NULL) ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(ai->tfm)) { airo_print_err(ai->dev->name, "failed to load transform for AES"); ai->tfm = NULL; return ERROR; } for (i=0; i < NUM_MODULES; i++) { memset(&ai->mod[i].mCtx,0,sizeof(miccntx)); memset(&ai->mod[i].uCtx,0,sizeof(miccntx)); } return SUCCESS; } static const u8 micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02}; /*=========================================================================== * Description: Mic a packet * * Inputs: etherHead * pointer to an 802.3 frame * * Returns: BOOLEAN if successful, otherwise false. * PacketTxLen will be updated with the mic'd packets size. * * Caveats: It is assumed that the frame buffer will already * be big enough to hold the largets mic message possible. * (No memory allocation is done here). 
 *
 *    Author: sbraneky (10/15/01)
 *    Merciless hacks by rwilcher (1/14/02)
 */

static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, int payLen)
{
	miccntx   *context;

	// Determine correct context
	// If not adhoc, always use unicast key

	if (test_bit(FLAG_ADHOC, &ai->flags) && (frame->da[0] & 0x1))
		context = &ai->mod[0].mCtx;
	else
		context = &ai->mod[0].uCtx;

	if (!context->valid)
		return ERROR;

	mic->typelen = htons(payLen + 16); //Length of Mic'd packet

	memcpy(&mic->u.snap, micsnap, sizeof(micsnap)); // Add Snap

	// Add Tx sequence
	mic->seq = htonl(context->tx);
	context->tx += 2;	/* tx sequence numbers stay odd/even-spaced */

	emmh32_init(&context->seed); // Mic the packet
	emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
	emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
	emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
	emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
	emmh32_final(&context->seed, (u8*)&mic->mic);

	/*    New Type/length ?????????? */
	mic->typelen = 0; //Let NIC know it could be an oversized packet
	return SUCCESS;
}

/* Reason a received MIC'd frame was rejected; used for statistics only. */
typedef enum {
    NONE,
    NOMIC,
    NOMICPLUMMED,
    SEQUENCE,
    INCORRECTMIC,
} mic_error;

/*===========================================================================
 *  Description: Decapsulates a MIC'd packet and returns the 802.3 packet
 *               (removes the MIC stuff) if packet is a valid packet.
 *
 *       Inputs: etherHead  pointer to the 802.3 packet
 *
 *     Returns: BOOLEAN - TRUE if packet should be dropped otherwise FALSE
 *
 *      Author: sbraneky (10/15/01)
 *    Merciless hacks by rwilcher (1/14/02)
 *---------------------------------------------------------------------------
 */

static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16 payLen)
{
	int      i;
	u32      micSEQ;
	miccntx  *context;
	u8       digest[4];
	mic_error micError = NONE;

	// Check if the packet is a Mic'd packet

	if (!ai->micstats.enabled) {
		//No Mic set or Mic OFF but we received a MIC'd packet.
		if (memcmp ((u8*)eth + 14, micsnap, sizeof(micsnap)) == 0) {
			ai->micstats.rxMICPlummed++;
			return ERROR;
		}
		return SUCCESS;
	}

	/* 0x888E = EAPOL; let key-exchange frames through unchecked. */
	if (ntohs(mic->typelen) == 0x888E)
		return SUCCESS;

	if (memcmp (mic->u.snap, micsnap, sizeof(micsnap)) != 0) {
		// Mic enabled but packet isn't Mic'd
		ai->micstats.rxMICPlummed++;
		return ERROR;
	}

	micSEQ = ntohl(mic->seq);		//store SEQ as CPU order

	//At this point we a have a mic'd packet and mic is enabled
	//Now do the mic error checking.

	//Receive seq must be odd
	if ( (micSEQ & 1) == 0 ) {
		ai->micstats.rxWrongSequence++;
		return ERROR;
	}

	/* Try the current (mod[0]) then aged (mod[1]) key contexts. */
	for (i = 0; i < NUM_MODULES; i++) {
		int mcast = eth->da[0] & 1;
		//Determine proper context
		context = mcast ? &ai->mod[i].mCtx : &ai->mod[i].uCtx;

		//Make sure context is valid
		if (!context->valid) {
			if (i == 0)
				micError = NOMICPLUMMED;
			continue;
		}
		//DeMic it

		if (!mic->typelen)
			mic->typelen = htons(payLen + sizeof(MICBuffer) - 2);

		emmh32_init(&context->seed);
		emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
		emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
		emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
		emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);
		//Calculate MIC
		emmh32_final(&context->seed, digest);

		if (memcmp(digest, &mic->mic, 4)) { //Make sure the mics match
			//Invalid Mic
			if (i == 0)
				micError = INCORRECTMIC;
			continue;
		}

		//Check Sequence number if mics pass
		if (RxSeqValid(ai, context, mcast, micSEQ) == SUCCESS) {
			ai->micstats.rxSuccess++;
			return SUCCESS;
		}
		if (i == 0)
			micError = SEQUENCE;
	}

	// Update statistics
	switch (micError) {
		case NOMICPLUMMED: ai->micstats.rxMICPlummed++;   break;
		case SEQUENCE:    ai->micstats.rxWrongSequence++; break;
		case INCORRECTMIC: ai->micstats.rxIncorrectMIC++; break;
		case NONE:  break;
		case NOMIC: break;
	}
	return ERROR;
}

/*===========================================================================
 * Description:  Checks the Rx Seq number to make sure it is valid
 *               and hasn't already been received
 *
 *     Inputs:
 *	miccntx - mic context to check seq against
 *      micSeq  - the Mic seq number
 *
 *    Returns: TRUE if valid otherwise FALSE.
 *
 *    Author: sbraneky (10/15/01)
 *    Merciless hacks by rwilcher (1/14/02)
 *---------------------------------------------------------------------------
 */

static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq)
{
	u32 seq,index;

	//Allow for the ap being rebooted - if it is then use the next
	//sequence number of the current sequence number - might go backwards

	if (mcast) {
		if (test_bit(FLAG_UPDATE_MULTI, &ai->flags)) {
			clear_bit (FLAG_UPDATE_MULTI, &ai->flags);
			context->window = (micSeq > 33) ? micSeq : 33;
			context->rx     = 0;        // Reset rx
		}
	} else if (test_bit(FLAG_UPDATE_UNI, &ai->flags)) {
		clear_bit (FLAG_UPDATE_UNI, &ai->flags);
		context->window = (micSeq > 33) ? micSeq : 33; // Move window
		context->rx     = 0;        // Reset rx
	}

	//Make sequence number relative to START of window
	seq = micSeq - (context->window - 33);

	//Too old of a SEQ number to check.
	if ((s32)seq < 0)
		return ERROR;

	if ( seq > 64 ) {
		//Window is infinite forward
		MoveWindow(context,micSeq);
		return SUCCESS;
	}

	// We are in the window. Now check the context rx bit to see if it was already sent
	seq >>= 1;         //divide by 2 because we only have odd numbers
	index = 1 << seq;  //Get an index number

	if (!(context->rx & index)) {
		//micSEQ falls inside the window.
		//Add seqence number to the list of received numbers.
		context->rx |= index;

		MoveWindow(context,micSeq);

		return SUCCESS;
	}
	return ERROR;
}

/* Slide the replay window forward so micSeq becomes its new midpoint. */
static void MoveWindow(miccntx *context, u32 micSeq)
{
	u32 shift;

	//Move window if seq greater than the middle of the window
	if (micSeq > context->window) {
		shift = (micSeq - context->window) >> 1;

		//Shift out old
		if (shift < 32)
			context->rx >>= shift;
		else
			context->rx = 0;

		context->window = micSeq;      //Move window
	}
}

/*==============================================*/
/*========== EMMH ROUTINES  ====================*/
/*==============================================*/

/* mic accumulate -- relies on a local `coeff_position` in the caller */
#define MIC_ACCUM(val)	\
	context->accum += (u64)(val) * context->coeff[coeff_position++];

static unsigned char aes_counter[16];

/* expand the key to fill the MMH coefficient array */
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
			   struct crypto_cipher *tfm)
{
	/* take the keying material, expand if necessary, truncate at 16-bytes */
	/* run through AES counter mode to generate context->coeff[] */
	int i,j;
	u32 counter;
	u8 *cipher, plain[16];

	crypto_cipher_setkey(tfm, pkey, 16);
	counter = 0;
	for (i = 0; i < ARRAY_SIZE(context->coeff); ) {
		/* big-endian counter in the low 4 bytes of the AES block */
		aes_counter[15] = (u8)(counter >> 0);
		aes_counter[14] = (u8)(counter >> 8);
		aes_counter[13] = (u8)(counter >> 16);
		aes_counter[12] = (u8)(counter >> 24);
		counter++;
		memcpy (plain, aes_counter, 16);
		crypto_cipher_encrypt_one(tfm, plain, plain);
		cipher = plain;
		for (j = 0; (j < 16) && (i < ARRAY_SIZE(context->coeff)); ) {
			context->coeff[i++] = ntohl(*(__be32 *)&cipher[j]);
			j += 4;
		}
	}
}

/* prepare for calculation of a new mic */
static void emmh32_init(emmh32_context *context)
{
	/* prepare for new mic calculation */
	context->accum = 0;
	context->position = 0;
}

/* add some bytes to the mic calculation */
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
{
	int	coeff_position, byte_position;

	if (len == 0) return;

	coeff_position = context->position >> 2;

	/* deal with partial 32-bit word left over from last update */
byte_position = context->position & 3; if (byte_position) { /* have a partial word in part to deal with */ do { if (len == 0) return; context->part.d8[byte_position++] = *pOctets++; context->position++; len--; } while (byte_position < 4); MIC_ACCUM(ntohl(context->part.d32)); } /* deal with full 32-bit words */ while (len >= 4) { MIC_ACCUM(ntohl(*(__be32 *)pOctets)); context->position += 4; pOctets += 4; len -= 4; } /* deal with partial 32-bit word that will be left over from this update */ byte_position = 0; while (len > 0) { context->part.d8[byte_position++] = *pOctets++; context->position++; len--; } } /* mask used to zero empty bytes for final partial word */ static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L }; /* calculate the mic */ static void emmh32_final(emmh32_context *context, u8 digest[4]) { int coeff_position, byte_position; u32 val; u64 sum, utmp; s64 stmp; coeff_position = context->position >> 2; /* deal with partial 32-bit word left over from last update */ byte_position = context->position & 3; if (byte_position) { /* have a partial word in part to deal with */ val = ntohl(context->part.d32); MIC_ACCUM(val & mask32[byte_position]); /* zero empty bytes */ } /* reduce the accumulated u64 to a 32-bit MIC */ sum = context->accum; stmp = (sum & 0xffffffffLL) - ((sum >> 32) * 15); utmp = (stmp & 0xffffffffLL) - ((stmp >> 32) * 15); sum = utmp & 0xffffffffLL; if (utmp > 0x10000000fLL) sum -= 15; val = (u32)sum; digest[0] = (val>>24) & 0xFF; digest[1] = (val>>16) & 0xFF; digest[2] = (val>>8) & 0xFF; digest[3] = val & 0xFF; } static int readBSSListRid(struct airo_info *ai, int first, BSSListRid *list) { Cmd cmd; Resp rsp; if (first == 1) { if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN; memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LISTBSS; if (down_interruptible(&ai->sem)) return -ERESTARTSYS; ai->list_bss_task = current; issuecommand(ai, &cmd, &rsp); up(&ai->sem); /* Let the command take effect */ schedule_timeout_uninterruptible(3 
* HZ);
		ai->list_bss_task = NULL;
	}
	return PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
			list, ai->bssListRidLen, 1);
}

/* Read a WEP key RID; temp selects the volatile copy, else the permanent one. */
static int readWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int temp, int lock)
{
	return PC4500_readrid(ai, temp ? RID_WEP_TEMP : RID_WEP_PERM,
				wkr, sizeof(*wkr), lock);
}

/* Write a WEP key: always update the temp RID, and additionally the
 * permanent RID when perm is set.  Errors are logged but the last
 * write's status is returned. */
static int writeWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int perm, int lock)
{
	int rc;
	rc = PC4500_writerid(ai, RID_WEP_TEMP, wkr, sizeof(*wkr), lock);
	if (rc!=SUCCESS)
		airo_print_err(ai->dev->name, "WEP_TEMP set %x", rc);
	if (perm) {
		rc = PC4500_writerid(ai, RID_WEP_PERM, wkr, sizeof(*wkr), lock);
		if (rc!=SUCCESS)
			airo_print_err(ai->dev->name, "WEP_PERM set %x", rc);
	}
	return rc;
}

/* Read the SSID RID (always takes the lock). */
static int readSsidRid(struct airo_info*ai, SsidRid *ssidr)
{
	return PC4500_readrid(ai, RID_SSID, ssidr, sizeof(*ssidr), 1);
}

/* Write the SSID RID. */
static int writeSsidRid(struct airo_info*ai, SsidRid *pssidr, int lock)
{
	return PC4500_writerid(ai, RID_SSID, pssidr, sizeof(*pssidr), lock);
}

/* Fetch the card's active configuration into ai->config unless a
 * cached copy (non-zero len) is already present. */
static int readConfigRid(struct airo_info *ai, int lock)
{
	int rc;
	ConfigRid cfg;

	if (ai->config.len)
		return SUCCESS;

	rc = PC4500_readrid(ai, RID_ACTUALCONFIG, &cfg, sizeof(cfg), lock);
	if (rc != SUCCESS)
		return rc;

	ai->config = cfg;
	return SUCCESS;
}

/* Zero out configured rates above the module's maxencrypt limit when
 * authenticating (old hardware had a limit on encryption speed). */
static inline void checkThrottle(struct airo_info *ai)
{
	int i;
/* Old hardware had a limit on encryption speed */
	if (ai->config.authType != AUTH_OPEN && maxencrypt) {
		for(i=0; i<8; i++) {
			if (ai->config.rates[i] > maxencrypt) {
				ai->config.rates[i] = 0;
			}
		}
	}
}

/* Push ai->config to the card if FLAG_COMMIT is pending; also keeps
 * FLAG_ADHOC in sync with the configured operating mode. */
static int writeConfigRid(struct airo_info *ai, int lock)
{
	ConfigRid cfgr;

	if (!test_bit (FLAG_COMMIT, &ai->flags))
		return SUCCESS;

	clear_bit (FLAG_COMMIT, &ai->flags);
	clear_bit (FLAG_RESET, &ai->flags);
	checkThrottle(ai);
	cfgr = ai->config;

	if ((cfgr.opmode & MODE_CFG_MASK) == MODE_STA_IBSS)
		set_bit(FLAG_ADHOC, &ai->flags);
	else
		clear_bit(FLAG_ADHOC, &ai->flags);

	return PC4500_writerid( ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
}

/* Read the card status RID. */
static int readStatusRid(struct airo_info *ai, StatusRid *statr, int lock)
{
	return PC4500_readrid(ai, RID_STATUS, statr, sizeof(*statr), lock);
}

/* Read the preferred-AP list RID (always takes the lock). */
static int readAPListRid(struct airo_info *ai, APListRid *aplr)
{
	return PC4500_readrid(ai, RID_APLIST, aplr, sizeof(*aplr), 1);
}

/* Write the preferred-AP list RID. */
static int writeAPListRid(struct airo_info *ai, APListRid *aplr, int lock)
{
	return PC4500_writerid(ai, RID_APLIST, aplr, sizeof(*aplr), lock);
}

/* Read the capabilities RID (firmware version, soft capabilities...). */
static int readCapabilityRid(struct airo_info *ai, CapabilityRid *capr, int lock)
{
	return PC4500_readrid(ai, RID_CAPABILITIES, capr, sizeof(*capr), lock);
}

/* Read an arbitrary statistics RID selected by the caller. */
static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock)
{
	return PC4500_readrid(ai, rid, sr, sizeof(*sr), lock);
}

/* If auto-WEP is enabled and the radio is up, schedule the helper
 * thread to retry WEP setup in 3 seconds. */
static void try_auto_wep(struct airo_info *ai)
{
	if (auto_wep && !test_bit(FLAG_RADIO_DOWN, &ai->flags)) {
		ai->expires = RUN_AT(3*HZ);
		wake_up_interruptible(&ai->thr_wait);
	}
}

/* net_device open handler: commits deferred config, starts the helper
 * kthread and IRQ for the primary device, powers the MAC and starts
 * the TX queue.  Returns 0 or a negative errno. */
static int airo_open(struct net_device *dev) {
	struct airo_info *ai = dev->ml_priv;
	int rc = 0;

	if (test_bit(FLAG_FLASHING, &ai->flags))
		return -EIO;

	/* Make sure the card is configured.
	 * Wireless Extensions may postpone config changes until the card
	 * is open (to pipeline changes and speed-up card setup).
	 * If those changes are not yet committed, do it now - Jean II */
	if (test_bit(FLAG_COMMIT, &ai->flags)) {
		disable_MAC(ai, 1);
		writeConfigRid(ai, 1);
	}

	if (ai->wifidev != dev) {
		/* Primary (ethernet) device: bring up thread + IRQ. */
		clear_bit(JOB_DIE, &ai->jobs);
		ai->airo_thread_task = kthread_run(airo_thread, dev, "%s",
						   dev->name);
		if (IS_ERR(ai->airo_thread_task))
			return (int)PTR_ERR(ai->airo_thread_task);

		rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED,
			dev->name, dev);
		if (rc) {
			airo_print_err(dev->name,
				"register interrupt %d failed, rc %d",
				dev->irq, rc);
			/* Tear down the thread we just started. */
			set_bit(JOB_DIE, &ai->jobs);
			kthread_stop(ai->airo_thread_task);
			return rc;
		}

		/* Power on the MAC controller (which may have been disabled) */
		clear_bit(FLAG_RADIO_DOWN, &ai->flags);
		enable_interrupts(ai);

		try_auto_wep(ai);
	}
	enable_MAC(ai, 1);

	netif_start_queue(dev);
	return 0;
}

/* ndo_start_xmit for MPI350 cards: queue the skb and kick
 * mpi_send_packet() unless a transmit is already pending. */
static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	int npacks, pending;
	unsigned long flags;
	struct airo_info *ai = dev->ml_priv;

	if (!skb) {
		airo_print_err(dev->name, "%s: skb == NULL!",__func__);
		return NETDEV_TX_OK;
	}
	npacks = skb_queue_len (&ai->txq);

	if (npacks >= MAXTXQ - 1) {
		/* Queue nearly full: stop the stack; reject outright if
		 * it has overflowed. */
		netif_stop_queue (dev);
		if (npacks > MAXTXQ) {
			dev->stats.tx_fifo_errors++;
			return NETDEV_TX_BUSY;
		}
		skb_queue_tail (&ai->txq, skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&ai->aux_lock, flags);
	skb_queue_tail (&ai->txq, skb);
	pending = test_bit(FLAG_PENDING_XMIT, &ai->flags);
	spin_unlock_irqrestore(&ai->aux_lock,flags);
	netif_wake_queue (dev);

	if (pending == 0) {
		set_bit(FLAG_PENDING_XMIT, &ai->flags);
		mpi_send_packet (dev);
	}
	return NETDEV_TX_OK;
}

/*
 * @mpi_send_packet
 *
 * Attempt to transmit a packet.  Can be called from interrupt
 * or transmit context.  Returns the number of packets we tried to send.
 */
static int mpi_send_packet (struct net_device *dev)
{
	struct sk_buff *skb;
	unsigned char *buffer;
	s16 len;
	__le16 *payloadLen;
	struct airo_info *ai = dev->ml_priv;
	u8 *sendbuf;

	/* get a packet to send */
	if ((skb = skb_dequeue(&ai->txq)) == NULL) {
		airo_print_err(dev->name,
			"%s: Dequeue'd zero in send_packet()",
			__func__);
		return 0;
	}

	/* check min length*/
	len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	buffer = skb->data;

	ai->txfids[0].tx_desc.offset = 0;
	ai->txfids[0].tx_desc.valid = 1;
	ai->txfids[0].tx_desc.eoc = 1;
	ai->txfids[0].tx_desc.len =len+sizeof(WifiHdr);

/*
 * Magic, the cards firmware needs a length count (2 bytes) in the host buffer
 * right after  TXFID_HDR.  The TXFID_HDR contains the status short so payloadlen
 * is immediately after it. ------------------------------------------------
 *                         |TXFIDHDR+STATUS|PAYLOADLEN|802.3HDR|PACKETDATA|
 *                         ------------------------------------------------
 */
	memcpy(ai->txfids[0].virtual_host_addr,
		(char *)&wifictlhdr8023, sizeof(wifictlhdr8023));

	payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
		sizeof(wifictlhdr8023));
	sendbuf = ai->txfids[0].virtual_host_addr +
		sizeof(wifictlhdr8023) + 2 ;

	/*
	 * Firmware automatically puts 802 header on so
	 * we don't need to account for it in the length
	 */
	if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
		(ntohs(((__be16 *)buffer)[6]) != 0x888E)) {
		/* MIC (Michael) encapsulation path: insert the MIC buffer
		 * between the 802.3 header and the payload.  EAPOL frames
		 * (ethertype 0x888E) are excluded. */
		MICBuffer pMic;

		if (encapsulate(ai, (etherHead *)buffer, &pMic, len - sizeof(etherHead)) != SUCCESS)
			return ERROR;

		*payloadLen = cpu_to_le16(len-sizeof(etherHead)+sizeof(pMic));
		ai->txfids[0].tx_desc.len += sizeof(pMic);
		/* copy data into airo dma buffer */
		memcpy (sendbuf, buffer, sizeof(etherHead));
		buffer += sizeof(etherHead);
		sendbuf += sizeof(etherHead);
		memcpy (sendbuf, &pMic, sizeof(pMic));
		sendbuf += sizeof(pMic);
		memcpy (sendbuf, buffer, len - sizeof(etherHead));
	} else {
		*payloadLen = cpu_to_le16(len - sizeof(etherHead));

		dev->trans_start = jiffies;

		/* copy data
 * into the airo dma buffer */
		memcpy(sendbuf, buffer, len);
	}

	/* Publish the descriptor to card memory and ring the doorbell. */
	memcpy_toio(ai->txfids[0].card_ram_off,
		&ai->txfids[0].tx_desc, sizeof(TxFid));

	OUT4500(ai, EVACK, 8);

	dev_kfree_skb_any(skb);
	return 1;
}

/* Decode a transmit-failure status word and update error counters.
 * fid < 0 means MPI350 (status lives in the shared host buffer);
 * otherwise the status is read from the card via BAP0.  For retry or
 * lifetime failures a wireless TXDROP event is sent to user space. */
static void get_tx_error(struct airo_info *ai, s32 fid)
{
	__le16 status;

	if (fid < 0)
		status = ((WifiCtlHdr *)ai->txfids[0].virtual_host_addr)->ctlhdr.status;
	else {
		if (bap_setup(ai, ai->fids[fid] & 0xffff, 4, BAP0) != SUCCESS)
			return;
		bap_read(ai, &status, 2, BAP0);
	}
	if (le16_to_cpu(status) & 2) /* Too many retries */
		ai->dev->stats.tx_aborted_errors++;
	if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
		ai->dev->stats.tx_heartbeat_errors++;
	if (le16_to_cpu(status) & 8) /* Aid fail */
		{ }
	if (le16_to_cpu(status) & 0x10) /* MAC disabled */
		ai->dev->stats.tx_carrier_errors++;
	if (le16_to_cpu(status) & 0x20) /* Association lost */
		{ }
	/* We produce a TXDROP event only for retry or lifetime
	 * exceeded, because that's the only status that really mean
	 * that this particular node went away.
	 * Other errors means that *we* screwed up. - Jean II */
	if ((le16_to_cpu(status) & 2) ||
	     (le16_to_cpu(status) & 4)) {
		union iwreq_data	wrqu;
		char junk[0x18];

		/* Faster to skip over useless data than to do
		 * another bap_setup(). We are at offset 0x6 and
		 * need to go to 0x18 and read 6 bytes - Jean II */
		bap_read(ai, (__le16 *) junk, 0x18, BAP0);

		/* Copy 802.11 dest address.
		 * We use the 802.11 header because the frame may
		 * not be 802.3 or may be mangled...
		 * In Ad-Hoc mode, it will be the node address.
		 * In managed mode, it will be most likely the AP addr
		 * User space will figure out how to convert it to
		 * whatever it needs (IP address or else).
		 * - Jean II */
		memcpy(wrqu.addr.sa_data, junk + 0x12, ETH_ALEN);
		wrqu.addr.sa_family = ARPHRD_ETHER;

		/* Send event to user space */
		wireless_send_event(ai->dev, IWEVTXDROP, &wrqu, NULL);
	}
}

/* Deferred 802.3 transmit completion (runs with ai->sem held on
 * entry): hands the frame to the card, releases the semaphore, and
 * wakes the queue if a FID is free again. */
static void airo_end_xmit(struct net_device *dev) {
	u16 status;
	int i;
	struct airo_info *priv = dev->ml_priv;
	struct sk_buff *skb = priv->xmit.skb;
	int fid = priv->xmit.fid;
	u32 *fids = priv->fids;

	clear_bit(JOB_XMIT, &priv->jobs);
	clear_bit(FLAG_PENDING_XMIT, &priv->flags);
	status = transmit_802_3_packet (priv, fids[fid], skb->data);
	up(&priv->sem);

	i = 0;
	if ( status == SUCCESS ) {
		dev->trans_start = jiffies;
		/* Scan for another free FID in the 802.3 half. */
		for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
	} else {
		/* Mark the FID free again (clear stored length). */
		priv->fids[fid] &= 0xffff;
		dev->stats.tx_window_errors++;
	}
	if (i < MAX_FIDS / 2)
		netif_wake_queue(dev);
	dev_kfree_skb(skb);
}

/* ndo_start_xmit for ISA/PCMCIA cards (802.3 frames): claim a free
 * FID from the lower half of the table, then either transmit now or
 * defer to the helper thread if the semaphore is contended. */
static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
					 struct net_device *dev) {
	s16 len;
	int i, j;
	struct airo_info *priv = dev->ml_priv;
	u32 *fids = priv->fids;

	if ( skb == NULL ) {
		airo_print_err(dev->name, "%s: skb == NULL!", __func__);
		return NETDEV_TX_OK;
	}

	/* Find a vacant FID */
	for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
	for( j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++ );

	if ( j >= MAX_FIDS / 2 ) {
		netif_stop_queue(dev);

		if (i == MAX_FIDS / 2) {
			dev->stats.tx_fifo_errors++;
			return NETDEV_TX_BUSY;
		}
	}
	/* check min length*/
	len = ETH_ZLEN < skb->len ?
skb->len : ETH_ZLEN;

	/* Mark fid as used & save length for later */
	fids[i] |= (len << 16);
	priv->xmit.skb = skb;
	priv->xmit.fid = i;
	if (down_trylock(&priv->sem) != 0) {
		/* Semaphore busy: hand off to the helper thread. */
		set_bit(FLAG_PENDING_XMIT, &priv->flags);
		netif_stop_queue(dev);
		set_bit(JOB_XMIT, &priv->jobs);
		wake_up_interruptible(&priv->thr_wait);
	} else
		airo_end_xmit(dev);
	return NETDEV_TX_OK;
}

/* Deferred 802.11 transmit completion for the wifi device; mirrors
 * airo_end_xmit() but uses the upper half of the FID table. */
static void airo_end_xmit11(struct net_device *dev) {
	u16 status;
	int i;
	struct airo_info *priv = dev->ml_priv;
	struct sk_buff *skb = priv->xmit11.skb;
	int fid = priv->xmit11.fid;
	u32 *fids = priv->fids;

	clear_bit(JOB_XMIT11, &priv->jobs);
	clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
	status = transmit_802_11_packet (priv, fids[fid], skb->data);
	up(&priv->sem);

	i = MAX_FIDS / 2;
	if ( status == SUCCESS ) {
		dev->trans_start = jiffies;
		for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
	} else {
		priv->fids[fid] &= 0xffff;
		dev->stats.tx_window_errors++;
	}
	if (i < MAX_FIDS)
		netif_wake_queue(dev);
	dev_kfree_skb(skb);
}

/* ndo_start_xmit for the raw 802.11 "wifiX" device: like
 * airo_start_xmit() but uses the upper half of the FID table.
 * Not implemented for MPI350 — frames are dropped there. */
static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
					   struct net_device *dev) {
	s16 len;
	int i, j;
	struct airo_info *priv = dev->ml_priv;
	u32 *fids = priv->fids;

	if (test_bit(FLAG_MPI, &priv->flags)) {
		/* Not implemented yet for MPI350 */
		netif_stop_queue(dev);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if ( skb == NULL ) {
		airo_print_err(dev->name, "%s: skb == NULL!", __func__);
		return NETDEV_TX_OK;
	}

	/* Find a vacant FID */
	for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
	for( j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++ );

	if ( j >= MAX_FIDS ) {
		netif_stop_queue(dev);

		if (i == MAX_FIDS) {
			dev->stats.tx_fifo_errors++;
			return NETDEV_TX_BUSY;
		}
	}
	/* check min length*/
	len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;

	/* Mark fid as used & save length for later */
	fids[i] |= (len << 16);
	priv->xmit11.skb = skb;
	priv->xmit11.fid = i;
	if (down_trylock(&priv->sem) != 0) {
		set_bit(FLAG_PENDING_XMIT11, &priv->flags);
		netif_stop_queue(dev);
		set_bit(JOB_XMIT11, &priv->jobs);
		wake_up_interruptible(&priv->thr_wait);
	} else
		airo_end_xmit11(dev);
	return NETDEV_TX_OK;
}

/* Refresh dev->stats from the card's statistics RID.  Called with
 * ai->sem held; releases it.  Skipped while a PM event is pending.
 * The numeric indices select fields of the RID's value array. */
static void airo_read_stats(struct net_device *dev)
{
	struct airo_info *ai = dev->ml_priv;
	StatsRid stats_rid;
	__le32 *vals = stats_rid.vals;

	clear_bit(JOB_STATS, &ai->jobs);
	if (ai->power.event) {
		up(&ai->sem);
		return;
	}
	readStatsRid(ai, &stats_rid, RID_STATS, 0);
	up(&ai->sem);

	dev->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
			       le32_to_cpu(vals[45]);
	dev->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
			       le32_to_cpu(vals[41]);
	dev->stats.rx_bytes = le32_to_cpu(vals[92]);
	dev->stats.tx_bytes = le32_to_cpu(vals[91]);
	dev->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
			      le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]);
	dev->stats.tx_errors = le32_to_cpu(vals[42]) +
			      dev->stats.tx_fifo_errors;
	dev->stats.multicast = le32_to_cpu(vals[43]);
	dev->stats.collisions = le32_to_cpu(vals[89]);

	/* detailed rx_errors: */
	dev->stats.rx_length_errors = le32_to_cpu(vals[3]);
	dev->stats.rx_crc_errors = le32_to_cpu(vals[4]);
	dev->stats.rx_frame_errors = le32_to_cpu(vals[2]);
	dev->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
}

/* ndo_get_stats: return cached stats, kicking off a refresh (inline
 * or via the helper thread) when none is already in flight. */
static struct net_device_stats *airo_get_stats(struct net_device *dev)
{
	struct airo_info *local =  dev->ml_priv;

	if (!test_bit(JOB_STATS, &local->jobs)) {
		/* Get stats out of the card if available */
		if (down_trylock(&local->sem) != 0) {
			set_bit(JOB_STATS, &local->jobs);
			wake_up_interruptible(&local->thr_wait);
		} else
			airo_read_stats(dev);
	}

	return &dev->stats;
}

/* Program the card's promiscuous mode from ai->flags.  Called with
 * ai->sem held; releases it. */
static void airo_set_promisc(struct airo_info *ai) {
	Cmd cmd;
	Resp rsp;

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd=CMD_SETMODE;
	clear_bit(JOB_PROMISC, &ai->jobs);
	cmd.parm0=(ai->flags&IFF_PROMISC) ?
PROMISC : NOPROMISC;
	issuecommand(ai, &cmd, &rsp);
	up(&ai->sem);
}

/* ndo_set_rx_mode: sync promiscuous mode with the card, deferring to
 * the helper thread when the semaphore is contended. */
static void airo_set_multicast_list(struct net_device *dev) {
	struct airo_info *ai = dev->ml_priv;

	if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
		change_bit(FLAG_PROMISC, &ai->flags);
		if (down_trylock(&ai->sem) != 0) {
			set_bit(JOB_PROMISC, &ai->jobs);
			wake_up_interruptible(&ai->thr_wait);
		} else
			airo_set_promisc(ai);
	}

	if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
		/* Turn on multicast.  (Should be already setup...) */
	}
}

/* ndo_set_mac_address: write the new MAC into the config RID, cycle
 * the MAC, and mirror the address onto the wifi companion device. */
static int airo_set_mac_address(struct net_device *dev, void *p)
{
	struct airo_info *ai = dev->ml_priv;
	struct sockaddr *addr = p;

	readConfigRid(ai, 1);
	memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len);
	set_bit (FLAG_COMMIT, &ai->flags);
	disable_MAC(ai, 1);
	writeConfigRid (ai, 1);
	enable_MAC(ai, 1);
	memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len);
	if (ai->wifidev)
		memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

/* ndo_change_mtu: accept MTUs in the range [68, 2400]. */
static int airo_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 2400))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static LIST_HEAD(airo_devices);

static void add_airo_dev(struct airo_info *ai)
{
	/* Upper layers already keep track of PCI devices,
	 * so we only need to remember our non-PCI cards. */
	if (!ai->pci)
		list_add_tail(&ai->dev_list, &airo_devices);
}

static void del_airo_dev(struct airo_info *ai)
{
	if (!ai->pci)
		list_del(&ai->dev_list);
}

/* net_device stop handler: quiesce the queue; for the primary device
 * also (optionally) power down the radio, free the IRQ, and stop the
 * helper kthread. */
static int airo_close(struct net_device *dev) {
	struct airo_info *ai = dev->ml_priv;

	netif_stop_queue(dev);

	if (ai->wifidev != dev) {
#ifdef POWER_ON_DOWN
		/* Shut power to the card. The idea is that the user can save
		 * power when he doesn't need the card with "ifconfig down".
		 * That's the method that is most friendly towards the network
		 * stack (i.e. the network stack won't try to broadcast
		 * anything on the interface and routes are gone) - Jean II */
		set_bit(FLAG_RADIO_DOWN, &ai->flags);
		disable_MAC(ai, 1);
#endif
		disable_interrupts( ai );

		free_irq(dev->irq, dev);

		set_bit(JOB_DIE, &ai->jobs);
		kthread_stop(ai->airo_thread_task);
	}
	return 0;
}

/* Full teardown of a card: unregister netdevs, drain the MPI tx
 * queue, free driver allocations, and (when freeres is set, i.e. PCI
 * and ISA — PCMCIA frees its own resources) release I/O and DMA
 * resources. */
void stop_airo_card( struct net_device *dev, int freeres )
{
	struct airo_info *ai = dev->ml_priv;

	set_bit(FLAG_RADIO_DOWN, &ai->flags);
	disable_MAC(ai, 1);
	disable_interrupts(ai);
	takedown_proc_entry( dev, ai );
	if (test_bit(FLAG_REGISTERED, &ai->flags)) {
		unregister_netdev( dev );
		if (ai->wifidev) {
			unregister_netdev(ai->wifidev);
			free_netdev(ai->wifidev);
			ai->wifidev = NULL;
		}
		clear_bit(FLAG_REGISTERED, &ai->flags);
	}
	/*
	 * Clean out tx queue
	 */
	if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) {
		struct sk_buff *skb = NULL;
		for (;(skb = skb_dequeue(&ai->txq));)
			dev_kfree_skb(skb);
	}

	airo_networks_free (ai);

	kfree(ai->flash);
	kfree(ai->rssi);
	kfree(ai->APList);
	kfree(ai->SSID);
	if (freeres) {
		/* PCMCIA frees this stuff, so only for PCI and ISA */
		release_region( dev->base_addr, 64 );
		if (test_bit(FLAG_MPI, &ai->flags)) {
			if (ai->pci)
				mpi_unmap_card(ai->pci);
			if (ai->pcimem)
				iounmap(ai->pcimem);
			if (ai->pciaux)
				iounmap(ai->pciaux);
			pci_free_consistent(ai->pci, PCI_SHARED_LEN,
				ai->shared, ai->shared_dma);
		}
	}
	crypto_free_cipher(ai->tfm);
	del_airo_dev(ai);
	free_netdev( dev );
}

EXPORT_SYMBOL(stop_airo_card);

/* header_ops.parse for the wifi device: the hardware address lives at
 * offset 10 of the 802.11 MAC header (addr2). */
static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
	return ETH_ALEN;
}

/* Release the two PCI memory regions claimed by mpi_map_card(). */
static void mpi_unmap_card(struct pci_dev *pci)
{
	unsigned long mem_start = pci_resource_start(pci, 1);
	unsigned long mem_len = pci_resource_len(pci, 1);
	unsigned long aux_start = pci_resource_start(pci, 2);
	unsigned long aux_len = AUXMEMSIZE;

	release_mem_region(aux_start, aux_len);
	release_mem_region(mem_start, mem_len);
}

/*************************************************************
 *  This routine assumes that descriptors have been setup.
 *  Run at insmod time or after reset when the descriptors
 *  have been initialized.  Returns 0 if all is well, non-zero
 *  otherwise.  Does not allocate memory but sets up card
 *  using previously allocated descriptors.
 */
static int mpi_init_descriptors (struct airo_info *ai)
{
	Cmd cmd;
	Resp rsp;
	int i;
	int rc = SUCCESS;

	/* Alloc  card RX descriptors */
	netif_stop_queue(ai->dev);

	memset(&rsp,0,sizeof(rsp));
	memset(&cmd,0,sizeof(cmd));

	cmd.cmd = CMD_ALLOCATEAUX;
	cmd.parm0 = FID_RX;
	cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux);
	cmd.parm2 = MPI_MAX_FIDS;
	rc=issuecommand(ai, &cmd, &rsp);
	if (rc != SUCCESS) {
		airo_print_err(ai->dev->name, "Couldn't allocate RX FID");
		return rc;
	}

	/* Copy the host-side RX descriptors into card AUX memory. */
	for (i=0; i<MPI_MAX_FIDS; i++) {
		memcpy_toio(ai->rxfids[i].card_ram_off,
			&ai->rxfids[i].rx_desc, sizeof(RxFid));
	}

	/* Alloc card TX descriptors */

	memset(&rsp,0,sizeof(rsp));
	memset(&cmd,0,sizeof(cmd));

	cmd.cmd = CMD_ALLOCATEAUX;
	cmd.parm0 = FID_TX;
	cmd.parm1 = (ai->txfids[0].card_ram_off - ai->pciaux);
	cmd.parm2 = MPI_MAX_FIDS;

	for (i=0; i<MPI_MAX_FIDS; i++) {
		ai->txfids[i].tx_desc.valid = 1;
		memcpy_toio(ai->txfids[i].card_ram_off,
			&ai->txfids[i].tx_desc, sizeof(TxFid));
	}
	ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */

	rc=issuecommand(ai, &cmd, &rsp);
	if (rc != SUCCESS) {
		airo_print_err(ai->dev->name, "Couldn't allocate TX FID");
		return rc;
	}

	/* Alloc card Rid descriptor */
	memset(&rsp,0,sizeof(rsp));
	memset(&cmd,0,sizeof(cmd));

	cmd.cmd = CMD_ALLOCATEAUX;
	cmd.parm0 = RID_RW;
	cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux);
	cmd.parm2 = 1; /* Magic number... */
	rc=issuecommand(ai, &cmd, &rsp);
	if (rc != SUCCESS) {
		airo_print_err(ai->dev->name, "Couldn't allocate RID");
		return rc;
	}

	memcpy_toio(ai->config_desc.card_ram_off,
		&ai->config_desc.rid_desc, sizeof(Rid));

	return rc;
}

/*
 * We are setting up three things here:
 * 1) Map AUX memory for descriptors: Rid, TxFid, or RxFid.
 * 2) Map PCI memory for issuing commands.
 * 3) Allocate memory (shared) to send and receive ethernet frames.
 */
static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
{
	unsigned long mem_start, mem_len, aux_start, aux_len;
	int rc = -1;
	int i;
	dma_addr_t busaddroff;
	unsigned char *vpackoff;
	unsigned char __iomem *pciaddroff;

	/* BAR 1: command/register space; BAR 2: AUX descriptor memory. */
	mem_start = pci_resource_start(pci, 1);
	mem_len = pci_resource_len(pci, 1);
	aux_start = pci_resource_start(pci, 2);
	aux_len = AUXMEMSIZE;

	if (!request_mem_region(mem_start, mem_len, DRV_NAME)) {
		airo_print_err("", "Couldn't get region %x[%x]",
			(int)mem_start, (int)mem_len);
		goto out;
	}
	if (!request_mem_region(aux_start, aux_len, DRV_NAME)) {
		airo_print_err("", "Couldn't get region %x[%x]",
			(int)aux_start, (int)aux_len);
		goto free_region1;
	}

	ai->pcimem = ioremap(mem_start, mem_len);
	if (!ai->pcimem) {
		airo_print_err("", "Couldn't map region %x[%x]",
			(int)mem_start, (int)mem_len);
		goto free_region2;
	}
	ai->pciaux = ioremap(aux_start, aux_len);
	if (!ai->pciaux) {
		airo_print_err("", "Couldn't map region %x[%x]",
			(int)aux_start, (int)aux_len);
		goto free_memmap;
	}

	/* Reserve PKTSIZE for each fid and 2K for the Rids */
	ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
	if (!ai->shared) {
		airo_print_err("", "Couldn't alloc_consistent %d",
			PCI_SHARED_LEN);
		goto free_auxmap;
	}

	/*
	 * Setup descriptor RX, TX, CONFIG
	 */
	busaddroff = ai->shared_dma;
	pciaddroff = ai->pciaux + AUX_OFFSET;
	vpackoff   = ai->shared;

	/* RX descriptor setup */
	for(i = 0; i < MPI_MAX_FIDS; i++) {
		ai->rxfids[i].pending = 0;
		ai->rxfids[i].card_ram_off = pciaddroff;
		ai->rxfids[i].virtual_host_addr = vpackoff;
		ai->rxfids[i].rx_desc.host_addr = busaddroff;
		ai->rxfids[i].rx_desc.valid = 1;
		ai->rxfids[i].rx_desc.len = PKTSIZE;
		ai->rxfids[i].rx_desc.rdy = 0;

		pciaddroff += sizeof(RxFid);
		busaddroff += PKTSIZE;
		vpackoff   += PKTSIZE;
	}

	/* TX descriptor setup */
	for(i = 0; i < MPI_MAX_FIDS; i++) {
		ai->txfids[i].card_ram_off = pciaddroff;
		ai->txfids[i].virtual_host_addr = vpackoff;
		ai->txfids[i].tx_desc.valid
= 1;
		ai->txfids[i].tx_desc.host_addr = busaddroff;
		/* Pre-load the control header each TX buffer starts with. */
		memcpy(ai->txfids[i].virtual_host_addr,
			&wifictlhdr8023, sizeof(wifictlhdr8023));

		pciaddroff += sizeof(TxFid);
		busaddroff += PKTSIZE;
		vpackoff   += PKTSIZE;
	}
	ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */

	/* Rid descriptor setup */
	ai->config_desc.card_ram_off = pciaddroff;
	ai->config_desc.virtual_host_addr = vpackoff;
	ai->config_desc.rid_desc.host_addr = busaddroff;
	ai->ridbus = busaddroff;
	ai->config_desc.rid_desc.rid = 0;
	ai->config_desc.rid_desc.len = RIDSIZE;
	ai->config_desc.rid_desc.valid = 1;
	pciaddroff += sizeof(Rid);
	busaddroff += RIDSIZE;
	vpackoff   += RIDSIZE;

	/* Tell card about descriptors */
	if (mpi_init_descriptors (ai) != SUCCESS)
		goto free_shared;

	return 0;
 free_shared:
	pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
 free_auxmap:
	iounmap(ai->pciaux);
 free_memmap:
	iounmap(ai->pcimem);
 free_region2:
	release_mem_region(aux_start, aux_len);
 free_region1:
	release_mem_region(mem_start, mem_len);
 out:
	return rc;
}

static const struct header_ops airo_header_ops = {
	.parse = wll_header_parse,
};

/* net_device ops for the raw 802.11 "wifiX" companion device. */
static const struct net_device_ops airo11_netdev_ops = {
	.ndo_open		= airo_open,
	.ndo_stop		= airo_close,
	.ndo_start_xmit		= airo_start_xmit11,
	.ndo_get_stats		= airo_get_stats,
	.ndo_set_mac_address	= airo_set_mac_address,
	.ndo_do_ioctl		= airo_ioctl,
	.ndo_change_mtu		= airo_change_mtu,
};

/* alloc_netdev() setup callback for the wifi companion device. */
static void wifi_setup(struct net_device *dev)
{
	dev->netdev_ops = &airo11_netdev_ops;
	dev->header_ops = &airo_header_ops;
	dev->wireless_handlers = &airo_handler_def;
	dev->type               = ARPHRD_IEEE80211;
	dev->hard_header_len    = ETH_HLEN;
	dev->mtu                = AIRO_DEF_MTU;
	dev->addr_len           = ETH_ALEN;
	dev->tx_queue_len       = 100;

	memset(dev->broadcast,0xFF, ETH_ALEN);

	dev->flags              = IFF_BROADCAST|IFF_MULTICAST;
}

/* Create and register the "wifiX" device that shares state (ml_priv,
 * irq, base address, MAC) with the primary ethernet device.  Returns
 * the new device, or NULL on failure. */
static struct net_device *init_wifidev(struct airo_info *ai,
					struct net_device *ethdev)
{
	int err;
	struct net_device *dev = alloc_netdev(0, "wifi%d", NET_NAME_UNKNOWN,
					      wifi_setup);
	if (!dev)
		return NULL;
	dev->ml_priv = ethdev->ml_priv;
	dev->irq = ethdev->irq;
	dev->base_addr = ethdev->base_addr;
	dev->wireless_data = ethdev->wireless_data;
	SET_NETDEV_DEV(dev, ethdev->dev.parent);
	eth_hw_addr_inherit(dev, ethdev);
	err = register_netdev(dev);
	if (err<0) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

/* Issue a soft reset to the card and wait for it to go idle.
 * Returns 0, or -1 if the semaphore wait was interrupted. */
static int reset_card( struct net_device *dev , int lock) {
	struct airo_info *ai = dev->ml_priv;

	if (lock && down_interruptible(&ai->sem))
		return -1;
	waitbusy (ai);
	OUT4500(ai,COMMAND,CMD_SOFTRESET);
	msleep(200);
	waitbusy (ai);
	msleep(200);
	if (lock)
		up(&ai->sem);
	return 0;
}

#define AIRO_MAX_NETWORK_COUNT	64
/* Allocate the fixed-size pool of scanned-BSS elements (idempotent). */
static int airo_networks_allocate(struct airo_info *ai)
{
	if (ai->networks)
		return 0;

	ai->networks = kcalloc(AIRO_MAX_NETWORK_COUNT, sizeof(BSSListElement),
			       GFP_KERNEL);
	if (!ai->networks) {
		airo_print_warn("", "Out of memory allocating beacons");
		return -ENOMEM;
	}

	return 0;
}

static void airo_networks_free(struct airo_info *ai)
{
	kfree(ai->networks);
	ai->networks = NULL;
}

/* Put every pool element on the free list; the in-use list starts empty. */
static void airo_networks_initialize(struct airo_info *ai)
{
	int i;

	INIT_LIST_HEAD(&ai->network_free_list);
	INIT_LIST_HEAD(&ai->network_list);
	for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++)
		list_add_tail(&ai->networks[i].list,
			      &ai->network_free_list);
}

/* net_device ops for ISA/PCMCIA (BAP-based) cards. */
static const struct net_device_ops airo_netdev_ops = {
	.ndo_open		= airo_open,
	.ndo_stop		= airo_close,
	.ndo_start_xmit		= airo_start_xmit,
	.ndo_get_stats		= airo_get_stats,
	.ndo_set_rx_mode	= airo_set_multicast_list,
	.ndo_set_mac_address	= airo_set_mac_address,
	.ndo_do_ioctl		= airo_ioctl,
	.ndo_change_mtu		= airo_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/* net_device ops for MPI350 (DMA-based) cards; differs only in xmit. */
static const struct net_device_ops mpi_netdev_ops = {
	.ndo_open		= airo_open,
	.ndo_stop		= airo_close,
	.ndo_start_xmit		= mpi_start_xmit,
	.ndo_get_stats		= airo_get_stats,
	.ndo_set_rx_mode	= airo_set_multicast_list,
	.ndo_set_mac_address	= airo_set_mac_address,
	.ndo_do_ioctl		= airo_ioctl,
	.ndo_change_mtu		= airo_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/* Common card bring-up for all bus types: allocates the netdev,
 * resets and maps the card, registers eth%d and its wifi companion,
 * and probes firmware capabilities.  Returns the netdev or NULL. */
static struct net_device *_init_airo_card(
unsigned short irq, int port, int is_pcmcia, struct pci_dev *pci, struct device *dmdev ) { struct net_device *dev; struct airo_info *ai; int i, rc; CapabilityRid cap_rid; /* Create the network device object. */ dev = alloc_netdev(sizeof(*ai), "", NET_NAME_UNKNOWN, ether_setup); if (!dev) { airo_print_err("", "Couldn't alloc_etherdev"); return NULL; } ai = dev->ml_priv = netdev_priv(dev); ai->wifidev = NULL; ai->flags = 1 << FLAG_RADIO_DOWN; ai->jobs = 0; ai->dev = dev; if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { airo_print_dbg("", "Found an MPI350 card"); set_bit(FLAG_MPI, &ai->flags); } spin_lock_init(&ai->aux_lock); sema_init(&ai->sem, 1); ai->config.len = 0; ai->pci = pci; init_waitqueue_head (&ai->thr_wait); ai->tfm = NULL; add_airo_dev(ai); if (airo_networks_allocate (ai)) goto err_out_free; airo_networks_initialize (ai); skb_queue_head_init (&ai->txq); /* The Airo-specific entries in the device structure. */ if (test_bit(FLAG_MPI,&ai->flags)) dev->netdev_ops = &mpi_netdev_ops; else dev->netdev_ops = &airo_netdev_ops; dev->wireless_handlers = &airo_handler_def; ai->wireless_data.spy_data = &ai->spy_data; dev->wireless_data = &ai->wireless_data; dev->irq = irq; dev->base_addr = port; dev->priv_flags &= ~IFF_TX_SKB_SHARING; SET_NETDEV_DEV(dev, dmdev); reset_card (dev, 1); msleep(400); if (!is_pcmcia) { if (!request_region(dev->base_addr, 64, DRV_NAME)) { rc = -EBUSY; airo_print_err(dev->name, "Couldn't request region"); goto err_out_nets; } } if (test_bit(FLAG_MPI,&ai->flags)) { if (mpi_map_card(ai, pci)) { airo_print_err("", "Could not map memory"); goto err_out_res; } } if (probe) { if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) { airo_print_err(dev->name, "MAC could not be enabled" ); rc = -EIO; goto err_out_map; } } else if (!test_bit(FLAG_MPI,&ai->flags)) { ai->bap_read = fast_bap_read; set_bit(FLAG_FLASHING, &ai->flags); } strcpy(dev->name, "eth%d"); rc = register_netdev(dev); if (rc) { airo_print_err(dev->name, "Couldn't 
register_netdev"); goto err_out_map; } ai->wifidev = init_wifidev(ai, dev); if (!ai->wifidev) goto err_out_reg; rc = readCapabilityRid(ai, &cap_rid, 1); if (rc != SUCCESS) { rc = -EIO; goto err_out_wifi; } /* WEP capability discovery */ ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0; ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0; airo_print_info(dev->name, "Firmware version %x.%x.%02d", ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF), (le16_to_cpu(cap_rid.softVer) & 0xFF), le16_to_cpu(cap_rid.softSubVer)); /* Test for WPA support */ /* Only firmware versions 5.30.17 or better can do WPA */ if (le16_to_cpu(cap_rid.softVer) > 0x530 || (le16_to_cpu(cap_rid.softVer) == 0x530 && le16_to_cpu(cap_rid.softSubVer) >= 17)) { airo_print_info(ai->dev->name, "WPA supported."); set_bit(FLAG_WPA_CAPABLE, &ai->flags); ai->bssListFirst = RID_WPA_BSSLISTFIRST; ai->bssListNext = RID_WPA_BSSLISTNEXT; ai->bssListRidLen = sizeof(BSSListRid); } else { airo_print_info(ai->dev->name, "WPA unsupported with firmware " "versions older than 5.30.17."); ai->bssListFirst = RID_BSSLISTFIRST; ai->bssListNext = RID_BSSLISTNEXT; ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra); } set_bit(FLAG_REGISTERED,&ai->flags); airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr); /* Allocate the transmit buffers */ if (probe && !test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2); if (setup_proc_entry(dev, dev->ml_priv) < 0) goto err_out_wifi; return dev; err_out_wifi: unregister_netdev(ai->wifidev); free_netdev(ai->wifidev); err_out_reg: unregister_netdev(dev); err_out_map: if (test_bit(FLAG_MPI,&ai->flags) && pci) { pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma); iounmap(ai->pciaux); iounmap(ai->pcimem); mpi_unmap_card(ai->pci); } err_out_res: if (!is_pcmcia) release_region( dev->base_addr, 64 ); err_out_nets: airo_networks_free(ai); err_out_free: 
del_airo_dev(ai);	/* final error-unwind steps of _init_airo_card() */
	free_netdev(dev);
	return NULL;
}

/* Public probe entry point for non-PCI (ISA/PCMCIA) cards: no pci_dev. */
struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia,
				   struct device *dmdev)
{
	return _init_airo_card ( irq, port, is_pcmcia, NULL, dmdev);
}

EXPORT_SYMBOL(init_airo_card);

/*
 * Poll (10us steps) until the command register clears COMMAND_BUSY,
 * nudging the card with EV_CLEARCOMMANDBUSY every 20 polls.
 * Returns non-zero if the card went idle, 0 on timeout (~100 ms).
 */
static int waitbusy (struct airo_info *ai) {
	int delay = 0;
	while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
		udelay (10);
		if ((++delay % 20) == 0)
			OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
	}
	return delay < 10000;
}

/*
 * Full reset and re-initialization of an already-registered card
 * (e.g. after resume).  Returns 0 on success, -1 on failure.
 */
int reset_airo_card( struct net_device *dev )
{
	int i;
	struct airo_info *ai = dev->ml_priv;

	if (reset_card (dev, 1))
		return -1;

	if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) {
		airo_print_err(dev->name, "MAC could not be enabled");
		return -1;
	}
	airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
	/* Allocate the transmit buffers if needed */
	if (!test_bit(FLAG_MPI,&ai->flags))
		for( i = 0; i < MAX_FIDS; i++ )
			ai->fids[i] = transmit_allocate (ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);

	enable_interrupts( ai );
	netif_wake_queue(dev);
	return 0;
}

EXPORT_SYMBOL(reset_airo_card);

/*
 * Deliver a SIOCGIWAP wireless event carrying the current BSSID.
 * Runs from the airo_thread with ai->sem held; releases the semaphore
 * before calling into the wireless-event layer.
 */
static void airo_send_event(struct net_device *dev) {
	struct airo_info *ai = dev->ml_priv;
	union iwreq_data wrqu;
	StatusRid status_rid;

	clear_bit(JOB_EVENT, &ai->jobs);
	PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
	up(&ai->sem);
	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	memcpy(wrqu.ap_addr.sa_data, status_rid.bssid[0], ETH_ALEN);
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;

	/* Send event to user space */
	wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}

/*
 * Harvest the card's scan-result RIDs into ai->network_list and notify
 * user space with an empty SIOCGIWSCAN event.  Runs in the airo_thread
 * with ai->sem held; releases it before returning.
 */
static void airo_process_scan_results (struct airo_info *ai) {
	union iwreq_data	wrqu;
	BSSListRid bss;
	int rc;
	BSSListElement * loop_net;
	BSSListElement * tmp_net;

	/* Blow away current list of scan results */
	list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
		list_move_tail (&loop_net->list, &ai->network_free_list);
		/* Don't blow away ->list, just BSS data */
		/* NOTE(review): the memset starts at loop_net, not at
		 * &loop_net->bss — if ->list precedes ->bss in
		 * BSSListElement this clobbers the list pointers that
		 * list_move_tail() just set; verify the struct layout. */
		memset (loop_net, 0, sizeof (loop_net->bss));
	}

	/* Try to read the first
entry of the scan result */
	rc = PC4500_readrid(ai, ai->bssListFirst,
			    &bss, ai->bssListRidLen, 0);

	if((rc) || (bss.index == cpu_to_le16(0xffff))) {
		/* No scan results */
		goto out;
	}

	/* Read and parse all entries */
	tmp_net = NULL;
	/* index 0xffff marks the end of the card's scan list */
	while((!rc) && (bss.index != cpu_to_le16(0xffff))) {
		/* Grab a network off the free list */
		if (!list_empty(&ai->network_free_list)) {
			tmp_net = list_entry(ai->network_free_list.next,
					     BSSListElement, list);
			list_del(ai->network_free_list.next);
		}

		if (tmp_net != NULL) {
			memcpy(tmp_net, &bss, sizeof(tmp_net->bss));
			list_add_tail(&tmp_net->list, &ai->network_list);
			tmp_net = NULL;
		}

		/* Read next entry */
		rc = PC4500_readrid(ai, ai->bssListNext,
				    &bss, ai->bssListRidLen, 0);
	}

out:
	ai->scan_timeout = 0;
	clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
	up(&ai->sem);

	/* Send an empty event to user space.
	 * We don't send the received data on
	 * the event because it would require
	 * us to do complex transcoding, and
	 * we want to minimise the work done in
	 * the irq handler. Use a request to
	 * extract the data - Jean II */
	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL);
}

/*
 * Driver worker kthread: sleeps until a JOB_* bit is set (or one of the
 * two software timers — auto-WEP retry or scan timeout — fires), takes
 * ai->sem, and dispatches exactly one job per loop iteration.  Each job
 * handler is responsible for releasing ai->sem.  Exits on JOB_DIE.
 */
static int airo_thread(void *data) {
	struct net_device *dev = data;
	struct airo_info *ai = dev->ml_priv;
	int locked;

	set_freezable();
	while(1) {
		/* make swsusp happy with our thread */
		try_to_freeze();

		if (test_bit(JOB_DIE, &ai->jobs))
			break;

		if (ai->jobs) {
			locked = down_interruptible(&ai->sem);
		} else {
			wait_queue_t wait;

			init_waitqueue_entry(&wait, current);
			add_wait_queue(&ai->thr_wait, &wait);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (ai->jobs)
					break;
				if (ai->expires || ai->scan_timeout) {
					if (ai->scan_timeout &&
							time_after_eq(jiffies,ai->scan_timeout)){
						set_bit(JOB_SCAN_RESULTS, &ai->jobs);
						break;
					} else if (ai->expires &&
							time_after_eq(jiffies,ai->expires)){
						set_bit(JOB_AUTOWEP, &ai->jobs);
						break;
					}
					if (!kthread_should_stop() &&
					    !freezing(current)) {
						unsigned long wake_at;
						/* if only one timer is armed, max()
						 * picks the non-zero one; otherwise
						 * sleep until the earlier of the two */
						if (!ai->expires || !ai->scan_timeout) {
							wake_at = max(ai->expires,
								ai->scan_timeout);
						} else {
							wake_at = min(ai->expires,
								ai->scan_timeout);
						}
						schedule_timeout(wake_at - jiffies);
						continue;
					}
				} else if (!kthread_should_stop() &&
					   !freezing(current)) {
					schedule();
					continue;
				}
				break;
			}
			/* NOTE(review): direct assignment; kernels later
			 * prefer __set_current_state() here — confirm against
			 * the tree's conventions before changing. */
			current->state = TASK_RUNNING;
			remove_wait_queue(&ai->thr_wait, &wait);
			locked = 1;
		}

		if (locked)
			continue;	/* semaphore wait was interrupted */

		if (test_bit(JOB_DIE, &ai->jobs)) {
			up(&ai->sem);
			break;
		}

		/* suspended or firmware-flashing: drop the job on the floor */
		if (ai->power.event || test_bit(FLAG_FLASHING, &ai->flags)) {
			up(&ai->sem);
			continue;
		}

		if (test_bit(JOB_XMIT, &ai->jobs))
			airo_end_xmit(dev);
		else if (test_bit(JOB_XMIT11, &ai->jobs))
			airo_end_xmit11(dev);
		else if (test_bit(JOB_STATS, &ai->jobs))
			airo_read_stats(dev);
		else if (test_bit(JOB_WSTATS, &ai->jobs))
			airo_read_wireless_stats(ai);
		else if (test_bit(JOB_PROMISC, &ai->jobs))
			airo_set_promisc(ai);
		else if (test_bit(JOB_MIC, &ai->jobs))
			micinit(ai);
		else if (test_bit(JOB_EVENT, &ai->jobs))
			airo_send_event(dev);
		else if (test_bit(JOB_AUTOWEP, &ai->jobs))
			timer_func(dev);
		else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs))
			airo_process_scan_results(ai);
		else  /* Shouldn't get here, but we make sure to unlock */
			up(&ai->sem);
	}

	return 0;
}

/*
 * Return the 802.11 MAC header length implied by the frame-control
 * field @ctl: 10 or 16 bytes for control frames, 30 for 4-address
 * (WDS) data frames, 24 otherwise.
 */
static int header_len(__le16 ctl)
{
	u16 fc = le16_to_cpu(ctl);
	switch (fc & 0xc) {		/* frame type bits */
	case 4:				/* control frame */
		if ((fc & 0xe0) == 0xc0)
			return 10;	/* one-address control packet */
		return 16;	/* two-address control packet */
	case 8:				/* data frame */
		if ((fc & 0x300) == 0x300)
			return 30;	/* WDS packet */
	}
	return 24;
}

/* Defer Cisco MIC (re)initialization to the airo_thread. */
static void airo_handle_cisco_mic(struct airo_info *ai)
{
	if (test_bit(FLAG_MIC_CAPABLE, &ai->flags)) {
		set_bit(JOB_MIC, &ai->jobs);
		wake_up_interruptible(&ai->thr_wait);
	}
}

/* Airo Status codes */
#define STAT_NOBEACON	0x8000 /* Loss of sync - missed beacons */
#define STAT_MAXRETRIES	0x8001 /* Loss of sync - max retries */
#define STAT_MAXARL	0x8002 /* Loss of sync - average retry level exceeded*/
#define STAT_FORCELOSS	0x8003 /* Loss of sync - host request */
#define STAT_TSFSYNC	0x8004 /* Loss of sync - TSF synchronization */
#define STAT_DEAUTH	0x8100 /*
low byte is 802.11 reason code */ #define STAT_DISASSOC 0x8200 /* low byte is 802.11 reason code */ #define STAT_ASSOC_FAIL 0x8400 /* low byte is 802.11 reason code */ #define STAT_AUTH_FAIL 0x0300 /* low byte is 802.11 reason code */ #define STAT_ASSOC 0x0400 /* Associated */ #define STAT_REASSOC 0x0600 /* Reassociated? Only on firmware >= 5.30.17 */ static void airo_print_status(const char *devname, u16 status) { u8 reason = status & 0xFF; switch (status & 0xFF00) { case STAT_NOBEACON: switch (status) { case STAT_NOBEACON: airo_print_dbg(devname, "link lost (missed beacons)"); break; case STAT_MAXRETRIES: case STAT_MAXARL: airo_print_dbg(devname, "link lost (max retries)"); break; case STAT_FORCELOSS: airo_print_dbg(devname, "link lost (local choice)"); break; case STAT_TSFSYNC: airo_print_dbg(devname, "link lost (TSF sync lost)"); break; default: airo_print_dbg(devname, "unknow status %x\n", status); break; } break; case STAT_DEAUTH: airo_print_dbg(devname, "deauthenticated (reason: %d)", reason); break; case STAT_DISASSOC: airo_print_dbg(devname, "disassociated (reason: %d)", reason); break; case STAT_ASSOC_FAIL: airo_print_dbg(devname, "association failed (reason: %d)", reason); break; case STAT_AUTH_FAIL: airo_print_dbg(devname, "authentication failed (reason: %d)", reason); break; case STAT_ASSOC: case STAT_REASSOC: break; default: airo_print_dbg(devname, "unknow status %x\n", status); break; } } static void airo_handle_link(struct airo_info *ai) { union iwreq_data wrqu; int scan_forceloss = 0; u16 status; /* Get new status and acknowledge the link change */ status = le16_to_cpu(IN4500(ai, LINKSTAT)); OUT4500(ai, EVACK, EV_LINK); if ((status == STAT_FORCELOSS) && (ai->scan_timeout > 0)) scan_forceloss = 1; airo_print_status(ai->dev->name, status); if ((status == STAT_ASSOC) || (status == STAT_REASSOC)) { if (auto_wep) ai->expires = 0; if (ai->list_bss_task) wake_up_process(ai->list_bss_task); set_bit(FLAG_UPDATE_UNI, &ai->flags); set_bit(FLAG_UPDATE_MULTI, 
&ai->flags); if (down_trylock(&ai->sem) != 0) { set_bit(JOB_EVENT, &ai->jobs); wake_up_interruptible(&ai->thr_wait); } else airo_send_event(ai->dev); } else if (!scan_forceloss) { if (auto_wep && !ai->expires) { ai->expires = RUN_AT(3*HZ); wake_up_interruptible(&ai->thr_wait); } /* Send event to user space */ memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL); } } static void airo_handle_rx(struct airo_info *ai) { struct sk_buff *skb = NULL; __le16 fc, v, *buffer, tmpbuf[4]; u16 len, hdrlen = 0, gap, fid; struct rx_hdr hdr; int success = 0; if (test_bit(FLAG_MPI, &ai->flags)) { if (test_bit(FLAG_802_11, &ai->flags)) mpi_receive_802_11(ai); else mpi_receive_802_3(ai); OUT4500(ai, EVACK, EV_RX); return; } fid = IN4500(ai, RXFID); /* Get the packet length */ if (test_bit(FLAG_802_11, &ai->flags)) { bap_setup (ai, fid, 4, BAP0); bap_read (ai, (__le16*)&hdr, sizeof(hdr), BAP0); /* Bad CRC. Ignore packet */ if (le16_to_cpu(hdr.status) & 2) hdr.len = 0; if (ai->wifidev == NULL) hdr.len = 0; } else { bap_setup(ai, fid, 0x36, BAP0); bap_read(ai, &hdr.len, 2, BAP0); } len = le16_to_cpu(hdr.len); if (len > AIRO_DEF_MTU) { airo_print_err(ai->dev->name, "Bad size %d", len); goto done; } if (len == 0) goto done; if (test_bit(FLAG_802_11, &ai->flags)) { bap_read(ai, &fc, sizeof (fc), BAP0); hdrlen = header_len(fc); } else hdrlen = ETH_ALEN * 2; skb = dev_alloc_skb(len + hdrlen + 2 + 2); if (!skb) { ai->dev->stats.rx_dropped++; goto done; } skb_reserve(skb, 2); /* This way the IP header is aligned */ buffer = (__le16 *) skb_put(skb, len + hdrlen); if (test_bit(FLAG_802_11, &ai->flags)) { buffer[0] = fc; bap_read(ai, buffer + 1, hdrlen - 2, BAP0); if (hdrlen == 24) bap_read(ai, tmpbuf, 6, BAP0); bap_read(ai, &v, sizeof(v), BAP0); gap = le16_to_cpu(v); if (gap) { if (gap <= 8) { bap_read(ai, tmpbuf, gap, BAP0); } else { airo_print_err(ai->dev->name, "gaplen too " "big. 
Problems will follow..."); } } bap_read(ai, buffer + hdrlen/2, len, BAP0); } else { MICBuffer micbuf; bap_read(ai, buffer, ETH_ALEN * 2, BAP0); if (ai->micstats.enabled) { bap_read(ai, (__le16 *) &micbuf, sizeof (micbuf), BAP0); if (ntohs(micbuf.typelen) > 0x05DC) bap_setup(ai, fid, 0x44, BAP0); else { if (len <= sizeof (micbuf)) { dev_kfree_skb_irq(skb); goto done; } len -= sizeof(micbuf); skb_trim(skb, len + hdrlen); } } bap_read(ai, buffer + ETH_ALEN, len, BAP0); if (decapsulate(ai, &micbuf, (etherHead*) buffer, len)) dev_kfree_skb_irq (skb); else success = 1; } #ifdef WIRELESS_SPY if (success && (ai->spy_data.spy_number > 0)) { char *sa; struct iw_quality wstats; /* Prepare spy data : addr + qual */ if (!test_bit(FLAG_802_11, &ai->flags)) { sa = (char *) buffer + 6; bap_setup(ai, fid, 8, BAP0); bap_read(ai, (__le16 *) hdr.rssi, 2, BAP0); } else sa = (char *) buffer + 10; wstats.qual = hdr.rssi[0]; if (ai->rssi) wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm; else wstats.level = (hdr.rssi[1] + 321) / 2; wstats.noise = ai->wstats.qual.noise; wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_QUAL_UPDATED | IW_QUAL_DBM; /* Update spy records */ wireless_spy_update(ai->dev, sa, &wstats); } #endif /* WIRELESS_SPY */ done: OUT4500(ai, EVACK, EV_RX); if (success) { if (test_bit(FLAG_802_11, &ai->flags)) { skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->dev = ai->wifidev; skb->protocol = htons(ETH_P_802_2); } else skb->protocol = eth_type_trans(skb, ai->dev); skb->ip_summed = CHECKSUM_NONE; netif_rx(skb); } } static void airo_handle_tx(struct airo_info *ai, u16 status) { int i, len = 0, index = -1; u16 fid; if (test_bit(FLAG_MPI, &ai->flags)) { unsigned long flags; if (status & EV_TXEXC) get_tx_error(ai, -1); spin_lock_irqsave(&ai->aux_lock, flags); if (!skb_queue_empty(&ai->txq)) { spin_unlock_irqrestore(&ai->aux_lock,flags); mpi_send_packet(ai->dev); } else { clear_bit(FLAG_PENDING_XMIT, &ai->flags); spin_unlock_irqrestore(&ai->aux_lock,flags); 
netif_wake_queue(ai->dev); } OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC)); return; } fid = IN4500(ai, TXCOMPLFID); for(i = 0; i < MAX_FIDS; i++) { if ((ai->fids[i] & 0xffff) == fid) { len = ai->fids[i] >> 16; index = i; } } if (index != -1) { if (status & EV_TXEXC) get_tx_error(ai, index); OUT4500(ai, EVACK, status & (EV_TX | EV_TXEXC)); /* Set up to be used again */ ai->fids[index] &= 0xffff; if (index < MAX_FIDS / 2) { if (!test_bit(FLAG_PENDING_XMIT, &ai->flags)) netif_wake_queue(ai->dev); } else { if (!test_bit(FLAG_PENDING_XMIT11, &ai->flags)) netif_wake_queue(ai->wifidev); } } else { OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC)); airo_print_err(ai->dev->name, "Unallocated FID was used to xmit"); } } static irqreturn_t airo_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; u16 status, savedInterrupts = 0; struct airo_info *ai = dev->ml_priv; int handled = 0; if (!netif_device_present(dev)) return IRQ_NONE; for (;;) { status = IN4500(ai, EVSTAT); if (!(status & STATUS_INTS) || (status == 0xffff)) break; handled = 1; if (status & EV_AWAKE) { OUT4500(ai, EVACK, EV_AWAKE); OUT4500(ai, EVACK, EV_AWAKE); } if (!savedInterrupts) { savedInterrupts = IN4500(ai, EVINTEN); OUT4500(ai, EVINTEN, 0); } if (status & EV_MIC) { OUT4500(ai, EVACK, EV_MIC); airo_handle_cisco_mic(ai); } if (status & EV_LINK) { /* Link status changed */ airo_handle_link(ai); } /* Check to see if there is something to receive */ if (status & EV_RX) airo_handle_rx(ai); /* Check to see if a packet has been transmitted */ if (status & (EV_TX | EV_TXCPY | EV_TXEXC)) airo_handle_tx(ai, status); if ( status & ~STATUS_INTS & ~IGNORE_INTS ) { airo_print_warn(ai->dev->name, "Got weird status %x", status & ~STATUS_INTS & ~IGNORE_INTS ); } } if (savedInterrupts) OUT4500(ai, EVINTEN, savedInterrupts); return IRQ_RETVAL(handled); } /* * Routines to talk to the card */ /* * This was originally written for the 4500, hence the name * NOTE: If use with 8bit mode and SMP 
bad things will happen!
 * Why would someone do 8 bit IO in an SMP machine?!?
 */
/*
 * Write a 16-bit value to card register @reg.  MPI cards use byte
 * addressing, hence the register offset is doubled.  In 8-bit mode the
 * write is split into two byte accesses, low byte first.
 */
static void OUT4500( struct airo_info *ai, u16 reg, u16 val ) {
	if (test_bit(FLAG_MPI,&ai->flags))
		reg <<= 1;
	if ( !do8bitIO )
		outw( val, ai->dev->base_addr + reg );
	else {
		outb( val & 0xff, ai->dev->base_addr + reg );
		outb( val >> 8, ai->dev->base_addr + reg + 1 );
	}
}

/* 16-bit register read; mirror of OUT4500 (low byte read first in
 * 8-bit mode). */
static u16 IN4500( struct airo_info *ai, u16 reg ) {
	unsigned short rc;

	if (test_bit(FLAG_MPI,&ai->flags))
		reg <<= 1;
	if ( !do8bitIO )
		rc = inw( ai->dev->base_addr + reg );
	else {
		rc = inb( ai->dev->base_addr + reg );
		rc += ((int)inb( ai->dev->base_addr + reg + 1 )) << 8;
	}
	return rc;
}

/*
 * Issue MAC_ENABLE unless the radio is administratively down or the
 * MAC is already enabled.  Returns SUCCESS, ERROR, or -ERESTARTSYS if
 * the (optional) semaphore wait was interrupted.
 */
static int enable_MAC(struct airo_info *ai, int lock)
{
	int rc;
	Cmd cmd;
	Resp rsp;

	/* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions
	 * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down"
	 * Note : we could try to use !netif_running(dev) in enable_MAC()
	 * instead of this flag, but I don't trust it *within* the
	 * open/close functions, and testing both flags together is
	 * "cheaper" - Jean II */
	if (ai->flags & FLAG_RADIO_MASK) return SUCCESS;

	if (lock && down_interruptible(&ai->sem))
		return -ERESTARTSYS;

	if (!test_bit(FLAG_ENABLED, &ai->flags)) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = MAC_ENABLE;
		rc = issuecommand(ai, &cmd, &rsp);
		if (rc == SUCCESS)
			set_bit(FLAG_ENABLED, &ai->flags);
	} else
		rc = SUCCESS;

	if (lock)
	    up(&ai->sem);

	if (rc)
		airo_print_err(ai->dev->name, "Cannot enable MAC");
	/* NOTE(review): when the MAC was already enabled no command is
	 * issued, so rsp is uninitialized here — confirm this branch
	 * cannot misreport a stale/garbage status. */
	else if ((rsp.status & 0xFF00) != 0) {
		airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, "
			"rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2);
		rc = ERROR;
	}
	return rc;
}

/* Issue MAC_DISABLE (harmless if already disabled) and clear
 * FLAG_ENABLED.  @lock selects whether ai->sem is taken here. */
static void disable_MAC( struct airo_info *ai, int lock ) {
        Cmd cmd;
	Resp rsp;

	if (lock && down_interruptible(&ai->sem))
		return;

	if (test_bit(FLAG_ENABLED, &ai->flags)) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.cmd = MAC_DISABLE; // disable in case already enabled
		issuecommand(ai, &cmd, &rsp);
		clear_bit(FLAG_ENABLED, &ai->flags);
	}
	if (lock)
		up(&ai->sem);
}

static
void enable_interrupts( struct airo_info *ai ) { /* Enable the interrupts */ OUT4500( ai, EVINTEN, STATUS_INTS ); } static void disable_interrupts( struct airo_info *ai ) { OUT4500( ai, EVINTEN, 0 ); } static void mpi_receive_802_3(struct airo_info *ai) { RxFid rxd; int len = 0; struct sk_buff *skb; char *buffer; int off = 0; MICBuffer micbuf; memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); /* Make sure we got something */ if (rxd.rdy && rxd.valid == 0) { len = rxd.len + 12; if (len < 12 || len > 2048) goto badrx; skb = dev_alloc_skb(len); if (!skb) { ai->dev->stats.rx_dropped++; goto badrx; } buffer = skb_put(skb,len); memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2); if (ai->micstats.enabled) { memcpy(&micbuf, ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2, sizeof(micbuf)); if (ntohs(micbuf.typelen) <= 0x05DC) { if (len <= sizeof(micbuf) + ETH_ALEN * 2) goto badmic; off = sizeof(micbuf); skb_trim (skb, len - off); } } memcpy(buffer + ETH_ALEN * 2, ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2 + off, len - ETH_ALEN * 2 - off); if (decapsulate (ai, &micbuf, (etherHead*)buffer, len - off - ETH_ALEN * 2)) { badmic: dev_kfree_skb_irq (skb); goto badrx; } #ifdef WIRELESS_SPY if (ai->spy_data.spy_number > 0) { char *sa; struct iw_quality wstats; /* Prepare spy data : addr + qual */ sa = buffer + ETH_ALEN; wstats.qual = 0; /* XXX Where do I get that info from ??? 
*/ wstats.level = 0; wstats.updated = 0; /* Update spy records */ wireless_spy_update(ai->dev, sa, &wstats); } #endif /* WIRELESS_SPY */ skb->ip_summed = CHECKSUM_NONE; skb->protocol = eth_type_trans(skb, ai->dev); netif_rx(skb); } badrx: if (rxd.valid == 0) { rxd.valid = 1; rxd.rdy = 0; rxd.len = PKTSIZE; memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd)); } } static void mpi_receive_802_11(struct airo_info *ai) { RxFid rxd; struct sk_buff *skb = NULL; u16 len, hdrlen = 0; __le16 fc; struct rx_hdr hdr; u16 gap; u16 *buffer; char *ptr = ai->rxfids[0].virtual_host_addr + 4; memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd)); memcpy ((char *)&hdr, ptr, sizeof(hdr)); ptr += sizeof(hdr); /* Bad CRC. Ignore packet */ if (le16_to_cpu(hdr.status) & 2) hdr.len = 0; if (ai->wifidev == NULL) hdr.len = 0; len = le16_to_cpu(hdr.len); if (len > AIRO_DEF_MTU) { airo_print_err(ai->dev->name, "Bad size %d", len); goto badrx; } if (len == 0) goto badrx; fc = get_unaligned((__le16 *)ptr); hdrlen = header_len(fc); skb = dev_alloc_skb( len + hdrlen + 2 ); if ( !skb ) { ai->dev->stats.rx_dropped++; goto badrx; } buffer = (u16*)skb_put (skb, len + hdrlen); memcpy ((char *)buffer, ptr, hdrlen); ptr += hdrlen; if (hdrlen == 24) ptr += 6; gap = get_unaligned_le16(ptr); ptr += sizeof(__le16); if (gap) { if (gap <= 8) ptr += gap; else airo_print_err(ai->dev->name, "gaplen too big. 
Problems will follow..."); } memcpy ((char *)buffer + hdrlen, ptr, len); ptr += len; #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ if (ai->spy_data.spy_number > 0) { char *sa; struct iw_quality wstats; /* Prepare spy data : addr + qual */ sa = (char*)buffer + 10; wstats.qual = hdr.rssi[0]; if (ai->rssi) wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm; else wstats.level = (hdr.rssi[1] + 321) / 2; wstats.noise = ai->wstats.qual.noise; wstats.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_DBM; /* Update spy records */ wireless_spy_update(ai->dev, sa, &wstats); } #endif /* IW_WIRELESS_SPY */ skb_reset_mac_header(skb); skb->pkt_type = PACKET_OTHERHOST; skb->dev = ai->wifidev; skb->protocol = htons(ETH_P_802_2); skb->ip_summed = CHECKSUM_NONE; netif_rx( skb ); badrx: if (rxd.valid == 0) { rxd.valid = 1; rxd.rdy = 0; rxd.len = PKTSIZE; memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd)); } } static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) { Cmd cmd; Resp rsp; int status; SsidRid mySsid; __le16 lastindex; WepKeyRid wkr; int rc; memset( &mySsid, 0, sizeof( mySsid ) ); kfree (ai->flash); ai->flash = NULL; /* The NOP is the first step in getting the card going */ cmd.cmd = NOP; cmd.parm0 = cmd.parm1 = cmd.parm2 = 0; if (lock && down_interruptible(&ai->sem)) return ERROR; if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) { if (lock) up(&ai->sem); return ERROR; } disable_MAC( ai, 0); // Let's figure out if we need to use the AUX port if (!test_bit(FLAG_MPI,&ai->flags)) { cmd.cmd = CMD_ENABLEAUX; if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { if (lock) up(&ai->sem); airo_print_err(ai->dev->name, "Error checking for AUX port"); return ERROR; } if (!aux_bap || rsp.status & 0xff00) { ai->bap_read = fast_bap_read; airo_print_dbg(ai->dev->name, "Doing fast bap_reads"); } else { ai->bap_read = aux_bap_read; airo_print_dbg(ai->dev->name, "Doing AUX bap_reads"); } } if (lock) up(&ai->sem); if (ai->config.len == 0) { int i; tdsRssiRid 
rssi_rid; CapabilityRid cap_rid; kfree(ai->APList); ai->APList = NULL; kfree(ai->SSID); ai->SSID = NULL; // general configuration (read/modify/write) status = readConfigRid(ai, lock); if ( status != SUCCESS ) return ERROR; status = readCapabilityRid(ai, &cap_rid, lock); if ( status != SUCCESS ) return ERROR; status = PC4500_readrid(ai,RID_RSSI,&rssi_rid,sizeof(rssi_rid),lock); if ( status == SUCCESS ) { if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL) memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ } else { kfree(ai->rssi); ai->rssi = NULL; if (cap_rid.softCap & cpu_to_le16(8)) ai->config.rmode |= RXMODE_NORMALIZED_RSSI; else airo_print_warn(ai->dev->name, "unknown received signal " "level scale"); } ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS; ai->config.authType = AUTH_OPEN; ai->config.modulation = MOD_CCK; if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) && (cap_rid.extSoftCap & cpu_to_le16(1)) && micsetup(ai) == SUCCESS) { ai->config.opmode |= MODE_MIC; set_bit(FLAG_MIC_CAPABLE, &ai->flags); } /* Save off the MAC */ for( i = 0; i < ETH_ALEN; i++ ) { mac[i] = ai->config.macAddr[i]; } /* Check to see if there are any insmod configured rates to add */ if ( rates[0] ) { memset(ai->config.rates,0,sizeof(ai->config.rates)); for( i = 0; i < 8 && rates[i]; i++ ) { ai->config.rates[i] = rates[i]; } } set_bit (FLAG_COMMIT, &ai->flags); } /* Setup the SSIDs if present */ if ( ssids[0] ) { int i; for( i = 0; i < 3 && ssids[i]; i++ ) { size_t len = strlen(ssids[i]); if (len > 32) len = 32; mySsid.ssids[i].len = cpu_to_le16(len); memcpy(mySsid.ssids[i].ssid, ssids[i], len); } mySsid.len = cpu_to_le16(sizeof(mySsid)); } status = writeConfigRid(ai, lock); if ( status != SUCCESS ) return ERROR; /* Set up the SSID list */ if ( ssids[0] ) { status = writeSsidRid(ai, &mySsid, lock); if ( status != SUCCESS ) return ERROR; } status = enable_MAC(ai, lock); if (status != SUCCESS) return ERROR; /* Grab the initial wep key, we gotta save 
it for auto_wep */ rc = readWepKeyRid(ai, &wkr, 1, lock); if (rc == SUCCESS) do { lastindex = wkr.kindex; if (wkr.kindex == cpu_to_le16(0xffff)) { ai->defindex = wkr.mac[0]; } rc = readWepKeyRid(ai, &wkr, 0, lock); } while(lastindex != wkr.kindex); try_auto_wep(ai); return SUCCESS; } static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) { // Im really paranoid about letting it run forever! int max_tries = 600000; if (IN4500(ai, EVSTAT) & EV_CMD) OUT4500(ai, EVACK, EV_CMD); OUT4500(ai, PARAM0, pCmd->parm0); OUT4500(ai, PARAM1, pCmd->parm1); OUT4500(ai, PARAM2, pCmd->parm2); OUT4500(ai, COMMAND, pCmd->cmd); while (max_tries-- && (IN4500(ai, EVSTAT) & EV_CMD) == 0) { if ((IN4500(ai, COMMAND)) == pCmd->cmd) // PC4500 didn't notice command, try again OUT4500(ai, COMMAND, pCmd->cmd); if (!in_atomic() && (max_tries & 255) == 0) schedule(); } if ( max_tries == -1 ) { airo_print_err(ai->dev->name, "Max tries exceeded when issuing command"); if (IN4500(ai, COMMAND) & COMMAND_BUSY) OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); return ERROR; } // command completed pRsp->status = IN4500(ai, STATUS); pRsp->rsp0 = IN4500(ai, RESP0); pRsp->rsp1 = IN4500(ai, RESP1); pRsp->rsp2 = IN4500(ai, RESP2); if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) airo_print_err(ai->dev->name, "cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x", pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1, pRsp->rsp2); // clear stuck command busy if necessary if (IN4500(ai, COMMAND) & COMMAND_BUSY) { OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY); } // acknowledge processing the status/response OUT4500(ai, EVACK, EV_CMD); return SUCCESS; } /* Sets up the bap to start exchange data. whichbap should * be one of the BAP0 or BAP1 defines. Locks should be held before * calling! 
*/
/*
 * Point Buffer Access Path @whichbap (BAP0/BAP1) at @offset within
 * FID/RID @rid.  Busy-polls the offset register; on BAP_BUSY it spins
 * (no delay), on BAP_ERR it fails, on BAP_DONE it succeeds.  After 50
 * busy polls the select/offset writes are replayed, up to 3 times.
 * Caller must hold the appropriate lock.  Returns SUCCESS or ERROR.
 */
static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
{
	int timeout = 50;
	int max_tries = 3;

	OUT4500(ai, SELECT0+whichbap, rid);
	OUT4500(ai, OFFSET0+whichbap, offset);
	while (1) {
		int status = IN4500(ai, OFFSET0+whichbap);
		if (status & BAP_BUSY) {
                        /* This isn't really a timeout, but its kinda
			   close */
			if (timeout--) {
				continue;
			}
		} else if ( status & BAP_ERR ) {
			/* invalid rid or offset */
			airo_print_err(ai->dev->name, "BAP error %x %d",
				status, whichbap );
			return ERROR;
		} else if (status & BAP_DONE) { // success
			return SUCCESS;
		}
		if ( !(max_tries--) ) {
			airo_print_err(ai->dev->name,
				"BAP setup error too many retries\n");
			return ERROR;
		}
		// -- PC4500 missed it, try again
		OUT4500(ai, SELECT0+whichbap, rid);
		OUT4500(ai, OFFSET0+whichbap, offset);
		timeout = 50;
	}
}

/* should only be called by aux_bap_read.  This aux function and the
   following use concepts not documented in the developers guide.  I
   got them from a patch given to my by Aironet */
/* Select AUX page/offset; returns the chain pointer to the next page
 * and stores the current fragment length (bytes) in *len. */
static u16 aux_setup(struct airo_info *ai, u16 page,
		     u16 offset, u16 *len)
{
	u16 next;

	OUT4500(ai, AUXPAGE, page);
	OUT4500(ai, AUXOFF, 0);
	next = IN4500(ai, AUXDATA);
	*len = IN4500(ai, AUXDATA)&0xff;
	if (offset != 4) OUT4500(ai, AUXOFF, offset);
	return next;
}

/* requires call to bap_setup() first */
/*
 * Read @bytelen bytes into @pu16Dst through the AUX port, walking the
 * card's page chain fragment by fragment.  Serialized with
 * ai->aux_lock (irqsave).  Always returns SUCCESS.
 */
static int aux_bap_read(struct airo_info *ai, __le16 *pu16Dst,
			int bytelen, int whichbap)
{
	u16 len;
	u16 page;
	u16 offset;
	u16 next;
	int words;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ai->aux_lock, flags);
	page = IN4500(ai, SWS0+whichbap);
	offset = IN4500(ai, SWS2+whichbap);
	next = aux_setup(ai, page, offset, &len);
	words = (bytelen+1)>>1;

	for (i=0; i<words;) {
		/* read at most the remainder of the current fragment */
		int count;
		count = (len>>1) < (words-i) ? (len>>1) : (words-i);
		if ( !do8bitIO )
			insw( ai->dev->base_addr+DATA0+whichbap,
			      pu16Dst+i,count );
		else
			insb( ai->dev->base_addr+DATA0+whichbap,
			      pu16Dst+i, count << 1 );
		i += count;
		if (i<words) {
			next = aux_setup(ai, next, 4, &len);
		}
	}
	spin_unlock_irqrestore(&ai->aux_lock, flags);
	return SUCCESS;
}


/* requires call to bap_setup() first */
/* Stream @bytelen bytes (rounded up to even) from the selected BAP. */
static int fast_bap_read(struct airo_info *ai, __le16 *pu16Dst,
			 int bytelen, int whichbap)
{
	bytelen = (bytelen + 1) & (~1); // round up to even value
	if ( !do8bitIO )
		insw( ai->dev->base_addr+DATA0+whichbap,
		      pu16Dst, bytelen>>1 );
	else
		insb( ai->dev->base_addr+DATA0+whichbap,
		      pu16Dst, bytelen );
	return SUCCESS;
}

/* requires call to bap_setup() first */
/* Stream @bytelen bytes (rounded up to even) to the selected BAP. */
static int bap_write(struct airo_info *ai, const __le16 *pu16Src,
		     int bytelen, int whichbap)
{
	bytelen = (bytelen + 1) & (~1); // round up to even value
	if ( !do8bitIO )
		outsw( ai->dev->base_addr+DATA0+whichbap,
		       pu16Src, bytelen>>1 );
	else
		outsb( ai->dev->base_addr+DATA0+whichbap,
		       pu16Src, bytelen );
	return SUCCESS;
}

/*
 * Issue a RID access command (@accmd, e.g. CMD_ACCESS) for @rid.
 * Returns 0 on success; on a card-reported error returns a composite
 * of the command and the response's low error byte.
 */
static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
{
	Cmd cmd;
	Resp rsp;
	u16 status;

        memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = accmd;
	cmd.parm0 = rid;
	status = issuecommand(ai, &cmd, &rsp);
	if (status != 0) return status;
	if ( (rsp.status & 0x7F00) != 0) {
		return (accmd << 8) + (rsp.rsp0 & 0xFF);
	}
	return 0;
}

/* Note, that we are using BAP1 which is also used by transmit, so
 * we must get a lock.
 */
/* Read up to len bytes of RID 'rid' into pBuf.  When 'lock' is set the
 * driver semaphore is taken for the duration.  MPI (PCI) cards use a
 * shared-memory descriptor; ISA/PCMCIA cards go through BAP1, first
 * reading the 2-byte RID length, then the remainder. */
static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, int lock)
{
	u16 status;
	int rc = SUCCESS;

	if (lock) {
		if (down_interruptible(&ai->sem))
			return ERROR;
	}
	if (test_bit(FLAG_MPI,&ai->flags)) {
		Cmd cmd;
		Resp rsp;

		memset(&cmd, 0, sizeof(cmd));
		memset(&rsp, 0, sizeof(rsp));
		ai->config_desc.rid_desc.valid = 1;
		ai->config_desc.rid_desc.len = RIDSIZE;
		ai->config_desc.rid_desc.rid = 0;
		ai->config_desc.rid_desc.host_addr = ai->ridbus;

		cmd.cmd = CMD_ACCESS;
		cmd.parm0 = rid;

		memcpy_toio(ai->config_desc.card_ram_off,
			&ai->config_desc.rid_desc, sizeof(Rid));

		rc = issuecommand(ai, &cmd, &rsp);

		if (rsp.status & 0x7f00)
			rc = rsp.rsp0;
		if (!rc)
			memcpy(pBuf, ai->config_desc.virtual_host_addr, len);
		goto done;
	} else {
		if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS))!=SUCCESS) {
			rc = status;
			goto done;
		}
		if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
			rc = ERROR;
			goto done;
		}
		// read the rid length field
		bap_read(ai, pBuf, 2, BAP1);
		// length for remaining part of rid
		len = min(len, (int)le16_to_cpu(*(__le16*)pBuf)) - 2;

		if ( len <= 2 ) {
			airo_print_err(ai->dev->name,
				"Rid %x has a length of %d which is too short",
				(int)rid, (int)len );
			rc = ERROR;
			goto done;
		}
		// read remainder of the rid
		rc = bap_read(ai, ((__le16*)pBuf)+1, len, BAP1);
	}
done:
	if (lock)
		up(&ai->sem);
	return rc;
}

/* Note, that we are using BAP1 which is also used by transmit, so
 * make sure this isn't called when a transmit is happening */
/* Write len bytes of pBuf to RID 'rid'.  NOTE: stamps the RID length
 * into the first 2 bytes of pBuf (so pBuf is written to despite the
 * const in callers' minds).  MPI cards require len in [4, 2047]. */
static int PC4500_writerid(struct airo_info *ai, u16 rid,
			   const void *pBuf, int len, int lock)
{
	u16 status;
	int rc = SUCCESS;

	*(__le16*)pBuf = cpu_to_le16((u16)len);

	if (lock) {
		if (down_interruptible(&ai->sem))
			return ERROR;
	}
	if (test_bit(FLAG_MPI,&ai->flags)) {
		Cmd cmd;
		Resp rsp;

		/* firmware expects the MAC disabled for most RID writes */
		if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
			airo_print_err(ai->dev->name,
				"%s: MAC should be disabled (rid=%04x)",
				__func__, rid);
		memset(&cmd, 0, sizeof(cmd));
		memset(&rsp, 0, sizeof(rsp));

		ai->config_desc.rid_desc.valid = 1;
		ai->config_desc.rid_desc.len = *((u16 *)pBuf);
		ai->config_desc.rid_desc.rid = 0;

		cmd.cmd = CMD_WRITERID;
		cmd.parm0 = rid;

		memcpy_toio(ai->config_desc.card_ram_off,
			&ai->config_desc.rid_desc, sizeof(Rid));

		if (len < 4 || len > 2047) {
			airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
			rc = -1;
		} else {
			memcpy(ai->config_desc.virtual_host_addr,
				pBuf, len);

			rc = issuecommand(ai, &cmd, &rsp);
			if ((rc & 0xff00) != 0) {
				airo_print_err(ai->dev->name, "%s: Write rid Error %d",
						__func__, rc);
				airo_print_err(ai->dev->name, "%s: Cmd=%04x",
						__func__, cmd.cmd);
			}

			if ((rsp.status & 0x7f00))
				rc = rsp.rsp0;
		}
	} else {
		// --- first access so that we can write the rid data
		if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
			rc = status;
			goto done;
		}
		// --- now write the rid data
		if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
			rc = ERROR;
			goto done;
		}
		bap_write(ai, pBuf, len, BAP1);
		// ---now commit the rid data
		rc = PC4500_accessrid(ai, rid, 0x100|CMD_ACCESS);
	}
done:
	if (lock)
		up(&ai->sem);
	return rc;
}

/* Allocates a FID to be used for transmitting packets.  We only use
   one for now.  Returns the FID, or ERROR on any failure.  Also
   pre-writes the per-FID transmit control word (802.3 vs raw 802.11). */
static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw)
{
	unsigned int loop = 3000;
	Cmd cmd;
	Resp rsp;
	u16 txFid;
	__le16 txControl;

	cmd.cmd = CMD_ALLOCATETX;
	cmd.parm0 = lenPayload;
	if (down_interruptible(&ai->sem))
		return ERROR;
	if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
		txFid = ERROR;
		goto done;
	}
	if ( (rsp.status & 0xFF00) != 0) {
		txFid = ERROR;
		goto done;
	}
	/* wait for the allocate event/indication
	 * It makes me kind of nervous that this can just sit here and spin,
	 * but in practice it only loops like four times. */
	while (((IN4500(ai, EVSTAT) & EV_ALLOC) == 0) && --loop);
	if (!loop) {
		txFid = ERROR;
		goto done;
	}

	// get the allocated fid and acknowledge
	txFid = IN4500(ai, TXALLOCFID);
	OUT4500(ai, EVACK, EV_ALLOC);

	/*  The CARD is pretty cool since it converts the ethernet packet
	 *  into 802.11.  Also note that we don't release the FID since we
	 *  will be using the same one over and over again. */
	/*  We only have to setup the control once since we are not
	 *  releasing the fid. */
	if (raw)
		txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_11
			| TXCTL_ETHERNET | TXCTL_NORELEASE);
	else
		txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_3
			| TXCTL_ETHERNET | TXCTL_NORELEASE);
	if (bap_setup(ai, txFid, 0x0008, BAP1) != SUCCESS)
		txFid = ERROR;
	else
		bap_write(ai, &txControl, sizeof(txControl), BAP1);

done:
	up(&ai->sem);

	return txFid;
}

/* In general BAP1 is dedicated to transmiting packets.  However,
   since we need a BAP when accessing RIDs, we also use BAP1 for that.
   Make sure the BAP1 spinlock is held when this is called. */
/* Queue an 802.3 frame for transmit.  NOTE the packed argument: the
 * low 16 bits of 'len' carry the FID, the high 16 bits the length.
 * Optionally inserts a MIC when the interface has MIC enabled and the
 * frame is not an EAPOL (0x888E) frame. */
static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
{
	__le16 payloadLen;
	Cmd cmd;
	Resp rsp;
	int miclen = 0;
	u16 txFid = len;
	MICBuffer pMic;

	len >>= 16;

	if (len <= ETH_ALEN * 2) {
		airo_print_warn(ai->dev->name, "Short packet %d", len);
		return ERROR;
	}
	len -= ETH_ALEN * 2;

	if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
	    (ntohs(((__be16 *)pPacket)[6]) != 0x888E)) {
		if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS)
			return ERROR;
		miclen = sizeof(pMic);
	}
	// packet is destination[6], source[6], payload[len-12]
	// write the payload length and dst/src/payload
	if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR;
	/* The hardware addresses aren't counted as part of the payload, so
	 * we have to subtract the 12 bytes for the addresses off */
	payloadLen = cpu_to_le16(len + miclen);
	bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
	bap_write(ai, (__le16*)pPacket, sizeof(etherHead), BAP1);
	if (miclen)
		bap_write(ai, (__le16*)&pMic, miclen, BAP1);
	bap_write(ai, (__le16*)(pPacket + sizeof(etherHead)), len, BAP1);
	// issue the transmit command
	memset( &cmd, 0, sizeof( cmd ) );
	cmd.cmd = CMD_TRANSMIT;
	cmd.parm0 = txFid;
	if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
	if ( (rsp.status & 0xFF00) != 0) return ERROR;
	return SUCCESS;
}

/* Queue a raw 802.11 frame for transmit.  Same packed FID/length
 * convention as transmit_802_3_packet().  The static 'tail' buffer
 * pads short headers up to the card's fixed 38-byte header area and
 * supplies the little-endian gap length. */
static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
{
	__le16 fc, payloadLen;
	Cmd cmd;
	Resp rsp;
	int hdrlen;
	static u8 tail[(30-10) + 2 + 6] = {[30-10] = 6};
	/* padding of header to full size + le16 gaplen (6) + gaplen bytes */
	u16 txFid = len;
	len >>= 16;

	fc = *(__le16*)pPacket;
	hdrlen = header_len(fc);

	if (len < hdrlen) {
		airo_print_warn(ai->dev->name, "Short packet %d", len);
		return ERROR;
	}

	/* packet is 802.11 header + payload
	 * write the payload length and dst/src/payload */
	if (bap_setup(ai, txFid, 6, BAP1) != SUCCESS) return ERROR;
	/* The 802.11 header aren't counted as part of the payload, so
	 * we have to subtract the header bytes off */
	payloadLen = cpu_to_le16(len-hdrlen);
	bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
	if (bap_setup(ai, txFid, 0x0014, BAP1) != SUCCESS) return ERROR;
	bap_write(ai, (__le16 *)pPacket, hdrlen, BAP1);
	bap_write(ai, (__le16 *)(tail + (hdrlen - 10)), 38 - hdrlen, BAP1);

	bap_write(ai, (__le16 *)(pPacket + hdrlen), len - hdrlen, BAP1);
	// issue the transmit command
	memset( &cmd, 0, sizeof( cmd ) );
	cmd.cmd = CMD_TRANSMIT;
	cmd.parm0 = txFid;
	if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
	if ( (rsp.status & 0xFF00) != 0) return ERROR;
	return SUCCESS;
}

/*
 *  This is the proc_fs routines.  It is a bit messier than I would
 *  like!  Feel free to clean it up!
 */

/* Forward declarations for the per-file /proc handlers below. */
static ssize_t proc_read( struct file *file,
			  char __user *buffer,
			  size_t len,
			  loff_t *offset);

static ssize_t proc_write( struct file *file,
			   const char __user *buffer,
			   size_t len,
			   loff_t *offset );
static int proc_close( struct inode *inode, struct file *file );

static int proc_stats_open( struct inode *inode, struct file *file );
static int proc_statsdelta_open( struct inode *inode, struct file *file );
static int proc_status_open( struct inode *inode, struct file *file );
static int proc_SSID_open( struct inode *inode, struct file *file );
static int proc_APList_open( struct inode *inode, struct file *file );
static int proc_BSSList_open( struct inode *inode, struct file *file );
static int proc_config_open( struct inode *inode, struct file *file );
static int proc_wepkey_open( struct inode *inode, struct file *file );

/* file_operations tables for each /proc file; read-only files omit
 * .write, writable ones route through the generic proc_write(). */
static const struct file_operations proc_statsdelta_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.open		= proc_statsdelta_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_stats_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.open		= proc_stats_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_status_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.open		= proc_status_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_SSID_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_SSID_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_BSSList_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_BSSList_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_APList_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_APList_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_config_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_config_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

static const struct file_operations proc_wepkey_ops = {
	.owner		= THIS_MODULE,
	.read		= proc_read,
	.write		= proc_write,
	.open		= proc_wepkey_open,
	.release	= proc_close,
	.llseek		= default_llseek,
};

/* root /proc directory for this driver */
static struct proc_dir_entry *airo_entry;

/* Per-open-file state: a preallocated read buffer filled at open()
 * and a write buffer flushed by on_close() at release(). */
struct proc_data {
	int release_buffer;
	int readlen;
	char *rbuffer;
	int writelen;
	int maxwritelen;
	char *wbuffer;
	void (*on_close) (struct inode *, struct file *);
};

/* Create the per-device /proc directory and all of its entries.
 * Returns 0 on success, -ENOMEM on failure (partially-created
 * entries are torn down via the fail: path). */
static int setup_proc_entry( struct net_device *dev,
			     struct airo_info *apriv ) {
	struct proc_dir_entry *entry;

	/* First setup the device directory */
	strcpy(apriv->proc_name,dev->name);
	apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
					    airo_entry);
	if (!apriv->proc_entry)
		return -ENOMEM;
	proc_set_user(apriv->proc_entry, proc_kuid, proc_kgid);

	/* Setup the StatsDelta */
	entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
				 apriv->proc_entry, &proc_statsdelta_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);

	/* Setup the Stats */
	entry = proc_create_data("Stats", S_IRUGO & proc_perm,
				 apriv->proc_entry, &proc_stats_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);

	/* Setup the Status */
	entry = proc_create_data("Status", S_IRUGO & proc_perm,
				 apriv->proc_entry, &proc_status_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);

	/* Setup the Config */
	entry = proc_create_data("Config", proc_perm,
				 apriv->proc_entry, &proc_config_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);

	/* Setup the SSID */
	entry = proc_create_data("SSID", proc_perm,
				 apriv->proc_entry, &proc_SSID_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);

	/* Setup the APList */
	entry = proc_create_data("APList", proc_perm,
				 apriv->proc_entry, &proc_APList_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);

	/* Setup the BSSList */
	entry = proc_create_data("BSSList", proc_perm,
				 apriv->proc_entry, &proc_BSSList_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);

	/* Setup the WepKey */
	entry = proc_create_data("WepKey", proc_perm,
				 apriv->proc_entry, &proc_wepkey_ops, dev);
	if (!entry)
		goto fail;
	proc_set_user(entry, proc_kuid, proc_kgid);
	return 0;

fail:
	remove_proc_subtree(apriv->proc_name, airo_entry);
	return -ENOMEM;
}

/* Remove the whole per-device /proc subtree. */
static int takedown_proc_entry( struct net_device *dev,
				struct airo_info *apriv )
{
	remove_proc_subtree(apriv->proc_name, airo_entry);
	return 0;
}

/*
 *  What we want from the proc_fs is to be able to efficiently read
 *  and write the configuration.  To do this, we want to read the
 *  configuration when the file is opened and write it when the file is
 *  closed.  So basically we allocate a read buffer at open and fill it
 *  with data, and allocate a write buffer and read it at close.
 */

/*
 *  The read routine is generic, it relies on the preallocated rbuffer
 *  to supply the data.
 */
static ssize_t proc_read( struct file *file,
			  char __user *buffer,
			  size_t len,
			  loff_t *offset )
{
	struct proc_data *priv = file->private_data;

	if (!priv->rbuffer)
		return -EINVAL;

	return simple_read_from_buffer(buffer, len, offset, priv->rbuffer,
					priv->readlen);
}

/*
 *  The write routine is generic, it fills in a preallocated rbuffer
 *  to supply the data.
 */
static ssize_t proc_write( struct file *file,
			   const char __user *buffer,
			   size_t len,
			   loff_t *offset )
{
	ssize_t ret;
	struct proc_data *priv = file->private_data;

	if (!priv->wbuffer)
		return -EINVAL;

	ret = simple_write_to_buffer(priv->wbuffer, priv->maxwritelen, offset,
					buffer, len);
	if (ret > 0)
		priv->writelen = max_t(int, priv->writelen, *offset);

	return ret;
}

/* open() for /proc/.../Status: snapshot status+capability RIDs and
 * render them into a 2 KB read buffer. */
static int proc_status_open(struct inode *inode, struct file *file)
{
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *apriv = dev->ml_priv;
	CapabilityRid cap_rid;
	StatusRid status_rid;
	u16 mode;
	int i;

	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	data = file->private_data;
	if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
		kfree (file->private_data);
		return -ENOMEM;
	}

	readStatusRid(apriv, &status_rid, 1);
	readCapabilityRid(apriv, &cap_rid, 1);

	mode = le16_to_cpu(status_rid.mode);

	i = sprintf(data->rbuffer, "Status: %s%s%s%s%s%s%s%s%s\n",
		    mode & 1 ? "CFG ": "",
		    mode & 2 ? "ACT ": "",
		    mode & 0x10 ? "SYN ": "",
		    mode & 0x20 ? "LNK ": "",
		    mode & 0x40 ? "LEAP ": "",
		    mode & 0x80 ? "PRIV ": "",
		    mode & 0x100 ? "KEY ": "",
		    mode & 0x200 ? "WEP ": "",
		    mode & 0x8000 ? "ERR ": "");
	sprintf( data->rbuffer+i, "Mode: %x\n"
		 "Signal Strength: %d\n"
		 "Signal Quality: %d\n"
		 "SSID: %-.*s\n"
		 "AP: %-.16s\n"
		 "Freq: %d\n"
		 "BitRate: %dmbs\n"
		 "Driver Version: %s\n"
		 "Device: %s\nManufacturer: %s\nFirmware Version: %s\n"
		 "Radio type: %x\nCountry: %x\nHardware Version: %x\n"
		 "Software Version: %x\nSoftware Subversion: %x\n"
		 "Boot block version: %x\n",
		 le16_to_cpu(status_rid.mode),
		 le16_to_cpu(status_rid.normalizedSignalStrength),
		 le16_to_cpu(status_rid.signalQuality),
		 le16_to_cpu(status_rid.SSIDlen),
		 status_rid.SSID,
		 status_rid.apName,
		 le16_to_cpu(status_rid.channel),
		 le16_to_cpu(status_rid.currentXmitRate) / 2,
		 version,
		 cap_rid.prodName,
		 cap_rid.manName,
		 cap_rid.prodVer,
		 le16_to_cpu(cap_rid.radioType),
		 le16_to_cpu(cap_rid.country),
		 le16_to_cpu(cap_rid.hardVer),
		 le16_to_cpu(cap_rid.softVer),
		 le16_to_cpu(cap_rid.softSubVer),
		 le16_to_cpu(cap_rid.bootBlockVer));
	data->readlen = strlen( data->rbuffer );
	return 0;
}

static int proc_stats_rid_open(struct inode*, struct file*, u16);

/* Writable open clears the delta counters (STATSDELTACLEAR),
 * otherwise reads the current deltas. */
static int proc_statsdelta_open( struct inode *inode,
				 struct file *file ) {
	if (file->f_mode&FMODE_WRITE) {
		return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR);
	}
	return proc_stats_rid_open(inode, file, RID_STATSDELTA);
}

static int proc_stats_open( struct inode *inode, struct file *file ) {
	return proc_stats_rid_open(inode, file, RID_STATS);
}

/* Shared open() body for the stats files: read the requested stats
 * RID and format one "label: value" line per labelled counter into a
 * 4 KB read buffer. */
static int proc_stats_rid_open( struct inode *inode,
				struct file *file,
				u16 rid )
{
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *apriv = dev->ml_priv;
	StatsRid stats;
	int i, j;
	__le32 *vals = stats.vals;
	int len;

	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	data = file->private_data;
	if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) {
		kfree (file->private_data);
		return -ENOMEM;
	}

	readStatsRid(apriv, &stats, rid, 1);
	len = le16_to_cpu(stats.len);

	j = 0;
	for(i=0; statsLabels[i]!=(char *)-1 && i*4<len; i++) {
		if (!statsLabels[i]) continue;	/* unlabelled slot: skip */
		if (j+strlen(statsLabels[i])+16>4096) {
			airo_print_warn(apriv->dev->name,
			       "Potentially disastrous buffer overflow averted!");
			break;
		}
		j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i],
				le32_to_cpu(vals[i]));
	}
	if (i*4 >= len) {
		airo_print_warn(apriv->dev->name, "Got a short rid");
	}
	data->readlen = j;
	return 0;
}

/* Parse an unsigned decimal number from buffer starting at *start,
 * consuming at most 'limit' characters; advances *start past the
 * digits.  Returns the value, or -1 if no digit was found. */
static int get_dec_u16( char *buffer, int *start, int limit ) {
	u16 value;
	int valid = 0;
	for (value = 0; *start < limit && buffer[*start] >= '0' &&
			buffer[*start] <= '9'; (*start)++) {
		valid = 1;
		value *= 10;
		value += buffer[*start] - '0';
	}
	if ( !valid ) return -1;
	return value;
}

static int airo_config_commit(struct net_device *dev,
			      struct iw_request_info *info, void *zwrq,
			      char *extra);

/* True when the receive mode is any of the monitor modes. */
static inline int sniffing_mode(struct airo_info *ai)
{
	return (le16_to_cpu(ai->config.rmode) & le16_to_cpu(RXMODE_MASK)) >=
		le16_to_cpu(RXMODE_RFMON);
}

/* release() handler for /proc/.../Config: parse the "Key: value"
 * lines the user wrote into wbuffer, update ai->config accordingly,
 * and commit at the end. */
static void proc_config_on_close(struct inode *inode, struct file *file)
{
	struct proc_data *data = file->private_data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	char *line;

	if ( !data->writelen ) return;

	readConfigRid(ai, 1);
	set_bit (FLAG_COMMIT, &ai->flags);

	line = data->wbuffer;
	while( line[0] ) {
/*** Mode processing */
		if ( !strncmp( line, "Mode: ", 6 ) ) {
			line += 6;
			if (sniffing_mode(ai))
				set_bit (FLAG_RESET, &ai->flags);
			ai->config.rmode &= ~RXMODE_FULL_MASK;
			clear_bit (FLAG_802_11, &ai->flags);
			ai->config.opmode &= ~MODE_CFG_MASK;
			ai->config.scanMode = SCANMODE_ACTIVE;
			if ( line[0] == 'a' ) {
				ai->config.opmode |= MODE_STA_IBSS;
			} else {
				ai->config.opmode |= MODE_STA_ESS;
				if ( line[0] == 'r' ) {
					ai->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
					ai->config.scanMode = SCANMODE_PASSIVE;
					set_bit (FLAG_802_11, &ai->flags);
				} else if ( line[0] == 'y' ) {
					ai->config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER;
					ai->config.scanMode = SCANMODE_PASSIVE;
					set_bit (FLAG_802_11, &ai->flags);
				} else if ( line[0] == 'l' )
					ai->config.rmode |= RXMODE_LANMON;
			}
			set_bit (FLAG_COMMIT, &ai->flags);
		}

/*** Radio status */
		else if (!strncmp(line,"Radio: ", 7)) {
			line += 7;
			if (!strncmp(line,"off",3)) {
				set_bit (FLAG_RADIO_OFF, &ai->flags);
			} else {
				clear_bit (FLAG_RADIO_OFF, &ai->flags);
			}
		}
/*** NodeName processing */
		else if ( !strncmp( line, "NodeName: ", 10 ) ) {
			int j;

			line += 10;
			memset( ai->config.nodeName, 0, 16 );
/* Do the name, assume a space between the mode and node name */
			for( j = 0; j < 16 && line[j] != '\n'; j++ ) {
				ai->config.nodeName[j] = line[j];
			}
			set_bit (FLAG_COMMIT, &ai->flags);
		}

/*** PowerMode processing */
		else if ( !strncmp( line, "PowerMode: ", 11 ) ) {
			line += 11;
			if ( !strncmp( line, "PSPCAM", 6 ) ) {
				ai->config.powerSaveMode = POWERSAVE_PSPCAM;
				set_bit (FLAG_COMMIT, &ai->flags);
			} else if ( !strncmp( line, "PSP", 3 ) ) {
				ai->config.powerSaveMode = POWERSAVE_PSP;
				set_bit (FLAG_COMMIT, &ai->flags);
			} else {
				ai->config.powerSaveMode = POWERSAVE_CAM;
				set_bit (FLAG_COMMIT, &ai->flags);
			}
		} else if ( !strncmp( line, "DataRates: ", 11 ) ) {
			int v, i = 0, k = 0; /* i is index into line,
						k is index to rates */

			line += 11;
			while((v = get_dec_u16(line, &i, 3))!=-1) {
				ai->config.rates[k++] = (u8)v;
				line += i + 1;
				i = 0;
			}
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "Channel: ", 9 ) ) {
			int v, i = 0;
			line += 9;
			v = get_dec_u16(line, &i, i+3);
			if ( v != -1 ) {
				ai->config.channelSet = cpu_to_le16(v);
				set_bit (FLAG_COMMIT, &ai->flags);
			}
		} else if ( !strncmp( line, "XmitPower: ", 11 ) ) {
			int v, i = 0;
			line += 11;
			v = get_dec_u16(line, &i, i+3);
			if ( v != -1 ) {
				ai->config.txPower = cpu_to_le16(v);
				set_bit (FLAG_COMMIT, &ai->flags);
			}
		} else if ( !strncmp( line, "WEP: ", 5 ) ) {
			line += 5;
			switch( line[0] ) {
			case 's':
				ai->config.authType = AUTH_SHAREDKEY;
				break;
			case 'e':
				ai->config.authType = AUTH_ENCRYPT;
				break;
			default:
				ai->config.authType = AUTH_OPEN;
				break;
			}
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "LongRetryLimit: ", 16 ) ) {
			int v, i = 0;

			line += 16;
			v = get_dec_u16(line, &i, 3);
			v = (v<0) ? 0 : ((v>255) ? 255 : v);	/* clamp to u8 range */
			ai->config.longRetryLimit = cpu_to_le16(v);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "ShortRetryLimit: ", 17 ) ) {
			int v, i = 0;

			line += 17;
			v = get_dec_u16(line, &i, 3);
			v = (v<0) ? 0 : ((v>255) ? 255 : v);
			ai->config.shortRetryLimit = cpu_to_le16(v);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "RTSThreshold: ", 14 ) ) {
			int v, i = 0;

			line += 14;
			v = get_dec_u16(line, &i, 4);
			v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
			ai->config.rtsThres = cpu_to_le16(v);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) {
			int v, i = 0;

			line += 16;
			v = get_dec_u16(line, &i, 5);
			v = (v<0) ? 0 : v;
			ai->config.txLifetime = cpu_to_le16(v);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "RXMSDULifetime: ", 16 ) ) {
			int v, i = 0;

			line += 16;
			v = get_dec_u16(line, &i, 5);
			v = (v<0) ? 0 : v;
			ai->config.rxLifetime = cpu_to_le16(v);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "TXDiversity: ", 13 ) ) {
			ai->config.txDiversity =
				(line[13]=='l') ? 1 :
				((line[13]=='r')? 2: 3);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "RXDiversity: ", 13 ) ) {
			ai->config.rxDiversity =
				(line[13]=='l') ? 1 :
				((line[13]=='r')? 2: 3);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if ( !strncmp( line, "FragThreshold: ", 15 ) ) {
			int v, i = 0;

			line += 15;
			v = get_dec_u16(line, &i, 4);
			v = (v<256) ? 256 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
			v = v & 0xfffe; /* Make sure its even */
			ai->config.fragThresh = cpu_to_le16(v);
			set_bit (FLAG_COMMIT, &ai->flags);
		} else if (!strncmp(line, "Modulation: ", 12)) {
			line += 12;
			switch(*line) {
			case 'd':  ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
			case 'c':  ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
			case 'm':  ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
			default: airo_print_warn(ai->dev->name, "Unknown modulation");
			}
		} else if (!strncmp(line, "Preamble: ", 10)) {
			line += 10;
			switch(*line) {
			case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
			case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
			case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
			default: airo_print_warn(ai->dev->name, "Unknown preamble");
			}
		} else {
			airo_print_warn(ai->dev->name, "Couldn't figure out %s", line);
		}
		/* advance to the start of the next line */
		while( line[0] && line[0] != '\n' ) line++;
		if ( line[0] ) line++;
	}
	airo_config_commit(dev, NULL, NULL, NULL);
}

/* Human-readable name for a receive (monitor) mode. */
static const char *get_rmode(__le16 mode)
{
	switch(mode & RXMODE_MASK) {
	case RXMODE_RFMON:  return "rfmon";
	case RXMODE_RFMON_ANYBSS:  return "yna (any) bss rfmon";
	case RXMODE_LANMON:  return "lanmon";
	}
	return "ESS";
}

/* open() for /proc/.../Config: render the current config RID into the
 * read buffer and arm proc_config_on_close() to parse writes. */
static int proc_config_open(struct inode *inode, struct file *file)
{
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	int i;
	__le16 mode;

	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	data = file->private_data;
	if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
		kfree (file->private_data);
		return -ENOMEM;
	}
	if ((data->wbuffer = kzalloc( 2048, GFP_KERNEL )) == NULL) {
		kfree (data->rbuffer);
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->maxwritelen = 2048;
	data->on_close = proc_config_on_close;

	readConfigRid(ai, 1);

	mode = ai->config.opmode & MODE_CFG_MASK;
	i = sprintf( data->rbuffer,
		     "Mode: %s\n"
		     "Radio: %s\n"
		     "NodeName: %-16s\n"
		     "PowerMode: %s\n"
		     "DataRates: %d %d %d %d %d %d %d %d\n"
		     "Channel: %d\n"
		     "XmitPower: %d\n",
		     mode == MODE_STA_IBSS ? "adhoc" :
		     mode == MODE_STA_ESS ? get_rmode(ai->config.rmode):
		     mode == MODE_AP ? "AP" :
		     mode == MODE_AP_RPTR ? "AP RPTR" : "Error",
		     test_bit(FLAG_RADIO_OFF, &ai->flags) ? "off" : "on",
		     ai->config.nodeName,
		     ai->config.powerSaveMode == POWERSAVE_CAM ? "CAM" :
		     ai->config.powerSaveMode == POWERSAVE_PSP ? "PSP" :
		     ai->config.powerSaveMode == POWERSAVE_PSPCAM ? "PSPCAM" :
		     "Error",
		     (int)ai->config.rates[0],
		     (int)ai->config.rates[1],
		     (int)ai->config.rates[2],
		     (int)ai->config.rates[3],
		     (int)ai->config.rates[4],
		     (int)ai->config.rates[5],
		     (int)ai->config.rates[6],
		     (int)ai->config.rates[7],
		     le16_to_cpu(ai->config.channelSet),
		     le16_to_cpu(ai->config.txPower)
		);
	sprintf( data->rbuffer + i,
		 "LongRetryLimit: %d\n"
		 "ShortRetryLimit: %d\n"
		 "RTSThreshold: %d\n"
		 "TXMSDULifetime: %d\n"
		 "RXMSDULifetime: %d\n"
		 "TXDiversity: %s\n"
		 "RXDiversity: %s\n"
		 "FragThreshold: %d\n"
		 "WEP: %s\n"
		 "Modulation: %s\n"
		 "Preamble: %s\n",
		 le16_to_cpu(ai->config.longRetryLimit),
		 le16_to_cpu(ai->config.shortRetryLimit),
		 le16_to_cpu(ai->config.rtsThres),
		 le16_to_cpu(ai->config.txLifetime),
		 le16_to_cpu(ai->config.rxLifetime),
		 ai->config.txDiversity == 1 ? "left" :
		 ai->config.txDiversity == 2 ? "right" : "both",
		 ai->config.rxDiversity == 1 ? "left" :
		 ai->config.rxDiversity == 2 ? "right" : "both",
		 le16_to_cpu(ai->config.fragThresh),
		 ai->config.authType == AUTH_ENCRYPT ? "encrypt" :
		 ai->config.authType == AUTH_SHAREDKEY ? "shared" : "open",
		 ai->config.modulation == MOD_DEFAULT ? "default" :
		 ai->config.modulation == MOD_CCK ? "cck" :
		 ai->config.modulation == MOD_MOK ? "mok" : "error",
		 ai->config.preamble == PREAMBLE_AUTO ? "auto" :
		 ai->config.preamble == PREAMBLE_LONG ? "long" :
		 ai->config.preamble == PREAMBLE_SHORT ? "short" : "error"
		);
	data->readlen = strlen( data->rbuffer );
	return 0;
}

/* release() handler for /proc/.../SSID: parse up to 3 newline-
 * separated SSIDs (max 32 chars each) from wbuffer and write the
 * SSID RID with the MAC briefly disabled. */
static void proc_SSID_on_close(struct inode *inode, struct file *file)
{
	struct proc_data *data = file->private_data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	SsidRid SSID_rid;
	int i;
	char *p = data->wbuffer;
	char *end = p + data->writelen;

	if (!data->writelen)
		return;

	*end = '\n'; /* sentinel; we have space for it */

	memset(&SSID_rid, 0, sizeof(SSID_rid));

	for (i = 0; i < 3 && p < end; i++) {
		int j = 0;
		/* copy up to 32 characters from this line */
		while (*p != '\n' && j < 32)
			SSID_rid.ssids[i].ssid[j++] = *p++;
		if (j == 0)
			break;
		SSID_rid.ssids[i].len = cpu_to_le16(j);
		/* skip to the beginning of the next line */
		while (*p++ != '\n')
			;
	}
	if (i)
		SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
	disable_MAC(ai, 1);
	writeSsidRid(ai, &SSID_rid, 1);
	enable_MAC(ai, 1);
}

/* release() handler for /proc/.../APList: parse up to 4 hex MAC
 * addresses (6 bytes = 18 hex chars each, no separators counted per
 * nibble-pair) and write the AP list RID. */
static void proc_APList_on_close( struct inode *inode, struct file *file ) {
	struct proc_data *data = file->private_data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	APListRid APList_rid;
	int i;

	if ( !data->writelen ) return;

	memset( &APList_rid, 0, sizeof(APList_rid) );
	APList_rid.len = cpu_to_le16(sizeof(APList_rid));

	for( i = 0; i < 4 && data->writelen >= (i+1)*6*3; i++ ) {
		int j;
		for( j = 0; j < 6*3 && data->wbuffer[j+i*6*3]; j++ ) {
			switch(j%3) {
			case 0:	/* high nibble */
				APList_rid.ap[i][j/3]=
					hex_to_bin(data->wbuffer[j+i*6*3])<<4;
				break;
			case 1:	/* low nibble; j%3==2 is the separator */
				APList_rid.ap[i][j/3]|=
					hex_to_bin(data->wbuffer[j+i*6*3]);
				break;
			}
		}
	}
	disable_MAC(ai, 1);
	writeAPListRid(ai, &APList_rid, 1);
	enable_MAC(ai, 1);
}

/* This function wraps PC4500_writerid with a MAC disable */
static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
			int len, int dummy ) {
	int rc;

	disable_MAC(ai, 1);
	rc = PC4500_writerid(ai, rid, rid_data, len, 1);
	enable_MAC(ai, 1);
	return rc;
}

/* Returns the WEP key at the specified index, or -1 if that key does
   not exist.
   The buffer is assumed to be at least 16 bytes in length.
*/
static int get_wep_key(struct airo_info *ai, u16 index, char *buf, u16 buflen)
{
	WepKeyRid wkr;
	int rc;
	__le16 lastindex;

	rc = readWepKeyRid(ai, &wkr, 1, 1);
	if (rc != SUCCESS)
		return -1;
	do {
		lastindex = wkr.kindex;
		if (le16_to_cpu(wkr.kindex) == index) {
			int klen = min_t(int, buflen, le16_to_cpu(wkr.klen));
			memcpy(buf, wkr.key, klen);
			return klen;
		}
		rc = readWepKeyRid(ai, &wkr, 0, 1);
		if (rc != SUCCESS)
			return -1;
	} while (lastindex != wkr.kindex);	/* repeated index => wrapped */
	return -1;
}

/* Walk the WEP key RIDs looking for the transmit-index entry (kindex
 * 0xffff); its mac[0] byte holds the current tx key index.  Returns
 * -1 when not found or on read error. */
static int get_wep_tx_idx(struct airo_info *ai)
{
	WepKeyRid wkr;
	int rc;
	__le16 lastindex;

	rc = readWepKeyRid(ai, &wkr, 1, 1);
	if (rc != SUCCESS)
		return -1;
	do {
		lastindex = wkr.kindex;
		if (wkr.kindex == cpu_to_le16(0xffff))
			return wkr.mac[0];
		rc = readWepKeyRid(ai, &wkr, 0, 1);
		if (rc != SUCCESS)
			return -1;
	} while (lastindex != wkr.kindex);
	return -1;
}

/* Program the WEP key at 'index'.  'perm' selects persistent storage
 * (MAC is disabled around the write in that case). */
static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
		       u16 keylen, int perm, int lock)
{
	static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
	WepKeyRid wkr;
	int rc;

	if (WARN_ON(keylen == 0))
		return -1;

	memset(&wkr, 0, sizeof(wkr));
	wkr.len = cpu_to_le16(sizeof(wkr));
	wkr.kindex = cpu_to_le16(index);
	wkr.klen = cpu_to_le16(keylen);
	memcpy(wkr.key, key, keylen);
	memcpy(wkr.mac, macaddr, ETH_ALEN);

	if (perm) disable_MAC(ai, lock);
	rc = writeWepKeyRid(ai, &wkr, perm, lock);
	if (perm) enable_MAC(ai, lock);
	return rc;
}

/* Select which WEP key index the card transmits with, via the
 * special kindex==0xffff RID. */
static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
{
	WepKeyRid wkr;
	int rc;

	memset(&wkr, 0, sizeof(wkr));
	wkr.len = cpu_to_le16(sizeof(wkr));
	wkr.kindex = cpu_to_le16(0xffff);
	wkr.mac[0] = (char)index;

	if (perm) {
		ai->defindex = (char)index;
		disable_MAC(ai, lock);
	}
	rc = writeWepKeyRid(ai, &wkr, perm, lock);
	if (perm) enable_MAC(ai, lock);
	return rc;
}

/* release() handler for /proc/.../WepKey: "N\n" selects tx index N;
 * "N <hex bytes>" programs the key at index N (0-3). */
static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	int i, rc;
	char key[16];
	u16 index = 0;
	int j = 0;

	memset(key, 0, sizeof(key));

	data = file->private_data;
	if ( !data->writelen ) return;

	if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
	    (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
		index = data->wbuffer[0] - '0';
		if (data->wbuffer[1] == '\n') {
			rc = set_wep_tx_idx(ai, index, 1, 1);
			if (rc < 0) {
				airo_print_err(ai->dev->name, "failed to set "
				               "WEP transmit index to %d: %d.",
				               index, rc);
			}
			return;
		}
		j = 2;
	} else {
		airo_print_err(ai->dev->name, "WepKey passed invalid key index");
		return;
	}

	for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
		switch(i%3) {
		case 0:	/* high nibble */
			key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
			break;
		case 1:	/* low nibble */
			key[i/3] |= hex_to_bin(data->wbuffer[i+j]);
			break;
		}
	}

	rc = set_wep_key(ai, index, key, i/3, 1, 1);
	if (rc < 0) {
		airo_print_err(ai->dev->name, "failed to set WEP key at index "
		               "%d: %d.", index, rc);
	}
}

/* open() for /proc/.../WepKey: list configured key indices/lengths
 * (never the key material) and the current tx key. */
static int proc_wepkey_open( struct inode *inode, struct file *file )
{
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	char *ptr;
	WepKeyRid wkr;
	__le16 lastindex;
	int j=0;
	int rc;

	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	memset(&wkr, 0, sizeof(wkr));
	data = file->private_data;
	if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) {
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->writelen = 0;
	data->maxwritelen = 80;
	if ((data->wbuffer = kzalloc( 80, GFP_KERNEL )) == NULL) {
		kfree (data->rbuffer);
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->on_close = proc_wepkey_on_close;

	ptr = data->rbuffer;
	strcpy(ptr, "No wep keys\n");
	rc = readWepKeyRid(ai, &wkr, 1, 1);
	if (rc == SUCCESS) do {
		lastindex = wkr.kindex;
		if (wkr.kindex == cpu_to_le16(0xffff)) {
			j += sprintf(ptr+j, "Tx key = %d\n",
				     (int)wkr.mac[0]);
		} else {
			j += sprintf(ptr+j, "Key %d set with length = %d\n",
				     le16_to_cpu(wkr.kindex),
				     le16_to_cpu(wkr.klen));
		}
		readWepKeyRid(ai, &wkr, 0, 1);
	} while((lastindex != wkr.kindex) && (j < 180-30));

	data->readlen = strlen( data->rbuffer );
	return 0;
}

/* open() for /proc/.../SSID: show the (up to 3) configured SSIDs,
 * one per line; writes are parsed by proc_SSID_on_close(). */
static int proc_SSID_open(struct inode *inode, struct file *file)
{
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	int i;
	char *ptr;
	SsidRid SSID_rid;

	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	data = file->private_data;
	if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->writelen = 0;
	data->maxwritelen = 33*3;
	/* allocate maxwritelen + 1; we'll want a sentinel */
	if ((data->wbuffer = kzalloc(33*3 + 1, GFP_KERNEL)) == NULL) {
		kfree (data->rbuffer);
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->on_close = proc_SSID_on_close;

	readSsidRid(ai, &SSID_rid);
	ptr = data->rbuffer;
	for (i = 0; i < 3; i++) {
		int j;
		size_t len = le16_to_cpu(SSID_rid.ssids[i].len);
		if (!len)
			break;
		if (len > 32)
			len = 32;
		for (j = 0; j < len && SSID_rid.ssids[i].ssid[j]; j++)
			*ptr++ = SSID_rid.ssids[i].ssid[j];
		*ptr++ = '\n';
	}
	*ptr = '\0';
	data->readlen = strlen( data->rbuffer );
	return 0;
}

/* open() for /proc/.../APList: show the configured preferred APs
 * (MAC per line); writes are parsed by proc_APList_on_close(). */
static int proc_APList_open( struct inode *inode, struct file *file ) {
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	int i;
	char *ptr;
	APListRid APList_rid;

	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	data = file->private_data;
	if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->writelen = 0;
	data->maxwritelen = 4*6*3;
	if ((data->wbuffer = kzalloc( data->maxwritelen, GFP_KERNEL )) == NULL) {
		kfree (data->rbuffer);
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->on_close = proc_APList_on_close;

	readAPListRid(ai, &APList_rid);
	ptr = data->rbuffer;
	for( i = 0; i < 4; i++ ) {
// We end when we find a zero MAC
		if ( !*(int*)APList_rid.ap[i] &&
		     !*(int*)&APList_rid.ap[i][2]) break;
		ptr += sprintf(ptr, "%pM\n", APList_rid.ap[i]);
	}
	if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");

	*ptr = '\0';
	data->readlen = strlen( data->rbuffer );
	return 0;
}

/* open() for /proc/.../BSSList: a write-only open triggers a new
 * scan (CMD_LISTBSS); otherwise the current BSS list is formatted
 * into the read buffer, optionally losing sync first. */
static int proc_BSSList_open( struct inode *inode, struct file *file ) {
	struct proc_data *data;
	struct net_device *dev = PDE_DATA(inode);
	struct airo_info *ai = dev->ml_priv;
	char *ptr;
	BSSListRid BSSList_rid;
	int rc;
	/* If doLoseSync is not 1, we won't do a Lose Sync */
	int doLoseSync = -1;

	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
		return -ENOMEM;
	data = file->private_data;
	if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) {
		kfree (file->private_data);
		return -ENOMEM;
	}
	data->writelen = 0;
	data->maxwritelen = 0;
	data->wbuffer = NULL;
	data->on_close = NULL;

	if (file->f_mode & FMODE_WRITE) {
		if (!(file->f_mode & FMODE_READ)) {
			Cmd cmd;
			Resp rsp;

			if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
			memset(&cmd, 0, sizeof(cmd));
			cmd.cmd=CMD_LISTBSS;
			if (down_interruptible(&ai->sem))
				return -ERESTARTSYS;
			issuecommand(ai, &cmd, &rsp);
			up(&ai->sem);
			data->readlen = 0;
			return 0;
		}
		doLoseSync = 1;
	}
	ptr = data->rbuffer;
	/* There is a race condition here if there are concurrent opens.
           Since it is a rare condition, we'll just live with it, otherwise
           we have to add a spin lock... */
	rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
	while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
		ptr += sprintf(ptr, "%pM %*s rssi = %d",
			       BSSList_rid.bssid,
				(int)BSSList_rid.ssidLen,
				BSSList_rid.ssid,
				le16_to_cpu(BSSList_rid.dBm));
		ptr += sprintf(ptr, " channel = %d %s %s %s %s\n",
				le16_to_cpu(BSSList_rid.dsChannel),
				BSSList_rid.cap & CAP_ESS ? "ESS" : "",
				BSSList_rid.cap & CAP_IBSS ? "adhoc" : "",
				BSSList_rid.cap & CAP_PRIVACY ? "wep" : "",
				BSSList_rid.cap & CAP_SHORTHDR ? "shorthdr" : "");
		rc = readBSSListRid(ai, 0, &BSSList_rid);
	}
	*ptr = '\0';
	data->readlen = strlen( data->rbuffer );
	return 0;
}

/* Common release(): run the per-file on_close hook (which flushes any
 * pending write), then free the buffers and the proc_data. */
static int proc_close( struct inode *inode, struct file *file )
{
	struct proc_data *data = file->private_data;

	if (data->on_close != NULL)
		data->on_close(inode, file);
	kfree(data->rbuffer);
	kfree(data->wbuffer);
	kfree(data);
	return 0;
}

/* Since the card doesn't automatically switch to the right WEP mode,
   we will make it do it.  If the card isn't associated, every secs we
   will switch WEP modes to see if that will help.  If the card is
   associated we will check every minute to see if anything has
   changed. */
static void timer_func( struct net_device *dev ) {
	struct airo_info *apriv = dev->ml_priv;

/* We don't have a link so try changing the authtype */
	readConfigRid(apriv, 0);
	disable_MAC(apriv, 0);
	switch(apriv->config.authType) {
		case AUTH_ENCRYPT:
/* So drop to OPEN */
			apriv->config.authType = AUTH_OPEN;
			break;
		case AUTH_SHAREDKEY:
			if (apriv->keyindex < auto_wep) {
				set_wep_tx_idx(apriv, apriv->keyindex, 0, 0);
				apriv->config.authType = AUTH_SHAREDKEY;
				apriv->keyindex++;
			} else {
			        /* Drop to ENCRYPT */
				apriv->keyindex = 0;
				set_wep_tx_idx(apriv, apriv->defindex, 0, 0);
				apriv->config.authType = AUTH_ENCRYPT;
			}
			break;
		default:  /* We'll escalate to SHAREDKEY */
			apriv->config.authType = AUTH_SHAREDKEY;
	}
	set_bit (FLAG_COMMIT, &apriv->flags);
	writeConfigRid(apriv, 0);
	enable_MAC(apriv, 0);
	up(&apriv->sem);

/* Schedule check to see if the change worked */
	clear_bit(JOB_AUTOWEP, &apriv->jobs);
	apriv->expires = RUN_AT(HZ*3);
}

#ifdef CONFIG_PCI
/* PCI probe: enable the device and hand off to _init_airo_card().
 * BAR0 is used for the 0x5000/0xa504 (MPI) devices, BAR2 otherwise —
 * presumably the register window differs per chip; confirm against
 * the device datasheet.  (continues on the next source line) */
static int airo_pci_probe(struct pci_dev *pdev,
				    const struct pci_device_id *pent)
{
	struct net_device *dev;

	if (pci_enable_device(pdev))
		return -ENODEV;
	pci_set_master(pdev);

	if (pdev->device == 0x5000 || pdev->device == 0xa504)
			dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev);
	else
			dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev);
	if (!dev) {
pci_disable_device(pdev); /* _init_airo_card failed: undo pci_enable_device */
		return -ENODEV;
	}
	pci_set_drvdata(pdev, dev);
	return 0;
}

/* PCI remove hook: tear down the net_device and release the PCI device. */
static void airo_pci_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	airo_print_info(dev->name, "Unregistering...");
	stop_airo_card(dev, 1);
	pci_disable_device(pdev);
}

/*
 * PCI suspend hook: snapshot the AP list and SSID RIDs (restored on
 * resume), put the firmware into HOSTSLEEP, arm wake and enter the
 * requested low-power state.
 *
 * Returns 0 on success, -ENOMEM if the snapshot buffers cannot be
 * allocated, -EAGAIN if the semaphore wait is interrupted.
 *
 * NOTE(review): ai->sem is deliberately left held here -- see the
 * comment below; the matching up() happens in airo_pci_resume().
 */
static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct airo_info *ai = dev->ml_priv;
	Cmd cmd;
	Resp rsp;

	if (!ai->APList)
		ai->APList = kmalloc(sizeof(APListRid), GFP_KERNEL);
	if (!ai->APList)
		return -ENOMEM;
	if (!ai->SSID)
		ai->SSID = kmalloc(sizeof(SsidRid), GFP_KERNEL);
	if (!ai->SSID)
		return -ENOMEM;
	readAPListRid(ai, ai->APList);
	readSsidRid(ai, ai->SSID);
	memset(&cmd, 0, sizeof(cmd));
	/* the lock will be released at the end of the resume callback */
	if (down_interruptible(&ai->sem))
		return -EAGAIN;
	disable_MAC(ai, 0);
	netif_device_detach(dev);
	ai->power = state;
	cmd.cmd = HOSTSLEEP;
	issuecommand(ai, &cmd, &rsp);

	pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

/*
 * PCI resume hook: bring the device back to D0 and either fully
 * re-initialize the card (coming from deeper than D1) or just wake the
 * firmware with EVACK writes, then replay the SSID/APList snapshot
 * taken in airo_pci_suspend() and release ai->sem (held since suspend).
 */
static int airo_pci_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct airo_info *ai = dev->ml_priv;
	pci_power_t prev_state = pdev->current_state;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	if (prev_state != PCI_D1) {
		/* Deeper sleep: the card lost state, rebuild it from scratch. */
		reset_card(dev, 0);
		mpi_init_descriptors(ai);
		setup_card(ai, dev->dev_addr, 0);
		clear_bit(FLAG_RADIO_OFF, &ai->flags);
		clear_bit(FLAG_PENDING_XMIT, &ai->flags);
	} else {
		/* Light sleep: just wake the firmware. */
		OUT4500(ai, EVACK, EV_AWAKEN);
		OUT4500(ai, EVACK, EV_AWAKEN);
		msleep(100);
	}

	set_bit(FLAG_COMMIT, &ai->flags);
	disable_MAC(ai, 0);
	msleep(200);
	if (ai->SSID) {
		writeSsidRid(ai, ai->SSID, 0);
		kfree(ai->SSID);
		ai->SSID = NULL;
	}
	if (ai->APList) {
		writeAPListRid(ai, ai->APList, 0);
		kfree(ai->APList);
		ai->APList = NULL;
	}
	writeConfigRid(ai, 0);
	enable_MAC(ai, 0);
	ai->power = PMSG_ON;
netif_device_attach(dev); netif_wake_queue(dev); enable_interrupts(ai); up(&ai->sem); return 0; } #endif static int __init airo_init_module( void ) { int i; proc_kuid = make_kuid(&init_user_ns, proc_uid); proc_kgid = make_kgid(&init_user_ns, proc_gid); if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid)) return -EINVAL; airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL); if (airo_entry) proc_set_user(airo_entry, proc_kuid, proc_kgid); for (i = 0; i < 4 && io[i] && irq[i]; i++) { airo_print_info("", "Trying to configure ISA adapter at irq=%d " "io=0x%x", irq[i], io[i] ); if (init_airo_card( irq[i], io[i], 0, NULL )) /* do nothing */ ; } #ifdef CONFIG_PCI airo_print_info("", "Probing for PCI adapters"); i = pci_register_driver(&airo_driver); airo_print_info("", "Finished probing for PCI adapters"); if (i) { remove_proc_entry("driver/aironet", NULL); return i; } #endif /* Always exit with success, as we are a library module * as well as a driver module */ return 0; } static void __exit airo_cleanup_module( void ) { struct airo_info *ai; while(!list_empty(&airo_devices)) { ai = list_entry(airo_devices.next, struct airo_info, dev_list); airo_print_info(ai->dev->name, "Unregistering..."); stop_airo_card(ai->dev, 1); } #ifdef CONFIG_PCI pci_unregister_driver(&airo_driver); #endif remove_proc_entry("driver/aironet", NULL); } /* * Initial Wireless Extension code for Aironet driver by : * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00 * Conversion to new driver API by : * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02 * Javier also did a good amount of work here, adding some new extensions * and fixing my code. Let's just say that without him this code just * would not work at all... 
- Jean II */ static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi) { if (!rssi_rid) return 0; return (0x100 - rssi_rid[rssi].rssidBm); } static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm) { int i; if (!rssi_rid) return 0; for (i = 0; i < 256; i++) if (rssi_rid[i].rssidBm == dbm) return rssi_rid[i].rssipct; return 0; } static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid) { int quality = 0; u16 sq; if ((status_rid->mode & cpu_to_le16(0x3f)) != cpu_to_le16(0x3f)) return 0; if (!(cap_rid->hardCap & cpu_to_le16(8))) return 0; sq = le16_to_cpu(status_rid->signalQuality); if (memcmp(cap_rid->prodName, "350", 3)) if (sq > 0x20) quality = 0; else quality = 0x20 - sq; else if (sq > 0xb0) quality = 0; else if (sq < 0x10) quality = 0xa0; else quality = 0xb0 - sq; return quality; } #define airo_get_max_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x20 : 0xa0) #define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50); /*------------------------------------------------------------------*/ /* * Wireless Handler : get protocol name */ static int airo_get_name(struct net_device *dev, struct iw_request_info *info, char *cwrq, char *extra) { strcpy(cwrq, "IEEE 802.11-DS"); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set frequency */ static int airo_set_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra) { struct airo_info *local = dev->ml_priv; int rc = -EINPROGRESS; /* Call commit handler */ /* If setting by frequency, convert to a channel */ if(fwrq->e == 1) { int f = fwrq->m / 100000; /* Hack to fall through... */ fwrq->e = 0; fwrq->m = ieee80211_frequency_to_channel(f); } /* Setting by channel number */ if((fwrq->m > 1000) || (fwrq->e > 0)) rc = -EOPNOTSUPP; else { int channel = fwrq->m; /* We should do a better check than that, * based on the card capability !!! 
*/ if((channel < 1) || (channel > 14)) { airo_print_dbg(dev->name, "New channel value of %d is invalid!", fwrq->m); rc = -EINVAL; } else { readConfigRid(local, 1); /* Yes ! We can set it !!! */ local->config.channelSet = cpu_to_le16(channel); set_bit (FLAG_COMMIT, &local->flags); } } return rc; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get frequency */ static int airo_get_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *fwrq, char *extra) { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ int ch; readConfigRid(local, 1); if ((local->config.opmode & MODE_CFG_MASK) == MODE_STA_ESS) status_rid.channel = local->config.channelSet; else readStatusRid(local, &status_rid, 1); ch = le16_to_cpu(status_rid.channel); if((ch > 0) && (ch < 15)) { fwrq->m = 100000 * ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ); fwrq->e = 1; } else { fwrq->m = ch; fwrq->e = 0; } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set ESSID */ static int airo_set_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; SsidRid SSID_rid; /* SSIDs */ /* Reload the list of current SSID */ readSsidRid(local, &SSID_rid); /* Check if we asked for `any' */ if (dwrq->flags == 0) { /* Just send an empty SSID list */ memset(&SSID_rid, 0, sizeof(SSID_rid)); } else { unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1; /* Check the size of the string */ if (dwrq->length > IW_ESSID_MAX_SIZE) return -E2BIG ; /* Check if index is valid */ if (index >= ARRAY_SIZE(SSID_rid.ssids)) return -EINVAL; /* Set the SSID */ memset(SSID_rid.ssids[index].ssid, 0, sizeof(SSID_rid.ssids[index].ssid)); memcpy(SSID_rid.ssids[index].ssid, extra, dwrq->length); SSID_rid.ssids[index].len = cpu_to_le16(dwrq->length); } SSID_rid.len = cpu_to_le16(sizeof(SSID_rid)); /* Write 
it to the card */ disable_MAC(local, 1); writeSsidRid(local, &SSID_rid, 1); enable_MAC(local, 1); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get ESSID */ static int airo_get_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ readStatusRid(local, &status_rid, 1); /* Note : if dwrq->flags != 0, we should * get the relevant SSID from the SSID list... */ /* Get the current SSID */ memcpy(extra, status_rid.SSID, le16_to_cpu(status_rid.SSIDlen)); /* If none, we may want to get the one that was set */ /* Push it out ! */ dwrq->length = le16_to_cpu(status_rid.SSIDlen); dwrq->flags = 1; /* active */ return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set AP address */ static int airo_set_wap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra) { struct airo_info *local = dev->ml_priv; Cmd cmd; Resp rsp; APListRid APList_rid; if (awrq->sa_family != ARPHRD_ETHER) return -EINVAL; else if (is_broadcast_ether_addr(awrq->sa_data) || is_zero_ether_addr(awrq->sa_data)) { memset(&cmd, 0, sizeof(cmd)); cmd.cmd=CMD_LOSE_SYNC; if (down_interruptible(&local->sem)) return -ERESTARTSYS; issuecommand(local, &cmd, &rsp); up(&local->sem); } else { memset(&APList_rid, 0, sizeof(APList_rid)); APList_rid.len = cpu_to_le16(sizeof(APList_rid)); memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN); disable_MAC(local, 1); writeAPListRid(local, &APList_rid, 1); enable_MAC(local, 1); } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get AP address */ static int airo_get_wap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *awrq, char *extra) { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ 
readStatusRid(local, &status_rid, 1); /* completes airo_get_wap(): report the current BSSID */
	/* Tentative. This seems to work, wow, I'm lucky !!! */
	memcpy(awrq->sa_data, status_rid.bssid[0], ETH_ALEN);
	awrq->sa_family = ARPHRD_ETHER;

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : set Nickname
 *
 * Copies up to 16 bytes of `extra` into config.nodeName (zero-padded)
 * and marks the config for commit.  Returns -E2BIG if the caller's
 * string is longer than the 16-byte field, otherwise -EINPROGRESS so
 * the commit handler runs.
 */
static int airo_set_nick(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_point *dwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;

	/* Check the size of the string */
	if(dwrq->length > 16) {
		return -E2BIG;
	}
	readConfigRid(local, 1);
	memset(local->config.nodeName, 0, sizeof(local->config.nodeName));
	memcpy(local->config.nodeName, extra, dwrq->length);
	set_bit (FLAG_COMMIT, &local->flags);

	return -EINPROGRESS;		/* Call commit handler */
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get Nickname
 *
 * Returns the stored nodeName as a NUL-terminated string in `extra`
 * (at most 16 characters) and sets dwrq->length accordingly.
 */
static int airo_get_nick(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_point *dwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;

	readConfigRid(local, 1);
	strncpy(extra, local->config.nodeName, 16);
	extra[16] = '\0'; /* nodeName may fill all 16 bytes; force termination */
	dwrq->length = strlen(extra);

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : set Bit-Rate
 *
 * Accepts either a rate-table index (< 8), an absolute rate in bit/s,
 * or -1 for "highest available"; validated against the card's
 * supportedRates capability table below (continues on the next
 * source line).
 */
static int airo_set_rate(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_param *vwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;
	CapabilityRid cap_rid;		/* Card capability info */
	u8 brate = 0;
	int i;

	/* First : get a valid bit rate value */
	readCapabilityRid(local, &cap_rid, 1);

	/* Which type of value ?
*/ if((vwrq->value < 8) && (vwrq->value >= 0)) { /* Setting by rate index */ /* Find value in the magic rate table */ brate = cap_rid.supportedRates[vwrq->value]; } else { /* Setting by frequency value */ u8 normvalue = (u8) (vwrq->value/500000); /* Check if rate is valid */ for(i = 0 ; i < 8 ; i++) { if(normvalue == cap_rid.supportedRates[i]) { brate = normvalue; break; } } } /* -1 designed the max rate (mostly auto mode) */ if(vwrq->value == -1) { /* Get the highest available rate */ for(i = 0 ; i < 8 ; i++) { if(cap_rid.supportedRates[i] == 0) break; } if(i != 0) brate = cap_rid.supportedRates[i - 1]; } /* Check that it is valid */ if(brate == 0) { return -EINVAL; } readConfigRid(local, 1); /* Now, check if we want a fixed or auto value */ if(vwrq->fixed == 0) { /* Fill all the rates up to this max rate */ memset(local->config.rates, 0, 8); for(i = 0 ; i < 8 ; i++) { local->config.rates[i] = cap_rid.supportedRates[i]; if(local->config.rates[i] == brate) break; } } else { /* Fixed mode */ /* One rate, fixed */ memset(local->config.rates, 0, 8); local->config.rates[0] = brate; } set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Bit-Rate */ static int airo_get_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; StatusRid status_rid; /* Card status info */ readStatusRid(local, &status_rid, 1); vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000; /* If more than one rate, set auto */ readConfigRid(local, 1); vwrq->fixed = (local->config.rates[1] == 0); return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set RTS threshold */ static int airo_set_rts(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; int rthr 
= vwrq->value; /* completes `int rthr` declared on the previous source line (airo_set_rts) */

	if(vwrq->disabled)
		rthr = AIRO_DEF_MTU; /* "disabled" == threshold at max, i.e. never RTS */
	if((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
		return -EINVAL;
	}
	readConfigRid(local, 1);
	local->config.rtsThres = cpu_to_le16(rthr);
	set_bit (FLAG_COMMIT, &local->flags);

	return -EINPROGRESS;		/* Call commit handler */
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get RTS threshold
 *
 * Reports the configured RTS threshold; a value at AIRO_DEF_MTU (or
 * beyond) is presented to userspace as "disabled".
 */
static int airo_get_rts(struct net_device *dev,
			struct iw_request_info *info,
			struct iw_param *vwrq,
			char *extra)
{
	struct airo_info *local = dev->ml_priv;

	readConfigRid(local, 1);
	vwrq->value = le16_to_cpu(local->config.rtsThres);
	vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
	vwrq->fixed = 1;

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : set Fragmentation threshold
 *
 * Valid range is 256..AIRO_DEF_MTU; "disabled" maps to AIRO_DEF_MTU
 * (never fragment).  The value is rounded down to even before being
 * written to the config RID.
 */
static int airo_set_frag(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_param *vwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;
	int fthr = vwrq->value;

	if(vwrq->disabled)
		fthr = AIRO_DEF_MTU;
	if((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
		return -EINVAL;
	}
	fthr &= ~0x1;	/* Get an even value - is it really needed ???
*/ readConfigRid(local, 1); local->config.fragThresh = cpu_to_le16(fthr); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Fragmentation threshold */ static int airo_get_frag(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); vwrq->value = le16_to_cpu(local->config.fragThresh); vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU); vwrq->fixed = 1; return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Mode of Operation */ static int airo_set_mode(struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra) { struct airo_info *local = dev->ml_priv; int reset = 0; readConfigRid(local, 1); if (sniffing_mode(local)) reset = 1; switch(*uwrq) { case IW_MODE_ADHOC: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_STA_IBSS; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_INFRA: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_STA_ESS; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_MASTER: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_AP; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_REPEAT: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_AP_RPTR; local->config.rmode &= ~RXMODE_FULL_MASK; local->config.scanMode = SCANMODE_ACTIVE; clear_bit (FLAG_802_11, &local->flags); break; case IW_MODE_MONITOR: local->config.opmode &= ~MODE_CFG_MASK; local->config.opmode |= MODE_STA_ESS; local->config.rmode &= 
~RXMODE_FULL_MASK; local->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER; local->config.scanMode = SCANMODE_PASSIVE; set_bit (FLAG_802_11, &local->flags); break; default: return -EINVAL; } if (reset) set_bit (FLAG_RESET, &local->flags); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Mode of Operation */ static int airo_get_mode(struct net_device *dev, struct iw_request_info *info, __u32 *uwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); /* If not managed, assume it's ad-hoc */ switch (local->config.opmode & MODE_CFG_MASK) { case MODE_STA_ESS: *uwrq = IW_MODE_INFRA; break; case MODE_AP: *uwrq = IW_MODE_MASTER; break; case MODE_AP_RPTR: *uwrq = IW_MODE_REPEAT; break; default: *uwrq = IW_MODE_ADHOC; } return 0; } static inline int valid_index(struct airo_info *ai, int index) { return (index >= 0) && (index <= ai->max_wep_idx); } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Encryption Key */ static int airo_set_encode(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; int perm = (dwrq->flags & IW_ENCODE_TEMP ? 0 : 1); __le16 currentAuthType = local->config.authType; int rc = 0; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); /* Basic checking: do we have a key to set ? * Note : with the new API, it's impossible to get a NULL pointer. * Therefore, we need to check a key size == 0 instead. * New version of iwconfig properly set the IW_ENCODE_NOKEY flag * when no key is present (only change flags), but older versions * don't do it. 
- Jean II */ if (dwrq->length > 0) { wep_key_t key; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; int current_index; /* Check the size of the key */ if (dwrq->length > MAX_KEY_SIZE) { return -EINVAL; } current_index = get_wep_tx_idx(local); if (current_index < 0) current_index = 0; /* Check the index (none -> use current) */ if (!valid_index(local, index)) index = current_index; /* Set the length */ if (dwrq->length > MIN_KEY_SIZE) key.len = MAX_KEY_SIZE; else key.len = MIN_KEY_SIZE; /* Check if the key is not marked as invalid */ if(!(dwrq->flags & IW_ENCODE_NOKEY)) { /* Cleanup */ memset(key.key, 0, MAX_KEY_SIZE); /* Copy the key in the driver */ memcpy(key.key, extra, dwrq->length); /* Send the key to the card */ rc = set_wep_key(local, index, key.key, key.len, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set" " WEP key at index %d: %d.", index, rc); return rc; } } /* WE specify that if a valid key is set, encryption * should be enabled (user may turn it off later) * This is also how "iwconfig ethX key on" works */ if((index == current_index) && (key.len > 0) && (local->config.authType == AUTH_OPEN)) { local->config.authType = AUTH_ENCRYPT; } } else { /* Do we want to just set the transmit key index ? 
*/ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; if (valid_index(local, index)) { rc = set_wep_tx_idx(local, index, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set" " WEP transmit index to %d: %d.", index, rc); return rc; } } else { /* Don't complain if only change the mode */ if (!(dwrq->flags & IW_ENCODE_MODE)) return -EINVAL; } } /* Read the flags */ if(dwrq->flags & IW_ENCODE_DISABLED) local->config.authType = AUTH_OPEN; // disable encryption if(dwrq->flags & IW_ENCODE_RESTRICTED) local->config.authType = AUTH_SHAREDKEY; // Only Both if(dwrq->flags & IW_ENCODE_OPEN) local->config.authType = AUTH_ENCRYPT; // Only Wep /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Encryption Key */ static int airo_get_encode(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra) { struct airo_info *local = dev->ml_priv; int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; int wep_key_len; u8 buf[16]; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); /* Check encryption mode */ switch(local->config.authType) { case AUTH_ENCRYPT: dwrq->flags = IW_ENCODE_OPEN; break; case AUTH_SHAREDKEY: dwrq->flags = IW_ENCODE_RESTRICTED; break; default: case AUTH_OPEN: dwrq->flags = IW_ENCODE_DISABLED; break; } /* We can't return the key, so set the proper flag and return zero */ dwrq->flags |= IW_ENCODE_NOKEY; memset(extra, 0, 16); /* Which key do we want ? 
-1 -> tx index */ if (!valid_index(local, index)) { index = get_wep_tx_idx(local); if (index < 0) index = 0; } dwrq->flags |= index + 1; /* Copy the key to the user buffer */ wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf)); if (wep_key_len < 0) { dwrq->length = 0; } else { dwrq->length = wep_key_len; memcpy(extra, buf, dwrq->length); } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set extended Encryption parameters */ static int airo_set_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 ); __le16 currentAuthType = local->config.authType; int idx, key_len, alg = ext->alg, set_key = 1, rc; wep_key_t key; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); /* Determine and validate the key index */ idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if (!valid_index(local, idx - 1)) return -EINVAL; idx--; } else { idx = get_wep_tx_idx(local); if (idx < 0) idx = 0; } if (encoding->flags & IW_ENCODE_DISABLED) alg = IW_ENCODE_ALG_NONE; if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { /* Only set transmit key index here, actual * key is set below if needed. */ rc = set_wep_tx_idx(local, idx, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set " "WEP transmit index to %d: %d.", idx, rc); return rc; } set_key = ext->key_len > 0 ? 
1 : 0; } if (set_key) { /* Set the requested key first */ memset(key.key, 0, MAX_KEY_SIZE); switch (alg) { case IW_ENCODE_ALG_NONE: key.len = 0; break; case IW_ENCODE_ALG_WEP: if (ext->key_len > MIN_KEY_SIZE) { key.len = MAX_KEY_SIZE; } else if (ext->key_len > 0) { key.len = MIN_KEY_SIZE; } else { return -EINVAL; } key_len = min (ext->key_len, key.len); memcpy(key.key, ext->key, key_len); break; default: return -EINVAL; } if (key.len == 0) { rc = set_wep_tx_idx(local, idx, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set WEP transmit index to %d: %d.", idx, rc); return rc; } } else { rc = set_wep_key(local, idx, key.key, key.len, perm, 1); if (rc < 0) { airo_print_err(local->dev->name, "failed to set WEP key at index %d: %d.", idx, rc); return rc; } } } /* Read the flags */ if(encoding->flags & IW_ENCODE_DISABLED) local->config.authType = AUTH_OPEN; // disable encryption if(encoding->flags & IW_ENCODE_RESTRICTED) local->config.authType = AUTH_SHAREDKEY; // Only Both if(encoding->flags & IW_ENCODE_OPEN) local->config.authType = AUTH_ENCRYPT; // Only Wep /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get extended Encryption parameters */ static int airo_get_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, max_key_len, wep_key_len; u8 buf[16]; if (!local->wep_capable) return -EOPNOTSUPP; readConfigRid(local, 1); max_key_len = encoding->length - sizeof(*ext); if (max_key_len < 0) return -EINVAL; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if (!valid_index(local, idx - 1)) return -EINVAL; idx--; } else { idx = get_wep_tx_idx(local); if (idx 
< 0) idx = 0; } encoding->flags = idx + 1; memset(ext, 0, sizeof(*ext)); /* Check encryption mode */ switch(local->config.authType) { case AUTH_ENCRYPT: encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; break; case AUTH_SHAREDKEY: encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED; break; default: case AUTH_OPEN: encoding->flags = IW_ENCODE_ALG_NONE | IW_ENCODE_DISABLED; break; } /* We can't return the key, so set the proper flag and return zero */ encoding->flags |= IW_ENCODE_NOKEY; memset(extra, 0, 16); /* Copy the key to the user buffer */ wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf)); if (wep_key_len < 0) { ext->key_len = 0; } else { ext->key_len = wep_key_len; memcpy(extra, buf, ext->key_len); } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set extended authentication parameters */ static int airo_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_param *param = &wrqu->param; __le16 currentAuthType = local->config.authType; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: case IW_AUTH_RX_UNENCRYPTED_EAPOL: case IW_AUTH_PRIVACY_INVOKED: /* * airo does not use these parameters */ break; case IW_AUTH_DROP_UNENCRYPTED: if (param->value) { /* Only change auth type if unencrypted */ if (currentAuthType == AUTH_OPEN) local->config.authType = AUTH_ENCRYPT; } else { local->config.authType = AUTH_OPEN; } /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); break; case IW_AUTH_80211_AUTH_ALG: { /* FIXME: What about AUTH_OPEN? This API seems to * disallow setting our auth to AUTH_OPEN. 
*/ if (param->value & IW_AUTH_ALG_SHARED_KEY) { local->config.authType = AUTH_SHAREDKEY; } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { local->config.authType = AUTH_ENCRYPT; } else return -EINVAL; /* Commit the changes to flags if needed */ if (local->config.authType != currentAuthType) set_bit (FLAG_COMMIT, &local->flags); break; } case IW_AUTH_WPA_ENABLED: /* Silently accept disable of WPA */ if (param->value > 0) return -EOPNOTSUPP; break; default: return -EOPNOTSUPP; } return -EINPROGRESS; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get extended authentication parameters */ static int airo_get_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct airo_info *local = dev->ml_priv; struct iw_param *param = &wrqu->param; __le16 currentAuthType = local->config.authType; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_DROP_UNENCRYPTED: switch (currentAuthType) { case AUTH_SHAREDKEY: case AUTH_ENCRYPT: param->value = 1; break; default: param->value = 0; break; } break; case IW_AUTH_80211_AUTH_ALG: switch (currentAuthType) { case AUTH_SHAREDKEY: param->value = IW_AUTH_ALG_SHARED_KEY; break; case AUTH_ENCRYPT: default: param->value = IW_AUTH_ALG_OPEN_SYSTEM; break; } break; case IW_AUTH_WPA_ENABLED: param->value = 0; break; default: return -EOPNOTSUPP; } return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Tx-Power */ static int airo_set_txpow(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; CapabilityRid cap_rid; /* Card capability info */ int i; int rc = -EINVAL; __le16 v = cpu_to_le16(vwrq->value); readCapabilityRid(local, &cap_rid, 1); if (vwrq->disabled) { set_bit (FLAG_RADIO_OFF, &local->flags); set_bit (FLAG_COMMIT, &local->flags); return -EINPROGRESS; /* Call commit handler */ } if (vwrq->flags != 
IW_TXPOW_MWATT) { return -EINVAL; } clear_bit (FLAG_RADIO_OFF, &local->flags); for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++) if (v == cap_rid.txPowerLevels[i]) { readConfigRid(local, 1); local->config.txPower = v; set_bit (FLAG_COMMIT, &local->flags); rc = -EINPROGRESS; /* Call commit handler */ break; } return rc; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Tx-Power */ static int airo_get_txpow(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; readConfigRid(local, 1); vwrq->value = le16_to_cpu(local->config.txPower); vwrq->fixed = 1; /* No power control */ vwrq->disabled = test_bit(FLAG_RADIO_OFF, &local->flags); vwrq->flags = IW_TXPOW_MWATT; return 0; } /*------------------------------------------------------------------*/ /* * Wireless Handler : set Retry limits */ static int airo_set_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct airo_info *local = dev->ml_priv; int rc = -EINVAL; if(vwrq->disabled) { return -EINVAL; } readConfigRid(local, 1); if(vwrq->flags & IW_RETRY_LIMIT) { __le16 v = cpu_to_le16(vwrq->value); if(vwrq->flags & IW_RETRY_LONG) local->config.longRetryLimit = v; else if (vwrq->flags & IW_RETRY_SHORT) local->config.shortRetryLimit = v; else { /* No modifier : set both */ local->config.longRetryLimit = v; local->config.shortRetryLimit = v; } set_bit (FLAG_COMMIT, &local->flags); rc = -EINPROGRESS; /* Call commit handler */ } if(vwrq->flags & IW_RETRY_LIFETIME) { local->config.txLifetime = cpu_to_le16(vwrq->value / 1024); set_bit (FLAG_COMMIT, &local->flags); rc = -EINPROGRESS; /* Call commit handler */ } return rc; } /*------------------------------------------------------------------*/ /* * Wireless Handler : get Retry limits */ static int airo_get_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { 
struct airo_info *local = dev->ml_priv;

	vwrq->disabled = 0;      /* Can't be disabled */

	readConfigRid(local, 1);

	/* Note : by default, display the min retry number */
	if((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
		vwrq->flags = IW_RETRY_LIFETIME;
		vwrq->value = le16_to_cpu(local->config.txLifetime) * 1024;
	} else if((vwrq->flags & IW_RETRY_LONG)) {
		vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
		vwrq->value = le16_to_cpu(local->config.longRetryLimit);
	} else {
		vwrq->flags = IW_RETRY_LIMIT;
		vwrq->value = le16_to_cpu(local->config.shortRetryLimit);
		/* Flag that a distinct short limit exists */
		if(local->config.shortRetryLimit != local->config.longRetryLimit)
			vwrq->flags |= IW_RETRY_SHORT;
	}

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get range info
 */
static int airo_get_range(struct net_device *dev,
			  struct iw_request_info *info,
			  struct iw_point *dwrq,
			  char *extra)
{
	struct airo_info *local = dev->ml_priv;
	struct iw_range *range = (struct iw_range *) extra;
	CapabilityRid cap_rid;		/* Card capability info */
	int i;
	int k;

	readCapabilityRid(local, &cap_rid, 1);

	dwrq->length = sizeof(struct iw_range);
	memset(range, 0, sizeof(*range));
	range->min_nwid = 0x0000;
	range->max_nwid = 0x0000;
	range->num_channels = 14;
	/* Should be based on cap_rid.country to give only
	 * what the current card support */
	k = 0;
	for(i = 0; i < 14; i++) {
		range->freq[k].i = i + 1; /* List index */
		range->freq[k].m = 100000 *
		     ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
		range->freq[k++].e = 1;	/* Values in MHz -> * 10^5 * 10 */
	}
	range->num_frequency = k;

	range->sensitivity = 65535;

	/* Hum... Should put the right values there */
	if (local->rssi)
		range->max_qual.qual = 100;	/* % */
	else
		range->max_qual.qual = airo_get_max_quality(&cap_rid);
	range->max_qual.level = 0x100 - 120;	/* -120 dBm */
	range->max_qual.noise = 0x100 - 120;	/* -120 dBm */

	/* Experimental measurements - boundary 11/5.5 Mb/s */
	/* Note : with or without the (local->rssi), results
	 * are somewhat different. - Jean II */
	if (local->rssi) {
		range->avg_qual.qual = 50;		/* % */
		range->avg_qual.level = 0x100 - 70;	/* -70 dBm */
	} else {
		range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
		range->avg_qual.level = 0x100 - 80;	/* -80 dBm */
	}
	range->avg_qual.noise = 0x100 - 85;		/* -85 dBm */

	/* Rates are reported in 500 kb/s units by the card */
	for(i = 0 ; i < 8 ; i++) {
		range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
		if(range->bitrate[i] == 0)
			break;
	}
	range->num_bitrates = i;

	/* Set an indication of the max TCP throughput
	 * in bit/s that we can expect using this interface.
	 * May be use for QoS stuff... Jean II */
	if(i > 2)
		range->throughput = 5000 * 1000;
	else
		range->throughput = 1500 * 1000;

	range->min_rts = 0;
	range->max_rts = AIRO_DEF_MTU;
	range->min_frag = 256;
	range->max_frag = AIRO_DEF_MTU;

	if(cap_rid.softCap & cpu_to_le16(2)) {
		// WEP: RC4 40 bits
		range->encoding_size[0] = 5;
		// RC4 ~128 bits
		if (cap_rid.softCap & cpu_to_le16(0x100)) {
			range->encoding_size[1] = 13;
			range->num_encoding_sizes = 2;
		} else
			range->num_encoding_sizes = 1;
		range->max_encoding_tokens =
			cap_rid.softCap & cpu_to_le16(0x80) ? 4 : 1;
	} else {
		range->num_encoding_sizes = 0;
		range->max_encoding_tokens = 0;
	}
	range->min_pmp = 0;
	range->max_pmp = 5000000;	/* 5 secs */
	range->min_pmt = 0;
	range->max_pmt = 65535 * 1024;	/* ??? */
	range->pmp_flags = IW_POWER_PERIOD;
	range->pmt_flags = IW_POWER_TIMEOUT;
	range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;

	/* Transmit Power - values are in mW */
	for(i = 0 ; i < 8 ; i++) {
		range->txpower[i] = le16_to_cpu(cap_rid.txPowerLevels[i]);
		if(range->txpower[i] == 0)
			break;
	}
	range->num_txpower = i;
	range->txpower_capa = IW_TXPOW_MWATT;
	/* Wireless Extensions version this handler set was written against */
	range->we_version_source = 19;
	range->we_version_compiled = WIRELESS_EXT;
	range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
	range->retry_flags = IW_RETRY_LIMIT;
	range->r_time_flags = IW_RETRY_LIFETIME;
	range->min_retry = 1;
	range->max_retry = 65535;
	range->min_r_time = 1024;
	range->max_r_time = 65535 * 1024;

	/* Event capability (kernel + driver) */
	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
	range->event_capa[1] = IW_EVENT_CAPA_K_1;
	range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVTXDROP);
	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : set Power Management
 */
static int airo_set_power(struct net_device *dev,
			  struct iw_request_info *info,
			  struct iw_param *vwrq,
			  char *extra)
{
	struct airo_info *local = dev->ml_priv;

	readConfigRid(local, 1);
	if (vwrq->disabled) {
		if (sniffing_mode(local))
			return -EINVAL;
		local->config.powerSaveMode = POWERSAVE_CAM;
		local->config.rmode &= ~RXMODE_MASK;
		local->config.rmode |= RXMODE_BC_MC_ADDR;
		set_bit (FLAG_COMMIT, &local->flags);
		return -EINPROGRESS;		/* Call commit handler */
	}
	if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
		local->config.fastListenDelay = cpu_to_le16((vwrq->value + 500) / 1024);
		local->config.powerSaveMode = POWERSAVE_PSPCAM;
		set_bit (FLAG_COMMIT, &local->flags);
	} else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
		local->config.fastListenInterval =
		local->config.listenInterval =
			cpu_to_le16((vwrq->value + 500) / 1024);
		local->config.powerSaveMode = POWERSAVE_PSPCAM;
set_bit (FLAG_COMMIT, &local->flags);
	}
	switch (vwrq->flags & IW_POWER_MODE) {
	case IW_POWER_UNICAST_R:
		if (sniffing_mode(local))
			return -EINVAL;
		local->config.rmode &= ~RXMODE_MASK;
		local->config.rmode |= RXMODE_ADDR;
		set_bit (FLAG_COMMIT, &local->flags);
		break;
	case IW_POWER_ALL_R:
		if (sniffing_mode(local))
			return -EINVAL;
		local->config.rmode &= ~RXMODE_MASK;
		local->config.rmode |= RXMODE_BC_MC_ADDR;
		set_bit (FLAG_COMMIT, &local->flags);
		/* fall through */
	case IW_POWER_ON:
		/* This is broken, fixme ;-) */
		break;
	default:
		return -EINVAL;
	}
	// Note : we may want to factor local->need_commit here
	// Note2 : may also want to factor RXMODE_RFMON test
	return -EINPROGRESS;		/* Call commit handler */
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get Power Management
 */
static int airo_get_power(struct net_device *dev,
			  struct iw_request_info *info,
			  struct iw_param *vwrq,
			  char *extra)
{
	struct airo_info *local = dev->ml_priv;
	__le16 mode;

	readConfigRid(local, 1);
	mode = local->config.powerSaveMode;
	if ((vwrq->disabled = (mode == POWERSAVE_CAM)))
		return 0;
	if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
		vwrq->value = le16_to_cpu(local->config.fastListenDelay) * 1024;
		vwrq->flags = IW_POWER_TIMEOUT;
	} else {
		vwrq->value = le16_to_cpu(local->config.fastListenInterval) * 1024;
		vwrq->flags = IW_POWER_PERIOD;
	}
	if ((local->config.rmode & RXMODE_MASK) == RXMODE_ADDR)
		vwrq->flags |= IW_POWER_UNICAST_R;
	else
		vwrq->flags |= IW_POWER_ALL_R;

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : set Sensitivity
 */
static int airo_set_sens(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_param *vwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;

	readConfigRid(local, 1);
	/* "disabled" restores the default RSSI threshold */
	local->config.rssiThreshold =
		cpu_to_le16(vwrq->disabled ? RSSI_DEFAULT : vwrq->value);
	set_bit (FLAG_COMMIT, &local->flags);

	return -EINPROGRESS;		/* Call commit handler */
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get Sensitivity
 */
static int airo_get_sens(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_param *vwrq,
			 char *extra)
{
	struct airo_info *local = dev->ml_priv;

	readConfigRid(local, 1);
	vwrq->value = le16_to_cpu(local->config.rssiThreshold);
	vwrq->disabled = (vwrq->value == 0);
	vwrq->fixed = 1;

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : get AP List
 * Note : this is deprecated in favor of IWSCAN
 */
static int airo_get_aplist(struct net_device *dev,
			   struct iw_request_info *info,
			   struct iw_point *dwrq,
			   char *extra)
{
	struct airo_info *local = dev->ml_priv;
	struct sockaddr *address = (struct sockaddr *) extra;
	struct iw_quality *qual;
	BSSListRid BSSList;
	int i;
	int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;

	qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
	if (!qual)
		return -ENOMEM;

	for (i = 0; i < IW_MAX_AP; i++) {
		u16 dBm;
		if (readBSSListRid(local, loseSync, &BSSList))
			break;
		loseSync = 0;
		memcpy(address[i].sa_data, BSSList.bssid, ETH_ALEN);
		address[i].sa_family = ARPHRD_ETHER;
		dBm = le16_to_cpu(BSSList.dBm);
		if (local->rssi) {
			qual[i].level = 0x100 - dBm;
			qual[i].qual = airo_dbm_to_pct(local->rssi, dBm);
			qual[i].updated = IW_QUAL_QUAL_UPDATED
					| IW_QUAL_LEVEL_UPDATED
					| IW_QUAL_DBM;
		} else {
			qual[i].level = (dBm + 321) / 2;
			qual[i].qual = 0;
			qual[i].updated = IW_QUAL_QUAL_INVALID
					| IW_QUAL_LEVEL_UPDATED
					| IW_QUAL_DBM;
		}
		qual[i].noise = local->wstats.qual.noise;
		if (BSSList.index == cpu_to_le16(0xffff))
			break;
	}
	if (!i) {
		/* No BSS list entries: fall back to the bssid array in the
		 * status RID, stopping at an all-0xff or all-zero address. */
		StatusRid status_rid;		/* Card status info */
		readStatusRid(local, &status_rid, 1);
		for (i = 0;
		     i < min(IW_MAX_AP, 4) &&
			     (status_rid.bssid[i][0]
			      & status_rid.bssid[i][1]
			      & status_rid.bssid[i][2]
			      & status_rid.bssid[i][3]
			      & status_rid.bssid[i][4] &
status_rid.bssid[i][5])!=0xff &&
			     (status_rid.bssid[i][0]
			      | status_rid.bssid[i][1]
			      | status_rid.bssid[i][2]
			      | status_rid.bssid[i][3]
			      | status_rid.bssid[i][4]
			      | status_rid.bssid[i][5]);
		     i++) {
			memcpy(address[i].sa_data,
			       status_rid.bssid[i], ETH_ALEN);
			address[i].sa_family = ARPHRD_ETHER;
		}
	} else {
		dwrq->flags = 1; /* Should be define'd */
		/* Quality records follow the sockaddr array in 'extra' */
		memcpy(extra + sizeof(struct sockaddr) * i, qual,
		       sizeof(struct iw_quality) * i);
	}
	dwrq->length = i;

	kfree(qual);
	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : Initiate Scan
 */
static int airo_set_scan(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_point *dwrq,
			 char *extra)
{
	struct airo_info *ai = dev->ml_priv;
	Cmd cmd;
	Resp rsp;
	int wake = 0;

	/* Note : you may have realised that, as this is a SET operation,
	 * this is privileged and therefore a normal user can't
	 * perform scanning.
	 * This is not an error, while the device perform scanning,
	 * traffic doesn't flow, so it's a perfect DoS...
	 * Jean II */
	if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;

	if (down_interruptible(&ai->sem))
		return -ERESTARTSYS;

	/* If there's already a scan in progress, don't
	 * trigger another one. */
	if (ai->scan_timeout > 0)
		goto out;

	/* Initiate a scan command */
	ai->scan_timeout = RUN_AT(3*HZ);
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd=CMD_LISTBSS;
	issuecommand(ai, &cmd, &rsp);
	wake = 1;

out:
	up(&ai->sem);
	if (wake)
		wake_up_interruptible(&ai->thr_wait);
	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Translate scan data returned from the card to a card independent
 * format that the Wireless Tools will understand - Jean II
 */
static inline char *airo_translate_scan(struct net_device *dev,
					struct iw_request_info *info,
					char *current_ev,
					char *end_buf,
					BSSListRid *bss)
{
	struct airo_info *ai = dev->ml_priv;
	struct iw_event iwe;		/* Temporary buffer */
	__le16 capabilities;
	char *current_val;		/* For rates */
	int i;
	char *buf;
	u16 dBm;

	/* First entry *MUST* be the AP MAC address */
	iwe.cmd = SIOCGIWAP;
	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
					  &iwe, IW_EV_ADDR_LEN);

	/* Other entries will be displayed in the order we give them */

	/* Add the ESSID */
	iwe.u.data.length = bss->ssidLen;
	if(iwe.u.data.length > 32)
		iwe.u.data.length = 32;
	iwe.cmd = SIOCGIWESSID;
	iwe.u.data.flags = 1;
	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
					  &iwe, bss->ssid);

	/* Add mode */
	iwe.cmd = SIOCGIWMODE;
	capabilities = bss->cap;
	if(capabilities & (CAP_ESS | CAP_IBSS)) {
		if(capabilities & CAP_ESS)
			iwe.u.mode = IW_MODE_MASTER;
		else
			iwe.u.mode = IW_MODE_ADHOC;
		current_ev = iwe_stream_add_event(info, current_ev, end_buf,
						  &iwe, IW_EV_UINT_LEN);
	}

	/* Add frequency */
	iwe.cmd = SIOCGIWFREQ;
	iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
	iwe.u.freq.m = 100000 *
	      ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
	iwe.u.freq.e = 1;
	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
					  &iwe, IW_EV_FREQ_LEN);

	dBm = le16_to_cpu(bss->dBm);

	/* Add quality statistics */
	iwe.cmd = IWEVQUAL;
	if (ai->rssi) {
		iwe.u.qual.level =
0x100 - dBm;
		iwe.u.qual.qual = airo_dbm_to_pct(ai->rssi, dBm);
		iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED
				| IW_QUAL_LEVEL_UPDATED
				| IW_QUAL_DBM;
	} else {
		iwe.u.qual.level = (dBm + 321) / 2;
		iwe.u.qual.qual = 0;
		iwe.u.qual.updated = IW_QUAL_QUAL_INVALID
				| IW_QUAL_LEVEL_UPDATED
				| IW_QUAL_DBM;
	}
	iwe.u.qual.noise = ai->wstats.qual.noise;
	current_ev = iwe_stream_add_event(info, current_ev, end_buf,
					  &iwe, IW_EV_QUAL_LEN);

	/* Add encryption capability */
	iwe.cmd = SIOCGIWENCODE;
	if(capabilities & CAP_PRIVACY)
		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
	else
		iwe.u.data.flags = IW_ENCODE_DISABLED;
	iwe.u.data.length = 0;
	current_ev = iwe_stream_add_point(info, current_ev, end_buf,
					  &iwe, bss->ssid);

	/* Rate : stuffing multiple values in a single event require a bit
	 * more of magic - Jean II */
	current_val = current_ev + iwe_stream_lcp_len(info);

	iwe.cmd = SIOCGIWRATE;
	/* Those two flags are ignored... */
	iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
	/* Max 8 values */
	for(i = 0 ; i < 8 ; i++) {
		/* NULL terminated */
		if(bss->rates[i] == 0)
			break;
		/* Bit rate given in 500 kb/s units (+ 0x80) */
		iwe.u.bitrate.value = ((bss->rates[i] & 0x7f) * 500000);
		/* Add new value to event */
		current_val = iwe_stream_add_value(info, current_ev,
						   current_val, end_buf,
						   &iwe, IW_EV_PARAM_LEN);
	}
	/* Check if we added any event */
	if ((current_val - current_ev) > iwe_stream_lcp_len(info))
		current_ev = current_val;

	/* Beacon interval */
	buf = kmalloc(30, GFP_KERNEL);
	if (buf) {
		iwe.cmd = IWEVCUSTOM;
		sprintf(buf, "bcn_int=%d", bss->beaconInterval);
		iwe.u.data.length = strlen(buf);
		current_ev = iwe_stream_add_point(info, current_ev, end_buf,
						  &iwe, buf);
		kfree(buf);
	}

	/* Put WPA/RSN Information Elements into the event stream */
	if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) {
		unsigned int num_null_ies = 0;
		u16 length = sizeof (bss->extra.iep);
		u8 *ie = (void *)&bss->extra.iep;

		while ((length >= 2) && (num_null_ies < 2)) {
			if (2 + ie[1] > length) {
				/* Invalid element, don't continue parsing IE */
				break;
			}

			switch (ie[0]) {
			case WLAN_EID_SSID:
				/* Two zero-length SSID elements
				 * mean we're done parsing elements */
				if (!ie[1])
					num_null_ies++;
				break;

			case WLAN_EID_VENDOR_SPECIFIC:
				/* 00:50:f2/1 OUI+type identifies a WPA IE */
				if (ie[1] >= 4 &&
				    ie[2] == 0x00 &&
				    ie[3] == 0x50 &&
				    ie[4] == 0xf2 &&
				    ie[5] == 0x01) {
					iwe.cmd = IWEVGENIE;
					/* 64 is an arbitrary cut-off */
					iwe.u.data.length = min(ie[1] + 2,
								64);
					current_ev = iwe_stream_add_point(
							info, current_ev,
							end_buf, &iwe, ie);
				}
				break;

			case WLAN_EID_RSN:
				iwe.cmd = IWEVGENIE;
				/* 64 is an arbitrary cut-off */
				iwe.u.data.length = min(ie[1] + 2, 64);
				current_ev = iwe_stream_add_point(
					info, current_ev, end_buf,
					&iwe, ie);
				break;

			default:
				break;
			}

			/* Advance past this element: 2-byte header + payload */
			length -= 2 + ie[1];
			ie += 2 + ie[1];
		}
	}
	return current_ev;
}

/*------------------------------------------------------------------*/
/*
 * Wireless Handler : Read Scan Results
 */
static int airo_get_scan(struct net_device *dev,
			 struct iw_request_info *info,
			 struct iw_point *dwrq,
			 char *extra)
{
	struct airo_info *ai = dev->ml_priv;
	BSSListElement *net;
	int err = 0;
	char *current_ev = extra;

	/* If a scan is in-progress, return -EAGAIN */
	if (ai->scan_timeout > 0)
		return -EAGAIN;

	if (down_interruptible(&ai->sem))
		return -EAGAIN;

	list_for_each_entry (net, &ai->network_list, list) {
		/* Translate to WE format this entry */
		current_ev = airo_translate_scan(dev, info, current_ev,
						 extra + dwrq->length,
						 &net->bss);

		/* Check if there is space for one more entry */
		if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
			/* Ask user space to try again with a bigger buffer */
			err = -E2BIG;
			goto out;
		}
	}

	/* Length of data */
	dwrq->length = (current_ev - extra);
	dwrq->flags = 0;	/* todo */

out:
	up(&ai->sem);
	return err;
}

/*------------------------------------------------------------------*/
/*
 * Commit handler : called after a bunch of SET operations
 */
static int airo_config_commit(struct net_device *dev,
			      struct iw_request_info *info,	/* NULL */
			      void *zwrq,			/* NULL */
			      char *extra)			/* NULL */
{
	struct airo_info *local =
dev->ml_priv;

	if (!test_bit (FLAG_COMMIT, &local->flags))
		return 0;

	/* Some of the "SET" function may have modified some of the
	 * parameters. It's now time to commit them in the card */
	disable_MAC(local, 1);
	if (test_bit (FLAG_RESET, &local->flags)) {
		APListRid APList_rid;
		SsidRid SSID_rid;

		/* Preserve AP list and SSID across the card reset */
		readAPListRid(local, &APList_rid);
		readSsidRid(local, &SSID_rid);
		if (test_bit(FLAG_MPI,&local->flags))
			setup_card(local, dev->dev_addr, 1 );
		else
			reset_airo_card(dev);
		disable_MAC(local, 1);
		writeSsidRid(local, &SSID_rid, 1);
		writeAPListRid(local, &APList_rid, 1);
	}
	if (down_interruptible(&local->sem))
		return -ERESTARTSYS;
	writeConfigRid(local, 0);
	enable_MAC(local, 0);
	if (test_bit (FLAG_RESET, &local->flags))
		airo_set_promisc(local);
	else
		up(&local->sem);

	return 0;
}

/*------------------------------------------------------------------*/
/*
 * Structures to export the Wireless Handlers
 */

static const struct iw_priv_args airo_private_args[] = {
/*{ cmd,         set_args,                            get_args, name } */
  { AIROIOCTL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
    IW_PRIV_TYPE_BYTE | 2047, "airoioctl" },
  { AIROIDIFC, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
    IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "airoidifc" },
};

/* Indexed by WE ioctl number; NULL entries are unimplemented commands */
static const iw_handler		airo_handler[] =
{
	(iw_handler) airo_config_commit,	/* SIOCSIWCOMMIT */
	(iw_handler) airo_get_name,		/* SIOCGIWNAME */
	(iw_handler) NULL,			/* SIOCSIWNWID */
	(iw_handler) NULL,			/* SIOCGIWNWID */
	(iw_handler) airo_set_freq,		/* SIOCSIWFREQ */
	(iw_handler) airo_get_freq,		/* SIOCGIWFREQ */
	(iw_handler) airo_set_mode,		/* SIOCSIWMODE */
	(iw_handler) airo_get_mode,		/* SIOCGIWMODE */
	(iw_handler) airo_set_sens,		/* SIOCSIWSENS */
	(iw_handler) airo_get_sens,		/* SIOCGIWSENS */
	(iw_handler) NULL,			/* SIOCSIWRANGE */
	(iw_handler) airo_get_range,		/* SIOCGIWRANGE */
	(iw_handler) NULL,			/* SIOCSIWPRIV */
	(iw_handler) NULL,			/* SIOCGIWPRIV */
	(iw_handler) NULL,			/* SIOCSIWSTATS */
	(iw_handler) NULL,			/* SIOCGIWSTATS */
	iw_handler_set_spy,			/* SIOCSIWSPY */
	iw_handler_get_spy,			/* SIOCGIWSPY */
	iw_handler_set_thrspy,			/* SIOCSIWTHRSPY */
	iw_handler_get_thrspy,			/* SIOCGIWTHRSPY */
	(iw_handler) airo_set_wap,		/* SIOCSIWAP */
	(iw_handler) airo_get_wap,		/* SIOCGIWAP */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) airo_get_aplist,		/* SIOCGIWAPLIST */
	(iw_handler) airo_set_scan,		/* SIOCSIWSCAN */
	(iw_handler) airo_get_scan,		/* SIOCGIWSCAN */
	(iw_handler) airo_set_essid,		/* SIOCSIWESSID */
	(iw_handler) airo_get_essid,		/* SIOCGIWESSID */
	(iw_handler) airo_set_nick,		/* SIOCSIWNICKN */
	(iw_handler) airo_get_nick,		/* SIOCGIWNICKN */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) airo_set_rate,		/* SIOCSIWRATE */
	(iw_handler) airo_get_rate,		/* SIOCGIWRATE */
	(iw_handler) airo_set_rts,		/* SIOCSIWRTS */
	(iw_handler) airo_get_rts,		/* SIOCGIWRTS */
	(iw_handler) airo_set_frag,		/* SIOCSIWFRAG */
	(iw_handler) airo_get_frag,		/* SIOCGIWFRAG */
	(iw_handler) airo_set_txpow,		/* SIOCSIWTXPOW */
	(iw_handler) airo_get_txpow,		/* SIOCGIWTXPOW */
	(iw_handler) airo_set_retry,		/* SIOCSIWRETRY */
	(iw_handler) airo_get_retry,		/* SIOCGIWRETRY */
	(iw_handler) airo_set_encode,		/* SIOCSIWENCODE */
	(iw_handler) airo_get_encode,		/* SIOCGIWENCODE */
	(iw_handler) airo_set_power,		/* SIOCSIWPOWER */
	(iw_handler) airo_get_power,		/* SIOCGIWPOWER */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) NULL,			/* -- hole -- */
	(iw_handler) NULL,			/* SIOCSIWGENIE */
	(iw_handler) NULL,			/* SIOCGIWGENIE */
	(iw_handler) airo_set_auth,		/* SIOCSIWAUTH */
	(iw_handler) airo_get_auth,		/* SIOCGIWAUTH */
	(iw_handler) airo_set_encodeext,	/* SIOCSIWENCODEEXT */
	(iw_handler) airo_get_encodeext,	/* SIOCGIWENCODEEXT */
	(iw_handler) NULL,			/* SIOCSIWPMKSA */
};

/* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here.
 * We want to force the use of the ioctl code, because those can't be
 * won't work the iw_handler code (because they simultaneously read
 * and write data and iw_handler can't do that).
 * Note that it's perfectly legal to read/write on a single ioctl command,
 * you just can't use iwpriv and need to force it via the ioctl handler.
 * Jean II */
static const iw_handler		airo_private_handler[] =
{
	NULL,				/* SIOCIWFIRSTPRIV */
};

static const struct iw_handler_def	airo_handler_def =
{
	.num_standard	= ARRAY_SIZE(airo_handler),
	.num_private	= ARRAY_SIZE(airo_private_handler),
	.num_private_args = ARRAY_SIZE(airo_private_args),
	.standard	= airo_handler,
	.private	= airo_private_handler,
	.private_args	= airo_private_args,
	.get_wireless_stats = airo_get_wireless_stats,
};

/*
 * This defines the configuration part of the Wireless Extensions
 * Note : irq and spinlock protection will occur in the subroutines
 *
 * TODO :
 *	o Check input value more carefully and fill correct values in range
 *	o Test and shakeout the bugs (if any)
 *
 * Jean II
 *
 * Javier Achirica did a great job of merging code from the unnamed CISCO
 * developer that added support for flashing the card.
 */
static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int rc = 0;
	struct airo_info *ai = dev->ml_priv;

	if (ai->power.event)
		return 0;

	switch (cmd) {
#ifdef CISCO_EXT
	case AIROIDIFC:
#ifdef AIROOLDIDIFC
	case AIROOLDIDIFC:
#endif
	{
		/* Identity check: echo the magic back to user space */
		int val = AIROMAGIC;
		aironet_ioctl com;
		if (copy_from_user(&com,rq->ifr_data,sizeof(com)))
			rc = -EFAULT;
		else if (copy_to_user(com.data,(char *)&val,sizeof(val)))
			rc = -EFAULT;
	}
	break;

	case AIROIOCTL:
#ifdef AIROOLDIOCTL
	case AIROOLDIOCTL:
#endif
		/* Get the command struct and hand it off for evaluation by
		 * the proper subfunction
		 */
	{
		aironet_ioctl com;
		if (copy_from_user(&com,rq->ifr_data,sizeof(com))) {
			rc = -EFAULT;
			break;
		}

		/* Separate R/W functions bracket legality here */
		if ( com.command == AIRORSWVERSION ) {
			if (copy_to_user(com.data, swversion, sizeof(swversion)))
				rc = -EFAULT;
			else
				rc = 0;
		}
		else if ( com.command <= AIRORRID)
			rc = readrids(dev,&com);
		else if ( com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2) )
			rc =
writerids(dev,&com); else if ( com.command >= AIROFLSHRST && com.command <= AIRORESTART ) rc = flashcard(dev,&com); else rc = -EINVAL; /* Bad command in ioctl */ } break; #endif /* CISCO_EXT */ // All other calls are currently unsupported default: rc = -EOPNOTSUPP; } return rc; } /* * Get the Wireless stats out of the driver * Note : irq and spinlock protection will occur in the subroutines * * TODO : * o Check if work in Ad-Hoc mode (otherwise, use SPY, as in wvlan_cs) * * Jean */ static void airo_read_wireless_stats(struct airo_info *local) { StatusRid status_rid; StatsRid stats_rid; CapabilityRid cap_rid; __le32 *vals = stats_rid.vals; /* Get stats out of the card */ clear_bit(JOB_WSTATS, &local->jobs); if (local->power.event) { up(&local->sem); return; } readCapabilityRid(local, &cap_rid, 0); readStatusRid(local, &status_rid, 0); readStatsRid(local, &stats_rid, RID_STATS, 0); up(&local->sem); /* The status */ local->wstats.status = le16_to_cpu(status_rid.mode); /* Signal quality and co */ if (local->rssi) { local->wstats.qual.level = airo_rssi_to_dbm(local->rssi, le16_to_cpu(status_rid.sigQuality)); /* normalizedSignalStrength appears to be a percentage */ local->wstats.qual.qual = le16_to_cpu(status_rid.normalizedSignalStrength); } else { local->wstats.qual.level = (le16_to_cpu(status_rid.normalizedSignalStrength) + 321) / 2; local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid); } if (le16_to_cpu(status_rid.len) >= 124) { local->wstats.qual.noise = 0x100 - status_rid.noisedBm; local->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; } else { local->wstats.qual.noise = 0; local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_DBM; } /* Packets discarded in the wireless adapter due to wireless * specific problems */ local->wstats.discard.nwid = le32_to_cpu(vals[56]) + le32_to_cpu(vals[57]) + le32_to_cpu(vals[58]); /* SSID Mismatch */ local->wstats.discard.code = le32_to_cpu(vals[6]);/* 
RxWepErr */ local->wstats.discard.fragment = le32_to_cpu(vals[30]); local->wstats.discard.retries = le32_to_cpu(vals[10]); local->wstats.discard.misc = le32_to_cpu(vals[1]) + le32_to_cpu(vals[32]); local->wstats.miss.beacon = le32_to_cpu(vals[34]); } static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev) { struct airo_info *local = dev->ml_priv; if (!test_bit(JOB_WSTATS, &local->jobs)) { /* Get stats out of the card if available */ if (down_trylock(&local->sem) != 0) { set_bit(JOB_WSTATS, &local->jobs); wake_up_interruptible(&local->thr_wait); } else airo_read_wireless_stats(local); } return &local->wstats; } #ifdef CISCO_EXT /* * This just translates from driver IOCTL codes to the command codes to * feed to the radio's host interface. Things can be added/deleted * as needed. This represents the READ side of control I/O to * the card */ static int readrids(struct net_device *dev, aironet_ioctl *comp) { unsigned short ridcode; unsigned char *iobuf; int len; struct airo_info *ai = dev->ml_priv; if (test_bit(FLAG_FLASHING, &ai->flags)) return -EIO; switch(comp->command) { case AIROGCAP: ridcode = RID_CAPABILITIES; break; case AIROGCFG: ridcode = RID_CONFIG; if (test_bit(FLAG_COMMIT, &ai->flags)) { disable_MAC (ai, 1); writeConfigRid (ai, 1); enable_MAC(ai, 1); } break; case AIROGSLIST: ridcode = RID_SSID; break; case AIROGVLIST: ridcode = RID_APLIST; break; case AIROGDRVNAM: ridcode = RID_DRVNAME; break; case AIROGEHTENC: ridcode = RID_ETHERENCAP; break; case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; /* Only super-user can read WEP keys */ if (!capable(CAP_NET_ADMIN)) return -EPERM; break; case AIROGWEPKNV: ridcode = RID_WEP_PERM; /* Only super-user can read WEP keys */ if (!capable(CAP_NET_ADMIN)) return -EPERM; break; case AIROGSTAT: ridcode = RID_STATUS; break; case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; case AIROGSTATSC32: ridcode = RID_STATS; break; case AIROGMICSTATS: if (copy_to_user(comp->data, &ai->micstats, 
min((int)comp->len,(int)sizeof(ai->micstats)))) return -EFAULT; return 0; case AIRORRID: ridcode = comp->ridnum; break; default: return -EINVAL; } if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1); /* get the count of bytes in the rid docs say 1st 2 bytes is it. * then return it to the user * 9/22/2000 Honor user given length */ len = comp->len; if (copy_to_user(comp->data, iobuf, min(len, (int)RIDSIZE))) { kfree (iobuf); return -EFAULT; } kfree (iobuf); return 0; } /* * Danger Will Robinson write the rids here */ static int writerids(struct net_device *dev, aironet_ioctl *comp) { struct airo_info *ai = dev->ml_priv; int ridcode; int enabled; static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); unsigned char *iobuf; /* Only super-user can write RIDs */ if (!capable(CAP_NET_ADMIN)) return -EPERM; if (test_bit(FLAG_FLASHING, &ai->flags)) return -EIO; ridcode = 0; writer = do_writerid; switch(comp->command) { case AIROPSIDS: ridcode = RID_SSID; break; case AIROPCAP: ridcode = RID_CAPABILITIES; break; case AIROPAPLIST: ridcode = RID_APLIST; break; case AIROPCFG: ai->config.len = 0; clear_bit(FLAG_COMMIT, &ai->flags); ridcode = RID_CONFIG; break; case AIROPWEPKEYNV: ridcode = RID_WEP_PERM; break; case AIROPLEAPUSR: ridcode = RID_LEAPUSERNAME; break; case AIROPLEAPPWD: ridcode = RID_LEAPPASSWORD; break; case AIROPWEPKEY: ridcode = RID_WEP_TEMP; writer = PC4500_writerid; break; case AIROPLEAPUSR+1: ridcode = 0xFF2A; break; case AIROPLEAPUSR+2: ridcode = 0xFF2B; break; /* this is not really a rid but a command given to the card * same with MAC off */ case AIROPMACON: if (enable_MAC(ai, 1) != 0) return -EIO; return 0; /* * Evidently this code in the airo driver does not get a symbol * as disable_MAC. it's probably so short the compiler does not gen one. 
*/
	case AIROPMACOFF:
		disable_MAC(ai, 1);
		return 0;

	/* This command merely clears the counts does not actually store any data
	 * only reads rid. But as it changes the cards state, I put it in the
	 * writerid routines.
	 */
	case AIROPSTCLR:
		if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
			return -ENOMEM;

		PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1);

		/* Clearing the stats also wipes micstats; preserve the
		 * enabled flag across the reset. */
		enabled = ai->micstats.enabled;
		memset(&ai->micstats,0,sizeof(ai->micstats));
		ai->micstats.enabled = enabled;

		if (copy_to_user(comp->data, iobuf,
				 min((int)comp->len, (int)RIDSIZE))) {
			kfree (iobuf);
			return -EFAULT;
		}
		kfree (iobuf);
		return 0;

	default:
		return -EOPNOTSUPP;	/* Blarg! */
	}
	if(comp->len > RIDSIZE)
		return -EINVAL;

	if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if (copy_from_user(iobuf,comp->data,comp->len)) {
		kfree (iobuf);
		return -EFAULT;
	}

	if (comp->command == AIROPCFG) {
		ConfigRid *cfg = (ConfigRid *)iobuf;

		if (test_bit(FLAG_MIC_CAPABLE, &ai->flags))
			cfg->opmode |= MODE_MIC;

		if ((cfg->opmode & MODE_CFG_MASK) == MODE_STA_IBSS)
			set_bit (FLAG_ADHOC, &ai->flags);
		else
			clear_bit (FLAG_ADHOC, &ai->flags);
	}

	if((*writer)(ai, ridcode, iobuf,comp->len,1)) {
		kfree (iobuf);
		return -EIO;
	}
	kfree (iobuf);
	return 0;
}

/*****************************************************************************
 * Ancillary flash / mod functions much black magic lurkes here              *
 *****************************************************************************
 */

/*
 * Flash command switch table
 */
static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
	int z;

	/* Only super-user can modify flash */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch(comp->command)
	{
	case AIROFLSHRST:
		return cmdreset((struct airo_info *)dev->ml_priv);

	case AIROFLSHSTFL:
		if (!AIRO_FLASH(dev) &&
		    (AIRO_FLASH(dev) = kmalloc(FLASHSIZE, GFP_KERNEL)) == NULL)
			return -ENOMEM;
		return setflashmode((struct airo_info *)dev->ml_priv);

	case AIROFLSHGCHR: /* Get char from aux */
		if(comp->len != sizeof(int))
			return -EINVAL;
		if (copy_from_user(&z,comp->data,comp->len))
			return -EFAULT;
		return flashgchar((struct airo_info *)dev->ml_priv, z, 8000);

	case AIROFLSHPCHR: /* Send char to card. */
		if(comp->len != sizeof(int))
			return -EINVAL;
		if (copy_from_user(&z,comp->data,comp->len))
			return -EFAULT;
		return flashpchar((struct airo_info *)dev->ml_priv, z, 8000);

	case AIROFLPUTBUF: /* Send 32k to card */
		if (!AIRO_FLASH(dev))
			return -ENOMEM;
		if(comp->len > FLASHSIZE)
			return -EINVAL;
		if (copy_from_user(AIRO_FLASH(dev), comp->data, comp->len))
			return -EFAULT;

		flashputbuf((struct airo_info *)dev->ml_priv);
		return 0;

	case AIRORESTART:
		if (flashrestart((struct airo_info *)dev->ml_priv, dev))
			return -EIO;
		return 0;
	}
	return -EINVAL;
}

/* Handshake value written to the SWS registers by setflashmode() */
#define FLASH_COMMAND  0x7e7e

/*
 * STEP 1)
 * Disable MAC and do soft reset on
 * card.
 */
static int cmdreset(struct airo_info *ai) {
	disable_MAC(ai, 1);

	if(!waitbusy (ai)){
		airo_print_info(ai->dev->name, "Waitbusy hang before RESET");
		return -EBUSY;
	}

	OUT4500(ai,COMMAND,CMD_SOFTRESET);

	ssleep(1);			/* WAS 600 12/7/00 */

	if(!waitbusy (ai)){
		airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET");
		return -EBUSY;
	}
	return 0;
}

/* STEP 2)
 * Put the card in legendary flash
 * mode
 */
static int setflashmode (struct airo_info *ai) {
	set_bit (FLAG_FLASHING, &ai->flags);

	OUT4500(ai, SWS0, FLASH_COMMAND);
	OUT4500(ai, SWS1, FLASH_COMMAND);
	if (probe) {
		OUT4500(ai, SWS0, FLASH_COMMAND);
		OUT4500(ai, COMMAND,0x10);
	} else {
		OUT4500(ai, SWS2, FLASH_COMMAND);
		OUT4500(ai, SWS3, FLASH_COMMAND);
		OUT4500(ai, COMMAND,0);
	}
	msleep(500);		/* 500ms delay */

	if(!waitbusy(ai)) {
		clear_bit (FLAG_FLASHING, &ai->flags);
		airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode");
		return -EIO;
	}
	return 0;
}

/* Put character to SWS0 wait for dwelltime
 * x 50us for  echo .
*/
static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
	int echo;
	int waittime;

	byte |= 0x8000;

	if(dwelltime == 0 )
		dwelltime = 200;

	waittime=dwelltime;

	/* Wait for busy bit d15 to go false indicating buffer empty */
	while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) {
		udelay (50);
		waittime -= 50;
	}

	/* timeout for busy clear wait */
	if(waittime <= 0 ){
		airo_print_info(ai->dev->name, "flash putchar busywait timeout!");
		return -EBUSY;
	}

	/* Port is clear now write byte and wait for it to echo back */
	do {
		OUT4500(ai,SWS0,byte);
		udelay(50);
		dwelltime -= 50;
		echo = IN4500(ai,SWS1);
	} while (dwelltime >= 0 && echo != byte);

	OUT4500(ai,SWS1,0);

	return (echo == byte) ? 0 : -EIO;
}

/*
 * Get a character from the card matching matchbyte
 * Step 3)
 */
static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
	int           rchar;
	unsigned char rbyte=0;

	do {
		rchar = IN4500(ai,SWS1);

		/* Bit 15 clear means no data ready yet; poll in 10ms steps */
		if(dwelltime && !(0x8000 & rchar)){
			dwelltime -= 10;
			mdelay(10);
			continue;
		}
		rbyte = 0xff & rchar;

		if( (rbyte == matchbyte) && (0x8000 & rchar) ){
			OUT4500(ai,SWS1,0);
			return 0;
		}
		if( rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
			break;
		OUT4500(ai,SWS1,0);

	}while(dwelltime > 0);
	return -EIO;
}

/*
 * Transfer 32k of firmware data from user buffer to our buffer and
 * send to the card
 */
static int flashputbuf(struct airo_info *ai){
	int            nwords;

	/* Write stuff */
	if (test_bit(FLAG_MPI,&ai->flags))
		memcpy_toio(ai->pciaux + 0x8000, ai->flash, FLASHSIZE);
	else {
		OUT4500(ai,AUXPAGE,0x100);
		OUT4500(ai,AUXOFF,0);

		for(nwords=0;nwords != FLASHSIZE / 2;nwords++){
			OUT4500(ai,AUXDATA,ai->flash[nwords] & 0xffff);
		}
	}
	OUT4500(ai,SWS0,0x8000);

	return 0;
}

/*
 * Leave flash mode and re-initialize the card; on non-MPI cards the
 * transmit FIDs are re-allocated as well.
 */
static int flashrestart(struct airo_info *ai,struct net_device *dev){
	int    i,status;

	ssleep(1);			/* Added 12/7/00 */
	clear_bit (FLAG_FLASHING, &ai->flags);
	if (test_bit(FLAG_MPI, &ai->flags)) {
		status = mpi_init_descriptors(ai);
		if (status != SUCCESS)
			return status;
	}
	status = setup_card(ai, dev->dev_addr, 1);

	if (!test_bit(FLAG_MPI,&ai->flags))
		for( i = 0; i < MAX_FIDS; i++ ) {
			ai->fids[i] = transmit_allocate
				( ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2 );
		}

	ssleep(1);			/* Added 12/7/00 */
	return status;
}
#endif /* CISCO_EXT */

/*
    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; either version 2
    of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    In addition:

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

    1. Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in the
       documentation and/or other materials provided with the distribution.
    3. The name of the author may not be used to endorse or promote
       products derived from this software without specific prior written
       permission.

    THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ module_init(airo_init_module); module_exit(airo_cleanup_module);
gpl-2.0
WarheadsSE/OX820-2.6-linux
drivers/video/68328fb.c
605
13570
/* * linux/drivers/video/68328fb.c -- Low level implementation of the * mc68x328 LCD frame buffer device * * Copyright (C) 2003 Georges Menie * * This driver assumes an already configured controller (e.g. from config.c) * Keep the code clean of board specific initialization. * * This code has not been tested with colors, colormap management functions * are minimal (no colormap data written to the 68328 registers...) * * initial version of this driver: * Copyright (C) 1998,1999 Kenneth Albanowski <kjahds@kjahds.com>, * The Silver Hammer Group, Ltd. * * this version is based on : * * linux/drivers/video/vfb.c -- Virtual frame buffer device * * Copyright (C) 2002 James Simmons * * Copyright (C) 1997 Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <asm/uaccess.h> #include <linux/fb.h> #include <linux/init.h> #if defined(CONFIG_M68VZ328) #include <asm/MC68VZ328.h> #elif defined(CONFIG_M68EZ328) #include <asm/MC68EZ328.h> #elif defined(CONFIG_M68328) #include <asm/MC68328.h> #else #error wrong architecture for the MC68x328 frame buffer device #endif #if defined(CONFIG_FB_68328_INVERT) #define MC68X328FB_MONO_VISUAL FB_VISUAL_MONO01 #else #define MC68X328FB_MONO_VISUAL FB_VISUAL_MONO10 #endif static u_long videomemory; static u_long videomemorysize; static struct fb_info fb_info; static u32 mc68x328fb_pseudo_palette[16]; static struct fb_var_screeninfo mc68x328fb_default __initdata = { .red = { 0, 8, 0 }, .green = { 0, 8, 0 }, .blue = { 0, 8, 0 }, .activate = FB_ACTIVATE_TEST, .height = -1, .width = -1, .pixclock = 20000, .left_margin = 64, .right_margin = 64, .upper_margin = 32, .lower_margin = 
32, .hsync_len = 64, .vsync_len = 2, .vmode = FB_VMODE_NONINTERLACED, }; static struct fb_fix_screeninfo mc68x328fb_fix __initdata = { .id = "68328fb", .type = FB_TYPE_PACKED_PIXELS, .xpanstep = 1, .ypanstep = 1, .ywrapstep = 1, .accel = FB_ACCEL_NONE, }; /* * Interface used by the world */ int mc68x328fb_init(void); int mc68x328fb_setup(char *); static int mc68x328fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int mc68x328fb_set_par(struct fb_info *info); static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int mc68x328fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma); static struct fb_ops mc68x328fb_ops = { .fb_check_var = mc68x328fb_check_var, .fb_set_par = mc68x328fb_set_par, .fb_setcolreg = mc68x328fb_setcolreg, .fb_pan_display = mc68x328fb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_mmap = mc68x328fb_mmap, }; /* * Internal routines */ static u_long get_line_length(int xres_virtual, int bpp) { u_long length; length = xres_virtual * bpp; length = (length + 31) & ~31; length >>= 3; return (length); } /* * Setting the video mode has been split into two parts. * First part, xxxfb_check_var, must not write anything * to hardware, it should only verify and adjust var. * This means it doesn't alter par but it does use hardware * data from it to check this var. */ static int mc68x328fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { u_long line_length; /* * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal! 
* as FB_VMODE_SMOOTH_XPAN is only used internally */ if (var->vmode & FB_VMODE_CONUPDATE) { var->vmode |= FB_VMODE_YWRAP; var->xoffset = info->var.xoffset; var->yoffset = info->var.yoffset; } /* * Some very basic checks */ if (!var->xres) var->xres = 1; if (!var->yres) var->yres = 1; if (var->xres > var->xres_virtual) var->xres_virtual = var->xres; if (var->yres > var->yres_virtual) var->yres_virtual = var->yres; if (var->bits_per_pixel <= 1) var->bits_per_pixel = 1; else if (var->bits_per_pixel <= 8) var->bits_per_pixel = 8; else if (var->bits_per_pixel <= 16) var->bits_per_pixel = 16; else if (var->bits_per_pixel <= 24) var->bits_per_pixel = 24; else if (var->bits_per_pixel <= 32) var->bits_per_pixel = 32; else return -EINVAL; if (var->xres_virtual < var->xoffset + var->xres) var->xres_virtual = var->xoffset + var->xres; if (var->yres_virtual < var->yoffset + var->yres) var->yres_virtual = var->yoffset + var->yres; /* * Memory limit */ line_length = get_line_length(var->xres_virtual, var->bits_per_pixel); if (line_length * var->yres_virtual > videomemorysize) return -ENOMEM; /* * Now that we checked it we alter var. The reason being is that the video * mode passed in might not work but slight changes to it might make it * work. This way we let the user know what is acceptable. 
*/ switch (var->bits_per_pixel) { case 1: var->red.offset = 0; var->red.length = 1; var->green.offset = 0; var->green.length = 1; var->blue.offset = 0; var->blue.length = 1; var->transp.offset = 0; var->transp.length = 0; break; case 8: var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case 16: /* RGBA 5551 */ if (var->transp.length) { var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 10; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; } else { /* RGB 565 */ var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 11; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; } break; case 24: /* RGB 888 */ var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case 32: /* RGBA 8888 */ var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; } var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; return 0; } /* This routine actually sets the video mode. It's in here where we * the hardware state info->par and fix which can be affected by the * change in par. For this driver it doesn't do much. */ static int mc68x328fb_set_par(struct fb_info *info) { info->fix.line_length = get_line_length(info->var.xres_virtual, info->var.bits_per_pixel); return 0; } /* * Set a single color register. The values supplied are already * rounded down to the hardware's capabilities (according to the * entries in the var structure). Return != 0 for invalid regno. 
*/ static int mc68x328fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { if (regno >= 256) /* no. of hw registers */ return 1; /* * Program hardware... do anything you want with transp */ /* grayscale works only partially under directcolor */ if (info->var.grayscale) { /* grayscale = 0.30*R + 0.59*G + 0.11*B */ red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; } /* Directcolor: * var->{color}.offset contains start of bitfield * var->{color}.length contains length of bitfield * {hardwarespecific} contains width of RAMDAC * cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset) * RAMDAC[X] is programmed to (red, green, blue) * * Pseudocolor: * uses offset = 0 && length = RAMDAC register width. * var->{color}.offset is 0 * var->{color}.length contains widht of DAC * cmap is not used * RAMDAC[X] is programmed to (red, green, blue) * Truecolor: * does not use DAC. Usually 3 are present. * var->{color}.offset contains start of bitfield * var->{color}.length contains length of bitfield * cmap is programmed to (red << red.offset) | (green << green.offset) | * (blue << blue.offset) | (transp << transp.offset) * RAMDAC does not exist */ #define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16) switch (info->fix.visual) { case FB_VISUAL_TRUECOLOR: case FB_VISUAL_PSEUDOCOLOR: red = CNVT_TOHW(red, info->var.red.length); green = CNVT_TOHW(green, info->var.green.length); blue = CNVT_TOHW(blue, info->var.blue.length); transp = CNVT_TOHW(transp, info->var.transp.length); break; case FB_VISUAL_DIRECTCOLOR: red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */ green = CNVT_TOHW(green, 8); blue = CNVT_TOHW(blue, 8); /* hey, there is bug in transp handling... 
*/ transp = CNVT_TOHW(transp, 8); break; } #undef CNVT_TOHW /* Truecolor has hardware independent palette */ if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 v; if (regno >= 16) return 1; v = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset) | (transp << info->var.transp.offset); switch (info->var.bits_per_pixel) { case 8: break; case 16: ((u32 *) (info->pseudo_palette))[regno] = v; break; case 24: case 32: ((u32 *) (info->pseudo_palette))[regno] = v; break; } return 0; } return 0; } /* * Pan or Wrap the Display * * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag */ static int mc68x328fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { if (var->vmode & FB_VMODE_YWRAP) { if (var->yoffset < 0 || var->yoffset >= info->var.yres_virtual || var->xoffset) return -EINVAL; } else { if (var->xoffset + var->xres > info->var.xres_virtual || var->yoffset + var->yres > info->var.yres_virtual) return -EINVAL; } info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; if (var->vmode & FB_VMODE_YWRAP) info->var.vmode |= FB_VMODE_YWRAP; else info->var.vmode &= ~FB_VMODE_YWRAP; return 0; } /* * Most drivers don't need their own mmap function */ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { #ifndef MMU /* this is uClinux (no MMU) specific code */ vma->vm_flags |= VM_RESERVED; vma->vm_start = videomemory; return 0; #else return -EINVAL; #endif } int __init mc68x328fb_setup(char *options) { #if 0 char *this_opt; #endif if (!options || !*options) return 1; #if 0 while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (!strncmp(this_opt, "disable", 7)) mc68x328fb_enable = 0; } #endif return 1; } /* * Initialisation */ int __init mc68x328fb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("68328fb", &option)) return -ENODEV; mc68x328fb_setup(option); #endif /* * initialize the default mode from the LCD controller 
registers */ mc68x328fb_default.xres = LXMAX; mc68x328fb_default.yres = LYMAX+1; mc68x328fb_default.xres_virtual = mc68x328fb_default.xres; mc68x328fb_default.yres_virtual = mc68x328fb_default.yres; mc68x328fb_default.bits_per_pixel = 1 + (LPICF & 0x01); videomemory = LSSA; videomemorysize = (mc68x328fb_default.xres_virtual+7) / 8 * mc68x328fb_default.yres_virtual * mc68x328fb_default.bits_per_pixel; fb_info.screen_base = (void *)videomemory; fb_info.fbops = &mc68x328fb_ops; fb_info.var = mc68x328fb_default; fb_info.fix = mc68x328fb_fix; fb_info.fix.smem_start = videomemory; fb_info.fix.smem_len = videomemorysize; fb_info.fix.line_length = get_line_length(mc68x328fb_default.xres_virtual, mc68x328fb_default.bits_per_pixel); fb_info.fix.visual = (mc68x328fb_default.bits_per_pixel) == 1 ? MC68X328FB_MONO_VISUAL : FB_VISUAL_PSEUDOCOLOR; if (fb_info.var.bits_per_pixel == 1) { fb_info.var.red.length = fb_info.var.green.length = fb_info.var.blue.length = 1; fb_info.var.red.offset = fb_info.var.green.offset = fb_info.var.blue.offset = 0; } fb_info.pseudo_palette = &mc68x328fb_pseudo_palette; fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; if (fb_alloc_cmap(&fb_info.cmap, 256, 0)) return -ENOMEM; if (register_framebuffer(&fb_info) < 0) { fb_dealloc_cmap(&fb_info.cmap); return -EINVAL; } printk(KERN_INFO "fb%d: %s frame buffer device\n", fb_info.node, fb_info.fix.id); printk(KERN_INFO "fb%d: %dx%dx%d at 0x%08lx\n", fb_info.node, mc68x328fb_default.xres_virtual, mc68x328fb_default.yres_virtual, 1 << mc68x328fb_default.bits_per_pixel, videomemory); return 0; } module_init(mc68x328fb_init); #ifdef MODULE static void __exit mc68x328fb_cleanup(void) { unregister_framebuffer(&fb_info); fb_dealloc_cmap(&fb_info.cmap); } module_exit(mc68x328fb_cleanup); MODULE_LICENSE("GPL"); #endif /* MODULE */
gpl-2.0
JonnyH/pyra-kernel
drivers/net/wireless/ath/ath9k/htc_drv_debug.c
1373
15010
/* * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "htc.h" static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; struct ath9k_htc_target_int_stats cmd_rsp; char buf[512]; unsigned int len = 0; int ret = 0; memset(&cmd_rsp, 0, sizeof(cmd_rsp)); ath9k_htc_ps_wakeup(priv); WMI_CMD(WMI_INT_STATS_CMDID); if (ret) { ath9k_htc_ps_restore(priv); return -EINVAL; } ath9k_htc_ps_restore(priv); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "RX", be32_to_cpu(cmd_rsp.rx)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "RXORN", be32_to_cpu(cmd_rsp.rxorn)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "RXEOL", be32_to_cpu(cmd_rsp.rxeol)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "TXURN", be32_to_cpu(cmd_rsp.txurn)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "TXTO", be32_to_cpu(cmd_rsp.txto)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "CST", be32_to_cpu(cmd_rsp.cst)); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_tgt_int_stats = { 
.read = read_file_tgt_int_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; struct ath9k_htc_target_tx_stats cmd_rsp; char buf[512]; unsigned int len = 0; int ret = 0; memset(&cmd_rsp, 0, sizeof(cmd_rsp)); ath9k_htc_ps_wakeup(priv); WMI_CMD(WMI_TX_STATS_CMDID); if (ret) { ath9k_htc_ps_restore(priv); return -EINVAL; } ath9k_htc_ps_restore(priv); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Xretries", be32_to_cpu(cmd_rsp.xretries)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "FifoErr", be32_to_cpu(cmd_rsp.fifoerr)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Filtered", be32_to_cpu(cmd_rsp.filtered)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "TimerExp", be32_to_cpu(cmd_rsp.timer_exp)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "ShortRetries", be32_to_cpu(cmd_rsp.shortretries)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "LongRetries", be32_to_cpu(cmd_rsp.longretries)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "QueueNull", be32_to_cpu(cmd_rsp.qnull)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "EncapFail", be32_to_cpu(cmd_rsp.encap_fail)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "NoBuf", be32_to_cpu(cmd_rsp.nobuf)); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_tgt_tx_stats = { .read = read_file_tgt_tx_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; struct ath9k_htc_target_rx_stats cmd_rsp; char 
buf[512]; unsigned int len = 0; int ret = 0; memset(&cmd_rsp, 0, sizeof(cmd_rsp)); ath9k_htc_ps_wakeup(priv); WMI_CMD(WMI_RX_STATS_CMDID); if (ret) { ath9k_htc_ps_restore(priv); return -EINVAL; } ath9k_htc_ps_restore(priv); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "NoBuf", be32_to_cpu(cmd_rsp.nobuf)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "HostSend", be32_to_cpu(cmd_rsp.host_send)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "HostDone", be32_to_cpu(cmd_rsp.host_done)); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_tgt_rx_stats = { .read = read_file_tgt_rx_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_xmit(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; char buf[512]; unsigned int len = 0; len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Buffers queued", priv->debug.tx_stats.buf_queued); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Buffers completed", priv->debug.tx_stats.buf_completed); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "SKBs queued", priv->debug.tx_stats.skb_queued); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "SKBs success", priv->debug.tx_stats.skb_success); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "SKBs failed", priv->debug.tx_stats.skb_failed); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "CAB queued", priv->debug.tx_stats.cab_queued); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "BE queued", priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "BK queued", priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]); len += scnprintf(buf + len, sizeof(buf) - len, 
"%20s : %10u\n", "VI queued", priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "VO queued", priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_xmit = { .read = read_file_xmit, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, struct ath_rx_status *rs) { ath9k_cmn_debug_stat_rx(&priv->debug.rx_stats, rs); } static ssize_t read_file_skb_rx(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; char *buf; unsigned int len = 0, size = 1500; ssize_t retval = 0; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; len += scnprintf(buf + len, size - len, "%20s : %10u\n", "SKBs allocated", priv->debug.skbrx_stats.skb_allocated); len += scnprintf(buf + len, size - len, "%20s : %10u\n", "SKBs completed", priv->debug.skbrx_stats.skb_completed); len += scnprintf(buf + len, size - len, "%20s : %10u\n", "SKBs Dropped", priv->debug.skbrx_stats.skb_dropped); if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static const struct file_operations fops_skb_rx = { .read = read_file_skb_rx, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_slot(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; char buf[512]; unsigned int len; spin_lock_bh(&priv->tx.tx_lock); len = scnprintf(buf, sizeof(buf), "TX slot bitmap : %*pb\n" "Used slots : %d\n", MAX_TX_BUF_NUM, priv->tx.tx_slot, bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM)); spin_unlock_bh(&priv->tx.tx_lock); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static 
const struct file_operations fops_slot = { .read = read_file_slot, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_queue(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; char buf[512]; unsigned int len = 0; len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue)); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Failed queue", skb_queue_len(&priv->tx.tx_failed)); spin_lock_bh(&priv->tx.tx_lock); len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n", "Queued count", priv->tx.queued_cnt); spin_unlock_bh(&priv->tx.tx_lock); if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_queue = { .read = read_file_queue, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t read_file_debug(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; struct ath_common *common = ath9k_hw_common(priv->ah); char buf[32]; unsigned int len; len = sprintf(buf, "0x%08x\n", common->debug_mask); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t 
write_file_debug(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath9k_htc_priv *priv = file->private_data; struct ath_common *common = ath9k_hw_common(priv->ah); unsigned long mask; char buf[32]; ssize_t len; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; if (kstrtoul(buf, 0, &mask)) return -EINVAL; common->debug_mask = mask; return count; } static const struct file_operations fops_debug = { .read = read_file_debug, .write = write_file_debug, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; /* Ethtool support for get-stats */ #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO" static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = { "tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic", AMKSTR(d_tx_pkts), "d_rx_crc_err", "d_rx_decrypt_crc_err", "d_rx_phy_err", "d_rx_mic_err", "d_rx_pre_delim_crc_err", "d_rx_post_delim_crc_err", "d_rx_decrypt_busy_err", "d_rx_phyerr_radar", "d_rx_phyerr_ofdm_timing", "d_rx_phyerr_cck_timing", }; #define ATH9K_HTC_SSTATS_LEN ARRAY_SIZE(ath9k_htc_gstrings_stats) void ath9k_htc_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) memcpy(data, *ath9k_htc_gstrings_stats, sizeof(ath9k_htc_gstrings_stats)); } int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset == ETH_SS_STATS) return ATH9K_HTC_SSTATS_LEN; return 0; } #define STXBASE priv->debug.tx_stats #define SRXBASE priv->debug.rx_stats #define SKBTXBASE priv->debug.tx_stats #define SKBRXBASE priv->debug.skbrx_stats #define ASTXQ(a) \ data[i++] = STXBASE.a[IEEE80211_AC_BE]; \ data[i++] = STXBASE.a[IEEE80211_AC_BK]; \ data[i++] = STXBASE.a[IEEE80211_AC_VI]; \ data[i++] = STXBASE.a[IEEE80211_AC_VO] void ath9k_htc_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct 
ath9k_htc_priv *priv = hw->priv; int i = 0; data[i++] = SKBTXBASE.skb_success; data[i++] = SKBTXBASE.skb_success_bytes; data[i++] = SKBRXBASE.skb_completed; data[i++] = SKBRXBASE.skb_completed_bytes; ASTXQ(queue_stats); data[i++] = SRXBASE.crc_err; data[i++] = SRXBASE.decrypt_crc_err; data[i++] = SRXBASE.phy_err; data[i++] = SRXBASE.mic_err; data[i++] = SRXBASE.pre_delim_crc_err; data[i++] = SRXBASE.post_delim_crc_err; data[i++] = SRXBASE.decrypt_busy_err; data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_RADAR]; data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]; data[i++] = SRXBASE.phy_err_stats[ATH9K_PHYERR_CCK_TIMING]; WARN_ON(i != ATH9K_HTC_SSTATS_LEN); } void ath9k_htc_deinit_debug(struct ath9k_htc_priv *priv) { ath9k_cmn_spectral_deinit_debug(&priv->spec_priv); } int ath9k_htc_init_debug(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; priv->debug.debugfs_phy = debugfs_create_dir(KBUILD_MODNAME, priv->hw->wiphy->debugfsdir); if (!priv->debug.debugfs_phy) return -ENOMEM; ath9k_cmn_spectral_init_debug(&priv->spec_priv, priv->debug.debugfs_phy); debugfs_create_file("tgt_int_stats", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_tgt_int_stats); debugfs_create_file("tgt_tx_stats", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_tgt_tx_stats); debugfs_create_file("tgt_rx_stats", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_tgt_rx_stats); debugfs_create_file("xmit", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_xmit); debugfs_create_file("skb_rx", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_skb_rx); ath9k_cmn_debug_recv(priv->debug.debugfs_phy, &priv->debug.rx_stats); ath9k_cmn_debug_phy_err(priv->debug.debugfs_phy, &priv->debug.rx_stats); debugfs_create_file("slot", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_slot); debugfs_create_file("queue", S_IRUSR, priv->debug.debugfs_phy, priv, &fops_queue); debugfs_create_file("debug", S_IRUSR | S_IWUSR, 
priv->debug.debugfs_phy, priv, &fops_debug); ath9k_cmn_debug_base_eeprom(priv->debug.debugfs_phy, priv->ah); ath9k_cmn_debug_modal_eeprom(priv->debug.debugfs_phy, priv->ah); return 0; }
gpl-2.0
jamison904/android_kernel_samsung_trlte
drivers/input/misc/max8925_onkey.c
2141
5319
/** * MAX8925 ONKEY driver * * Copyright (C) 2009 Marvell International Ltd. * Haojian Zhuang <haojian.zhuang@marvell.com> * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/mfd/max8925.h> #include <linux/slab.h> #define SW_INPUT (1 << 7) /* 0/1 -- up/down */ #define HARDRESET_EN (1 << 7) #define PWREN_EN (1 << 7) struct max8925_onkey_info { struct input_dev *idev; struct i2c_client *i2c; struct device *dev; unsigned int irq[2]; }; /* * MAX8925 gives us an interrupt when ONKEY is pressed or released. * max8925_set_bits() operates I2C bus and may sleep. So implement * it in thread IRQ handler. 
*/ static irqreturn_t max8925_onkey_handler(int irq, void *data) { struct max8925_onkey_info *info = data; int state; state = max8925_reg_read(info->i2c, MAX8925_ON_OFF_STATUS); input_report_key(info->idev, KEY_POWER, state & SW_INPUT); input_sync(info->idev); dev_dbg(info->dev, "onkey state:%d\n", state); /* Enable hardreset to halt if system isn't shutdown on time */ max8925_set_bits(info->i2c, MAX8925_SYSENSEL, HARDRESET_EN, HARDRESET_EN); return IRQ_HANDLED; } static int max8925_onkey_probe(struct platform_device *pdev) { struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); struct max8925_onkey_info *info; struct input_dev *input; int irq[2], error; irq[0] = platform_get_irq(pdev, 0); if (irq[0] < 0) { dev_err(&pdev->dev, "No IRQ resource!\n"); return -EINVAL; } irq[1] = platform_get_irq(pdev, 1); if (irq[1] < 0) { dev_err(&pdev->dev, "No IRQ resource!\n"); return -EINVAL; } info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL); input = input_allocate_device(); if (!info || !input) { error = -ENOMEM; goto err_free_mem; } info->idev = input; info->i2c = chip->i2c; info->dev = &pdev->dev; info->irq[0] = irq[0]; info->irq[1] = irq[1]; input->name = "max8925_on"; input->phys = "max8925_on/input0"; input->id.bustype = BUS_I2C; input->dev.parent = &pdev->dev; input_set_capability(input, EV_KEY, KEY_POWER); error = request_threaded_irq(irq[0], NULL, max8925_onkey_handler, IRQF_ONESHOT, "onkey-down", info); if (error < 0) { dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", irq[0], error); goto err_free_mem; } error = request_threaded_irq(irq[1], NULL, max8925_onkey_handler, IRQF_ONESHOT, "onkey-up", info); if (error < 0) { dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", irq[1], error); goto err_free_irq0; } error = input_register_device(info->idev); if (error) { dev_err(chip->dev, "Can't register input device: %d\n", error); goto err_free_irq1; } platform_set_drvdata(pdev, info); device_init_wakeup(&pdev->dev, 1); return 0; err_free_irq1: 
free_irq(irq[1], info); err_free_irq0: free_irq(irq[0], info); err_free_mem: input_free_device(input); kfree(info); return error; } static int max8925_onkey_remove(struct platform_device *pdev) { struct max8925_onkey_info *info = platform_get_drvdata(pdev); struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); free_irq(info->irq[0] + chip->irq_base, info); free_irq(info->irq[1] + chip->irq_base, info); input_unregister_device(info->idev); kfree(info); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM_SLEEP static int max8925_onkey_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct max8925_onkey_info *info = platform_get_drvdata(pdev); struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); if (device_may_wakeup(dev)) { chip->wakeup_flag |= 1 << info->irq[0]; chip->wakeup_flag |= 1 << info->irq[1]; } return 0; } static int max8925_onkey_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct max8925_onkey_info *info = platform_get_drvdata(pdev); struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); if (device_may_wakeup(dev)) { chip->wakeup_flag &= ~(1 << info->irq[0]); chip->wakeup_flag &= ~(1 << info->irq[1]); } return 0; } #endif static SIMPLE_DEV_PM_OPS(max8925_onkey_pm_ops, max8925_onkey_suspend, max8925_onkey_resume); static struct platform_driver max8925_onkey_driver = { .driver = { .name = "max8925-onkey", .owner = THIS_MODULE, .pm = &max8925_onkey_pm_ops, }, .probe = max8925_onkey_probe, .remove = max8925_onkey_remove, }; module_platform_driver(max8925_onkey_driver); MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver"); MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); MODULE_LICENSE("GPL");
gpl-2.0
changbindu/linux-ok6410
drivers/ide/cs5536.c
2397
7736
/* * CS5536 PATA support * (C) 2007 Martin K. Petersen <mkp@mkp.net> * (C) 2009 Bartlomiej Zolnierkiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Documentation: * Available from AMD web site. * * The IDE timing registers for the CS5536 live in the Geode Machine * Specific Register file and not PCI config space. Most BIOSes * virtualize the PCI registers so the chip looks like a standard IDE * controller. Unfortunately not all implementations get this right. * In particular some have problems with unaligned accesses to the * virtualized PCI registers. This driver always does full dword * writes to work around the issue. Also, in case of a bad BIOS this * driver can be loaded with the "msr=1" parameter which forces using * the Machine Specific Registers to configure the device. 
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>

#include <asm/msr.h>

#define DRV_NAME	"cs5536"

enum {
	MSR_IDE_CFG		= 0x51300010,	/* MSR address of first IDE register */
	PCI_IDE_CFG		= 0x40,		/* PCI config offset of the same register */

	/* dword indices into the register file (see cs5536_read/write) */
	CFG			= 0,	/* channel enable, cable state */
	DTC			= 2,	/* per-drive PIO/MWDMA data timings */
	CAST			= 3,	/* address setup + shared command timing */
	ETC			= 4,	/* per-drive UDMA timings */

	IDE_CFG_CHANEN		= (1 << 1),
	IDE_CFG_CABLE		= (1 << 17) | (1 << 16),

	/* each drive owns one byte inside the DTC/ETC dwords */
	IDE_D0_SHIFT		= 24,
	IDE_D1_SHIFT		= 16,
	IDE_DRV_MASK		= 0xff,

	/* 2-bit per-drive address setup fields inside CAST, plus a
	 * command-timing byte shared by both drives on the channel */
	IDE_CAST_D0_SHIFT	= 6,
	IDE_CAST_D1_SHIFT	= 4,
	IDE_CAST_DRV_MASK	= 0x3,
	IDE_CAST_CMD_SHIFT	= 24,
	IDE_CAST_CMD_MASK	= 0xff,

	IDE_ETC_UDMA_MASK	= 0xc0,
};

/* set via the "msr" module parameter: bypass the BIOS-virtualized PCI
 * registers and program the Machine Specific Registers directly */
static int use_msr;

/*
 * Read one 32-bit IDE timing register.  @reg is a dword index
 * (CFG/DTC/CAST/ETC), not a byte offset: the MSR file is indexed
 * directly, the PCI view at PCI_IDE_CFG + reg * 4.
 */
static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
	if (unlikely(use_msr)) {
		u32 dummy;

		rdmsr(MSR_IDE_CFG + reg, *val, dummy);
		return 0;
	}

	return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}

/*
 * Write one 32-bit IDE timing register; always a full dword write to
 * work around BIOSes that mishandle partial accesses (see file header).
 */
static int cs5536_write(struct pci_dev *pdev, int reg, int val)
{
	if (unlikely(use_msr)) {
		wrmsr(MSR_IDE_CFG + reg, val, 0);
		return 0;
	}

	return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}

/*
 * Program the DTC (data timing) byte for @drive.  The same byte holds
 * either the PIO or the MWDMA timing, whichever was last written; the
 * dma_start/dma_end hooks below swap between the two around transfers.
 */
static void cs5536_program_dtc(ide_drive_t *drive, u8 tim)
{
	struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
	int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
	u32 dtc;

	cs5536_read(pdev, DTC, &dtc);
	dtc &= ~(IDE_DRV_MASK << dshift);
	dtc |= tim << dshift;
	cs5536_write(pdev, DTC, dtc);
}

/**
 *	cs5536_cable_detect	-	detect cable type
 *	@hwif:	Port to detect on
 *
 *	Perform cable detection for ATA66 capable cable.
 *
 *	Returns a cable type.
 */
static u8 cs5536_cable_detect(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	u32 cfg;

	cs5536_read(pdev, CFG, &cfg);

	/* either cable bit set means an 80-wire cable was sensed */
	if (cfg & IDE_CFG_CABLE)
		return ATA_CBL_PATA80;
	else
		return ATA_CBL_PATA40;
}

/**
 *	cs5536_set_pio_mode		-	PIO timing setup
 *	@hwif:	ATA port
 *	@drive:	ATA device
 */
static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	/* DTC data timing value per PIO mode 0..4 */
	static const u8 drv_timings[5] = {
		0x98, 0x55, 0x32, 0x21, 0x20,
	};

	/* CAST address setup value per PIO mode 0..4 */
	static const u8 addr_timings[5] = {
		0x2, 0x1, 0x0, 0x0, 0x0,
	};

	/* CAST command timing value per PIO mode 0..4 */
	static const u8 cmd_timings[5] = {
		0x99, 0x92, 0x90, 0x22, 0x20,
	};

	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);
	u32 cast;
	const u8 pio = drive->pio_mode - XFER_PIO_0;
	u8 cmd_pio = pio;

	/* the CAST command byte is shared by both drives on the channel,
	 * so it must accommodate the slower of the pair */
	if (pair)
		cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);

	/* cache the PIO DTC value in the low drivedata byte; the high
	 * byte keeps the MWDMA value set by cs5536_set_dma_mode() */
	timings &= (IDE_DRV_MASK << 8);
	timings |= drv_timings[pio];
	ide_set_drivedata(drive, (void *)timings);

	cs5536_program_dtc(drive, drv_timings[pio]);

	cs5536_read(pdev, CAST, &cast);

	cast &= ~(IDE_CAST_DRV_MASK << cshift);
	cast |= addr_timings[pio] << cshift;

	cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
	cast |= cmd_timings[cmd_pio] << IDE_CAST_CMD_SHIFT;

	cs5536_write(pdev, CAST, cast);
}

/**
 *	cs5536_set_dma_mode		-	DMA timing setup
 *	@hwif:	ATA port
 *	@drive:	ATA device
 */
static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	/* ETC values per UDMA mode 0..5 */
	static const u8 udma_timings[6] = {
		0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
	};

	/* DTC values per MWDMA mode 0..2 */
	static const u8 mwdma_timings[3] = {
		0x67, 0x21, 0x20,
	};

	struct pci_dev *pdev = to_pci_dev(hwif->dev);
	int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);
	u32 etc;
	const u8 mode = drive->dma_mode;

	cs5536_read(pdev, ETC, &etc);

	if (mode >= XFER_UDMA_0) {
		etc &= ~(IDE_DRV_MASK << dshift);
		etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
	} else { /* MWDMA */
		/* disable UDMA for this drive, then stash the MWDMA DTC
		 * value in the high drivedata byte; it is swapped into
		 * the DTC register by cs5536_dma_start() below */
		etc &= ~(IDE_ETC_UDMA_MASK << dshift);
		timings &= IDE_DRV_MASK;
		timings |= mwdma_timings[mode - XFER_MW_DMA_0] << 8;
		ide_set_drivedata(drive, (void *)timings);
	}

	cs5536_write(pdev, ETC, etc);
}

/*
 * Before an MWDMA transfer, switch the drive's DTC byte from the PIO
 * timing (low drivedata byte) to the MWDMA timing (high byte), if the
 * two differ.  UDMA drives use ETC and need no switch.
 */
static void cs5536_dma_start(ide_drive_t *drive)
{
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);

	if (drive->current_speed < XFER_UDMA_0 &&
	    (timings >> 8) != (timings & IDE_DRV_MASK))
		cs5536_program_dtc(drive, timings >> 8);

	ide_dma_start(drive);
}

/*
 * After an MWDMA transfer, restore the PIO timing so that programmed
 * I/O (e.g. taskfile access) runs with the correct DTC value again.
 */
static int cs5536_dma_end(ide_drive_t *drive)
{
	int ret = ide_dma_end(drive);
	unsigned long timings = (unsigned long)ide_get_drivedata(drive);

	if (drive->current_speed < XFER_UDMA_0 &&
	    (timings >> 8) != (timings & IDE_DRV_MASK))
		cs5536_program_dtc(drive, timings & IDE_DRV_MASK);

	return ret;
}

static const struct ide_port_ops cs5536_port_ops = {
	.set_pio_mode		= cs5536_set_pio_mode,
	.set_dma_mode		= cs5536_set_dma_mode,
	.cable_detect		= cs5536_cable_detect,
};

static const struct ide_dma_ops cs5536_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_start		= cs5536_dma_start,
	.dma_end		= cs5536_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_timer_expiry	= ide_dma_sff_timer_expiry,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};

static const struct ide_port_info cs5536_info = {
	.name		= DRV_NAME,
	.port_ops	= &cs5536_port_ops,
	.dma_ops	= &cs5536_dma_ops,
	.host_flags	= IDE_HFLAG_SINGLE,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
};

/**
 *	cs5536_init_one
 *	@dev: PCI device
 *	@id: Entry in match table
 */
static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	u32 cfg;

	if (use_msr)
		printk(KERN_INFO DRV_NAME ": Using MSR regs instead of PCI\n");

	cs5536_read(dev, CFG, &cfg);

	/* refuse to attach when the BIOS has the channel disabled */
	if ((cfg & IDE_CFG_CHANEN) == 0) {
		printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
		return -ENODEV;
	}

	return ide_pci_init_one(dev, &cs5536_info, NULL);
}

static const struct pci_device_id cs5536_pci_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
	{ },
};

static struct pci_driver cs5536_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= cs5536_pci_tbl,
	.probe		= cs5536_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

module_pci_driver(cs5536_pci_driver);

MODULE_AUTHOR("Martin K. Petersen, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5536_pci_tbl);

module_param_named(msr, use_msr, int, 0644);
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
gpl-2.0
tbalden/android_kernel_htc_m9pw
drivers/input/touchscreen/hampshire.c
2653
4805
/* * Hampshire serial touchscreen driver * * Copyright (c) 2010 Adam Bennett * Based on the dynapro driver (c) Tias Guns * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ /* * 2010/04/08 Adam Bennett <abennett72@gmail.com> * Copied dynapro.c and edited for Hampshire 4-byte protocol */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "Hampshire serial touchscreen driver" MODULE_AUTHOR("Adam Bennett <abennett72@gmail.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* * Definitions & global arrays. */ #define HAMPSHIRE_FORMAT_TOUCH_BIT 0x40 #define HAMPSHIRE_FORMAT_LENGTH 4 #define HAMPSHIRE_RESPONSE_BEGIN_BYTE 0x80 #define HAMPSHIRE_MIN_XC 0 #define HAMPSHIRE_MAX_XC 0x1000 #define HAMPSHIRE_MIN_YC 0 #define HAMPSHIRE_MAX_YC 0x1000 #define HAMPSHIRE_GET_XC(data) (((data[3] & 0x0c) >> 2) | (data[1] << 2) | ((data[0] & 0x38) << 6)) #define HAMPSHIRE_GET_YC(data) ((data[3] & 0x03) | (data[2] << 2) | ((data[0] & 0x07) << 9)) #define HAMPSHIRE_GET_TOUCHED(data) (HAMPSHIRE_FORMAT_TOUCH_BIT & data[0]) /* * Per-touchscreen data. 
*/ struct hampshire { struct input_dev *dev; struct serio *serio; int idx; unsigned char data[HAMPSHIRE_FORMAT_LENGTH]; char phys[32]; }; static void hampshire_process_data(struct hampshire *phampshire) { struct input_dev *dev = phampshire->dev; if (HAMPSHIRE_FORMAT_LENGTH == ++phampshire->idx) { input_report_abs(dev, ABS_X, HAMPSHIRE_GET_XC(phampshire->data)); input_report_abs(dev, ABS_Y, HAMPSHIRE_GET_YC(phampshire->data)); input_report_key(dev, BTN_TOUCH, HAMPSHIRE_GET_TOUCHED(phampshire->data)); input_sync(dev); phampshire->idx = 0; } } static irqreturn_t hampshire_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct hampshire *phampshire = serio_get_drvdata(serio); phampshire->data[phampshire->idx] = data; if (HAMPSHIRE_RESPONSE_BEGIN_BYTE & phampshire->data[0]) hampshire_process_data(phampshire); else dev_dbg(&serio->dev, "unknown/unsynchronized data: %x\n", phampshire->data[0]); return IRQ_HANDLED; } static void hampshire_disconnect(struct serio *serio) { struct hampshire *phampshire = serio_get_drvdata(serio); input_get_device(phampshire->dev); input_unregister_device(phampshire->dev); serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(phampshire->dev); kfree(phampshire); } /* * hampshire_connect() is the routine that is called when someone adds a * new serio device that supports hampshire protocol and registers it as * an input device. This is usually accomplished using inputattach. 
*/ static int hampshire_connect(struct serio *serio, struct serio_driver *drv) { struct hampshire *phampshire; struct input_dev *input_dev; int err; phampshire = kzalloc(sizeof(struct hampshire), GFP_KERNEL); input_dev = input_allocate_device(); if (!phampshire || !input_dev) { err = -ENOMEM; goto fail1; } phampshire->serio = serio; phampshire->dev = input_dev; snprintf(phampshire->phys, sizeof(phampshire->phys), "%s/input0", serio->phys); input_dev->name = "Hampshire Serial TouchScreen"; input_dev->phys = phampshire->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_HAMPSHIRE; input_dev->id.product = 0; input_dev->id.version = 0x0001; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(phampshire->dev, ABS_X, HAMPSHIRE_MIN_XC, HAMPSHIRE_MAX_XC, 0, 0); input_set_abs_params(phampshire->dev, ABS_Y, HAMPSHIRE_MIN_YC, HAMPSHIRE_MAX_YC, 0, 0); serio_set_drvdata(serio, phampshire); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(phampshire->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(phampshire); return err; } /* * The serio driver structure. */ static struct serio_device_id hampshire_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_HAMPSHIRE, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, hampshire_serio_ids); static struct serio_driver hampshire_drv = { .driver = { .name = "hampshire", }, .description = DRIVER_DESC, .id_table = hampshire_serio_ids, .interrupt = hampshire_interrupt, .connect = hampshire_connect, .disconnect = hampshire_disconnect, }; module_serio_driver(hampshire_drv);
gpl-2.0
CheckYourScreen/Arsenic.Kernel_onyx-oos
fs/ceph/super.c
3165
23727
#include <linux/ceph/ceph_debug.h> #include <linux/backing-dev.h> #include <linux/ctype.h> #include <linux/fs.h> #include <linux/inet.h> #include <linux/in6.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/parser.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/string.h> #include "super.h" #include "mds_client.h" #include <linux/ceph/decode.h> #include <linux/ceph/mon_client.h> #include <linux/ceph/auth.h> #include <linux/ceph/debugfs.h> /* * Ceph superblock operations * * Handle the basics of mounting, unmounting. */ /* * super ops */ static void ceph_put_super(struct super_block *s) { struct ceph_fs_client *fsc = ceph_sb_to_client(s); dout("put_super\n"); ceph_mdsc_close_sessions(fsc->mdsc); /* * ensure we release the bdi before put_anon_super releases * the device name. */ if (s->s_bdi == &fsc->backing_dev_info) { bdi_unregister(&fsc->backing_dev_info); s->s_bdi = NULL; } return; } static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) { struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode); struct ceph_monmap *monmap = fsc->client->monc.monmap; struct ceph_statfs st; u64 fsid; int err; dout("statfs\n"); err = ceph_monc_do_statfs(&fsc->client->monc, &st); if (err < 0) return err; /* fill in kstatfs */ buf->f_type = CEPH_SUPER_MAGIC; /* ?? */ /* * express utilization in terms of large blocks to avoid * overflow on 32-bit machines. 
*/ buf->f_bsize = 1 << CEPH_BLOCK_SHIFT; buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10); buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10); buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10); buf->f_files = le64_to_cpu(st.num_objects); buf->f_ffree = -1; buf->f_namelen = NAME_MAX; buf->f_frsize = PAGE_CACHE_SIZE; /* leave fsid little-endian, regardless of host endianness */ fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1); buf->f_fsid.val[0] = fsid & 0xffffffff; buf->f_fsid.val[1] = fsid >> 32; return 0; } static int ceph_sync_fs(struct super_block *sb, int wait) { struct ceph_fs_client *fsc = ceph_sb_to_client(sb); if (!wait) { dout("sync_fs (non-blocking)\n"); ceph_flush_dirty_caps(fsc->mdsc); dout("sync_fs (non-blocking) done\n"); return 0; } dout("sync_fs (blocking)\n"); ceph_osdc_sync(&fsc->client->osdc); ceph_mdsc_sync(fsc->mdsc); dout("sync_fs (blocking) done\n"); return 0; } /* * mount options */ enum { Opt_wsize, Opt_rsize, Opt_rasize, Opt_caps_wanted_delay_min, Opt_caps_wanted_delay_max, Opt_cap_release_safety, Opt_readdir_max_entries, Opt_readdir_max_bytes, Opt_congestion_kb, Opt_last_int, /* int args above */ Opt_snapdirname, Opt_last_string, /* string args above */ Opt_dirstat, Opt_nodirstat, Opt_rbytes, Opt_norbytes, Opt_asyncreaddir, Opt_noasyncreaddir, Opt_dcache, Opt_nodcache, Opt_ino32, Opt_noino32, }; static match_table_t fsopt_tokens = { {Opt_wsize, "wsize=%d"}, {Opt_rsize, "rsize=%d"}, {Opt_rasize, "rasize=%d"}, {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, {Opt_cap_release_safety, "cap_release_safety=%d"}, {Opt_readdir_max_entries, "readdir_max_entries=%d"}, {Opt_readdir_max_bytes, "readdir_max_bytes=%d"}, {Opt_congestion_kb, "write_congestion_kb=%d"}, /* int args above */ {Opt_snapdirname, "snapdirname=%s"}, /* string args above */ {Opt_dirstat, "dirstat"}, {Opt_nodirstat, "nodirstat"}, {Opt_rbytes, "rbytes"}, 
{Opt_norbytes, "norbytes"}, {Opt_asyncreaddir, "asyncreaddir"}, {Opt_noasyncreaddir, "noasyncreaddir"}, {Opt_dcache, "dcache"}, {Opt_nodcache, "nodcache"}, {Opt_ino32, "ino32"}, {Opt_noino32, "noino32"}, {-1, NULL} }; static int parse_fsopt_token(char *c, void *private) { struct ceph_mount_options *fsopt = private; substring_t argstr[MAX_OPT_ARGS]; int token, intval, ret; token = match_token((char *)c, fsopt_tokens, argstr); if (token < 0) return -EINVAL; if (token < Opt_last_int) { ret = match_int(&argstr[0], &intval); if (ret < 0) { pr_err("bad mount option arg (not int) " "at '%s'\n", c); return ret; } dout("got int token %d val %d\n", token, intval); } else if (token > Opt_last_int && token < Opt_last_string) { dout("got string token %d val %s\n", token, argstr[0].from); } else { dout("got token %d\n", token); } switch (token) { case Opt_snapdirname: kfree(fsopt->snapdir_name); fsopt->snapdir_name = kstrndup(argstr[0].from, argstr[0].to-argstr[0].from, GFP_KERNEL); if (!fsopt->snapdir_name) return -ENOMEM; break; /* misc */ case Opt_wsize: fsopt->wsize = intval; break; case Opt_rsize: fsopt->rsize = intval; break; case Opt_rasize: fsopt->rasize = intval; break; case Opt_caps_wanted_delay_min: fsopt->caps_wanted_delay_min = intval; break; case Opt_caps_wanted_delay_max: fsopt->caps_wanted_delay_max = intval; break; case Opt_readdir_max_entries: fsopt->max_readdir = intval; break; case Opt_readdir_max_bytes: fsopt->max_readdir_bytes = intval; break; case Opt_congestion_kb: fsopt->congestion_kb = intval; break; case Opt_dirstat: fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT; break; case Opt_nodirstat: fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT; break; case Opt_rbytes: fsopt->flags |= CEPH_MOUNT_OPT_RBYTES; break; case Opt_norbytes: fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES; break; case Opt_asyncreaddir: fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR; break; case Opt_noasyncreaddir: fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR; break; case Opt_dcache: fsopt->flags |= 
CEPH_MOUNT_OPT_DCACHE; break; case Opt_nodcache: fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE; break; case Opt_ino32: fsopt->flags |= CEPH_MOUNT_OPT_INO32; break; case Opt_noino32: fsopt->flags &= ~CEPH_MOUNT_OPT_INO32; break; default: BUG_ON(token); } return 0; } static void destroy_mount_options(struct ceph_mount_options *args) { dout("destroy_mount_options %p\n", args); kfree(args->snapdir_name); kfree(args); } static int strcmp_null(const char *s1, const char *s2) { if (!s1 && !s2) return 0; if (s1 && !s2) return -1; if (!s1 && s2) return 1; return strcmp(s1, s2); } static int compare_mount_options(struct ceph_mount_options *new_fsopt, struct ceph_options *new_opt, struct ceph_fs_client *fsc) { struct ceph_mount_options *fsopt1 = new_fsopt; struct ceph_mount_options *fsopt2 = fsc->mount_options; int ofs = offsetof(struct ceph_mount_options, snapdir_name); int ret; ret = memcmp(fsopt1, fsopt2, ofs); if (ret) return ret; ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name); if (ret) return ret; return ceph_compare_options(new_opt, fsc->client); } static int parse_mount_options(struct ceph_mount_options **pfsopt, struct ceph_options **popt, int flags, char *options, const char *dev_name, const char **path) { struct ceph_mount_options *fsopt; const char *dev_name_end; int err = -ENOMEM; fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL); if (!fsopt) return -ENOMEM; dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name); fsopt->sb_flags = flags; fsopt->flags = CEPH_MOUNT_OPT_DEFAULT; fsopt->rsize = CEPH_RSIZE_DEFAULT; fsopt->rasize = CEPH_RASIZE_DEFAULT; fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT; fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT; fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT; fsopt->congestion_kb = 
default_congestion_kb(); /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */ err = -EINVAL; if (!dev_name) goto out; *path = strstr(dev_name, ":/"); if (*path == NULL) { pr_err("device name is missing path (no :/ in %s)\n", dev_name); goto out; } dev_name_end = *path; dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name); /* path on server */ *path += 2; dout("server path '%s'\n", *path); *popt = ceph_parse_options(options, dev_name, dev_name_end, parse_fsopt_token, (void *)fsopt); if (IS_ERR(*popt)) { err = PTR_ERR(*popt); goto out; } /* success */ *pfsopt = fsopt; return 0; out: destroy_mount_options(fsopt); return err; } /** * ceph_show_options - Show mount options in /proc/mounts * @m: seq_file to write to * @root: root of that (sub)tree */ static int ceph_show_options(struct seq_file *m, struct dentry *root) { struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb); struct ceph_mount_options *fsopt = fsc->mount_options; struct ceph_options *opt = fsc->client->options; if (opt->flags & CEPH_OPT_FSID) seq_printf(m, ",fsid=%pU", &opt->fsid); if (opt->flags & CEPH_OPT_NOSHARE) seq_puts(m, ",noshare"); if (opt->flags & CEPH_OPT_NOCRC) seq_puts(m, ",nocrc"); if (opt->name) seq_printf(m, ",name=%s", opt->name); if (opt->key) seq_puts(m, ",secret=<hidden>"); if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) seq_printf(m, ",mount_timeout=%d", opt->mount_timeout); if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl); if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT) seq_printf(m, ",osdtimeout=%d", opt->osd_timeout); if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) seq_printf(m, ",osdkeepalivetimeout=%d", opt->osd_keepalive_timeout); if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT) seq_puts(m, ",dirstat"); if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0) seq_puts(m, ",norbytes"); if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR) seq_puts(m, ",noasyncreaddir"); if (fsopt->flags & 
CEPH_MOUNT_OPT_DCACHE) seq_puts(m, ",dcache"); else seq_puts(m, ",nodcache"); if (fsopt->wsize) seq_printf(m, ",wsize=%d", fsopt->wsize); if (fsopt->rsize != CEPH_RSIZE_DEFAULT) seq_printf(m, ",rsize=%d", fsopt->rsize); if (fsopt->rasize != CEPH_RASIZE_DEFAULT) seq_printf(m, ",rasize=%d", fsopt->rasize); if (fsopt->congestion_kb != default_congestion_kb()) seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb); if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT) seq_printf(m, ",caps_wanted_delay_min=%d", fsopt->caps_wanted_delay_min); if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT) seq_printf(m, ",caps_wanted_delay_max=%d", fsopt->caps_wanted_delay_max); if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT) seq_printf(m, ",cap_release_safety=%d", fsopt->cap_release_safety); if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT) seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir); if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT) seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes); if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT)) seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name); return 0; } /* * handle any mon messages the standard library doesn't understand. * return error if we don't either. 
*/ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg) { struct ceph_fs_client *fsc = client->private; int type = le16_to_cpu(msg->hdr.type); switch (type) { case CEPH_MSG_MDS_MAP: ceph_mdsc_handle_map(fsc->mdsc, msg); return 0; default: return -1; } } /* * create a new fs client */ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, struct ceph_options *opt) { struct ceph_fs_client *fsc; const unsigned supported_features = CEPH_FEATURE_FLOCK | CEPH_FEATURE_DIRLAYOUTHASH; const unsigned required_features = 0; int err = -ENOMEM; fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); if (!fsc) return ERR_PTR(-ENOMEM); fsc->client = ceph_create_client(opt, fsc, supported_features, required_features); if (IS_ERR(fsc->client)) { err = PTR_ERR(fsc->client); goto fail; } fsc->client->extra_mon_dispatch = extra_mon_dispatch; fsc->client->monc.want_mdsmap = 1; fsc->mount_options = fsopt; fsc->sb = NULL; fsc->mount_state = CEPH_MOUNT_MOUNTING; atomic_long_set(&fsc->writeback_count, 0); err = bdi_init(&fsc->backing_dev_info); if (err < 0) goto fail_client; err = -ENOMEM; /* * The number of concurrent works can be high but they don't need * to be processed in parallel, limit concurrency. 
*/ fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1); if (fsc->wb_wq == NULL) goto fail_bdi; fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1); if (fsc->pg_inv_wq == NULL) goto fail_wb_wq; fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1); if (fsc->trunc_wq == NULL) goto fail_pg_inv_wq; /* set up mempools */ err = -ENOMEM; fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, fsc->mount_options->wsize >> PAGE_CACHE_SHIFT); if (!fsc->wb_pagevec_pool) goto fail_trunc_wq; /* caps */ fsc->min_caps = fsopt->max_readdir; return fsc; fail_trunc_wq: destroy_workqueue(fsc->trunc_wq); fail_pg_inv_wq: destroy_workqueue(fsc->pg_inv_wq); fail_wb_wq: destroy_workqueue(fsc->wb_wq); fail_bdi: bdi_destroy(&fsc->backing_dev_info); fail_client: ceph_destroy_client(fsc->client); fail: kfree(fsc); return ERR_PTR(err); } static void destroy_fs_client(struct ceph_fs_client *fsc) { dout("destroy_fs_client %p\n", fsc); destroy_workqueue(fsc->wb_wq); destroy_workqueue(fsc->pg_inv_wq); destroy_workqueue(fsc->trunc_wq); bdi_destroy(&fsc->backing_dev_info); mempool_destroy(fsc->wb_pagevec_pool); destroy_mount_options(fsc->mount_options); ceph_fs_debugfs_cleanup(fsc); ceph_destroy_client(fsc->client); kfree(fsc); dout("destroy_fs_client %p done\n", fsc); } /* * caches */ struct kmem_cache *ceph_inode_cachep; struct kmem_cache *ceph_cap_cachep; struct kmem_cache *ceph_dentry_cachep; struct kmem_cache *ceph_file_cachep; static void ceph_inode_init_once(void *foo) { struct ceph_inode_info *ci = foo; inode_init_once(&ci->vfs_inode); } static int __init init_caches(void) { ceph_inode_cachep = kmem_cache_create("ceph_inode_info", sizeof(struct ceph_inode_info), __alignof__(struct ceph_inode_info), (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), ceph_inode_init_once); if (ceph_inode_cachep == NULL) return -ENOMEM; ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); if (ceph_cap_cachep == NULL) goto bad_cap; ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info, 
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); if (ceph_dentry_cachep == NULL) goto bad_dentry; ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); if (ceph_file_cachep == NULL) goto bad_file; return 0; bad_file: kmem_cache_destroy(ceph_dentry_cachep); bad_dentry: kmem_cache_destroy(ceph_cap_cachep); bad_cap: kmem_cache_destroy(ceph_inode_cachep); return -ENOMEM; } static void destroy_caches(void) { kmem_cache_destroy(ceph_inode_cachep); kmem_cache_destroy(ceph_cap_cachep); kmem_cache_destroy(ceph_dentry_cachep); kmem_cache_destroy(ceph_file_cachep); } /* * ceph_umount_begin - initiate forced umount. Tear down down the * mount, skipping steps that may hang while waiting for server(s). */ static void ceph_umount_begin(struct super_block *sb) { struct ceph_fs_client *fsc = ceph_sb_to_client(sb); dout("ceph_umount_begin - starting forced umount\n"); if (!fsc) return; fsc->mount_state = CEPH_MOUNT_SHUTDOWN; return; } static const struct super_operations ceph_super_ops = { .alloc_inode = ceph_alloc_inode, .destroy_inode = ceph_destroy_inode, .write_inode = ceph_write_inode, .sync_fs = ceph_sync_fs, .put_super = ceph_put_super, .show_options = ceph_show_options, .statfs = ceph_statfs, .umount_begin = ceph_umount_begin, }; /* * Bootstrap mount by opening the root directory. Note the mount * @started time from caller, and time out if this takes too long. 
*/

/*
 * Issue a GETATTR to the MDS for @path (interpreted relative to the
 * root inode, CEPH_INO_ROOT) and turn the resulting inode into a
 * dentry.  If the inode is the actual filesystem root and the
 * superblock has no root yet, a proper root dentry is allocated;
 * otherwise an anonymous alias is obtained.  @started bounds the
 * request by the mount timeout.  Returns the dentry or an ERR_PTR.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	/* resolve the path against the root inode at the current snap */
	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout * HZ;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		/* take over the inode reference from the request */
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		if (ceph_ino(inode) == CEPH_INO_ROOT &&
		    fsc->sb->s_root == NULL) {
			root = d_make_root(inode);
			if (!root) {
				root = ERR_PTR(-ENOMEM);
				goto out;
			}
		} else {
			root = d_obtain_alias(inode);
		}
		ceph_init_dentry(root);
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}

/*
 * mount: join the ceph cluster, and open root directory.
*/ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc, const char *path) { int err; unsigned long started = jiffies; /* note the start time */ struct dentry *root; int first = 0; /* first vfsmount for this super_block */ dout("mount start\n"); mutex_lock(&fsc->client->mount_mutex); err = __ceph_open_session(fsc->client, started); if (err < 0) goto out; dout("mount opening root\n"); root = open_root_dentry(fsc, "", started); if (IS_ERR(root)) { err = PTR_ERR(root); goto out; } if (fsc->sb->s_root) { dput(root); } else { fsc->sb->s_root = root; first = 1; err = ceph_fs_debugfs_init(fsc); if (err < 0) goto fail; } if (path[0] == 0) { dget(root); } else { dout("mount opening base mountpoint\n"); root = open_root_dentry(fsc, path, started); if (IS_ERR(root)) { err = PTR_ERR(root); goto fail; } } fsc->mount_state = CEPH_MOUNT_MOUNTED; dout("mount success\n"); mutex_unlock(&fsc->client->mount_mutex); return root; out: mutex_unlock(&fsc->client->mount_mutex); return ERR_PTR(err); fail: if (first) { dput(fsc->sb->s_root); fsc->sb->s_root = NULL; } goto out; } static int ceph_set_super(struct super_block *s, void *data) { struct ceph_fs_client *fsc = data; int ret; dout("set_super %p data %p\n", s, data); s->s_flags = fsc->mount_options->sb_flags; s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ s->s_fs_info = fsc; fsc->sb = s; s->s_op = &ceph_super_ops; s->s_export_op = &ceph_export_ops; s->s_time_gran = 1000; /* 1000 ns == 1 us */ ret = set_anon_super(s, NULL); /* what is that second arg for? 
*/ if (ret != 0) goto fail; return ret; fail: s->s_fs_info = NULL; fsc->sb = NULL; return ret; } /* * share superblock if same fs AND options */ static int ceph_compare_super(struct super_block *sb, void *data) { struct ceph_fs_client *new = data; struct ceph_mount_options *fsopt = new->mount_options; struct ceph_options *opt = new->client->options; struct ceph_fs_client *other = ceph_sb_to_client(sb); dout("ceph_compare_super %p\n", sb); if (compare_mount_options(fsopt, opt, other)) { dout("monitor(s)/mount options don't match\n"); return 0; } if ((opt->flags & CEPH_OPT_FSID) && ceph_fsid_compare(&opt->fsid, &other->client->fsid)) { dout("fsid doesn't match\n"); return 0; } if (fsopt->sb_flags != other->mount_options->sb_flags) { dout("flags differ\n"); return 0; } return 1; } /* * construct our own bdi so we can control readahead, etc. */ static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); static int ceph_register_bdi(struct super_block *sb, struct ceph_fs_client *fsc) { int err; /* set ra_pages based on rasize mount option? 
*/ if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE) fsc->backing_dev_info.ra_pages = (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT; else fsc->backing_dev_info.ra_pages = default_backing_dev_info.ra_pages; err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d", atomic_long_inc_return(&bdi_seq)); if (!err) sb->s_bdi = &fsc->backing_dev_info; return err; } static struct dentry *ceph_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct super_block *sb; struct ceph_fs_client *fsc; struct dentry *res; int err; int (*compare_super)(struct super_block *, void *) = ceph_compare_super; const char *path = NULL; struct ceph_mount_options *fsopt = NULL; struct ceph_options *opt = NULL; dout("ceph_mount\n"); err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path); if (err < 0) { res = ERR_PTR(err); goto out_final; } /* create client (which we may/may not use) */ fsc = create_fs_client(fsopt, opt); if (IS_ERR(fsc)) { res = ERR_CAST(fsc); destroy_mount_options(fsopt); ceph_destroy_options(opt); goto out_final; } err = ceph_mdsc_init(fsc); if (err < 0) { res = ERR_PTR(err); goto out; } if (ceph_test_opt(fsc->client, NOSHARE)) compare_super = NULL; sb = sget(fs_type, compare_super, ceph_set_super, fsc); if (IS_ERR(sb)) { res = ERR_CAST(sb); goto out; } if (ceph_sb_to_client(sb) != fsc) { ceph_mdsc_destroy(fsc); destroy_fs_client(fsc); fsc = ceph_sb_to_client(sb); dout("get_sb got existing client %p\n", fsc); } else { dout("get_sb using new client %p\n", fsc); err = ceph_register_bdi(sb, fsc); if (err < 0) { res = ERR_PTR(err); goto out_splat; } } res = ceph_real_mount(fsc, path); if (IS_ERR(res)) goto out_splat; dout("root %p inode %p ino %llx.%llx\n", res, res->d_inode, ceph_vinop(res->d_inode)); return res; out_splat: ceph_mdsc_close_sessions(fsc->mdsc); deactivate_locked_super(sb); goto out_final; out: ceph_mdsc_destroy(fsc); destroy_fs_client(fsc); out_final: dout("ceph_mount fail %ld\n", 
PTR_ERR(res)); return res; } static void ceph_kill_sb(struct super_block *s) { struct ceph_fs_client *fsc = ceph_sb_to_client(s); dout("kill_sb %p\n", s); ceph_mdsc_pre_umount(fsc->mdsc); kill_anon_super(s); /* will call put_super after sb is r/o */ ceph_mdsc_destroy(fsc); destroy_fs_client(fsc); } static struct file_system_type ceph_fs_type = { .owner = THIS_MODULE, .name = "ceph", .mount = ceph_mount, .kill_sb = ceph_kill_sb, .fs_flags = FS_RENAME_DOES_D_MOVE, }; #define _STRINGIFY(x) #x #define STRINGIFY(x) _STRINGIFY(x) static int __init init_ceph(void) { int ret = init_caches(); if (ret) goto out; ceph_xattr_init(); ret = register_filesystem(&ceph_fs_type); if (ret) goto out_icache; pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL); return 0; out_icache: ceph_xattr_exit(); destroy_caches(); out: return ret; } static void __exit exit_ceph(void) { dout("exit_ceph\n"); unregister_filesystem(&ceph_fs_type); ceph_xattr_exit(); destroy_caches(); } module_init(init_ceph); module_exit(exit_ceph); MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); MODULE_AUTHOR("Patience Warnick <patience@newdream.net>"); MODULE_DESCRIPTION("Ceph filesystem for Linux"); MODULE_LICENSE("GPL");
gpl-2.0
omegahanggara/android_kernel_sony_xperia_z1
arch/arm/mach-omap2/omap-hotplug.c
4445
1491
/* * OMAP4 SMP cpu-hotplug support * * Copyright (C) 2010 Texas Instruments, Inc. * Author: * Santosh Shilimkar <santosh.shilimkar@ti.com> * * Platform file needed for the OMAP4 SMP. This file is based on arm * realview smp platform. * Copyright (c) 2002 ARM Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include "common.h" #include "powerdomain.h" int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * Called with IRQs disabled */ void __ref platform_cpu_die(unsigned int cpu) { unsigned int this_cpu; flush_cache_all(); dsb(); /* * we're ready for shutdown now, so do it */ if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0) pr_err("Secure clear status failed\n"); for (;;) { /* * Enter into low power state */ omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); this_cpu = smp_processor_id(); if (omap_read_auxcoreboot0() == this_cpu) { /* * OK, proper wakeup, we're done */ break; } pr_debug("CPU%u: spurious wakeup call\n", cpu); } } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
SmithGitHu/linux
arch/powerpc/platforms/chrp/pci.c
4445
10665
/* * CHRP pci routines. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/hydra.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/sections.h> #include <asm/pci-bridge.h> #include <asm/grackle.h> #include <asm/rtas.h> #include "chrp.h" #include "gg2.h" /* LongTrail */ void __iomem *gg2_pci_config_base; /* * The VLSI Golden Gate II has only 512K of PCI configuration space, so we * limit the bus number to 3 bits */ int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, int len, u32 *val) { volatile void __iomem *cfg_data; struct pci_controller *hose = pci_bus_to_host(bus); if (bus->number > 7) return PCIBIOS_DEVICE_NOT_FOUND; /* * Note: the caller has already checked that off is * suitably aligned and that len is 1, 2 or 4. */ cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off); switch (len) { case 1: *val = in_8(cfg_data); break; case 2: *val = in_le16(cfg_data); break; default: *val = in_le32(cfg_data); break; } return PCIBIOS_SUCCESSFUL; } int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off, int len, u32 val) { volatile void __iomem *cfg_data; struct pci_controller *hose = pci_bus_to_host(bus); if (bus->number > 7) return PCIBIOS_DEVICE_NOT_FOUND; /* * Note: the caller has already checked that off is * suitably aligned and that len is 1, 2 or 4. */ cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off); switch (len) { case 1: out_8(cfg_data, val); break; case 2: out_le16(cfg_data, val); break; default: out_le32(cfg_data, val); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops gg2_pci_ops = { .read = gg2_read_config, .write = gg2_write_config, }; /* * Access functions for PCI config space using RTAS calls. 
*/ int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) | (((bus->number - hose->first_busno) & 0xff) << 16) | (hose->global_number << 24); int ret = -1; int rval; rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len); *val = ret; return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) | (((bus->number - hose->first_busno) & 0xff) << 16) | (hose->global_number << 24); int rval; rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL, addr, len, val); return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } static struct pci_ops rtas_pci_ops = { .read = rtas_read_config, .write = rtas_write_config, }; volatile struct Hydra __iomem *Hydra = NULL; int __init hydra_init(void) { struct device_node *np; struct resource r; np = of_find_node_by_name(NULL, "mac-io"); if (np == NULL || of_address_to_resource(np, 0, &r)) { of_node_put(np); return 0; } of_node_put(np); Hydra = ioremap(r.start, resource_size(&r)); printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start); printk("Hydra Feature_Control was %x", in_le32(&Hydra->Feature_Control)); out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN | HYDRA_FC_SCSI_CELL_EN | HYDRA_FC_SCCA_ENABLE | HYDRA_FC_SCCB_ENABLE | HYDRA_FC_ARB_BYPASS | HYDRA_FC_MPIC_ENABLE | HYDRA_FC_SLOW_SCC_PCLK | HYDRA_FC_MPIC_IS_MASTER)); printk(", now %x\n", in_le32(&Hydra->Feature_Control)); return 1; } #define PRG_CL_RESET_VALID 0x00010000 static void __init setup_python(struct pci_controller *hose, struct device_node *dev) { u32 __iomem *reg; u32 val; struct resource r; if (of_address_to_resource(dev, 0, &r)) { printk(KERN_ERR "No address for Python PCI 
controller\n"); return; } /* Clear the magic go-slow bit */ reg = ioremap(r.start + 0xf6000, 0x40); BUG_ON(!reg); val = in_be32(&reg[12]); if (val & PRG_CL_RESET_VALID) { out_be32(&reg[12], val & ~PRG_CL_RESET_VALID); in_be32(&reg[12]); } iounmap(reg); setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010, 0); } /* Marvell Discovery II based Pegasos 2 */ static void __init setup_peg2(struct pci_controller *hose, struct device_node *dev) { struct device_node *root = of_find_node_by_path("/"); struct device_node *rtas; rtas = of_find_node_by_name (root, "rtas"); if (rtas) { hose->ops = &rtas_pci_ops; of_node_put(rtas); } else { printk ("RTAS supporting Pegasos OF not found, please upgrade" " your firmware\n"); } pci_add_flags(PCI_REASSIGN_ALL_BUS); /* keep the reference to the root node */ } void __init chrp_find_bridges(void) { struct device_node *dev; const int *bus_range; int len, index = -1; struct pci_controller *hose; const unsigned int *dma; const char *model, *machine; int is_longtrail = 0, is_mot = 0, is_pegasos = 0; struct device_node *root = of_find_node_by_path("/"); struct resource r; /* * The PCI host bridge nodes on some machines don't have * properties to adequately identify them, so we have to * look at what sort of machine this is as well. 
*/ machine = of_get_property(root, "model", NULL); if (machine != NULL) { is_longtrail = strncmp(machine, "IBM,LongTrail", 13) == 0; is_mot = strncmp(machine, "MOT", 3) == 0; if (strncmp(machine, "Pegasos2", 8) == 0) is_pegasos = 2; else if (strncmp(machine, "Pegasos", 7) == 0) is_pegasos = 1; } for (dev = root->child; dev != NULL; dev = dev->sibling) { if (dev->type == NULL || strcmp(dev->type, "pci") != 0) continue; ++index; /* The GG2 bridge on the LongTrail doesn't have an address */ if (of_address_to_resource(dev, 0, &r) && !is_longtrail) { printk(KERN_WARNING "Can't use %s: no address\n", dev->full_name); continue; } bus_range = of_get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s\n", dev->full_name); continue; } if (bus_range[1] == bus_range[0]) printk(KERN_INFO "PCI bus %d", bus_range[0]); else printk(KERN_INFO "PCI buses %d..%d", bus_range[0], bus_range[1]); printk(" controlled by %s", dev->full_name); if (!is_longtrail) printk(" at %llx", (unsigned long long)r.start); printk("\n"); hose = pcibios_alloc_controller(dev); if (!hose) { printk("Can't allocate PCI controller structure for %s\n", dev->full_name); continue; } hose->first_busno = hose->self_busno = bus_range[0]; hose->last_busno = bus_range[1]; model = of_get_property(dev, "model", NULL); if (model == NULL) model = "<none>"; if (strncmp(model, "IBM, Python", 11) == 0) { setup_python(hose, dev); } else if (is_mot || strncmp(model, "Motorola, Grackle", 17) == 0) { setup_grackle(hose); } else if (is_longtrail) { void __iomem *p = ioremap(GG2_PCI_CONFIG_BASE, 0x80000); hose->ops = &gg2_pci_ops; hose->cfg_data = p; gg2_pci_config_base = p; } else if (is_pegasos == 1) { setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc, 0); } else if (is_pegasos == 2) { setup_peg2(hose, dev); } else if (!strncmp(model, "IBM,CPC710", 10)) { setup_indirect_pci(hose, r.start + 0x000f8000, r.start + 0x000f8010, 0); if (index == 0) { dma = 
of_get_property(dev, "system-dma-base", &len); if (dma && len >= sizeof(*dma)) { dma = (unsigned int *) (((unsigned long)dma) + len - sizeof(*dma)); pci_dram_offset = *dma; } } } else { printk("No methods for %s (model %s), using RTAS\n", dev->full_name, model); hose->ops = &rtas_pci_ops; } pci_process_bridge_OF_ranges(hose, dev, index == 0); /* check the first bridge for a property that we can use to set pci_dram_offset */ dma = of_get_property(dev, "ibm,dma-ranges", &len); if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) { pci_dram_offset = dma[2] - dma[3]; printk("pci_dram_offset = %lx\n", pci_dram_offset); } } of_node_put(root); } /* SL82C105 IDE Control/Status Register */ #define SL82C105_IDECSR 0x40 /* Fixup for Winbond ATA quirk, required for briq mostly because the * 8259 is configured for level sensitive IRQ 14 and so wants the * ATA controller to be set to fully native mode or bad things * will happen. */ static void chrp_pci_fixup_winbond_ata(struct pci_dev *sl82c105) { u8 progif; /* If non-briq machines need that fixup too, please speak up */ if (!machine_is(chrp) || _chrp_type != _CHRP_briq) return; if ((sl82c105->class & 5) != 5) { printk("W83C553: Switching SL82C105 IDE to PCI native mode\n"); /* Enable SL82C105 PCI native IDE mode */ pci_read_config_byte(sl82c105, PCI_CLASS_PROG, &progif); pci_write_config_byte(sl82c105, PCI_CLASS_PROG, progif | 0x05); sl82c105->class |= 0x05; /* Disable SL82C105 second port */ pci_write_config_word(sl82c105, SL82C105_IDECSR, 0x0003); /* Clear IO BARs, they will be reassigned */ pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_0, 0); pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_1, 0); pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_2, 0); pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_3, 0); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, chrp_pci_fixup_winbond_ata); /* Pegasos2 firmware version 20040810 configures the built-in IDE controller * in legacy mode, but 
sets the PCI registers to PCI native mode. * The chip can only operate in legacy mode, so force the PCI class into legacy * mode as well. The same fixup must be done to the class-code property in * the IDE node /pci@80000000/ide@C,1 */ static void chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide) { u8 progif; struct pci_dev *viaisa; if (!machine_is(chrp) || _chrp_type != _CHRP_Pegasos) return; if (viaide->irq != 14) return; viaisa = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); if (!viaisa) return; dev_info(&viaide->dev, "Fixing VIA IDE, force legacy mode on\n"); pci_read_config_byte(viaide, PCI_CLASS_PROG, &progif); pci_write_config_byte(viaide, PCI_CLASS_PROG, progif & ~0x5); viaide->class &= ~0x5; pci_dev_put(viaisa); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
gpl-2.0
flar2/ville-bulletproof
drivers/media/video/gspca/sq930x.c
4957
32957
/* * SQ930x subdriver * * Copyright (C) 2010 Jean-François Moine <http://moinejf.free.fr> * Copyright (C) 2006 -2008 Gerard Klaver <gerard at gkall dot hobby dot nl> * Copyright (C) 2007 Sam Revitch <samr7@cs.washington.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq930x" #include "gspca.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>\n" "Gerard Klaver <gerard at gkall dot hobby dot nl\n" "Sam Revitch <samr7@cs.washington.edu>"); MODULE_DESCRIPTION("GSPCA/SQ930x USB Camera Driver"); MODULE_LICENSE("GPL"); /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ u16 expo; u8 gain; u8 do_ctrl; u8 gpio[2]; u8 sensor; u8 type; #define Generic 0 #define Creative_live_motion 1 }; enum sensors { SENSOR_ICX098BQ, SENSOR_LZ24BP, SENSOR_MI0360, SENSOR_MT9V111, /* = MI360SOC */ SENSOR_OV7660, SENSOR_OV9630, }; static int sd_setexpo(struct gspca_dev *gspca_dev, __s32 val); static int sd_getexpo(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val); static const struct ctrl sd_ctrls[] = { { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Exposure", .minimum = 0x0001, .maximum = 0x0fff, .step = 1, #define EXPO_DEF 0x0356 .default_value = EXPO_DEF, }, .set = sd_setexpo, .get = sd_getexpo, }, { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0x01, .maximum = 0xff, .step = 1, #define GAIN_DEF 0x8d .default_value = GAIN_DEF, }, .set = sd_setgain, .get = sd_getgain, }, }; static struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, {640, 480, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, }; /* sq930x registers */ #define SQ930_CTRL_UCBUS_IO 0x0001 #define SQ930_CTRL_I2C_IO 0x0002 #define SQ930_CTRL_GPIO 0x0005 #define SQ930_CTRL_CAP_START 0x0010 #define SQ930_CTRL_CAP_STOP 0x0011 #define SQ930_CTRL_SET_EXPOSURE 0x001d #define SQ930_CTRL_RESET 0x001e #define SQ930_CTRL_GET_DEV_INFO 0x001f /* gpio 1 (8..15) */ #define SQ930_GPIO_DFL_I2C_SDA 0x0001 #define SQ930_GPIO_DFL_I2C_SCL 0x0002 #define SQ930_GPIO_RSTBAR 0x0004 #define SQ930_GPIO_EXTRA1 0x0040 #define SQ930_GPIO_EXTRA2 0x0080 /* gpio 3 (24..31) */ #define SQ930_GPIO_POWER 0x0200 #define SQ930_GPIO_DFL_LED 0x1000 struct ucbus_write_cmd { u16 bw_addr; u8 bw_data; }; struct i2c_write_cmd { u8 
reg; u16 val; }; static const struct ucbus_write_cmd icx098bq_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf800, 0x02}, {0xf801, 0xce}, {0xf802, 0xc1}, {0xf804, 0x00}, {0xf808, 0x00}, {0xf809, 0x0e}, {0xf80a, 0x01}, {0xf80b, 0xee}, {0xf807, 0x60}, {0xf80c, 0x02}, {0xf80d, 0xf0}, {0xf80e, 0x03}, {0xf80f, 0x0a}, {0xf81c, 0x02}, {0xf81d, 0xf0}, {0xf81e, 0x03}, {0xf81f, 0x0a}, {0xf83a, 0x00}, {0xf83b, 0x10}, {0xf83c, 0x00}, {0xf83d, 0x4e}, {0xf810, 0x04}, {0xf811, 0x00}, {0xf812, 0x02}, {0xf813, 0x10}, {0xf803, 0x00}, {0xf814, 0x01}, {0xf815, 0x18}, {0xf816, 0x00}, {0xf817, 0x48}, {0xf818, 0x00}, {0xf819, 0x25}, {0xf81a, 0x00}, {0xf81b, 0x3c}, {0xf82f, 0x03}, {0xf820, 0xff}, {0xf821, 0x0d}, {0xf822, 0xff}, {0xf823, 0x07}, {0xf824, 0xff}, {0xf825, 0x03}, {0xf826, 0xff}, {0xf827, 0x06}, {0xf828, 0xff}, {0xf829, 0x03}, {0xf82a, 0xff}, {0xf82b, 0x0c}, {0xf82c, 0xfd}, {0xf82d, 0x01}, {0xf82e, 0x00}, {0xf830, 0x00}, {0xf831, 0x47}, {0xf832, 0x00}, {0xf833, 0x00}, {0xf850, 0x00}, {0xf851, 0x00}, {0xf852, 0x00}, {0xf853, 0x24}, {0xf854, 0x00}, {0xf855, 0x18}, {0xf856, 0x00}, {0xf857, 0x3c}, {0xf858, 0x00}, {0xf859, 0x0c}, {0xf85a, 0x00}, {0xf85b, 0x30}, {0xf85c, 0x00}, {0xf85d, 0x0c}, {0xf85e, 0x00}, {0xf85f, 0x30}, {0xf860, 0x00}, {0xf861, 0x48}, {0xf862, 0x01}, {0xf863, 0xdc}, {0xf864, 0xff}, {0xf865, 0x98}, {0xf866, 0xff}, {0xf867, 0xc0}, {0xf868, 0xff}, {0xf869, 0x70}, {0xf86c, 0xff}, {0xf86d, 0x00}, {0xf86a, 0xff}, {0xf86b, 0x48}, {0xf86e, 0xff}, {0xf86f, 0x00}, {0xf870, 0x01}, {0xf871, 0xdb}, {0xf872, 0x01}, {0xf873, 0xfa}, {0xf874, 0x01}, {0xf875, 0xdb}, {0xf876, 0x01}, {0xf877, 0xfa}, {0xf878, 0x0f}, {0xf879, 0x0f}, {0xf87a, 0xff}, {0xf87b, 0xff}, {0xf800, 0x03} }; static const struct ucbus_write_cmd icx098bq_start_1[] = { {0xf5f0, 0x00}, {0xf5f1, 0xcd}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xc0}, {0xf5f0, 0x49}, {0xf5f1, 0xcd}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xc0}, {0xf5fa, 0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; 
static const struct ucbus_write_cmd icx098bq_start_2[] = { {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x82}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x40}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0xcf}, {0xf806, 0xd0}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x00}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03} }; static const struct ucbus_write_cmd lz24bp_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf800, 0x02}, {0xf801, 0xbe}, {0xf802, 0xc6}, {0xf804, 0x00}, {0xf808, 0x00}, {0xf809, 0x06}, {0xf80a, 0x01}, {0xf80b, 0xfe}, {0xf807, 0x84}, {0xf80c, 0x02}, {0xf80d, 0xf7}, {0xf80e, 0x03}, {0xf80f, 0x0b}, {0xf81c, 0x00}, {0xf81d, 0x49}, {0xf81e, 0x03}, {0xf81f, 0x0b}, {0xf83a, 0x00}, {0xf83b, 0x01}, {0xf83c, 0x00}, {0xf83d, 0x6b}, {0xf810, 0x03}, {0xf811, 0x10}, {0xf812, 0x02}, {0xf813, 0x6f}, {0xf803, 0x00}, {0xf814, 0x00}, {0xf815, 0x44}, {0xf816, 0x00}, {0xf817, 0x48}, {0xf818, 0x00}, {0xf819, 0x25}, {0xf81a, 0x00}, {0xf81b, 0x3c}, {0xf82f, 0x03}, {0xf820, 0xff}, {0xf821, 0x0d}, {0xf822, 0xff}, {0xf823, 0x07}, {0xf824, 0xfd}, {0xf825, 0x07}, {0xf826, 0xf0}, {0xf827, 0x0c}, {0xf828, 0xff}, {0xf829, 0x03}, {0xf82a, 0xff}, {0xf82b, 0x0c}, {0xf82c, 0xfc}, {0xf82d, 0x01}, {0xf82e, 0x00}, {0xf830, 0x00}, {0xf831, 0x47}, {0xf832, 0x00}, {0xf833, 0x00}, {0xf850, 0x00}, {0xf851, 0x00}, {0xf852, 0x00}, {0xf853, 0x24}, {0xf854, 0x00}, {0xf855, 0x0c}, {0xf856, 0x00}, {0xf857, 0x30}, {0xf858, 0x00}, {0xf859, 0x18}, {0xf85a, 0x00}, {0xf85b, 0x3c}, {0xf85c, 0x00}, {0xf85d, 0x18}, {0xf85e, 0x00}, {0xf85f, 0x3c}, {0xf860, 0xff}, {0xf861, 0x37}, {0xf862, 0xff}, {0xf863, 0x1d}, {0xf864, 0xff}, {0xf865, 0x98}, {0xf866, 0xff}, {0xf867, 0xc0}, {0xf868, 0x00}, {0xf869, 0x37}, {0xf86c, 0x02}, {0xf86d, 0x1d}, {0xf86a, 0x00}, {0xf86b, 0x37}, {0xf86e, 0x02}, {0xf86f, 0x1d}, {0xf870, 0x01}, {0xf871, 0xc6}, {0xf872, 0x02}, {0xf873, 0x04}, {0xf874, 0x01}, {0xf875, 
0xc6}, {0xf876, 0x02}, {0xf877, 0x04}, {0xf878, 0x0f}, {0xf879, 0x0f}, {0xf87a, 0xff}, {0xf87b, 0xff}, {0xf800, 0x03} }; static const struct ucbus_write_cmd lz24bp_start_1_gen[] = { {0xf5f0, 0x00}, {0xf5f1, 0xff}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xb3}, {0xf5f0, 0x40}, {0xf5f1, 0xff}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xb3}, {0xf5fa, 0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; static const struct ucbus_write_cmd lz24bp_start_1_clm[] = { {0xf5f0, 0x00}, {0xf5f1, 0xff}, {0xf5f2, 0x88}, {0xf5f3, 0x88}, {0xf5f4, 0xc0}, {0xf5f0, 0x40}, {0xf5f1, 0xff}, {0xf5f2, 0x88}, {0xf5f3, 0x88}, {0xf5f4, 0xc0}, {0xf5fa, 0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; static const struct ucbus_write_cmd lz24bp_start_2[] = { {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x80}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x4e}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0xc0}, {0xf806, 0x48}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x00}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03} }; static const struct ucbus_write_cmd mi0360_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf332, 0xcc}, {0xf333, 0xcc}, {0xf334, 0xcc}, {0xf335, 0xcc}, {0xf33f, 0x00} }; static const struct i2c_write_cmd mi0360_init_23[] = { {0x30, 0x0040}, /* reserved - def 0x0005 */ {0x31, 0x0000}, /* reserved - def 0x002a */ {0x34, 0x0100}, /* reserved - def 0x0100 */ {0x3d, 0x068f}, /* reserved - def 0x068f */ }; static const struct i2c_write_cmd mi0360_init_24[] = { {0x03, 0x01e5}, /* window height */ {0x04, 0x0285}, /* window width */ }; static const struct i2c_write_cmd mi0360_init_25[] = { {0x35, 0x0020}, /* global gain */ {0x2b, 0x0020}, /* green1 gain */ {0x2c, 0x002a}, /* blue gain */ {0x2d, 0x0028}, /* red gain */ {0x2e, 0x0020}, /* green2 gain */ }; static const struct ucbus_write_cmd mi0360_start_1[] = { {0xf5f0, 0x11}, 
{0xf5f1, 0x99}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xa6}, {0xf5f0, 0x51}, {0xf5f1, 0x99}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xa6}, {0xf5fa, 0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; static const struct i2c_write_cmd mi0360_start_2[] = { {0x62, 0x041d}, /* reserved - def 0x0418 */ }; static const struct i2c_write_cmd mi0360_start_3[] = { {0x05, 0x007b}, /* horiz blanking */ }; static const struct i2c_write_cmd mi0360_start_4[] = { {0x05, 0x03f5}, /* horiz blanking */ }; static const struct i2c_write_cmd mt9v111_init_0[] = { {0x01, 0x0001}, /* select IFP/SOC registers */ {0x06, 0x300c}, /* operating mode control */ {0x08, 0xcc00}, /* output format control (RGB) */ {0x01, 0x0004}, /* select sensor core registers */ }; static const struct i2c_write_cmd mt9v111_init_1[] = { {0x03, 0x01e5}, /* window height */ {0x04, 0x0285}, /* window width */ }; static const struct i2c_write_cmd mt9v111_init_2[] = { {0x30, 0x7800}, {0x31, 0x0000}, {0x07, 0x3002}, /* output control */ {0x35, 0x0020}, /* global gain */ {0x2b, 0x0020}, /* green1 gain */ {0x2c, 0x0020}, /* blue gain */ {0x2d, 0x0020}, /* red gain */ {0x2e, 0x0020}, /* green2 gain */ }; static const struct ucbus_write_cmd mt9v111_start_1[] = { {0xf5f0, 0x11}, {0xf5f1, 0x96}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xaa}, {0xf5f0, 0x51}, {0xf5f1, 0x96}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xaa}, {0xf5fa, 0x00}, {0xf5f6, 0x0a}, {0xf5f7, 0x0a}, {0xf5f8, 0x0a}, {0xf5f9, 0x0a} }; static const struct i2c_write_cmd mt9v111_init_3[] = { {0x62, 0x0405}, }; static const struct i2c_write_cmd mt9v111_init_4[] = { /* {0x05, 0x00ce}, */ {0x05, 0x005d}, /* horizontal blanking */ }; static const struct ucbus_write_cmd ov7660_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf332, 0x00}, {0xf333, 0xc0}, {0xf334, 0x39}, {0xf335, 0xe7}, {0xf33f, 0x03} }; static const struct ucbus_write_cmd ov9630_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf332, 0x00}, {0xf333, 0x00}, {0xf334, 0x3e}, 
{0xf335, 0xf8}, {0xf33f, 0x03} }; /* start parameters indexed by [sensor][mode] */ static const struct cap_s { u8 cc_sizeid; u8 cc_bytes[32]; } capconfig[4][2] = { [SENSOR_ICX098BQ] = { {2, /* Bayer 320x240 */ {0x05, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, }, [SENSOR_LZ24BP] = { {2, /* Bayer 320x240 */ {0x05, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xee, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xee, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, }, [SENSOR_MI0360] = { {2, /* Bayer 320x240 */ {0x05, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, }, [SENSOR_MT9V111] = { {2, /* Bayer 320x240 */ {0x05, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00} }, }, }; struct sensor_s { const char *name; u8 i2c_addr; u8 i2c_dum; u8 gpio[5]; u8 cmd_len; const struct ucbus_write_cmd *cmd; }; static const struct sensor_s sensor_tb[] = { [SENSOR_ICX098BQ] = { "icx098bp", 0x00, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 8, icx098bq_start_0 }, [SENSOR_LZ24BP] = { "lz24bp", 0x00, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 8, lz24bp_start_0 }, [SENSOR_MI0360] = { "mi0360", 0x5d, 0x80, {SQ930_GPIO_RSTBAR, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, 0 }, 7, mi0360_start_0 }, [SENSOR_MT9V111] = { "mt9v111", 0x5c, 0x7f, {SQ930_GPIO_RSTBAR, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, 0 }, 7, mi0360_start_0 }, [SENSOR_OV7660] = { "ov7660", 0x21, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 7, ov7660_start_0 }, [SENSOR_OV9630] = { "ov9630", 0x30, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 7, ov9630_start_0 }, }; static void reg_r(struct gspca_dev *gspca_dev, u16 value, int len) { int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0x0c, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_r %04x failed %d\n", value, ret); gspca_dev->usb_err = ret; } } static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index) { int ret; if (gspca_dev->usb_err < 0) return; PDEBUG(D_USBO, "reg_w v: %04x i: %04x", value, index); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); msleep(30); if (ret < 0) { pr_err("reg_w %04x %04x failed %d\n", value, index, ret); 
gspca_dev->usb_err = ret; } } static void reg_wb(struct gspca_dev *gspca_dev, u16 value, u16 index, const u8 *data, int len) { int ret; if (gspca_dev->usb_err < 0) return; PDEBUG(D_USBO, "reg_wb v: %04x i: %04x %02x...%02x", value, index, *data, data[len - 1]); memcpy(gspca_dev->usb_buf, data, len); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, gspca_dev->usb_buf, len, 1000); msleep(30); if (ret < 0) { pr_err("reg_wb %04x %04x failed %d\n", value, index, ret); gspca_dev->usb_err = ret; } } static void i2c_write(struct sd *sd, const struct i2c_write_cmd *cmd, int ncmds) { struct gspca_dev *gspca_dev = &sd->gspca_dev; const struct sensor_s *sensor; u16 val, idx; u8 *buf; int ret; if (gspca_dev->usb_err < 0) return; sensor = &sensor_tb[sd->sensor]; val = (sensor->i2c_addr << 8) | SQ930_CTRL_I2C_IO; idx = (cmd->val & 0xff00) | cmd->reg; buf = gspca_dev->usb_buf; *buf++ = sensor->i2c_dum; *buf++ = cmd->val; while (--ncmds > 0) { cmd++; *buf++ = cmd->reg; *buf++ = cmd->val >> 8; *buf++ = sensor->i2c_dum; *buf++ = cmd->val; } PDEBUG(D_USBO, "i2c_w v: %04x i: %04x %02x...%02x", val, idx, gspca_dev->usb_buf[0], buf[-1]); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, idx, gspca_dev->usb_buf, buf - gspca_dev->usb_buf, 500); if (ret < 0) { pr_err("i2c_write failed %d\n", ret); gspca_dev->usb_err = ret; } } static void ucbus_write(struct gspca_dev *gspca_dev, const struct ucbus_write_cmd *cmd, int ncmds, int batchsize) { u8 *buf; u16 val, idx; int len, ret; if (gspca_dev->usb_err < 0) return; #ifdef GSPCA_DEBUG if ((batchsize - 1) * 3 > USB_BUF_SZ) { pr_err("Bug: usb_buf overflow\n"); gspca_dev->usb_err = -ENOMEM; return; } #endif for (;;) { len = ncmds; if (len > batchsize) len = batchsize; ncmds -= len; val = (cmd->bw_addr << 8) | SQ930_CTRL_UCBUS_IO; idx = 
(cmd->bw_data << 8) | (cmd->bw_addr >> 8); buf = gspca_dev->usb_buf; while (--len > 0) { cmd++; *buf++ = cmd->bw_addr; *buf++ = cmd->bw_addr >> 8; *buf++ = cmd->bw_data; } if (buf != gspca_dev->usb_buf) PDEBUG(D_USBO, "ucbus v: %04x i: %04x %02x...%02x", val, idx, gspca_dev->usb_buf[0], buf[-1]); else PDEBUG(D_USBO, "ucbus v: %04x i: %04x", val, idx); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, idx, gspca_dev->usb_buf, buf - gspca_dev->usb_buf, 500); if (ret < 0) { pr_err("ucbus_write failed %d\n", ret); gspca_dev->usb_err = ret; return; } msleep(30); if (ncmds <= 0) break; cmd++; } } static void gpio_set(struct sd *sd, u16 val, u16 mask) { struct gspca_dev *gspca_dev = &sd->gspca_dev; if (mask & 0x00ff) { sd->gpio[0] &= ~mask; sd->gpio[0] |= val; reg_w(gspca_dev, 0x0100 | SQ930_CTRL_GPIO, ~sd->gpio[0] << 8); } mask >>= 8; val >>= 8; if (mask) { sd->gpio[1] &= ~mask; sd->gpio[1] |= val; reg_w(gspca_dev, 0x0300 | SQ930_CTRL_GPIO, ~sd->gpio[1] << 8); } } static void gpio_init(struct sd *sd, const u8 *gpio) { gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio, 0x000f); } static void bridge_init(struct sd *sd) { static const struct ucbus_write_cmd clkfreq_cmd = { 0xf031, 0 /* SQ930_CLKFREQ_60MHZ */ }; ucbus_write(&sd->gspca_dev, &clkfreq_cmd, 1, 1); gpio_set(sd, SQ930_GPIO_POWER, 0xff00); } static void cmos_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; const struct sensor_s *sensor; static const u8 probe_order[] = { /* SENSOR_LZ24BP, (tested as ccd) */ SENSOR_OV9630, SENSOR_MI0360, SENSOR_OV7660, SENSOR_MT9V111, }; for (i = 0; i < ARRAY_SIZE(probe_order); i++) { sensor = &sensor_tb[probe_order[i]]; ucbus_write(&sd->gspca_dev, sensor->cmd, sensor->cmd_len, 8); gpio_init(sd, sensor->gpio); msleep(100); reg_r(gspca_dev, (sensor->i2c_addr << 
8) | 0x001c, 1); msleep(100); if (gspca_dev->usb_buf[0] != 0) break; } if (i >= ARRAY_SIZE(probe_order)) { pr_err("Unknown sensor\n"); gspca_dev->usb_err = -EINVAL; return; } sd->sensor = probe_order[i]; switch (sd->sensor) { case SENSOR_OV7660: case SENSOR_OV9630: pr_err("Sensor %s not yet treated\n", sensor_tb[sd->sensor].name); gspca_dev->usb_err = -EINVAL; break; } } static void mt9v111_init(struct gspca_dev *gspca_dev) { int i, nwait; static const u8 cmd_001b[] = { 0x00, 0x3b, 0xf6, 0x01, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cmd_011b[][7] = { {0x10, 0x01, 0x66, 0x08, 0x00, 0x00, 0x00}, {0x01, 0x00, 0x1a, 0x04, 0x00, 0x00, 0x00}, {0x20, 0x00, 0x10, 0x04, 0x00, 0x00, 0x00}, {0x02, 0x01, 0xae, 0x01, 0x00, 0x00, 0x00}, }; reg_wb(gspca_dev, 0x001b, 0x0000, cmd_001b, sizeof cmd_001b); for (i = 0; i < ARRAY_SIZE(cmd_011b); i++) { reg_wb(gspca_dev, 0x001b, 0x0000, cmd_011b[i], ARRAY_SIZE(cmd_011b[0])); msleep(400); nwait = 20; for (;;) { reg_r(gspca_dev, 0x031b, 1); if (gspca_dev->usb_buf[0] == 0 || gspca_dev->usb_err != 0) break; if (--nwait < 0) { PDEBUG(D_PROBE, "mt9v111_init timeout"); gspca_dev->usb_err = -ETIME; return; } msleep(50); } } } static void global_init(struct sd *sd, int first_time) { switch (sd->sensor) { case SENSOR_ICX098BQ: if (first_time) ucbus_write(&sd->gspca_dev, icx098bq_start_0, 8, 8); gpio_init(sd, sensor_tb[sd->sensor].gpio); break; case SENSOR_LZ24BP: if (sd->type != Creative_live_motion) gpio_set(sd, SQ930_GPIO_EXTRA1, 0x00ff); else gpio_set(sd, 0, 0x00ff); msleep(50); if (first_time) ucbus_write(&sd->gspca_dev, lz24bp_start_0, 8, 8); gpio_init(sd, sensor_tb[sd->sensor].gpio); break; case SENSOR_MI0360: if (first_time) ucbus_write(&sd->gspca_dev, mi0360_start_0, ARRAY_SIZE(mi0360_start_0), 8); gpio_init(sd, sensor_tb[sd->sensor].gpio); gpio_set(sd, SQ930_GPIO_EXTRA2, SQ930_GPIO_EXTRA2); break; default: /* case SENSOR_MT9V111: */ if (first_time) mt9v111_init(&sd->gspca_dev); else gpio_init(sd, 
sensor_tb[sd->sensor].gpio); break; } } static void lz24bp_ppl(struct sd *sd, u16 ppl) { struct ucbus_write_cmd cmds[2] = { {0xf810, ppl >> 8}, {0xf811, ppl} }; ucbus_write(&sd->gspca_dev, cmds, ARRAY_SIZE(cmds), 2); } static void setexposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, integclks, intstartclk, frameclks, min_frclk; const struct sensor_s *sensor; u16 cmd; u8 buf[15]; integclks = sd->expo; i = 0; cmd = SQ930_CTRL_SET_EXPOSURE; switch (sd->sensor) { case SENSOR_ICX098BQ: /* ccd */ case SENSOR_LZ24BP: min_frclk = sd->sensor == SENSOR_ICX098BQ ? 0x210 : 0x26f; if (integclks >= min_frclk) { intstartclk = 0; frameclks = integclks; } else { intstartclk = min_frclk - integclks; frameclks = min_frclk; } buf[i++] = intstartclk >> 8; buf[i++] = intstartclk; buf[i++] = frameclks >> 8; buf[i++] = frameclks; buf[i++] = sd->gain; break; default: /* cmos */ /* case SENSOR_MI0360: */ /* case SENSOR_MT9V111: */ cmd |= 0x0100; sensor = &sensor_tb[sd->sensor]; buf[i++] = sensor->i2c_addr; /* i2c_slave_addr */ buf[i++] = 0x08; /* 2 * ni2c */ buf[i++] = 0x09; /* reg = shutter width */ buf[i++] = integclks >> 8; /* val H */ buf[i++] = sensor->i2c_dum; buf[i++] = integclks; /* val L */ buf[i++] = 0x35; /* reg = global gain */ buf[i++] = 0x00; /* val H */ buf[i++] = sensor->i2c_dum; buf[i++] = 0x80 + sd->gain / 2; /* val L */ buf[i++] = 0x00; buf[i++] = 0x00; buf[i++] = 0x00; buf[i++] = 0x00; buf[i++] = 0x83; break; } reg_wb(gspca_dev, cmd, 0, buf, i); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam = &gspca_dev->cam; sd->sensor = id->driver_info >> 8; sd->type = id->driver_info; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); cam->bulk = 1; sd->gain = GAIN_DEF; sd->expo = EXPO_DEF; return 0; } /* this function is called at probe and resume time */ static int 
sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->gpio[0] = sd->gpio[1] = 0xff; /* force gpio rewrite */ /*fixme: is this needed for icx098bp and mi0360? if (sd->sensor != SENSOR_LZ24BP) reg_w(gspca_dev, SQ930_CTRL_RESET, 0x0000); */ reg_r(gspca_dev, SQ930_CTRL_GET_DEV_INFO, 8); if (gspca_dev->usb_err < 0) return gspca_dev->usb_err; /* it returns: * 03 00 12 93 0b f6 c9 00 live! ultra * 03 00 07 93 0b f6 ca 00 live! ultra for notebook * 03 00 12 93 0b fe c8 00 Trust WB-3500T * 02 00 06 93 0b fe c8 00 Joy-IT 318S * 03 00 12 93 0b f6 cf 00 icam tracer - sensor icx098bq * 02 00 12 93 0b fe cf 00 ProQ Motion Webcam * * byte * 0: 02 = usb 1.0 (12Mbit) / 03 = usb2.0 (480Mbit) * 1: 00 * 2: 06 / 07 / 12 = mode webcam? firmware?? * 3: 93 chip = 930b (930b or 930c) * 4: 0b * 5: f6 = cdd (icx098bq, lz24bp) / fe or de = cmos (i2c) (other sensors) * 6: c8 / c9 / ca / cf = mode webcam?, sensor? webcam? * 7: 00 */ PDEBUG(D_PROBE, "info: %02x %02x %02x %02x %02x %02x %02x %02x", gspca_dev->usb_buf[0], gspca_dev->usb_buf[1], gspca_dev->usb_buf[2], gspca_dev->usb_buf[3], gspca_dev->usb_buf[4], gspca_dev->usb_buf[5], gspca_dev->usb_buf[6], gspca_dev->usb_buf[7]); bridge_init(sd); if (sd->sensor == SENSOR_MI0360) { /* no sensor probe for icam tracer */ if (gspca_dev->usb_buf[5] == 0xf6) /* if ccd */ sd->sensor = SENSOR_ICX098BQ; else cmos_probe(gspca_dev); } if (gspca_dev->usb_err >= 0) { PDEBUG(D_PROBE, "Sensor %s", sensor_tb[sd->sensor].name); global_init(sd, 1); } return gspca_dev->usb_err; } /* send the start/stop commands to the webcam */ static void send_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const struct cap_s *cap; int mode; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; cap = &capconfig[sd->sensor][mode]; reg_wb(gspca_dev, 0x0900 | SQ930_CTRL_CAP_START, 0x0a00 | cap->cc_sizeid, cap->cc_bytes, 32); } static void send_stop(struct gspca_dev *gspca_dev) { reg_w(gspca_dev, SQ930_CTRL_CAP_STOP, 0); 
} /* function called at start time before URB creation */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dev->cam.bulk_nurbs = 1; /* there must be one URB only */ sd->do_ctrl = 0; gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8; return 0; } /* start the capture */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode; bridge_init(sd); global_init(sd, 0); msleep(100); switch (sd->sensor) { case SENSOR_ICX098BQ: ucbus_write(gspca_dev, icx098bq_start_0, ARRAY_SIZE(icx098bq_start_0), 8); ucbus_write(gspca_dev, icx098bq_start_1, ARRAY_SIZE(icx098bq_start_1), 5); ucbus_write(gspca_dev, icx098bq_start_2, ARRAY_SIZE(icx098bq_start_2), 6); msleep(50); /* 1st start */ send_start(gspca_dev); gpio_set(sd, SQ930_GPIO_EXTRA2 | SQ930_GPIO_RSTBAR, 0x00ff); msleep(70); reg_w(gspca_dev, SQ930_CTRL_CAP_STOP, 0x0000); gpio_set(sd, 0x7f, 0x00ff); /* 2nd start */ send_start(gspca_dev); gpio_set(sd, SQ930_GPIO_EXTRA2 | SQ930_GPIO_RSTBAR, 0x00ff); goto out; case SENSOR_LZ24BP: ucbus_write(gspca_dev, lz24bp_start_0, ARRAY_SIZE(lz24bp_start_0), 8); if (sd->type != Creative_live_motion) ucbus_write(gspca_dev, lz24bp_start_1_gen, ARRAY_SIZE(lz24bp_start_1_gen), 5); else ucbus_write(gspca_dev, lz24bp_start_1_clm, ARRAY_SIZE(lz24bp_start_1_clm), 5); ucbus_write(gspca_dev, lz24bp_start_2, ARRAY_SIZE(lz24bp_start_2), 6); mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; lz24bp_ppl(sd, mode == 1 ? 
0x0564 : 0x0310); msleep(10); break; case SENSOR_MI0360: ucbus_write(gspca_dev, mi0360_start_0, ARRAY_SIZE(mi0360_start_0), 8); i2c_write(sd, mi0360_init_23, ARRAY_SIZE(mi0360_init_23)); i2c_write(sd, mi0360_init_24, ARRAY_SIZE(mi0360_init_24)); i2c_write(sd, mi0360_init_25, ARRAY_SIZE(mi0360_init_25)); ucbus_write(gspca_dev, mi0360_start_1, ARRAY_SIZE(mi0360_start_1), 5); i2c_write(sd, mi0360_start_2, ARRAY_SIZE(mi0360_start_2)); i2c_write(sd, mi0360_start_3, ARRAY_SIZE(mi0360_start_3)); /* 1st start */ send_start(gspca_dev); msleep(60); send_stop(gspca_dev); i2c_write(sd, mi0360_start_4, ARRAY_SIZE(mi0360_start_4)); break; default: /* case SENSOR_MT9V111: */ ucbus_write(gspca_dev, mi0360_start_0, ARRAY_SIZE(mi0360_start_0), 8); i2c_write(sd, mt9v111_init_0, ARRAY_SIZE(mt9v111_init_0)); i2c_write(sd, mt9v111_init_1, ARRAY_SIZE(mt9v111_init_1)); i2c_write(sd, mt9v111_init_2, ARRAY_SIZE(mt9v111_init_2)); ucbus_write(gspca_dev, mt9v111_start_1, ARRAY_SIZE(mt9v111_start_1), 5); i2c_write(sd, mt9v111_init_3, ARRAY_SIZE(mt9v111_init_3)); i2c_write(sd, mt9v111_init_4, ARRAY_SIZE(mt9v111_init_4)); break; } send_start(gspca_dev); out: msleep(1000); if (sd->sensor == SENSOR_MT9V111) gpio_set(sd, SQ930_GPIO_DFL_LED, SQ930_GPIO_DFL_LED); sd->do_ctrl = 1; /* set the exposure */ return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_MT9V111) gpio_set(sd, 0, SQ930_GPIO_DFL_LED); send_stop(gspca_dev); } /* function called when the application gets a new frame */ /* It sets the exposure if required and restart the bulk transfer. 
*/ static void sd_dq_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; if (!sd->do_ctrl || gspca_dev->cam.bulk_nurbs != 0) return; sd->do_ctrl = 0; setexposure(gspca_dev); gspca_dev->cam.bulk_nurbs = 1; ret = usb_submit_urb(gspca_dev->urb[0], GFP_ATOMIC); if (ret < 0) pr_err("sd_dq_callback() err %d\n", ret); /* wait a little time, otherwise the webcam crashes */ msleep(100); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; if (sd->do_ctrl) gspca_dev->cam.bulk_nurbs = 0; gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, INTER_PACKET, data, len - 8); gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->gain = val; if (gspca_dev->streaming) sd->do_ctrl = 1; return 0; } static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->gain; return 0; } static int sd_setexpo(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->expo = val; if (gspca_dev->streaming) sd->do_ctrl = 1; return 0; } static int sd_getexpo(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->expo; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = sd_dq_callback, }; /* Table of supported USB devices */ #define ST(sensor, type) \ .driver_info = (SENSOR_ ## sensor << 8) \ | (type) static const struct usb_device_id device_table[] = { {USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)}, {USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)}, {USB_DEVICE(0x041e, 0x403d), 
ST(LZ24BP, 0)}, {USB_DEVICE(0x041e, 0x4041), ST(LZ24BP, Creative_live_motion)}, {USB_DEVICE(0x2770, 0x930b), ST(MI0360, 0)}, {USB_DEVICE(0x2770, 0x930c), ST(MI0360, 0)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
cphelps76/DEMENTED_kernel_grouper
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
5469
16996
/******************************************************************************* * This file contains main functions related to iSCSI DataSequenceInOrder=No * and DataPDUInOrder=No. * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ******************************************************************************/ #include <linux/slab.h> #include <linux/random.h> #include "iscsi_target_core.h" #include "iscsi_target_util.h" #include "iscsi_target_seq_pdu_list.h" #define OFFLOAD_BUF_SIZE 32768 void iscsit_dump_seq_list(struct iscsi_cmd *cmd) { int i; struct iscsi_seq *seq; pr_debug("Dumping Sequence List for ITT: 0x%08x:\n", cmd->init_task_tag); for (i = 0; i < cmd->seq_count; i++) { seq = &cmd->seq_list[i]; pr_debug("i: %d, pdu_start: %d, pdu_count: %d," " offset: %d, xfer_len: %d, seq_send_order: %d," " seq_no: %d\n", i, seq->pdu_start, seq->pdu_count, seq->offset, seq->xfer_len, seq->seq_send_order, seq->seq_no); } } void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) { int i; struct iscsi_pdu *pdu; pr_debug("Dumping PDU List for ITT: 0x%08x:\n", cmd->init_task_tag); for (i = 0; i < cmd->pdu_count; i++) { pdu = &cmd->pdu_list[i]; pr_debug("i: %d, offset: %d, length: %d," " pdu_send_order: %d, seq_no: %d\n", i, pdu->offset, pdu->length, pdu->pdu_send_order, pdu->seq_no); } } static void iscsit_ordered_seq_lists( struct iscsi_cmd *cmd, u8 type) 
{ u32 i, seq_count = 0; for (i = 0; i < cmd->seq_count; i++) { if (cmd->seq_list[i].type != SEQTYPE_NORMAL) continue; cmd->seq_list[i].seq_send_order = seq_count++; } } static void iscsit_ordered_pdu_lists( struct iscsi_cmd *cmd, u8 type) { u32 i, pdu_send_order = 0, seq_no = 0; for (i = 0; i < cmd->pdu_count; i++) { redo: if (cmd->pdu_list[i].seq_no == seq_no) { cmd->pdu_list[i].pdu_send_order = pdu_send_order++; continue; } seq_no++; pdu_send_order = 0; goto redo; } } /* * Generate count random values into array. * Use 0x80000000 to mark generates valued in array[]. */ static void iscsit_create_random_array(u32 *array, u32 count) { int i, j, k; if (count == 1) { array[0] = 0; return; } for (i = 0; i < count; i++) { redo: get_random_bytes(&j, sizeof(u32)); j = (1 + (int) (9999 + 1) - j) % count; for (k = 0; k < i + 1; k++) { j |= 0x80000000; if ((array[k] & 0x80000000) && (array[k] == j)) goto redo; } array[i] = j; } for (i = 0; i < count; i++) array[i] &= ~0x80000000; } static int iscsit_randomize_pdu_lists( struct iscsi_cmd *cmd, u8 type) { int i = 0; u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0; for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) { redo: if (cmd->pdu_list[pdu_count].seq_no == seq_no) { seq_count++; continue; } array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory" " for random array.\n"); return -1; } iscsit_create_random_array(array, seq_count); for (i = 0; i < seq_count; i++) cmd->pdu_list[seq_offset+i].pdu_send_order = array[i]; kfree(array); seq_offset += seq_count; seq_count = 0; seq_no++; goto redo; } if (seq_count) { array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory for" " random array.\n"); return -1; } iscsit_create_random_array(array, seq_count); for (i = 0; i < seq_count; i++) cmd->pdu_list[seq_offset+i].pdu_send_order = array[i]; kfree(array); } return 0; } static int iscsit_randomize_seq_lists( struct 
iscsi_cmd *cmd, u8 type) { int i, j = 0; u32 *array, seq_count = cmd->seq_count; if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED)) seq_count--; else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED) seq_count -= 2; if (!seq_count) return 0; array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL); if (!array) { pr_err("Unable to allocate memory for random array.\n"); return -1; } iscsit_create_random_array(array, seq_count); for (i = 0; i < cmd->seq_count; i++) { if (cmd->seq_list[i].type != SEQTYPE_NORMAL) continue; cmd->seq_list[i].seq_send_order = array[j++]; } kfree(array); return 0; } static void iscsit_determine_counts_for_list( struct iscsi_cmd *cmd, struct iscsi_build_list *bl, u32 *seq_count, u32 *pdu_count) { int check_immediate = 0; u32 burstlength = 0, offset = 0; u32 unsolicited_data_length = 0; struct iscsi_conn *conn = cmd->conn; if ((bl->type == PDULIST_IMMEDIATE) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) check_immediate = 1; if ((bl->type == PDULIST_UNSOLICITED) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) unsolicited_data_length = (cmd->data_length > conn->sess->sess_ops->FirstBurstLength) ? 
conn->sess->sess_ops->FirstBurstLength : cmd->data_length; while (offset < cmd->data_length) { *pdu_count += 1; if (check_immediate) { check_immediate = 0; offset += bl->immediate_data_length; *seq_count += 1; if (unsolicited_data_length) unsolicited_data_length -= bl->immediate_data_length; continue; } if (unsolicited_data_length > 0) { if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= cmd->data_length) { unsolicited_data_length -= (cmd->data_length - offset); offset += (cmd->data_length - offset); continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= conn->sess->sess_ops->FirstBurstLength) { unsolicited_data_length -= (conn->sess->sess_ops->FirstBurstLength - offset); offset += (conn->sess->sess_ops->FirstBurstLength - offset); burstlength = 0; *seq_count += 1; continue; } offset += conn->conn_ops->MaxRecvDataSegmentLength; unsolicited_data_length -= conn->conn_ops->MaxRecvDataSegmentLength; continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= cmd->data_length) { offset += (cmd->data_length - offset); continue; } if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= conn->sess->sess_ops->MaxBurstLength) { offset += (conn->sess->sess_ops->MaxBurstLength - burstlength); burstlength = 0; *seq_count += 1; continue; } burstlength += conn->conn_ops->MaxRecvDataSegmentLength; offset += conn->conn_ops->MaxRecvDataSegmentLength; } } /* * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No * and DataPDUInOrder=No. 
*/ static int iscsit_build_pdu_and_seq_list( struct iscsi_cmd *cmd, struct iscsi_build_list *bl) { int check_immediate = 0, datapduinorder, datasequenceinorder; u32 burstlength = 0, offset = 0, i = 0; u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0; struct iscsi_conn *conn = cmd->conn; struct iscsi_pdu *pdu = cmd->pdu_list; struct iscsi_seq *seq = cmd->seq_list; datapduinorder = conn->sess->sess_ops->DataPDUInOrder; datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder; if ((bl->type == PDULIST_IMMEDIATE) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) check_immediate = 1; if ((bl->type == PDULIST_UNSOLICITED) || (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED)) unsolicited_data_length = (cmd->data_length > conn->sess->sess_ops->FirstBurstLength) ? conn->sess->sess_ops->FirstBurstLength : cmd->data_length; while (offset < cmd->data_length) { pdu_count++; if (!datapduinorder) { pdu[i].offset = offset; pdu[i].seq_no = seq_no; } if (!datasequenceinorder && (pdu_count == 1)) { seq[seq_no].pdu_start = i; seq[seq_no].seq_no = seq_no; seq[seq_no].offset = offset; seq[seq_no].orig_offset = offset; } if (check_immediate) { check_immediate = 0; if (!datapduinorder) { pdu[i].type = PDUTYPE_IMMEDIATE; pdu[i++].length = bl->immediate_data_length; } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_IMMEDIATE; seq[seq_no].pdu_count = 1; seq[seq_no].xfer_len = bl->immediate_data_length; } offset += bl->immediate_data_length; pdu_count = 0; seq_no++; if (unsolicited_data_length) unsolicited_data_length -= bl->immediate_data_length; continue; } if (unsolicited_data_length > 0) { if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= cmd->data_length) { if (!datapduinorder) { pdu[i].type = PDUTYPE_UNSOLICITED; pdu[i].length = (cmd->data_length - offset); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_UNSOLICITED; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + (cmd->data_length - offset)); } unsolicited_data_length 
-= (cmd->data_length - offset); offset += (cmd->data_length - offset); continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= conn->sess->sess_ops->FirstBurstLength) { if (!datapduinorder) { pdu[i].type = PDUTYPE_UNSOLICITED; pdu[i++].length = (conn->sess->sess_ops->FirstBurstLength - offset); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_UNSOLICITED; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + (conn->sess->sess_ops->FirstBurstLength - offset)); } unsolicited_data_length -= (conn->sess->sess_ops->FirstBurstLength - offset); offset += (conn->sess->sess_ops->FirstBurstLength - offset); burstlength = 0; pdu_count = 0; seq_no++; continue; } if (!datapduinorder) { pdu[i].type = PDUTYPE_UNSOLICITED; pdu[i++].length = conn->conn_ops->MaxRecvDataSegmentLength; } burstlength += conn->conn_ops->MaxRecvDataSegmentLength; offset += conn->conn_ops->MaxRecvDataSegmentLength; unsolicited_data_length -= conn->conn_ops->MaxRecvDataSegmentLength; continue; } if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >= cmd->data_length) { if (!datapduinorder) { pdu[i].type = PDUTYPE_NORMAL; pdu[i].length = (cmd->data_length - offset); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_NORMAL; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + (cmd->data_length - offset)); } offset += (cmd->data_length - offset); continue; } if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >= conn->sess->sess_ops->MaxBurstLength) { if (!datapduinorder) { pdu[i].type = PDUTYPE_NORMAL; pdu[i++].length = (conn->sess->sess_ops->MaxBurstLength - burstlength); } if (!datasequenceinorder) { seq[seq_no].type = SEQTYPE_NORMAL; seq[seq_no].pdu_count = pdu_count; seq[seq_no].xfer_len = (burstlength + (conn->sess->sess_ops->MaxBurstLength - burstlength)); } offset += (conn->sess->sess_ops->MaxBurstLength - burstlength); burstlength = 0; pdu_count = 0; seq_no++; continue; } if (!datapduinorder) { pdu[i].type = 
PDUTYPE_NORMAL; pdu[i++].length = conn->conn_ops->MaxRecvDataSegmentLength; } burstlength += conn->conn_ops->MaxRecvDataSegmentLength; offset += conn->conn_ops->MaxRecvDataSegmentLength; } if (!datasequenceinorder) { if (bl->data_direction & ISCSI_PDU_WRITE) { if (bl->randomize & RANDOM_R2T_OFFSETS) { if (iscsit_randomize_seq_lists(cmd, bl->type) < 0) return -1; } else iscsit_ordered_seq_lists(cmd, bl->type); } else if (bl->data_direction & ISCSI_PDU_READ) { if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) { if (iscsit_randomize_seq_lists(cmd, bl->type) < 0) return -1; } else iscsit_ordered_seq_lists(cmd, bl->type); } #if 0 iscsit_dump_seq_list(cmd); #endif } if (!datapduinorder) { if (bl->data_direction & ISCSI_PDU_WRITE) { if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) { if (iscsit_randomize_pdu_lists(cmd, bl->type) < 0) return -1; } else iscsit_ordered_pdu_lists(cmd, bl->type); } else if (bl->data_direction & ISCSI_PDU_READ) { if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) { if (iscsit_randomize_pdu_lists(cmd, bl->type) < 0) return -1; } else iscsit_ordered_pdu_lists(cmd, bl->type); } #if 0 iscsit_dump_pdu_list(cmd); #endif } return 0; } /* * Only called while DataSequenceInOrder=No or DataPDUInOrder=No. 
*/ int iscsit_do_build_list( struct iscsi_cmd *cmd, struct iscsi_build_list *bl) { u32 pdu_count = 0, seq_count = 1; struct iscsi_conn *conn = cmd->conn; struct iscsi_pdu *pdu = NULL; struct iscsi_seq *seq = NULL; iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count); if (!conn->sess->sess_ops->DataSequenceInOrder) { seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC); if (!seq) { pr_err("Unable to allocate struct iscsi_seq list\n"); return -1; } cmd->seq_list = seq; cmd->seq_count = seq_count; } if (!conn->sess->sess_ops->DataPDUInOrder) { pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC); if (!pdu) { pr_err("Unable to allocate struct iscsi_pdu list.\n"); kfree(seq); return -1; } cmd->pdu_list = pdu; cmd->pdu_count = pdu_count; } return iscsit_build_pdu_and_seq_list(cmd, bl); } struct iscsi_pdu *iscsit_get_pdu_holder( struct iscsi_cmd *cmd, u32 offset, u32 length) { u32 i; struct iscsi_pdu *pdu = NULL; if (!cmd->pdu_list) { pr_err("struct iscsi_cmd->pdu_list is NULL!\n"); return NULL; } pdu = &cmd->pdu_list[0]; for (i = 0; i < cmd->pdu_count; i++) if ((pdu[i].offset == offset) && (pdu[i].length == length)) return &pdu[i]; pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:" " %u, Length: %u\n", cmd->init_task_tag, offset, length); return NULL; } struct iscsi_pdu *iscsit_get_pdu_holder_for_seq( struct iscsi_cmd *cmd, struct iscsi_seq *seq) { u32 i; struct iscsi_conn *conn = cmd->conn; struct iscsi_pdu *pdu = NULL; if (!cmd->pdu_list) { pr_err("struct iscsi_cmd->pdu_list is NULL!\n"); return NULL; } if (conn->sess->sess_ops->DataSequenceInOrder) { redo: pdu = &cmd->pdu_list[cmd->pdu_start]; for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) { #if 0 pr_debug("pdu[i].seq_no: %d, pdu[i].pdu" "_send_order: %d, pdu[i].offset: %d," " pdu[i].length: %d\n", pdu[i].seq_no, pdu[i].pdu_send_order, pdu[i].offset, pdu[i].length); #endif if (pdu[i].pdu_send_order == cmd->pdu_send_order) { cmd->pdu_send_order++; return &pdu[i]; } } 
cmd->pdu_start += cmd->pdu_send_order; cmd->pdu_send_order = 0; cmd->seq_no++; if (cmd->pdu_start < cmd->pdu_count) goto redo; pr_err("Command ITT: 0x%08x unable to locate" " struct iscsi_pdu for cmd->pdu_send_order: %u.\n", cmd->init_task_tag, cmd->pdu_send_order); return NULL; } else { if (!seq) { pr_err("struct iscsi_seq is NULL!\n"); return NULL; } #if 0 pr_debug("seq->pdu_start: %d, seq->pdu_count: %d," " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count, seq->seq_no); #endif pdu = &cmd->pdu_list[seq->pdu_start]; if (seq->pdu_send_order == seq->pdu_count) { pr_err("Command ITT: 0x%08x seq->pdu_send" "_order: %u equals seq->pdu_count: %u\n", cmd->init_task_tag, seq->pdu_send_order, seq->pdu_count); return NULL; } for (i = 0; i < seq->pdu_count; i++) { if (pdu[i].pdu_send_order == seq->pdu_send_order) { seq->pdu_send_order++; return &pdu[i]; } } pr_err("Command ITT: 0x%08x unable to locate iscsi" "_pdu_t for seq->pdu_send_order: %u.\n", cmd->init_task_tag, seq->pdu_send_order); return NULL; } return NULL; } struct iscsi_seq *iscsit_get_seq_holder( struct iscsi_cmd *cmd, u32 offset, u32 length) { u32 i; if (!cmd->seq_list) { pr_err("struct iscsi_cmd->seq_list is NULL!\n"); return NULL; } for (i = 0; i < cmd->seq_count; i++) { #if 0 pr_debug("seq_list[i].orig_offset: %d, seq_list[i]." "xfer_len: %d, seq_list[i].seq_no %u\n", cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len, cmd->seq_list[i].seq_no); #endif if ((cmd->seq_list[i].orig_offset + cmd->seq_list[i].xfer_len) >= (offset + length)) return &cmd->seq_list[i]; } pr_err("Unable to locate Sequence holder for ITT: 0x%08x," " Offset: %u, Length: %u\n", cmd->init_task_tag, offset, length); return NULL; }
gpl-2.0
upndwn4par/android_kernel_htc_msm8974
arch/avr32/kernel/kprobes.c
12125
6189
/*
 *  Kernel Probes (KProbes)
 *
 *  Copyright (C) 2005-2006 Atmel Corporation
 *
 * Based on arch/ppc64/kernel/kprobes.c
 *  Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <asm/cacheflush.h>
#include <linux/kdebug.h>
#include <asm/ocd.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
/* Single global state — this port does not track per-CPU kprobe status. */
static unsigned long kprobe_status;
static struct pt_regs jprobe_saved_regs;

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

/*
 * Validate and prepare a kprobe: the probe address must be halfword
 * aligned (AVR32 instructions are 16-bit aligned); save a copy of the
 * original instruction and its opcode so it can be restored later.
 * Returns 0 on success, -EINVAL for an unaligned address.
 */
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;

	if ((unsigned long)p->addr & 0x01) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	}

	/* XXX: Might be a good idea to check if p->addr is a valid
	 * kernel address as well... */

	if (!ret) {
		pr_debug("copy kprobe at %p\n", p->addr);
		memcpy(p->ainsn.insn, p->addr,
		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
	}

	return ret;
}

/*
 * Arm the probe: enable the On-Chip Debug unit, patch a breakpoint
 * instruction over the probed location and flush the icache so the CPU
 * sees the modification.
 */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	pr_debug("arming kprobe at %p\n", p->addr);
	ocd_enable(NULL);
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

/*
 * Disarm the probe: restore the saved opcode, flush icache and release
 * the OCD unit.
 */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	pr_debug("disarming kprobe at %p\n", p->addr);
	ocd_disable(NULL);
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

/*
 * Set the OCD single-step bit and temporarily restore the original
 * instruction so it executes in place (it may be PC-relative).  Must be
 * entered in debug mode (SR_D set).
 */
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("preparing to singlestep over %p (PC=%08lx)\n",
		 p->addr, regs->pc);

	BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D)));

	dc = ocd_read(DC);
	dc |= 1 << OCD_DC_SS_BIT;
	ocd_write(DC, dc);

	/*
	 * We must run the instruction from its original location
	 * since it may actually reference PC.
	 *
	 * TODO: Do the instruction replacement directly in icache.
	 */
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

/*
 * Undo prepare_singlestep() after the stepped instruction completed:
 * clear the single-step bit and re-insert the breakpoint so the probe
 * stays armed.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("resuming execution at PC=%08lx\n", regs->pc);

	dc = ocd_read(DC);
	dc &= ~(1 << OCD_DC_SS_BIT);
	ocd_write(DC, dc);

	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

/* Record the kprobe currently being handled on this CPU. */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__get_cpu_var(current_kprobe) = p;
}

/*
 * Breakpoint exception handler.  Looks up the kprobe at the faulting
 * PC, runs its pre_handler and sets up single-stepping over the
 * original instruction.  Disables preemption for the whole probe
 * handling window (re-enabled in post_kprobe_handler or on the
 * no_kprobe path).  Returns 1 if the breakpoint was ours, 0 otherwise.
 * Recursive hits are not supported and only logged (see FIXMEs).
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *)regs->pc;
	int ret = 0;

	pr_debug("kprobe_handler: kprobe_running=%p\n",
		 kprobe_running());

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();

	/* Check that we're not recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				printk("FIXME: kprobe hit while single-stepping!\n");
				goto no_kprobe;
			}

			printk("FIXME: kprobe hit while handling another kprobe\n");
			goto no_kprobe;
		} else {
			p = kprobe_running();
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		goto no_kprobe;

	kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Single-step exception handler: the stepped instruction has executed.
 * Run the post_handler, re-arm the breakpoint and re-enable preemption.
 * Returns 1 if a kprobe was active, 0 otherwise.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	pr_debug("post_kprobe_handler, cur=%p\n", cur);

	if (!cur)
		return 0;

	if (cur->post_handler) {
		kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	reset_current_kprobe();
	preempt_enable_no_resched();

	return 1;
}

/*
 * Called when a fault occurs while a kprobe is active.  Gives the
 * probe's fault_handler first chance; if the fault happened during
 * single-step, tear down the step state before returning.
 * NOTE(review): 'cur' is dereferenced without a NULL check, and
 * KPROBE_HIT_SS is tested with bitwise '&' rather than '==' — both
 * match the historical upstream code; confirm before "fixing".
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();

	pr_debug("kprobe_fault_handler: trapnr=%d\n", trapnr);

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs);
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine to for handling exceptions.
 * Notifier dispatch: routes DIE_BREAKPOINT to kprobe_handler() and
 * DIE_SSTEP to post_kprobe_handler(); returns NOTIFY_STOP when the
 * event was consumed by kprobes.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	pr_debug("kprobe_exceptions_notify: val=%lu, data=%p\n",
		 val, data);

	switch (val) {
	case DIE_BREAKPOINT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	return ret;
}

/*
 * jprobe entry: save the register state and redirect the PC to the
 * jprobe's handler; longjmp_break_handler() restores the state later.
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/*
	 * TODO: We should probably save some of the stack here as
	 * well, since gcc may pass arguments on the stack for certain
	 * functions (lots of arguments, large aggregates, varargs)
	 */

	/* setup return addr to the jprobe handler routine */
	regs->pc = (unsigned long)jp->entry;
	return 1;
}

/* End of a jprobe handler: trap back into the kprobes machinery. */
void __kprobes jprobe_return(void)
{
	asm volatile("breakpoint" ::: "memory");
}

/* Restore the register state saved by setjmp_pre_handler(). */
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
	return 1;
}

/* Arch-specific kprobes init; nothing to do yet on AVR32. */
int __init arch_init_kprobes(void)
{
	/* TODO: Register kretprobe trampoline */
	return 0;
}
gpl-2.0
jeehyn/NewWorld_kernel_ef52
drivers/media/video/sn9c102/sn9c102_tas5110d.c
12893
3625
/*************************************************************************** * Plug-in for TAS5110D image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int tas5110d_init(struct sn9c102_device* cam) { int err; err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x04, 0x01}, {0x0a, 0x14}, {0x60, 0x17}, {0x06, 0x18}, {0xfb, 0x19}); err += sn9c102_i2c_write(cam, 0x9a, 0xca); return err; } static int tas5110d_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 69, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 9; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); err += sn9c102_write_reg(cam, 0x14, 0x1a); err += sn9c102_write_reg(cam, 0x0a, 0x1b); return err; } static int tas5110d_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) err += sn9c102_write_reg(cam, 0x3b, 0x19); else 
err += sn9c102_write_reg(cam, 0xfb, 0x19); return err; } static const struct sn9c102_sensor tas5110d = { .name = "TAS5110D", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102, .sysfs_ops = SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x61, .init = &tas5110d_init, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 352, .height = 288, }, .defrect = { .left = 0, .top = 0, .width = 352, .height = 288, }, }, .set_crop = &tas5110d_set_crop, .pix_format = { .width = 352, .height = 288, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &tas5110d_set_pix_format }; int sn9c102_probe_tas5110d(struct sn9c102_device* cam) { const struct usb_device_id tas5110d_id_table[] = { { USB_DEVICE(0x0c45, 0x6007), }, { } }; if (!sn9c102_match_id(cam, tas5110d_id_table)) return -ENODEV; sn9c102_attach_sensor(cam, &tas5110d); return 0; }
gpl-2.0
harunjo/galaxsih-kernel-JB-S3
drivers/video/mb862xx/mb862xxfb_accel.c
13917
8449
/* * drivers/mb862xx/mb862xxfb_accel.c * * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support * * (C) 2007 Alexander Shishkin <virtuoso@slind.org> * (C) 2009 Valentin Sitdikov <v.sitdikov@gmail.com> * (C) 2009 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/fb.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #if defined(CONFIG_OF) #include <linux/of_platform.h> #endif #include "mb862xxfb.h" #include "mb862xx_reg.h" #include "mb862xxfb_accel.h" static void mb862xxfb_write_fifo(u32 count, u32 *data, struct fb_info *info) { struct mb862xxfb_par *par = info->par; static u32 free; u32 total = 0; while (total < count) { if (free) { outreg(geo, GDC_GEO_REG_INPUT_FIFO, data[total]); total++; free--; } else { free = (u32) inreg(draw, GDC_REG_FIFO_COUNT); } } } static void mb86290fb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { __u32 cmd[6]; cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP; /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9); cmd[2] = GDC_TYPE_BLTCOPYP << 24; if (area->sx >= area->dx && area->sy >= area->dy) cmd[2] |= GDC_CMD_BLTCOPY_TOP_LEFT << 16; else if (area->sx >= area->dx && area->sy <= area->dy) cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_LEFT << 16; else if (area->sx <= area->dx && area->sy >= area->dy) cmd[2] |= GDC_CMD_BLTCOPY_TOP_RIGHT << 16; else cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_RIGHT << 16; cmd[3] = (area->sy << 16) | area->sx; cmd[4] = (area->dy << 16) | area->dx; cmd[5] = (area->height << 16) | area->width; mb862xxfb_write_fifo(6, cmd, info); } /* * Fill in the cmd array /GDC FIFO commands/ to draw a 1bit image. * Make sure cmd has enough room! 
*/ static void mb86290fb_imageblit1(u32 *cmd, u16 step, u16 dx, u16 dy, u16 width, u16 height, u32 fgcolor, u32 bgcolor, const struct fb_image *image, struct fb_info *info) { int i; unsigned const char *line; u16 bytes; /* set colors and raster operation regs */ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP; /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9); cmd[2] = (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16); cmd[3] = fgcolor; cmd[4] = (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_BACK_COLOR << 16); cmd[5] = bgcolor; i = 0; line = image->data; bytes = (image->width + 7) >> 3; /* and the image */ cmd[6] = (GDC_TYPE_DRAWBITMAPP << 24) | (GDC_CMD_BITMAP << 16) | (2 + (step * height)); cmd[7] = (dy << 16) | dx; cmd[8] = (height << 16) | width; while (i < height) { memcpy(&cmd[9 + i * step], line, step << 2); #ifdef __LITTLE_ENDIAN { int k = 0; for (k = 0; k < step; k++) cmd[9 + i * step + k] = cpu_to_be32(cmd[9 + i * step + k]); } #endif line += bytes; i++; } } /* * Fill in the cmd array /GDC FIFO commands/ to draw a 8bit image. * Make sure cmd has enough room! */ static void mb86290fb_imageblit8(u32 *cmd, u16 step, u16 dx, u16 dy, u16 width, u16 height, u32 fgcolor, u32 bgcolor, const struct fb_image *image, struct fb_info *info) { int i, j; unsigned const char *line, *ptr; u16 bytes; cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) | (GDC_CMD_BLT_DRAW << 16) | (2 + (height * step)); cmd[1] = (dy << 16) | dx; cmd[2] = (height << 16) | width; i = 0; line = ptr = image->data; bytes = image->width; while (i < height) { ptr = line; for (j = 0; j < step; j++) { cmd[3 + i * step + j] = (((u32 *) (info->pseudo_palette))[*ptr]) & 0xffff; ptr++; cmd[3 + i * step + j] |= ((((u32 *) (info-> pseudo_palette))[*ptr]) & 0xffff) << 16; ptr++; } line += bytes; i++; } } /* * Fill in the cmd array /GDC FIFO commands/ to draw a 16bit image. * Make sure cmd has enough room! 
*/ static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy, u16 width, u16 height, u32 fgcolor, u32 bgcolor, const struct fb_image *image, struct fb_info *info) { int i; unsigned const char *line; u16 bytes; i = 0; line = image->data; bytes = image->width << 1; cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) | (GDC_CMD_BLT_DRAW << 16) | (2 + step * height); cmd[1] = (dy << 16) | dx; cmd[2] = (height << 16) | width; while (i < height) { memcpy(&cmd[3 + i * step], line, step); line += bytes; i++; } } static void mb86290fb_imageblit(struct fb_info *info, const struct fb_image *image) { int mdr; u32 *cmd = NULL; void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32, const struct fb_image *, struct fb_info *) = NULL; u32 cmdlen; u32 fgcolor = 0, bgcolor = 0; u16 step; u16 width = image->width, height = image->height; u16 dx = image->dx, dy = image->dy; int x2, y2, vxres, vyres; mdr = (GDC_ROP_COPY << 9); x2 = image->dx + image->width; y2 = image->dy + image->height; vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; x2 = min(x2, vxres); y2 = min(y2, vyres); width = x2 - dx; height = y2 - dy; switch (image->depth) { case 1: step = (width + 31) >> 5; cmdlen = 9 + height * step; cmdfn = mb86290fb_imageblit1; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fgcolor = ((u32 *) (info->pseudo_palette))[image->fg_color]; bgcolor = ((u32 *) (info->pseudo_palette))[image->bg_color]; } else { fgcolor = image->fg_color; bgcolor = image->bg_color; } break; case 8: step = (width + 1) >> 1; cmdlen = 3 + height * step; cmdfn = mb86290fb_imageblit8; break; case 16: step = (width + 1) >> 1; cmdlen = 3 + height * step; cmdfn = mb86290fb_imageblit16; break; default: cfb_imageblit(info, image); return; } cmd = kmalloc(cmdlen * 4, GFP_DMA); if (!cmd) return cfb_imageblit(info, image); cmdfn(cmd, step, dx, dy, width, height, fgcolor, bgcolor, image, info); mb862xxfb_write_fifo(cmdlen, cmd, info); kfree(cmd); } static void 
mb86290fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { u32 x2, y2, vxres, vyres, height, width, fg; u32 cmd[7]; vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; if (!rect->width || !rect->height || rect->dx > vxres || rect->dy > vyres) return; /* We could use hardware clipping but on many cards you get around * hardware clipping by writing to framebuffer directly. */ x2 = rect->dx + rect->width; y2 = rect->dy + rect->height; x2 = min(x2, vxres); y2 = min(y2, vyres); width = x2 - rect->dx; height = y2 - rect->dy; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) fg = ((u32 *) (info->pseudo_palette))[rect->color]; else fg = rect->color; switch (rect->rop) { case ROP_XOR: /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_XOR << 9); break; case ROP_COPY: /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9); break; } cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP; /* cmd[1] set earlier */ cmd[2] = (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16); cmd[3] = fg; cmd[4] = (GDC_TYPE_DRAWRECTP << 24) | (GDC_CMD_BLT_FILL << 16); cmd[5] = (rect->dy << 16) | (rect->dx); cmd[6] = (height << 16) | width; mb862xxfb_write_fifo(7, cmd, info); } void mb862xxfb_init_accel(struct fb_info *info, int xres) { struct mb862xxfb_par *par = info->par; if (info->var.bits_per_pixel == 32) { info->fbops->fb_fillrect = cfb_fillrect; info->fbops->fb_copyarea = cfb_copyarea; info->fbops->fb_imageblit = cfb_imageblit; } else { outreg(disp, GC_L0EM, 3); info->fbops->fb_fillrect = mb86290fb_fillrect; info->fbops->fb_copyarea = mb86290fb_copyarea; info->fbops->fb_imageblit = mb86290fb_imageblit; } outreg(draw, GDC_REG_DRAW_BASE, 0); outreg(draw, GDC_REG_MODE_MISC, 0x8000); outreg(draw, GDC_REG_X_RESOLUTION, xres); info->flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; info->fix.accel = 0xff; /*FIXME: add right define */ } 
EXPORT_SYMBOL(mb862xxfb_init_accel); MODULE_LICENSE("GPL v2");
gpl-2.0
eldarerathis/linux-raspberrypi
drivers/usb/renesas_usbhs/mod_gadget.c
94
25128
/* * Renesas USB driver * * Copyright (C) 2011 Renesas Solutions Corp. * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include "common.h" /* * struct */ struct usbhsg_request { struct usb_request req; struct usbhs_pkt pkt; }; #define EP_NAME_SIZE 8 struct usbhsg_gpriv; struct usbhsg_uep { struct usb_ep ep; struct usbhs_pipe *pipe; char ep_name[EP_NAME_SIZE]; struct usbhsg_gpriv *gpriv; }; struct usbhsg_gpriv { struct usb_gadget gadget; struct usbhs_mod mod; struct usbhsg_uep *uep; int uep_size; struct usb_gadget_driver *driver; u32 status; #define USBHSG_STATUS_STARTED (1 << 0) #define USBHSG_STATUS_REGISTERD (1 << 1) #define USBHSG_STATUS_WEDGE (1 << 2) #define USBHSG_STATUS_SELF_POWERED (1 << 3) }; struct usbhsg_recip_handle { char *name; int (*device)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); int (*interface)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); int (*endpoint)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); }; /* * macro */ #define usbhsg_priv_to_gpriv(priv) \ container_of( \ usbhs_mod_get(priv, USBHS_GADGET), \ struct usbhsg_gpriv, mod) #define __usbhsg_for_each_uep(start, pos, g, i) \ for (i = start, pos = (g)->uep + i; \ i < (g)->uep_size; \ i++, pos = (g)->uep + i) #define usbhsg_for_each_uep(pos, 
gpriv, i) \ __usbhsg_for_each_uep(1, pos, gpriv, i) #define usbhsg_for_each_uep_with_dcp(pos, gpriv, i) \ __usbhsg_for_each_uep(0, pos, gpriv, i) #define usbhsg_gadget_to_gpriv(g)\ container_of(g, struct usbhsg_gpriv, gadget) #define usbhsg_req_to_ureq(r)\ container_of(r, struct usbhsg_request, req) #define usbhsg_ep_to_uep(e) container_of(e, struct usbhsg_uep, ep) #define usbhsg_gpriv_to_dev(gp) usbhs_priv_to_dev((gp)->mod.priv) #define usbhsg_gpriv_to_priv(gp) ((gp)->mod.priv) #define usbhsg_gpriv_to_dcp(gp) ((gp)->uep) #define usbhsg_gpriv_to_nth_uep(gp, i) ((gp)->uep + i) #define usbhsg_uep_to_gpriv(u) ((u)->gpriv) #define usbhsg_uep_to_pipe(u) ((u)->pipe) #define usbhsg_pipe_to_uep(p) ((p)->mod_private) #define usbhsg_is_dcp(u) ((u) == usbhsg_gpriv_to_dcp((u)->gpriv)) #define usbhsg_ureq_to_pkt(u) (&(u)->pkt) #define usbhsg_pkt_to_ureq(i) \ container_of(i, struct usbhsg_request, pkt) #define usbhsg_is_not_connected(gp) ((gp)->gadget.speed == USB_SPEED_UNKNOWN) /* status */ #define usbhsg_status_init(gp) do {(gp)->status = 0; } while (0) #define usbhsg_status_set(gp, b) (gp->status |= b) #define usbhsg_status_clr(gp, b) (gp->status &= ~b) #define usbhsg_status_has(gp, b) (gp->status & b) /* * queue push/pop */ static void usbhsg_queue_pop(struct usbhsg_uep *uep, struct usbhsg_request *ureq, int status) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe)); ureq->req.status = status; ureq->req.complete(&uep->ep, &ureq->req); } static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt) { struct usbhs_pipe *pipe = pkt->pipe; struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); ureq->req.actual = pkt->actual; usbhsg_queue_pop(uep, ureq, 0); } static void usbhsg_queue_push(struct usbhsg_uep *uep, struct usbhsg_request *ureq) { 
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq); struct usb_request *req = &ureq->req; req->actual = 0; req->status = -EINPROGRESS; usbhs_pkt_push(pipe, pkt, usbhsg_queue_done, req->buf, req->length, req->zero, -1); usbhs_pkt_start(pipe); dev_dbg(dev, "pipe %d : queue push (%d)\n", usbhs_pipe_number(pipe), req->length); } /* * dma map/unmap */ static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map) { struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); struct usb_request *req = &ureq->req; struct usbhs_pipe *pipe = pkt->pipe; struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); enum dma_data_direction dir; int ret = 0; dir = usbhs_pipe_is_dir_host(pipe); if (map) { /* it can not use scatter/gather */ WARN_ON(req->num_sgs); ret = usb_gadget_map_request(&gpriv->gadget, req, dir); if (ret < 0) return ret; pkt->dma = req->dma; } else { usb_gadget_unmap_request(&gpriv->gadget, req, dir); } return ret; } /* * USB_TYPE_STANDARD / clear feature functions */ static int usbhsg_recip_handler_std_control_done(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp); usbhs_dcp_control_transfer_done(pipe); return 0; } static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) { usbhs_pipe_disable(pipe); usbhs_pipe_sequence_data0(pipe); usbhs_pipe_enable(pipe); } usbhsg_recip_handler_std_control_done(priv, uep, ctrl); usbhs_pkt_start(pipe); return 0; } struct 
usbhsg_recip_handle req_clear_feature = { .name = "clear feature", .device = usbhsg_recip_handler_std_control_done, .interface = usbhsg_recip_handler_std_control_done, .endpoint = usbhsg_recip_handler_std_clear_endpoint, }; /* * USB_TYPE_STANDARD / set feature functions */ static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { switch (le16_to_cpu(ctrl->wValue)) { case USB_DEVICE_TEST_MODE: usbhsg_recip_handler_std_control_done(priv, uep, ctrl); udelay(100); usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex >> 8)); break; default: usbhsg_recip_handler_std_control_done(priv, uep, ctrl); break; } return 0; } static int usbhsg_recip_handler_std_set_endpoint(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); usbhs_pipe_stall(pipe); usbhsg_recip_handler_std_control_done(priv, uep, ctrl); return 0; } struct usbhsg_recip_handle req_set_feature = { .name = "set feature", .device = usbhsg_recip_handler_std_set_device, .interface = usbhsg_recip_handler_std_control_done, .endpoint = usbhsg_recip_handler_std_set_endpoint, }; /* * USB_TYPE_STANDARD / get status functions */ static void __usbhsg_recip_send_complete(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); /* free allocated recip-buffer/usb_request */ kfree(ureq->pkt.buf); usb_ep_free_request(ep, req); } static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv, unsigned short status) { struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usb_request *req; unsigned short *buf; /* alloc new usb_request for recip */ req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC); if (!req) { dev_err(dev, "recip request allocation fail\n"); return; } /* alloc recip data buffer */ buf = kmalloc(sizeof(*buf), GFP_ATOMIC); 
if (!buf) { usb_ep_free_request(&dcp->ep, req); dev_err(dev, "recip data allocation fail\n"); return; } /* recip data is status */ *buf = cpu_to_le16(status); /* allocated usb_request/buffer will be freed */ req->complete = __usbhsg_recip_send_complete; req->buf = buf; req->length = sizeof(*buf); req->zero = 0; /* push packet */ pipe->handler = &usbhs_fifo_pio_push_handler; usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req)); } static int usbhsg_recip_handler_std_get_device(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); unsigned short status = 0; if (usbhsg_status_has(gpriv, USBHSG_STATUS_SELF_POWERED)) status = 1 << USB_DEVICE_SELF_POWERED; __usbhsg_recip_send_status(gpriv, status); return 0; } static int usbhsg_recip_handler_std_get_interface(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); unsigned short status = 0; __usbhsg_recip_send_status(gpriv, status); return 0; } static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); unsigned short status = 0; if (usbhs_pipe_is_stall(pipe)) status = 1 << USB_ENDPOINT_HALT; __usbhsg_recip_send_status(gpriv, status); return 0; } struct usbhsg_recip_handle req_get_status = { .name = "get status", .device = usbhsg_recip_handler_std_get_device, .interface = usbhsg_recip_handler_std_get_interface, .endpoint = usbhsg_recip_handler_std_get_endpoint, }; /* * USB_TYPE handler */ static int usbhsg_recip_run_handle(struct usbhs_priv *priv, struct usbhsg_recip_handle *handler, struct usb_ctrlrequest *ctrl) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usbhsg_uep *uep; struct usbhs_pipe *pipe; int recip = 
ctrl->bRequestType & USB_RECIP_MASK; int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; int ret = 0; int (*func)(struct usbhs_priv *priv, struct usbhsg_uep *uep, struct usb_ctrlrequest *ctrl); char *msg; uep = usbhsg_gpriv_to_nth_uep(gpriv, nth); pipe = usbhsg_uep_to_pipe(uep); if (!pipe) { dev_err(dev, "wrong recip request\n"); return -EINVAL; } switch (recip) { case USB_RECIP_DEVICE: msg = "DEVICE"; func = handler->device; break; case USB_RECIP_INTERFACE: msg = "INTERFACE"; func = handler->interface; break; case USB_RECIP_ENDPOINT: msg = "ENDPOINT"; func = handler->endpoint; break; default: dev_warn(dev, "unsupported RECIP(%d)\n", recip); func = NULL; ret = -EINVAL; } if (func) { dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg); ret = func(priv, uep, ctrl); } return ret; } /* * irq functions * * it will be called from usbhs_interrupt */ static int usbhsg_irq_dev_state(struct usbhs_priv *priv, struct usbhs_irq_state *irq_state) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); gpriv->gadget.speed = usbhs_bus_get_speed(priv); dev_dbg(dev, "state = %x : speed : %d\n", usbhs_status_get_device_state(irq_state), gpriv->gadget.speed); return 0; } static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv, struct usbhs_irq_state *irq_state) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp); struct device *dev = usbhsg_gpriv_to_dev(gpriv); struct usb_ctrlrequest ctrl; struct usbhsg_recip_handle *recip_handler = NULL; int stage = usbhs_status_get_ctrl_stage(irq_state); int ret = 0; dev_dbg(dev, "stage = %d\n", stage); /* * see Manual * * "Operation" * - "Interrupt Function" * - "Control Transfer Stage Transition Interrupt" * - Fig. 
"Control Transfer Stage Transitions" */ switch (stage) { case READ_DATA_STAGE: pipe->handler = &usbhs_fifo_pio_push_handler; break; case WRITE_DATA_STAGE: pipe->handler = &usbhs_fifo_pio_pop_handler; break; case NODATA_STATUS_STAGE: pipe->handler = &usbhs_ctrl_stage_end_handler; break; default: return ret; } /* * get usb request */ usbhs_usbreq_get_val(priv, &ctrl); switch (ctrl.bRequestType & USB_TYPE_MASK) { case USB_TYPE_STANDARD: switch (ctrl.bRequest) { case USB_REQ_CLEAR_FEATURE: recip_handler = &req_clear_feature; break; case USB_REQ_SET_FEATURE: recip_handler = &req_set_feature; break; case USB_REQ_GET_STATUS: recip_handler = &req_get_status; break; } } /* * setup stage / run recip */ if (recip_handler) ret = usbhsg_recip_run_handle(priv, recip_handler, &ctrl); else ret = gpriv->driver->setup(&gpriv->gadget, &ctrl); if (ret < 0) usbhs_pipe_stall(pipe); return ret; } /* * * usb_dcp_ops * */ static int usbhsg_pipe_disable(struct usbhsg_uep *uep) { struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhs_pkt *pkt; while (1) { pkt = usbhs_pkt_pop(pipe, NULL); if (!pkt) break; usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET); } usbhs_pipe_disable(pipe); return 0; } /* * * usb_ep_ops * */ static int usbhsg_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct usbhs_pipe *pipe; int ret = -EIO; /* * if it already have pipe, * nothing to do */ if (uep->pipe) { usbhs_pipe_clear(uep->pipe); usbhs_pipe_sequence_data0(uep->pipe); return 0; } pipe = usbhs_pipe_malloc(priv, usb_endpoint_type(desc), usb_endpoint_dir_in(desc)); if (pipe) { uep->pipe = pipe; pipe->mod_private = uep; /* set epnum / maxp */ usbhs_pipe_config_update(pipe, 0, usb_endpoint_num(desc), usb_endpoint_maxp(desc)); /* * usbhs_fifo_dma_push/pop_handler try to * use dmaengine if possible. 
* It will use pio handler if impossible. */ if (usb_endpoint_dir_in(desc)) pipe->handler = &usbhs_fifo_dma_push_handler; else pipe->handler = &usbhs_fifo_dma_pop_handler; ret = 0; } return ret; } static int usbhsg_ep_disable(struct usb_ep *ep) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); usbhsg_pipe_disable(uep); uep->pipe->mod_private = NULL; uep->pipe = NULL; return 0; } static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct usbhsg_request *ureq; ureq = kzalloc(sizeof *ureq, gfp_flags); if (!ureq) return NULL; usbhs_pkt_init(usbhsg_ureq_to_pkt(ureq)); return &ureq->req; } static void usbhsg_ep_free_request(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); WARN_ON(!list_empty(&ureq->pkt.node)); kfree(ureq); } static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); /* param check */ if (usbhsg_is_not_connected(gpriv) || unlikely(!gpriv->driver) || unlikely(!pipe)) return -ESHUTDOWN; usbhsg_queue_push(uep, ureq); return 0; } static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); usbhsg_queue_pop(uep, ureq, -ECONNRESET); return 0; } static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct device *dev = usbhsg_gpriv_to_dev(gpriv); unsigned long flags; usbhsg_pipe_disable(uep); 
dev_dbg(dev, "set halt %d (pipe %d)\n", halt, usbhs_pipe_number(pipe)); /******************** spin lock ********************/ usbhs_lock(priv, flags); if (halt) usbhs_pipe_stall(pipe); else usbhs_pipe_disable(pipe); if (halt && wedge) usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE); else usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE); usbhs_unlock(priv, flags); /******************** spin unlock ******************/ return 0; } static int usbhsg_ep_set_halt(struct usb_ep *ep, int value) { return __usbhsg_ep_set_halt_wedge(ep, value, 0); } static int usbhsg_ep_set_wedge(struct usb_ep *ep) { return __usbhsg_ep_set_halt_wedge(ep, 1, 1); } static struct usb_ep_ops usbhsg_ep_ops = { .enable = usbhsg_ep_enable, .disable = usbhsg_ep_disable, .alloc_request = usbhsg_ep_alloc_request, .free_request = usbhsg_ep_free_request, .queue = usbhsg_ep_queue, .dequeue = usbhsg_ep_dequeue, .set_halt = usbhsg_ep_set_halt, .set_wedge = usbhsg_ep_set_wedge, }; /* * usb module start/end */ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct usbhs_mod *mod = usbhs_mod_get_current(priv); struct device *dev = usbhs_priv_to_dev(priv); unsigned long flags; int ret = 0; /******************** spin lock ********************/ usbhs_lock(priv, flags); usbhsg_status_set(gpriv, status); if (!(usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) && usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD))) ret = -1; /* not ready */ usbhs_unlock(priv, flags); /******************** spin unlock ********************/ if (ret < 0) return 0; /* not ready is not error */ /* * enable interrupt and systems if ready */ dev_dbg(dev, "start gadget\n"); /* * pipe initialize and enable DCP */ usbhs_pipe_init(priv, usbhsg_dma_map_ctrl); usbhs_fifo_init(priv); /* dcp init instead of usbhsg_ep_enable() */ dcp->pipe = usbhs_dcp_malloc(priv); dcp->pipe->mod_private = dcp; usbhs_pipe_config_update(dcp->pipe, 
0, 0, 64); /* * system config enble * - HI speed * - function * - usb module */ usbhs_sys_function_ctrl(priv, 1); /* * enable irq callback */ mod->irq_dev_state = usbhsg_irq_dev_state; mod->irq_ctrl_stage = usbhsg_irq_ctrl_stage; usbhs_irq_callback_update(priv, mod); return 0; } static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhs_mod *mod = usbhs_mod_get_current(priv); struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); struct device *dev = usbhs_priv_to_dev(priv); unsigned long flags; int ret = 0; /******************** spin lock ********************/ usbhs_lock(priv, flags); usbhsg_status_clr(gpriv, status); if (!usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) && !usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD)) ret = -1; /* already done */ usbhs_unlock(priv, flags); /******************** spin unlock ********************/ if (ret < 0) return 0; /* already done is not error */ /* * disable interrupt and systems if 1st try */ usbhs_fifo_quit(priv); /* disable all irq */ mod->irq_dev_state = NULL; mod->irq_ctrl_stage = NULL; usbhs_irq_callback_update(priv, mod); gpriv->gadget.speed = USB_SPEED_UNKNOWN; /* disable sys */ usbhs_sys_set_test_mode(priv, 0); usbhs_sys_function_ctrl(priv, 0); usbhsg_ep_disable(&dcp->ep); dev_dbg(dev, "stop gadget\n"); return 0; } /* * * linux usb function * */ static int usbhsg_gadget_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->setup || driver->max_speed < USB_SPEED_FULL) return -EINVAL; /* first hook up the driver ... 
*/ gpriv->driver = driver; gpriv->gadget.dev.driver = &driver->driver; return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD); } static int usbhsg_gadget_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); if (!driver || !driver->unbind) return -EINVAL; usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD); gpriv->gadget.dev.driver = NULL; gpriv->driver = NULL; return 0; } /* * usb gadget ops */ static int usbhsg_get_frame(struct usb_gadget *gadget) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); return usbhs_frame_get_num(priv); } static int usbhsg_pullup(struct usb_gadget *gadget, int is_on) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); usbhs_sys_function_pullup(priv, is_on); return 0; } static int usbhsg_set_selfpowered(struct usb_gadget *gadget, int is_self) { struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget); if (is_self) usbhsg_status_set(gpriv, USBHSG_STATUS_SELF_POWERED); else usbhsg_status_clr(gpriv, USBHSG_STATUS_SELF_POWERED); return 0; } static struct usb_gadget_ops usbhsg_gadget_ops = { .get_frame = usbhsg_get_frame, .set_selfpowered = usbhsg_set_selfpowered, .udc_start = usbhsg_gadget_start, .udc_stop = usbhsg_gadget_stop, .pullup = usbhsg_pullup, }; static int usbhsg_start(struct usbhs_priv *priv) { return usbhsg_try_start(priv, USBHSG_STATUS_STARTED); } static int usbhsg_stop(struct usbhs_priv *priv) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); /* cable disconnect */ if (gpriv->driver && gpriv->driver->disconnect) gpriv->driver->disconnect(&gpriv->gadget); return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); } static void usbhs_mod_gadget_release(struct device *pdev) { /* do nothing */ } int usbhs_mod_gadget_probe(struct usbhs_priv *priv) { struct usbhsg_gpriv 
*gpriv; struct usbhsg_uep *uep; struct device *dev = usbhs_priv_to_dev(priv); int pipe_size = usbhs_get_dparam(priv, pipe_size); int i; int ret; gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL); if (!gpriv) { dev_err(dev, "Could not allocate gadget priv\n"); return -ENOMEM; } uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL); if (!uep) { dev_err(dev, "Could not allocate ep\n"); ret = -ENOMEM; goto usbhs_mod_gadget_probe_err_gpriv; } /* * CAUTION * * There is no guarantee that it is possible to access usb module here. * Don't accesses to it. * The accesse will be enable after "usbhsg_start" */ /* * register itself */ usbhs_mod_register(priv, &gpriv->mod, USBHS_GADGET); /* init gpriv */ gpriv->mod.name = "gadget"; gpriv->mod.start = usbhsg_start; gpriv->mod.stop = usbhsg_stop; gpriv->uep = uep; gpriv->uep_size = pipe_size; usbhsg_status_init(gpriv); /* * init gadget */ dev_set_name(&gpriv->gadget.dev, "gadget"); gpriv->gadget.dev.parent = dev; gpriv->gadget.dev.release = usbhs_mod_gadget_release; gpriv->gadget.name = "renesas_usbhs_udc"; gpriv->gadget.ops = &usbhsg_gadget_ops; gpriv->gadget.max_speed = USB_SPEED_HIGH; ret = device_register(&gpriv->gadget.dev); if (ret < 0) goto err_add_udc; INIT_LIST_HEAD(&gpriv->gadget.ep_list); /* * init usb_ep */ usbhsg_for_each_uep_with_dcp(uep, gpriv, i) { uep->gpriv = gpriv; uep->pipe = NULL; snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i); uep->ep.name = uep->ep_name; uep->ep.ops = &usbhsg_ep_ops; INIT_LIST_HEAD(&uep->ep.ep_list); /* init DCP */ if (usbhsg_is_dcp(uep)) { gpriv->gadget.ep0 = &uep->ep; uep->ep.maxpacket = 64; } /* init normal pipe */ else { uep->ep.maxpacket = 512; list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list); } } ret = usb_add_gadget_udc(dev, &gpriv->gadget); if (ret) goto err_register; dev_info(dev, "gadget probed\n"); return 0; err_register: device_unregister(&gpriv->gadget.dev); err_add_udc: kfree(gpriv->uep); usbhs_mod_gadget_probe_err_gpriv: kfree(gpriv); return ret; } 
/*
 * usbhs_mod_gadget_remove - tear down the gadget-mode module state
 * @priv: USBHS controller private data
 *
 * Counterpart to usbhs_mod_gadget_probe(): releases everything probe
 * allocated/registered, in reverse order of setup.
 */
void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
{
	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);

	/*
	 * Unregister from the UDC core first so no gadget driver can be
	 * bound/used while the device below it is being torn down.
	 */
	usb_del_gadget_udc(&gpriv->gadget);

	/* Drop the gadget device registered in probe (device_register()). */
	device_unregister(&gpriv->gadget.dev);

	/* Free the endpoint array, then the gadget private data itself. */
	kfree(gpriv->uep);
	kfree(gpriv);
}
gpl-2.0
notro/linux-staging
drivers/gpu/drm/exynos/exynos_hdmi.c
94
73999
/* * Copyright (C) 2011 Samsung Electronics Co.Ltd * Authors: * Seung-Woo Kim <sw0312.kim@samsung.com> * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * * Based on drivers/media/video/s5p-tv/hdmi_drv.c * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <drm/drmP.h> #include <drm/drm_edid.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_atomic_helper.h> #include "regs-hdmi.h" #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/regulator/consumer.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_gpio.h> #include <linux/hdmi.h> #include <linux/component.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <drm/exynos_drm.h> #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" #include "exynos_mixer.h" #include <linux/gpio.h> #include <media/s5p_hdmi.h> #define ctx_from_connector(c) container_of(c, struct hdmi_context, connector) #define HOTPLUG_DEBOUNCE_MS 1100 /* AVI header and aspect ratio */ #define HDMI_AVI_VERSION 0x02 #define HDMI_AVI_LENGTH 0x0D /* AUI header info */ #define HDMI_AUI_VERSION 0x01 #define HDMI_AUI_LENGTH 0x0A #define AVI_SAME_AS_PIC_ASPECT_RATIO 0x8 #define AVI_4_3_CENTER_RATIO 0x9 #define AVI_16_9_CENTER_RATIO 0xa enum hdmi_type { HDMI_TYPE13, HDMI_TYPE14, }; struct hdmi_driver_data { unsigned int type; const struct hdmiphy_config *phy_confs; unsigned int phy_conf_count; unsigned int is_apb_phy:1; }; struct hdmi_resources { struct clk *hdmi; struct clk *sclk_hdmi; struct clk *sclk_pixel; struct clk *sclk_hdmiphy; 
struct clk *mout_hdmi; struct regulator_bulk_data *regul_bulk; struct regulator *reg_hdmi_en; int regul_count; }; struct hdmi_tg_regs { u8 cmd[1]; u8 h_fsz[2]; u8 hact_st[2]; u8 hact_sz[2]; u8 v_fsz[2]; u8 vsync[2]; u8 vsync2[2]; u8 vact_st[2]; u8 vact_sz[2]; u8 field_chg[2]; u8 vact_st2[2]; u8 vact_st3[2]; u8 vact_st4[2]; u8 vsync_top_hdmi[2]; u8 vsync_bot_hdmi[2]; u8 field_top_hdmi[2]; u8 field_bot_hdmi[2]; u8 tg_3d[1]; }; struct hdmi_v13_core_regs { u8 h_blank[2]; u8 v_blank[3]; u8 h_v_line[3]; u8 vsync_pol[1]; u8 int_pro_mode[1]; u8 v_blank_f[3]; u8 h_sync_gen[3]; u8 v_sync_gen1[3]; u8 v_sync_gen2[3]; u8 v_sync_gen3[3]; }; struct hdmi_v14_core_regs { u8 h_blank[2]; u8 v2_blank[2]; u8 v1_blank[2]; u8 v_line[2]; u8 h_line[2]; u8 hsync_pol[1]; u8 vsync_pol[1]; u8 int_pro_mode[1]; u8 v_blank_f0[2]; u8 v_blank_f1[2]; u8 h_sync_start[2]; u8 h_sync_end[2]; u8 v_sync_line_bef_2[2]; u8 v_sync_line_bef_1[2]; u8 v_sync_line_aft_2[2]; u8 v_sync_line_aft_1[2]; u8 v_sync_line_aft_pxl_2[2]; u8 v_sync_line_aft_pxl_1[2]; u8 v_blank_f2[2]; /* for 3D mode */ u8 v_blank_f3[2]; /* for 3D mode */ u8 v_blank_f4[2]; /* for 3D mode */ u8 v_blank_f5[2]; /* for 3D mode */ u8 v_sync_line_aft_3[2]; u8 v_sync_line_aft_4[2]; u8 v_sync_line_aft_5[2]; u8 v_sync_line_aft_6[2]; u8 v_sync_line_aft_pxl_3[2]; u8 v_sync_line_aft_pxl_4[2]; u8 v_sync_line_aft_pxl_5[2]; u8 v_sync_line_aft_pxl_6[2]; u8 vact_space_1[2]; u8 vact_space_2[2]; u8 vact_space_3[2]; u8 vact_space_4[2]; u8 vact_space_5[2]; u8 vact_space_6[2]; }; struct hdmi_v13_conf { struct hdmi_v13_core_regs core; struct hdmi_tg_regs tg; }; struct hdmi_v14_conf { struct hdmi_v14_core_regs core; struct hdmi_tg_regs tg; }; struct hdmi_conf_regs { int pixel_clock; int cea_video_id; enum hdmi_picture_aspect aspect_ratio; union { struct hdmi_v13_conf v13_conf; struct hdmi_v14_conf v14_conf; } conf; }; struct hdmi_context { struct exynos_drm_display display; struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; struct 
drm_encoder *encoder; bool hpd; bool powered; bool dvi_mode; struct mutex hdmi_mutex; void __iomem *regs; int irq; struct delayed_work hotplug_work; struct i2c_adapter *ddc_adpt; struct i2c_client *hdmiphy_port; /* current hdmiphy conf regs */ struct drm_display_mode current_mode; struct hdmi_conf_regs mode_conf; struct hdmi_resources res; int hpd_gpio; void __iomem *regs_hdmiphy; const struct hdmiphy_config *phy_confs; unsigned int phy_conf_count; struct regmap *pmureg; enum hdmi_type type; }; static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d) { return container_of(d, struct hdmi_context, display); } struct hdmiphy_config { int pixel_clock; u8 conf[32]; }; /* list of phy config settings */ static const struct hdmiphy_config hdmiphy_v13_configs[] = { { .pixel_clock = 27000000, .conf = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40, 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 27027000, .conf = { 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64, 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 74176000, .conf = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B, 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9, 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 74250000, .conf = { 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40, 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba, 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0, 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00, }, }, { .pixel_clock = 148500000, .conf = { 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40, 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba, 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0, 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00, }, }, }; static const struct 
hdmiphy_config hdmiphy_v14_configs[] = { { .pixel_clock = 25200000, .conf = { 0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 27000000, .conf = { 0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20, 0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 27027000, .conf = { 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08, 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, }, }, { .pixel_clock = 36000000, .conf = { 0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 40000000, .conf = { 0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 65000000, .conf = { 0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08, 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 71000000, .conf = { 0x01, 0xd1, 0x3b, 0x35, 0x40, 0x0c, 0x04, 0x08, 0x85, 0xa0, 0x63, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xad, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 73250000, .conf = { 0x01, 0xd1, 0x3d, 0x35, 0x40, 0x18, 0x02, 0x08, 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xa8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 74176000, .conf = { 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 
0x08, 0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 74250000, .conf = { 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08, 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00, }, }, { .pixel_clock = 83500000, .conf = { 0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08, 0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 106500000, .conf = { 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08, 0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 108000000, .conf = { 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08, 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 115500000, .conf = { 0x01, 0xd1, 0x30, 0x12, 0x40, 0x40, 0x10, 0x08, 0x80, 0x80, 0x21, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0xaa, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 119000000, .conf = { 0x01, 0xd1, 0x32, 0x1a, 0x40, 0x30, 0xd8, 0x08, 0x04, 0xa0, 0x2a, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x9d, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 146250000, .conf = { 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08, 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 148500000, .conf = { 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08, 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80, 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86, 0x54, 0x4b, 
0x25, 0x03, 0x00, 0x00, 0x01, 0x00, }, }, }; static const struct hdmiphy_config hdmiphy_5420_configs[] = { { .pixel_clock = 25200000, .conf = { 0x01, 0x52, 0x3F, 0x55, 0x40, 0x01, 0x00, 0xC8, 0x82, 0xC8, 0xBD, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x06, 0x80, 0x01, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xF4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 27000000, .conf = { 0x01, 0xD1, 0x22, 0x51, 0x40, 0x08, 0xFC, 0xE0, 0x98, 0xE8, 0xCB, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x06, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xE4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 27027000, .conf = { 0x01, 0xD1, 0x2D, 0x72, 0x40, 0x64, 0x12, 0xC8, 0x43, 0xE8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x26, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xE3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 36000000, .conf = { 0x01, 0x51, 0x2D, 0x55, 0x40, 0x40, 0x00, 0xC8, 0x02, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xAB, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 40000000, .conf = { 0x01, 0xD1, 0x21, 0x31, 0x40, 0x3C, 0x28, 0xC8, 0x87, 0xE8, 0xC8, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0x9A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 65000000, .conf = { 0x01, 0xD1, 0x36, 0x34, 0x40, 0x0C, 0x04, 0xC8, 0x82, 0xE8, 0x45, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xBD, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 71000000, .conf = { 0x01, 0xD1, 0x3B, 0x35, 0x40, 0x0C, 0x04, 0xC8, 0x85, 0xE8, 0x63, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0x57, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 73250000, .conf = { 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x78, 0x8D, 0xC8, 0x81, 0xE8, 0xB7, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xA8, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { 
.pixel_clock = 74176000, .conf = { 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x5B, 0xEF, 0xC8, 0x81, 0xE8, 0xB9, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x56, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xA6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 74250000, .conf = { 0x01, 0xD1, 0x1F, 0x10, 0x40, 0x40, 0xF8, 0x08, 0x81, 0xE8, 0xBA, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66, 0x54, 0xA5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 83500000, .conf = { 0x01, 0xD1, 0x23, 0x11, 0x40, 0x0C, 0xFB, 0xC8, 0x85, 0xE8, 0xD1, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0x4A, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 88750000, .conf = { 0x01, 0xD1, 0x25, 0x11, 0x40, 0x18, 0xFF, 0xC8, 0x83, 0xE8, 0xDE, 0xD8, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0x45, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 106500000, .conf = { 0x01, 0xD1, 0x2C, 0x12, 0x40, 0x0C, 0x09, 0xC8, 0x84, 0xE8, 0x0A, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 108000000, .conf = { 0x01, 0x51, 0x2D, 0x15, 0x40, 0x01, 0x00, 0xC8, 0x82, 0xC8, 0x0E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0xC7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 115500000, .conf = { 0x01, 0xD1, 0x30, 0x14, 0x40, 0x0C, 0x03, 0xC8, 0x88, 0xE8, 0x21, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0x6A, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 146250000, .conf = { 0x01, 0xD1, 0x3D, 0x15, 0x40, 0x18, 0xFD, 0xC8, 0x83, 0xE8, 0x6E, 0xD9, 0x45, 0xA0, 0xAC, 0x80, 0x08, 0x80, 0x09, 0x84, 0x05, 0x02, 0x24, 0x66, 0x54, 0x54, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80, }, }, { .pixel_clock = 148500000, .conf = { 0x01, 0xD1, 0x1F, 0x00, 0x40, 0x40, 0xF8, 0x08, 0x81, 0xE8, 0xBA, 0xD8, 0x45, 
0xA0, 0xAC, 0x80, 0x26, 0x80, 0x09, 0x84, 0x05, 0x22, 0x24, 0x66, 0x54, 0x4B, 0x25, 0x03, 0x00, 0x80, 0x01, 0x80, }, }, }; static struct hdmi_driver_data exynos5420_hdmi_driver_data = { .type = HDMI_TYPE14, .phy_confs = hdmiphy_5420_configs, .phy_conf_count = ARRAY_SIZE(hdmiphy_5420_configs), .is_apb_phy = 1, }; static struct hdmi_driver_data exynos4212_hdmi_driver_data = { .type = HDMI_TYPE14, .phy_confs = hdmiphy_v14_configs, .phy_conf_count = ARRAY_SIZE(hdmiphy_v14_configs), .is_apb_phy = 0, }; static struct hdmi_driver_data exynos4210_hdmi_driver_data = { .type = HDMI_TYPE13, .phy_confs = hdmiphy_v13_configs, .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs), .is_apb_phy = 0, }; static struct hdmi_driver_data exynos5_hdmi_driver_data = { .type = HDMI_TYPE14, .phy_confs = hdmiphy_v13_configs, .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs), .is_apb_phy = 0, }; static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) { return readl(hdata->regs + reg_id); } static inline void hdmi_reg_writeb(struct hdmi_context *hdata, u32 reg_id, u8 value) { writeb(value, hdata->regs + reg_id); } static inline void hdmi_reg_writemask(struct hdmi_context *hdata, u32 reg_id, u32 value, u32 mask) { u32 old = readl(hdata->regs + reg_id); value = (value & mask) | (old & ~mask); writel(value, hdata->regs + reg_id); } static int hdmiphy_reg_writeb(struct hdmi_context *hdata, u32 reg_offset, u8 value) { if (hdata->hdmiphy_port) { u8 buffer[2]; int ret; buffer[0] = reg_offset; buffer[1] = value; ret = i2c_master_send(hdata->hdmiphy_port, buffer, 2); if (ret == 2) return 0; return ret; } else { writeb(value, hdata->regs_hdmiphy + (reg_offset<<2)); return 0; } } static int hdmiphy_reg_write_buf(struct hdmi_context *hdata, u32 reg_offset, const u8 *buf, u32 len) { if ((reg_offset + len) > 32) return -EINVAL; if (hdata->hdmiphy_port) { int ret; ret = i2c_master_send(hdata->hdmiphy_port, buf, len); if (ret == len) return 0; return ret; } else { int i; for (i = 0; i < len; i++) 
writeb(buf[i], hdata->regs_hdmiphy + ((reg_offset + i)<<2)); return 0; } } static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix) { #define DUMPREG(reg_id) \ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ readl(hdata->regs + reg_id)) DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); DUMPREG(HDMI_INTC_FLAG); DUMPREG(HDMI_INTC_CON); DUMPREG(HDMI_HPD_STATUS); DUMPREG(HDMI_V13_PHY_RSTOUT); DUMPREG(HDMI_V13_PHY_VPLL); DUMPREG(HDMI_V13_PHY_CMU); DUMPREG(HDMI_V13_CORE_RSTOUT); DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); DUMPREG(HDMI_CON_0); DUMPREG(HDMI_CON_1); DUMPREG(HDMI_CON_2); DUMPREG(HDMI_SYS_STATUS); DUMPREG(HDMI_V13_PHY_STATUS); DUMPREG(HDMI_STATUS_EN); DUMPREG(HDMI_HPD); DUMPREG(HDMI_MODE_SEL); DUMPREG(HDMI_V13_HPD_GEN); DUMPREG(HDMI_V13_DC_CONTROL); DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN); DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); DUMPREG(HDMI_H_BLANK_0); DUMPREG(HDMI_H_BLANK_1); DUMPREG(HDMI_V13_V_BLANK_0); DUMPREG(HDMI_V13_V_BLANK_1); DUMPREG(HDMI_V13_V_BLANK_2); DUMPREG(HDMI_V13_H_V_LINE_0); DUMPREG(HDMI_V13_H_V_LINE_1); DUMPREG(HDMI_V13_H_V_LINE_2); DUMPREG(HDMI_VSYNC_POL); DUMPREG(HDMI_INT_PRO_MODE); DUMPREG(HDMI_V13_V_BLANK_F_0); DUMPREG(HDMI_V13_V_BLANK_F_1); DUMPREG(HDMI_V13_V_BLANK_F_2); DUMPREG(HDMI_V13_H_SYNC_GEN_0); DUMPREG(HDMI_V13_H_SYNC_GEN_1); DUMPREG(HDMI_V13_H_SYNC_GEN_2); DUMPREG(HDMI_V13_V_SYNC_GEN_1_0); DUMPREG(HDMI_V13_V_SYNC_GEN_1_1); DUMPREG(HDMI_V13_V_SYNC_GEN_1_2); DUMPREG(HDMI_V13_V_SYNC_GEN_2_0); DUMPREG(HDMI_V13_V_SYNC_GEN_2_1); DUMPREG(HDMI_V13_V_SYNC_GEN_2_2); DUMPREG(HDMI_V13_V_SYNC_GEN_3_0); DUMPREG(HDMI_V13_V_SYNC_GEN_3_1); DUMPREG(HDMI_V13_V_SYNC_GEN_3_2); DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); DUMPREG(HDMI_TG_CMD); DUMPREG(HDMI_TG_H_FSZ_L); DUMPREG(HDMI_TG_H_FSZ_H); DUMPREG(HDMI_TG_HACT_ST_L); DUMPREG(HDMI_TG_HACT_ST_H); DUMPREG(HDMI_TG_HACT_SZ_L); DUMPREG(HDMI_TG_HACT_SZ_H); DUMPREG(HDMI_TG_V_FSZ_L); DUMPREG(HDMI_TG_V_FSZ_H); 
DUMPREG(HDMI_TG_VSYNC_L); DUMPREG(HDMI_TG_VSYNC_H); DUMPREG(HDMI_TG_VSYNC2_L); DUMPREG(HDMI_TG_VSYNC2_H); DUMPREG(HDMI_TG_VACT_ST_L); DUMPREG(HDMI_TG_VACT_ST_H); DUMPREG(HDMI_TG_VACT_SZ_L); DUMPREG(HDMI_TG_VACT_SZ_H); DUMPREG(HDMI_TG_FIELD_CHG_L); DUMPREG(HDMI_TG_FIELD_CHG_H); DUMPREG(HDMI_TG_VACT_ST2_L); DUMPREG(HDMI_TG_VACT_ST2_H); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); #undef DUMPREG } static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix) { int i; #define DUMPREG(reg_id) \ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \ readl(hdata->regs + reg_id)) DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix); DUMPREG(HDMI_INTC_CON); DUMPREG(HDMI_INTC_FLAG); DUMPREG(HDMI_HPD_STATUS); DUMPREG(HDMI_INTC_CON_1); DUMPREG(HDMI_INTC_FLAG_1); DUMPREG(HDMI_PHY_STATUS_0); DUMPREG(HDMI_PHY_STATUS_PLL); DUMPREG(HDMI_PHY_CON_0); DUMPREG(HDMI_PHY_RSTOUT); DUMPREG(HDMI_PHY_VPLL); DUMPREG(HDMI_PHY_CMU); DUMPREG(HDMI_CORE_RSTOUT); DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix); DUMPREG(HDMI_CON_0); DUMPREG(HDMI_CON_1); DUMPREG(HDMI_CON_2); DUMPREG(HDMI_SYS_STATUS); DUMPREG(HDMI_PHY_STATUS_0); DUMPREG(HDMI_STATUS_EN); DUMPREG(HDMI_HPD); DUMPREG(HDMI_MODE_SEL); DUMPREG(HDMI_ENC_EN); DUMPREG(HDMI_DC_CONTROL); DUMPREG(HDMI_VIDEO_PATTERN_GEN); DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix); DUMPREG(HDMI_H_BLANK_0); DUMPREG(HDMI_H_BLANK_1); DUMPREG(HDMI_V2_BLANK_0); DUMPREG(HDMI_V2_BLANK_1); DUMPREG(HDMI_V1_BLANK_0); DUMPREG(HDMI_V1_BLANK_1); DUMPREG(HDMI_V_LINE_0); DUMPREG(HDMI_V_LINE_1); DUMPREG(HDMI_H_LINE_0); DUMPREG(HDMI_H_LINE_1); DUMPREG(HDMI_HSYNC_POL); DUMPREG(HDMI_VSYNC_POL); DUMPREG(HDMI_INT_PRO_MODE); DUMPREG(HDMI_V_BLANK_F0_0); DUMPREG(HDMI_V_BLANK_F0_1); DUMPREG(HDMI_V_BLANK_F1_0); 
DUMPREG(HDMI_V_BLANK_F1_1); DUMPREG(HDMI_H_SYNC_START_0); DUMPREG(HDMI_H_SYNC_START_1); DUMPREG(HDMI_H_SYNC_END_0); DUMPREG(HDMI_H_SYNC_END_1); DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0); DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1); DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0); DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1); DUMPREG(HDMI_V_BLANK_F2_0); DUMPREG(HDMI_V_BLANK_F2_1); DUMPREG(HDMI_V_BLANK_F3_0); DUMPREG(HDMI_V_BLANK_F3_1); DUMPREG(HDMI_V_BLANK_F4_0); DUMPREG(HDMI_V_BLANK_F4_1); DUMPREG(HDMI_V_BLANK_F5_0); DUMPREG(HDMI_V_BLANK_F5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0); DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1); DUMPREG(HDMI_VACT_SPACE_1_0); DUMPREG(HDMI_VACT_SPACE_1_1); DUMPREG(HDMI_VACT_SPACE_2_0); DUMPREG(HDMI_VACT_SPACE_2_1); DUMPREG(HDMI_VACT_SPACE_3_0); DUMPREG(HDMI_VACT_SPACE_3_1); DUMPREG(HDMI_VACT_SPACE_4_0); DUMPREG(HDMI_VACT_SPACE_4_1); DUMPREG(HDMI_VACT_SPACE_5_0); DUMPREG(HDMI_VACT_SPACE_5_1); DUMPREG(HDMI_VACT_SPACE_6_0); DUMPREG(HDMI_VACT_SPACE_6_1); DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix); DUMPREG(HDMI_TG_CMD); DUMPREG(HDMI_TG_H_FSZ_L); DUMPREG(HDMI_TG_H_FSZ_H); DUMPREG(HDMI_TG_HACT_ST_L); DUMPREG(HDMI_TG_HACT_ST_H); DUMPREG(HDMI_TG_HACT_SZ_L); DUMPREG(HDMI_TG_HACT_SZ_H); 
DUMPREG(HDMI_TG_V_FSZ_L); DUMPREG(HDMI_TG_V_FSZ_H); DUMPREG(HDMI_TG_VSYNC_L); DUMPREG(HDMI_TG_VSYNC_H); DUMPREG(HDMI_TG_VSYNC2_L); DUMPREG(HDMI_TG_VSYNC2_H); DUMPREG(HDMI_TG_VACT_ST_L); DUMPREG(HDMI_TG_VACT_ST_H); DUMPREG(HDMI_TG_VACT_SZ_L); DUMPREG(HDMI_TG_VACT_SZ_H); DUMPREG(HDMI_TG_FIELD_CHG_L); DUMPREG(HDMI_TG_FIELD_CHG_H); DUMPREG(HDMI_TG_VACT_ST2_L); DUMPREG(HDMI_TG_VACT_ST2_H); DUMPREG(HDMI_TG_VACT_ST3_L); DUMPREG(HDMI_TG_VACT_ST3_H); DUMPREG(HDMI_TG_VACT_ST4_L); DUMPREG(HDMI_TG_VACT_ST4_H); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L); DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L); DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L); DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L); DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H); DUMPREG(HDMI_TG_3D); DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix); DUMPREG(HDMI_AVI_CON); DUMPREG(HDMI_AVI_HEADER0); DUMPREG(HDMI_AVI_HEADER1); DUMPREG(HDMI_AVI_HEADER2); DUMPREG(HDMI_AVI_CHECK_SUM); DUMPREG(HDMI_VSI_CON); DUMPREG(HDMI_VSI_HEADER0); DUMPREG(HDMI_VSI_HEADER1); DUMPREG(HDMI_VSI_HEADER2); for (i = 0; i < 7; ++i) DUMPREG(HDMI_VSI_DATA(i)); #undef DUMPREG } static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix) { if (hdata->type == HDMI_TYPE13) hdmi_v13_regs_dump(hdata, prefix); else hdmi_v14_regs_dump(hdata, prefix); } static u8 hdmi_chksum(struct hdmi_context *hdata, u32 start, u8 len, u32 hdr_sum) { int i; /* hdr_sum : header0 + header1 + header2 * start : start address of packet byte1 * len : packet bytes - 1 */ for (i = 0; i < len; ++i) hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4); /* return 2's complement of 8 bit hdr_sum */ return (u8)(~(hdr_sum & 0xff) + 1); } static void hdmi_reg_infoframe(struct hdmi_context *hdata, union hdmi_infoframe *infoframe) { u32 hdr_sum; u8 chksum; u32 mod; u32 vic; mod = hdmi_reg_read(hdata, HDMI_MODE_SEL); if (hdata->dvi_mode) { hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_DO_NOT_TRANSMIT); 
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_DO_NOT_TRANSMIT); hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN); return; } switch (infoframe->any.type) { case HDMI_INFOFRAME_TYPE_AVI: hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type); hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->any.version); hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length); hdr_sum = infoframe->any.type + infoframe->any.version + infoframe->any.length; /* Output format zero hardcoded ,RGB YBCR selection */ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | AVI_ACTIVE_FORMAT_VALID | AVI_UNDERSCANNED_DISPLAY_VALID); /* * Set the aspect ratio as per the mode, mentioned in * Table 9 AVI InfoFrame Data Byte 2 of CEA-861-D Standard */ switch (hdata->mode_conf.aspect_ratio) { case HDMI_PICTURE_ASPECT_4_3: hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), hdata->mode_conf.aspect_ratio | AVI_4_3_CENTER_RATIO); break; case HDMI_PICTURE_ASPECT_16_9: hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), hdata->mode_conf.aspect_ratio | AVI_16_9_CENTER_RATIO); break; case HDMI_PICTURE_ASPECT_NONE: default: hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), hdata->mode_conf.aspect_ratio | AVI_SAME_AS_PIC_ASPECT_RATIO); break; } vic = hdata->mode_conf.cea_video_id; hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), infoframe->any.length, hdr_sum); DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); break; case HDMI_INFOFRAME_TYPE_AUDIO: hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type); hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->any.version); hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length); hdr_sum = infoframe->any.type + infoframe->any.version + infoframe->any.length; chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), infoframe->any.length, hdr_sum); DRM_DEBUG_KMS("AUI 
checksum = 0x%x\n", chksum); hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); break; default: break; } } static enum drm_connector_status hdmi_detect(struct drm_connector *connector, bool force) { struct hdmi_context *hdata = ctx_from_connector(connector); hdata->hpd = gpio_get_value(hdata->hpd_gpio); return hdata->hpd ? connector_status_connected : connector_status_disconnected; } static void hdmi_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); drm_connector_cleanup(connector); } static struct drm_connector_funcs hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .fill_modes = drm_helper_probe_single_connector_modes, .detect = hdmi_detect, .destroy = hdmi_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int hdmi_get_modes(struct drm_connector *connector) { struct hdmi_context *hdata = ctx_from_connector(connector); struct edid *edid; if (!hdata->ddc_adpt) return -ENODEV; edid = drm_get_edid(connector, hdata->ddc_adpt); if (!edid) return -ENODEV; hdata->dvi_mode = !drm_detect_hdmi_monitor(edid); DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? 
"dvi monitor" : "hdmi monitor"), edid->width_cm, edid->height_cm); drm_mode_connector_update_edid_property(connector, edid); return drm_add_edid_modes(connector, edid); } static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) { int i; for (i = 0; i < hdata->phy_conf_count; i++) if (hdata->phy_confs[i].pixel_clock == pixel_clock) return i; DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock); return -EINVAL; } static int hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct hdmi_context *hdata = ctx_from_connector(connector); int ret; DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n", mode->hdisplay, mode->vdisplay, mode->vrefresh, (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true : false, mode->clock * 1000); ret = mixer_check_mode(mode); if (ret) return MODE_BAD; ret = hdmi_find_phy_conf(hdata, mode->clock * 1000); if (ret < 0) return MODE_BAD; return MODE_OK; } static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) { struct hdmi_context *hdata = ctx_from_connector(connector); return hdata->encoder; } static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .get_modes = hdmi_get_modes, .mode_valid = hdmi_mode_valid, .best_encoder = hdmi_best_encoder, }; static int hdmi_create_connector(struct exynos_drm_display *display, struct drm_encoder *encoder) { struct hdmi_context *hdata = display_to_hdmi(display); struct drm_connector *connector = &hdata->connector; int ret; hdata->encoder = encoder; connector->interlace_allowed = true; connector->polled = DRM_CONNECTOR_POLL_HPD; ret = drm_connector_init(hdata->drm_dev, connector, &hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); if (ret) { DRM_ERROR("Failed to initialize connector with drm\n"); return ret; } drm_connector_helper_add(connector, &hdmi_connector_helper_funcs); drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; } static void 
hdmi_mode_fixup(struct exynos_drm_display *display, struct drm_connector *connector, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_display_mode *m; int mode_ok; DRM_DEBUG_KMS("%s\n", __FILE__); drm_mode_set_crtcinfo(adjusted_mode, 0); mode_ok = hdmi_mode_valid(connector, adjusted_mode); /* just return if user desired mode exists. */ if (mode_ok == MODE_OK) return; /* * otherwise, find the most suitable mode among modes and change it * to adjusted_mode. */ list_for_each_entry(m, &connector->modes, head) { mode_ok = hdmi_mode_valid(connector, m); if (mode_ok == MODE_OK) { DRM_INFO("desired mode doesn't exist so\n"); DRM_INFO("use the most suitable mode among modes.\n"); DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n", m->hdisplay, m->vdisplay, m->vrefresh); drm_mode_copy(adjusted_mode, m); break; } } } static void hdmi_set_acr(u32 freq, u8 *acr) { u32 n, cts; switch (freq) { case 32000: n = 4096; cts = 27000; break; case 44100: n = 6272; cts = 30000; break; case 88200: n = 12544; cts = 30000; break; case 176400: n = 25088; cts = 30000; break; case 48000: n = 6144; cts = 27000; break; case 96000: n = 12288; cts = 27000; break; case 192000: n = 24576; cts = 27000; break; default: n = 0; cts = 0; break; } acr[1] = cts >> 16; acr[2] = cts >> 8 & 0xff; acr[3] = cts & 0xff; acr[4] = n >> 16; acr[5] = n >> 8 & 0xff; acr[6] = n & 0xff; } static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr) { hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]); hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]); hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]); hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]); hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]); hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]); hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]); if (hdata->type == HDMI_TYPE13) hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4); else hdmi_reg_writeb(hdata, HDMI_ACR_CON, 
4); } static void hdmi_audio_init(struct hdmi_context *hdata) { u32 sample_rate, bits_per_sample; u32 data_num, bit_ch, sample_frq; u32 val; u8 acr[7]; sample_rate = 44100; bits_per_sample = 16; switch (bits_per_sample) { case 20: data_num = 2; bit_ch = 1; break; case 24: data_num = 3; bit_ch = 1; break; default: data_num = 1; bit_ch = 0; break; } hdmi_set_acr(sample_rate, acr); hdmi_reg_acr(hdata, acr); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE | HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE | HDMI_I2S_MUX_ENABLE); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN | HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN); hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN); sample_frq = (sample_rate == 44100) ? 0 : (sample_rate == 48000) ? 2 : (sample_rate == 32000) ? 3 : (sample_rate == 96000) ? 0xa : 0x0; hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS); hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN); val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01; hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val); /* Configuration I2S input ports. 
Configure I2S_PIN_SEL_0~4 */ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) | HDMI_I2S_SEL_LRCK(6)); hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) | HDMI_I2S_SEL_SDATA2(4)); hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) | HDMI_I2S_SEL_SDATA2(2)); hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); /* I2S_CON_1 & 2 */ hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE | HDMI_I2S_L_CH_LOW_POL); hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE | HDMI_I2S_SET_BIT_CH(bit_ch) | HDMI_I2S_SET_SDATA_BIT(data_num) | HDMI_I2S_BASIC_FORMAT); /* Configure register related to CUV information */ hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0 | HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH | HDMI_I2S_COPYRIGHT | HDMI_I2S_LINEAR_PCM | HDMI_I2S_CONSUMER_FORMAT); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0)); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2 | HDMI_I2S_SET_SMP_FREQ(sample_frq)); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4, HDMI_I2S_ORG_SMP_FREQ_44_1 | HDMI_I2S_WORD_LEN_MAX24_24BITS | HDMI_I2S_WORD_LEN_MAX_24BITS); hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD); } static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff) { if (hdata->dvi_mode) return; hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0); hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ? HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK); } static void hdmi_start(struct hdmi_context *hdata, bool start) { u32 val = start ? 
HDMI_TG_EN : 0; if (hdata->current_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= HDMI_FIELD_EN; hdmi_reg_writemask(hdata, HDMI_CON_0, val, HDMI_EN); hdmi_reg_writemask(hdata, HDMI_TG_CMD, val, HDMI_TG_EN | HDMI_FIELD_EN); } static void hdmi_conf_init(struct hdmi_context *hdata) { union hdmi_infoframe infoframe; /* disable HPD interrupts from HDMI IP block, use GPIO instead */ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); /* choose HDMI mode */ hdmi_reg_writemask(hdata, HDMI_MODE_SEL, HDMI_MODE_HDMI_EN, HDMI_MODE_MASK); /* Apply Video preable and Guard band in HDMI mode only */ hdmi_reg_writeb(hdata, HDMI_CON_2, 0); /* disable bluescreen */ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN); if (hdata->dvi_mode) { /* choose DVI mode */ hdmi_reg_writemask(hdata, HDMI_MODE_SEL, HDMI_MODE_DVI_EN, HDMI_MODE_MASK); hdmi_reg_writeb(hdata, HDMI_CON_2, HDMI_VID_PREAMBLE_DIS | HDMI_GUARD_BAND_DIS); } if (hdata->type == HDMI_TYPE13) { /* choose bluescreen (fecal) color */ hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12); hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34); hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56); /* enable AVI packet every vsync, fixes purple line problem */ hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02); /* force RGB, look to CEA-861-D, table 7 for more detail */ hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5); hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5); hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); } else { infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI; infoframe.any.version = HDMI_AVI_VERSION; infoframe.any.length = HDMI_AVI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO; infoframe.any.version = HDMI_AUI_VERSION; infoframe.any.length = HDMI_AUI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); /* enable 
AVI packet every vsync, fixes purple line problem */ hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); } } static void hdmi_v13_mode_apply(struct hdmi_context *hdata) { const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; const struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core; int tries; /* setting core registers */ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]); hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]); hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]); hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]); hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, 
core->v_sync_gen3[2]); /* Timing generator registers */ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS); if (val & HDMI_PHY_STATUS_READY) break; 
usleep_range(1000, 2000); } /* steady state not achieved */ if (tries == 0) { DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); hdmi_regs_dump(hdata, "timing apply"); } clk_disable_unprepare(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy); clk_prepare_enable(hdata->res.sclk_hdmi); /* enable HDMI and timing generator */ hdmi_start(hdata, true); } static void hdmi_v14_mode_apply(struct hdmi_context *hdata) { const struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; const struct hdmi_v14_core_regs *core = &hdata->mode_conf.conf.v14_conf.core; int tries; /* setting core registers */ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]); hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]); hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]); hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]); hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]); hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]); hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]); hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]); hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]); hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]); hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]); hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]); hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 
core->v_sync_line_bef_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1, core->v_sync_line_bef_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0, core->v_sync_line_bef_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1, core->v_sync_line_bef_1[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0, core->v_sync_line_aft_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1, core->v_sync_line_aft_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0, core->v_sync_line_aft_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1, core->v_sync_line_aft_1[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0, core->v_sync_line_aft_pxl_2[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1, core->v_sync_line_aft_pxl_2[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0, core->v_sync_line_aft_pxl_1[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1, core->v_sync_line_aft_pxl_1[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]); hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0, core->v_sync_line_aft_3[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1, core->v_sync_line_aft_3[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0, core->v_sync_line_aft_4[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1, core->v_sync_line_aft_4[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0, core->v_sync_line_aft_5[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1, core->v_sync_line_aft_5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0, core->v_sync_line_aft_6[0]); hdmi_reg_writeb(hdata, 
HDMI_V_SYNC_LINE_AFT_6_1, core->v_sync_line_aft_6[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0, core->v_sync_line_aft_pxl_3[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1, core->v_sync_line_aft_pxl_3[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0, core->v_sync_line_aft_pxl_4[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1, core->v_sync_line_aft_pxl_4[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, core->v_sync_line_aft_pxl_5[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1, core->v_sync_line_aft_pxl_5[1]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, core->v_sync_line_aft_pxl_6[0]); hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1, core->v_sync_line_aft_pxl_6[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]); hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); /* Timing generator registers */ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]); hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]); 
hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]); hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]); hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]); hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]); /* waiting for HDMIPHY's PLL to get to steady state */ for (tries = 100; tries; --tries) { u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0); if (val & HDMI_PHY_STATUS_READY) break; usleep_range(1000, 2000); } /* steady state not achieved */ if (tries == 0) { DRM_ERROR("hdmiphy's pll could not reach steady state.\n"); hdmi_regs_dump(hdata, "timing apply"); 
} clk_disable_unprepare(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_hdmiphy); clk_prepare_enable(hdata->res.sclk_hdmi); /* enable HDMI and timing generator */ hdmi_start(hdata, true); } static void hdmi_mode_apply(struct hdmi_context *hdata) { if (hdata->type == HDMI_TYPE13) hdmi_v13_mode_apply(hdata); else hdmi_v14_mode_apply(hdata); } static void hdmiphy_conf_reset(struct hdmi_context *hdata) { u32 reg; clk_disable_unprepare(hdata->res.sclk_hdmi); clk_set_parent(hdata->res.mout_hdmi, hdata->res.sclk_pixel); clk_prepare_enable(hdata->res.sclk_hdmi); /* operation mode */ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, HDMI_PHY_ENABLE_MODE_SET); if (hdata->type == HDMI_TYPE13) reg = HDMI_V13_PHY_RSTOUT; else reg = HDMI_PHY_RSTOUT; /* reset hdmiphy */ hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT); usleep_range(10000, 12000); hdmi_reg_writemask(hdata, reg, 0, HDMI_PHY_SW_RSTOUT); usleep_range(10000, 12000); } static void hdmiphy_poweron(struct hdmi_context *hdata) { if (hdata->type != HDMI_TYPE14) return; DRM_DEBUG_KMS("\n"); /* For PHY Mode Setting */ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, HDMI_PHY_ENABLE_MODE_SET); /* Phy Power On */ hdmiphy_reg_writeb(hdata, HDMIPHY_POWER, HDMI_PHY_POWER_ON); /* For PHY Mode Setting */ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, HDMI_PHY_DISABLE_MODE_SET); /* PHY SW Reset */ hdmiphy_conf_reset(hdata); } static void hdmiphy_poweroff(struct hdmi_context *hdata) { if (hdata->type != HDMI_TYPE14) return; DRM_DEBUG_KMS("\n"); /* PHY SW Reset */ hdmiphy_conf_reset(hdata); /* For PHY Mode Setting */ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, HDMI_PHY_ENABLE_MODE_SET); /* PHY Power Off */ hdmiphy_reg_writeb(hdata, HDMIPHY_POWER, HDMI_PHY_POWER_OFF); /* For PHY Mode Setting */ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, HDMI_PHY_DISABLE_MODE_SET); } static void hdmiphy_conf_apply(struct hdmi_context *hdata) { int ret; int i; /* pixel clock */ i = hdmi_find_phy_conf(hdata, 
hdata->mode_conf.pixel_clock); if (i < 0) { DRM_ERROR("failed to find hdmiphy conf\n"); return; } ret = hdmiphy_reg_write_buf(hdata, 0, hdata->phy_confs[i].conf, 32); if (ret) { DRM_ERROR("failed to configure hdmiphy\n"); return; } usleep_range(10000, 12000); ret = hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, HDMI_PHY_DISABLE_MODE_SET); if (ret) { DRM_ERROR("failed to enable hdmiphy\n"); return; } } static void hdmi_conf_apply(struct hdmi_context *hdata) { hdmiphy_conf_reset(hdata); hdmiphy_conf_apply(hdata); mutex_lock(&hdata->hdmi_mutex); hdmi_start(hdata, false); hdmi_conf_init(hdata); mutex_unlock(&hdata->hdmi_mutex); hdmi_audio_init(hdata); /* setting core registers */ hdmi_mode_apply(hdata); hdmi_audio_control(hdata, true); hdmi_regs_dump(hdata, "start"); } static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value) { int i; BUG_ON(num_bytes > 4); for (i = 0; i < num_bytes; i++) reg_pair[i] = (value >> (8 * i)) & 0xff; } static void hdmi_v13_mode_set(struct hdmi_context *hdata, struct drm_display_mode *m) { struct hdmi_v13_core_regs *core = &hdata->mode_conf.conf.v13_conf.core; struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v13_conf.tg; unsigned int val; hdata->mode_conf.cea_video_id = drm_match_cea_mode((struct drm_display_mode *)m); hdata->mode_conf.pixel_clock = m->clock * 1000; hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio; hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); hdmi_set_reg(core->h_v_line, 3, (m->htotal << 12) | m->vtotal); val = (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0; hdmi_set_reg(core->vsync_pol, 1, val); val = (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0; hdmi_set_reg(core->int_pro_mode, 1, val); val = (m->hsync_start - m->hdisplay - 2); val |= ((m->hsync_end - m->hdisplay - 2) << 10); val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20; hdmi_set_reg(core->h_sync_gen, 3, val); /* * Quirk requirement for exynos HDMI IP design, * 2 pixels less than the actual calculation for hsync_start * and end. 
*/ /* Following values & calculations differ for different type of modes */ if (m->flags & DRM_MODE_FLAG_INTERLACE) { /* Interlaced Mode */ val = ((m->vsync_end - m->vdisplay) / 2); val |= ((m->vsync_start - m->vdisplay) / 2) << 12; hdmi_set_reg(core->v_sync_gen1, 3, val); val = m->vtotal / 2; val |= ((m->vtotal - m->vdisplay) / 2) << 11; hdmi_set_reg(core->v_blank, 3, val); val = (m->vtotal + ((m->vsync_end - m->vsync_start) * 4) + 5) / 2; val |= m->vtotal << 11; hdmi_set_reg(core->v_blank_f, 3, val); val = ((m->vtotal / 2) + 7); val |= ((m->vtotal / 2) + 2) << 12; hdmi_set_reg(core->v_sync_gen2, 3, val); val = ((m->htotal / 2) + (m->hsync_start - m->hdisplay)); val |= ((m->htotal / 2) + (m->hsync_start - m->hdisplay)) << 12; hdmi_set_reg(core->v_sync_gen3, 3, val); hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/ } else { /* Progressive Mode */ val = m->vtotal; val |= (m->vtotal - m->vdisplay) << 11; hdmi_set_reg(core->v_blank, 3, val); hdmi_set_reg(core->v_blank_f, 3, 0); val = (m->vsync_end - m->vdisplay); val |= ((m->vsync_start - m->vdisplay) << 12); hdmi_set_reg(core->v_sync_gen1, 3, val); hdmi_set_reg(core->v_sync_gen2, 3, 0x1001);/* Reset value */ hdmi_set_reg(core->v_sync_gen3, 3, 0x1001);/* Reset value */ hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ } /* Timing generator registers */ hdmi_set_reg(tg->cmd, 1, 0x0); hdmi_set_reg(tg->h_fsz, 2, m->htotal); hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); hdmi_set_reg(tg->v_fsz, 2, m->vtotal); hdmi_set_reg(tg->vsync, 2, 0x1); hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* 
Reset value */ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->tg_3d, 1, 0x0); /* Not used */ } static void hdmi_v14_mode_set(struct hdmi_context *hdata, struct drm_display_mode *m) { struct hdmi_tg_regs *tg = &hdata->mode_conf.conf.v14_conf.tg; struct hdmi_v14_core_regs *core = &hdata->mode_conf.conf.v14_conf.core; hdata->mode_conf.cea_video_id = drm_match_cea_mode((struct drm_display_mode *)m); hdata->mode_conf.pixel_clock = m->clock * 1000; hdata->mode_conf.aspect_ratio = m->picture_aspect_ratio; hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay); hdmi_set_reg(core->v_line, 2, m->vtotal); hdmi_set_reg(core->h_line, 2, m->htotal); hdmi_set_reg(core->hsync_pol, 1, (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0); hdmi_set_reg(core->vsync_pol, 1, (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0); hdmi_set_reg(core->int_pro_mode, 1, (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0); /* * Quirk requirement for exynos 5 HDMI IP design, * 2 pixels less than the actual calculation for hsync_start * and end. 
*/ /* Following values & calculations differ for different type of modes */ if (m->flags & DRM_MODE_FLAG_INTERLACE) { /* Interlaced Mode */ hdmi_set_reg(core->v_sync_line_bef_2, 2, (m->vsync_end - m->vdisplay) / 2); hdmi_set_reg(core->v_sync_line_bef_1, 2, (m->vsync_start - m->vdisplay) / 2); hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2); hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2); hdmi_set_reg(core->v_blank_f0, 2, m->vtotal - m->vdisplay / 2); hdmi_set_reg(core->v_blank_f1, 2, m->vtotal); hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7); hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2); hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, (m->htotal / 2) + (m->hsync_start - m->hdisplay)); hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, (m->htotal / 2) + (m->hsync_start - m->hdisplay)); hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2); hdmi_set_reg(tg->vact_st2, 2, m->vtotal - m->vdisplay / 2); hdmi_set_reg(tg->vsync2, 2, (m->vtotal / 2) + 1); hdmi_set_reg(tg->vsync_bot_hdmi, 2, (m->vtotal / 2) + 1); hdmi_set_reg(tg->field_bot_hdmi, 2, (m->vtotal / 2) + 1); hdmi_set_reg(tg->vact_st3, 2, 0x0); hdmi_set_reg(tg->vact_st4, 2, 0x0); } else { /* Progressive Mode */ hdmi_set_reg(core->v_sync_line_bef_2, 2, m->vsync_end - m->vdisplay); hdmi_set_reg(core->v_sync_line_bef_1, 2, m->vsync_start - m->vdisplay); hdmi_set_reg(core->v2_blank, 2, m->vtotal); hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay); hdmi_set_reg(core->v_blank_f0, 2, 0xffff); hdmi_set_reg(core->v_blank_f1, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff); hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay); hdmi_set_reg(tg->vact_sz, 2, m->vdisplay); hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */ hdmi_set_reg(tg->vact_st3, 2, 
0x47b); /* Reset value */ hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */ hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */ } /* Following values & calculations are same irrespective of mode type */ hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2); hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2); hdmi_set_reg(core->vact_space_1, 2, 0xffff); hdmi_set_reg(core->vact_space_2, 2, 0xffff); hdmi_set_reg(core->vact_space_3, 2, 0xffff); hdmi_set_reg(core->vact_space_4, 2, 0xffff); hdmi_set_reg(core->vact_space_5, 2, 0xffff); hdmi_set_reg(core->vact_space_6, 2, 0xffff); hdmi_set_reg(core->v_blank_f2, 2, 0xffff); hdmi_set_reg(core->v_blank_f3, 2, 0xffff); hdmi_set_reg(core->v_blank_f4, 2, 0xffff); hdmi_set_reg(core->v_blank_f5, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff); hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff); /* Timing generator registers */ hdmi_set_reg(tg->cmd, 1, 0x0); hdmi_set_reg(tg->h_fsz, 2, m->htotal); hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay); hdmi_set_reg(tg->hact_sz, 2, m->hdisplay); hdmi_set_reg(tg->v_fsz, 2, m->vtotal); hdmi_set_reg(tg->vsync, 2, 0x1); hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */ hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */ hdmi_set_reg(tg->tg_3d, 1, 0x0); } static void hdmi_mode_set(struct exynos_drm_display *display, struct drm_display_mode *mode) { struct hdmi_context *hdata = display_to_hdmi(display); struct drm_display_mode *m = 
mode; DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n", m->hdisplay, m->vdisplay, m->vrefresh, (m->flags & DRM_MODE_FLAG_INTERLACE) ? "INTERLACED" : "PROGRESSIVE"); /* preserve mode information for later use. */ drm_mode_copy(&hdata->current_mode, mode); if (hdata->type == HDMI_TYPE13) hdmi_v13_mode_set(hdata, mode); else hdmi_v14_mode_set(hdata, mode); } static void hdmi_commit(struct exynos_drm_display *display) { struct hdmi_context *hdata = display_to_hdmi(display); mutex_lock(&hdata->hdmi_mutex); if (!hdata->powered) { mutex_unlock(&hdata->hdmi_mutex); return; } mutex_unlock(&hdata->hdmi_mutex); hdmi_conf_apply(hdata); } static void hdmi_poweron(struct hdmi_context *hdata) { struct hdmi_resources *res = &hdata->res; mutex_lock(&hdata->hdmi_mutex); if (hdata->powered) { mutex_unlock(&hdata->hdmi_mutex); return; } hdata->powered = true; mutex_unlock(&hdata->hdmi_mutex); pm_runtime_get_sync(hdata->dev); if (regulator_bulk_enable(res->regul_count, res->regul_bulk)) DRM_DEBUG_KMS("failed to enable regulator bulk\n"); /* set pmu hdmiphy control bit to enable hdmiphy */ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, PMU_HDMI_PHY_ENABLE_BIT, 1); clk_prepare_enable(res->hdmi); clk_prepare_enable(res->sclk_hdmi); hdmiphy_poweron(hdata); hdmi_commit(&hdata->display); } static void hdmi_poweroff(struct hdmi_context *hdata) { struct hdmi_resources *res = &hdata->res; mutex_lock(&hdata->hdmi_mutex); if (!hdata->powered) goto out; mutex_unlock(&hdata->hdmi_mutex); /* HDMI System Disable */ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN); hdmiphy_poweroff(hdata); cancel_delayed_work(&hdata->hotplug_work); clk_disable_unprepare(res->sclk_hdmi); clk_disable_unprepare(res->hdmi); /* reset pmu hdmiphy control bit to disable hdmiphy */ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL, PMU_HDMI_PHY_ENABLE_BIT, 0); regulator_bulk_disable(res->regul_count, res->regul_bulk); pm_runtime_put_sync(hdata->dev); mutex_lock(&hdata->hdmi_mutex); hdata->powered = 
false; out: mutex_unlock(&hdata->hdmi_mutex); } static void hdmi_dpms(struct exynos_drm_display *display, int mode) { struct hdmi_context *hdata = display_to_hdmi(display); struct drm_encoder *encoder = hdata->encoder; struct drm_crtc *crtc = encoder->crtc; const struct drm_crtc_helper_funcs *funcs = NULL; DRM_DEBUG_KMS("mode %d\n", mode); switch (mode) { case DRM_MODE_DPMS_ON: hdmi_poweron(hdata); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: /* * The SFRs of VP and Mixer are updated by Vertical Sync of * Timing generator which is a part of HDMI so the sequence * to disable TV Subsystem should be as following, * VP -> Mixer -> HDMI * * Below codes will try to disable Mixer and VP(if used) * prior to disabling HDMI. */ if (crtc) funcs = crtc->helper_private; if (funcs && funcs->disable) (*funcs->disable)(crtc); hdmi_poweroff(hdata); break; default: DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); break; } } static struct exynos_drm_display_ops hdmi_display_ops = { .create_connector = hdmi_create_connector, .mode_fixup = hdmi_mode_fixup, .mode_set = hdmi_mode_set, .dpms = hdmi_dpms, .commit = hdmi_commit, }; static void hdmi_hotplug_work_func(struct work_struct *work) { struct hdmi_context *hdata; hdata = container_of(work, struct hdmi_context, hotplug_work.work); mutex_lock(&hdata->hdmi_mutex); hdata->hpd = gpio_get_value(hdata->hpd_gpio); mutex_unlock(&hdata->hdmi_mutex); if (hdata->drm_dev) drm_helper_hpd_irq_event(hdata->drm_dev); } static irqreturn_t hdmi_irq_thread(int irq, void *arg) { struct hdmi_context *hdata = arg; mod_delayed_work(system_wq, &hdata->hotplug_work, msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS)); return IRQ_HANDLED; } static int hdmi_resources_init(struct hdmi_context *hdata) { struct device *dev = hdata->dev; struct hdmi_resources *res = &hdata->res; static char *supply[] = { "vdd", "vdd_osc", "vdd_pll", }; int i, ret; DRM_DEBUG_KMS("HDMI resource init\n"); /* get clocks, power */ res->hdmi = 
devm_clk_get(dev, "hdmi"); if (IS_ERR(res->hdmi)) { DRM_ERROR("failed to get clock 'hdmi'\n"); ret = PTR_ERR(res->hdmi); goto fail; } res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi"); if (IS_ERR(res->sclk_hdmi)) { DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); ret = PTR_ERR(res->sclk_hdmi); goto fail; } res->sclk_pixel = devm_clk_get(dev, "sclk_pixel"); if (IS_ERR(res->sclk_pixel)) { DRM_ERROR("failed to get clock 'sclk_pixel'\n"); ret = PTR_ERR(res->sclk_pixel); goto fail; } res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy"); if (IS_ERR(res->sclk_hdmiphy)) { DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); ret = PTR_ERR(res->sclk_hdmiphy); goto fail; } res->mout_hdmi = devm_clk_get(dev, "mout_hdmi"); if (IS_ERR(res->mout_hdmi)) { DRM_ERROR("failed to get clock 'mout_hdmi'\n"); ret = PTR_ERR(res->mout_hdmi); goto fail; } clk_set_parent(res->mout_hdmi, res->sclk_pixel); res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) * sizeof(res->regul_bulk[0]), GFP_KERNEL); if (!res->regul_bulk) { ret = -ENOMEM; goto fail; } for (i = 0; i < ARRAY_SIZE(supply); ++i) { res->regul_bulk[i].supply = supply[i]; res->regul_bulk[i].consumer = NULL; } ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); if (ret) { DRM_ERROR("failed to get regulators\n"); return ret; } res->regul_count = ARRAY_SIZE(supply); res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en"); if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) { DRM_ERROR("failed to get hdmi-en regulator\n"); return PTR_ERR(res->reg_hdmi_en); } if (!IS_ERR(res->reg_hdmi_en)) { ret = regulator_enable(res->reg_hdmi_en); if (ret) { DRM_ERROR("failed to enable hdmi-en regulator\n"); return ret; } } else res->reg_hdmi_en = NULL; return ret; fail: DRM_ERROR("HDMI resource init - failed\n"); return ret; } static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata (struct device *dev) { struct device_node *np = dev->of_node; struct s5p_hdmi_platform_data *pd; u32 value; pd = devm_kzalloc(dev, 
sizeof(*pd), GFP_KERNEL); if (!pd) goto err_data; if (!of_find_property(np, "hpd-gpio", &value)) { DRM_ERROR("no hpd gpio property found\n"); goto err_data; } pd->hpd_gpio = of_get_named_gpio(np, "hpd-gpio", 0); return pd; err_data: return NULL; } static struct of_device_id hdmi_match_types[] = { { .compatible = "samsung,exynos5-hdmi", .data = &exynos5_hdmi_driver_data, }, { .compatible = "samsung,exynos4210-hdmi", .data = &exynos4210_hdmi_driver_data, }, { .compatible = "samsung,exynos4212-hdmi", .data = &exynos4212_hdmi_driver_data, }, { .compatible = "samsung,exynos5420-hdmi", .data = &exynos5420_hdmi_driver_data, }, { /* end node */ } }; MODULE_DEVICE_TABLE (of, hdmi_match_types); static int hdmi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm_dev = data; struct hdmi_context *hdata = dev_get_drvdata(dev); hdata->drm_dev = drm_dev; return exynos_drm_create_enc_conn(drm_dev, &hdata->display); } static void hdmi_unbind(struct device *dev, struct device *master, void *data) { } static const struct component_ops hdmi_component_ops = { .bind = hdmi_bind, .unbind = hdmi_unbind, }; static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev) { const char *compatible_str = "samsung,exynos4210-hdmiddc"; struct device_node *np; np = of_find_compatible_node(NULL, NULL, compatible_str); if (np) return of_get_next_parent(np); return NULL; } static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev) { const char *compatible_str = "samsung,exynos4212-hdmiphy"; return of_find_compatible_node(NULL, NULL, compatible_str); } static int hdmi_probe(struct platform_device *pdev) { struct device_node *ddc_node, *phy_node; struct s5p_hdmi_platform_data *pdata; struct hdmi_driver_data *drv_data; const struct of_device_id *match; struct device *dev = &pdev->dev; struct hdmi_context *hdata; struct resource *res; int ret; if (!dev->of_node) return -ENODEV; pdata = drm_hdmi_dt_parse_pdata(dev); if (!pdata) return -EINVAL; 
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); if (!hdata) return -ENOMEM; hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI; hdata->display.ops = &hdmi_display_ops; mutex_init(&hdata->hdmi_mutex); platform_set_drvdata(pdev, hdata); match = of_match_node(hdmi_match_types, dev->of_node); if (!match) return -ENODEV; drv_data = (struct hdmi_driver_data *)match->data; hdata->type = drv_data->type; hdata->phy_confs = drv_data->phy_confs; hdata->phy_conf_count = drv_data->phy_conf_count; hdata->hpd_gpio = pdata->hpd_gpio; hdata->dev = dev; ret = hdmi_resources_init(hdata); if (ret) { DRM_ERROR("hdmi_resources_init failed\n"); return ret; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdata->regs = devm_ioremap_resource(dev, res); if (IS_ERR(hdata->regs)) { ret = PTR_ERR(hdata->regs); return ret; } ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD"); if (ret) { DRM_ERROR("failed to request HPD gpio\n"); return ret; } ddc_node = hdmi_legacy_ddc_dt_binding(dev); if (ddc_node) goto out_get_ddc_adpt; /* DDC i2c driver */ ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); if (!ddc_node) { DRM_ERROR("Failed to find ddc node in device tree\n"); return -ENODEV; } out_get_ddc_adpt: hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); if (!hdata->ddc_adpt) { DRM_ERROR("Failed to get ddc i2c adapter by node\n"); return -EPROBE_DEFER; } phy_node = hdmi_legacy_phy_dt_binding(dev); if (phy_node) goto out_get_phy_port; /* hdmiphy i2c driver */ phy_node = of_parse_phandle(dev->of_node, "phy", 0); if (!phy_node) { DRM_ERROR("Failed to find hdmiphy node in device tree\n"); ret = -ENODEV; goto err_ddc; } out_get_phy_port: if (drv_data->is_apb_phy) { hdata->regs_hdmiphy = of_iomap(phy_node, 0); if (!hdata->regs_hdmiphy) { DRM_ERROR("failed to ioremap hdmi phy\n"); ret = -ENOMEM; goto err_ddc; } } else { hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node); if (!hdata->hdmiphy_port) { DRM_ERROR("Failed to get hdmi phy i2c client\n"); ret = 
-EPROBE_DEFER; goto err_ddc; } } hdata->irq = gpio_to_irq(hdata->hpd_gpio); if (hdata->irq < 0) { DRM_ERROR("failed to get GPIO irq\n"); ret = hdata->irq; goto err_hdmiphy; } hdata->hpd = gpio_get_value(hdata->hpd_gpio); INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); ret = devm_request_threaded_irq(dev, hdata->irq, NULL, hdmi_irq_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "hdmi", hdata); if (ret) { DRM_ERROR("failed to register hdmi interrupt\n"); goto err_hdmiphy; } hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node, "samsung,syscon-phandle"); if (IS_ERR(hdata->pmureg)) { DRM_ERROR("syscon regmap lookup failed.\n"); ret = -EPROBE_DEFER; goto err_hdmiphy; } pm_runtime_enable(dev); ret = component_add(&pdev->dev, &hdmi_component_ops); if (ret) goto err_disable_pm_runtime; return ret; err_disable_pm_runtime: pm_runtime_disable(dev); err_hdmiphy: if (hdata->hdmiphy_port) put_device(&hdata->hdmiphy_port->dev); err_ddc: put_device(&hdata->ddc_adpt->dev); return ret; } static int hdmi_remove(struct platform_device *pdev) { struct hdmi_context *hdata = platform_get_drvdata(pdev); cancel_delayed_work_sync(&hdata->hotplug_work); if (hdata->res.reg_hdmi_en) regulator_disable(hdata->res.reg_hdmi_en); if (hdata->hdmiphy_port) put_device(&hdata->hdmiphy_port->dev); put_device(&hdata->ddc_adpt->dev); pm_runtime_disable(&pdev->dev); component_del(&pdev->dev, &hdmi_component_ops); return 0; } struct platform_driver hdmi_driver = { .probe = hdmi_probe, .remove = hdmi_remove, .driver = { .name = "exynos-hdmi", .owner = THIS_MODULE, .of_match_table = hdmi_match_types, }, };
gpl-2.0
michael2012z/myKernel
arch/sparc/kernel/leon_pmc.c
350
2248
// SPDX-License-Identifier: GPL-2.0
/* leon_pmc.c: LEON Power-down cpu_idle() handler
 *
 * Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 */

#include <linux/init.h>
#include <linux/pm.h>

#include <asm/leon_amba.h>
#include <asm/cpu_type.h>
#include <asm/leon.h>
#include <asm/processor.h>

/* List of Systems that need fixup instructions around power-down instruction.
 * Zero-terminated: compared against the upper 16 bits of amba_system_id.
 */
static unsigned int pmc_leon_fixup_ids[] = {
	AEROFLEX_UT699,
	GAISLER_GR712RC,
	LEON4_NEXTREME1,
	0
};

/* Return 1 if the running system's AMBA system ID is in the fixup list
 * above (i.e. it needs the extra bus-read workaround around the
 * power-down instruction), 0 otherwise.
 */
static int pmc_leon_need_fixup(void)
{
	unsigned int systemid = amba_system_id >> 16;
	unsigned int *id;

	id = &pmc_leon_fixup_ids[0];
	while (*id != 0) {
		if (*id == systemid)
			return 1;
		id++;
	}

	return 0;
}

/*
 * CPU idle callback function for systems that need some extra handling
 * See .../arch/sparc/kernel/process.c
 */
static void pmc_leon_idle_fixup(void)
{
	/* Prepare an address to a non-cachable region. APB is always
	 * non-cachable. One instruction is executed after the Sleep
	 * instruction, we make sure to read the bus and throw away the
	 * value by accessing a non-cachable area, also we make sure the
	 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
	 */
	register unsigned int address = (unsigned int)leon3_irqctrl_regs;

	/* Interrupts need to be enabled to not hang the CPU */
	local_irq_enable();

	/* Write %asr19 to enter power-down, then immediately perform a
	 * dummy load through the MMU-bypass ASI so the instruction after
	 * the sleep only touches the (non-cachable) bus and its result is
	 * discarded into %g0.
	 */
	__asm__ __volatile__ (
		"wr %%g0, %%asr19\n"
		"lda [%0] %1, %%g0\n"
		:
		: "r"(address), "i"(ASI_LEON_BYPASS));
}

/*
 * CPU idle callback function
 * See .../arch/sparc/kernel/process.c
 */
static void pmc_leon_idle(void)
{
	/* Interrupts need to be enabled to not hang the CPU */
	local_irq_enable();

	/* For systems without power-down, this will be no-op */
	__asm__ __volatile__ ("wr %g0, %asr19\n\t");
}

/* Install LEON Power Down function.
 * Only hooks sparc_idle when actually running on a LEON CPU model;
 * picks the fixup variant for chips listed in pmc_leon_fixup_ids.
 * Always returns 0 (initcalls treat non-zero as failure; nothing here
 * can fail).
 */
static int __init leon_pmc_install(void)
{
	if (sparc_cpu_model == sparc_leon) {
		/* Assign power management IDLE handler */
		if (pmc_leon_need_fixup())
			sparc_idle = pmc_leon_idle_fixup;
		else
			sparc_idle = pmc_leon_idle;

		printk(KERN_INFO "leon: power management initialized\n");
	}

	return 0;
}

/* This driver is not critical to the boot process, don't care
 * if initialized late.
 */
late_initcall(leon_pmc_install);
gpl-2.0
full-of-foo/linux
sound/pci/als4000.c
606
32136
/* * card-als4000.c - driver for Avance Logic ALS4000 based soundcards. * Copyright (C) 2000 by Bart Hartgers <bart@etpmod.phys.tue.nl>, * Jaroslav Kysela <perex@perex.cz> * Copyright (C) 2002, 2008 by Andreas Mohr <hw7oshyuv3001@sneakemail.com> * * Framework borrowed from Massimo Piccioni's card-als100.c. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * NOTES * * Since Avance does not provide any meaningful documentation, and I * bought an ALS4000 based soundcard, I was forced to base this driver * on reverse engineering. * * Note: this is no longer true (thank you!): * pretty verbose chip docu (ALS4000a.PDF) can be found on the ALSA web site. * Page numbers stated anywhere below with the "SPECS_PAGE:" tag * refer to: ALS4000a.PDF specs Ver 1.0, May 28th, 1998. * * The ALS4000 seems to be the PCI-cousin of the ALS100. It contains an * ALS100-like SB DSP/mixer, an OPL3 synth, a MPU401 and a gameport * interface. These subsystems can be mapped into ISA io-port space, * using the PCI-interface. In addition, the PCI-bit provides DMA and IRQ * services to the subsystems. * * While ALS4000 is very similar to a SoundBlaster, the differences in * DMA and capturing require more changes to the SoundBlaster than * desirable, so I made this separate driver. * * The ALS4000 can do real full duplex playback/capture. 
* * FMDAC: * - 0x4f -> port 0x14 * - port 0x15 |= 1 * * Enable/disable 3D sound: * - 0x50 -> port 0x14 * - change bit 6 (0x40) of port 0x15 * * Set QSound: * - 0xdb -> port 0x14 * - set port 0x15: * 0x3e (mode 3), 0x3c (mode 2), 0x3a (mode 1), 0x38 (mode 0) * * Set KSound: * - value -> some port 0x0c0d * * ToDo: * - by default, don't enable legacy game and use PCI game I/O * - power management? (card can do voice wakeup according to datasheet!!) */ #include <asm/io.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/sb.h> #include <sound/initval.h> MODULE_AUTHOR("Bart Hartgers <bart@etpmod.phys.tue.nl>, Andreas Mohr"); MODULE_DESCRIPTION("Avance Logic ALS4000"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS4000}}"); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ #ifdef SUPPORT_JOYSTICK static int joystick_port[SNDRV_CARDS]; #endif module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for ALS4000 soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for ALS4000 soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable ALS4000 soundcard."); #ifdef SUPPORT_JOYSTICK module_param_array(joystick_port, int, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port address for ALS4000 soundcard. 
(0 = disabled)"); #endif struct snd_card_als4000 { /* most frequent access first */ unsigned long iobase; struct pci_dev *pci; struct snd_sb *chip; #ifdef SUPPORT_JOYSTICK struct gameport *gameport; #endif }; static struct pci_device_id snd_als4000_ids[] = { { 0x4005, 0x4000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, /* ALS4000 */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_als4000_ids); enum als4k_iobase_t { /* IOx: B == Byte, W = Word, D = DWord; SPECS_PAGE: 37 */ ALS4K_IOD_00_AC97_ACCESS = 0x00, ALS4K_IOW_04_AC97_READ = 0x04, ALS4K_IOB_06_AC97_STATUS = 0x06, ALS4K_IOB_07_IRQSTATUS = 0x07, ALS4K_IOD_08_GCR_DATA = 0x08, ALS4K_IOB_0C_GCR_INDEX = 0x0c, ALS4K_IOB_0E_IRQTYPE_SB_CR1E_MPU = 0x0e, ALS4K_IOB_10_ADLIB_ADDR0 = 0x10, ALS4K_IOB_11_ADLIB_ADDR1 = 0x11, ALS4K_IOB_12_ADLIB_ADDR2 = 0x12, ALS4K_IOB_13_ADLIB_ADDR3 = 0x13, ALS4K_IOB_14_MIXER_INDEX = 0x14, ALS4K_IOB_15_MIXER_DATA = 0x15, ALS4K_IOB_16_ESP_RESET = 0x16, ALS4K_IOB_16_ACK_FOR_CR1E = 0x16, /* 2nd function */ ALS4K_IOB_18_OPL_ADDR0 = 0x18, ALS4K_IOB_19_OPL_ADDR1 = 0x19, ALS4K_IOB_1A_ESP_RD_DATA = 0x1a, ALS4K_IOB_1C_ESP_CMD_DATA = 0x1c, ALS4K_IOB_1C_ESP_WR_STATUS = 0x1c, /* 2nd function */ ALS4K_IOB_1E_ESP_RD_STATUS8 = 0x1e, ALS4K_IOB_1F_ESP_RD_STATUS16 = 0x1f, ALS4K_IOB_20_ESP_GAMEPORT_200 = 0x20, ALS4K_IOB_21_ESP_GAMEPORT_201 = 0x21, ALS4K_IOB_30_MIDI_DATA = 0x30, ALS4K_IOB_31_MIDI_STATUS = 0x31, ALS4K_IOB_31_MIDI_COMMAND = 0x31, /* 2nd function */ }; enum als4k_iobase_0e_t { ALS4K_IOB_0E_MPU_IRQ = 0x10, ALS4K_IOB_0E_CR1E_IRQ = 0x40, ALS4K_IOB_0E_SB_DMA_IRQ = 0x80, }; enum als4k_gcr_t { /* all registers 32bit wide; SPECS_PAGE: 38 to 42 */ ALS4K_GCR8C_MISC_CTRL = 0x8c, ALS4K_GCR90_TEST_MODE_REG = 0x90, ALS4K_GCR91_DMA0_ADDR = 0x91, ALS4K_GCR92_DMA0_MODE_COUNT = 0x92, ALS4K_GCR93_DMA1_ADDR = 0x93, ALS4K_GCR94_DMA1_MODE_COUNT = 0x94, ALS4K_GCR95_DMA3_ADDR = 0x95, ALS4K_GCR96_DMA3_MODE_COUNT = 0x96, ALS4K_GCR99_DMA_EMULATION_CTRL = 0x99, ALS4K_GCRA0_FIFO1_CURRENT_ADDR = 0xa0, ALS4K_GCRA1_FIFO1_STATUS_BYTECOUNT = 0xa1, 
ALS4K_GCRA2_FIFO2_PCIADDR = 0xa2, ALS4K_GCRA3_FIFO2_COUNT = 0xa3, ALS4K_GCRA4_FIFO2_CURRENT_ADDR = 0xa4, ALS4K_GCRA5_FIFO1_STATUS_BYTECOUNT = 0xa5, ALS4K_GCRA6_PM_CTRL = 0xa6, ALS4K_GCRA7_PCI_ACCESS_STORAGE = 0xa7, ALS4K_GCRA8_LEGACY_CFG1 = 0xa8, ALS4K_GCRA9_LEGACY_CFG2 = 0xa9, ALS4K_GCRFF_DUMMY_SCRATCH = 0xff, }; enum als4k_gcr8c_t { ALS4K_GCR8C_IRQ_MASK_CTRL_ENABLE = 0x8000, ALS4K_GCR8C_CHIP_REV_MASK = 0xf0000 }; static inline void snd_als4k_iobase_writeb(unsigned long iobase, enum als4k_iobase_t reg, u8 val) { outb(val, iobase + reg); } static inline void snd_als4k_iobase_writel(unsigned long iobase, enum als4k_iobase_t reg, u32 val) { outl(val, iobase + reg); } static inline u8 snd_als4k_iobase_readb(unsigned long iobase, enum als4k_iobase_t reg) { return inb(iobase + reg); } static inline u32 snd_als4k_iobase_readl(unsigned long iobase, enum als4k_iobase_t reg) { return inl(iobase + reg); } static inline void snd_als4k_gcr_write_addr(unsigned long iobase, enum als4k_gcr_t reg, u32 val) { snd_als4k_iobase_writeb(iobase, ALS4K_IOB_0C_GCR_INDEX, reg); snd_als4k_iobase_writel(iobase, ALS4K_IOD_08_GCR_DATA, val); } static inline void snd_als4k_gcr_write(struct snd_sb *sb, enum als4k_gcr_t reg, u32 val) { snd_als4k_gcr_write_addr(sb->alt_port, reg, val); } static inline u32 snd_als4k_gcr_read_addr(unsigned long iobase, enum als4k_gcr_t reg) { /* SPECS_PAGE: 37/38 */ snd_als4k_iobase_writeb(iobase, ALS4K_IOB_0C_GCR_INDEX, reg); return snd_als4k_iobase_readl(iobase, ALS4K_IOD_08_GCR_DATA); } static inline u32 snd_als4k_gcr_read(struct snd_sb *sb, enum als4k_gcr_t reg) { return snd_als4k_gcr_read_addr(sb->alt_port, reg); } enum als4k_cr_t { /* all registers 8bit wide; SPECS_PAGE: 20 to 23 */ ALS4K_CR0_SB_CONFIG = 0x00, ALS4K_CR2_MISC_CONTROL = 0x02, ALS4K_CR3_CONFIGURATION = 0x03, ALS4K_CR17_FIFO_STATUS = 0x17, ALS4K_CR18_ESP_MAJOR_VERSION = 0x18, ALS4K_CR19_ESP_MINOR_VERSION = 0x19, ALS4K_CR1A_MPU401_UART_MODE_CONTROL = 0x1a, ALS4K_CR1C_FIFO2_BLOCK_LENGTH_LO = 0x1c, 
ALS4K_CR1D_FIFO2_BLOCK_LENGTH_HI = 0x1d, ALS4K_CR1E_FIFO2_CONTROL = 0x1e, /* secondary PCM FIFO (recording) */ ALS4K_CR3A_MISC_CONTROL = 0x3a, ALS4K_CR3B_CRC32_BYTE0 = 0x3b, /* for testing, activate via CR3A */ ALS4K_CR3C_CRC32_BYTE1 = 0x3c, ALS4K_CR3D_CRC32_BYTE2 = 0x3d, ALS4K_CR3E_CRC32_BYTE3 = 0x3e, }; enum als4k_cr0_t { ALS4K_CR0_DMA_CONTIN_MODE_CTRL = 0x02, /* IRQ/FIFO controlled for 0/1 */ ALS4K_CR0_DMA_90H_MODE_CTRL = 0x04, /* IRQ/FIFO controlled for 0/1 */ ALS4K_CR0_MX80_81_REG_WRITE_ENABLE = 0x80, }; static inline void snd_als4_cr_write(struct snd_sb *chip, enum als4k_cr_t reg, u8 data) { /* Control Register is reg | 0xc0 (bit 7, 6 set) on sbmixer_index * NOTE: assumes chip->mixer_lock to be locked externally already! * SPECS_PAGE: 6 */ snd_sbmixer_write(chip, reg | 0xc0, data); } static inline u8 snd_als4_cr_read(struct snd_sb *chip, enum als4k_cr_t reg) { /* NOTE: assumes chip->mixer_lock to be locked externally already! */ return snd_sbmixer_read(chip, reg | 0xc0); } static void snd_als4000_set_rate(struct snd_sb *chip, unsigned int rate) { if (!(chip->mode & SB_RATE_LOCK)) { snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE_OUT); snd_sbdsp_command(chip, rate>>8); snd_sbdsp_command(chip, rate); } } static inline void snd_als4000_set_capture_dma(struct snd_sb *chip, dma_addr_t addr, unsigned size) { /* SPECS_PAGE: 40 */ snd_als4k_gcr_write(chip, ALS4K_GCRA2_FIFO2_PCIADDR, addr); snd_als4k_gcr_write(chip, ALS4K_GCRA3_FIFO2_COUNT, (size-1)); } static inline void snd_als4000_set_playback_dma(struct snd_sb *chip, dma_addr_t addr, unsigned size) { /* SPECS_PAGE: 38 */ snd_als4k_gcr_write(chip, ALS4K_GCR91_DMA0_ADDR, addr); snd_als4k_gcr_write(chip, ALS4K_GCR92_DMA0_MODE_COUNT, (size-1)|0x180000); } #define ALS4000_FORMAT_SIGNED (1<<0) #define ALS4000_FORMAT_16BIT (1<<1) #define ALS4000_FORMAT_STEREO (1<<2) static int snd_als4000_get_format(struct snd_pcm_runtime *runtime) { int result; result = 0; if (snd_pcm_format_signed(runtime->format)) result |= 
ALS4000_FORMAT_SIGNED; if (snd_pcm_format_physical_width(runtime->format) == 16) result |= ALS4000_FORMAT_16BIT; if (runtime->channels > 1) result |= ALS4000_FORMAT_STEREO; return result; } /* structure for setting up playback */ static const struct { unsigned char dsp_cmd, dma_on, dma_off, format; } playback_cmd_vals[]={ /* ALS4000_FORMAT_U8_MONO */ { SB_DSP4_OUT8_AI, SB_DSP_DMA8_ON, SB_DSP_DMA8_OFF, SB_DSP4_MODE_UNS_MONO }, /* ALS4000_FORMAT_S8_MONO */ { SB_DSP4_OUT8_AI, SB_DSP_DMA8_ON, SB_DSP_DMA8_OFF, SB_DSP4_MODE_SIGN_MONO }, /* ALS4000_FORMAT_U16L_MONO */ { SB_DSP4_OUT16_AI, SB_DSP_DMA16_ON, SB_DSP_DMA16_OFF, SB_DSP4_MODE_UNS_MONO }, /* ALS4000_FORMAT_S16L_MONO */ { SB_DSP4_OUT16_AI, SB_DSP_DMA16_ON, SB_DSP_DMA16_OFF, SB_DSP4_MODE_SIGN_MONO }, /* ALS4000_FORMAT_U8_STEREO */ { SB_DSP4_OUT8_AI, SB_DSP_DMA8_ON, SB_DSP_DMA8_OFF, SB_DSP4_MODE_UNS_STEREO }, /* ALS4000_FORMAT_S8_STEREO */ { SB_DSP4_OUT8_AI, SB_DSP_DMA8_ON, SB_DSP_DMA8_OFF, SB_DSP4_MODE_SIGN_STEREO }, /* ALS4000_FORMAT_U16L_STEREO */ { SB_DSP4_OUT16_AI, SB_DSP_DMA16_ON, SB_DSP_DMA16_OFF, SB_DSP4_MODE_UNS_STEREO }, /* ALS4000_FORMAT_S16L_STEREO */ { SB_DSP4_OUT16_AI, SB_DSP_DMA16_ON, SB_DSP_DMA16_OFF, SB_DSP4_MODE_SIGN_STEREO }, }; #define playback_cmd(chip) (playback_cmd_vals[(chip)->playback_format]) /* structure for setting up capture */ enum { CMD_WIDTH8=0x04, CMD_SIGNED=0x10, CMD_MONO=0x80, CMD_STEREO=0xA0 }; static const unsigned char capture_cmd_vals[]= { CMD_WIDTH8|CMD_MONO, /* ALS4000_FORMAT_U8_MONO */ CMD_WIDTH8|CMD_SIGNED|CMD_MONO, /* ALS4000_FORMAT_S8_MONO */ CMD_MONO, /* ALS4000_FORMAT_U16L_MONO */ CMD_SIGNED|CMD_MONO, /* ALS4000_FORMAT_S16L_MONO */ CMD_WIDTH8|CMD_STEREO, /* ALS4000_FORMAT_U8_STEREO */ CMD_WIDTH8|CMD_SIGNED|CMD_STEREO, /* ALS4000_FORMAT_S8_STEREO */ CMD_STEREO, /* ALS4000_FORMAT_U16L_STEREO */ CMD_SIGNED|CMD_STEREO, /* ALS4000_FORMAT_S16L_STEREO */ }; #define capture_cmd(chip) (capture_cmd_vals[(chip)->capture_format]) static int snd_als4000_hw_params(struct 
snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_als4000_hw_free(struct snd_pcm_substream *substream) { snd_pcm_lib_free_pages(substream); return 0; } static int snd_als4000_capture_prepare(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned long size; unsigned count; chip->capture_format = snd_als4000_get_format(runtime); size = snd_pcm_lib_buffer_bytes(substream); count = snd_pcm_lib_period_bytes(substream); if (chip->capture_format & ALS4000_FORMAT_16BIT) count >>= 1; count--; spin_lock_irq(&chip->reg_lock); snd_als4000_set_rate(chip, runtime->rate); snd_als4000_set_capture_dma(chip, runtime->dma_addr, size); spin_unlock_irq(&chip->reg_lock); spin_lock_irq(&chip->mixer_lock); snd_als4_cr_write(chip, ALS4K_CR1C_FIFO2_BLOCK_LENGTH_LO, count & 0xff); snd_als4_cr_write(chip, ALS4K_CR1D_FIFO2_BLOCK_LENGTH_HI, count >> 8); spin_unlock_irq(&chip->mixer_lock); return 0; } static int snd_als4000_playback_prepare(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned long size; unsigned count; chip->playback_format = snd_als4000_get_format(runtime); size = snd_pcm_lib_buffer_bytes(substream); count = snd_pcm_lib_period_bytes(substream); if (chip->playback_format & ALS4000_FORMAT_16BIT) count >>= 1; count--; /* FIXME: from second playback on, there's a lot more clicks and pops * involved here than on first playback. Fiddling with * tons of different settings didn't help (DMA, speaker on/off, * reordering, ...). Something seems to get enabled on playback * that I haven't found out how to disable again, which then causes * the switching pops to reach the speakers the next time here. 
*/ spin_lock_irq(&chip->reg_lock); snd_als4000_set_rate(chip, runtime->rate); snd_als4000_set_playback_dma(chip, runtime->dma_addr, size); /* SPEAKER_ON not needed, since dma_on seems to also enable speaker */ /* snd_sbdsp_command(chip, SB_DSP_SPEAKER_ON); */ snd_sbdsp_command(chip, playback_cmd(chip).dsp_cmd); snd_sbdsp_command(chip, playback_cmd(chip).format); snd_sbdsp_command(chip, count & 0xff); snd_sbdsp_command(chip, count >> 8); snd_sbdsp_command(chip, playback_cmd(chip).dma_off); spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_als4000_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_sb *chip = snd_pcm_substream_chip(substream); int result = 0; /* FIXME race condition in here!!! chip->mode non-atomic update gets consistently protected by reg_lock always, _except_ for this place!! Probably need to take reg_lock as outer (or inner??) lock, too. (or serialize both lock operations? probably not, though... - racy?) */ spin_lock(&chip->mixer_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: chip->mode |= SB_RATE_LOCK_CAPTURE; snd_als4_cr_write(chip, ALS4K_CR1E_FIFO2_CONTROL, capture_cmd(chip)); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: chip->mode &= ~SB_RATE_LOCK_CAPTURE; snd_als4_cr_write(chip, ALS4K_CR1E_FIFO2_CONTROL, capture_cmd(chip)); break; default: result = -EINVAL; break; } spin_unlock(&chip->mixer_lock); return result; } static int snd_als4000_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_sb *chip = snd_pcm_substream_chip(substream); int result = 0; spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: chip->mode |= SB_RATE_LOCK_PLAYBACK; snd_sbdsp_command(chip, playback_cmd(chip).dma_on); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: snd_sbdsp_command(chip, playback_cmd(chip).dma_off); chip->mode &= ~SB_RATE_LOCK_PLAYBACK; break; default: result = -EINVAL; 
break; } spin_unlock(&chip->reg_lock); return result; } static snd_pcm_uframes_t snd_als4000_capture_pointer(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); unsigned int result; spin_lock(&chip->reg_lock); result = snd_als4k_gcr_read(chip, ALS4K_GCRA4_FIFO2_CURRENT_ADDR); spin_unlock(&chip->reg_lock); result &= 0xffff; return bytes_to_frames( substream->runtime, result ); } static snd_pcm_uframes_t snd_als4000_playback_pointer(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); unsigned result; spin_lock(&chip->reg_lock); result = snd_als4k_gcr_read(chip, ALS4K_GCRA0_FIFO1_CURRENT_ADDR); spin_unlock(&chip->reg_lock); result &= 0xffff; return bytes_to_frames( substream->runtime, result ); } /* FIXME: this IRQ routine doesn't really support IRQ sharing (we always * return IRQ_HANDLED no matter whether we actually had an IRQ flag or not). * ALS4000a.PDF writes that while ACKing IRQ in PCI block will *not* ACK * the IRQ in the SB core, ACKing IRQ in SB block *will* ACK the PCI IRQ * register (alt_port + ALS4K_IOB_0E_IRQTYPE_SB_CR1E_MPU). Probably something * could be optimized here to query/write one register only... * And even if both registers need to be queried, then there's still the * question of whether it's actually correct to ACK PCI IRQ before reading * SB IRQ like we do now, since ALS4000a.PDF mentions that PCI IRQ will *clear* * SB IRQ status. * (hmm, SPECS_PAGE: 38 mentions it the other way around!) * And do we *really* need the lock here for *reading* SB_DSP4_IRQSTATUS?? 
* */ static irqreturn_t snd_als4000_interrupt(int irq, void *dev_id) { struct snd_sb *chip = dev_id; unsigned pci_irqstatus; unsigned sb_irqstatus; /* find out which bit of the ALS4000 PCI block produced the interrupt, SPECS_PAGE: 38, 5 */ pci_irqstatus = snd_als4k_iobase_readb(chip->alt_port, ALS4K_IOB_0E_IRQTYPE_SB_CR1E_MPU); if ((pci_irqstatus & ALS4K_IOB_0E_SB_DMA_IRQ) && (chip->playback_substream)) /* playback */ snd_pcm_period_elapsed(chip->playback_substream); if ((pci_irqstatus & ALS4K_IOB_0E_CR1E_IRQ) && (chip->capture_substream)) /* capturing */ snd_pcm_period_elapsed(chip->capture_substream); if ((pci_irqstatus & ALS4K_IOB_0E_MPU_IRQ) && (chip->rmidi)) /* MPU401 interrupt */ snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data); /* ACK the PCI block IRQ */ snd_als4k_iobase_writeb(chip->alt_port, ALS4K_IOB_0E_IRQTYPE_SB_CR1E_MPU, pci_irqstatus); spin_lock(&chip->mixer_lock); /* SPECS_PAGE: 20 */ sb_irqstatus = snd_sbmixer_read(chip, SB_DSP4_IRQSTATUS); spin_unlock(&chip->mixer_lock); if (sb_irqstatus & SB_IRQTYPE_8BIT) snd_sb_ack_8bit(chip); if (sb_irqstatus & SB_IRQTYPE_16BIT) snd_sb_ack_16bit(chip); if (sb_irqstatus & SB_IRQTYPE_MPUIN) inb(chip->mpu_port); if (sb_irqstatus & ALS4K_IRQTYPE_CR1E_DMA) snd_als4k_iobase_readb(chip->alt_port, ALS4K_IOB_16_ACK_FOR_CR1E); /* printk(KERN_INFO "als4000: irq 0x%04x 0x%04x\n", pci_irqstatus, sb_irqstatus); */ /* only ack the things we actually handled above */ return IRQ_RETVAL( (pci_irqstatus & (ALS4K_IOB_0E_SB_DMA_IRQ|ALS4K_IOB_0E_CR1E_IRQ| ALS4K_IOB_0E_MPU_IRQ)) || (sb_irqstatus & (SB_IRQTYPE_8BIT|SB_IRQTYPE_16BIT| SB_IRQTYPE_MPUIN|ALS4K_IRQTYPE_CR1E_DMA)) ); } /*****************************************************************/ static struct snd_pcm_hardware snd_als4000_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE, /* formats */ .rates = 
SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 65536, .period_bytes_min = 64, .period_bytes_max = 65536, .periods_min = 1, .periods_max = 1024, .fifo_size = 0 }; static struct snd_pcm_hardware snd_als4000_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE, /* formats */ .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 4000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 65536, .period_bytes_min = 64, .period_bytes_max = 65536, .periods_min = 1, .periods_max = 1024, .fifo_size = 0 }; /*****************************************************************/ static int snd_als4000_playback_open(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; chip->playback_substream = substream; runtime->hw = snd_als4000_playback; return 0; } static int snd_als4000_playback_close(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); chip->playback_substream = NULL; snd_pcm_lib_free_pages(substream); return 0; } static int snd_als4000_capture_open(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; chip->capture_substream = substream; runtime->hw = snd_als4000_capture; return 0; } static int snd_als4000_capture_close(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); chip->capture_substream = NULL; snd_pcm_lib_free_pages(substream); return 0; } /******************************************************************/ static struct snd_pcm_ops snd_als4000_playback_ops = { .open = snd_als4000_playback_open, 
.close = snd_als4000_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_als4000_hw_params, .hw_free = snd_als4000_hw_free, .prepare = snd_als4000_playback_prepare, .trigger = snd_als4000_playback_trigger, .pointer = snd_als4000_playback_pointer }; static struct snd_pcm_ops snd_als4000_capture_ops = { .open = snd_als4000_capture_open, .close = snd_als4000_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_als4000_hw_params, .hw_free = snd_als4000_hw_free, .prepare = snd_als4000_capture_prepare, .trigger = snd_als4000_capture_trigger, .pointer = snd_als4000_capture_pointer }; static int __devinit snd_als4000_pcm(struct snd_sb *chip, int device) { struct snd_pcm *pcm; int err; err = snd_pcm_new(chip->card, "ALS4000 DSP", device, 1, 1, &pcm); if (err < 0) return err; pcm->private_data = chip; pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_als4000_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_als4000_capture_ops); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 64*1024); chip->pcm = pcm; return 0; } /******************************************************************/ static void snd_als4000_set_addr(unsigned long iobase, unsigned int sb_io, unsigned int mpu_io, unsigned int opl_io, unsigned int game_io) { u32 cfg1 = 0; u32 cfg2 = 0; if (mpu_io > 0) cfg2 |= (mpu_io | 1) << 16; if (sb_io > 0) cfg2 |= (sb_io | 1); if (game_io > 0) cfg1 |= (game_io | 1) << 16; if (opl_io > 0) cfg1 |= (opl_io | 1); snd_als4k_gcr_write_addr(iobase, ALS4K_GCRA8_LEGACY_CFG1, cfg1); snd_als4k_gcr_write_addr(iobase, ALS4K_GCRA9_LEGACY_CFG2, cfg2); } static void snd_als4000_configure(struct snd_sb *chip) { u8 tmp; int i; /* do some more configuration */ spin_lock_irq(&chip->mixer_lock); tmp = snd_als4_cr_read(chip, ALS4K_CR0_SB_CONFIG); snd_als4_cr_write(chip, ALS4K_CR0_SB_CONFIG, tmp|ALS4K_CR0_MX80_81_REG_WRITE_ENABLE); /* always select DMA channel 0, 
since we do not actually use DMA * SPECS_PAGE: 19/20 */ snd_sbmixer_write(chip, SB_DSP4_DMASETUP, SB_DMASETUP_DMA0); snd_als4_cr_write(chip, ALS4K_CR0_SB_CONFIG, tmp & ~ALS4K_CR0_MX80_81_REG_WRITE_ENABLE); spin_unlock_irq(&chip->mixer_lock); spin_lock_irq(&chip->reg_lock); /* enable interrupts */ snd_als4k_gcr_write(chip, ALS4K_GCR8C_MISC_CTRL, ALS4K_GCR8C_IRQ_MASK_CTRL_ENABLE); /* SPECS_PAGE: 39 */ for (i = ALS4K_GCR91_DMA0_ADDR; i <= ALS4K_GCR96_DMA3_MODE_COUNT; ++i) snd_als4k_gcr_write(chip, i, 0); snd_als4k_gcr_write(chip, ALS4K_GCR99_DMA_EMULATION_CTRL, snd_als4k_gcr_read(chip, ALS4K_GCR99_DMA_EMULATION_CTRL)); spin_unlock_irq(&chip->reg_lock); } #ifdef SUPPORT_JOYSTICK static int __devinit snd_als4000_create_gameport(struct snd_card_als4000 *acard, int dev) { struct gameport *gp; struct resource *r; int io_port; if (joystick_port[dev] == 0) return -ENODEV; if (joystick_port[dev] == 1) { /* auto-detect */ for (io_port = 0x200; io_port <= 0x218; io_port += 8) { r = request_region(io_port, 8, "ALS4000 gameport"); if (r) break; } } else { io_port = joystick_port[dev]; r = request_region(io_port, 8, "ALS4000 gameport"); } if (!r) { printk(KERN_WARNING "als4000: cannot reserve joystick ports\n"); return -EBUSY; } acard->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "als4000: cannot allocate memory for gameport\n"); release_and_free_resource(r); return -ENOMEM; } gameport_set_name(gp, "ALS4000 Gameport"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(acard->pci)); gameport_set_dev_parent(gp, &acard->pci->dev); gp->io = io_port; gameport_set_port_data(gp, r); /* Enable legacy joystick port */ snd_als4000_set_addr(acard->iobase, 0, 0, 0, 1); gameport_register_port(acard->gameport); return 0; } static void snd_als4000_free_gameport(struct snd_card_als4000 *acard) { if (acard->gameport) { struct resource *r = gameport_get_port_data(acard->gameport); gameport_unregister_port(acard->gameport); acard->gameport = NULL; /* disable joystick */ 
snd_als4000_set_addr(acard->iobase, 0, 0, 0, 0); release_and_free_resource(r); } } #else static inline int snd_als4000_create_gameport(struct snd_card_als4000 *acard, int dev) { return -ENOSYS; } static inline void snd_als4000_free_gameport(struct snd_card_als4000 *acard) { } #endif static void snd_card_als4000_free( struct snd_card *card ) { struct snd_card_als4000 *acard = card->private_data; /* make sure that interrupts are disabled */ snd_als4k_gcr_write_addr(acard->iobase, ALS4K_GCR8C_MISC_CTRL, 0); /* free resources */ snd_als4000_free_gameport(acard); pci_release_regions(acard->pci); pci_disable_device(acard->pci); } static int __devinit snd_card_als4000_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_card_als4000 *acard; unsigned long iobase; struct snd_sb *chip; struct snd_opl3 *opl3; unsigned short word; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) { return err; } /* check, if we can restrict PCI DMA transfers to 24 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(24)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(24)) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } if ((err = pci_request_regions(pci, "ALS4000")) < 0) { pci_disable_device(pci); return err; } iobase = pci_resource_start(pci, 0); pci_read_config_word(pci, PCI_COMMAND, &word); pci_write_config_word(pci, PCI_COMMAND, word | PCI_COMMAND_IO); pci_set_master(pci); err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(*acard) /* private_data: acard */, &card); if (err < 0) { pci_release_regions(pci); pci_disable_device(pci); return err; } acard = card->private_data; acard->pci = pci; acard->iobase = iobase; card->private_free = snd_card_als4000_free; /* disable all legacy ISA stuff */ snd_als4000_set_addr(acard->iobase, 0, 0, 
0, 0); if ((err = snd_sbdsp_create(card, iobase + ALS4K_IOB_10_ADLIB_ADDR0, pci->irq, /* internally registered as IRQF_SHARED in case of ALS4000 SB */ snd_als4000_interrupt, -1, -1, SB_HW_ALS4000, &chip)) < 0) { goto out_err; } acard->chip = chip; chip->pci = pci; chip->alt_port = iobase; snd_card_set_dev(card, &pci->dev); snd_als4000_configure(chip); strcpy(card->driver, "ALS4000"); strcpy(card->shortname, "Avance Logic ALS4000"); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, chip->alt_port, chip->irq); if ((err = snd_mpu401_uart_new( card, 0, MPU401_HW_ALS4000, iobase + ALS4K_IOB_30_MIDI_DATA, MPU401_INFO_INTEGRATED, pci->irq, 0, &chip->rmidi)) < 0) { printk(KERN_ERR "als4000: no MPU-401 device at 0x%lx?\n", iobase + ALS4K_IOB_30_MIDI_DATA); goto out_err; } /* FIXME: ALS4000 has interesting MPU401 configuration features * at ALS4K_CR1A_MPU401_UART_MODE_CONTROL * (pass-thru / UART switching, fast MIDI clock, etc.), * however there doesn't seem to be an ALSA API for this... 
* SPECS_PAGE: 21 */ if ((err = snd_als4000_pcm(chip, 0)) < 0) { goto out_err; } if ((err = snd_sbmixer_new(chip)) < 0) { goto out_err; } if (snd_opl3_create(card, iobase + ALS4K_IOB_10_ADLIB_ADDR0, iobase + ALS4K_IOB_12_ADLIB_ADDR2, OPL3_HW_AUTO, 1, &opl3) < 0) { printk(KERN_ERR "als4000: no OPL device at 0x%lx-0x%lx?\n", iobase + ALS4K_IOB_10_ADLIB_ADDR0, iobase + ALS4K_IOB_12_ADLIB_ADDR2); } else { if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { goto out_err; } } snd_als4000_create_gameport(acard, dev); if ((err = snd_card_register(card)) < 0) { goto out_err; } pci_set_drvdata(pci, card); dev++; err = 0; goto out; out_err: snd_card_free(card); out: return err; } static void __devexit snd_card_als4000_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM static int snd_als4000_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_card_als4000 *acard = card->private_data; struct snd_sb *chip = acard->chip; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_sbmixer_suspend(chip); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_als4000_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_card_als4000 *acard = card->private_data; struct snd_sb *chip = acard->chip; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "als4000: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_als4000_configure(chip); snd_sbdsp_reset(chip); snd_sbmixer_resume(chip); #ifdef SUPPORT_JOYSTICK if (acard->gameport) snd_als4000_set_addr(acard->iobase, 0, 0, 0, 1); #endif snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static struct pci_driver driver = { .name = 
"ALS4000", .id_table = snd_als4000_ids, .probe = snd_card_als4000_probe, .remove = __devexit_p(snd_card_als4000_remove), #ifdef CONFIG_PM .suspend = snd_als4000_suspend, .resume = snd_als4000_resume, #endif }; static int __init alsa_card_als4000_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_als4000_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_als4000_init) module_exit(alsa_card_als4000_exit)
gpl-2.0
diverger/linux-2.6.34-lpc32xx
fs/nilfs2/gcdat.c
862
2841
/* * gcdat.c - NILFS shadow DAT inode for GC * * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>, * and Ryusuke Konishi <ryusuke@osrg.net>. * */ #include <linux/buffer_head.h> #include "nilfs.h" #include "page.h" #include "mdt.h" int nilfs_init_gcdat_inode(struct the_nilfs *nilfs) { struct inode *dat = nilfs->ns_dat, *gcdat = nilfs->ns_gc_dat; struct nilfs_inode_info *dii = NILFS_I(dat), *gii = NILFS_I(gcdat); int err; gcdat->i_state = 0; gcdat->i_blocks = dat->i_blocks; gii->i_flags = dii->i_flags; gii->i_state = dii->i_state | (1 << NILFS_I_GCDAT); gii->i_cno = 0; nilfs_bmap_init_gcdat(gii->i_bmap, dii->i_bmap); err = nilfs_copy_dirty_pages(gcdat->i_mapping, dat->i_mapping); if (unlikely(err)) return err; return nilfs_copy_dirty_pages(&gii->i_btnode_cache, &dii->i_btnode_cache); } void nilfs_commit_gcdat_inode(struct the_nilfs *nilfs) { struct inode *dat = nilfs->ns_dat, *gcdat = nilfs->ns_gc_dat; struct nilfs_inode_info *dii = NILFS_I(dat), *gii = NILFS_I(gcdat); struct address_space *mapping = dat->i_mapping; struct address_space *gmapping = gcdat->i_mapping; down_write(&NILFS_MDT(dat)->mi_sem); dat->i_blocks = gcdat->i_blocks; dii->i_flags = gii->i_flags; dii->i_state = 
gii->i_state & ~(1 << NILFS_I_GCDAT); nilfs_bmap_commit_gcdat(gii->i_bmap, dii->i_bmap); nilfs_palloc_clear_cache(dat); nilfs_palloc_clear_cache(gcdat); nilfs_clear_dirty_pages(mapping); nilfs_copy_back_pages(mapping, gmapping); /* note: mdt dirty flags should be cleared by segctor. */ nilfs_clear_dirty_pages(&dii->i_btnode_cache); nilfs_copy_back_pages(&dii->i_btnode_cache, &gii->i_btnode_cache); up_write(&NILFS_MDT(dat)->mi_sem); } void nilfs_clear_gcdat_inode(struct the_nilfs *nilfs) { struct inode *gcdat = nilfs->ns_gc_dat; struct nilfs_inode_info *gii = NILFS_I(gcdat); gcdat->i_state = I_CLEAR; gii->i_flags = 0; nilfs_palloc_clear_cache(gcdat); truncate_inode_pages(gcdat->i_mapping, 0); truncate_inode_pages(&gii->i_btnode_cache, 0); }
gpl-2.0
Motorhead1991/android_kernel_samsung_s5pv210
arch/mips/math-emu/dp_simple.c
862
1905
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * http://www.algor.co.uk * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" int ieee754dp_finite(ieee754dp x) { return DPBEXP(x) != DP_EMAX + 1 + DP_EBIAS; } ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y) { CLEARCX; DPSIGN(x) = DPSIGN(y); return x; } ieee754dp ieee754dp_neg(ieee754dp x) { COMPXDP; EXPLODEXDP; CLEARCX; FLUSHXDP; /* * Invert the sign ALWAYS to prevent an endless recursion on * pow() in libc. */ /* quick fix up */ DPSIGN(x) ^= 1; if (xc == IEEE754_CLASS_SNAN) { ieee754dp y = ieee754dp_indef(); SETCX(IEEE754_INVALID_OPERATION); DPSIGN(y) = DPSIGN(x); return ieee754dp_nanxcpt(y, "neg"); } return x; } ieee754dp ieee754dp_abs(ieee754dp x) { COMPXDP; EXPLODEXDP; CLEARCX; FLUSHXDP; /* Clear sign ALWAYS, irrespective of NaN */ DPSIGN(x) = 0; if (xc == IEEE754_CLASS_SNAN) { SETCX(IEEE754_INVALID_OPERATION); return ieee754dp_nanxcpt(ieee754dp_indef(), "abs"); } return x; }
gpl-2.0
CyanogenMod/android_kernel_yu_msm8916
arch/x86/kernel/cpu/perf_event_amd.c
1630
18684
#include <linux/perf_event.h> #include <linux/export.h> #include <linux/types.h> #include <linux/init.h> #include <linux/slab.h> #include <asm/apicdef.h> #include "perf_event.h" static __initconst const u64 amd_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [ C(L1D) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */ [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */ }, }, [ C(L1I ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */ [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */ [ C(RESULT_MISS) ] = 0, }, }, [ C(LL ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */ [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */ [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(DTLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0, [ C(RESULT_MISS) ] = 0, }, }, [ C(ITLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */ [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ 
C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, [ C(BPU ) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */ [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, [ C(NODE) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */ [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = -1, [ C(RESULT_MISS) ] = -1, }, }, }; /* * AMD Performance Monitor K7 and later. */ static const u64 amd_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */ }; static u64 amd_pmu_event_map(int hw_event) { return amd_perfmon_event_map[hw_event]; } /* * Previously calculated offsets */ static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly; static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly; /* * Legacy CPUs: * 4 counters starting at 0xc0010000 each offset by 1 * * CPUs with core performance counter extensions: * 6 counters starting at 0xc0010200 each offset by 2 */ static inline int amd_pmu_addr_offset(int index, bool eventsel) { int offset; if (!index) return index; if (eventsel) offset = event_offsets[index]; else offset = count_offsets[index]; if (offset) return offset; if (!cpu_has_perfctr_core) offset = index; else offset = index << 1; if (eventsel) event_offsets[index] = offset; else count_offsets[index] = offset; return offset; } 
static int amd_core_hw_config(struct perf_event *event) { if (event->attr.exclude_host && event->attr.exclude_guest) /* * When HO == GO == 1 the hardware treats that as GO == HO == 0 * and will count in both modes. We don't want to count in that * case so we emulate no-counting by setting US = OS = 0. */ event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_OS); else if (event->attr.exclude_host) event->hw.config |= AMD64_EVENTSEL_GUESTONLY; else if (event->attr.exclude_guest) event->hw.config |= AMD64_EVENTSEL_HOSTONLY; return 0; } /* * AMD64 events are detected based on their event codes. */ static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc) { return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff); } static inline int amd_is_nb_event(struct hw_perf_event *hwc) { return (hwc->config & 0xe0) == 0xe0; } static inline int amd_has_nb(struct cpu_hw_events *cpuc) { struct amd_nb *nb = cpuc->amd_nb; return nb && nb->nb_id != -1; } static int amd_pmu_hw_config(struct perf_event *event) { int ret; /* pass precise event sampling to ibs: */ if (event->attr.precise_ip && get_ibs_caps()) return -ENOENT; if (has_branch_stack(event)) return -EOPNOTSUPP; ret = x86_pmu_hw_config(event); if (ret) return ret; if (event->attr.type == PERF_TYPE_RAW) event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; return amd_core_hw_config(event); } static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { struct amd_nb *nb = cpuc->amd_nb; int i; /* * need to scan whole list because event may not have * been assigned during scheduling * * no race condition possible because event can only * be removed on one CPU at a time AND PMU is disabled * when we come here */ for (i = 0; i < x86_pmu.num_counters; i++) { if (cmpxchg(nb->owners + i, event, NULL) == event) break; } } /* * AMD64 NorthBridge events need special treatment because * counter access needs to be synchronized across all cores * of a 
package. Refer to BKDG section 3.12 * * NB events are events measuring L3 cache, Hypertransport * traffic. They are identified by an event code >= 0xe00. * They measure events on the NorthBride which is shared * by all cores on a package. NB events are counted on a * shared set of counters. When a NB event is programmed * in a counter, the data actually comes from a shared * counter. Thus, access to those counters needs to be * synchronized. * * We implement the synchronization such that no two cores * can be measuring NB events using the same counters. Thus, * we maintain a per-NB allocation table. The available slot * is propagated using the event_constraint structure. * * We provide only one choice for each NB event based on * the fact that only NB events have restrictions. Consequently, * if a counter is available, there is a guarantee the NB event * will be assigned to it. If no slot is available, an empty * constraint is returned and scheduling will eventually fail * for this event. * * Note that all cores attached the same NB compete for the same * counters to host NB events, this is why we use atomic ops. Some * multi-chip CPUs may have more than one NB. * * Given that resources are allocated (cmpxchg), they must be * eventually freed for others to use. This is accomplished by * calling __amd_put_nb_event_constraints() * * Non NB events are not impacted by this restriction. 
*/ static struct event_constraint * __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, struct event_constraint *c) { struct hw_perf_event *hwc = &event->hw; struct amd_nb *nb = cpuc->amd_nb; struct perf_event *old; int idx, new = -1; if (!c) c = &unconstrained; if (cpuc->is_fake) return c; /* * detect if already present, if so reuse * * cannot merge with actual allocation * because of possible holes * * event can already be present yet not assigned (in hwc->idx) * because of successive calls to x86_schedule_events() from * hw_perf_group_sched_in() without hw_perf_enable() */ for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) { if (new == -1 || hwc->idx == idx) /* assign free slot, prefer hwc->idx */ old = cmpxchg(nb->owners + idx, NULL, event); else if (nb->owners[idx] == event) /* event already present */ old = event; else continue; if (old && old != event) continue; /* reassign to this slot */ if (new != -1) cmpxchg(nb->owners + new, event, NULL); new = idx; /* already present, reuse */ if (old == event) break; } if (new == -1) return &emptyconstraint; return &nb->event_constraints[new]; } static struct amd_nb *amd_alloc_nb(int cpu) { struct amd_nb *nb; int i; nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); if (!nb) return NULL; nb->nb_id = -1; /* * initialize all possible NB constraints */ for (i = 0; i < x86_pmu.num_counters; i++) { __set_bit(i, nb->event_constraints[i].idxmsk); nb->event_constraints[i].weight = 1; } return nb; } static int amd_pmu_cpu_prepare(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); WARN_ON_ONCE(cpuc->amd_nb); if (boot_cpu_data.x86_max_cores < 2) return NOTIFY_OK; cpuc->amd_nb = amd_alloc_nb(cpu); if (!cpuc->amd_nb) return NOTIFY_BAD; return NOTIFY_OK; } static void amd_pmu_cpu_starting(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); struct amd_nb *nb; int i, nb_id; cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; if 
(boot_cpu_data.x86_max_cores < 2) return; nb_id = amd_get_nb_id(cpu); WARN_ON_ONCE(nb_id == BAD_APICID); for_each_online_cpu(i) { nb = per_cpu(cpu_hw_events, i).amd_nb; if (WARN_ON_ONCE(!nb)) continue; if (nb->nb_id == nb_id) { cpuc->kfree_on_online = cpuc->amd_nb; cpuc->amd_nb = nb; break; } } cpuc->amd_nb->nb_id = nb_id; cpuc->amd_nb->refcnt++; } static void amd_pmu_cpu_dead(int cpu) { struct cpu_hw_events *cpuhw; if (boot_cpu_data.x86_max_cores < 2) return; cpuhw = &per_cpu(cpu_hw_events, cpu); if (cpuhw->amd_nb) { struct amd_nb *nb = cpuhw->amd_nb; if (nb->nb_id == -1 || --nb->refcnt == 0) kfree(nb); cpuhw->amd_nb = NULL; } } static struct event_constraint * amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { /* * if not NB event or no NB, then no constraints */ if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))) return &unconstrained; return __amd_get_nb_event_constraints(cpuc, event, NULL); } static void amd_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)) __amd_put_nb_event_constraints(cpuc, event); } PMU_FORMAT_ATTR(event, "config:0-7,32-35"); PMU_FORMAT_ATTR(umask, "config:8-15" ); PMU_FORMAT_ATTR(edge, "config:18" ); PMU_FORMAT_ATTR(inv, "config:23" ); PMU_FORMAT_ATTR(cmask, "config:24-31" ); static struct attribute *amd_format_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, &format_attr_edge.attr, &format_attr_inv.attr, &format_attr_cmask.attr, NULL, }; /* AMD Family 15h */ #define AMD_EVENT_TYPE_MASK 0x000000F0ULL #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL #define AMD_EVENT_EX_LS 0x000000C0ULL #define AMD_EVENT_DE 0x000000D0ULL #define AMD_EVENT_NB 0x000000E0ULL ... 
0x000000F0ULL /* * AMD family 15h event code/PMC mappings: * * type = event_code & 0x0F0: * * 0x000 FP PERF_CTL[5:3] * 0x010 FP PERF_CTL[5:3] * 0x020 LS PERF_CTL[5:0] * 0x030 LS PERF_CTL[5:0] * 0x040 DC PERF_CTL[5:0] * 0x050 DC PERF_CTL[5:0] * 0x060 CU PERF_CTL[2:0] * 0x070 CU PERF_CTL[2:0] * 0x080 IC/DE PERF_CTL[2:0] * 0x090 IC/DE PERF_CTL[2:0] * 0x0A0 --- * 0x0B0 --- * 0x0C0 EX/LS PERF_CTL[5:0] * 0x0D0 DE PERF_CTL[2:0] * 0x0E0 NB NB_PERF_CTL[3:0] * 0x0F0 NB NB_PERF_CTL[3:0] * * Exceptions: * * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*) * 0x003 FP PERF_CTL[3] * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*) * 0x00B FP PERF_CTL[3] * 0x00D FP PERF_CTL[3] * 0x023 DE PERF_CTL[2:0] * 0x02D LS PERF_CTL[3] * 0x02E LS PERF_CTL[3,0] * 0x031 LS PERF_CTL[2:0] (**) * 0x043 CU PERF_CTL[2:0] * 0x045 CU PERF_CTL[2:0] * 0x046 CU PERF_CTL[2:0] * 0x054 CU PERF_CTL[2:0] * 0x055 CU PERF_CTL[2:0] * 0x08F IC PERF_CTL[0] * 0x187 DE PERF_CTL[0] * 0x188 DE PERF_CTL[0] * 0x0DB EX PERF_CTL[5:0] * 0x0DC LS PERF_CTL[5:0] * 0x0DD LS PERF_CTL[5:0] * 0x0DE LS PERF_CTL[5:0] * 0x0DF LS PERF_CTL[5:0] * 0x1C0 EX PERF_CTL[5:3] * 0x1D6 EX PERF_CTL[5:0] * 0x1D8 EX PERF_CTL[5:0] * * (*) depending on the umask all FPU counters may be used * (**) only one unitmask enabled at a time */ static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0); static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0); static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0); static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); static struct event_constraint * amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; unsigned int event_code = amd_get_event_code(hwc); switch (event_code & AMD_EVENT_TYPE_MASK) { case AMD_EVENT_FP: 
switch (event_code) { case 0x000: if (!(hwc->config & 0x0000F000ULL)) break; if (!(hwc->config & 0x00000F00ULL)) break; return &amd_f15_PMC3; case 0x004: if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) break; return &amd_f15_PMC3; case 0x003: case 0x00B: case 0x00D: return &amd_f15_PMC3; } return &amd_f15_PMC53; case AMD_EVENT_LS: case AMD_EVENT_DC: case AMD_EVENT_EX_LS: switch (event_code) { case 0x023: case 0x043: case 0x045: case 0x046: case 0x054: case 0x055: return &amd_f15_PMC20; case 0x02D: return &amd_f15_PMC3; case 0x02E: return &amd_f15_PMC30; case 0x031: if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1) return &amd_f15_PMC20; return &emptyconstraint; case 0x1C0: return &amd_f15_PMC53; default: return &amd_f15_PMC50; } case AMD_EVENT_CU: case AMD_EVENT_IC_DE: case AMD_EVENT_DE: switch (event_code) { case 0x08F: case 0x187: case 0x188: return &amd_f15_PMC0; case 0x0DB ... 0x0DF: case 0x1D6: case 0x1D8: return &amd_f15_PMC50; default: return &amd_f15_PMC20; } case AMD_EVENT_NB: /* moved to perf_event_amd_uncore.c */ return &emptyconstraint; default: return &emptyconstraint; } } static ssize_t amd_event_sysfs_show(char *page, u64 config) { u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) | (config & AMD64_EVENTSEL_EVENT) >> 24; return x86_event_sysfs_show(page, config, event); } static __initconst const struct x86_pmu amd_pmu = { .name = "AMD", .handle_irq = x86_pmu_handle_irq, .disable_all = x86_pmu_disable_all, .enable_all = x86_pmu_enable_all, .enable = x86_pmu_enable_event, .disable = x86_pmu_disable_event, .hw_config = amd_pmu_hw_config, .schedule_events = x86_schedule_events, .eventsel = MSR_K7_EVNTSEL0, .perfctr = MSR_K7_PERFCTR0, .addr_offset = amd_pmu_addr_offset, .event_map = amd_pmu_event_map, .max_events = ARRAY_SIZE(amd_perfmon_event_map), .num_counters = AMD64_NUM_COUNTERS, .cntval_bits = 48, .cntval_mask = (1ULL << 48) - 1, .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, 
.get_event_constraints = amd_get_event_constraints, .put_event_constraints = amd_put_event_constraints, .format_attrs = amd_format_attr, .events_sysfs_show = amd_event_sysfs_show, .cpu_prepare = amd_pmu_cpu_prepare, .cpu_starting = amd_pmu_cpu_starting, .cpu_dead = amd_pmu_cpu_dead, }; static int setup_event_constraints(void) { if (boot_cpu_data.x86 == 0x15) x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; return 0; } static int setup_perfctr_core(void) { if (!cpu_has_perfctr_core) { WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h, KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!"); return -ENODEV; } WARN(x86_pmu.get_event_constraints == amd_get_event_constraints, KERN_ERR "hw perf events core counters need constraints handler!"); /* * If core performance counter extensions exists, we must use * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also * x86_pmu_addr_offset(). */ x86_pmu.eventsel = MSR_F15H_PERF_CTL; x86_pmu.perfctr = MSR_F15H_PERF_CTR; x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE; printk(KERN_INFO "perf: AMD core performance counters detected\n"); return 0; } __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ if (boot_cpu_data.x86 < 6) return -ENODEV; x86_pmu = amd_pmu; setup_event_constraints(); setup_perfctr_core(); /* Events are common for all AMDs */ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids)); return 0; } void amd_pmu_enable_virt(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); cpuc->perf_ctr_virt_mask = 0; /* Reload all events */ x86_pmu_disable_all(); x86_pmu_enable_all(0); } EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); void amd_pmu_disable_virt(void) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); /* * We only mask out the Host-only bit so that host-only counting works * when SVM is disabled. 
If someone sets up a guest-only counter when * SVM is disabled the Guest-only bits still gets set and the counter * will not count anything. */ cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY; /* Reload all events */ x86_pmu_disable_all(); x86_pmu_enable_all(0); } EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
gpl-2.0
shakalaca/ASUS_ZenFone_A500CG_A600CG
linux/kernel/drivers/pinctrl/pinmux.c
1886
16364
/* * Core driver for the pin muxing portions of the pin control subsystem * * Copyright (C) 2011-2012 ST-Ericsson SA * Written on behalf of Linaro for ST-Ericsson * Based on bits of regulator core, gpio core and clk core * * Author: Linus Walleij <linus.walleij@linaro.org> * * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. * * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) "pinmux core: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/radix-tree.h> #include <linux/err.h> #include <linux/list.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinmux.h> #include "core.h" #include "pinmux.h" int pinmux_check_ops(struct pinctrl_dev *pctldev) { const struct pinmux_ops *ops = pctldev->desc->pmxops; unsigned nfuncs; unsigned selector = 0; /* Check that we implement required operations */ if (!ops || !ops->get_functions_count || !ops->get_function_name || !ops->get_function_groups || !ops->enable) { dev_err(pctldev->dev, "pinmux ops lacks necessary functions\n"); return -EINVAL; } /* Check that all functions registered have names */ nfuncs = ops->get_functions_count(pctldev); while (selector < nfuncs) { const char *fname = ops->get_function_name(pctldev, selector); if (!fname) { dev_err(pctldev->dev, "pinmux ops has no name for function%u\n", selector); return -EINVAL; } selector++; } return 0; } int pinmux_validate_map(struct pinctrl_map const *map, int i) { if (!map->data.mux.function) { pr_err("failed to register map %s (%d): no function given\n", map->name, i); return -EINVAL; } return 0; } /** * pin_request() - request a single pin to be muxed in, typically for GPIO * @pin: the pin number in the global pin space * @owner: a representation of the owner of this pin; typically the device * name that controls its 
mux function, or the requested GPIO name * @gpio_range: the range matching the GPIO pin if this is a request for a * single GPIO pin */ static int pin_request(struct pinctrl_dev *pctldev, int pin, const char *owner, struct pinctrl_gpio_range *gpio_range) { struct pin_desc *desc; const struct pinmux_ops *ops = pctldev->desc->pmxops; int status = -EINVAL; desc = pin_desc_get(pctldev, pin); if (desc == NULL) { dev_err(pctldev->dev, "pin %d is not registered so it cannot be requested\n", pin); goto out; } dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n", pin, desc->name, owner); if (gpio_range) { /* There's no need to support multiple GPIO requests */ if (desc->gpio_owner) { dev_err(pctldev->dev, "pin %s already requested by %s; cannot claim for %s\n", desc->name, desc->gpio_owner, owner); goto out; } desc->gpio_owner = owner; } else { if (desc->mux_usecount && strcmp(desc->mux_owner, owner)) { dev_err(pctldev->dev, "pin %s already requested by %s; cannot claim for %s\n", desc->name, desc->mux_owner, owner); goto out; } desc->mux_usecount++; if (desc->mux_usecount > 1) return 0; desc->mux_owner = owner; } /* Let each pin increase references to this module */ if (!try_module_get(pctldev->owner)) { dev_err(pctldev->dev, "could not increase module refcount for pin %d\n", pin); status = -EINVAL; goto out_free_pin; } /* * If there is no kind of request function for the pin we just assume * we got it by default and proceed. 
*/ if (gpio_range && ops->gpio_request_enable) /* This requests and enables a single GPIO pin */ status = ops->gpio_request_enable(pctldev, gpio_range, pin); else if (ops->request) status = ops->request(pctldev, pin); else status = 0; if (status) { dev_err(pctldev->dev, "request() failed for pin %d\n", pin); module_put(pctldev->owner); } out_free_pin: if (status) { if (gpio_range) { desc->gpio_owner = NULL; } else { desc->mux_usecount--; if (!desc->mux_usecount) desc->mux_owner = NULL; } } out: if (status) dev_err(pctldev->dev, "pin-%d (%s) status %d\n", pin, owner, status); return status; } /** * pin_free() - release a single muxed in pin so something else can be muxed * @pctldev: pin controller device handling this pin * @pin: the pin to free * @gpio_range: the range matching the GPIO pin if this is a request for a * single GPIO pin * * This function returns a pointer to the previous owner. This is used * for callers that dynamically allocate an owner name so it can be freed * once the pin is free. This is done for GPIO request functions. */ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, struct pinctrl_gpio_range *gpio_range) { const struct pinmux_ops *ops = pctldev->desc->pmxops; struct pin_desc *desc; const char *owner; desc = pin_desc_get(pctldev, pin); if (desc == NULL) { dev_err(pctldev->dev, "pin is not registered so it cannot be freed\n"); return NULL; } if (!gpio_range) { /* * A pin should not be freed more times than allocated. */ if (WARN_ON(!desc->mux_usecount)) return NULL; desc->mux_usecount--; if (desc->mux_usecount) return NULL; } /* * If there is no kind of request function for the pin we just assume * we got it by default and proceed. 
*/ if (gpio_range && ops->gpio_disable_free) ops->gpio_disable_free(pctldev, gpio_range, pin); else if (ops->free) ops->free(pctldev, pin); if (gpio_range) { owner = desc->gpio_owner; desc->gpio_owner = NULL; } else { owner = desc->mux_owner; desc->mux_owner = NULL; desc->mux_setting = NULL; } module_put(pctldev->owner); return owner; } /** * pinmux_request_gpio() - request pinmuxing for a GPIO pin * @pctldev: pin controller device affected * @pin: the pin to mux in for GPIO * @range: the applicable GPIO range */ int pinmux_request_gpio(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin, unsigned gpio) { const char *owner; int ret; /* Conjure some name stating what chip and pin this is taken by */ owner = kasprintf(GFP_KERNEL, "%s:%d", range->name, gpio); if (!owner) return -EINVAL; ret = pin_request(pctldev, pin, owner, range); if (ret < 0) kfree(owner); return ret; } /** * pinmux_free_gpio() - release a pin from GPIO muxing * @pctldev: the pin controller device for the pin * @pin: the affected currently GPIO-muxed in pin * @range: applicable GPIO range */ void pinmux_free_gpio(struct pinctrl_dev *pctldev, unsigned pin, struct pinctrl_gpio_range *range) { const char *owner; owner = pin_free(pctldev, pin, range); kfree(owner); } /** * pinmux_gpio_direction() - set the direction of a single muxed-in GPIO pin * @pctldev: the pin controller handling this pin * @range: applicable GPIO range * @pin: the affected GPIO pin in this controller * @input: true if we set the pin as input, false for output */ int pinmux_gpio_direction(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin, bool input) { const struct pinmux_ops *ops; int ret; ops = pctldev->desc->pmxops; if (ops->gpio_set_direction) ret = ops->gpio_set_direction(pctldev, range, pin, input); else ret = 0; return ret; } static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev, const char *function) { const struct pinmux_ops *ops = pctldev->desc->pmxops; 
unsigned nfuncs = ops->get_functions_count(pctldev); unsigned selector = 0; /* See if this pctldev has this function */ while (selector < nfuncs) { const char *fname = ops->get_function_name(pctldev, selector); if (!strcmp(function, fname)) return selector; selector++; } pr_err("%s does not support function %s\n", pinctrl_dev_get_name(pctldev), function); return -EINVAL; } int pinmux_map_to_setting(struct pinctrl_map const *map, struct pinctrl_setting *setting) { struct pinctrl_dev *pctldev = setting->pctldev; const struct pinmux_ops *pmxops = pctldev->desc->pmxops; char const * const *groups; unsigned num_groups; int ret; const char *group; int i; if (!pmxops) { dev_err(pctldev->dev, "does not support mux function\n"); return -EINVAL; } ret = pinmux_func_name_to_selector(pctldev, map->data.mux.function); if (ret < 0) { dev_err(pctldev->dev, "invalid function %s in map table\n", map->data.mux.function); return ret; } setting->data.mux.func = ret; ret = pmxops->get_function_groups(pctldev, setting->data.mux.func, &groups, &num_groups); if (ret < 0) { dev_err(pctldev->dev, "can't query groups for function %s\n", map->data.mux.function); return ret; } if (!num_groups) { dev_err(pctldev->dev, "function %s can't be selected on any group\n", map->data.mux.function); return -EINVAL; } if (map->data.mux.group) { bool found = false; group = map->data.mux.group; for (i = 0; i < num_groups; i++) { if (!strcmp(group, groups[i])) { found = true; break; } } if (!found) { dev_err(pctldev->dev, "invalid group \"%s\" for function \"%s\"\n", group, map->data.mux.function); return -EINVAL; } } else { group = groups[0]; } ret = pinctrl_get_group_selector(pctldev, group); if (ret < 0) { dev_err(pctldev->dev, "invalid group %s in map table\n", map->data.mux.group); return ret; } setting->data.mux.group = ret; return 0; } void pinmux_free_setting(struct pinctrl_setting const *setting) { /* This function is currently unused */ } int pinmux_enable_setting(struct pinctrl_setting const 
*setting) { struct pinctrl_dev *pctldev = setting->pctldev; const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; const struct pinmux_ops *ops = pctldev->desc->pmxops; int ret; const unsigned *pins; unsigned num_pins; int i; struct pin_desc *desc; ret = pctlops->get_group_pins(pctldev, setting->data.mux.group, &pins, &num_pins); if (ret) { /* errors only affect debug data, so just warn */ dev_warn(pctldev->dev, "could not get pins for group selector %d\n", setting->data.mux.group); num_pins = 0; } /* Try to allocate all pins in this group, one by one */ for (i = 0; i < num_pins; i++) { ret = pin_request(pctldev, pins[i], setting->dev_name, NULL); if (ret) { dev_err(pctldev->dev, "could not request pin %d on device %s\n", pins[i], pinctrl_dev_get_name(pctldev)); goto err_pin_request; } } /* Now that we have acquired the pins, encode the mux setting */ for (i = 0; i < num_pins; i++) { desc = pin_desc_get(pctldev, pins[i]); if (desc == NULL) { dev_warn(pctldev->dev, "could not get pin desc for pin %d\n", pins[i]); continue; } desc->mux_setting = &(setting->data.mux); } ret = ops->enable(pctldev, setting->data.mux.func, setting->data.mux.group); if (ret) goto err_enable; return 0; err_enable: for (i = 0; i < num_pins; i++) { desc = pin_desc_get(pctldev, pins[i]); if (desc) desc->mux_setting = NULL; } err_pin_request: /* On error release all taken pins */ while (--i >= 0) pin_free(pctldev, pins[i], NULL); return ret; } void pinmux_disable_setting(struct pinctrl_setting const *setting) { struct pinctrl_dev *pctldev = setting->pctldev; const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; const struct pinmux_ops *ops = pctldev->desc->pmxops; int ret; const unsigned *pins; unsigned num_pins; int i; struct pin_desc *desc; ret = pctlops->get_group_pins(pctldev, setting->data.mux.group, &pins, &num_pins); if (ret) { /* errors only affect debug data, so just warn */ dev_warn(pctldev->dev, "could not get pins for group selector %d\n", setting->data.mux.group); 
num_pins = 0; } /* Flag the descs that no setting is active */ for (i = 0; i < num_pins; i++) { desc = pin_desc_get(pctldev, pins[i]); if (desc == NULL) { dev_warn(pctldev->dev, "could not get pin desc for pin %d\n", pins[i]); continue; } desc->mux_setting = NULL; } /* And release the pins */ for (i = 0; i < num_pins; i++) pin_free(pctldev, pins[i], NULL); if (ops->disable) ops->disable(pctldev, setting->data.mux.func, setting->data.mux.group); } #ifdef CONFIG_DEBUG_FS /* Called from pincontrol core */ static int pinmux_functions_show(struct seq_file *s, void *what) { struct pinctrl_dev *pctldev = s->private; const struct pinmux_ops *pmxops = pctldev->desc->pmxops; unsigned nfuncs; unsigned func_selector = 0; if (!pmxops) return 0; mutex_lock(&pctldev->mutex); nfuncs = pmxops->get_functions_count(pctldev); while (func_selector < nfuncs) { const char *func = pmxops->get_function_name(pctldev, func_selector); const char * const *groups; unsigned num_groups; int ret; int i; ret = pmxops->get_function_groups(pctldev, func_selector, &groups, &num_groups); if (ret) seq_printf(s, "function %s: COULD NOT GET GROUPS\n", func); seq_printf(s, "function: %s, groups = [ ", func); for (i = 0; i < num_groups; i++) seq_printf(s, "%s ", groups[i]); seq_puts(s, "]\n"); func_selector++; } mutex_unlock(&pctldev->mutex); return 0; } static int pinmux_pins_show(struct seq_file *s, void *what) { struct pinctrl_dev *pctldev = s->private; const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; const struct pinmux_ops *pmxops = pctldev->desc->pmxops; unsigned i, pin; if (!pmxops) return 0; seq_puts(s, "Pinmux settings per pin\n"); seq_puts(s, "Format: pin (name): mux_owner gpio_owner hog?\n"); mutex_lock(&pctldev->mutex); /* The pin number can be retrived from the pin controller descriptor */ for (i = 0; i < pctldev->desc->npins; i++) { struct pin_desc *desc; bool is_hog = false; pin = pctldev->desc->pins[i].number; desc = pin_desc_get(pctldev, pin); /* Skip if we cannot search the pin 
*/ if (desc == NULL) continue; if (desc->mux_owner && !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev))) is_hog = true; seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name ? desc->name : "unnamed", desc->mux_owner ? desc->mux_owner : "(MUX UNCLAIMED)", desc->gpio_owner ? desc->gpio_owner : "(GPIO UNCLAIMED)", is_hog ? " (HOG)" : ""); if (desc->mux_setting) seq_printf(s, " function %s group %s\n", pmxops->get_function_name(pctldev, desc->mux_setting->func), pctlops->get_group_name(pctldev, desc->mux_setting->group)); else seq_printf(s, "\n"); } mutex_unlock(&pctldev->mutex); return 0; } void pinmux_show_map(struct seq_file *s, struct pinctrl_map const *map) { seq_printf(s, "group %s\nfunction %s\n", map->data.mux.group ? map->data.mux.group : "(default)", map->data.mux.function); } void pinmux_show_setting(struct seq_file *s, struct pinctrl_setting const *setting) { struct pinctrl_dev *pctldev = setting->pctldev; const struct pinmux_ops *pmxops = pctldev->desc->pmxops; const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; seq_printf(s, "group: %s (%u) function: %s (%u)\n", pctlops->get_group_name(pctldev, setting->data.mux.group), setting->data.mux.group, pmxops->get_function_name(pctldev, setting->data.mux.func), setting->data.mux.func); } static int pinmux_functions_open(struct inode *inode, struct file *file) { return single_open(file, pinmux_functions_show, inode->i_private); } static int pinmux_pins_open(struct inode *inode, struct file *file) { return single_open(file, pinmux_pins_show, inode->i_private); } static const struct file_operations pinmux_functions_ops = { .open = pinmux_functions_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations pinmux_pins_ops = { .open = pinmux_pins_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; void pinmux_init_device_debugfs(struct dentry *devroot, struct pinctrl_dev *pctldev) { debugfs_create_file("pinmux-functions", S_IFREG 
| S_IRUGO, devroot, pctldev, &pinmux_functions_ops); debugfs_create_file("pinmux-pins", S_IFREG | S_IRUGO, devroot, pctldev, &pinmux_pins_ops); } #endif /* CONFIG_DEBUG_FS */
gpl-2.0
wgoossens/linux-nios2
drivers/uio/uio_aec.c
2398
4129
/* * uio_aec.c -- simple driver for Adrienne Electronics Corp time code PCI device * * Copyright (C) 2008 Brandon Philips <brandon@ifup.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/cdev.h> #include <linux/fs.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/uio_driver.h> #include <linux/slab.h> #define PCI_VENDOR_ID_AEC 0xaecb #define PCI_DEVICE_ID_AEC_VITCLTC 0x6250 #define INT_ENABLE_ADDR 0xFC #define INT_ENABLE 0x10 #define INT_DISABLE 0x0 #define INT_MASK_ADDR 0x2E #define INT_MASK_ALL 0x3F #define INTA_DRVR_ADDR 0xFE #define INTA_ENABLED_FLAG 0x08 #define INTA_FLAG 0x01 #define MAILBOX 0x0F static struct pci_device_id ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AEC, PCI_DEVICE_ID_AEC_VITCLTC), }, { 0, } }; MODULE_DEVICE_TABLE(pci, ids); static irqreturn_t aectc_irq(int irq, struct uio_info *dev_info) { void __iomem *int_flag = dev_info->priv + INTA_DRVR_ADDR; unsigned char status = ioread8(int_flag); if ((status & INTA_ENABLED_FLAG) && (status & INTA_FLAG)) { /* application writes 0x00 to 0x2F to get next interrupt */ status = ioread8(dev_info->priv + MAILBOX); return IRQ_HANDLED; } return IRQ_NONE; } static void print_board_data(struct pci_dev *pdev, struct uio_info *i) { dev_info(&pdev->dev, "PCI-TC board vendor: %x%x 
number: %x%x" " revision: %c%c\n", ioread8(i->priv + 0x01), ioread8(i->priv + 0x00), ioread8(i->priv + 0x03), ioread8(i->priv + 0x02), ioread8(i->priv + 0x06), ioread8(i->priv + 0x07)); } static int probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct uio_info *info; int ret; info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; if (pci_enable_device(pdev)) goto out_free; if (pci_request_regions(pdev, "aectc")) goto out_disable; info->name = "aectc"; info->port[0].start = pci_resource_start(pdev, 0); if (!info->port[0].start) goto out_release; info->priv = pci_iomap(pdev, 0, 0); if (!info->priv) goto out_release; info->port[0].size = pci_resource_len(pdev, 0); info->port[0].porttype = UIO_PORT_GPIO; info->version = "0.0.1"; info->irq = pdev->irq; info->irq_flags = IRQF_SHARED; info->handler = aectc_irq; print_board_data(pdev, info); ret = uio_register_device(&pdev->dev, info); if (ret) goto out_unmap; iowrite32(INT_ENABLE, info->priv + INT_ENABLE_ADDR); iowrite8(INT_MASK_ALL, info->priv + INT_MASK_ADDR); if (!(ioread8(info->priv + INTA_DRVR_ADDR) & INTA_ENABLED_FLAG)) dev_err(&pdev->dev, "aectc: interrupts not enabled\n"); pci_set_drvdata(pdev, info); return 0; out_unmap: pci_iounmap(pdev, info->priv); out_release: pci_release_regions(pdev); out_disable: pci_disable_device(pdev); out_free: kfree(info); return -ENODEV; } static void remove(struct pci_dev *pdev) { struct uio_info *info = pci_get_drvdata(pdev); /* disable interrupts */ iowrite8(INT_DISABLE, info->priv + INT_MASK_ADDR); iowrite32(INT_DISABLE, info->priv + INT_ENABLE_ADDR); /* read mailbox to ensure board drops irq */ ioread8(info->priv + MAILBOX); uio_unregister_device(info); pci_release_regions(pdev); pci_disable_device(pdev); iounmap(info->priv); kfree(info); } static struct pci_driver pci_driver = { .name = "aectc", .id_table = ids, .probe = probe, .remove = remove, }; module_pci_driver(pci_driver); MODULE_LICENSE("GPL");
gpl-2.0
andyhui/linux-kernel-3.12.17
drivers/media/v4l2-core/v4l2-event.c
2654
7641
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

/*
 * sev_pos() - index into the subscription's circular event buffer
 *
 * @idx is an offset from sev->first; the result wraps around sev->elems.
 */
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

/* Pop the oldest available event from the fh, or -ENOENT if none pending */
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	/* Slot goes back to the subscription's free pool */
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

/*
 * v4l2_event_dequeue() - dequeue an event, optionally blocking until one
 * arrives. The video device's serialization lock is dropped while waiting
 * so other file operations can make progress.
 */
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

/*
 * __v4l2_event_queue_fh() - deliver one event to one file handle
 *
 * Caller must hold fh->vdev->fh_lock. If the subscription's ring is full
 * the oldest event is dropped, with the subscription's optional replace/
 * merge ops given a chance to combine payloads first.
 */
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet elems will be 0, treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

/* Broadcast an event to every file handle open on the video device */
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

/* Deliver an event to a single file handle */
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

/*
 * v4l2_event_subscribe() - subscribe a file handle to events of @sub's
 * type/id, with room for @elems queued events (at least 1). sev->elems is
 * only set after ops->add() succeeds, which is what marks the subscription
 * as live for __v4l2_event_queue_fh().
 */
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems,
		      GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			/* Clear ops so unsubscribe won't call ops->del() */
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

/* Drop every subscription held by @fh, one at a time */
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					       struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

/*
 * v4l2_event_unsubscribe() - remove one subscription (or all, for
 * V4L2_EVENT_ALL) along with any of its pending events.
 */
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	unsigned i;	/* unsigned: compared against sev->in_use */

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
gpl-2.0
slv96/kernel_samsung_codina
arch/arm/mach-s3c2443/dma.c
3934
4997
/* linux/arch/arm/mach-s3c2443/dma.c * * Copyright (c) 2007 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * S3C2443 DMA selection * * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sysdev.h> #include <linux/serial_core.h> #include <linux/io.h> #include <mach/dma.h> #include <plat/dma-s3c24xx.h> #include <plat/cpu.h> #include <plat/regs-serial.h> #include <mach/regs-gpio.h> #include <plat/regs-ac97.h> #include <plat/regs-dma.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> #include <mach/regs-sdi.h> #include <plat/regs-iis.h> #include <plat/regs-spi.h> #define MAP(x) { \ [0] = (x) | DMA_CH_VALID, \ [1] = (x) | DMA_CH_VALID, \ [2] = (x) | DMA_CH_VALID, \ [3] = (x) | DMA_CH_VALID, \ [4] = (x) | DMA_CH_VALID, \ [5] = (x) | DMA_CH_VALID, \ } static struct s3c24xx_dma_map __initdata s3c2443_dma_mappings[] = { [DMACH_XD0] = { .name = "xdreq0", .channels = MAP(S3C2443_DMAREQSEL_XDREQ0), }, [DMACH_XD1] = { .name = "xdreq1", .channels = MAP(S3C2443_DMAREQSEL_XDREQ1), }, [DMACH_SDI] = { .name = "sdi", .channels = MAP(S3C2443_DMAREQSEL_SDI), .hw_addr.to = S3C2410_PA_IIS + S3C2410_IISFIFO, .hw_addr.from = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_SPI0] = { .name = "spi0", .channels = MAP(S3C2443_DMAREQSEL_SPI0TX), .hw_addr.to = S3C2410_PA_SPI + S3C2410_SPTDAT, .hw_addr.from = S3C2410_PA_SPI + S3C2410_SPRDAT, }, [DMACH_SPI1] = { .name = "spi1", .channels = MAP(S3C2443_DMAREQSEL_SPI1TX), .hw_addr.to = S3C2410_PA_SPI + 0x20 + S3C2410_SPTDAT, .hw_addr.from = S3C2410_PA_SPI + 0x20 + S3C2410_SPRDAT, }, [DMACH_UART0] = { .name = "uart0", .channels = MAP(S3C2443_DMAREQSEL_UART0_0), .hw_addr.to = S3C2410_PA_UART0 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART0 + S3C2410_URXH, }, [DMACH_UART1] = { .name = "uart1", .channels = 
MAP(S3C2443_DMAREQSEL_UART1_0), .hw_addr.to = S3C2410_PA_UART1 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART1 + S3C2410_URXH, }, [DMACH_UART2] = { .name = "uart2", .channels = MAP(S3C2443_DMAREQSEL_UART2_0), .hw_addr.to = S3C2410_PA_UART2 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART2 + S3C2410_URXH, }, [DMACH_UART3] = { .name = "uart3", .channels = MAP(S3C2443_DMAREQSEL_UART3_0), .hw_addr.to = S3C2443_PA_UART3 + S3C2410_UTXH, .hw_addr.from = S3C2443_PA_UART3 + S3C2410_URXH, }, [DMACH_UART0_SRC2] = { .name = "uart0", .channels = MAP(S3C2443_DMAREQSEL_UART0_1), .hw_addr.to = S3C2410_PA_UART0 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART0 + S3C2410_URXH, }, [DMACH_UART1_SRC2] = { .name = "uart1", .channels = MAP(S3C2443_DMAREQSEL_UART1_1), .hw_addr.to = S3C2410_PA_UART1 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART1 + S3C2410_URXH, }, [DMACH_UART2_SRC2] = { .name = "uart2", .channels = MAP(S3C2443_DMAREQSEL_UART2_1), .hw_addr.to = S3C2410_PA_UART2 + S3C2410_UTXH, .hw_addr.from = S3C2410_PA_UART2 + S3C2410_URXH, }, [DMACH_UART3_SRC2] = { .name = "uart3", .channels = MAP(S3C2443_DMAREQSEL_UART3_1), .hw_addr.to = S3C2443_PA_UART3 + S3C2410_UTXH, .hw_addr.from = S3C2443_PA_UART3 + S3C2410_URXH, }, [DMACH_TIMER] = { .name = "timer", .channels = MAP(S3C2443_DMAREQSEL_TIMER), }, [DMACH_I2S_IN] = { .name = "i2s-sdi", .channels = MAP(S3C2443_DMAREQSEL_I2SRX), .hw_addr.from = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_I2S_OUT] = { .name = "i2s-sdo", .channels = MAP(S3C2443_DMAREQSEL_I2STX), .hw_addr.to = S3C2410_PA_IIS + S3C2410_IISFIFO, }, [DMACH_PCM_IN] = { .name = "pcm-in", .channels = MAP(S3C2443_DMAREQSEL_PCMIN), .hw_addr.from = S3C2440_PA_AC97 + S3C_AC97_PCM_DATA, }, [DMACH_PCM_OUT] = { .name = "pcm-out", .channels = MAP(S3C2443_DMAREQSEL_PCMOUT), .hw_addr.to = S3C2440_PA_AC97 + S3C_AC97_PCM_DATA, }, [DMACH_MIC_IN] = { .name = "mic-in", .channels = MAP(S3C2443_DMAREQSEL_MICIN), .hw_addr.from = S3C2440_PA_AC97 + S3C_AC97_MIC_DATA, }, }; static void 
s3c2443_dma_select(struct s3c2410_dma_chan *chan, struct s3c24xx_dma_map *map) { writel(map->channels[0] | S3C2443_DMAREQSEL_HW, chan->regs + S3C2443_DMA_DMAREQSEL); } static struct s3c24xx_dma_selection __initdata s3c2443_dma_sel = { .select = s3c2443_dma_select, .dcon_mask = 0, .map = s3c2443_dma_mappings, .map_size = ARRAY_SIZE(s3c2443_dma_mappings), }; static int __init s3c2443_dma_add(struct sys_device *sysdev) { s3c24xx_dma_init(6, IRQ_S3C2443_DMA0, 0x100); return s3c24xx_dma_init_map(&s3c2443_dma_sel); } static struct sysdev_driver s3c2443_dma_driver = { .add = s3c2443_dma_add, }; static int __init s3c2443_dma_init(void) { return sysdev_driver_register(&s3c2443_sysclass, &s3c2443_dma_driver); } arch_initcall(s3c2443_dma_init);
gpl-2.0
EPDCenter/android_kernel_bq_qc
drivers/video/intelfb/intelfbdrv.c
4190
45454
/* * intelfb * * Linux framebuffer driver for Intel(R) 830M/845G/852GM/855GM/865G/915G/915GM/ * 945G/945GM/945GME/965G/965GM integrated graphics chips. * * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org> * 2004 Sylvain Meyer * 2006 David Airlie * * This driver consists of two parts. The first part (intelfbdrv.c) provides * the basic fbdev interfaces, is derived in part from the radeonfb and * vesafb drivers, and is covered by the GPL. The second part (intelfbhw.c) * provides the code to program the hardware. Most of it is derived from * the i810/i830 XFree86 driver. The HW-specific code is covered here * under a dual license (GPL and MIT/XFree86 license). * * Author: David Dawes * */ /* $DHD: intelfb/intelfbdrv.c,v 1.20 2003/06/27 15:17:40 dawes Exp $ */ /* * Changes: * 01/2003 - Initial driver (0.1.0), no mode switching, no acceleration. * This initial version is a basic core that works a lot like * the vesafb driver. It must be built-in to the kernel, * and the initial video mode must be set with vga=XXX at * boot time. (David Dawes) * * 01/2003 - Version 0.2.0: Mode switching added, colormap support * implemented, Y panning, and soft screen blanking implemented. * No acceleration yet. (David Dawes) * * 01/2003 - Version 0.3.0: fbcon acceleration support added. Module * option handling added. (David Dawes) * * 01/2003 - Version 0.4.0: fbcon HW cursor support added. (David Dawes) * * 01/2003 - Version 0.4.1: Add auto-generation of built-in modes. * (David Dawes) * * 02/2003 - Version 0.4.2: Add check for active non-CRT devices, and * mode validation checks. (David Dawes) * * 02/2003 - Version 0.4.3: Check when the VC is in graphics mode so that * acceleration is disabled while an XFree86 server is running. * (David Dawes) * * 02/2003 - Version 0.4.4: Monitor DPMS support. (David Dawes) * * 02/2003 - Version 0.4.5: Basic XFree86 + fbdev working. (David Dawes) * * 02/2003 - Version 0.5.0: Modify to work with the 2.5.32 kernel as well * as 2.4.x kernels. 
(David Dawes) * * 02/2003 - Version 0.6.0: Split out HW-specifics into a separate file. * (David Dawes) * * 02/2003 - Version 0.7.0: Test on 852GM/855GM. Acceleration and HW * cursor are disabled on this platform. (David Dawes) * * 02/2003 - Version 0.7.1: Test on 845G. Acceleration is disabled * on this platform. (David Dawes) * * 02/2003 - Version 0.7.2: Test on 830M. Acceleration and HW * cursor are disabled on this platform. (David Dawes) * * 02/2003 - Version 0.7.3: Fix 8-bit modes for mobile platforms * (David Dawes) * * 02/2003 - Version 0.7.4: Add checks for FB and FBCON_HAS_CFB* configured * in the kernel, and add mode bpp verification and default * bpp selection based on which FBCON_HAS_CFB* are configured. * (David Dawes) * * 02/2003 - Version 0.7.5: Add basic package/install scripts based on the * DRI packaging scripts. (David Dawes) * * 04/2003 - Version 0.7.6: Fix typo that affects builds with SMP-enabled * kernels. (David Dawes, reported by Anupam). * * 06/2003 - Version 0.7.7: * Fix Makefile.kernel build problem (Tsutomu Yasuda). * Fix mis-placed #endif (2.4.21 kernel). * * 09/2004 - Version 0.9.0 - by Sylvain Meyer * Port to linux 2.6 kernel fbdev * Fix HW accel and HW cursor on i845G * Use of agpgart for fb memory reservation * Add mtrr support * * 10/2004 - Version 0.9.1 * Use module_param instead of old MODULE_PARM * Some cleanup * * 11/2004 - Version 0.9.2 * Add vram option to reserve more memory than stolen by BIOS * Fix intelfbhw_pan_display typo * Add __initdata annotations * * 04/2008 - Version 0.9.5 * Add support for 965G/965GM. (Maik Broemme <mbroemme@plusserver.de>) * * 08/2008 - Version 0.9.6 * Add support for 945GME. 
(Phil Endecott <spam_from_intelfb@chezphil.org>) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/screen_info.h> #include <asm/io.h> #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include "intelfb.h" #include "intelfbhw.h" #include "../edid.h" static void __devinit get_initial_mode(struct intelfb_info *dinfo); static void update_dinfo(struct intelfb_info *dinfo, struct fb_var_screeninfo *var); static int intelfb_open(struct fb_info *info, int user); static int intelfb_release(struct fb_info *info, int user); static int intelfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info); static int intelfb_set_par(struct fb_info *info); static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info); static int intelfb_blank(int blank, struct fb_info *info); static int intelfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static void intelfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect); static void intelfb_copyarea(struct fb_info *info, const struct fb_copyarea *region); static void intelfb_imageblit(struct fb_info *info, const struct fb_image *image); static int intelfb_cursor(struct fb_info *info, struct fb_cursor *cursor); static int intelfb_sync(struct fb_info *info); static int intelfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg); static int __devinit intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent); static void __devexit intelfb_pci_unregister(struct pci_dev *pdev); static int __devinit intelfb_set_fbinfo(struct intelfb_info *dinfo); /* * Limiting the class to PCI_CLASS_DISPLAY_VGA prevents function 1 of the * mobile 
chipsets from being registered. */
#if DETECT_VGA_CLASS_ONLY
#define INTELFB_CLASS_MASK ~0 << 8
#else
#define INTELFB_CLASS_MASK 0
#endif

/*
 * PCI IDs this driver binds to.  The last field is the chip-family enum
 * passed back to the probe routine via pci_device_id.driver_data.
 */
static struct pci_device_id intelfb_pci_table[] __devinitdata = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_830M, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_830M },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_865G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_865G },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_854, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_854 },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915G },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GM },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GME, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GME },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965G },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965GM },
	{ 0, }
};

/* Global data */
/* Number of devices probed so far; the driver supports exactly one. */
static int num_registered = 0;

/* fb ops */
static struct fb_ops intel_fb_ops = {
	.owner = THIS_MODULE,
	.fb_open = intelfb_open,
	.fb_release = intelfb_release,
	.fb_check_var = intelfb_check_var,
	.fb_set_par = intelfb_set_par,
	.fb_setcolreg = intelfb_setcolreg,
	.fb_blank = intelfb_blank,
	.fb_pan_display = intelfb_pan_display,
	.fb_fillrect = intelfb_fillrect,
	.fb_copyarea = intelfb_copyarea,
	.fb_imageblit = intelfb_imageblit,
	.fb_cursor = intelfb_cursor,
	.fb_sync = intelfb_sync,
	.fb_ioctl = intelfb_ioctl
};

/* PCI driver module table */
static struct pci_driver intelfb_driver = {
	.name = "intelfb",
	.id_table = intelfb_pci_table,
	.probe = intelfb_pci_register,
	.remove = __devexit_p(intelfb_pci_unregister)
};

/* Module description/parameters */
MODULE_AUTHOR("David Dawes <dawes@tungstengraphics.com>, "
	      "Sylvain Meyer <sylvain.meyer@worldonline.fr>");
MODULE_DESCRIPTION("Framebuffer driver for Intel(R) " SUPPORTED_CHIPSETS
		   " chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, intelfb_pci_table);

/*
 * Module/boot parameters.  These are also settable via the
 * video=intelfb:... command line (see intelfb_setup() below).
 */
static int accel    = 1;	/* use the 2D engine when supported */
static int vram     = 4;	/* MiB of system RAM for the framebuffer */
static int hwcursor = 0;	/* use the hardware cursor */
static int mtrr     = 1;	/* mark the aperture write-combining */
static int fixed    = 0;	/* disallow mode switching */
static int noinit   = 0;	/* don't program a mode at load time */
static int noregister = 0;	/* probe only, never register (debug) */
static int probeonly  = 0;	/* minimal probe (debug) */
static int idonly     = 0;	/* identify hardware and exit (debug) */
static int bailearly  = 0;	/* bail out of probe at stage N (debug) */
static int voffset    = 48;	/* framebuffer offset in aperture, MiB */
static char *mode = NULL;	/* initial video mode string */

module_param(accel, bool, S_IRUGO);
MODULE_PARM_DESC(accel, "Enable hardware acceleration");
module_param(vram, int, S_IRUGO);
MODULE_PARM_DESC(vram, "System RAM to allocate to framebuffer in MiB");
module_param(voffset, int, S_IRUGO);
MODULE_PARM_DESC(voffset, "Offset of framebuffer in MiB");
module_param(hwcursor, bool, S_IRUGO);
MODULE_PARM_DESC(hwcursor, "Enable HW cursor");
module_param(mtrr, bool, S_IRUGO);
MODULE_PARM_DESC(mtrr, "Enable MTRR support");
module_param(fixed, bool, S_IRUGO);
MODULE_PARM_DESC(fixed, "Disable mode switching");
module_param(noinit, bool, 0);
MODULE_PARM_DESC(noinit, "Don't initialise graphics mode when loading");
module_param(noregister, bool, 0);
MODULE_PARM_DESC(noregister, "Don't register, just probe and exit (debug)");
module_param(probeonly, bool, 0);
MODULE_PARM_DESC(probeonly, "Do a minimal probe (debug)");
module_param(idonly, bool, 0);
MODULE_PARM_DESC(idonly, "Just identify without doing anything else (debug)");
module_param(bailearly, bool, 0);
MODULE_PARM_DESC(bailearly, "Bail out early, depending on value (debug)");
module_param(mode, charp, S_IRUGO);
MODULE_PARM_DESC(mode,
		 "Initial video mode \"<xres>x<yres>[-<depth>][@<refresh>]\"");

#ifndef MODULE
/* Helpers for parsing "name=value" tokens from the boot command line. */
#define OPT_EQUAL(opt, name) (!strncmp(opt, name, strlen(name)))
#define OPT_INTVAL(opt, name) simple_strtoul(opt + strlen(name) + 1, NULL, 0)
#define OPT_STRVAL(opt, name) (opt + strlen(name))

/*
 * Return a kmalloc'd copy of the value following "name" in this_opt,
 * terminated at the next ' ' or ','.  Returns NULL on allocation failure.
 * Caller owns (and must eventually free) the returned string.
 */
static __inline__ char * get_opt_string(const char *this_opt, const char *name)
{
	const char *p;
	int i;
	char *ret;

	p = OPT_STRVAL(this_opt, name);
	i = 0;
	while (p[i] && p[i] != ' ' && p[i] != ',')
		i++;
	ret = kmalloc(i + 1, GFP_KERNEL);
	if (ret) {
		strncpy(ret, p, i);
		ret[i] = '\0';	/* strncpy does not terminate; do it here */
	}
	return ret;
}

/*
 * If this_opt is "name=<int>", store the integer in *ret and return 1;
 * otherwise return 0 and leave *ret untouched.
 */
static __inline__ int get_opt_int(const char *this_opt, const char *name,
				  int *ret)
{
	if (!ret)
		return 0;

	if (!OPT_EQUAL(this_opt, name))
		return 0;

	*ret = OPT_INTVAL(this_opt, name);
	return 1;
}

/*
 * Parse a boolean option: "name", "name=<val>" or the negated form
 * "no<name>".  Returns 1 if the token matched (with *ret set), else 0.
 */
static __inline__ int get_opt_bool(const char *this_opt, const char *name,
				   int *ret)
{
	if (!ret)
		return 0;

	if (OPT_EQUAL(this_opt, name)) {
		if (this_opt[strlen(name)] == '=')
			*ret = simple_strtoul(this_opt + strlen(name) + 1,
					      NULL, 0);
		else
			*ret = 1;
	} else {
		/* "no" prefix: e.g. "noaccel" clears the flag */
		if (OPT_EQUAL(this_opt, "no") && OPT_EQUAL(this_opt + 2, name))
			*ret = 0;
		else
			return 0;
	}
	return 1;
}

/*
 * Parse the "video=intelfb:..." boot options into the module-parameter
 * variables above.  Always returns 0.
 */
static int __init intelfb_setup(char *options)
{
	char *this_opt;

	DBG_MSG("intelfb_setup\n");

	if (!options || !*options) {
		DBG_MSG("no options\n");
		return 0;
	} else
		DBG_MSG("options: %s\n", options);

	/*
	 * These are the built-in options analogous to the module parameters
	 * defined above.
	 *
	 * The syntax is:
	 *
	 *    video=intelfb:[mode][,<param>=<val>] ...
	 *
	 * e.g.,
	 *
	 *    video=intelfb:1024x768-16@75,accel=0
	 */
	while ((this_opt = strsep(&options, ","))) {
		if (!*this_opt)
			continue;
		if (get_opt_bool(this_opt, "accel", &accel))
			;
		else if (get_opt_int(this_opt, "vram", &vram))
			;
		else if (get_opt_bool(this_opt, "hwcursor", &hwcursor))
			;
		else if (get_opt_bool(this_opt, "mtrr", &mtrr))
			;
		else if (get_opt_bool(this_opt, "fixed", &fixed))
			;
		else if (get_opt_bool(this_opt, "init", &noinit))
			noinit = !noinit;	/* option is "init", flag is "noinit" */
		else if (OPT_EQUAL(this_opt, "mode="))
			mode = get_opt_string(this_opt, "mode=");
		else
			/* a bare token is taken as the mode string */
			mode = this_opt;
	}

	return 0;
}

#endif

/* Module entry point: parse boot options and register the PCI driver. */
static int __init intelfb_init(void)
{
#ifndef MODULE
	char *option = NULL;
#endif

	DBG_MSG("intelfb_init\n");

	INF_MSG("Framebuffer driver for "
		"Intel(R) " SUPPORTED_CHIPSETS " chipsets\n");
	INF_MSG("Version " INTELFB_VERSION "\n");

	if (idonly)
		return -ENODEV;

#ifndef MODULE
	if (fb_get_options("intelfb", &option))
		return -ENODEV;
	intelfb_setup(option);
#endif

	return pci_register_driver(&intelfb_driver);
}

static void __exit intelfb_exit(void)
{
	DBG_MSG("intelfb_exit\n");
	pci_unregister_driver(&intelfb_driver);
}

module_init(intelfb_init);
module_exit(intelfb_exit);

/***************************************************************
 *                        mtrr support functions               *
 ***************************************************************/

#ifdef CONFIG_MTRR
/* Mark the graphics aperture write-combining; failure is non-fatal. */
static inline void __devinit set_mtrr(struct intelfb_info *dinfo)
{
	dinfo->mtrr_reg = mtrr_add(dinfo->aperture.physical,
				   dinfo->aperture.size, MTRR_TYPE_WRCOMB, 1);
	if (dinfo->mtrr_reg < 0) {
		ERR_MSG("unable to set MTRR\n");
		return;
	}
	dinfo->has_mtrr = 1;
}
static inline void unset_mtrr(struct intelfb_info *dinfo)
{
	if (dinfo->has_mtrr)
		mtrr_del(dinfo->mtrr_reg, dinfo->aperture.physical,
			 dinfo->aperture.size);
}
#else
#define set_mtrr(x) WRN_MSG("MTRR is disabled in the kernel\n")

#define unset_mtrr(x) do { } while (0)
#endif /* CONFIG_MTRR */

/***************************************************************
 *                     driver init / cleanup                   *
***************************************************************/ static void cleanup(struct intelfb_info *dinfo) { DBG_MSG("cleanup\n"); if (!dinfo) return; intelfbhw_disable_irq(dinfo); fb_dealloc_cmap(&dinfo->info->cmap); kfree(dinfo->info->pixmap.addr); if (dinfo->registered) unregister_framebuffer(dinfo->info); unset_mtrr(dinfo); if (dinfo->fbmem_gart && dinfo->gtt_fb_mem) { agp_unbind_memory(dinfo->gtt_fb_mem); agp_free_memory(dinfo->gtt_fb_mem); } if (dinfo->gtt_cursor_mem) { agp_unbind_memory(dinfo->gtt_cursor_mem); agp_free_memory(dinfo->gtt_cursor_mem); } if (dinfo->gtt_ring_mem) { agp_unbind_memory(dinfo->gtt_ring_mem); agp_free_memory(dinfo->gtt_ring_mem); } #ifdef CONFIG_FB_INTEL_I2C /* un-register I2C bus */ intelfb_delete_i2c_busses(dinfo); #endif if (dinfo->mmio_base) iounmap((void __iomem *)dinfo->mmio_base); if (dinfo->aperture.virtual) iounmap((void __iomem *)dinfo->aperture.virtual); if (dinfo->flag & INTELFB_MMIO_ACQUIRED) release_mem_region(dinfo->mmio_base_phys, INTEL_REG_SIZE); if (dinfo->flag & INTELFB_FB_ACQUIRED) release_mem_region(dinfo->aperture.physical, dinfo->aperture.size); framebuffer_release(dinfo->info); } #define bailout(dinfo) do { \ DBG_MSG("bailout\n"); \ cleanup(dinfo); \ INF_MSG("Not going to register framebuffer, exiting...\n"); \ return -ENODEV; \ } while (0) static int __devinit intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) { struct fb_info *info; struct intelfb_info *dinfo; int i, err, dvo; int aperture_size, stolen_size; struct agp_kern_info gtt_info; int agp_memtype; const char *s; struct agp_bridge_data *bridge; int aperture_bar = 0; int mmio_bar = 1; int offset; DBG_MSG("intelfb_pci_register\n"); num_registered++; if (num_registered != 1) { ERR_MSG("Attempted to register %d devices " "(should be only 1).\n", num_registered); return -ENODEV; } info = framebuffer_alloc(sizeof(struct intelfb_info), &pdev->dev); if (!info) { ERR_MSG("Could not allocate memory for intelfb_info.\n"); return 
-ENODEV; } if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) { ERR_MSG("Could not allocate cmap for intelfb_info.\n"); goto err_out_cmap; return -ENODEV; } dinfo = info->par; dinfo->info = info; dinfo->fbops = &intel_fb_ops; dinfo->pdev = pdev; /* Reserve pixmap space. */ info->pixmap.addr = kzalloc(64 * 1024, GFP_KERNEL); if (info->pixmap.addr == NULL) { ERR_MSG("Cannot reserve pixmap memory.\n"); goto err_out_pixmap; } /* set early this option because it could be changed by tv encoder driver */ dinfo->fixed_mode = fixed; /* Enable device. */ if ((err = pci_enable_device(pdev))) { ERR_MSG("Cannot enable device.\n"); cleanup(dinfo); return -ENODEV; } /* Set base addresses. */ if ((ent->device == PCI_DEVICE_ID_INTEL_915G) || (ent->device == PCI_DEVICE_ID_INTEL_915GM) || (ent->device == PCI_DEVICE_ID_INTEL_945G) || (ent->device == PCI_DEVICE_ID_INTEL_945GM) || (ent->device == PCI_DEVICE_ID_INTEL_945GME) || (ent->device == PCI_DEVICE_ID_INTEL_965G) || (ent->device == PCI_DEVICE_ID_INTEL_965GM)) { aperture_bar = 2; mmio_bar = 0; } dinfo->aperture.physical = pci_resource_start(pdev, aperture_bar); dinfo->aperture.size = pci_resource_len(pdev, aperture_bar); dinfo->mmio_base_phys = pci_resource_start(pdev, mmio_bar); DBG_MSG("fb aperture: 0x%llx/0x%llx, MMIO region: 0x%llx/0x%llx\n", (unsigned long long)pci_resource_start(pdev, aperture_bar), (unsigned long long)pci_resource_len(pdev, aperture_bar), (unsigned long long)pci_resource_start(pdev, mmio_bar), (unsigned long long)pci_resource_len(pdev, mmio_bar)); /* Reserve the fb and MMIO regions */ if (!request_mem_region(dinfo->aperture.physical, dinfo->aperture.size, INTELFB_MODULE_NAME)) { ERR_MSG("Cannot reserve FB region.\n"); cleanup(dinfo); return -ENODEV; } dinfo->flag |= INTELFB_FB_ACQUIRED; if (!request_mem_region(dinfo->mmio_base_phys, INTEL_REG_SIZE, INTELFB_MODULE_NAME)) { ERR_MSG("Cannot reserve MMIO region.\n"); cleanup(dinfo); return -ENODEV; } dinfo->flag |= INTELFB_MMIO_ACQUIRED; /* Get the chipset info. 
*/ dinfo->pci_chipset = pdev->device; if (intelfbhw_get_chipset(pdev, dinfo)) { cleanup(dinfo); return -ENODEV; } if (intelfbhw_get_memory(pdev, &aperture_size,&stolen_size)) { cleanup(dinfo); return -ENODEV; } INF_MSG("%02x:%02x.%d: %s, aperture size %dMB, " "stolen memory %dkB\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dinfo->name, BtoMB(aperture_size), BtoKB(stolen_size)); /* Set these from the options. */ dinfo->accel = accel; dinfo->hwcursor = hwcursor; if (NOACCEL_CHIPSET(dinfo) && dinfo->accel == 1) { INF_MSG("Acceleration is not supported for the %s chipset.\n", dinfo->name); dinfo->accel = 0; } /* Framebuffer parameters - Use all the stolen memory if >= vram */ if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) { dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size); dinfo->fbmem_gart = 0; } else { dinfo->fb.size = MB(vram); dinfo->fbmem_gart = 1; } /* Allocate space for the ring buffer and HW cursor if enabled. */ if (dinfo->accel) { dinfo->ring.size = RINGBUFFER_SIZE; dinfo->ring_tail_mask = dinfo->ring.size - 1; } if (dinfo->hwcursor) dinfo->cursor.size = HW_CURSOR_SIZE; /* Use agpgart to manage the GATT */ if (!(bridge = agp_backend_acquire(pdev))) { ERR_MSG("cannot acquire agp\n"); cleanup(dinfo); return -ENODEV; } /* get the current gatt info */ if (agp_copy_info(bridge, &gtt_info)) { ERR_MSG("cannot get agp info\n"); agp_backend_release(bridge); cleanup(dinfo); return -ENODEV; } if (MB(voffset) < stolen_size) offset = (stolen_size >> 12); else offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE; /* set the mem offsets - set them after the already used pages */ if (dinfo->accel) dinfo->ring.offset = offset + gtt_info.current_memory; if (dinfo->hwcursor) dinfo->cursor.offset = offset + + gtt_info.current_memory + (dinfo->ring.size >> 12); if (dinfo->fbmem_gart) dinfo->fb.offset = offset + + gtt_info.current_memory + (dinfo->ring.size >> 12) + (dinfo->cursor.size >> 12); /* Allocate memories (which aren't stolen) */ /* Map the fb and 
MMIO regions */ /* ioremap only up to the end of used aperture */ dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache (dinfo->aperture.physical, ((offset + dinfo->fb.offset) << 12) + dinfo->fb.size); if (!dinfo->aperture.virtual) { ERR_MSG("Cannot remap FB region.\n"); cleanup(dinfo); return -ENODEV; } dinfo->mmio_base = (u8 __iomem *)ioremap_nocache(dinfo->mmio_base_phys, INTEL_REG_SIZE); if (!dinfo->mmio_base) { ERR_MSG("Cannot remap MMIO region.\n"); cleanup(dinfo); return -ENODEV; } if (dinfo->accel) { if (!(dinfo->gtt_ring_mem = agp_allocate_memory(bridge, dinfo->ring.size >> 12, AGP_NORMAL_MEMORY))) { ERR_MSG("cannot allocate ring buffer memory\n"); agp_backend_release(bridge); cleanup(dinfo); return -ENOMEM; } if (agp_bind_memory(dinfo->gtt_ring_mem, dinfo->ring.offset)) { ERR_MSG("cannot bind ring buffer memory\n"); agp_backend_release(bridge); cleanup(dinfo); return -EBUSY; } dinfo->ring.physical = dinfo->aperture.physical + (dinfo->ring.offset << 12); dinfo->ring.virtual = dinfo->aperture.virtual + (dinfo->ring.offset << 12); dinfo->ring_head = 0; } if (dinfo->hwcursor) { agp_memtype = dinfo->mobile ? 
AGP_PHYSICAL_MEMORY : AGP_NORMAL_MEMORY; if (!(dinfo->gtt_cursor_mem = agp_allocate_memory(bridge, dinfo->cursor.size >> 12, agp_memtype))) { ERR_MSG("cannot allocate cursor memory\n"); agp_backend_release(bridge); cleanup(dinfo); return -ENOMEM; } if (agp_bind_memory(dinfo->gtt_cursor_mem, dinfo->cursor.offset)) { ERR_MSG("cannot bind cursor memory\n"); agp_backend_release(bridge); cleanup(dinfo); return -EBUSY; } if (dinfo->mobile) dinfo->cursor.physical = dinfo->gtt_cursor_mem->physical; else dinfo->cursor.physical = dinfo->aperture.physical + (dinfo->cursor.offset << 12); dinfo->cursor.virtual = dinfo->aperture.virtual + (dinfo->cursor.offset << 12); } if (dinfo->fbmem_gart) { if (!(dinfo->gtt_fb_mem = agp_allocate_memory(bridge, dinfo->fb.size >> 12, AGP_NORMAL_MEMORY))) { WRN_MSG("cannot allocate framebuffer memory - use " "the stolen one\n"); dinfo->fbmem_gart = 0; } if (agp_bind_memory(dinfo->gtt_fb_mem, dinfo->fb.offset)) { WRN_MSG("cannot bind framebuffer memory - use " "the stolen one\n"); dinfo->fbmem_gart = 0; } } /* update framebuffer memory parameters */ if (!dinfo->fbmem_gart) dinfo->fb.offset = 0; /* starts at offset 0 */ dinfo->fb.physical = dinfo->aperture.physical + (dinfo->fb.offset << 12); dinfo->fb.virtual = dinfo->aperture.virtual + (dinfo->fb.offset << 12); dinfo->fb_start = dinfo->fb.offset << 12; /* release agpgart */ agp_backend_release(bridge); if (mtrr) set_mtrr(dinfo); DBG_MSG("fb: 0x%x(+ 0x%x)/0x%x (0x%p)\n", dinfo->fb.physical, dinfo->fb.offset, dinfo->fb.size, dinfo->fb.virtual); DBG_MSG("MMIO: 0x%x/0x%x (0x%p)\n", dinfo->mmio_base_phys, INTEL_REG_SIZE, dinfo->mmio_base); DBG_MSG("ring buffer: 0x%x/0x%x (0x%p)\n", dinfo->ring.physical, dinfo->ring.size, dinfo->ring.virtual); DBG_MSG("HW cursor: 0x%x/0x%x (0x%p) (offset 0x%x) (phys 0x%x)\n", dinfo->cursor.physical, dinfo->cursor.size, dinfo->cursor.virtual, dinfo->cursor.offset, dinfo->cursor.physical); DBG_MSG("options: vram = %d, accel = %d, hwcursor = %d, fixed = %d, " "noinit = 
%d\n", vram, accel, hwcursor, fixed, noinit); DBG_MSG("options: mode = \"%s\"\n", mode ? mode : ""); if (probeonly) bailout(dinfo); /* * Check if the LVDS port or any DVO ports are enabled. If so, * don't allow mode switching */ dvo = intelfbhw_check_non_crt(dinfo); if (dvo) { dinfo->fixed_mode = 1; WRN_MSG("Non-CRT device is enabled ( "); i = 0; while (dvo) { if (dvo & 1) { s = intelfbhw_dvo_to_string(1 << i); if (s) printk("%s ", s); } dvo >>= 1; ++i; } printk("). Disabling mode switching.\n"); } if (bailearly == 1) bailout(dinfo); if (FIXED_MODE(dinfo) && screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) { ERR_MSG("Video mode must be programmed at boot time.\n"); cleanup(dinfo); return -ENODEV; } if (bailearly == 2) bailout(dinfo); /* Initialise dinfo and related data. */ /* If an initial mode was programmed at boot time, get its details. */ if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) get_initial_mode(dinfo); if (bailearly == 3) bailout(dinfo); if (FIXED_MODE(dinfo)) /* remap fb address */ update_dinfo(dinfo, &dinfo->initial_var); if (bailearly == 4) bailout(dinfo); if (intelfb_set_fbinfo(dinfo)) { cleanup(dinfo); return -ENODEV; } if (bailearly == 5) bailout(dinfo); #ifdef CONFIG_FB_INTEL_I2C /* register I2C bus */ intelfb_create_i2c_busses(dinfo); #endif if (bailearly == 6) bailout(dinfo); pci_set_drvdata(pdev, dinfo); /* Save the initial register state. */ i = intelfbhw_read_hw_state(dinfo, &dinfo->save_state, bailearly > 6 ? 
bailearly - 6 : 0); if (i != 0) { DBG_MSG("intelfbhw_read_hw_state returned %d\n", i); bailout(dinfo); } intelfbhw_print_hw_state(dinfo, &dinfo->save_state); if (bailearly == 18) bailout(dinfo); /* read active pipe */ dinfo->pipe = intelfbhw_active_pipe(&dinfo->save_state); /* Cursor initialisation */ if (dinfo->hwcursor) { intelfbhw_cursor_init(dinfo); intelfbhw_cursor_reset(dinfo); } if (bailearly == 19) bailout(dinfo); /* 2d acceleration init */ if (dinfo->accel) intelfbhw_2d_start(dinfo); if (bailearly == 20) bailout(dinfo); if (noregister) bailout(dinfo); if (register_framebuffer(dinfo->info) < 0) { ERR_MSG("Cannot register framebuffer.\n"); cleanup(dinfo); return -ENODEV; } dinfo->registered = 1; dinfo->open = 0; init_waitqueue_head(&dinfo->vsync.wait); spin_lock_init(&dinfo->int_lock); dinfo->irq_flags = 0; dinfo->vsync.pan_display = 0; dinfo->vsync.pan_offset = 0; return 0; err_out_pixmap: fb_dealloc_cmap(&info->cmap); err_out_cmap: framebuffer_release(info); return -ENODEV; } static void __devexit intelfb_pci_unregister(struct pci_dev *pdev) { struct intelfb_info *dinfo = pci_get_drvdata(pdev); DBG_MSG("intelfb_pci_unregister\n"); if (!dinfo) return; cleanup(dinfo); pci_set_drvdata(pdev, NULL); } /*************************************************************** * helper functions * ***************************************************************/ int __inline__ intelfb_var_to_depth(const struct fb_var_screeninfo *var) { DBG_MSG("intelfb_var_to_depth: bpp: %d, green.length is %d\n", var->bits_per_pixel, var->green.length); switch (var->bits_per_pixel) { case 16: return (var->green.length == 6) ? 
16 : 15; case 32: return 24; default: return var->bits_per_pixel; } } static __inline__ int var_to_refresh(const struct fb_var_screeninfo *var) { int xtot = var->xres + var->left_margin + var->right_margin + var->hsync_len; int ytot = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; return (1000000000 / var->pixclock * 1000 + 500) / xtot / ytot; } /*************************************************************** * Various intialisation functions * ***************************************************************/ static void __devinit get_initial_mode(struct intelfb_info *dinfo) { struct fb_var_screeninfo *var; int xtot, ytot; DBG_MSG("get_initial_mode\n"); dinfo->initial_vga = 1; dinfo->initial_fb_base = screen_info.lfb_base; dinfo->initial_video_ram = screen_info.lfb_size * KB(64); dinfo->initial_pitch = screen_info.lfb_linelength; var = &dinfo->initial_var; memset(var, 0, sizeof(*var)); var->xres = screen_info.lfb_width; var->yres = screen_info.lfb_height; var->bits_per_pixel = screen_info.lfb_depth; switch (screen_info.lfb_depth) { case 15: var->bits_per_pixel = 16; break; case 24: var->bits_per_pixel = 32; break; } DBG_MSG("Initial info: FB is 0x%x/0x%x (%d kByte)\n", dinfo->initial_fb_base, dinfo->initial_video_ram, BtoKB(dinfo->initial_video_ram)); DBG_MSG("Initial info: mode is %dx%d-%d (%d)\n", var->xres, var->yres, var->bits_per_pixel, dinfo->initial_pitch); /* Dummy timing values (assume 60Hz) */ var->left_margin = (var->xres / 8) & 0xf8; var->right_margin = 32; var->upper_margin = 16; var->lower_margin = 4; var->hsync_len = (var->xres / 8) & 0xf8; var->vsync_len = 4; xtot = var->xres + var->left_margin + var->right_margin + var->hsync_len; ytot = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; var->pixclock = 10000000 / xtot * 1000 / ytot * 100 / 60; var->height = -1; var->width = -1; if (var->bits_per_pixel > 8) { var->red.offset = screen_info.red_pos; var->red.length = screen_info.red_size; var->green.offset = 
screen_info.green_pos; var->green.length = screen_info.green_size; var->blue.offset = screen_info.blue_pos; var->blue.length = screen_info.blue_size; var->transp.offset = screen_info.rsvd_pos; var->transp.length = screen_info.rsvd_size; } else { var->red.length = 8; var->green.length = 8; var->blue.length = 8; } } static int __devinit intelfb_init_var(struct intelfb_info *dinfo) { struct fb_var_screeninfo *var; int msrc = 0; DBG_MSG("intelfb_init_var\n"); var = &dinfo->info->var; if (FIXED_MODE(dinfo)) { memcpy(var, &dinfo->initial_var, sizeof(struct fb_var_screeninfo)); msrc = 5; } else { const u8 *edid_s = fb_firmware_edid(&dinfo->pdev->dev); u8 *edid_d = NULL; if (edid_s) { edid_d = kmemdup(edid_s, EDID_LENGTH, GFP_KERNEL); if (edid_d) { fb_edid_to_monspecs(edid_d, &dinfo->info->monspecs); kfree(edid_d); } } if (mode) { printk("intelfb: Looking for mode in private " "database\n"); msrc = fb_find_mode(var, dinfo->info, mode, dinfo->info->monspecs.modedb, dinfo->info->monspecs.modedb_len, NULL, 0); if (msrc && msrc > 1) { printk("intelfb: No mode in private database, " "intelfb: looking for mode in global " "database "); msrc = fb_find_mode(var, dinfo->info, mode, NULL, 0, NULL, 0); if (msrc) msrc |= 8; } } if (!msrc) msrc = fb_find_mode(var, dinfo->info, PREFERRED_MODE, NULL, 0, NULL, 0); } if (!msrc) { ERR_MSG("Cannot find a suitable video mode.\n"); return 1; } INF_MSG("Initial video mode is %dx%d-%d@%d.\n", var->xres, var->yres, var->bits_per_pixel, var_to_refresh(var)); DBG_MSG("Initial video mode is from %d.\n", msrc); #if ALLOCATE_FOR_PANNING /* Allow use of half of the video ram for panning */ var->xres_virtual = var->xres; var->yres_virtual = dinfo->fb.size / 2 / (var->bits_per_pixel * var->xres); if (var->yres_virtual < var->yres) var->yres_virtual = var->yres; #else var->yres_virtual = var->yres; #endif if (dinfo->accel) var->accel_flags |= FB_ACCELF_TEXT; else var->accel_flags &= ~FB_ACCELF_TEXT; return 0; } static int __devinit 
intelfb_set_fbinfo(struct intelfb_info *dinfo) { struct fb_info *info = dinfo->info; DBG_MSG("intelfb_set_fbinfo\n"); info->flags = FBINFO_FLAG_DEFAULT; info->fbops = &intel_fb_ops; info->pseudo_palette = dinfo->pseudo_palette; info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; if (intelfb_init_var(dinfo)) return 1; info->pixmap.scan_align = 1; strcpy(info->fix.id, dinfo->name); info->fix.smem_start = dinfo->fb.physical; info->fix.smem_len = dinfo->fb.size; info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 8; info->fix.ypanstep = 1; info->fix.ywrapstep = 0; info->fix.mmio_start = dinfo->mmio_base_phys; info->fix.mmio_len = INTEL_REG_SIZE; info->fix.accel = FB_ACCEL_I830; update_dinfo(dinfo, &info->var); return 0; } /* Update dinfo to match the active video mode. */ static void update_dinfo(struct intelfb_info *dinfo, struct fb_var_screeninfo *var) { DBG_MSG("update_dinfo\n"); dinfo->bpp = var->bits_per_pixel; dinfo->depth = intelfb_var_to_depth(var); dinfo->xres = var->xres; dinfo->yres = var->xres; dinfo->pixclock = var->pixclock; dinfo->info->fix.visual = dinfo->visual; dinfo->info->fix.line_length = dinfo->pitch; switch (dinfo->bpp) { case 8: dinfo->visual = FB_VISUAL_PSEUDOCOLOR; dinfo->pitch = var->xres_virtual; break; case 16: dinfo->visual = FB_VISUAL_TRUECOLOR; dinfo->pitch = var->xres_virtual * 2; break; case 32: dinfo->visual = FB_VISUAL_TRUECOLOR; dinfo->pitch = var->xres_virtual * 4; break; } /* Make sure the line length is a aligned correctly. 
*/ if (IS_I9XX(dinfo)) dinfo->pitch = ROUND_UP_TO(dinfo->pitch, STRIDE_ALIGNMENT_I9XX); else dinfo->pitch = ROUND_UP_TO(dinfo->pitch, STRIDE_ALIGNMENT); if (FIXED_MODE(dinfo)) dinfo->pitch = dinfo->initial_pitch; dinfo->info->screen_base = (char __iomem *)dinfo->fb.virtual; dinfo->info->fix.line_length = dinfo->pitch; dinfo->info->fix.visual = dinfo->visual; } /* fbops functions */ /*************************************************************** * fbdev interface * ***************************************************************/ static int intelfb_open(struct fb_info *info, int user) { struct intelfb_info *dinfo = GET_DINFO(info); if (user) dinfo->open++; return 0; } static int intelfb_release(struct fb_info *info, int user) { struct intelfb_info *dinfo = GET_DINFO(info); if (user) { dinfo->open--; msleep(1); if (!dinfo->open) intelfbhw_disable_irq(dinfo); } return 0; } static int intelfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int change_var = 0; struct fb_var_screeninfo v; struct intelfb_info *dinfo; static int first = 1; int i; /* Good pitches to allow tiling. Don't care about pitches < 1024. */ static const int pitches[] = { 128 * 8, 128 * 16, 128 * 32, 128 * 64, 0 }; DBG_MSG("intelfb_check_var: accel_flags is %d\n", var->accel_flags); dinfo = GET_DINFO(info); /* update the pitch */ if (intelfbhw_validate_mode(dinfo, var) != 0) return -EINVAL; v = *var; for (i = 0; pitches[i] != 0; i++) { if (pitches[i] >= v.xres_virtual) { v.xres_virtual = pitches[i]; break; } } /* Check for a supported bpp. 
 */
	if (v.bits_per_pixel <= 8)
		v.bits_per_pixel = 8;
	else if (v.bits_per_pixel <= 16) {
		if (v.bits_per_pixel == 16)
			v.green.length = 6;	/* 16 bpp implies RGB565 */
		v.bits_per_pixel = 16;
	} else if (v.bits_per_pixel <= 32)
		v.bits_per_pixel = 32;
	else
		return -EINVAL;

	/* Does the request differ from the currently programmed mode? */
	change_var = ((info->var.xres != var->xres) ||
		      (info->var.yres != var->yres) ||
		      (info->var.xres_virtual != var->xres_virtual) ||
		      (info->var.yres_virtual != var->yres_virtual) ||
		      (info->var.bits_per_pixel != var->bits_per_pixel) ||
		      memcmp(&info->var.red, &var->red, sizeof(var->red)) ||
		      memcmp(&info->var.green, &var->green,
			     sizeof(var->green)) ||
		      memcmp(&info->var.blue, &var->blue, sizeof(var->blue)));

	/* In fixed mode, refuse anything but the boot-programmed mode. */
	if (FIXED_MODE(dinfo) &&
	    (change_var ||
	     var->yres_virtual > dinfo->initial_var.yres_virtual ||
	     var->yres_virtual < dinfo->initial_var.yres ||
	     var->xoffset || var->nonstd)) {
		if (first) {
			ERR_MSG("Changing the video mode is not supported.\n");
			first = 0;
		}
		return -EINVAL;
	}

	/* Canonical colour-field layout for the chosen depth. */
	switch (intelfb_var_to_depth(&v)) {
	case 8:
		v.red.offset = v.green.offset = v.blue.offset = 0;
		v.red.length = v.green.length = v.blue.length = 8;
		v.transp.offset = v.transp.length = 0;
		break;
	case 15:
		v.red.offset = 10;
		v.green.offset = 5;
		v.blue.offset = 0;
		v.red.length = v.green.length = v.blue.length = 5;
		v.transp.offset = v.transp.length = 0;
		break;
	case 16:
		v.red.offset = 11;
		v.green.offset = 5;
		v.blue.offset = 0;
		v.red.length = 5;
		v.green.length = 6;
		v.blue.length = 5;
		v.transp.offset = v.transp.length = 0;
		break;
	case 24:
		v.red.offset = 16;
		v.green.offset = 8;
		v.blue.offset = 0;
		v.red.length = v.green.length = v.blue.length = 8;
		v.transp.offset = v.transp.length = 0;
		break;
	case 32:
		v.red.offset = 16;
		v.green.offset = 8;
		v.blue.offset = 0;
		v.red.length = v.green.length = v.blue.length = 8;
		v.transp.offset = 24;
		v.transp.length = 8;
		break;
	}

	/* NOTE(review): if xoffset/yoffset are __u32 (as in linux/fb.h)
	 * these two "< 0" tests can never be true — confirm and drop. */
	if (v.xoffset < 0)
		v.xoffset = 0;
	if (v.yoffset < 0)
		v.yoffset = 0;

	/* Clamp panning offsets to the virtual resolution. */
	if (v.xoffset > v.xres_virtual - v.xres)
		v.xoffset = v.xres_virtual - v.xres;
	if (v.yoffset > v.yres_virtual - v.yres)
		v.yoffset = v.yres_virtual - v.yres;

	v.red.msb_right = v.green.msb_right = v.blue.msb_right =
			  v.transp.msb_right = 0;

	*var = v;

	return 0;
}

/*
 * Program the hardware for the mode in info->var: disable the VCO,
 * blank, stop the 2D engine, write the new timing registers, then
 * restart acceleration and unblank.  Refused in fixed mode.
 */
static int intelfb_set_par(struct fb_info *info)
{
	struct intelfb_hwstate *hw;
	struct intelfb_info *dinfo = GET_DINFO(info);

	if (FIXED_MODE(dinfo)) {
		ERR_MSG("Changing the video mode is not supported.\n");
		return -EINVAL;
	}

	hw = kmalloc(sizeof(*hw), GFP_ATOMIC);
	if (!hw)
		return -ENOMEM;

	DBG_MSG("intelfb_set_par (%dx%d-%d)\n", info->var.xres,
		info->var.yres, info->var.bits_per_pixel);

	/*
	 * Disable VCO prior to timing register change.
	 */
	OUTREG(DPLL_A, INREG(DPLL_A) & ~DPLL_VCO_ENABLE);

	intelfb_blank(FB_BLANK_POWERDOWN, info);

	if (ACCEL(dinfo, info))
		intelfbhw_2d_stop(dinfo);

	/* Build the new register set from the saved boot-time state. */
	memcpy(hw, &dinfo->save_state, sizeof(*hw));
	if (intelfbhw_mode_to_hw(dinfo, hw, &info->var))
		goto invalid_mode;
	if (intelfbhw_program_mode(dinfo, hw, 0))
		goto invalid_mode;

#if REGDUMP > 0
	intelfbhw_read_hw_state(dinfo, hw, 0);
	intelfbhw_print_hw_state(dinfo, hw);
#endif

	update_dinfo(dinfo, &info->var);

	if (ACCEL(dinfo, info))
		intelfbhw_2d_start(dinfo);

	intelfb_pan_display(&info->var, info);

	intelfb_blank(FB_BLANK_UNBLANK, info);

	if (ACCEL(dinfo, info)) {
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
		FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT |
		FBINFO_HWACCEL_IMAGEBLIT;
	} else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;

	kfree(hw);
	return 0;
invalid_mode:
	kfree(hw);
	return -EINVAL;
}

/*
 * Set one palette entry.  For 8 bpp the hardware palette is programmed;
 * for the first 16 entries a software pseudo-palette is also kept for
 * the console drawing routines.
 */
static int intelfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			     unsigned blue, unsigned transp,
			     struct fb_info *info)
{
	struct intelfb_info *dinfo = GET_DINFO(info);

#if VERBOSE > 0
	DBG_MSG("intelfb_setcolreg: regno %d, depth %d\n", regno,
		dinfo->depth);
#endif

	if (regno > 255)
		return 1;

	if (dinfo->depth == 8) {
		/* scale 16-bit colour components down to 8 bits */
		red >>= 8;
		green >>= 8;
		blue >>= 8;

		intelfbhw_setcolreg(dinfo, regno, red, green, blue,
				    transp);
	}

	if (regno < 16) {
		switch (dinfo->depth) {
		case 15:
			dinfo->pseudo_palette[regno] = ((red & 0xf800) >> 1) |
				((green & 0xf800) >> 6) |
				((blue & 0xf800) >> 11);
			break;
		case 16:
			dinfo->pseudo_palette[regno] = (red & 0xf800) |
				((green & 0xfc00) >> 5) |
				((blue & 0xf800) >> 11);
			break;
		case 24:
			dinfo->pseudo_palette[regno] = ((red & 0xff00) << 8) |
				(green & 0xff00) |
				((blue & 0xff00) >> 8);
			break;
		}
	}

	return 0;
}

/* Blank/unblank: delegated entirely to the hardware layer. */
static int intelfb_blank(int blank, struct fb_info *info)
{
	intelfbhw_do_blank(blank, info);
	return 0;
}

/* Pan the display: delegated entirely to the hardware layer. */
static int intelfb_pan_display(struct fb_var_screeninfo *var,
			       struct fb_info *info)
{
	intelfbhw_pan_display(var, info);
	return 0;
}

/* When/if we have our own ioctls. */
static int intelfb_ioctl(struct fb_info *info, unsigned int cmd,
			 unsigned long arg)
{
	int retval = 0;
	struct intelfb_info *dinfo = GET_DINFO(info);
	u32 pipe = 0;

	switch (cmd) {
	case FBIO_WAITFORVSYNC:
		/* arg is a userspace pointer to the pipe number */
		if (get_user(pipe, (__u32 __user *)arg))
			return -EFAULT;

		retval = intelfbhw_wait_for_vsync(dinfo, pipe);
		break;
	default:
		break;
	}

	return retval;
}

/*
 * Accelerated solid fill; falls back to the generic cfb routine when
 * acceleration is off or the depth is unsupported.
 */
static void intelfb_fillrect (struct fb_info *info,
			      const struct fb_fillrect *rect)
{
	struct intelfb_info *dinfo = GET_DINFO(info);
	u32 rop, color;

#if VERBOSE > 0
	DBG_MSG("intelfb_fillrect\n");
#endif

	if (!ACCEL(dinfo, info) || dinfo->depth == 4) {
		cfb_fillrect(info, rect);
		return;
	}

	if (rect->rop == ROP_COPY)
		rop = PAT_ROP_GXCOPY;
	else /* ROP_XOR */
		rop = PAT_ROP_GXXOR;

	if (dinfo->depth != 8)
		/* rect->color is a pseudo-palette index at >8 bpp */
		color = dinfo->pseudo_palette[rect->color];
	else
		color = rect->color;

	intelfbhw_do_fillrect(dinfo, rect->dx, rect->dy,
			      rect->width, rect->height, color,
			      dinfo->pitch, info->var.bits_per_pixel,
			      rop);
}

/* Accelerated screen-to-screen copy with generic fallback. */
static void intelfb_copyarea(struct fb_info *info,
			     const struct fb_copyarea *region)
{
	struct intelfb_info *dinfo = GET_DINFO(info);

#if VERBOSE > 0
	DBG_MSG("intelfb_copyarea\n");
#endif

	if (!ACCEL(dinfo, info) || dinfo->depth == 4) {
		cfb_copyarea(info, region);
		return;
	}

	intelfbhw_do_bitblt(dinfo, region->sx, region->sy, region->dx,
			    region->dy, region->width, region->height,
			    dinfo->pitch, info->var.bits_per_pixel);
}

/*
 * Accelerated 1-bit glyph expansion; anything else (or a hardware
 * refusal) falls back to the generic cfb routine.
 */
static void intelfb_imageblit(struct fb_info *info,
			      const struct fb_image *image)
{
	struct intelfb_info *dinfo = GET_DINFO(info);
	u32 fgcolor, bgcolor;

#if VERBOSE > 0
	DBG_MSG("intelfb_imageblit\n");
#endif

	if (!ACCEL(dinfo, info) || dinfo->depth == 4
	    || image->depth != 1) {
		cfb_imageblit(info, image);
		return;
	}

	if (dinfo->depth != 8) {
		fgcolor = dinfo->pseudo_palette[image->fg_color];
		bgcolor = dinfo->pseudo_palette[image->bg_color];
	} else {
		fgcolor = image->fg_color;
		bgcolor = image->bg_color;
	}

	if (!intelfbhw_do_drawglyph(dinfo, fgcolor, bgcolor, image->width,
				    image->height, image->data,
				    image->dx, image->dy,
				    dinfo->pitch,
				    info->var.bits_per_pixel)) {
		cfb_imageblit(info, image);
		return;
	}
}

/*
 * Hardware cursor handling.  If the cursor base register no longer
 * points at our cursor image (e.g. X reprogrammed it), the whole cursor
 * state is re-initialised from the saved bitmap first.
 */
static int intelfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	struct intelfb_info *dinfo = GET_DINFO(info);
	u32 physical;
#if VERBOSE > 0
	DBG_MSG("intelfb_cursor\n");
#endif

	if (!dinfo->hwcursor)
		return -ENODEV;

	intelfbhw_cursor_hide(dinfo);

	/* If XFree killed the cursor - restore it */
	physical = (dinfo->mobile || IS_I9XX(dinfo)) ? dinfo->cursor.physical :
		   (dinfo->cursor.offset << 12);

	if (INREG(CURSOR_A_BASEADDR) != physical) {
		u32 fg, bg;

		DBG_MSG("the cursor was killed - restore it !!\n");
		DBG_MSG("size %d, %d pos %d, %d\n",
			cursor->image.width, cursor->image.height,
			cursor->image.dx, cursor->image.dy);

		intelfbhw_cursor_init(dinfo);
		intelfbhw_cursor_reset(dinfo);
		intelfbhw_cursor_setpos(dinfo, cursor->image.dx,
					cursor->image.dy);

		if (dinfo->depth != 8) {
			fg =dinfo->pseudo_palette[cursor->image.fg_color];
			bg =dinfo->pseudo_palette[cursor->image.bg_color];
		} else {
			fg = cursor->image.fg_color;
			bg = cursor->image.bg_color;
		}

		intelfbhw_cursor_setcolor(dinfo, bg, fg);
		intelfbhw_cursor_load(dinfo, cursor->image.width,
				      cursor->image.height,
				      dinfo->cursor_src);

		if (cursor->enable)
			intelfbhw_cursor_show(dinfo);
		return 0;
	}

	if (cursor->set & FB_CUR_SETPOS) {
		u32 dx, dy;

		/* cursor position is relative to the visible area */
		dx = cursor->image.dx - info->var.xoffset;
		dy = cursor->image.dy - info->var.yoffset;

		intelfbhw_cursor_setpos(dinfo, dx, dy);
	}

	if (cursor->set & FB_CUR_SETSIZE) {
		if (cursor->image.width > 64 ||
		    cursor->image.height > 64)
			return -ENXIO;

		intelfbhw_cursor_reset(dinfo);
	}

	if (cursor->set & FB_CUR_SETCMAP) {
		u32 fg, bg;

		if (dinfo->depth != 8) {
			fg = dinfo->pseudo_palette[cursor->image.fg_color];
			bg = dinfo->pseudo_palette[cursor->image.bg_color];
		} else {
			fg = cursor->image.fg_color;
			bg = cursor->image.bg_color;
		}

		intelfbhw_cursor_setcolor(dinfo, bg, fg);
	}

	if (cursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
		u32 s_pitch = (ROUND_UP_TO(cursor->image.width, 8) / 8);
		u32 size = s_pitch * cursor->image.height;
		u8 *dat = (u8 *) cursor->image.data;
		u8 *msk = (u8 *) cursor->mask;
		/* NOTE(review): size can reach s_pitch(<=8) * height(<=64)
		 * = 512 while src is only 64 bytes — looks like a stack
		 * overflow for tall cursors; confirm the sizes callers
		 * actually pass (and the capacity of dinfo->cursor_src)
		 * before changing. */
		u8 src[64];
		u32 i;

		if (cursor->image.depth != 1)
			return -ENXIO;

		/* combine image data with the mask per the requested ROP */
		switch (cursor->rop) {
		case ROP_XOR:
			for (i = 0; i < size; i++)
				src[i] = dat[i] ^ msk[i];
			break;
		case ROP_COPY:
		default:
			for (i = 0; i < size; i++)
				src[i] = dat[i] & msk[i];
			break;
		}

		/* save the bitmap to restore it when XFree will make the
		   cursor dirty */
		memcpy(dinfo->cursor_src, src, size);

		intelfbhw_cursor_load(dinfo, cursor->image.width,
				      cursor->image.height, src);
	}

	if (cursor->enable)
		intelfbhw_cursor_show(dinfo);

	return 0;
}

/* Wait for the 2D engine to go idle (no-op if the ring is locked up). */
static int intelfb_sync(struct fb_info *info)
{
	struct intelfb_info *dinfo = GET_DINFO(info);

#if VERBOSE > 0
	DBG_MSG("intelfb_sync\n");
#endif

	if (dinfo->ring_lockup)
		return 0;

	intelfbhw_do_sync(dinfo);
	return 0;
}
gpl-2.0
AOKP/kernel_samsung_manta
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
4190
53897
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include <linux/export.h> #include "dm_common.h" #include "phy_common.h" #include "../pci.h" #include "../base.h" struct dig_t dm_digtable; static struct ps_t dm_pstable; #define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) #define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) #define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) #define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) #define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) #define RTLPRIV (struct rtl_priv *) #define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ ((RTLPRIV(_priv))->mac80211.opmode == \ NL80211_IFTYPE_ADHOC) ? 
\ ((RTLPRIV(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \ ((RTLPRIV(_priv))->dm.undecorated_smoothed_pwdb) static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 0x7f8001fe, 0x788001e2, 0x71c001c7, 0x6b8001ae, 0x65400195, 0x5fc0017f, 0x5a400169, 0x55400155, 0x50800142, 0x4c000130, 0x47c0011f, 0x43c0010f, 0x40000100, 0x3c8000f2, 0x390000e4, 0x35c000d7, 0x32c000cb, 0x300000c0, 0x2d4000b5, 0x2ac000ab, 0x288000a2, 0x26000098, 0x24000090, 0x22000088, 0x20000080, 0x1e400079, 0x1c800072, 0x1b00006c, 0x19800066, 0x18000060, 0x16c0005b, 0x15800056, 0x14400051, 0x1300004c, 0x12000048, 0x11000044, 0x10000040, }; static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, {0x0c, 0x0c, 0x0a, 
0x09, 0x06, 0x04, 0x02, 0x01}, {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} }; static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, {0x09, 
0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
};

/*
 * Reset the dynamic initial-gain (DIG) state to its power-on
 * defaults: DIG enabled, external-port stage at its MAX sentinel,
 * an initial gain value of 0x20 (previous value 0 so the first
 * write always programs the hardware), every connect state set to
 * "disconnected", and the RSSI/false-alarm thresholds plus gain and
 * backoff limits taken from the DM_* constants.  The hw argument is
 * unused here; the table is a file-scope global.
 */
static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
{
	dm_digtable.dig_enable_flag = true;
	dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
	dm_digtable.cur_igvalue = 0x20;
	dm_digtable.pre_igvalue = 0x0;
	/* NB: "connectctate" is the (misspelled) field name from the
	 * shared header -- it cannot be renamed here. */
	dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
	dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
	dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
	dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
	dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
	dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
	dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
	dm_digtable.rx_gain_range_max = DM_DIG_MAX;
	dm_digtable.rx_gain_range_min = DM_DIG_MIN;
	dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
	dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
	dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
	dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
	dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
}

/*
 * Pick the minimum smoothed RSSI (pwdb) to drive DIG from.  When
 * both a multi-STA link and a STA link are up, the smaller of the
 * per-entry minimum and our own smoothed pwdb is used; otherwise
 * whichever of the two applies to the current connect state.
 * Returns the value truncated to u8.  (Definition continues past
 * this chunk boundary.)
 */
static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	long rssi_val_min = 0;

	if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
	    (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
		if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
			rssi_val_min =
			    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
			     rtlpriv->dm.undecorated_smoothed_pwdb) ?
rtlpriv->dm.undecorated_smoothed_pwdb : rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; else rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) { rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; } else if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) { rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; } return (u8) rssi_val_min; } static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { u32 ret_value; struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail; rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); falsealm_cnt->cnt_cck_fail = ret_value; ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail + falsealm_cnt->cnt_rate_illegal + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail + falsealm_cnt->cnt_cck_fail); rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1); rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_parity_fail = 
%d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
		 falsealm_cnt->cnt_parity_fail,
		 falsealm_cnt->cnt_rate_illegal,
		 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
		 falsealm_cnt->cnt_ofdm_fail,
		 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}

/*
 * Step the initial gain (IGI) from the false-alarm totals:
 * below TH0 decrease by 1, between TH0 and TH1 hold, between TH1
 * and TH2 increase by 1, at/above TH2 increase by 2.  The result is
 * clamped to [DM_DIG_FA_LOWER, DM_DIG_FA_UPPER]; an extreme alarm
 * count (> 10000) forces IGI to 0x32.  The new value is written to
 * the hardware via rtl92c_dm_write_dig().
 */
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 value_igi = dm_digtable.cur_igvalue;

	if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
		value_igi--;
	else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
		value_igi += 0;	/* hold */
	else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
		value_igi++;
	else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
		value_igi += 2;

	if (value_igi > DM_DIG_FA_UPPER)
		value_igi = DM_DIG_FA_UPPER;
	else if (value_igi < DM_DIG_FA_LOWER)
		value_igi = DM_DIG_FA_LOWER;

	/* Pathological alarm rate: pin IGI to a known-good value. */
	if (rtlpriv->falsealm_cnt.cnt_all > 10000)
		value_igi = 0x32;

	dm_digtable.cur_igvalue = value_igi;
	rtl92c_dm_write_dig(hw);
}

/*
 * Derive IGI from the minimum RSSI: widen the backoff when alarms
 * are low, shrink it when high (both clamped to the configured
 * range), then set IGI = rssi_val_min + 10 - backoff, clamped to
 * [rx_gain_range_min, rx_gain_range_max].  (Definition continues
 * past this chunk boundary.)
 */
static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
		if ((dm_digtable.backoff_val - 2) <
		    dm_digtable.backoff_val_range_min)
			dm_digtable.backoff_val =
			    dm_digtable.backoff_val_range_min;
		else
			dm_digtable.backoff_val -= 2;
	} else if (rtlpriv->falsealm_cnt.cnt_all <
		   dm_digtable.fa_lowthresh) {
		if ((dm_digtable.backoff_val + 2) >
		    dm_digtable.backoff_val_range_max)
			dm_digtable.backoff_val =
			    dm_digtable.backoff_val_range_max;
		else
			dm_digtable.backoff_val += 2;
	}

	if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
	    dm_digtable.rx_gain_range_max)
		dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
	else if ((dm_digtable.rssi_val_min + 10 -
		  dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
		dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
	else
		dm_digtable.cur_igvalue = dm_digtable.rssi_val_min
+ 10 - dm_digtable.backoff_val; RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "rssi_val_min = %x backoff_val %x\n", dm_digtable.rssi_val_min, dm_digtable.backoff_val); rtl92c_dm_write_dig(hw); } static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) { static u8 initialized; /* initialized to false */ struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; bool multi_sta = false; if (mac->opmode == NL80211_IFTYPE_ADHOC) multi_sta = true; if (!multi_sta || dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { initialized = false; dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; return; } else if (initialized == false) { initialized = true; dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; dm_digtable.cur_igvalue = 0x20; rtl92c_dm_write_dig(hw); } if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) { if ((rssi_strength < dm_digtable.rssi_lowthresh) && (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { if (dm_digtable.dig_ext_port_stage == DIG_EXT_PORT_STAGE_2) { dm_digtable.cur_igvalue = 0x20; rtl92c_dm_write_dig(hw); } dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; } else if (rssi_strength > dm_digtable.rssi_highthresh) { dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2; rtl92c_dm_ctrl_initgain_by_fa(hw); } } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) { dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; dm_digtable.cur_igvalue = 0x20; rtl92c_dm_write_dig(hw); } RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "curmultista_connectstate = %x dig_ext_port_stage %x\n", dm_digtable.curmultista_connectstate, dm_digtable.dig_ext_port_stage); } static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "presta_connectstate = %x, cursta_connectctate = %x\n", dm_digtable.presta_connectstate, 
dm_digtable.cursta_connectctate); if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); rtl92c_dm_ctrl_initgain_by_rssi(hw); } } else { dm_digtable.rssi_val_min = 0; dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; dm_digtable.cur_igvalue = 0x20; dm_digtable.pre_igvalue = 0; rtl92c_dm_write_dig(hw); } } static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { if (dm_digtable.rssi_val_min <= 25) dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_LowRssi; else dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_HighRssi; } else { if (dm_digtable.rssi_val_min <= 20) dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_LowRssi; else dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_HighRssi; } } else { dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; } if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) { if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_High; else dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low; if (dm_digtable.pre_cck_fa_state != dm_digtable.cur_cck_fa_state) { if (dm_digtable.cur_cck_fa_state == CCK_FA_STAGE_Low) rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); else rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); dm_digtable.pre_cck_fa_state = dm_digtable.cur_cck_fa_state; } rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40); if (IS_92C_SERIAL(rtlhal->version)) 
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, MASKBYTE2, 0xd7); } else { rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47); if (IS_92C_SERIAL(rtlhal->version)) rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, MASKBYTE2, 0xd3); } dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state; } RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n", IS_92C_SERIAL(rtlhal->version)); } static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); if (mac->act_scanning) return; if (mac->link_state >= MAC80211_LINKED) dm_digtable.cursta_connectctate = DIG_STA_CONNECT; else dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; rtl92c_dm_initial_gain_sta(hw); rtl92c_dm_initial_gain_multi_sta(hw); rtl92c_dm_cck_packet_detection_thresh(hw); dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate; } static void rtl92c_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->dm.dm_initialgain_enable == false) return; if (dm_digtable.dig_enable_flag == false) return; rtl92c_dm_ctrl_initgain_by_twoport(hw); } static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.dynamic_txpower_enable = false; rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; } void rtl92c_dm_write_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, dm_digtable.backoff_val); dm_digtable.cur_igvalue += 2; if (dm_digtable.cur_igvalue > 0x3f) dm_digtable.cur_igvalue = 0x3f; if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, dm_digtable.cur_igvalue); 
rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, dm_digtable.cur_igvalue); dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; } } EXPORT_SYMBOL(rtl92c_dm_write_dig); static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff; u8 h2c_parameter[3] = { 0 }; return; if (tmpentry_max_pwdb != 0) { rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = tmpentry_max_pwdb; } else { rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0; } if (tmpentry_min_pwdb != 0xff) { rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = tmpentry_min_pwdb; } else { rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0; } h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF); h2c_parameter[0] = 0; rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter); } void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; } EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo); static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); static u64 last_txok_cnt; static u64 last_rxok_cnt; static u32 last_bt_edca_ul; static u32 last_bt_edca_dl; u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; u32 edca_be_ul = 0x5ea42b; u32 edca_be_dl = 0x5ea42b; bool bt_change_edca = false; if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) || (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) { rtlpriv->dm.current_turbo_edca = false; last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul; last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl; } if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) { edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul; bt_change_edca = true; } if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) { edca_be_ul = 
rtlpcipriv->bt_coexist.bt_edca_dl; bt_change_edca = true; } if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; return; } if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) { if (!(edca_be_ul & 0xffff0000)) edca_be_ul |= 0x005e0000; if (!(edca_be_dl & 0xffff0000)) edca_be_dl |= 0x005e0000; } if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting))) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; if (cur_rxok_cnt > 4 * cur_txok_cnt) { if (!rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_dl); rtlpriv->dm.is_cur_rdlstate = true; } } else { if (rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_ul); rtlpriv->dm.is_cur_rdlstate = false; } } rtlpriv->dm.current_turbo_edca = true; } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *) (&tmp)); rtlpriv->dm.current_turbo_edca = false; } } rtlpriv->dm.is_any_nonbepkts = false; last_txok_cnt = rtlpriv->stats.txbytesunicast; last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 thermalvalue, delta, delta_lck, delta_iqk; long ele_a, ele_d, temp_cck, val_x, value32; long val_y, ele_c = 0; u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0; int i; bool is2t = IS_92C_SERIAL(rtlhal->version); s8 txpwr_level[2] = {0, 0}; u8 ofdm_min_index = 6, rf; rtlpriv->dm.txpower_trackinginit = true; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 
"rtl92c_dm_txpower_tracking_callback_thermalmeter\n"); thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter); rtl92c_phy_ap_calibrate(hw, (thermalvalue - rtlefuse->eeprom_thermalmeter)); if (is2t) rf = 2; else rf = 1; if (thermalvalue) { ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; for (i = 0; i < OFDM_TABLE_LENGTH; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { ofdm_index_old[0] = (u8) i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n", ROFDM0_XATXIQIMBALANCE, ele_d, ofdm_index_old[0]); break; } } if (is2t) { ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; for (i = 0; i < OFDM_TABLE_LENGTH; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n", ROFDM0_XBTXIQIMBALANCE, ele_d, ofdm_index_old[1]); break; } } } temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK; for (i = 0; i < CCK_TABLE_LENGTH; i++) { if (rtlpriv->dm.cck_inch14) { if (memcmp((void *)&temp_cck, (void *)&cckswing_table_ch14[i][2], 4) == 0) { cck_index_old = (u8) i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n", RCCK0_TXFILTER2, temp_cck, cck_index_old, rtlpriv->dm.cck_inch14); break; } } else { if (memcmp((void *)&temp_cck, (void *) &cckswing_table_ch1ch13[i][2], 4) == 0) { cck_index_old = (u8) i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n", RCCK0_TXFILTER2, temp_cck, cck_index_old, rtlpriv->dm.cck_inch14); break; } } } if (!rtlpriv->dm.thermalvalue) { rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter; 
rtlpriv->dm.thermalvalue_lck = thermalvalue; rtlpriv->dm.thermalvalue_iqk = thermalvalue; for (i = 0; i < rf; i++) rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; rtlpriv->dm.cck_index = cck_index_old; } delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? (thermalvalue - rtlpriv->dm.thermalvalue) : (rtlpriv->dm.thermalvalue - thermalvalue); delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ? (thermalvalue - rtlpriv->dm.thermalvalue_lck) : (rtlpriv->dm.thermalvalue_lck - thermalvalue); delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ? (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : (rtlpriv->dm.thermalvalue_iqk - thermalvalue); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk); if (delta_lck > 1) { rtlpriv->dm.thermalvalue_lck = thermalvalue; rtl92c_phy_lc_calibrate(hw); } if (delta > 0 && rtlpriv->dm.txpower_track_control) { if (thermalvalue > rtlpriv->dm.thermalvalue) { for (i = 0; i < rf; i++) rtlpriv->dm.ofdm_index[i] -= delta; rtlpriv->dm.cck_index -= delta; } else { for (i = 0; i < rf; i++) rtlpriv->dm.ofdm_index[i] += delta; rtlpriv->dm.cck_index += delta; } if (is2t) { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n", rtlpriv->dm.ofdm_index[0], rtlpriv->dm.ofdm_index[1], rtlpriv->dm.cck_index); } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "temp OFDM_A_index=0x%x, cck_index=0x%x\n", rtlpriv->dm.ofdm_index[0], rtlpriv->dm.cck_index); } if (thermalvalue > rtlefuse->eeprom_thermalmeter) { for (i = 0; i < rf; i++) ofdm_index[i] = rtlpriv->dm.ofdm_index[i] + 1; cck_index = rtlpriv->dm.cck_index + 1; } else { for (i = 0; i < rf; i++) ofdm_index[i] = rtlpriv->dm.ofdm_index[i]; cck_index = rtlpriv->dm.cck_index; } for (i = 0; i < rf; i++) { if 
(txpwr_level[i] >= 0 && txpwr_level[i] <= 26) { if (thermalvalue > rtlefuse->eeprom_thermalmeter) { if (delta < 5) ofdm_index[i] -= 1; else ofdm_index[i] -= 2; } else if (delta > 5 && thermalvalue < rtlefuse-> eeprom_thermalmeter) { ofdm_index[i] += 1; } } else if (txpwr_level[i] >= 27 && txpwr_level[i] <= 32 && thermalvalue > rtlefuse->eeprom_thermalmeter) { if (delta < 5) ofdm_index[i] -= 1; else ofdm_index[i] -= 2; } else if (txpwr_level[i] >= 32 && txpwr_level[i] <= 38 && thermalvalue > rtlefuse->eeprom_thermalmeter && delta > 5) { ofdm_index[i] -= 1; } } if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) { if (thermalvalue > rtlefuse->eeprom_thermalmeter) { if (delta < 5) cck_index -= 1; else cck_index -= 2; } else if (delta > 5 && thermalvalue < rtlefuse->eeprom_thermalmeter) { cck_index += 1; } } else if (txpwr_level[i] >= 27 && txpwr_level[i] <= 32 && thermalvalue > rtlefuse->eeprom_thermalmeter) { if (delta < 5) cck_index -= 1; else cck_index -= 2; } else if (txpwr_level[i] >= 32 && txpwr_level[i] <= 38 && thermalvalue > rtlefuse->eeprom_thermalmeter && delta > 5) { cck_index -= 1; } for (i = 0; i < rf; i++) { if (ofdm_index[i] > OFDM_TABLE_SIZE - 1) ofdm_index[i] = OFDM_TABLE_SIZE - 1; else if (ofdm_index[i] < ofdm_min_index) ofdm_index[i] = ofdm_min_index; } if (cck_index > CCK_TABLE_SIZE - 1) cck_index = CCK_TABLE_SIZE - 1; else if (cck_index < 0) cck_index = 0; if (is2t) { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n", ofdm_index[0], ofdm_index[1], cck_index); } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "new OFDM_A_index=0x%x, cck_index=0x%x\n", ofdm_index[0], cck_index); } } if (rtlpriv->dm.txpower_track_control && delta != 0) { ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22; val_x = rtlphy->reg_e94; val_y = rtlphy->reg_e9c; if (val_x != 0) { if ((val_x & 0x00000200) != 0) val_x = val_x | 0xFFFFFC00; ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; if ((val_y & 
0x00000200) != 0) val_y = val_y | 0xFFFFFC00; ele_c = ((val_y * ele_d) >> 8) & 0x000003FF; value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32); value32 = ((val_x * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31), value32); value32 = ((val_y * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29), value32); } else { rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, ofdmswing_table[ofdm_index[0]]); rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31) | BIT(29), 0x00); } if (!rtlpriv->dm.cck_inch14) { rtl_write_byte(rtlpriv, 0xa22, cckswing_table_ch1ch13[cck_index] [0]); rtl_write_byte(rtlpriv, 0xa23, cckswing_table_ch1ch13[cck_index] [1]); rtl_write_byte(rtlpriv, 0xa24, cckswing_table_ch1ch13[cck_index] [2]); rtl_write_byte(rtlpriv, 0xa25, cckswing_table_ch1ch13[cck_index] [3]); rtl_write_byte(rtlpriv, 0xa26, cckswing_table_ch1ch13[cck_index] [4]); rtl_write_byte(rtlpriv, 0xa27, cckswing_table_ch1ch13[cck_index] [5]); rtl_write_byte(rtlpriv, 0xa28, cckswing_table_ch1ch13[cck_index] [6]); rtl_write_byte(rtlpriv, 0xa29, cckswing_table_ch1ch13[cck_index] [7]); } else { rtl_write_byte(rtlpriv, 0xa22, cckswing_table_ch14[cck_index] [0]); rtl_write_byte(rtlpriv, 0xa23, cckswing_table_ch14[cck_index] [1]); rtl_write_byte(rtlpriv, 0xa24, cckswing_table_ch14[cck_index] [2]); rtl_write_byte(rtlpriv, 0xa25, cckswing_table_ch14[cck_index] [3]); rtl_write_byte(rtlpriv, 0xa26, cckswing_table_ch14[cck_index] [4]); rtl_write_byte(rtlpriv, 0xa27, cckswing_table_ch14[cck_index] [5]); rtl_write_byte(rtlpriv, 0xa28, cckswing_table_ch14[cck_index] [6]); rtl_write_byte(rtlpriv, 0xa29, cckswing_table_ch14[cck_index] [7]); } if (is2t) { ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22; val_x = rtlphy->reg_eb4; val_y = rtlphy->reg_ebc; 
if (val_x != 0) { if ((val_x & 0x00000200) != 0) val_x = val_x | 0xFFFFFC00; ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; if ((val_y & 0x00000200) != 0) val_y = val_y | 0xFFFFFC00; ele_c = ((val_y * ele_d) >> 8) & 0x00003FF; value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32); value32 = ((val_x * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27), value32); value32 = ((val_y * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25), value32); } else { rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, ofdmswing_table[ofdm_index [1]]); rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27) | BIT(25), 0x00); } } } if (delta_iqk > 3) { rtlpriv->dm.thermalvalue_iqk = thermalvalue; rtl92c_phy_iq_calibrate(hw, false); } if (rtlpriv->dm.txpower_track_control) rtlpriv->dm.thermalvalue = thermalvalue; } RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n"); } static void rtl92c_dm_initialize_txpower_tracking_thermalmeter( struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.txpower_tracking = true; rtlpriv->dm.txpower_trackinginit = false; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "pMgntInfo->txpower_tracking = %d\n", rtlpriv->dm.txpower_tracking); } static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) { rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw); } static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw) { rtl92c_dm_txpower_tracking_callback_thermalmeter(hw); } static void rtl92c_dm_check_txpower_tracking_thermal_meter( struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); static u8 tm_trigger; if (!rtlpriv->dm.txpower_tracking) return; if (!tm_trigger) { rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK, 0x60); 
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Trigger 92S Thermal Meter!!\n"); tm_trigger = 1; return; } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Schedule TxPowerTracking direct call!!\n"); rtl92c_dm_txpower_tracking_directcall(hw); tm_trigger = 0; } } void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw) { rtl92c_dm_check_txpower_tracking_thermal_meter(hw); } EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking); void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rate_adaptive *p_ra = &(rtlpriv->ra); p_ra->ratr_state = DM_RATR_STA_INIT; p_ra->pre_ratr_state = DM_RATR_STA_INIT; if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) rtlpriv->dm.useramask = true; else rtlpriv->dm.useramask = false; } EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask); static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rate_adaptive *p_ra = &(rtlpriv->ra); u32 low_rssithresh_for_ra, high_rssithresh_for_ra; struct ieee80211_sta *sta = NULL; if (is_hal_stop(rtlhal)) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "<---- driver is going to unload\n"); return; } if (!rtlpriv->dm.useramask) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "<---- driver does not control rate adaptive mask\n"); return; } if (mac->link_state == MAC80211_LINKED && mac->opmode == NL80211_IFTYPE_STATION) { switch (p_ra->pre_ratr_state) { case DM_RATR_STA_HIGH: high_rssithresh_for_ra = 50; low_rssithresh_for_ra = 20; break; case DM_RATR_STA_MIDDLE: high_rssithresh_for_ra = 55; low_rssithresh_for_ra = 20; break; case DM_RATR_STA_LOW: high_rssithresh_for_ra = 50; low_rssithresh_for_ra = 25; break; default: high_rssithresh_for_ra = 50; low_rssithresh_for_ra = 20; break; } if (rtlpriv->dm.undecorated_smoothed_pwdb > (long)high_rssithresh_for_ra) p_ra->ratr_state = DM_RATR_STA_HIGH; else if 
(rtlpriv->dm.undecorated_smoothed_pwdb >
		    (long)low_rssithresh_for_ra)
			p_ra->ratr_state = DM_RATR_STA_MIDDLE;
		else
			p_ra->ratr_state = DM_RATR_STA_LOW;
		if (p_ra->pre_ratr_state != p_ra->ratr_state) {
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI = %ld\n",
				 rtlpriv->dm.undecorated_smoothed_pwdb);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
			RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
				 "PreState = %d, CurState = %d\n",
				 p_ra->pre_ratr_state, p_ra->ratr_state);
			/* Only the PCI card uses sta in the update rate table
			 * callback routine */
			if (rtlhal->interface == INTF_PCI) {
				rcu_read_lock();
				sta = ieee80211_find_sta(mac->vif, mac->bssid);
			}
			rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
							   p_ra->ratr_state);
			p_ra->pre_ratr_state = p_ra->ratr_state;
			if (rtlhal->interface == INTF_PCI)
				rcu_read_unlock();
		}
	}
}

/* Reset the BB power-saving state table to "unknown" (MAX sentinels). */
static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	dm_pstable.pre_ccastate = CCA_MAX;
	dm_pstable.cur_ccasate = CCA_MAX;
	dm_pstable.pre_rfstate = RF_MAX;
	dm_pstable.cur_rfstate = RF_MAX;
	dm_pstable.rssi_val_min = 0;
}

/*
 * RF power-save state machine: snapshot the original BB register fields on
 * first call, then switch between RF_SAVE and RF_NORMAL based on the
 * minimum RSSI (with 25/30 hysteresis), restoring the snapshot on exit
 * from the save state.  bforce_in_normal forces RF_NORMAL.
 */
void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
{
	static u8 initialize;
	static u32 reg_874, reg_c70, reg_85c, reg_a74;
	if (initialize == 0) {
		/* One-time snapshot of the fields we are about to clobber. */
		reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
					 MASKDWORD) & 0x1CC000) >> 14;
		reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
					 MASKDWORD) & BIT(3)) >> 3;
		reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
					 MASKDWORD) & 0xFF000000) >> 24;
		reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
		initialize = 1;
	}
	if (!bforce_in_normal) {
		if (dm_pstable.rssi_val_min != 0) {
			if (dm_pstable.pre_rfstate == RF_NORMAL) {
				if (dm_pstable.rssi_val_min >= 30)
					dm_pstable.cur_rfstate = RF_SAVE;
				else
					dm_pstable.cur_rfstate = RF_NORMAL;
			} else {
				if (dm_pstable.rssi_val_min <= 25)
					dm_pstable.cur_rfstate = RF_NORMAL;
				else
					dm_pstable.cur_rfstate = RF_SAVE;
			}
		} else {
			dm_pstable.cur_rfstate = RF_MAX;
		}
	} else {
		dm_pstable.cur_rfstate = RF_NORMAL;
	}
	if
(dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
		if (dm_pstable.cur_rfstate == RF_SAVE) {
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1C0000, 0x2);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, 0x63);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0xC000, 0x2);
			rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
			/* Toggle 0x818 bit 28 to latch the new setting. */
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
		} else {
			/* Leave save mode: restore the snapshotted fields. */
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1CC000, reg_874);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
				      reg_c70);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, reg_85c);
			rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
		}
		dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
	}
}
EXPORT_SYMBOL(rtl92c_dm_rf_saving);

/*
 * Refresh dm_pstable.rssi_val_min from the current link state, then enter
 * the RF-saving state machine (1T parts only; the 1R-CCA path for 92C
 * serial parts is commented out below).
 *
 * NOTE(review): the RT_TRACE calls here pass DBG_LOUD as the *component*
 * argument (cf. COMP_* used elsewhere in this file) -- looks like a
 * copy/paste slip; it only affects trace filtering, not behaviour.
 */
static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	if (((mac->link_state == MAC80211_NOLINK)) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		dm_pstable.rssi_val_min = 0;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "Not connected to any\n");
	}
	if (mac->link_state == MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 dm_pstable.rssi_val_min);
		} else {
			dm_pstable.rssi_val_min =
			    rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 dm_pstable.rssi_val_min);
		}
	} else {
		dm_pstable.rssi_val_min =
		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 dm_pstable.rssi_val_min);
	}
	if (IS_92C_SERIAL(rtlhal->version))
		;/* rtl92c_dm_1r_cca(hw); */
	else
		rtl92c_dm_rf_saving(hw, false);
}

/* Initialise every dynamic-mechanism subsystem in one place. */
void rtl92c_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv
*rtlpriv = rtl_priv(hw);
	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl92c_dm_diginit(hw);
	rtl92c_dm_init_dynamic_txpower(hw);
	rtl92c_dm_init_edca_turbo(hw);
	rtl92c_dm_init_rate_adaptive_mask(hw);
	rtl92c_dm_initialize_txpower_tracking(hw);
	rtl92c_dm_init_dynamic_bb_powersaving(hw);
}
EXPORT_SYMBOL(rtl92c_dm_init);

/*
 * Pick a TX high-power level from the smoothed link RSSI (near-field
 * detection) and reprogram the channel TX power only when the level
 * actually changes.
 *
 * NOTE(review): both the >=LVL2 and the LVL1..LVL2-3 branches assign
 * TXHIGHPWRLEVEL_LEVEL1 (only the trace text differs, 0x0 vs 0x10);
 * later upstream kernels use LEVEL2 for the first branch -- confirm
 * against the intended hardware behaviour before changing.
 */
void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undecorated_smoothed_pwdb;
	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;
	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}
	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Not connected to any\n");
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}
	/* Pick the RSSI source: per-entry minimum for IBSS / unlinked,
	 * the smoothed station PWDB otherwise. */
	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undecorated_smoothed_pwdb =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 undecorated_smoothed_pwdb);
		} else {
			undecorated_smoothed_pwdb =
			    rtlpriv->dm.undecorated_smoothed_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undecorated_smoothed_pwdb);
		}
	} else {
		undecorated_smoothed_pwdb =
		    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 undecorated_smoothed_pwdb);
	}
	if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
	} else if ((undecorated_smoothed_pwdb <
		    (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
		   (undecorated_smoothed_pwdb >=
		    TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
	} else if (undecorated_smoothed_pwdb <
		   (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}
	/* Only touch the hardware when the level actually changed. */
	if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
	}
	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}

/*
 * Periodic dynamic-mechanism watchdog: run every dm subsystem, but only
 * while RF is on, firmware power-save is inactive and no RF state change
 * is in flight.
 */
void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *) (&fw_current_inpsmode));
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *) (&fw_ps_awake));
	if ((ppsc->rfpwr_state == ERFON) &&
	    ((!fw_current_inpsmode) && fw_ps_awake) &&
	    (!ppsc->rfchange_inprogress)) {
		rtl92c_dm_pwdb_monitor(hw);
		rtl92c_dm_dig(hw);
		rtl92c_dm_false_alarm_counter_statistics(hw);
		rtl92c_dm_dynamic_bb_powersaving(hw);
		rtl92c_dm_dynamic_txpower(hw);
		rtl92c_dm_check_txpower_tracking(hw);
		rtl92c_dm_refresh_rate_adaptive_mask(hw);
		rtl92c_dm_bt_coexist(hw);
		rtl92c_dm_check_edca_turbo(hw);
	}
}
EXPORT_SYMBOL(rtl92c_dm_watchdog);

/*
 * Derive the BT-coexist RSSI state bits from the link RSSI and report
 * whether the state changed.  (Continues past this excerpt.)
 */
u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	long undecorated_smoothed_pwdb;
	u8 curr_bt_rssi_state = 0x00;
	if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
		undecorated_smoothed_pwdb =
		    GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
	} else {
		/* Not associated: assume a strong link unless per-entry
		 * statistics say otherwise. */
		if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)
			undecorated_smoothed_pwdb = 100;
		else
			undecorated_smoothed_pwdb =
			    rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
	}
	/* Check RSSI to determine HighPower/NormalPower state for
	 * BT coexistence.  Each flag below has its own set/clear threshold
	 * pair, which gives the classification hysteresis. */
	if (undecorated_smoothed_pwdb >= 67)
		curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER);
	else if (undecorated_smoothed_pwdb < 62)
		curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER;
	/* Check RSSI to determine AMPDU setting for BT coexistence. */
	if (undecorated_smoothed_pwdb >= 40)
		curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF);
	else if (undecorated_smoothed_pwdb <= 32)
		curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF;
	/* Marked RSSI state. It will be used to determine BT coexistence
	 * setting later. */
	if (undecorated_smoothed_pwdb < 35)
		curr_bt_rssi_state |= BT_RSSI_STATE_SPECIAL_LOW;
	else
		curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
	/* Set Tx Power according to BT status. */
	if (undecorated_smoothed_pwdb >= 30)
		curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW;
	else if (undecorated_smoothed_pwdb < 25)
		curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
	/* Check BT state related to BT_Idle in B/G mode. */
	if (undecorated_smoothed_pwdb < 15)
		curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
	else
		curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
	if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) {
		rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state;
		return true;
	} else {
		return false;
	}
}
EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);

/*
 * Sample the BT activity counters (regs 0x488/0x48c/0x490) and classify
 * the BT service type from the tx/priority duty-cycle ratios; returns
 * true when the BT on/off state or service type changed.
 *
 * NOTE(review): bt_tx/bt_pri are masked to 24 bits *before* the
 * all-ones "dead read" check against 0xffffffff, so that check can never
 * match for them.  Also, ratio_tx/ratio_pri divide by `polling` without a
 * zero check -- verify the counter cannot legitimately read 0.
 */
static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	u32 polling, ratio_tx, ratio_pri;
	u32 bt_tx, bt_pri;
	u8 bt_state;
	u8 cur_service_type;
	if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
		return false;
	bt_state = rtl_read_byte(rtlpriv, 0x4fd);
	bt_tx = rtl_read_dword(rtlpriv, 0x488);
	bt_tx = bt_tx & 0x00ffffff;
	bt_pri = rtl_read_dword(rtlpriv, 0x48c);
	bt_pri = bt_pri & 0x00ffffff;
	polling = rtl_read_dword(rtlpriv, 0x490);
	if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
	    polling == 0xffffffff && bt_state == 0xff)
		return false;
	bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
	if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
		rtlpcipriv->bt_coexist.bt_cur_state = bt_state;
		if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
			rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
			bt_state = bt_state |
			    ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
			     0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
			    BIT_OFFSET_LEN_MASK_32(2, 1);
			rtl_write_byte(rtlpriv, 0x4fd, bt_state);
		}
		return true;
	}
	ratio_tx = bt_tx * 1000 / polling;
	ratio_pri = bt_pri * 1000 / polling;
	rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
	rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;
	if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
		/* Map duty-cycle ratios to a BT service class. */
		if ((ratio_tx < 30) && (ratio_pri < 30))
			cur_service_type = BT_IDLE;
		else if ((ratio_pri > 110) && (ratio_pri < 250))
			cur_service_type = BT_SCO;
		else if ((ratio_tx >= 200) && (ratio_pri >= 200))
			cur_service_type = BT_BUSY;
		else if ((ratio_tx >= 350) && (ratio_tx < 500))
			cur_service_type = BT_OTHERBUSY;
		else if (ratio_tx >= 500)
			cur_service_type = BT_PAN;
		else
			cur_service_type = BT_OTHER_ACTION;
		if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
			rtlpcipriv->bt_coexist.bt_service = cur_service_type;
			bt_state = bt_state |
			    ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
			     0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
			    ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
			     0 : BIT_OFFSET_LEN_MASK_32(2, 1));
			/* Add interrupt migration when bt is not in
			 * idle state (no traffic).
			 */
			if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
				rtl_write_word(rtlpriv, 0x504, 0x0ccc);
				rtl_write_byte(rtlpriv, 0x506, 0x54);
				rtl_write_byte(rtlpriv, 0x507, 0x54);
			} else {
				rtl_write_byte(rtlpriv, 0x506, 0x00);
				rtl_write_byte(rtlpriv, 0x507, 0x00);
			}
			rtl_write_byte(rtlpriv, 0x4fd, bt_state);
			return true;
		}
	}
	return false;
}

/* Edge-detect the wifi association state: returns true exactly once,
 * on the unlinked -> linked transition (state kept in a local static). */
static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	static bool media_connect;
	if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
		media_connect = false;
	} else {
		if (!media_connect) {
			media_connect = true;
			return true;
		}
		media_connect = true;
	}
	return false;
}

/* Choose EDCA UL/DL parameter words for the current BT service type,
 * with a special low-RSSI override for pure B/G links. */
static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) {
		rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b;
		rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b;
	} else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) {
		rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f;
		rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f;
	} else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) {
		if (rtlpcipriv->bt_coexist.ratio_tx > 160) {
			rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f;
			rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f;
		} else {
			rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b;
			rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b;
		}
	} else {
		rtlpcipriv->bt_coexist.bt_edca_ul = 0;
		rtlpcipriv->bt_coexist.bt_edca_dl = 0;
	}
	if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) &&
	    (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
	     (rtlpriv->mac80211.mode ==
	      (WIRELESS_MODE_G | WIRELESS_MODE_B))) &&
	    (rtlpcipriv->bt_coexist.bt_rssi_state &
	     BT_RSSI_STATE_BG_EDCA_LOW)) {
		rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b;
		rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b;
	}
}

/* Program antenna isolation / GPIO / RF / TX-power for the current BT
 * state.  (Continues past this excerpt.) */
static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	/* Only enable
HW BT coexist when BT in "Busy" state. */
	if (rtlpriv->mac80211.vendor == PEER_CISCO &&
	    rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) {
		rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
	} else {
		if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) &&
		    (rtlpcipriv->bt_coexist.bt_rssi_state &
		     BT_RSSI_STATE_NORMAL_POWER)) {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
		} else if ((rtlpcipriv->bt_coexist.bt_service ==
			    BT_OTHER_ACTION) &&
			   (rtlpriv->mac80211.mode < WIRELESS_MODE_N_24G) &&
			   (rtlpcipriv->bt_coexist.bt_rssi_state &
			    BT_RSSI_STATE_SPECIAL_LOW)) {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
		} else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
		} else {
			rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
		}
	}
	if (rtlpcipriv->bt_coexist.bt_service == BT_PAN)
		rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100);
	else
		rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0);
	if (rtlpcipriv->bt_coexist.bt_rssi_state &
	    BT_RSSI_STATE_NORMAL_POWER) {
		rtl92c_bt_set_normal(hw);
	} else {
		rtlpcipriv->bt_coexist.bt_edca_ul = 0;
		rtlpcipriv->bt_coexist.bt_edca_dl = 0;
	}
	if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
		rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0, 0xf);
	} else {
		/* BT idle: restore the saved original RF 0x1e field. */
		rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
				rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
	}
	if (!rtlpriv->dm.dynamic_txpower_enable) {
		if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
			if (rtlpcipriv->bt_coexist.bt_rssi_state &
			    BT_RSSI_STATE_TXPOWER_LOW) {
				rtlpriv->dm.dynamic_txhighpower_lvl =
				    TXHIGHPWRLEVEL_BT2;
			} else {
				rtlpriv->dm.dynamic_txhighpower_lvl =
				    TXHIGHPWRLEVEL_BT1;
			}
		} else {
			rtlpriv->dm.dynamic_txhighpower_lvl =
			    TXHIGHPWRLEVEL_NORMAL;
		}
		rtl92c_phy_set_txpower_level(hw, rtlpriv->phy.current_channel);
	}
}

/* Apply coexist settings when BT is active, or undo them (GPIO mux, RF
 * 0x1e field, EDCA overrides) when BT turned off. */
static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	if (rtlpcipriv->bt_coexist.bt_cur_state) {
		if (rtlpcipriv->bt_coexist.bt_ant_isolation)
			rtl92c_bt_ant_isolation(hw);
	} else {
		rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0x00);
		rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
				rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
		rtlpcipriv->bt_coexist.bt_edca_ul = 0;
		rtlpcipriv->bt_coexist.bt_edca_dl = 0;
	}
}

/*
 * Watchdog entry for BT coexistence (CSR BC4 combos only): sample the
 * three change detectors and reprogram the hardware if any fired.
 */
void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	bool wifi_connect_change;
	bool bt_state_change;
	bool rssi_state_change;
	if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
	    (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
		wifi_connect_change = rtl92c_bt_wifi_connect_change(hw);
		bt_state_change = rtl92c_bt_state_change(hw);
		rssi_state_change = rtl92c_bt_rssi_state_change(hw);
		if (wifi_connect_change || bt_state_change ||
		    rssi_state_change)
			rtl92c_check_bt_change(hw);
	}
}
EXPORT_SYMBOL(rtl92c_dm_bt_coexist);
gpl-2.0
major91/Zeta_Chromium-L
arch/powerpc/kernel/pci-common.c
4446
49031
/* NOTE(review): a second kernel source file begins here (the metadata
 * lines above mark the dataset record boundary).  Re-indented only;
 * code tokens unchanged. */
/*
 * Contains common pci routines for ALL ppc platform
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries
 * brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *    Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>

/* Protects hose_list and global_phb_number below. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* XXX kill that some day ...
 */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

/* DMA ops used for PCI devices; defaults to direct mapping. */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

/* Allocate a host-bridge descriptor, assign it the next global PHB
 * number and link it on hose_list.  Returns NULL on allocation failure. */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;
	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Bootmem-backed hoses (pre mem_init) must never be kfree'd. */
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);
		if (nid < 0 || !node_online(nid))
			nid = -1;
		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}

/* Unlink a host bridge and free it when it was dynamically allocated. */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);
	if (phb->is_dynamic)
		kfree(phb);
}

/* Size of a hose's IO window; the 64-bit field differs from the 32-bit
 * resource representation, hence the #ifdef. */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}

/* Return 1 when the virtual address falls in any hose's IO window. */
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;
	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

/* Translate a physical IO address into a PIO port number; returns ~0
 * when no hose claims the address. */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;
	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	/* Walk up the OF tree until an ancestor matches a hose's node. */
	while(node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

/* sysfs "devspec" attribute: the device's OF path, if it has one. */
static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;
	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}

char __devinit *pcibios_setup(char *str)
{
	return str;
}

/*
 * Reads the interrupt pin to determine if interrupt is used by the card.
 * If the interrupt is used, then gets the interrupt line from the
 * openfirmware and sets it in the pci_dev and pci_config line.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;
	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
#ifdef DEBUG
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;
		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);
		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 oirq.controller ? oirq.controller->full_name :
			 "<default>");
		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if(virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}
	pr_debug(" Mapped to linux irq %d\n", virq);
	pci_dev->irq = virq;
	return 0;
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;
	if (hose == 0)
		return NULL;		/* should never happen */
	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0	/* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}
	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;
		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;
		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;
		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;
		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}
	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);
	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}
	/* XXX would be nice to have a way to ask for write-through */
	if (write_combine)
		return pgprot_noncached_wc(prot);
	else
		return pgprot_noncached(prot);
}

/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;
	if (page_is_ram(pfn))
		return prot;
	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;
			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* for_each_pci_dev holds a ref on pdev; drop it on exit. */
		pci_dev_put(pdev);
	}
	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));
	return prot;
}

/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;
	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;
	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);
	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start,
			      vma->vm_page_prot);
	return ret;
}

/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;
	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;
	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;
	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;
	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;
	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;
	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch(size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;
	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));
	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error trying
		 * to mmap legacy_mem (instead of just moving on without legacy memory
		 * access) we fake it here by giving it anonymous memory, effectively
		 * behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid,
			       pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset =
			(unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

/* Translate a resource to the user-visible address range.  (This
 * definition is truncated at the end of the excerpt.) */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;
	if (hose == NULL)
		return;
	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs insterface is immune to that gunk.
Once X * has been fixed (and the fix spread enough), we can re-enable the * 2 lines below and pass down a BAR value to userland. In that case * we'll also have to re-enable the matching code in * __pci_mmap_make_offset(). * * BenH. */ #if 0 else if (rsrc->flags & IORESOURCE_MEM) offset = hose->pci_mem_offset; #endif *start = rsrc->start - offset; *end = rsrc->end - offset; } /** * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree * @hose: newly allocated pci_controller to be setup * @dev: device node of the host bridge * @primary: set if primary bus (32 bits only, soon to be deprecated) * * This function will parse the "ranges" property of a PCI host bridge device * node and setup the resource mapping of a pci controller based on its * content. * * Life would be boring if it wasn't for a few issues that we have to deal * with here: * * - We can only cope with one IO space range and up to 3 Memory space * ranges. However, some machines (thanks Apple !) tend to split their * space into lots of small contiguous ranges. So we have to coalesce. * * - We can only cope with all memory ranges having the same offset * between CPU addresses and PCI addresses. Unfortunately, some bridges * are setup for a large 1:1 mapping along with a small "window" which * maps PCI address 0 to some arbitrary high address of the CPU space in * order to give access to the ISA memory hole. * The way out of here that I've chosen for now is to always set the * offset based on the first resource found, then override it if we * have a different offset and the previous was set by an ISA hole. * * - Some busses have IO space not starting at 0, which causes trouble with * the way we do our IO resource renumbering. The code somewhat deals with * it for 64 bits but I would expect problems on 32 bits. 
* * - Some 32 bits platforms such as 4xx can have physical space larger than * 32 bits so we need to use 64 bits values for the parsing */ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int primary) { const u32 *ranges; int rlen; int pna = of_n_addr_cells(dev); int np = pna + 5; int memno = 0, isa_hole = -1; u32 pci_space; unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; unsigned long long isa_mb = 0; struct resource *res; printk(KERN_INFO "PCI host bridge %s %s ranges:\n", dev->full_name, primary ? "(primary)" : ""); /* Get ranges property */ ranges = of_get_property(dev, "ranges", &rlen); if (ranges == NULL) return; /* Parse it */ while ((rlen -= np * 4) >= 0) { /* Read next ranges element */ pci_space = ranges[0]; pci_addr = of_read_number(ranges + 1, 2); cpu_addr = of_translate_address(dev, ranges + 3); size = of_read_number(ranges + pna + 3, 2); ranges += np; /* If we failed translation or got a zero-sized region * (some FW try to feed us with non sensical zero sized regions * such as power3 which look like some kind of attempt at exposing * the VGA memory hole) */ if (cpu_addr == OF_BAD_ADDR || size == 0) continue; /* Now consume following elements while they are contiguous */ for (; rlen >= np * sizeof(u32); ranges += np, rlen -= np * 4) { if (ranges[0] != pci_space) break; pci_next = of_read_number(ranges + 1, 2); cpu_next = of_translate_address(dev, ranges + 3); if (pci_next != pci_addr + size || cpu_next != cpu_addr + size) break; size += of_read_number(ranges + pna + 3, 2); } /* Act based on address space type */ res = NULL; switch ((pci_space >> 24) & 0x3) { case 1: /* PCI IO space */ printk(KERN_INFO " IO 0x%016llx..0x%016llx -> 0x%016llx\n", cpu_addr, cpu_addr + size - 1, pci_addr); /* We support only one IO range */ if (hose->pci_io_size) { printk(KERN_INFO " \\--> Skipped (too many) !\n"); continue; } #ifdef CONFIG_PPC32 /* On 32 bits, limit I/O space to 16MB */ if (size > 
0x01000000) size = 0x01000000; /* 32 bits needs to map IOs here */ hose->io_base_virt = ioremap(cpu_addr, size); /* Expect trouble if pci_addr is not 0 */ if (primary) isa_io_base = (unsigned long)hose->io_base_virt; #endif /* CONFIG_PPC32 */ /* pci_io_size and io_base_phys always represent IO * space starting at 0 so we factor in pci_addr */ hose->pci_io_size = pci_addr + size; hose->io_base_phys = cpu_addr - pci_addr; /* Build resource */ res = &hose->io_resource; res->flags = IORESOURCE_IO; res->start = pci_addr; break; case 2: /* PCI Memory space */ case 3: /* PCI 64 bits Memory space */ printk(KERN_INFO " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", cpu_addr, cpu_addr + size - 1, pci_addr, (pci_space & 0x40000000) ? "Prefetch" : ""); /* We support only 3 memory ranges */ if (memno >= 3) { printk(KERN_INFO " \\--> Skipped (too many) !\n"); continue; } /* Handles ISA memory hole space here */ if (pci_addr == 0) { isa_mb = cpu_addr; isa_hole = memno; if (primary || isa_mem_base == 0) isa_mem_base = cpu_addr; hose->isa_mem_phys = cpu_addr; hose->isa_mem_size = size; } /* We get the PCI/Mem offset from the first range or * the, current one if the offset came from an ISA * hole. If they don't match, bugger. 
*/ if (memno == 0 || (isa_hole >= 0 && pci_addr != 0 && hose->pci_mem_offset == isa_mb)) hose->pci_mem_offset = cpu_addr - pci_addr; else if (pci_addr != 0 && hose->pci_mem_offset != cpu_addr - pci_addr) { printk(KERN_INFO " \\--> Skipped (offset mismatch) !\n"); continue; } /* Build resource */ res = &hose->mem_resources[memno++]; res->flags = IORESOURCE_MEM; if (pci_space & 0x40000000) res->flags |= IORESOURCE_PREFETCH; res->start = cpu_addr; break; } if (res != NULL) { res->name = dev->full_name; res->end = res->start + size - 1; res->parent = NULL; res->sibling = NULL; res->child = NULL; } } /* If there's an ISA hole and the pci_mem_offset is -not- matching * the ISA hole offset, then we need to remove the ISA hole from * the resource list for that brige */ if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { unsigned int next = isa_hole + 1; printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb); if (next < memno) memmove(&hose->mem_resources[isa_hole], &hose->mem_resources[next], sizeof(struct resource) * (memno - next)); hose->mem_resources[--memno].flags = 0; } } /* Decide whether to display the domain number in /proc */ int pci_proc_domain(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS)) return 0; if (pci_has_flag(PCI_COMPAT_DOMAIN_0)) return hose->global_number != 0; return 1; } /* This header fixup will do the resource fixup for all devices as they are * probed, but not for bridge ranges */ static void __devinit pcibios_fixup_resources(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); int i; if (!hose) { printk(KERN_ERR "No host bridge for PCI dev %s !\n", pci_name(dev)); return; } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = dev->resource + i; if (!res->flags) continue; /* If we're going to re-assign everything, we mark all resources * as unset (and 0-base them). 
In addition, we mark BARs starting * at 0 as unset as well, except if PCI_PROBE_ONLY is also set * since in that case, we don't want to re-assign anything */ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { /* Only print message if not re-assigning */ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " "is unassigned\n", pci_name(dev), i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags); res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; continue; } pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n", pci_name(dev), i, (unsigned long long)res->start,\ (unsigned long long)res->end, (unsigned int)res->flags); } /* Call machine specific resource fixup */ if (ppc_md.pcibios_fixup_resources) ppc_md.pcibios_fixup_resources(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); /* This function tries to figure out if a bridge resource has been initialized * by the firmware or not. It doesn't have to be absolutely bullet proof, but * things go more smoothly when it gets it right. It should covers cases such * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges */ static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, struct resource *res) { struct pci_controller *hose = pci_bus_to_host(bus); struct pci_dev *dev = bus->self; resource_size_t offset; u16 command; int i; /* We don't do anything if PCI_PROBE_ONLY is set */ if (pci_has_flag(PCI_PROBE_ONLY)) return 0; /* Job is a bit different between memory and IO */ if (res->flags & IORESOURCE_MEM) { /* If the BAR is non-0 (res != pci_mem_offset) then it's probably been * initialized by somebody */ if (res->start != hose->pci_mem_offset) return 0; /* The BAR is 0, let's check if memory decoding is enabled on * the bridge. 
If not, we consider it unassigned */ pci_read_config_word(dev, PCI_COMMAND, &command); if ((command & PCI_COMMAND_MEMORY) == 0) return 1; /* Memory decoding is enabled and the BAR is 0. If any of the bridge * resources covers that starting address (0 then it's good enough for * us for memory */ for (i = 0; i < 3; i++) { if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && hose->mem_resources[i].start == hose->pci_mem_offset) return 0; } /* Well, it starts at 0 and we know it will collide so we may as * well consider it as unassigned. That covers the Apple case. */ return 1; } else { /* If the BAR is non-0, then we consider it assigned */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; if (((res->start - offset) & 0xfffffffful) != 0) return 0; /* Here, we are a bit different than memory as typically IO space * starting at low addresses -is- valid. What we do instead if that * we consider as unassigned anything that doesn't have IO enabled * in the PCI command register, and that's it. 
*/ pci_read_config_word(dev, PCI_COMMAND, &command); if (command & PCI_COMMAND_IO) return 0; /* It's starting at 0 and IO is disabled in the bridge, consider * it unassigned */ return 1; } } /* Fixup resources of a PCI<->PCI bridge */ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) { struct resource *res; int i; struct pci_dev *dev = bus->self; pci_bus_for_each_resource(bus, res, i) { if (!res || !res->flags) continue; if (i >= 3 && bus->self->transparent) continue; /* If we are going to re-assign everything, mark the resource * as unset and move it down to 0 */ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { res->flags |= IORESOURCE_UNSET; res->end -= res->start; res->start = 0; continue; } pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n", pci_name(dev), i, (unsigned long long)res->start,\ (unsigned long long)res->end, (unsigned int)res->flags); /* Try to detect uninitialized P2P bridge resources, * and clear them out so they get re-assigned later */ if (pcibios_uninitialized_bridge_resource(bus, res)) { res->flags = 0; pr_debug("PCI:%s (unassigned)\n", pci_name(dev)); } } } void __devinit pcibios_setup_bus_self(struct pci_bus *bus) { /* Fix up the bus resources for P2P bridges */ if (bus->self != NULL) pcibios_fixup_bridge(bus); /* Platform specific bus fixups. This is currently only used * by fsl_pci and I'm hoping to get rid of it at some point */ if (ppc_md.pcibios_fixup_bus) ppc_md.pcibios_fixup_bus(bus); /* Setup bus DMA mappings */ if (ppc_md.pci_dma_bus_setup) ppc_md.pci_dma_bus_setup(bus); } void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) { struct pci_dev *dev; pr_debug("PCI: Fixup bus devices %d (%s)\n", bus->number, bus->self ? 
pci_name(bus->self) : "PHB"); list_for_each_entry(dev, &bus->devices, bus_list) { /* Cardbus can call us to add new devices to a bus, so ignore * those who are already fully discovered */ if (dev->is_added) continue; /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init */ set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); /* Hook up default DMA ops */ set_dma_ops(&dev->dev, pci_dma_ops); set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); /* Additional platform DMA/iommu setup */ if (ppc_md.pci_dma_dev_setup) ppc_md.pci_dma_dev_setup(dev); /* Read default IRQs and fixup if necessary */ pci_read_irq_line(dev); if (ppc_md.pci_irq_fixup) ppc_md.pci_irq_fixup(dev); } } void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ } void __devinit pcibios_fixup_bus(struct pci_bus *bus) { /* When called from the generic PCI probe, read PCI<->PCI bridge * bases. This is -not- called when generating the PCI tree from * the OF device-tree. */ if (bus->self != NULL) pci_read_bridge_bases(bus); /* Now fixup the bus bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); } EXPORT_SYMBOL(pcibios_fixup_bus); void __devinit pci_fixup_cardbus(struct pci_bus *bus) { /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); } static int skip_isa_ioresource_align(struct pci_dev *dev) { if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) && !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA)) return 1; return 0; } /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. 
The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. */ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { if (skip_isa_ioresource_align(dev)) return start; if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } return start; } EXPORT_SYMBOL(pcibios_align_resource); /* * Reparent resource children of pr that conflict with res * under res, and make res replace those children. */ static int reparent_resources(struct resource *parent, struct resource *res) { struct resource *p, **pp; struct resource **firstpp = NULL; for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { if (p->end < res->start) continue; if (res->end < p->start) break; if (p->start < res->start || p->end > res->end) return -1; /* not completely contained */ if (firstpp == NULL) firstpp = pp; } if (firstpp == NULL) return -1; /* didn't find any conflicting entries? */ res->parent = parent; res->child = *firstpp; res->sibling = *pp; *firstpp = res; *pp = NULL; for (p = res->child; p != NULL; p = p->sibling) { p->parent = res; pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", p->name, (unsigned long long)p->start, (unsigned long long)p->end, res->name); } return 0; } /* * Handle resources of PCI devices. If the world were perfect, we could * just allocate all the resource regions and do nothing more. It isn't. * On the other hand, we cannot just re-allocate all devices, as it would * require us to know lots of host bridge internals. So we attempt to * keep as much of the original configuration as possible, but tweak it * when it's found to be wrong. 
* * Known BIOS problems we have to work around: * - I/O or memory regions not configured * - regions configured, but not enabled in the command register * - bogus I/O addresses above 64K used * - expansion ROMs left enabled (this may sound harmless, but given * the fact the PCI specs explicitly allow address decoders to be * shared between expansion ROMs and other resource regions, it's * at least dangerous) * * Our solution: * (1) Allocate resources for all buses behind PCI-to-PCI bridges. * This gives us fixed barriers on where we can allocate. * (2) Allocate resources for all enabled devices. If there is * a collision, just mark the resource as unallocated. Also * disable expansion ROMs during this step. * (3) Try to allocate resources for disabled devices. If the * resources were assigned correctly, everything goes well, * if they weren't, they won't disturb allocation of other * resources. * (4) Assign new addresses to resources which were either * not configured at all or misconfigured. If explicitly * requested by the user, configure expansion ROM address * as well. */ void pcibios_allocate_bus_resources(struct pci_bus *bus) { struct pci_bus *b; int i; struct resource *res, *pr; pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", pci_domain_nr(bus), bus->number); pci_bus_for_each_resource(bus, res, i) { if (!res || !res->flags || res->start > res->end || res->parent) continue; /* If the resource was left unset at this point, we clear it */ if (res->flags & IORESOURCE_UNSET) goto clear_resource; if (bus->parent == NULL) pr = (res->flags & IORESOURCE_IO) ? &ioport_resource : &iomem_resource; else { pr = pci_find_parent_resource(bus->self, res); if (pr == res) { /* this happens when the generic PCI * code (wrongly) decides that this * bridge is transparent -- paulus */ continue; } } pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " "[0x%x], parent %p (%s)\n", bus->self ? 
pci_name(bus->self) : "PHB", bus->number, i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags, pr, (pr && pr->name) ? pr->name : "nil"); if (pr && !(pr->flags & IORESOURCE_UNSET)) { if (request_resource(pr, res) == 0) continue; /* * Must be a conflict with an existing entry. * Move that entry (or entries) under the * bridge resource and try again. */ if (reparent_resources(pr, res) == 0) continue; } pr_warning("PCI: Cannot allocate resource region " "%d of PCI bridge %d, will remap\n", i, bus->number); clear_resource: res->start = res->end = 0; res->flags = 0; } list_for_each_entry(b, &bus->children, node) pcibios_allocate_bus_resources(b); } static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) { struct resource *pr, *r = &dev->resource[idx]; pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", pci_name(dev), idx, (unsigned long long)r->start, (unsigned long long)r->end, (unsigned int)r->flags); pr = pci_find_parent_resource(dev, r); if (!pr || (pr->flags & IORESOURCE_UNSET) || request_resource(pr, r) < 0) { printk(KERN_WARNING "PCI: Cannot allocate resource region %d" " of device %s, will remap\n", idx, pci_name(dev)); if (pr) pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", pr, (unsigned long long)pr->start, (unsigned long long)pr->end, (unsigned int)pr->flags); /* We'll assign a new address later */ r->flags |= IORESOURCE_UNSET; r->end -= r->start; r->start = 0; } } static void __init pcibios_allocate_resources(int pass) { struct pci_dev *dev = NULL; int idx, disabled; u16 command; struct resource *r; for_each_pci_dev(dev) { pci_read_config_word(dev, PCI_COMMAND, &command); for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { r = &dev->resource[idx]; if (r->parent) /* Already allocated */ continue; if (!r->flags || (r->flags & IORESOURCE_UNSET)) continue; /* Not assigned at all */ /* We only allocate ROMs on pass 1 just in case they * have been screwed up by firmware */ if (idx == 
PCI_ROM_RESOURCE ) disabled = 1; if (r->flags & IORESOURCE_IO) disabled = !(command & PCI_COMMAND_IO); else disabled = !(command & PCI_COMMAND_MEMORY); if (pass == disabled) alloc_resource(dev, idx); } if (pass) continue; r = &dev->resource[PCI_ROM_RESOURCE]; if (r->flags) { /* Turn the ROM off, leave the resource region, * but keep it unregistered. */ u32 reg; pci_read_config_dword(dev, dev->rom_base_reg, &reg); if (reg & PCI_ROM_ADDRESS_ENABLE) { pr_debug("PCI: Switching off ROM of %s\n", pci_name(dev)); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE); } } } } static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); resource_size_t offset; struct resource *res, *pres; int i; pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); /* Check for IO */ if (!(hose->io_resource.flags & IORESOURCE_IO)) goto no_io; offset = (unsigned long)hose->io_base_virt - _IO_BASE; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(res == NULL); res->name = "Legacy IO"; res->flags = IORESOURCE_IO; res->start = offset; res->end = (offset + 0xfff) & 0xfffffffful; pr_debug("Candidate legacy IO: %pR\n", res); if (request_resource(&hose->io_resource, res)) { printk(KERN_DEBUG "PCI %04x:%02x Cannot reserve Legacy IO %pR\n", pci_domain_nr(bus), bus->number, res); kfree(res); } no_io: /* Check for memory */ offset = hose->pci_mem_offset; pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); for (i = 0; i < 3; i++) { pres = &hose->mem_resources[i]; if (!(pres->flags & IORESOURCE_MEM)) continue; pr_debug("hose mem res: %pR\n", pres); if ((pres->start - offset) <= 0xa0000 && (pres->end - offset) >= 0xbffff) break; } if (i >= 3) return; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(res == NULL); res->name = "Legacy VGA memory"; res->flags = IORESOURCE_MEM; res->start = 0xa0000 + offset; res->end = 0xbffff + 
offset; pr_debug("Candidate VGA memory: %pR\n", res); if (request_resource(pres, res)) { printk(KERN_DEBUG "PCI %04x:%02x Cannot reserve VGA memory %pR\n", pci_domain_nr(bus), bus->number, res); kfree(res); } } void __init pcibios_resource_survey(void) { struct pci_bus *b; /* Allocate and assign resources */ list_for_each_entry(b, &pci_root_buses, node) pcibios_allocate_bus_resources(b); pcibios_allocate_resources(0); pcibios_allocate_resources(1); /* Before we start assigning unassigned resource, we try to reserve * the low IO area and the VGA memory area if they intersect the * bus available resources to avoid allocating things on top of them */ if (!pci_has_flag(PCI_PROBE_ONLY)) { list_for_each_entry(b, &pci_root_buses, node) pcibios_reserve_legacy_regions(b); } /* Now, if the platform didn't decide to blindly trust the firmware, * we proceed to assigning things that were left unassigned */ if (!pci_has_flag(PCI_PROBE_ONLY)) { pr_debug("PCI: Assigning unassigned resources...\n"); pci_assign_unassigned_resources(); } /* Call machine dependent fixup */ if (ppc_md.pcibios_fixup) ppc_md.pcibios_fixup(); } #ifdef CONFIG_HOTPLUG /* This is used by the PCI hotplug driver to allocate resource * of newly plugged busses. We can try to consolidate with the * rest of the code later, for now, keep it as-is as our main * resource allocation function doesn't deal with sub-trees yet. 
*/ void pcibios_claim_one_bus(struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child_bus; list_for_each_entry(dev, &bus->devices, bus_list) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; if (r->parent || !r->start || !r->flags) continue; pr_debug("PCI: Claiming %s: " "Resource %d: %016llx..%016llx [%x]\n", pci_name(dev), i, (unsigned long long)r->start, (unsigned long long)r->end, (unsigned int)r->flags); pci_claim_resource(dev, i); } } list_for_each_entry(child_bus, &bus->children, node) pcibios_claim_one_bus(child_bus); } /* pcibios_finish_adding_to_bus * * This is to be called by the hotplug code after devices have been * added to a bus, this include calling it for a PHB that is just * being added */ void pcibios_finish_adding_to_bus(struct pci_bus *bus) { pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", pci_domain_nr(bus), bus->number); /* Allocate bus and devices resources */ pcibios_allocate_bus_resources(bus); pcibios_claim_one_bus(bus); /* Add new devices to global lists. Register in proc, sysfs. 
*/ pci_bus_add_devices(bus); /* Fixup EEH */ eeh_add_device_tree_late(bus); } EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); #endif /* CONFIG_HOTPLUG */ int pcibios_enable_device(struct pci_dev *dev, int mask) { if (ppc_md.pcibios_enable_device_hook) if (ppc_md.pcibios_enable_device_hook(dev)) return -EINVAL; return pci_enable_resources(dev, mask); } resource_size_t pcibios_io_space_offset(struct pci_controller *hose) { return (unsigned long) hose->io_base_virt - _IO_BASE; } static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources) { struct resource *res; int i; /* Hookup PHB IO resource */ res = &hose->io_resource; if (!res->flags) { printk(KERN_WARNING "PCI: I/O resource not set for host" " bridge %s (domain %d)\n", hose->dn->full_name, hose->global_number); #ifdef CONFIG_PPC32 /* Workaround for lack of IO resource only on 32-bit */ res->start = (unsigned long)hose->io_base_virt - isa_io_base; res->end = res->start + IO_SPACE_LIMIT; res->flags = IORESOURCE_IO; #endif /* CONFIG_PPC32 */ } pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long)res->flags); pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose)); /* Hookup PHB Memory resources */ for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; if (!res->flags) { if (i > 0) continue; printk(KERN_ERR "PCI: Memory resource 0 not set for " "host bridge %s (domain %d)\n", hose->dn->full_name, hose->global_number); #ifdef CONFIG_PPC32 /* Workaround for lack of MEM resource only on 32-bit */ res->start = hose->pci_mem_offset; res->end = (resource_size_t)-1LL; res->flags = IORESOURCE_MEM; #endif /* CONFIG_PPC32 */ } pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long)res->flags); pci_add_resource_offset(resources, res, hose->pci_mem_offset); } pr_debug("PCI: PHB MEM offset = 
%016llx\n", (unsigned long long)hose->pci_mem_offset); pr_debug("PCI: PHB IO offset = %08lx\n", (unsigned long)hose->io_base_virt - _IO_BASE); } /* * Null PCI config access functions, for the case when we can't * find a hose. */ #define NULL_PCI_OP(rw, size, type) \ static int \ null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ { \ return PCIBIOS_DEVICE_NOT_FOUND; \ } static int null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { return PCIBIOS_DEVICE_NOT_FOUND; } static int null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { return PCIBIOS_DEVICE_NOT_FOUND; } static struct pci_ops null_pci_ops = { .read = null_read_config, .write = null_write_config, }; /* * These functions are used early on before PCI scanning is done * and all of the pci_dev and pci_bus structures have been created. */ static struct pci_bus * fake_pci_bus(struct pci_controller *hose, int busnr) { static struct pci_bus bus; if (hose == 0) { printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); } bus.number = busnr; bus.sysdata = hose; bus.ops = hose? 
hose->ops: &null_pci_ops; return &bus; } #define EARLY_PCI_OP(rw, size, type) \ int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ int devfn, int offset, type value) \ { \ return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ devfn, offset, value); \ } EARLY_PCI_OP(read, byte, u8 *) EARLY_PCI_OP(read, word, u16 *) EARLY_PCI_OP(read, dword, u32 *) EARLY_PCI_OP(write, byte, u8) EARLY_PCI_OP(write, word, u16) EARLY_PCI_OP(write, dword, u32) extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap); int early_find_capability(struct pci_controller *hose, int bus, int devfn, int cap) { return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); } struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) { struct pci_controller *hose = bus->sysdata; return of_node_get(hose->dn); } /** * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus * @hose: Pointer to the PCI host controller instance structure */ void __devinit pcibios_scan_phb(struct pci_controller *hose) { LIST_HEAD(resources); struct pci_bus *bus; struct device_node *node = hose->dn; int mode; pr_debug("PCI: Scanning PHB %s\n", node ? 
node->full_name : "<NO NAME>"); /* Get some IO space for the new PHB */ pcibios_setup_phb_io_space(hose); /* Wire up PHB bus resources */ pcibios_setup_phb_resources(hose, &resources); /* Create an empty bus for the toplevel */ bus = pci_create_root_bus(hose->parent, hose->first_busno, hose->ops, hose, &resources); if (bus == NULL) { pr_err("Failed to create bus for PCI domain %04x\n", hose->global_number); pci_free_resource_list(&resources); return; } bus->secondary = hose->first_busno; hose->bus = bus; /* Get probe mode and perform scan */ mode = PCI_PROBE_NORMAL; if (node && ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); pr_debug(" probe mode: %d\n", mode); if (mode == PCI_PROBE_DEVTREE) { bus->subordinate = hose->last_busno; of_scan_bus(node, bus); } if (mode == PCI_PROBE_NORMAL) hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); /* Platform gets a chance to do some global fixups before * we proceed to resource allocation */ if (ppc_md.pcibios_fixup_phb) ppc_md.pcibios_fixup_phb(hose); /* Configure PCI Express settings */ if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { struct pci_bus *child; list_for_each_entry(child, &bus->children, node) { struct pci_dev *self = child->self; if (!self) continue; pcie_bus_configure_settings(child, self->pcie_mpss); } } } static void fixup_hide_host_resource_fsl(struct pci_dev *dev) { int i, class = dev->class >> 8; /* When configured as agent, programing interface = 1 */ int prog_if = dev->class & 0xf; if ((class == PCI_CLASS_PROCESSOR_POWERPC || class == PCI_CLASS_BRIDGE_OTHER) && (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) && (prog_if == 0) && (dev->bus->parent == NULL)) { for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; dev->resource[i].flags = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
gpl-2.0
kennethlyn/enclustra_zynq_linux
arch/powerpc/kernel/pci-common.c
4446
49031
/* * Contains common pci routines for ALL ppc platform * (based on pci_32.c and pci_64.c) * * Port for PPC64 David Engebretsen, IBM Corp. * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. * * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * Rework, based on alpha PCI code. * * Common pmac/prep/chrp pci routines. -- Cort * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/export.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/irq.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> #include <asm/eeh.h> static DEFINE_SPINLOCK(hose_spinlock); LIST_HEAD(hose_list); /* XXX kill that some day ... 
 */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

/* DMA ops used for PCI devices; defaults to direct mapping, platforms
 * may override via set_pci_dma_ops() before devices are set up */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

/*
 * Allocate a host bridge (hose) structure, assign it a unique global
 * number and add it to hose_list.  @dev is the bridge's device-tree
 * node (may be NULL).  Returns NULL on allocation failure.
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Once mem_init_done, the allocation came from the slab, so the
	 * hose may be kfree'd by pcibios_free_controller() */
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}

/* Unlink @phb from hose_list and free it if it was dynamically allocated */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}

/* Size of the hose's IO window; the bookkeeping differs on ppc32/ppc64 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}

/* Return 1 if @address falls inside some hose's virtual IO window */
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

/* Translate a CPU physical address into a logical IO port number,
 * or ~0 if the address is in no hose's IO window */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - 
_IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 * Walks up the device-tree from @node looking for a registered hose.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	while(node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

/* sysfs "devspec" show routine: the OF device-tree path of a PCI device */
static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}

/* No command-line option handling for PCI on this arch; pass through */
char __devinit *pcibios_setup(char *str)
{
	return str;
}

/*
 * Reads the interrupt pin to determine if interrupt is used by card.
 * If the interrupt is used, then gets the interrupt line from the
 * openfirmware and sets it in the pci_dev and pci_config line.
 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	/* Poison the struct so unfilled fields are obvious in debug output */
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 oirq.controller ? oirq.controller->full_name :
			 "<default>");

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if(virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
* * Returns negative error code on failure, zero on success. */ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, resource_size_t *offset, enum pci_mmap_state mmap_state) { struct pci_controller *hose = pci_bus_to_host(dev->bus); unsigned long io_offset = 0; int i, res_bit; if (hose == 0) return NULL; /* should never happen */ /* If memory, add on the PCI bridge address offset */ if (mmap_state == pci_mmap_mem) { #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ *offset += hose->pci_mem_offset; #endif res_bit = IORESOURCE_MEM; } else { io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; *offset += io_offset; res_bit = IORESOURCE_IO; } /* * Check that the offset requested corresponds to one of the * resources of the device. */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &dev->resource[i]; int flags = rp->flags; /* treat ROM as memory (should be already) */ if (i == PCI_ROM_RESOURCE) flags |= IORESOURCE_MEM; /* Active and same type? */ if ((flags & res_bit) == 0) continue; /* In the range of this resource? */ if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) continue; /* found it! construct the final physical address */ if (mmap_state == pci_mmap_io) *offset += hose->io_base_phys - io_offset; return rp; } return NULL; } /* * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci * device mapping. */ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, pgprot_t protection, enum pci_mmap_state mmap_state, int write_combine) { unsigned long prot = pgprot_val(protection); /* Write combine is always 0 on non-memory space mappings. On * memory space, if the user didn't pass 1, we check for a * "prefetchable" resource. 
This is a bit hackish, but we use * this to workaround the inability of /sysfs to provide a write * combine bit */ if (mmap_state != pci_mmap_mem) write_combine = 0; else if (write_combine == 0) { if (rp->flags & IORESOURCE_PREFETCH) write_combine = 1; } /* XXX would be nice to have a way to ask for write-through */ if (write_combine) return pgprot_noncached_wc(prot); else return pgprot_noncached(prot); } /* * This one is used by /dev/mem and fbdev who have no clue about the * PCI device, it tries to find the PCI device first and calls the * above routine */ pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t prot) { struct pci_dev *pdev = NULL; struct resource *found = NULL; resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; int i; if (page_is_ram(pfn)) return prot; prot = pgprot_noncached(prot); for_each_pci_dev(pdev) { for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &pdev->resource[i]; int flags = rp->flags; /* Active and same type? */ if ((flags & IORESOURCE_MEM) == 0) continue; /* In the range of this resource? */ if (offset < (rp->start & PAGE_MASK) || offset > rp->end) continue; found = rp; break; } if (found) break; } if (found) { if (found->flags & IORESOURCE_PREFETCH) prot = pgprot_noncached_wc(prot); pci_dev_put(pdev); } pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", (unsigned long long)offset, pgprot_val(prot)); return prot; } /* * Perform the actual remap of the pages for a PCI device mapping, as * appropriate for this architecture. The region in the process to map * is described by vm_start and vm_end members of VMA, the base physical * address is found in vm_pgoff. * The pci device structure is provided so that architectures may make mapping * decisions on a per-device or per-bus basis. * * Returns a negative error code on failure, zero on success. 
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	/* Validate the offset against the device's resources and
	 * translate it to a CPU physical address */
	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* This provides legacy IO read access on a bus.
 * Returns the number of bytes read, or a negative errno. */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* Reads must be naturally aligned; PCI IO space is little-endian */
	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus.
We only check * the ranges of the PHB though, not the bus itself as the rules * for forwarding legacy cycles down bridges are not our problem * here. So if the host bridge supports it, we do it. */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; offset += port; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (offset < rp->start || (offset + size) > rp->end) return -ENXIO; addr = hose->io_base_virt + port; /* WARNING: The generic code is idiotic. It gets passed a pointer * to what can be a 1, 2 or 4 byte quantity and always reads that * as a u32, which means that we have to correct the location of * the data read within those 32 bits for size 1 and 2 */ switch(size) { case 1: out_8(addr, val >> 24); return 1; case 2: if (port & 1) return -EINVAL; out_le16(addr, val >> 16); return 2; case 4: if (port & 3) return -EINVAL; out_le32(addr, val); return 4; } return -EINVAL; } /* This provides legacy IO or memory mmap access on a bus */ int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { struct pci_controller *hose = pci_bus_to_host(bus); resource_size_t offset = ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; resource_size_t size = vma->vm_end - vma->vm_start; struct resource *rp; pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", pci_domain_nr(bus), bus->number, mmap_state == pci_mmap_mem ? "MEM" : "IO", (unsigned long long)offset, (unsigned long long)(offset + size - 1)); if (mmap_state == pci_mmap_mem) { /* Hack alert ! 
* * Because X is lame and can fail starting if it gets an error trying * to mmap legacy_mem (instead of just moving on without legacy memory * access) we fake it here by giving it anonymous memory, effectively * behaving just like /dev/zero */ if ((offset + size) > hose->isa_mem_size) { printk(KERN_DEBUG "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n", current->comm, current->pid, pci_domain_nr(bus), bus->number); if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); return 0; } offset += hose->isa_mem_phys; } else { unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; unsigned long roffset = offset + io_offset; rp = &hose->io_resource; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (roffset < rp->start || (roffset + size) > rp->end) return -ENXIO; offset += hose->io_base_phys; } pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); vma->vm_pgoff = offset >> PAGE_SHIFT; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); } void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end) { struct pci_controller *hose = pci_bus_to_host(dev->bus); resource_size_t offset = 0; if (hose == NULL) return; if (rsrc->flags & IORESOURCE_IO) offset = (unsigned long)hose->io_base_virt - _IO_BASE; /* We pass a fully fixed up address to userland for MMIO instead of * a BAR value because X is lame and expects to be able to use that * to pass to /dev/mem ! * * That means that we'll have potentially 64 bits values where some * userland apps only expect 32 (like X itself since it thinks only * Sparc has 64 bits MMIO) but if we don't do that, we break it on * 32 bits CHRPs :-( * * Hopefully, the sysfs insterface is immune to that gunk. 
Once X * has been fixed (and the fix spread enough), we can re-enable the * 2 lines below and pass down a BAR value to userland. In that case * we'll also have to re-enable the matching code in * __pci_mmap_make_offset(). * * BenH. */ #if 0 else if (rsrc->flags & IORESOURCE_MEM) offset = hose->pci_mem_offset; #endif *start = rsrc->start - offset; *end = rsrc->end - offset; } /** * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree * @hose: newly allocated pci_controller to be setup * @dev: device node of the host bridge * @primary: set if primary bus (32 bits only, soon to be deprecated) * * This function will parse the "ranges" property of a PCI host bridge device * node and setup the resource mapping of a pci controller based on its * content. * * Life would be boring if it wasn't for a few issues that we have to deal * with here: * * - We can only cope with one IO space range and up to 3 Memory space * ranges. However, some machines (thanks Apple !) tend to split their * space into lots of small contiguous ranges. So we have to coalesce. * * - We can only cope with all memory ranges having the same offset * between CPU addresses and PCI addresses. Unfortunately, some bridges * are setup for a large 1:1 mapping along with a small "window" which * maps PCI address 0 to some arbitrary high address of the CPU space in * order to give access to the ISA memory hole. * The way out of here that I've chosen for now is to always set the * offset based on the first resource found, then override it if we * have a different offset and the previous was set by an ISA hole. * * - Some busses have IO space not starting at 0, which causes trouble with * the way we do our IO resource renumbering. The code somewhat deals with * it for 64 bits but I would expect problems on 32 bits. 
* * - Some 32 bits platforms such as 4xx can have physical space larger than * 32 bits so we need to use 64 bits values for the parsing */ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int primary) { const u32 *ranges; int rlen; int pna = of_n_addr_cells(dev); int np = pna + 5; int memno = 0, isa_hole = -1; u32 pci_space; unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; unsigned long long isa_mb = 0; struct resource *res; printk(KERN_INFO "PCI host bridge %s %s ranges:\n", dev->full_name, primary ? "(primary)" : ""); /* Get ranges property */ ranges = of_get_property(dev, "ranges", &rlen); if (ranges == NULL) return; /* Parse it */ while ((rlen -= np * 4) >= 0) { /* Read next ranges element */ pci_space = ranges[0]; pci_addr = of_read_number(ranges + 1, 2); cpu_addr = of_translate_address(dev, ranges + 3); size = of_read_number(ranges + pna + 3, 2); ranges += np; /* If we failed translation or got a zero-sized region * (some FW try to feed us with non sensical zero sized regions * such as power3 which look like some kind of attempt at exposing * the VGA memory hole) */ if (cpu_addr == OF_BAD_ADDR || size == 0) continue; /* Now consume following elements while they are contiguous */ for (; rlen >= np * sizeof(u32); ranges += np, rlen -= np * 4) { if (ranges[0] != pci_space) break; pci_next = of_read_number(ranges + 1, 2); cpu_next = of_translate_address(dev, ranges + 3); if (pci_next != pci_addr + size || cpu_next != cpu_addr + size) break; size += of_read_number(ranges + pna + 3, 2); } /* Act based on address space type */ res = NULL; switch ((pci_space >> 24) & 0x3) { case 1: /* PCI IO space */ printk(KERN_INFO " IO 0x%016llx..0x%016llx -> 0x%016llx\n", cpu_addr, cpu_addr + size - 1, pci_addr); /* We support only one IO range */ if (hose->pci_io_size) { printk(KERN_INFO " \\--> Skipped (too many) !\n"); continue; } #ifdef CONFIG_PPC32 /* On 32 bits, limit I/O space to 16MB */ if (size > 
0x01000000) size = 0x01000000; /* 32 bits needs to map IOs here */ hose->io_base_virt = ioremap(cpu_addr, size); /* Expect trouble if pci_addr is not 0 */ if (primary) isa_io_base = (unsigned long)hose->io_base_virt; #endif /* CONFIG_PPC32 */ /* pci_io_size and io_base_phys always represent IO * space starting at 0 so we factor in pci_addr */ hose->pci_io_size = pci_addr + size; hose->io_base_phys = cpu_addr - pci_addr; /* Build resource */ res = &hose->io_resource; res->flags = IORESOURCE_IO; res->start = pci_addr; break; case 2: /* PCI Memory space */ case 3: /* PCI 64 bits Memory space */ printk(KERN_INFO " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", cpu_addr, cpu_addr + size - 1, pci_addr, (pci_space & 0x40000000) ? "Prefetch" : ""); /* We support only 3 memory ranges */ if (memno >= 3) { printk(KERN_INFO " \\--> Skipped (too many) !\n"); continue; } /* Handles ISA memory hole space here */ if (pci_addr == 0) { isa_mb = cpu_addr; isa_hole = memno; if (primary || isa_mem_base == 0) isa_mem_base = cpu_addr; hose->isa_mem_phys = cpu_addr; hose->isa_mem_size = size; } /* We get the PCI/Mem offset from the first range or * the, current one if the offset came from an ISA * hole. If they don't match, bugger. 
*/ if (memno == 0 || (isa_hole >= 0 && pci_addr != 0 && hose->pci_mem_offset == isa_mb)) hose->pci_mem_offset = cpu_addr - pci_addr; else if (pci_addr != 0 && hose->pci_mem_offset != cpu_addr - pci_addr) { printk(KERN_INFO " \\--> Skipped (offset mismatch) !\n"); continue; } /* Build resource */ res = &hose->mem_resources[memno++]; res->flags = IORESOURCE_MEM; if (pci_space & 0x40000000) res->flags |= IORESOURCE_PREFETCH; res->start = cpu_addr; break; } if (res != NULL) { res->name = dev->full_name; res->end = res->start + size - 1; res->parent = NULL; res->sibling = NULL; res->child = NULL; } } /* If there's an ISA hole and the pci_mem_offset is -not- matching * the ISA hole offset, then we need to remove the ISA hole from * the resource list for that brige */ if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { unsigned int next = isa_hole + 1; printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb); if (next < memno) memmove(&hose->mem_resources[isa_hole], &hose->mem_resources[next], sizeof(struct resource) * (memno - next)); hose->mem_resources[--memno].flags = 0; } } /* Decide whether to display the domain number in /proc */ int pci_proc_domain(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS)) return 0; if (pci_has_flag(PCI_COMPAT_DOMAIN_0)) return hose->global_number != 0; return 1; } /* This header fixup will do the resource fixup for all devices as they are * probed, but not for bridge ranges */ static void __devinit pcibios_fixup_resources(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); int i; if (!hose) { printk(KERN_ERR "No host bridge for PCI dev %s !\n", pci_name(dev)); return; } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = dev->resource + i; if (!res->flags) continue; /* If we're going to re-assign everything, we mark all resources * as unset (and 0-base them). 
In addition, we mark BARs starting * at 0 as unset as well, except if PCI_PROBE_ONLY is also set * since in that case, we don't want to re-assign anything */ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { /* Only print message if not re-assigning */ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " "is unassigned\n", pci_name(dev), i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags); res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; continue; } pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n", pci_name(dev), i, (unsigned long long)res->start,\ (unsigned long long)res->end, (unsigned int)res->flags); } /* Call machine specific resource fixup */ if (ppc_md.pcibios_fixup_resources) ppc_md.pcibios_fixup_resources(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); /* This function tries to figure out if a bridge resource has been initialized * by the firmware or not. It doesn't have to be absolutely bullet proof, but * things go more smoothly when it gets it right. It should covers cases such * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges */ static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, struct resource *res) { struct pci_controller *hose = pci_bus_to_host(bus); struct pci_dev *dev = bus->self; resource_size_t offset; u16 command; int i; /* We don't do anything if PCI_PROBE_ONLY is set */ if (pci_has_flag(PCI_PROBE_ONLY)) return 0; /* Job is a bit different between memory and IO */ if (res->flags & IORESOURCE_MEM) { /* If the BAR is non-0 (res != pci_mem_offset) then it's probably been * initialized by somebody */ if (res->start != hose->pci_mem_offset) return 0; /* The BAR is 0, let's check if memory decoding is enabled on * the bridge. 
If not, we consider it unassigned */ pci_read_config_word(dev, PCI_COMMAND, &command); if ((command & PCI_COMMAND_MEMORY) == 0) return 1; /* Memory decoding is enabled and the BAR is 0. If any of the bridge * resources covers that starting address (0 then it's good enough for * us for memory */ for (i = 0; i < 3; i++) { if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && hose->mem_resources[i].start == hose->pci_mem_offset) return 0; } /* Well, it starts at 0 and we know it will collide so we may as * well consider it as unassigned. That covers the Apple case. */ return 1; } else { /* If the BAR is non-0, then we consider it assigned */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; if (((res->start - offset) & 0xfffffffful) != 0) return 0; /* Here, we are a bit different than memory as typically IO space * starting at low addresses -is- valid. What we do instead if that * we consider as unassigned anything that doesn't have IO enabled * in the PCI command register, and that's it. 
*/ pci_read_config_word(dev, PCI_COMMAND, &command); if (command & PCI_COMMAND_IO) return 0; /* It's starting at 0 and IO is disabled in the bridge, consider * it unassigned */ return 1; } } /* Fixup resources of a PCI<->PCI bridge */ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) { struct resource *res; int i; struct pci_dev *dev = bus->self; pci_bus_for_each_resource(bus, res, i) { if (!res || !res->flags) continue; if (i >= 3 && bus->self->transparent) continue; /* If we are going to re-assign everything, mark the resource * as unset and move it down to 0 */ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { res->flags |= IORESOURCE_UNSET; res->end -= res->start; res->start = 0; continue; } pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n", pci_name(dev), i, (unsigned long long)res->start,\ (unsigned long long)res->end, (unsigned int)res->flags); /* Try to detect uninitialized P2P bridge resources, * and clear them out so they get re-assigned later */ if (pcibios_uninitialized_bridge_resource(bus, res)) { res->flags = 0; pr_debug("PCI:%s (unassigned)\n", pci_name(dev)); } } } void __devinit pcibios_setup_bus_self(struct pci_bus *bus) { /* Fix up the bus resources for P2P bridges */ if (bus->self != NULL) pcibios_fixup_bridge(bus); /* Platform specific bus fixups. This is currently only used * by fsl_pci and I'm hoping to get rid of it at some point */ if (ppc_md.pcibios_fixup_bus) ppc_md.pcibios_fixup_bus(bus); /* Setup bus DMA mappings */ if (ppc_md.pci_dma_bus_setup) ppc_md.pci_dma_bus_setup(bus); } void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) { struct pci_dev *dev; pr_debug("PCI: Fixup bus devices %d (%s)\n", bus->number, bus->self ? 
pci_name(bus->self) : "PHB"); list_for_each_entry(dev, &bus->devices, bus_list) { /* Cardbus can call us to add new devices to a bus, so ignore * those who are already fully discovered */ if (dev->is_added) continue; /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init */ set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); /* Hook up default DMA ops */ set_dma_ops(&dev->dev, pci_dma_ops); set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); /* Additional platform DMA/iommu setup */ if (ppc_md.pci_dma_dev_setup) ppc_md.pci_dma_dev_setup(dev); /* Read default IRQs and fixup if necessary */ pci_read_irq_line(dev); if (ppc_md.pci_irq_fixup) ppc_md.pci_irq_fixup(dev); } } void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ } void __devinit pcibios_fixup_bus(struct pci_bus *bus) { /* When called from the generic PCI probe, read PCI<->PCI bridge * bases. This is -not- called when generating the PCI tree from * the OF device-tree. */ if (bus->self != NULL) pci_read_bridge_bases(bus); /* Now fixup the bus bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); } EXPORT_SYMBOL(pcibios_fixup_bus); void __devinit pci_fixup_cardbus(struct pci_bus *bus) { /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); } static int skip_isa_ioresource_align(struct pci_dev *dev) { if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) && !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA)) return 1; return 0; } /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. 
The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		/* Round up past the 0x100-0x3ff mirror zone within each
		 * 1K window (see comment above) */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 * Returns 0 on success, -1 if a conflicting child is not fully
 * contained within res (or if nothing conflicted at all).
 */
static int reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Scan parent's sibling-linked child list for the run of entries
	 * overlapping res; firstpp remembers where that run starts */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into the sibling list in place of the conflicting
	 * children, then hang those children under res */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}

/*
 * Handle resources of PCI devices. If the world were perfect, we could
 * just allocate all the resource regions and do nothing more. It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals. So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
* * Known BIOS problems we have to work around: * - I/O or memory regions not configured * - regions configured, but not enabled in the command register * - bogus I/O addresses above 64K used * - expansion ROMs left enabled (this may sound harmless, but given * the fact the PCI specs explicitly allow address decoders to be * shared between expansion ROMs and other resource regions, it's * at least dangerous) * * Our solution: * (1) Allocate resources for all buses behind PCI-to-PCI bridges. * This gives us fixed barriers on where we can allocate. * (2) Allocate resources for all enabled devices. If there is * a collision, just mark the resource as unallocated. Also * disable expansion ROMs during this step. * (3) Try to allocate resources for disabled devices. If the * resources were assigned correctly, everything goes well, * if they weren't, they won't disturb allocation of other * resources. * (4) Assign new addresses to resources which were either * not configured at all or misconfigured. If explicitly * requested by the user, configure expansion ROM address * as well. */ void pcibios_allocate_bus_resources(struct pci_bus *bus) { struct pci_bus *b; int i; struct resource *res, *pr; pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", pci_domain_nr(bus), bus->number); pci_bus_for_each_resource(bus, res, i) { if (!res || !res->flags || res->start > res->end || res->parent) continue; /* If the resource was left unset at this point, we clear it */ if (res->flags & IORESOURCE_UNSET) goto clear_resource; if (bus->parent == NULL) pr = (res->flags & IORESOURCE_IO) ? &ioport_resource : &iomem_resource; else { pr = pci_find_parent_resource(bus->self, res); if (pr == res) { /* this happens when the generic PCI * code (wrongly) decides that this * bridge is transparent -- paulus */ continue; } } pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " "[0x%x], parent %p (%s)\n", bus->self ? 
pci_name(bus->self) : "PHB", bus->number, i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags, pr, (pr && pr->name) ? pr->name : "nil"); if (pr && !(pr->flags & IORESOURCE_UNSET)) { if (request_resource(pr, res) == 0) continue; /* * Must be a conflict with an existing entry. * Move that entry (or entries) under the * bridge resource and try again. */ if (reparent_resources(pr, res) == 0) continue; } pr_warning("PCI: Cannot allocate resource region " "%d of PCI bridge %d, will remap\n", i, bus->number); clear_resource: res->start = res->end = 0; res->flags = 0; } list_for_each_entry(b, &bus->children, node) pcibios_allocate_bus_resources(b); } static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) { struct resource *pr, *r = &dev->resource[idx]; pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", pci_name(dev), idx, (unsigned long long)r->start, (unsigned long long)r->end, (unsigned int)r->flags); pr = pci_find_parent_resource(dev, r); if (!pr || (pr->flags & IORESOURCE_UNSET) || request_resource(pr, r) < 0) { printk(KERN_WARNING "PCI: Cannot allocate resource region %d" " of device %s, will remap\n", idx, pci_name(dev)); if (pr) pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", pr, (unsigned long long)pr->start, (unsigned long long)pr->end, (unsigned int)pr->flags); /* We'll assign a new address later */ r->flags |= IORESOURCE_UNSET; r->end -= r->start; r->start = 0; } } static void __init pcibios_allocate_resources(int pass) { struct pci_dev *dev = NULL; int idx, disabled; u16 command; struct resource *r; for_each_pci_dev(dev) { pci_read_config_word(dev, PCI_COMMAND, &command); for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { r = &dev->resource[idx]; if (r->parent) /* Already allocated */ continue; if (!r->flags || (r->flags & IORESOURCE_UNSET)) continue; /* Not assigned at all */ /* We only allocate ROMs on pass 1 just in case they * have been screwed up by firmware */ if (idx == 
PCI_ROM_RESOURCE ) disabled = 1; if (r->flags & IORESOURCE_IO) disabled = !(command & PCI_COMMAND_IO); else disabled = !(command & PCI_COMMAND_MEMORY); if (pass == disabled) alloc_resource(dev, idx); } if (pass) continue; r = &dev->resource[PCI_ROM_RESOURCE]; if (r->flags) { /* Turn the ROM off, leave the resource region, * but keep it unregistered. */ u32 reg; pci_read_config_dword(dev, dev->rom_base_reg, &reg); if (reg & PCI_ROM_ADDRESS_ENABLE) { pr_debug("PCI: Switching off ROM of %s\n", pci_name(dev)); r->flags &= ~IORESOURCE_ROM_ENABLE; pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE); } } } } static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); resource_size_t offset; struct resource *res, *pres; int i; pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); /* Check for IO */ if (!(hose->io_resource.flags & IORESOURCE_IO)) goto no_io; offset = (unsigned long)hose->io_base_virt - _IO_BASE; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(res == NULL); res->name = "Legacy IO"; res->flags = IORESOURCE_IO; res->start = offset; res->end = (offset + 0xfff) & 0xfffffffful; pr_debug("Candidate legacy IO: %pR\n", res); if (request_resource(&hose->io_resource, res)) { printk(KERN_DEBUG "PCI %04x:%02x Cannot reserve Legacy IO %pR\n", pci_domain_nr(bus), bus->number, res); kfree(res); } no_io: /* Check for memory */ offset = hose->pci_mem_offset; pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); for (i = 0; i < 3; i++) { pres = &hose->mem_resources[i]; if (!(pres->flags & IORESOURCE_MEM)) continue; pr_debug("hose mem res: %pR\n", pres); if ((pres->start - offset) <= 0xa0000 && (pres->end - offset) >= 0xbffff) break; } if (i >= 3) return; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(res == NULL); res->name = "Legacy VGA memory"; res->flags = IORESOURCE_MEM; res->start = 0xa0000 + offset; res->end = 0xbffff + 
offset; pr_debug("Candidate VGA memory: %pR\n", res); if (request_resource(pres, res)) { printk(KERN_DEBUG "PCI %04x:%02x Cannot reserve VGA memory %pR\n", pci_domain_nr(bus), bus->number, res); kfree(res); } } void __init pcibios_resource_survey(void) { struct pci_bus *b; /* Allocate and assign resources */ list_for_each_entry(b, &pci_root_buses, node) pcibios_allocate_bus_resources(b); pcibios_allocate_resources(0); pcibios_allocate_resources(1); /* Before we start assigning unassigned resource, we try to reserve * the low IO area and the VGA memory area if they intersect the * bus available resources to avoid allocating things on top of them */ if (!pci_has_flag(PCI_PROBE_ONLY)) { list_for_each_entry(b, &pci_root_buses, node) pcibios_reserve_legacy_regions(b); } /* Now, if the platform didn't decide to blindly trust the firmware, * we proceed to assigning things that were left unassigned */ if (!pci_has_flag(PCI_PROBE_ONLY)) { pr_debug("PCI: Assigning unassigned resources...\n"); pci_assign_unassigned_resources(); } /* Call machine dependent fixup */ if (ppc_md.pcibios_fixup) ppc_md.pcibios_fixup(); } #ifdef CONFIG_HOTPLUG /* This is used by the PCI hotplug driver to allocate resource * of newly plugged busses. We can try to consolidate with the * rest of the code later, for now, keep it as-is as our main * resource allocation function doesn't deal with sub-trees yet. 
*/ void pcibios_claim_one_bus(struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child_bus; list_for_each_entry(dev, &bus->devices, bus_list) { int i; for (i = 0; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; if (r->parent || !r->start || !r->flags) continue; pr_debug("PCI: Claiming %s: " "Resource %d: %016llx..%016llx [%x]\n", pci_name(dev), i, (unsigned long long)r->start, (unsigned long long)r->end, (unsigned int)r->flags); pci_claim_resource(dev, i); } } list_for_each_entry(child_bus, &bus->children, node) pcibios_claim_one_bus(child_bus); } /* pcibios_finish_adding_to_bus * * This is to be called by the hotplug code after devices have been * added to a bus, this include calling it for a PHB that is just * being added */ void pcibios_finish_adding_to_bus(struct pci_bus *bus) { pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", pci_domain_nr(bus), bus->number); /* Allocate bus and devices resources */ pcibios_allocate_bus_resources(bus); pcibios_claim_one_bus(bus); /* Add new devices to global lists. Register in proc, sysfs. 
*/ pci_bus_add_devices(bus); /* Fixup EEH */ eeh_add_device_tree_late(bus); } EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); #endif /* CONFIG_HOTPLUG */ int pcibios_enable_device(struct pci_dev *dev, int mask) { if (ppc_md.pcibios_enable_device_hook) if (ppc_md.pcibios_enable_device_hook(dev)) return -EINVAL; return pci_enable_resources(dev, mask); } resource_size_t pcibios_io_space_offset(struct pci_controller *hose) { return (unsigned long) hose->io_base_virt - _IO_BASE; } static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources) { struct resource *res; int i; /* Hookup PHB IO resource */ res = &hose->io_resource; if (!res->flags) { printk(KERN_WARNING "PCI: I/O resource not set for host" " bridge %s (domain %d)\n", hose->dn->full_name, hose->global_number); #ifdef CONFIG_PPC32 /* Workaround for lack of IO resource only on 32-bit */ res->start = (unsigned long)hose->io_base_virt - isa_io_base; res->end = res->start + IO_SPACE_LIMIT; res->flags = IORESOURCE_IO; #endif /* CONFIG_PPC32 */ } pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long)res->flags); pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose)); /* Hookup PHB Memory resources */ for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; if (!res->flags) { if (i > 0) continue; printk(KERN_ERR "PCI: Memory resource 0 not set for " "host bridge %s (domain %d)\n", hose->dn->full_name, hose->global_number); #ifdef CONFIG_PPC32 /* Workaround for lack of MEM resource only on 32-bit */ res->start = hose->pci_mem_offset; res->end = (resource_size_t)-1LL; res->flags = IORESOURCE_MEM; #endif /* CONFIG_PPC32 */ } pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned long)res->flags); pci_add_resource_offset(resources, res, hose->pci_mem_offset); } pr_debug("PCI: PHB MEM offset = 
%016llx\n", (unsigned long long)hose->pci_mem_offset); pr_debug("PCI: PHB IO offset = %08lx\n", (unsigned long)hose->io_base_virt - _IO_BASE); } /* * Null PCI config access functions, for the case when we can't * find a hose. */ #define NULL_PCI_OP(rw, size, type) \ static int \ null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ { \ return PCIBIOS_DEVICE_NOT_FOUND; \ } static int null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { return PCIBIOS_DEVICE_NOT_FOUND; } static int null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { return PCIBIOS_DEVICE_NOT_FOUND; } static struct pci_ops null_pci_ops = { .read = null_read_config, .write = null_write_config, }; /* * These functions are used early on before PCI scanning is done * and all of the pci_dev and pci_bus structures have been created. */ static struct pci_bus * fake_pci_bus(struct pci_controller *hose, int busnr) { static struct pci_bus bus; if (hose == 0) { printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); } bus.number = busnr; bus.sysdata = hose; bus.ops = hose? 
hose->ops: &null_pci_ops; return &bus; } #define EARLY_PCI_OP(rw, size, type) \ int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ int devfn, int offset, type value) \ { \ return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ devfn, offset, value); \ } EARLY_PCI_OP(read, byte, u8 *) EARLY_PCI_OP(read, word, u16 *) EARLY_PCI_OP(read, dword, u32 *) EARLY_PCI_OP(write, byte, u8) EARLY_PCI_OP(write, word, u16) EARLY_PCI_OP(write, dword, u32) extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap); int early_find_capability(struct pci_controller *hose, int bus, int devfn, int cap) { return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); } struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) { struct pci_controller *hose = bus->sysdata; return of_node_get(hose->dn); } /** * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus * @hose: Pointer to the PCI host controller instance structure */ void __devinit pcibios_scan_phb(struct pci_controller *hose) { LIST_HEAD(resources); struct pci_bus *bus; struct device_node *node = hose->dn; int mode; pr_debug("PCI: Scanning PHB %s\n", node ? 
node->full_name : "<NO NAME>"); /* Get some IO space for the new PHB */ pcibios_setup_phb_io_space(hose); /* Wire up PHB bus resources */ pcibios_setup_phb_resources(hose, &resources); /* Create an empty bus for the toplevel */ bus = pci_create_root_bus(hose->parent, hose->first_busno, hose->ops, hose, &resources); if (bus == NULL) { pr_err("Failed to create bus for PCI domain %04x\n", hose->global_number); pci_free_resource_list(&resources); return; } bus->secondary = hose->first_busno; hose->bus = bus; /* Get probe mode and perform scan */ mode = PCI_PROBE_NORMAL; if (node && ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); pr_debug(" probe mode: %d\n", mode); if (mode == PCI_PROBE_DEVTREE) { bus->subordinate = hose->last_busno; of_scan_bus(node, bus); } if (mode == PCI_PROBE_NORMAL) hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); /* Platform gets a chance to do some global fixups before * we proceed to resource allocation */ if (ppc_md.pcibios_fixup_phb) ppc_md.pcibios_fixup_phb(hose); /* Configure PCI Express settings */ if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { struct pci_bus *child; list_for_each_entry(child, &bus->children, node) { struct pci_dev *self = child->self; if (!self) continue; pcie_bus_configure_settings(child, self->pcie_mpss); } } } static void fixup_hide_host_resource_fsl(struct pci_dev *dev) { int i, class = dev->class >> 8; /* When configured as agent, programing interface = 1 */ int prog_if = dev->class & 0xf; if ((class == PCI_CLASS_PROCESSOR_POWERPC || class == PCI_CLASS_BRIDGE_OTHER) && (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) && (prog_if == 0) && (dev->bus->parent == NULL)) { for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; dev->resource[i].flags = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
gpl-2.0
zarboz/Butterfly_s_beastmode
drivers/net/irda/bfin_sir.c
4958
18780
/* * Blackfin Infra-red Driver * * Copyright 2006-2009 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. * */ #include "bfin_sir.h" #ifdef CONFIG_SIR_BFIN_DMA #define DMA_SIR_RX_XCNT 10 #define DMA_SIR_RX_YCNT (PAGE_SIZE / DMA_SIR_RX_XCNT) #define DMA_SIR_RX_FLUSH_JIFS (HZ * 4 / 250) #endif #if ANOMALY_05000447 static int max_rate = 57600; #else static int max_rate = 115200; #endif static void turnaround_delay(unsigned long last_jif, int mtt) { long ticks; mtt = mtt < 10000 ? 10000 : mtt; ticks = 1 + mtt / (USEC_PER_SEC / HZ); schedule_timeout_uninterruptible(ticks); } static void __devinit bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev) { int i; struct resource *res; for (i = 0; i < pdev->num_resources; i++) { res = &pdev->resource[i]; switch (res->flags) { case IORESOURCE_MEM: sp->membase = (void __iomem *)res->start; break; case IORESOURCE_IRQ: sp->irq = res->start; break; case IORESOURCE_DMA: sp->rx_dma_channel = res->start; sp->tx_dma_channel = res->end; break; default: break; } } sp->clk = get_sclk(); #ifdef CONFIG_SIR_BFIN_DMA sp->tx_done = 1; init_timer(&(sp->rx_dma_timer)); #endif } static void bfin_sir_stop_tx(struct bfin_sir_port *port) { #ifdef CONFIG_SIR_BFIN_DMA disable_dma(port->tx_dma_channel); #endif while (!(UART_GET_LSR(port) & THRE)) { cpu_relax(); continue; } UART_CLEAR_IER(port, ETBEI); } static void bfin_sir_enable_tx(struct bfin_sir_port *port) { UART_SET_IER(port, ETBEI); } static void bfin_sir_stop_rx(struct bfin_sir_port *port) { UART_CLEAR_IER(port, ERBFI); } static void bfin_sir_enable_rx(struct bfin_sir_port *port) { UART_SET_IER(port, ERBFI); } static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) { int ret = -EINVAL; unsigned int quot; unsigned short val, lsr, lcr; static int utime; int count = 10; lcr = WLS(8); switch (speed) { case 9600: case 19200: case 38400: case 57600: case 115200: /* * IRDA is not affected by anomaly 05000230, so 
there is no * need to tweak the divisor like he UART driver (which will * slightly speed up the baud rate on us). */ quot = (port->clk + (8 * speed)) / (16 * speed); do { udelay(utime); lsr = UART_GET_LSR(port); } while (!(lsr & TEMT) && count--); /* The useconds for 1 bits to transmit */ utime = 1000000 / speed + 1; /* Clear UCEN bit to reset the UART state machine * and control registers */ val = UART_GET_GCTL(port); val &= ~UCEN; UART_PUT_GCTL(port, val); /* Set DLAB in LCR to Access THR RBR IER */ UART_SET_DLAB(port); SSYNC(); UART_PUT_DLL(port, quot & 0xFF); UART_PUT_DLH(port, (quot >> 8) & 0xFF); SSYNC(); /* Clear DLAB in LCR */ UART_CLEAR_DLAB(port); SSYNC(); UART_PUT_LCR(port, lcr); val = UART_GET_GCTL(port); val |= UCEN; UART_PUT_GCTL(port, val); ret = 0; break; default: printk(KERN_WARNING "bfin_sir: Invalid speed %d\n", speed); break; } val = UART_GET_GCTL(port); /* If not add the 'RPOLC', we can't catch the receive interrupt. * It's related with the HW layout and the IR transiver. */ val |= IREN | RPOLC; UART_PUT_GCTL(port, val); return ret; } static int bfin_sir_is_receiving(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; if (!(UART_GET_IER(port) & ERBFI)) return 0; return self->rx_buff.state != OUTSIDE_FRAME; } #ifdef CONFIG_SIR_BFIN_PIO static void bfin_sir_tx_chars(struct net_device *dev) { unsigned int chr; struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; if (self->tx_buff.len != 0) { chr = *(self->tx_buff.data); UART_PUT_CHAR(port, chr); self->tx_buff.data++; self->tx_buff.len--; } else { self->stats.tx_packets++; self->stats.tx_bytes += self->tx_buff.data - self->tx_buff.head; if (self->newspeed) { bfin_sir_set_speed(port, self->newspeed); self->speed = self->newspeed; self->newspeed = 0; } bfin_sir_stop_tx(port); bfin_sir_enable_rx(port); /* I'm hungry! 
*/ netif_wake_queue(dev); } } static void bfin_sir_rx_chars(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; unsigned char ch; UART_CLEAR_LSR(port); ch = UART_GET_CHAR(port); async_unwrap_char(dev, &self->stats, &self->rx_buff, ch); dev->last_rx = jiffies; } static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; spin_lock(&self->lock); while ((UART_GET_LSR(port) & DR)) bfin_sir_rx_chars(dev); spin_unlock(&self->lock); return IRQ_HANDLED; } static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; spin_lock(&self->lock); if (UART_GET_LSR(port) & THRE) bfin_sir_tx_chars(dev); spin_unlock(&self->lock); return IRQ_HANDLED; } #endif /* CONFIG_SIR_BFIN_PIO */ #ifdef CONFIG_SIR_BFIN_DMA static void bfin_sir_dma_tx_chars(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; if (!port->tx_done) return; port->tx_done = 0; if (self->tx_buff.len == 0) { self->stats.tx_packets++; if (self->newspeed) { bfin_sir_set_speed(port, self->newspeed); self->speed = self->newspeed; self->newspeed = 0; } bfin_sir_enable_rx(port); port->tx_done = 1; netif_wake_queue(dev); return; } blackfin_dcache_flush_range((unsigned long)(self->tx_buff.data), (unsigned long)(self->tx_buff.data+self->tx_buff.len)); set_dma_config(port->tx_dma_channel, set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP, INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8, DMA_SYNC_RESTART)); set_dma_start_addr(port->tx_dma_channel, (unsigned long)(self->tx_buff.data)); set_dma_x_count(port->tx_dma_channel, self->tx_buff.len); set_dma_x_modify(port->tx_dma_channel, 1); enable_dma(port->tx_dma_channel); } static irqreturn_t bfin_sir_dma_tx_int(int irq, void 
*dev_id) { struct net_device *dev = dev_id; struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; spin_lock(&self->lock); if (!(get_dma_curr_irqstat(port->tx_dma_channel) & DMA_RUN)) { clear_dma_irqstat(port->tx_dma_channel); bfin_sir_stop_tx(port); self->stats.tx_packets++; self->stats.tx_bytes += self->tx_buff.len; self->tx_buff.len = 0; if (self->newspeed) { bfin_sir_set_speed(port, self->newspeed); self->speed = self->newspeed; self->newspeed = 0; } bfin_sir_enable_rx(port); /* I'm hungry! */ netif_wake_queue(dev); port->tx_done = 1; } spin_unlock(&self->lock); return IRQ_HANDLED; } static void bfin_sir_dma_rx_chars(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; int i; UART_CLEAR_LSR(port); for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++) async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]); } void bfin_sir_rx_dma_timeout(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; int x_pos, pos; unsigned long flags; spin_lock_irqsave(&self->lock, flags); x_pos = DMA_SIR_RX_XCNT - get_dma_curr_xcount(port->rx_dma_channel); if (x_pos == DMA_SIR_RX_XCNT) x_pos = 0; pos = port->rx_dma_nrows * DMA_SIR_RX_XCNT + x_pos; if (pos > port->rx_dma_buf.tail) { port->rx_dma_buf.tail = pos; bfin_sir_dma_rx_chars(dev); port->rx_dma_buf.head = port->rx_dma_buf.tail; } spin_unlock_irqrestore(&self->lock, flags); } static irqreturn_t bfin_sir_dma_rx_int(int irq, void *dev_id) { struct net_device *dev = dev_id; struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; unsigned short irqstat; spin_lock(&self->lock); port->rx_dma_nrows++; port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows; bfin_sir_dma_rx_chars(dev); if (port->rx_dma_nrows >= DMA_SIR_RX_YCNT) { port->rx_dma_nrows = 0; port->rx_dma_buf.tail = 0; } port->rx_dma_buf.head 
= port->rx_dma_buf.tail; irqstat = get_dma_curr_irqstat(port->rx_dma_channel); clear_dma_irqstat(port->rx_dma_channel); spin_unlock(&self->lock); mod_timer(&port->rx_dma_timer, jiffies + DMA_SIR_RX_FLUSH_JIFS); return IRQ_HANDLED; } #endif /* CONFIG_SIR_BFIN_DMA */ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev) { #ifdef CONFIG_SIR_BFIN_DMA dma_addr_t dma_handle; #endif /* CONFIG_SIR_BFIN_DMA */ if (request_dma(port->rx_dma_channel, "BFIN_UART_RX") < 0) { dev_warn(&dev->dev, "Unable to attach SIR RX DMA channel\n"); return -EBUSY; } if (request_dma(port->tx_dma_channel, "BFIN_UART_TX") < 0) { dev_warn(&dev->dev, "Unable to attach SIR TX DMA channel\n"); free_dma(port->rx_dma_channel); return -EBUSY; } #ifdef CONFIG_SIR_BFIN_DMA set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev); set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev); port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); port->rx_dma_buf.head = 0; port->rx_dma_buf.tail = 0; port->rx_dma_nrows = 0; set_dma_config(port->rx_dma_channel, set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, INTR_ON_ROW, DIMENSION_2D, DATA_SIZE_8, DMA_SYNC_RESTART)); set_dma_x_count(port->rx_dma_channel, DMA_SIR_RX_XCNT); set_dma_x_modify(port->rx_dma_channel, 1); set_dma_y_count(port->rx_dma_channel, DMA_SIR_RX_YCNT); set_dma_y_modify(port->rx_dma_channel, 1); set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf); enable_dma(port->rx_dma_channel); port->rx_dma_timer.data = (unsigned long)(dev); port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout; #else if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) { dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n"); return -EBUSY; } if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) { dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n"); free_irq(port->irq, dev); return -EBUSY; } 
#endif return 0; } static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev) { unsigned short val; bfin_sir_stop_rx(port); val = UART_GET_GCTL(port); val &= ~(UCEN | IREN | RPOLC); UART_PUT_GCTL(port, val); #ifdef CONFIG_SIR_BFIN_DMA disable_dma(port->tx_dma_channel); disable_dma(port->rx_dma_channel); del_timer(&(port->rx_dma_timer)); dma_free_coherent(NULL, PAGE_SIZE, port->rx_dma_buf.buf, 0); #else free_irq(port->irq+1, dev); free_irq(port->irq, dev); #endif free_dma(port->tx_dma_channel); free_dma(port->rx_dma_channel); } #ifdef CONFIG_PM static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state) { struct bfin_sir_port *sir_port; struct net_device *dev; struct bfin_sir_self *self; sir_port = platform_get_drvdata(pdev); if (!sir_port) return 0; dev = sir_port->dev; self = netdev_priv(dev); if (self->open) { flush_work(&self->work); bfin_sir_shutdown(self->sir_port, dev); netif_device_detach(dev); } return 0; } static int bfin_sir_resume(struct platform_device *pdev) { struct bfin_sir_port *sir_port; struct net_device *dev; struct bfin_sir_self *self; struct bfin_sir_port *port; sir_port = platform_get_drvdata(pdev); if (!sir_port) return 0; dev = sir_port->dev; self = netdev_priv(dev); port = self->sir_port; if (self->open) { if (self->newspeed) { self->speed = self->newspeed; self->newspeed = 0; } bfin_sir_startup(port, dev); bfin_sir_set_speed(port, 9600); bfin_sir_enable_rx(port); netif_device_attach(dev); } return 0; } #else #define bfin_sir_suspend NULL #define bfin_sir_resume NULL #endif static void bfin_sir_send_work(struct work_struct *work) { struct bfin_sir_self *self = container_of(work, struct bfin_sir_self, work); struct net_device *dev = self->sir_port->dev; struct bfin_sir_port *port = self->sir_port; unsigned short val; int tx_cnt = 10; while (bfin_sir_is_receiving(dev) && --tx_cnt) turnaround_delay(dev->last_rx, self->mtt); bfin_sir_stop_rx(port); /* To avoid losting RX interrupt, we reset IR function 
before * sending data. We also can set the speed, which will * reset all the UART. */ val = UART_GET_GCTL(port); val &= ~(IREN | RPOLC); UART_PUT_GCTL(port, val); SSYNC(); val |= IREN | RPOLC; UART_PUT_GCTL(port, val); SSYNC(); /* bfin_sir_set_speed(port, self->speed); */ #ifdef CONFIG_SIR_BFIN_DMA bfin_sir_dma_tx_chars(dev); #endif bfin_sir_enable_tx(port); dev->trans_start = jiffies; } static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); int speed = irda_get_next_speed(skb); netif_stop_queue(dev); self->mtt = irda_get_mtt(skb); if (speed != self->speed && speed != -1) self->newspeed = speed; self->tx_buff.data = self->tx_buff.head; if (skb->len == 0) self->tx_buff.len = 0; else self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, self->tx_buff.truesize); schedule_work(&self->work); dev_kfree_skb(skb); return 0; } static int bfin_sir_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) { struct if_irda_req *rq = (struct if_irda_req *)ifreq; struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; int ret = 0; switch (cmd) { case SIOCSBANDWIDTH: if (capable(CAP_NET_ADMIN)) { if (self->open) { ret = bfin_sir_set_speed(port, rq->ifr_baudrate); bfin_sir_enable_rx(port); } else { dev_warn(&dev->dev, "SIOCSBANDWIDTH: !netif_running\n"); ret = 0; } } break; case SIOCSMEDIABUSY: ret = -EPERM; if (capable(CAP_NET_ADMIN)) { irda_device_set_media_busy(dev, TRUE); ret = 0; } break; case SIOCGRECEIVING: rq->ifr_receiving = bfin_sir_is_receiving(dev); break; default: ret = -EOPNOTSUPP; break; } return ret; } static struct net_device_stats *bfin_sir_stats(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); return &self->stats; } static int bfin_sir_open(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; int err = -ENOMEM; self->newspeed = 0; self->speed = 9600; 
spin_lock_init(&self->lock); err = bfin_sir_startup(port, dev); if (err) goto err_startup; bfin_sir_set_speed(port, 9600); self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME); if (!self->irlap) goto err_irlap; INIT_WORK(&self->work, bfin_sir_send_work); /* * Now enable the interrupt then start the queue */ self->open = 1; bfin_sir_enable_rx(port); netif_start_queue(dev); return 0; err_irlap: self->open = 0; bfin_sir_shutdown(port, dev); err_startup: return err; } static int bfin_sir_stop(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); flush_work(&self->work); bfin_sir_shutdown(self->sir_port, dev); if (self->rxskb) { dev_kfree_skb(self->rxskb); self->rxskb = NULL; } /* Stop IrLAP */ if (self->irlap) { irlap_close(self->irlap); self->irlap = NULL; } netif_stop_queue(dev); self->open = 0; return 0; } static int bfin_sir_init_iobuf(iobuff_t *io, int size) { io->head = kmalloc(size, GFP_KERNEL); if (!io->head) return -ENOMEM; io->truesize = size; io->in_frame = FALSE; io->state = OUTSIDE_FRAME; io->data = io->head; return 0; } static const struct net_device_ops bfin_sir_ndo = { .ndo_open = bfin_sir_open, .ndo_stop = bfin_sir_stop, .ndo_start_xmit = bfin_sir_hard_xmit, .ndo_do_ioctl = bfin_sir_ioctl, .ndo_get_stats = bfin_sir_stats, }; static int __devinit bfin_sir_probe(struct platform_device *pdev) { struct net_device *dev; struct bfin_sir_self *self; unsigned int baudrate_mask; struct bfin_sir_port *sir_port; int err; if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(per) && \ per[pdev->id][3] == pdev->id) { err = peripheral_request_list(per[pdev->id], DRIVER_NAME); if (err) return err; } else { dev_err(&pdev->dev, "Invalid pdev id, please check board file\n"); return -ENODEV; } err = -ENOMEM; sir_port = kmalloc(sizeof(*sir_port), GFP_KERNEL); if (!sir_port) goto err_mem_0; bfin_sir_init_ports(sir_port, pdev); dev = alloc_irdadev(sizeof(*self)); if (!dev) goto err_mem_1; self = netdev_priv(dev); self->dev = &pdev->dev; self->sir_port = sir_port; 
sir_port->dev = dev; err = bfin_sir_init_iobuf(&self->rx_buff, IRDA_SKB_MAX_MTU); if (err) goto err_mem_2; err = bfin_sir_init_iobuf(&self->tx_buff, IRDA_SIR_MAX_FRAME); if (err) goto err_mem_3; dev->netdev_ops = &bfin_sir_ndo; dev->irq = sir_port->irq; irda_init_max_qos_capabilies(&self->qos); baudrate_mask = IR_9600; switch (max_rate) { case 115200: baudrate_mask |= IR_115200; case 57600: baudrate_mask |= IR_57600; case 38400: baudrate_mask |= IR_38400; case 19200: baudrate_mask |= IR_19200; case 9600: break; default: dev_warn(&pdev->dev, "Invalid maximum baud rate, using 9600\n"); } self->qos.baud_rate.bits &= baudrate_mask; self->qos.min_turn_time.bits = 1; /* 10 ms or more */ irda_qos_bits_to_value(&self->qos); err = register_netdev(dev); if (err) { kfree(self->tx_buff.head); err_mem_3: kfree(self->rx_buff.head); err_mem_2: free_netdev(dev); err_mem_1: kfree(sir_port); err_mem_0: peripheral_free_list(per[pdev->id]); } else platform_set_drvdata(pdev, sir_port); return err; } static int __devexit bfin_sir_remove(struct platform_device *pdev) { struct bfin_sir_port *sir_port; struct net_device *dev = NULL; struct bfin_sir_self *self; sir_port = platform_get_drvdata(pdev); if (!sir_port) return 0; dev = sir_port->dev; self = netdev_priv(dev); unregister_netdev(dev); kfree(self->tx_buff.head); kfree(self->rx_buff.head); free_netdev(dev); kfree(sir_port); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver bfin_ir_driver = { .probe = bfin_sir_probe, .remove = __devexit_p(bfin_sir_remove), .suspend = bfin_sir_suspend, .resume = bfin_sir_resume, .driver = { .name = DRIVER_NAME, }, }; module_platform_driver(bfin_ir_driver); module_param(max_rate, int, 0); MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)"); MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>"); MODULE_DESCRIPTION("Blackfin IrDA driver"); MODULE_LICENSE("GPL");
gpl-2.0
liuxingghost/android_kernel_google_omap
drivers/media/common/tuners/tda9887.c
8030
18564
#include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/types.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include "tuner-i2c.h" #include "tda9887.h" /* Chips: TDA9885 (PAL, NTSC) TDA9886 (PAL, SECAM, NTSC) TDA9887 (PAL, SECAM, NTSC, FM Radio) Used as part of several tuners */ static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); static DEFINE_MUTEX(tda9887_list_mutex); static LIST_HEAD(hybrid_tuner_instance_list); struct tda9887_priv { struct tuner_i2c_props i2c_props; struct list_head hybrid_tuner_instance_list; unsigned char data[4]; unsigned int config; unsigned int mode; unsigned int audmode; v4l2_std_id std; bool standby; }; /* ---------------------------------------------------------------------- */ #define UNSET (-1U) struct tvnorm { v4l2_std_id std; char *name; unsigned char b; unsigned char c; unsigned char e; }; /* ---------------------------------------------------------------------- */ // // TDA defines // //// first reg (b) #define cVideoTrapBypassOFF 0x00 // bit b0 #define cVideoTrapBypassON 0x01 // bit b0 #define cAutoMuteFmInactive 0x00 // bit b1 #define cAutoMuteFmActive 0x02 // bit b1 #define cIntercarrier 0x00 // bit b2 #define cQSS 0x04 // bit b2 #define cPositiveAmTV 0x00 // bit b3:4 #define cFmRadio 0x08 // bit b3:4 #define cNegativeFmTV 0x10 // bit b3:4 #define cForcedMuteAudioON 0x20 // bit b5 #define cForcedMuteAudioOFF 0x00 // bit b5 #define cOutputPort1Active 0x00 // bit b6 #define cOutputPort1Inactive 0x40 // bit b6 #define cOutputPort2Active 0x00 // bit b7 #define cOutputPort2Inactive 0x80 // bit b7 //// second reg (c) #define cDeemphasisOFF 0x00 // bit c5 #define cDeemphasisON 0x20 // bit c5 #define cDeemphasis75 0x00 // bit c6 #define cDeemphasis50 0x40 // bit c6 #define cAudioGain0 0x00 // bit c7 #define cAudioGain6 0x80 // bit 
c7 #define cTopMask 0x1f // bit c0:4 #define cTopDefault 0x10 // bit c0:4 //// third reg (e) #define cAudioIF_4_5 0x00 // bit e0:1 #define cAudioIF_5_5 0x01 // bit e0:1 #define cAudioIF_6_0 0x02 // bit e0:1 #define cAudioIF_6_5 0x03 // bit e0:1 #define cVideoIFMask 0x1c // bit e2:4 /* Video IF selection in TV Mode (bit B3=0) */ #define cVideoIF_58_75 0x00 // bit e2:4 #define cVideoIF_45_75 0x04 // bit e2:4 #define cVideoIF_38_90 0x08 // bit e2:4 #define cVideoIF_38_00 0x0C // bit e2:4 #define cVideoIF_33_90 0x10 // bit e2:4 #define cVideoIF_33_40 0x14 // bit e2:4 #define cRadioIF_45_75 0x18 // bit e2:4 #define cRadioIF_38_90 0x1C // bit e2:4 /* IF1 selection in Radio Mode (bit B3=1) */ #define cRadioIF_33_30 0x00 // bit e2,4 (also 0x10,0x14) #define cRadioIF_41_30 0x04 // bit e2,4 /* Output of AFC pin in radio mode when bit E7=1 */ #define cRadioAGC_SIF 0x00 // bit e3 #define cRadioAGC_FM 0x08 // bit e3 #define cTunerGainNormal 0x00 // bit e5 #define cTunerGainLow 0x20 // bit e5 #define cGating_18 0x00 // bit e6 #define cGating_36 0x40 // bit e6 #define cAgcOutON 0x80 // bit e7 #define cAgcOutOFF 0x00 // bit e7 /* ---------------------------------------------------------------------- */ static struct tvnorm tvnorms[] = { { .std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H | V4L2_STD_PAL_N, .name = "PAL-BGHN", .b = ( cNegativeFmTV | cQSS ), .c = ( cDeemphasisON | cDeemphasis50 | cTopDefault), .e = ( cGating_36 | cAudioIF_5_5 | cVideoIF_38_90 ), },{ .std = V4L2_STD_PAL_I, .name = "PAL-I", .b = ( cNegativeFmTV | cQSS ), .c = ( cDeemphasisON | cDeemphasis50 | cTopDefault), .e = ( cGating_36 | cAudioIF_6_0 | cVideoIF_38_90 ), },{ .std = V4L2_STD_PAL_DK, .name = "PAL-DK", .b = ( cNegativeFmTV | cQSS ), .c = ( cDeemphasisON | cDeemphasis50 | cTopDefault), .e = ( cGating_36 | cAudioIF_6_5 | cVideoIF_38_90 ), },{ .std = V4L2_STD_PAL_M | V4L2_STD_PAL_Nc, .name = "PAL-M/Nc", .b = ( cNegativeFmTV | cQSS ), .c = ( cDeemphasisON | cDeemphasis75 | cTopDefault), .e = ( cGating_36 | 
cAudioIF_4_5 | cVideoIF_45_75 ), },{ .std = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, .name = "SECAM-BGH", .b = ( cNegativeFmTV | cQSS ), .c = ( cTopDefault), .e = ( cAudioIF_5_5 | cVideoIF_38_90 ), },{ .std = V4L2_STD_SECAM_L, .name = "SECAM-L", .b = ( cPositiveAmTV | cQSS ), .c = ( cTopDefault), .e = ( cGating_36 | cAudioIF_6_5 | cVideoIF_38_90 ), },{ .std = V4L2_STD_SECAM_LC, .name = "SECAM-L'", .b = ( cOutputPort2Inactive | cPositiveAmTV | cQSS ), .c = ( cTopDefault), .e = ( cGating_36 | cAudioIF_6_5 | cVideoIF_33_90 ), },{ .std = V4L2_STD_SECAM_DK, .name = "SECAM-DK", .b = ( cNegativeFmTV | cQSS ), .c = ( cDeemphasisON | cDeemphasis50 | cTopDefault), .e = ( cGating_36 | cAudioIF_6_5 | cVideoIF_38_90 ), },{ .std = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR, .name = "NTSC-M", .b = ( cNegativeFmTV | cQSS ), .c = ( cDeemphasisON | cDeemphasis75 | cTopDefault), .e = ( cGating_36 | cAudioIF_4_5 | cVideoIF_45_75 ), },{ .std = V4L2_STD_NTSC_M_JP, .name = "NTSC-M-JP", .b = ( cNegativeFmTV | cQSS ), .c = ( cDeemphasisON | cDeemphasis50 | cTopDefault), .e = ( cGating_36 | cAudioIF_4_5 | cVideoIF_58_75 ), } }; static struct tvnorm radio_stereo = { .name = "Radio Stereo", .b = ( cFmRadio | cQSS ), .c = ( cDeemphasisOFF | cAudioGain6 | cTopDefault), .e = ( cTunerGainLow | cAudioIF_5_5 | cRadioIF_38_90 ), }; static struct tvnorm radio_mono = { .name = "Radio Mono", .b = ( cFmRadio | cQSS ), .c = ( cDeemphasisON | cDeemphasis75 | cTopDefault), .e = ( cTunerGainLow | cAudioIF_5_5 | cRadioIF_38_90 ), }; /* ---------------------------------------------------------------------- */ static void dump_read_message(struct dvb_frontend *fe, unsigned char *buf) { struct tda9887_priv *priv = fe->analog_demod_priv; static char *afc[16] = { "- 12.5 kHz", "- 37.5 kHz", "- 62.5 kHz", "- 87.5 kHz", "-112.5 kHz", "-137.5 kHz", "-162.5 kHz", "-187.5 kHz [min]", "+187.5 kHz [max]", "+162.5 kHz", "+137.5 kHz", "+112.5 kHz", "+ 87.5 kHz", "+ 62.5 kHz", "+ 37.5 kHz", "+ 12.5 kHz", }; 
tuner_info("read: 0x%2x\n", buf[0]); tuner_info(" after power on : %s\n", (buf[0] & 0x01) ? "yes" : "no"); tuner_info(" afc : %s\n", afc[(buf[0] >> 1) & 0x0f]); tuner_info(" fmif level : %s\n", (buf[0] & 0x20) ? "high" : "low"); tuner_info(" afc window : %s\n", (buf[0] & 0x40) ? "in" : "out"); tuner_info(" vfi level : %s\n", (buf[0] & 0x80) ? "high" : "low"); } static void dump_write_message(struct dvb_frontend *fe, unsigned char *buf) { struct tda9887_priv *priv = fe->analog_demod_priv; static char *sound[4] = { "AM/TV", "FM/radio", "FM/TV", "FM/radio" }; static char *adjust[32] = { "-16", "-15", "-14", "-13", "-12", "-11", "-10", "-9", "-8", "-7", "-6", "-5", "-4", "-3", "-2", "-1", "0", "+1", "+2", "+3", "+4", "+5", "+6", "+7", "+8", "+9", "+10", "+11", "+12", "+13", "+14", "+15" }; static char *deemph[4] = { "no", "no", "75", "50" }; static char *carrier[4] = { "4.5 MHz", "5.5 MHz", "6.0 MHz", "6.5 MHz / AM" }; static char *vif[8] = { "58.75 MHz", "45.75 MHz", "38.9 MHz", "38.0 MHz", "33.9 MHz", "33.4 MHz", "45.75 MHz + pin13", "38.9 MHz + pin13", }; static char *rif[4] = { "44 MHz", "52 MHz", "52 MHz", "44 MHz", }; tuner_info("write: byte B 0x%02x\n", buf[1]); tuner_info(" B0 video mode : %s\n", (buf[1] & 0x01) ? "video trap" : "sound trap"); tuner_info(" B1 auto mute fm : %s\n", (buf[1] & 0x02) ? "yes" : "no"); tuner_info(" B2 carrier mode : %s\n", (buf[1] & 0x04) ? "QSS" : "Intercarrier"); tuner_info(" B3-4 tv sound/radio : %s\n", sound[(buf[1] & 0x18) >> 3]); tuner_info(" B5 force mute audio: %s\n", (buf[1] & 0x20) ? "yes" : "no"); tuner_info(" B6 output port 1 : %s\n", (buf[1] & 0x40) ? "high (inactive)" : "low (active)"); tuner_info(" B7 output port 2 : %s\n", (buf[1] & 0x80) ? "high (inactive)" : "low (active)"); tuner_info("write: byte C 0x%02x\n", buf[2]); tuner_info(" C0-4 top adjustment : %s dB\n", adjust[buf[2] & 0x1f]); tuner_info(" C5-6 de-emphasis : %s\n", deemph[(buf[2] & 0x60) >> 5]); tuner_info(" C7 audio gain : %s\n", (buf[2] & 0x80) ? 
"-6" : "0"); tuner_info("write: byte E 0x%02x\n", buf[3]); tuner_info(" E0-1 sound carrier : %s\n", carrier[(buf[3] & 0x03)]); tuner_info(" E6 l pll gating : %s\n", (buf[3] & 0x40) ? "36" : "13"); if (buf[1] & 0x08) { /* radio */ tuner_info(" E2-4 video if : %s\n", rif[(buf[3] & 0x0c) >> 2]); tuner_info(" E7 vif agc output : %s\n", (buf[3] & 0x80) ? ((buf[3] & 0x10) ? "fm-agc radio" : "sif-agc radio") : "fm radio carrier afc"); } else { /* video */ tuner_info(" E2-4 video if : %s\n", vif[(buf[3] & 0x1c) >> 2]); tuner_info(" E5 tuner gain : %s\n", (buf[3] & 0x80) ? ((buf[3] & 0x20) ? "external" : "normal") : ((buf[3] & 0x20) ? "minimum" : "normal")); tuner_info(" E7 vif agc output : %s\n", (buf[3] & 0x80) ? ((buf[3] & 0x20) ? "pin3 port, pin22 vif agc out" : "pin22 port, pin3 vif acg ext in") : "pin3+pin22 port"); } tuner_info("--\n"); } /* ---------------------------------------------------------------------- */ static int tda9887_set_tvnorm(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; struct tvnorm *norm = NULL; char *buf = priv->data; int i; if (priv->mode == V4L2_TUNER_RADIO) { if (priv->audmode == V4L2_TUNER_MODE_MONO) norm = &radio_mono; else norm = &radio_stereo; } else { for (i = 0; i < ARRAY_SIZE(tvnorms); i++) { if (tvnorms[i].std & priv->std) { norm = tvnorms+i; break; } } } if (NULL == norm) { tuner_dbg("Unsupported tvnorm entry - audio muted\n"); return -1; } tuner_dbg("configure for: %s\n", norm->name); buf[1] = norm->b; buf[2] = norm->c; buf[3] = norm->e; return 0; } static unsigned int port1 = UNSET; static unsigned int port2 = UNSET; static unsigned int qss = UNSET; static unsigned int adjust = UNSET; module_param(port1, int, 0644); module_param(port2, int, 0644); module_param(qss, int, 0644); module_param(adjust, int, 0644); static int tda9887_set_insmod(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; char *buf = priv->data; if (UNSET != port1) { if (port1) buf[1] |= 
cOutputPort1Inactive; else buf[1] &= ~cOutputPort1Inactive; } if (UNSET != port2) { if (port2) buf[1] |= cOutputPort2Inactive; else buf[1] &= ~cOutputPort2Inactive; } if (UNSET != qss) { if (qss) buf[1] |= cQSS; else buf[1] &= ~cQSS; } if (adjust < 0x20) { buf[2] &= ~cTopMask; buf[2] |= adjust; } return 0; } static int tda9887_do_config(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; char *buf = priv->data; if (priv->config & TDA9887_PORT1_ACTIVE) buf[1] &= ~cOutputPort1Inactive; if (priv->config & TDA9887_PORT1_INACTIVE) buf[1] |= cOutputPort1Inactive; if (priv->config & TDA9887_PORT2_ACTIVE) buf[1] &= ~cOutputPort2Inactive; if (priv->config & TDA9887_PORT2_INACTIVE) buf[1] |= cOutputPort2Inactive; if (priv->config & TDA9887_QSS) buf[1] |= cQSS; if (priv->config & TDA9887_INTERCARRIER) buf[1] &= ~cQSS; if (priv->config & TDA9887_AUTOMUTE) buf[1] |= cAutoMuteFmActive; if (priv->config & TDA9887_DEEMPHASIS_MASK) { buf[2] &= ~0x60; switch (priv->config & TDA9887_DEEMPHASIS_MASK) { case TDA9887_DEEMPHASIS_NONE: buf[2] |= cDeemphasisOFF; break; case TDA9887_DEEMPHASIS_50: buf[2] |= cDeemphasisON | cDeemphasis50; break; case TDA9887_DEEMPHASIS_75: buf[2] |= cDeemphasisON | cDeemphasis75; break; } } if (priv->config & TDA9887_TOP_SET) { buf[2] &= ~cTopMask; buf[2] |= (priv->config >> 8) & cTopMask; } if ((priv->config & TDA9887_INTERCARRIER_NTSC) && (priv->std & V4L2_STD_NTSC)) buf[1] &= ~cQSS; if (priv->config & TDA9887_GATING_18) buf[3] &= ~cGating_36; if (priv->mode == V4L2_TUNER_RADIO) { if (priv->config & TDA9887_RIF_41_3) { buf[3] &= ~cVideoIFMask; buf[3] |= cRadioIF_41_30; } if (priv->config & TDA9887_GAIN_NORMAL) buf[3] &= ~cTunerGainLow; } return 0; } /* ---------------------------------------------------------------------- */ static int tda9887_status(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; unsigned char buf[1]; int rc; memset(buf,0,sizeof(buf)); if (1 != (rc = 
tuner_i2c_xfer_recv(&priv->i2c_props,buf,1))) tuner_info("i2c i/o error: rc == %d (should be 1)\n", rc); dump_read_message(fe, buf); return 0; } static void tda9887_configure(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; int rc; memset(priv->data,0,sizeof(priv->data)); tda9887_set_tvnorm(fe); /* A note on the port settings: These settings tend to depend on the specifics of the board. By default they are set to inactive (bit value 1) by this driver, overwriting any changes made by the tvnorm. This means that it is the responsibility of the module using the tda9887 to set these values in case of changes in the tvnorm. In many cases port 2 should be made active (0) when selecting SECAM-L, and port 2 should remain inactive (1) for SECAM-L'. For the other standards the tda9887 application note says that the ports should be set to active (0), but, again, that may differ depending on the precise hardware configuration. */ priv->data[1] |= cOutputPort1Inactive; priv->data[1] |= cOutputPort2Inactive; tda9887_do_config(fe); tda9887_set_insmod(fe); if (priv->standby) priv->data[1] |= cForcedMuteAudioON; tuner_dbg("writing: b=0x%02x c=0x%02x e=0x%02x\n", priv->data[1], priv->data[2], priv->data[3]); if (debug > 1) dump_write_message(fe, priv->data); if (4 != (rc = tuner_i2c_xfer_send(&priv->i2c_props,priv->data,4))) tuner_info("i2c i/o error: rc == %d (should be 4)\n", rc); if (debug > 2) { msleep_interruptible(1000); tda9887_status(fe); } } /* ---------------------------------------------------------------------- */ static void tda9887_tuner_status(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; tuner_info("Data bytes: b=0x%02x c=0x%02x e=0x%02x\n", priv->data[1], priv->data[2], priv->data[3]); } static int tda9887_get_afc(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; static int AFC_BITS_2_kHz[] = { -12500, -37500, -62500, -97500, -112500, -137500, -162500, -187500, 187500, 162500, 
137500, 112500, 97500 , 62500, 37500 , 12500 }; int afc=0; __u8 reg = 0; if (1 == tuner_i2c_xfer_recv(&priv->i2c_props,&reg,1)) afc = AFC_BITS_2_kHz[(reg>>1)&0x0f]; return afc; } static void tda9887_standby(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; priv->standby = true; tda9887_configure(fe); } static void tda9887_set_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct tda9887_priv *priv = fe->analog_demod_priv; priv->standby = false; priv->mode = params->mode; priv->audmode = params->audmode; priv->std = params->std; tda9887_configure(fe); } static int tda9887_set_config(struct dvb_frontend *fe, void *priv_cfg) { struct tda9887_priv *priv = fe->analog_demod_priv; priv->config = *(unsigned int *)priv_cfg; tda9887_configure(fe); return 0; } static void tda9887_release(struct dvb_frontend *fe) { struct tda9887_priv *priv = fe->analog_demod_priv; mutex_lock(&tda9887_list_mutex); if (priv) hybrid_tuner_release_state(priv); mutex_unlock(&tda9887_list_mutex); fe->analog_demod_priv = NULL; } static struct analog_demod_ops tda9887_ops = { .info = { .name = "tda9887", }, .set_params = tda9887_set_params, .standby = tda9887_standby, .tuner_status = tda9887_tuner_status, .get_afc = tda9887_get_afc, .release = tda9887_release, .set_config = tda9887_set_config, }; struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c_adap, u8 i2c_addr) { struct tda9887_priv *priv = NULL; int instance; mutex_lock(&tda9887_list_mutex); instance = hybrid_tuner_request_state(struct tda9887_priv, priv, hybrid_tuner_instance_list, i2c_adap, i2c_addr, "tda9887"); switch (instance) { case 0: mutex_unlock(&tda9887_list_mutex); return NULL; case 1: fe->analog_demod_priv = priv; priv->standby = true; tuner_info("tda988[5/6/7] found\n"); break; default: fe->analog_demod_priv = priv; break; } mutex_unlock(&tda9887_list_mutex); memcpy(&fe->ops.analog_ops, &tda9887_ops, sizeof(struct analog_demod_ops)); return fe; } 
EXPORT_SYMBOL_GPL(tda9887_attach); MODULE_LICENSE("GPL"); /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
Hooks405/kernel_j3ltespr
lib/timerqueue.c
8286
3178
/* * Generic Timer-queue * * Manages a simple queue of timers, ordered by expiration time. * Uses rbtrees for quick list adds and expiration. * * NOTE: All of the following functions need to be serialized * to avoid races. No locking is done by this library code. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/bug.h> #include <linux/timerqueue.h> #include <linux/rbtree.h> #include <linux/export.h> /** * timerqueue_add - Adds timer to timerqueue. * * @head: head of timerqueue * @node: timer node to be added * * Adds the timer node to the timerqueue, sorted by the * node's expires value. */ void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) { struct rb_node **p = &head->head.rb_node; struct rb_node *parent = NULL; struct timerqueue_node *ptr; /* Make sure we don't add nodes that are already added */ WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node)); while (*p) { parent = *p; ptr = rb_entry(parent, struct timerqueue_node, node); if (node->expires.tv64 < ptr->expires.tv64) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &head->head); if (!head->next || node->expires.tv64 < head->next->expires.tv64) head->next = node; } EXPORT_SYMBOL_GPL(timerqueue_add); /** * timerqueue_del - Removes a timer from the timerqueue. 
* * @head: head of timerqueue * @node: timer node to be removed * * Removes the timer node from the timerqueue. */ void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) { WARN_ON_ONCE(RB_EMPTY_NODE(&node->node)); /* update next pointer */ if (head->next == node) { struct rb_node *rbn = rb_next(&node->node); head->next = rbn ? rb_entry(rbn, struct timerqueue_node, node) : NULL; } rb_erase(&node->node, &head->head); RB_CLEAR_NODE(&node->node); } EXPORT_SYMBOL_GPL(timerqueue_del); /** * timerqueue_iterate_next - Returns the timer after the provided timer * * @node: Pointer to a timer. * * Provides the timer that is after the given node. This is used, when * necessary, to iterate through the list of timers in a timer list * without modifying the list. */ struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node) { struct rb_node *next; if (!node) return NULL; next = rb_next(&node->node); if (!next) return NULL; return container_of(next, struct timerqueue_node, node); } EXPORT_SYMBOL_GPL(timerqueue_iterate_next);
gpl-2.0
fat-tire/omap
arch/mips/bcm63xx/dev-dsp.c
9054
1449
/* * Broadcom BCM63xx VoIP DSP registration * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <bcm63xx_cpu.h> #include <bcm63xx_dev_dsp.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> static struct resource voip_dsp_resources[] = { { .start = -1, /* filled at runtime */ .end = -1, /* filled at runtime */ .flags = IORESOURCE_MEM, }, { .start = -1, /* filled at runtime */ .flags = IORESOURCE_IRQ, }, }; static struct platform_device bcm63xx_voip_dsp_device = { .name = "bcm63xx-voip-dsp", .id = 0, .num_resources = ARRAY_SIZE(voip_dsp_resources), .resource = voip_dsp_resources, }; int __init bcm63xx_dsp_register(const struct bcm63xx_dsp_platform_data *pd) { struct bcm63xx_dsp_platform_data *dpd; u32 val; /* Get the memory window */ val = bcm_mpi_readl(MPI_CSBASE_REG(pd->cs - 1)); val &= MPI_CSBASE_BASE_MASK; voip_dsp_resources[0].start = val; voip_dsp_resources[0].end = val + 0xFFFFFFF; voip_dsp_resources[1].start = pd->ext_irq; /* copy given platform data */ dpd = bcm63xx_voip_dsp_device.dev.platform_data; memcpy(dpd, pd, sizeof (*pd)); return platform_device_register(&bcm63xx_voip_dsp_device); }
gpl-2.0
SimpleAOSP-Kernel/kernel_flounder
arch/c6x/kernel/sys_c6x.c
9054
1932
/* * Port on Texas Instruments TMS320C6x architecture * * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <asm/syscalls.h> #ifdef CONFIG_ACCESS_CHECK int _access_ok(unsigned long addr, unsigned long size) { if (!size) return 1; if (!addr || addr > (0xffffffffUL - (size - 1))) goto _bad_access; if (segment_eq(get_fs(), KERNEL_DS)) return 1; if (memory_start <= addr && (addr + size - 1) < memory_end) return 1; _bad_access: pr_debug("Bad access attempt: pid[%d] addr[%08lx] size[0x%lx]\n", current->pid, addr, size); return 0; } EXPORT_SYMBOL(_access_ok); #endif /* sys_cache_sync -- sync caches over given range */ asmlinkage int sys_cache_sync(unsigned long s, unsigned long e) { L1D_cache_block_writeback_invalidate(s, e); L1P_cache_block_invalidate(s, e); return 0; } /* Provide the actual syscall number to call mapping. */ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), /* * Use trampolines */ #define sys_pread64 sys_pread_c6x #define sys_pwrite64 sys_pwrite_c6x #define sys_truncate64 sys_truncate64_c6x #define sys_ftruncate64 sys_ftruncate64_c6x #define sys_fadvise64 sys_fadvise64_c6x #define sys_fadvise64_64 sys_fadvise64_64_c6x #define sys_fallocate sys_fallocate_c6x /* Use sys_mmap_pgoff directly */ #define sys_mmap2 sys_mmap_pgoff /* * Note that we can't include <linux/unistd.h> here since the header * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well. */ void *sys_call_table[__NR_syscalls] = { [0 ... __NR_syscalls-1] = sys_ni_syscall, #include <asm/unistd.h> };
gpl-2.0
gearslam/JB_LS970ZVC
arch/arm/mach-omap1/i2c.c
9054
1025
/* * Helper module for board specific I2C bus registration * * Copyright (C) 2009 Nokia Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <plat/i2c.h> #include <plat/mux.h> #include <plat/cpu.h> void __init omap1_i2c_mux_pins(int bus_id) { if (cpu_is_omap7xx()) { omap_cfg_reg(I2C_7XX_SDA); omap_cfg_reg(I2C_7XX_SCL); } else { omap_cfg_reg(I2C_SDA); omap_cfg_reg(I2C_SCL); } }
gpl-2.0
xcaliburinhand/I9000-Reoriented-for-I897-Froyo
arch/powerpc/kernel/signal.c
607
5328
/* * Common signal handling code for both 32 and 64 bits * * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Coproration * Extracted from signal_32.c and signal_64.c * * This file is subject to the terms and conditions of the GNU General * Public License. See the file README.legal in the main directory of * this archive for more details. */ #include <linux/tracehook.h> #include <linux/signal.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include "signal.h" /* Log an error when sending an unhandled signal to a process. Controlled * through debug.exception-trace sysctl. */ int show_unhandled_signals = 0; /* * Allocate space for the signal frame */ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, int is_32) { unsigned long oldsp, newsp; /* Default to using normal stack */ oldsp = get_clean_sp(regs, is_32); /* Check for alt stack */ if ((ka->sa.sa_flags & SA_ONSTACK) && current->sas_ss_size && !on_sig_stack(oldsp)) oldsp = (current->sas_ss_sp + current->sas_ss_size); /* Get aligned frame */ newsp = (oldsp - frame_size) & ~0xFUL; /* Check access */ if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp)) return NULL; return (void __user *)newsp; } /* * Restore the user process's signal mask */ void restore_sigmask(sigset_t *set) { sigdelsetmask(set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = *set; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); } static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) { unsigned long ret = regs->gpr[3]; int restart = 1; /* syscall ? */ if (TRAP(regs) != 0x0C00) return; /* error signalled ? */ if (!(regs->ccr & 0x10000000)) return; switch (ret) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: /* ERESTARTNOHAND means that the syscall should only be * restarted if there was no handler for the signal, and since * we only get here if there is a handler, we dont restart. 
*/ restart = !has_handler; break; case ERESTARTSYS: /* ERESTARTSYS means to restart the syscall if there is no * handler or the handler was registered with SA_RESTART */ restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0; break; case ERESTARTNOINTR: /* ERESTARTNOINTR means that the syscall should be * called again after the signal handler returns. */ break; default: return; } if (restart) { if (ret == ERESTART_RESTARTBLOCK) regs->gpr[0] = __NR_restart_syscall; else regs->gpr[3] = regs->orig_gpr3; regs->nip -= 4; regs->result = 0; } else { regs->result = -EINTR; regs->gpr[3] = EINTR; regs->ccr |= 0x10000000; } } static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) { siginfo_t info; int signr; struct k_sigaction ka; int ret; int is32 = is_32bit_task(); if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK) oldset = &current->saved_sigmask; else if (!oldset) oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); /* Is there any syscall restart business here ? */ check_syscall_restart(regs, &ka, signr > 0); if (signr <= 0) { struct thread_info *ti = current_thread_info(); /* No signal to deliver -- put the saved sigmask back */ if (ti->local_flags & _TLF_RESTORE_SIGMASK) { ti->local_flags &= ~_TLF_RESTORE_SIGMASK; sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } return 0; /* no signals delivered */ } /* * Reenable the DABR before delivering the signal to * user space. The DABR will have been cleared if it * triggered inside the kernel. 
*/ if (current->thread.dabr) { set_dabr(current->thread.dabr); #if defined(CONFIG_BOOKE) mtspr(SPRN_DBCR0, current->thread.dbcr0); #endif } if (is32) { if (ka.sa.sa_flags & SA_SIGINFO) ret = handle_rt_signal32(signr, &ka, &info, oldset, regs); else ret = handle_signal32(signr, &ka, &info, oldset, regs); } else { ret = handle_rt_signal64(signr, &ka, &info, oldset, regs); } if (ret) { spin_lock_irq(&current->sighand->siglock); sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask); if (!(ka.sa.sa_flags & SA_NODEFER)) sigaddset(&current->blocked, signr); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); /* * A signal was successfully delivered; the saved sigmask is in * its frame, and we can clear the TLF_RESTORE_SIGMASK flag. */ current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK; /* * Let tracing know that we've done the handler setup. */ tracehook_signal_handler(signr, &info, &ka, regs, test_thread_flag(TIF_SINGLESTEP)); } return ret; } void do_signal(struct pt_regs *regs, unsigned long thread_info_flags) { if (thread_info_flags & _TIF_SIGPENDING) do_signal_pending(NULL, regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } } long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long r5, unsigned long r6, unsigned long r7, unsigned long r8, struct pt_regs *regs) { return do_sigaltstack(uss, uoss, regs->gpr[1]); }
gpl-2.0
novic/AniDroid-Hardened-Kernel
net/l2tp/l2tp_ppp.c
863
47192
/***************************************************************************** * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets * * PPPoX --- Generic PPP encapsulation socket family * PPPoL2TP --- PPP over L2TP (RFC 2661) * * Version: 2.0.0 * * Authors: James Chapman (jchapman@katalix.com) * * Based on original work by Martijn van Oosterhout <kleptog@svana.org> * * License: * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ /* This driver handles only L2TP data frames; control frames are handled by a * userspace application. * * To send data in an L2TP session, userspace opens a PPPoL2TP socket and * attaches it to a bound UDP socket with local tunnel_id / session_id and * peer tunnel_id / session_id set. Data can then be sent or received using * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket * can be read or modified using ioctl() or [gs]etsockopt() calls. * * When a PPPoL2TP socket is connected with local and peer session_id values * zero, the socket is treated as a special tunnel management socket. 
* * Here's example userspace code to create a socket for sending/receiving data * over an L2TP session:- * * struct sockaddr_pppol2tp sax; * int fd; * int session_fd; * * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP); * * sax.sa_family = AF_PPPOX; * sax.sa_protocol = PX_PROTO_OL2TP; * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr; * sax.pppol2tp.addr.sin_port = addr->sin_port; * sax.pppol2tp.addr.sin_family = AF_INET; * sax.pppol2tp.s_tunnel = tunnel_id; * sax.pppol2tp.s_session = session_id; * sax.pppol2tp.d_tunnel = peer_tunnel_id; * sax.pppol2tp.d_session = peer_session_id; * * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax)); * * A pppd plugin that allows PPP traffic to be carried over L2TP using * this driver is available from the OpenL2TP project at * http://openl2tp.sourceforge.net. */ #include <linux/module.h> #include <linux/string.h> #include <linux/list.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/jiffies.h> #include <linux/netdevice.h> #include <linux/net.h> #include <linux/inetdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/udp.h> #include <linux/if_pppox.h> #include <linux/if_pppol2tp.h> #include <net/sock.h> #include <linux/ppp_channel.h> #include <linux/ppp_defs.h> #include <linux/if_ppp.h> #include <linux/file.h> #include <linux/hash.h> #include <linux/sort.h> #include <linux/proc_fs.h> #include <linux/l2tp.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/dst.h> #include <net/ip.h> #include <net/udp.h> #include <net/xfrm.h> #include <asm/byteorder.h> #include <asm/atomic.h> #include "l2tp_core.h" #define PPPOL2TP_DRV_VERSION "V2.0" /* Space for UDP, L2TP and PPP headers */ #define PPPOL2TP_HEADER_OVERHEAD 40 
#define PRINTK(_mask, _type, _lvl, _fmt, args...) \ do { \ if ((_mask) & (_type)) \ printk(_lvl "PPPOL2TP: " _fmt, ##args); \ } while (0) /* Number of bytes to build transmit L2TP headers. * Unfortunately the size is different depending on whether sequence numbers * are enabled. */ #define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10 #define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6 /* Private data of each session. This data lives at the end of struct * l2tp_session, referenced via session->priv[]. */ struct pppol2tp_session { int owner; /* pid that opened the socket */ struct sock *sock; /* Pointer to the session * PPPoX socket */ struct sock *tunnel_sock; /* Pointer to the tunnel UDP * socket */ int flags; /* accessed by PPPIOCGFLAGS. * Unused. */ }; static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb); static const struct ppp_channel_ops pppol2tp_chan_ops = { .start_xmit = pppol2tp_xmit, }; static const struct proto_ops pppol2tp_ops; /* Helpers to obtain tunnel/session contexts from sockets. */ static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) { struct l2tp_session *session; if (sk == NULL) return NULL; sock_hold(sk); session = (struct l2tp_session *)(sk->sk_user_data); if (session == NULL) { sock_put(sk); goto out; } BUG_ON(session->magic != L2TP_SESSION_MAGIC); out: return session; } /***************************************************************************** * Receive data handling *****************************************************************************/ static int pppol2tp_recv_payload_hook(struct sk_buff *skb) { /* Skip PPP header, if present. In testing, Microsoft L2TP clients * don't send the PPP header (PPP header compression enabled), but * other clients can include the header. So we cope with both cases * here. The PPP header is always FF03 when using L2TP. * * Note that skb->data[] isn't dereferenced from a u16 ptr here since * the field may be unaligned. 
*/ if (!pskb_may_pull(skb, 2)) return 1; if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03)) skb_pull(skb, 2); return 0; } /* Receive message. This is the recvmsg for the PPPoL2TP socket. */ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int err; struct sk_buff *skb; struct sock *sk = sock->sk; err = -EIO; if (sk->sk_state & PPPOX_BOUND) goto end; msg->msg_namelen = 0; err = 0; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) goto end; if (len > skb->len) len = skb->len; else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); if (likely(err == 0)) err = len; kfree_skb(skb); end: return err; } static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct pppol2tp_session *ps = l2tp_session_priv(session); struct sock *sk = NULL; /* If the socket is bound, send it in to PPP's input queue. Otherwise * queue it on the session socket. */ sk = ps->sock; if (sk == NULL) goto no_sock; if (sk->sk_state & PPPOX_BOUND) { struct pppox_sock *po; PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, "%s: recv %d byte data frame, passing to ppp\n", session->name, data_len); /* We need to forget all info related to the L2TP packet * gathered in the skb as we are going to reuse the same * skb for the inner packet. * Namely we need to: * - reset xfrm (IPSec) information as it applies to * the outer L2TP packet and not to the inner one * - release the dst to force a route lookup on the inner * IP packet since skb->dst currently points to the dst * of the UDP tunnel * - reset netfilter information as it doesn't apply * to the inner packet either */ secpath_reset(skb); skb_dst_drop(skb); nf_reset(skb); po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, "%s: socket not bound\n", session->name); /* Not bound. 
Nothing we can do, so discard. */ session->stats.rx_errors++; kfree_skb(skb); } return; no_sock: PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO, "%s: no socket\n", session->name); kfree_skb(skb); } static void pppol2tp_session_sock_hold(struct l2tp_session *session) { struct pppol2tp_session *ps = l2tp_session_priv(session); if (ps->sock) sock_hold(ps->sock); } static void pppol2tp_session_sock_put(struct l2tp_session *session) { struct pppol2tp_session *ps = l2tp_session_priv(session); if (ps->sock) sock_put(ps->sock); } /************************************************************************ * Transmit handling ***********************************************************************/ /* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here * when a user application does a sendmsg() on the session socket. L2TP and * PPP headers must be inserted into the user's data. */ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { static const unsigned char ppph[2] = { 0xff, 0x03 }; struct sock *sk = sock->sk; struct sk_buff *skb; int error; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; int uhlen; error = -ENOTCONN; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto error; /* Get session and tunnel contexts */ error = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto error; ps = l2tp_session_priv(session); tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); if (tunnel == NULL) goto error_put_sess; uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; /* Allocate a socket buffer */ error = -ENOMEM; skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len + sizeof(ppph) + total_len, 0, GFP_KERNEL); if (!skb) goto error_put_sess_tun; /* Reserve space for headers. 
*/ skb_reserve(skb, NET_SKB_PAD); skb_reset_network_header(skb); skb_reserve(skb, sizeof(struct iphdr)); skb_reset_transport_header(skb); skb_reserve(skb, uhlen); /* Add PPP header */ skb->data[0] = ppph[0]; skb->data[1] = ppph[1]; skb_put(skb, 2); /* Copy user data into skb */ error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); if (error < 0) { kfree_skb(skb); goto error_put_sess_tun; } skb_put(skb, total_len); l2tp_xmit_skb(session, skb, session->hdr_len); sock_put(ps->tunnel_sock); return error; error_put_sess_tun: sock_put(ps->tunnel_sock); error_put_sess: sock_put(sk); error: return error; } /* Transmit function called by generic PPP driver. Sends PPP frame * over PPPoL2TP socket. * * This is almost the same as pppol2tp_sendmsg(), but rather than * being called with a msghdr from userspace, it is called with a skb * from the kernel. * * The supplied skb from ppp doesn't have enough headroom for the * insertion of L2TP, UDP and IP headers so we need to allocate more * headroom in the skb. This will create a cloned skb. But we must be * careful in the error case because the caller will expect to free * the skb it supplied, not our cloned skb. So we take care to always * leave the original skb unfreed if we return an error. 
 */
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	static const u8 ppph[2] = { 0xff, 0x03 };
	struct sock *sk = (struct sock *) chan->private;
	struct sock *sk_tun;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int old_headroom;
	int new_headroom;

	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto abort;

	/* Get session and tunnel contexts from the socket */
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto abort;

	ps = l2tp_session_priv(session);
	sk_tun = ps->tunnel_sock;
	if (sk_tun == NULL)
		goto abort_put_sess;
	tunnel = l2tp_sock_to_tunnel(sk_tun);
	if (tunnel == NULL)
		goto abort_put_sess;

	/* Grow headroom for the PPP address/control bytes; skb_cow_head()
	 * may reallocate, so account the extra headroom in truesize.
	 */
	old_headroom = skb_headroom(skb);
	if (skb_cow_head(skb, sizeof(ppph)))
		goto abort_put_sess_tun;

	new_headroom = skb_headroom(skb);
	skb->truesize += new_headroom - old_headroom;

	/* Setup PPP header */
	__skb_push(skb, sizeof(ppph));
	skb->data[0] = ppph[0];
	skb->data[1] = ppph[1];

	l2tp_xmit_skb(session, skb, session->hdr_len);

	/* Drop the refs taken by pppol2tp_sock_to_session() and
	 * l2tp_sock_to_tunnel() above.
	 */
	sock_put(sk_tun);
	sock_put(sk);

	return 1;

abort_put_sess_tun:
	sock_put(sk_tun);
abort_put_sess:
	sock_put(sk);
abort:
	/* Free the original skb */
	kfree_skb(skb);
	return 1;
}

/*****************************************************************************
 * Session (and tunnel control) socket create/destroy.
 *****************************************************************************/

/* Called by l2tp_core when a session socket is being closed.
 * Unbinds the PPPoX socket, marks it dead and flushes all queued data,
 * dropping one socket ref per skb removed from the reorder queue (taken
 * by the pppol2tp_session_sock_hold ref hook).
 */
static void pppol2tp_session_close(struct l2tp_session *session)
{
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct sock *sk = ps->sock;
	struct sk_buff *skb;

	BUG_ON(session->magic != L2TP_SESSION_MAGIC);

	/* Tunnel-management sockets (session_id == 0) have no PPP state */
	if (session->session_id == 0)
		goto out;

	if (sk != NULL) {
		lock_sock(sk);

		if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
			pppox_unbind_sock(sk);
			sk->sk_state = PPPOX_DEAD;
			sk->sk_state_change(sk);
		}

		/* Purge any queued data */
		skb_queue_purge(&sk->sk_receive_queue);
		skb_queue_purge(&sk->sk_write_queue);
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			sock_put(sk);
		}

		release_sock(sk);
	}

out:
	return;
}

/* Really kill the session socket. (Called from sock_put() if
 * refcnt == 0.)
 * Releases the session reference held via sk->sk_user_data.
 */
static void pppol2tp_session_destruct(struct sock *sk)
{
	struct l2tp_session *session;

	if (sk->sk_user_data != NULL) {
		session = sk->sk_user_data;
		/* NOTE(review): this NULL check is redundant -- sk_user_data
		 * was just tested non-NULL above.
		 */
		if (session == NULL)
			goto out;

		sk->sk_user_data = NULL;
		BUG_ON(session->magic != L2TP_SESSION_MAGIC);
		l2tp_session_dec_refcount(session);
	}

out:
	return;
}

/* Called when the PPPoX socket (session) is closed.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	session = pppol2tp_sock_to_session(sk);

	/* Purge any queued data */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
	if (session != NULL) {
		struct sk_buff *skb;
		/* One sock_put() per reorder-queue skb (ref hook), plus one
		 * for the hold taken by pppol2tp_sock_to_session().
		 */
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			sock_put(sk);
		}
		sock_put(sk);
	}

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}

static struct proto pppol2tp_sk_proto = {
	.name	  = "PPPOL2TP",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

/* Backlog receive: feed a deferred skb back into the L2TP UDP decapsulation
 * path; frees the skb if the core rejects it. Always reports success to the
 * backlog machinery.
 */
static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	rc = l2tp_udp_encap_recv(sk, skb);
	if (rc)
		kfree_skb(skb);

	return NET_RX_SUCCESS;
}

/* socket() handler. Initialize a new struct sock.
 */
static int pppol2tp_create(struct net *net, struct socket *sock)
{
	int error = -ENOMEM;
	struct sock *sk;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sock->state  = SS_UNCONNECTED;
	sock->ops    = &pppol2tp_ops;

	sk->sk_backlog_rcv = pppol2tp_backlog_recv;
	sk->sk_protocol	   = PX_PROTO_OL2TP;
	sk->sk_family	   = PF_PPPOX;
	sk->sk_state	   = PPPOX_NONE;
	sk->sk_type	   = SOCK_STREAM;
	sk->sk_destruct	   = pppol2tp_session_destruct;

	error = 0;

out:
	return error;
}

#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
/* debugfs "show" hook: print the PPP interface name for a session */
static void pppol2tp_show(struct seq_file *m, void *arg)
{
	struct l2tp_session *session = arg;
	struct pppol2tp_session *ps = l2tp_session_priv(session);

	if (ps) {
		struct pppox_sock *po = pppox_sk(ps->sock);
		if (po)
			seq_printf(m, "   interface %s\n",
				   ppp_dev_name(&po->chan));
	}
}
#endif

/* connect() handler.
   Attach a PPPoX socket to a tunnel UDP socket */
static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
			    int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
	struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct l2tp_session *session = NULL;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	struct dst_entry *dst;
	struct l2tp_session_cfg cfg = { 0, };
	int error = 0;
	u32 tunnel_id, peer_tunnel_id;
	u32 session_id, peer_session_id;
	int ver = 2;	/* address size selects L2TPv2 vs L2TPv3 below */
	int fd;

	lock_sock(sk);

	error = -EINVAL;
	if (sp->sa_protocol != PX_PROTO_OL2TP)
		goto end;

	/* Check for already bound sockets */
	error = -EBUSY;
	if (sk->sk_state & PPPOX_CONNECTED)
		goto end;

	/* We don't support rebinding anyway */
	error = -EALREADY;
	if (sk->sk_user_data)
		goto end; /* socket is already attached */

	/* Get params from socket address. Handle L2TPv2 and L2TPv3 */
	if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
		fd = sp->pppol2tp.fd;
		tunnel_id = sp->pppol2tp.s_tunnel;
		peer_tunnel_id = sp->pppol2tp.d_tunnel;
		session_id = sp->pppol2tp.s_session;
		peer_session_id = sp->pppol2tp.d_session;
	} else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
		ver = 3;
		fd = sp3->pppol2tp.fd;
		tunnel_id = sp3->pppol2tp.s_tunnel;
		peer_tunnel_id = sp3->pppol2tp.d_tunnel;
		session_id = sp3->pppol2tp.s_session;
		peer_session_id = sp3->pppol2tp.d_session;
	} else {
		error = -EINVAL;
		goto end; /* bad socket address */
	}

	/* Don't bind if tunnel_id is 0 */
	error = -EINVAL;
	if (tunnel_id == 0)
		goto end;

	tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);

	/* Special case: create tunnel context if session_id and
	 * peer_session_id is 0. Otherwise look up tunnel using supplied
	 * tunnel id.
	 */
	if ((session_id == 0) && (peer_session_id == 0)) {
		if (tunnel == NULL) {
			struct l2tp_tunnel_cfg tcfg = {
				.encap = L2TP_ENCAPTYPE_UDP,
				.debug = 0,
			};
			error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
			if (error < 0)
				goto end;
		}
	} else {
		/* Error if we can't find the tunnel */
		error = -ENOENT;
		if (tunnel == NULL)
			goto end;

		/* Error if socket is not prepped */
		if (tunnel->sock == NULL)
			goto end;
	}

	if (tunnel->recv_payload_hook == NULL)
		tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;

	/* Learn the peer tunnel id from the first connect if not yet known */
	if (tunnel->peer_tunnel_id == 0) {
		if (ver == 2)
			tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
		else
			tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
	}

	/* Create session if it doesn't already exist. We handle the
	 * case where a session was previously created by the netlink
	 * interface by checking that the session doesn't already have
	 * a socket and its tunnel socket are what we expect. If any
	 * of those checks fail, return EEXIST to the caller.
	 */
	session = l2tp_session_find(sock_net(sk), tunnel, session_id);
	if (session == NULL) {
		/* Default MTU must allow space for UDP/L2TP/PPP
		 * headers.
		 */
		cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;

		/* Allocate and initialize a new session context. */
		session = l2tp_session_create(sizeof(struct pppol2tp_session),
					      tunnel, session_id,
					      peer_session_id, &cfg);
		if (session == NULL) {
			error = -ENOMEM;
			goto end;
		}
	} else {
		ps = l2tp_session_priv(session);
		error = -EEXIST;
		if (ps->sock != NULL)
			goto end;

		/* consistency checks */
		if (ps->tunnel_sock != tunnel->sock)
			goto end;
	}

	/* Associate session with its PPPoL2TP socket */
	ps = l2tp_session_priv(session);
	ps->owner = current->pid;
	ps->sock = sk;
	ps->tunnel_sock = tunnel->sock;

	session->recv_skb = pppol2tp_recv;
	session->session_close = pppol2tp_session_close;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show = pppol2tp_show;
#endif

	/* We need to know each time a skb is dropped from the reorder
	 * queue.
	 */
	session->ref = pppol2tp_session_sock_hold;
	session->deref = pppol2tp_session_sock_put;

	/* If PMTU discovery was enabled, use the MTU that was discovered */
	dst = sk_dst_get(sk);
	if (dst != NULL) {
		u32 pmtu = dst_mtu(__sk_dst_get(sk));
		if (pmtu != 0)
			session->mtu = session->mru = pmtu -
				PPPOL2TP_HEADER_OVERHEAD;
		dst_release(dst);
	}

	/* Special case: if source & dest session_id == 0x0000, this
	 * socket is being created to manage the tunnel. Just set up
	 * the internal context for use by ioctl() and sockopt()
	 * handlers.
	 */
	if ((session->session_id == 0) && (session->peer_session_id == 0)) {
		error = 0;
		goto out_no_ppp;
	}

	/* The only header we need to worry about is the L2TP
	 * header. This size is different depending on whether
	 * sequence numbers are enabled for the data channel.
	 */
	po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;

	po->chan.private = sk;
	po->chan.ops = &pppol2tp_chan_ops;
	po->chan.mtu = session->mtu;

	error = ppp_register_net_channel(sock_net(sk), &po->chan);
	if (error)
		goto end;

out_no_ppp:
	/* This is how we get the session context from the socket. */
	sk->sk_user_data = session;
	sk->sk_state = PPPOX_CONNECTED;
	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

end:
	release_sock(sk);

	return error;
}

#ifdef CONFIG_L2TP_V3

/* Called when creating sessions via the netlink interface.
 */
static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id,
				   u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	int error;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct pppol2tp_session *ps;

	tunnel = l2tp_tunnel_find(net, tunnel_id);

	/* Error if we can't find the tunnel */
	error = -ENOENT;
	if (tunnel == NULL)
		goto out;

	/* Error if tunnel socket is not prepped */
	if (tunnel->sock == NULL)
		goto out;

	/* Check that this session doesn't already exist */
	error = -EEXIST;
	session = l2tp_session_find(net, tunnel, session_id);
	if (session != NULL)
		goto out;

	/* Default MTU values.
*/ if (cfg->mtu == 0) cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; if (cfg->mru == 0) cfg->mru = cfg->mtu; /* Allocate and initialize a new session context. */ error = -ENOMEM; session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, peer_session_id, cfg); if (session == NULL) goto out; ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: created\n", session->name); error = 0; out: return error; } /* Called when deleting sessions via the netlink interface. */ static int pppol2tp_session_delete(struct l2tp_session *session) { struct pppol2tp_session *ps = l2tp_session_priv(session); if (ps->sock == NULL) l2tp_session_dec_refcount(session); return 0; } #endif /* CONFIG_L2TP_V3 */ /* getname() support. */ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, int *usockaddr_len, int peer) { int len = 0; int error = 0; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct sock *sk = sock->sk; struct inet_sock *inet; struct pppol2tp_session *pls; error = -ENOTCONN; if (sk == NULL) goto end; if (sk->sk_state != PPPOX_CONNECTED) goto end; error = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto end; pls = l2tp_session_priv(session); tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock); if (tunnel == NULL) { error = -EBADF; goto end_put_sess; } inet = inet_sk(tunnel->sock); if (tunnel->version == 2) { struct sockaddr_pppol2tp sp; len = sizeof(sp); memset(&sp, 0, len); sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OL2TP; sp.pppol2tp.fd = tunnel->fd; sp.pppol2tp.pid = pls->owner; sp.pppol2tp.s_tunnel = tunnel->tunnel_id; sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; sp.pppol2tp.s_session = session->session_id; sp.pppol2tp.d_session = session->peer_session_id; sp.pppol2tp.addr.sin_family = AF_INET; sp.pppol2tp.addr.sin_port = inet->inet_dport; sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; memcpy(uaddr, &sp, len); } else 
if (tunnel->version == 3) { struct sockaddr_pppol2tpv3 sp; len = sizeof(sp); memset(&sp, 0, len); sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OL2TP; sp.pppol2tp.fd = tunnel->fd; sp.pppol2tp.pid = pls->owner; sp.pppol2tp.s_tunnel = tunnel->tunnel_id; sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id; sp.pppol2tp.s_session = session->session_id; sp.pppol2tp.d_session = session->peer_session_id; sp.pppol2tp.addr.sin_family = AF_INET; sp.pppol2tp.addr.sin_port = inet->inet_dport; sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; memcpy(uaddr, &sp, len); } *usockaddr_len = len; sock_put(pls->tunnel_sock); end_put_sess: sock_put(sk); error = 0; end: return error; } /**************************************************************************** * ioctl() handlers. * * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP * sockets. However, in order to control kernel tunnel features, we allow * userspace to create a special "tunnel" PPPoX socket which is used for * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow * the user application to issue L2TP setsockopt(), getsockopt() and ioctl() * calls. ****************************************************************************/ static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, struct l2tp_stats *stats) { dest->tx_packets = stats->tx_packets; dest->tx_bytes = stats->tx_bytes; dest->tx_errors = stats->tx_errors; dest->rx_packets = stats->rx_packets; dest->rx_bytes = stats->rx_bytes; dest->rx_seq_discards = stats->rx_seq_discards; dest->rx_oos_packets = stats->rx_oos_packets; dest->rx_errors = stats->rx_errors; } /* Session ioctl helper. 
 */
static int pppol2tp_session_ioctl(struct l2tp_session *session,
				  unsigned int cmd, unsigned long arg)
{
	struct ifreq ifr;
	int err = 0;
	struct sock *sk;
	int val = (int) arg;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_ioc_stats stats;

	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
	       session->name, cmd, arg);

	/* Pin the session socket across the user-copy operations */
	sk = ps->sock;
	sock_hold(sk);

	switch (cmd) {
	case SIOCGIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
			break;
		ifr.ifr_mtu = session->mtu;
		if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get mtu=%d\n", session->name, session->mtu);
		err = 0;
		break;

	case SIOCSIFMTU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
			break;

		session->mtu = ifr.ifr_mtu;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set mtu=%d\n", session->name, session->mtu);
		err = 0;
		break;

	case PPPIOCGMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (put_user(session->mru, (int __user *) arg))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get mru=%d\n", session->name, session->mru);
		err = 0;
		break;

	case PPPIOCSMRU:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;

		session->mru = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set mru=%d\n", session->name, session->mru);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		err = -EFAULT;
		if (put_user(ps->flags, (int __user *) arg))
			break;

		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get flags=%d\n", session->name, ps->flags);
		err = 0;
		break;

	case PPPIOCSFLAGS:
		err = -EFAULT;
		if (get_user(val, (int __user *) arg))
			break;
		ps->flags = val;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: set flags=%d\n", session->name, ps->flags);
		err = 0;
		break;

	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		memset(&stats, 0, sizeof(stats));
		stats.tunnel_id = tunnel->tunnel_id;
		stats.session_id = session->session_id;
		pppol2tp_copy_stats(&stats, &session->stats);
		if (copy_to_user((void __user *) arg, &stats,
				 sizeof(stats)))
			break;
		PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", session->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}

/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 */
static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk;
	struct pppol2tp_ioc_stats stats;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
	       tunnel->name, cmd, arg);

	sk = tunnel->sock;
	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		if (copy_from_user(&stats, (void __user *) arg,
				   sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		if (stats.session_id != 0) {
			/* resend to session ioctl handler */
			struct l2tp_session *session =
				l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd, arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		pppol2tp_copy_stats(&stats, &tunnel->stats);
		if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
			err = -EFAULT;
			break;
		}
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}

/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;
	struct pppol2tp_session *ps;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	ps = l2tp_session_priv(session);
	if ((session->session_id == 0) &&
	    (session->peer_session_id == 0)) {
		err = -EBADF;
		tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		sock_put(ps->tunnel_sock);
		goto end_put_sess;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end_put_sess:
	sock_put(sk);
end:
	return err;
}

/*****************************************************************************
 * setsockopt() / getsockopt() support.
 *
 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
 * sockets. In order to control kernel tunnel features, we allow userspace to
 * create a special "tunnel" PPPoX socket which is used for control only.
 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
*****************************************************************************/ /* Tunnel setsockopt() helper. */ static int pppol2tp_tunnel_setsockopt(struct sock *sk, struct l2tp_tunnel *tunnel, int optname, int val) { int err = 0; switch (optname) { case PPPOL2TP_SO_DEBUG: tunnel->debug = val; PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: set debug=%x\n", tunnel->name, tunnel->debug); break; default: err = -ENOPROTOOPT; break; } return err; } /* Session setsockopt helper. */ static int pppol2tp_session_setsockopt(struct sock *sk, struct l2tp_session *session, int optname, int val) { int err = 0; struct pppol2tp_session *ps = l2tp_session_priv(session); switch (optname) { case PPPOL2TP_SO_RECVSEQ: if ((val != 0) && (val != 1)) { err = -EINVAL; break; } session->recv_seq = val ? -1 : 0; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: set recv_seq=%d\n", session->name, session->recv_seq); break; case PPPOL2TP_SO_SENDSEQ: if ((val != 0) && (val != 1)) { err = -EINVAL; break; } session->send_seq = val ? -1 : 0; { struct sock *ssk = ps->sock; struct pppox_sock *po = pppox_sk(ssk); po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ : PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; } PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: set send_seq=%d\n", session->name, session->send_seq); break; case PPPOL2TP_SO_LNSMODE: if ((val != 0) && (val != 1)) { err = -EINVAL; break; } session->lns_mode = val ? 
-1 : 0; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: set lns_mode=%d\n", session->name, session->lns_mode); break; case PPPOL2TP_SO_DEBUG: session->debug = val; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: set debug=%x\n", session->name, session->debug); break; case PPPOL2TP_SO_REORDERTO: session->reorder_timeout = msecs_to_jiffies(val); PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout); break; default: err = -ENOPROTOOPT; break; } return err; } /* Main setsockopt() entry point. * Does API checks, then calls either the tunnel or session setsockopt * handler, according to whether the PPPoL2TP socket is a for a regular * session or the special tunnel type. */ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2tp_session *session; struct l2tp_tunnel *tunnel; struct pppol2tp_session *ps; int val; int err; if (level != SOL_PPPOL2TP) return udp_prot.setsockopt(sk, level, optname, optval, optlen); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; err = -ENOTCONN; if (sk->sk_user_data == NULL) goto end; /* Get session context from the socket */ err = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel */ ps = l2tp_session_priv(session); if ((session->session_id == 0) && (session->peer_session_id == 0)) { err = -EBADF; tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); if (tunnel == NULL) goto end_put_sess; err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); sock_put(ps->tunnel_sock); } else err = pppol2tp_session_setsockopt(sk, session, optname, val); err = 0; end_put_sess: sock_put(sk); end: return err; } /* Tunnel getsockopt helper. Called with sock locked. 
*/ static int pppol2tp_tunnel_getsockopt(struct sock *sk, struct l2tp_tunnel *tunnel, int optname, int *val) { int err = 0; switch (optname) { case PPPOL2TP_SO_DEBUG: *val = tunnel->debug; PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: get debug=%x\n", tunnel->name, tunnel->debug); break; default: err = -ENOPROTOOPT; break; } return err; } /* Session getsockopt helper. Called with sock locked. */ static int pppol2tp_session_getsockopt(struct sock *sk, struct l2tp_session *session, int optname, int *val) { int err = 0; switch (optname) { case PPPOL2TP_SO_RECVSEQ: *val = session->recv_seq; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: get recv_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_SENDSEQ: *val = session->send_seq; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: get send_seq=%d\n", session->name, *val); break; case PPPOL2TP_SO_LNSMODE: *val = session->lns_mode; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: get lns_mode=%d\n", session->name, *val); break; case PPPOL2TP_SO_DEBUG: *val = session->debug; PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: get debug=%d\n", session->name, *val); break; case PPPOL2TP_SO_REORDERTO: *val = (int) jiffies_to_msecs(session->reorder_timeout); PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, "%s: get reorder_timeout=%d\n", session->name, *val); break; default: err = -ENOPROTOOPT; } return err; } /* Main getsockopt() entry point. * Does API checks, then calls either the tunnel or session getsockopt * handler, according to whether the PPPoX socket is a for a regular session * or the special tunnel type. 
*/ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct l2tp_session *session; struct l2tp_tunnel *tunnel; int val, len; int err; struct pppol2tp_session *ps; if (level != SOL_PPPOL2TP) return udp_prot.getsockopt(sk, level, optname, optval, optlen); if (get_user(len, (int __user *) optlen)) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); if (len < 0) return -EINVAL; err = -ENOTCONN; if (sk->sk_user_data == NULL) goto end; /* Get the session context */ err = -EBADF; session = pppol2tp_sock_to_session(sk); if (session == NULL) goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel */ ps = l2tp_session_priv(session); if ((session->session_id == 0) && (session->peer_session_id == 0)) { err = -EBADF; tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock); if (tunnel == NULL) goto end_put_sess; err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); sock_put(ps->tunnel_sock); } else err = pppol2tp_session_getsockopt(sk, session, optname, &val); err = -EFAULT; if (put_user(len, (int __user *) optlen)) goto end_put_sess; if (copy_to_user((void __user *) optval, &val, len)) goto end_put_sess; err = 0; end_put_sess: sock_put(sk); end: return err; } /***************************************************************************** * /proc filesystem for debug * Since the original pppol2tp driver provided /proc/net/pppol2tp for * L2TPv2, we dump only L2TPv2 tunnels and sessions here. 
 *****************************************************************************/

static unsigned int pppol2tp_net_id;

#ifdef CONFIG_PROC_FS

/* Iterator state for the /proc/net/pppol2tp seq_file walk. */
struct pppol2tp_seq_data {
	struct seq_net_private p;
	int tunnel_idx;			/* current tunnel */
	int session_idx;		/* index of session within current tunnel */
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;	/* NULL means get next tunnel */
};

/* Advance pd->tunnel to the next L2TPv2 tunnel (L2TPv3 is skipped). */
static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
{
	for (;;) {
		pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
		pd->tunnel_idx++;

		if (pd->tunnel == NULL)
			break;

		/* Ignore L2TPv3 tunnels */
		if (pd->tunnel->version < 3)
			break;
	}
}

/* Advance pd->session within the current tunnel; when the tunnel is
 * exhausted, move on to the next tunnel.
 */
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
	pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
	pd->session_idx++;

	if (pd->session == NULL) {
		pd->session_idx = 0;
		pppol2tp_next_tunnel(net, pd);
	}
}

static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	loff_t pos = *offs;
	struct net *net;

	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;
	net = seq_file_net(m);

	if (pd->tunnel == NULL)
		pppol2tp_next_tunnel(net, pd);
	else
		pppol2tp_next_session(net, pd);

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}

/* All iteration happens in ->start; ->next only bumps the position. */
static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}

/* Print one tunnel line pair: identity/refcount, then debug + stats. */
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct l2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
		   tunnel->debug,
		   (unsigned long long)tunnel->stats.tx_packets,
		   (unsigned long long)tunnel->stats.tx_bytes,
		   (unsigned long long)tunnel->stats.tx_errors,
		   (unsigned long long)tunnel->stats.rx_packets,
		   (unsigned long long)tunnel->stats.rx_bytes,
		   (unsigned long long)tunnel->stats.rx_errors);
}

/* Print one session entry: ids/state, mode/debug, seq numbers + stats,
 * and the bound PPP interface name if any.
 */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct l2tp_session *session = v;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct pppol2tp_session *ps = l2tp_session_priv(session);
	struct pppox_sock *po = pppox_sk(ps->sock);
	u32 ip = 0;
	u16 port = 0;

	if (tunnel->sock) {
		struct inet_sock *inet = inet_sk(tunnel->sock);
		ip = ntohl(inet->inet_saddr);
		port = ntohs(inet->inet_sport);
	}

	seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name, ip, port,
		   tunnel->tunnel_id,
		   session->session_id,
		   tunnel->peer_tunnel_id,
		   session->peer_session_id,
		   ps->sock->sk_state,
		   (session == ps->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   (unsigned long long)session->stats.tx_packets,
		   (unsigned long long)session->stats.tx_bytes,
		   (unsigned long long)session->stats.tx_errors,
		   (unsigned long long)session->stats.rx_packets,
		   (unsigned long long)session->stats.rx_bytes,
		   (unsigned long long)session->stats.rx_errors);

	if (po)
		seq_printf(m, " interface %s\n",
			   ppp_dev_name(&po->chan));
}

static int pppol2tp_seq_show(struct seq_file *m, void *v)
{
	struct pppol2tp_seq_data *pd = v;

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
		seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
		seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		seq_puts(m, " SESSION name, addr/port src-tid/sid "
			 "dest-tid/sid state user-data-ok\n");
		seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
		seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		goto out;
	}

	/* Show the tunnel or session context. */
	if (pd->session == NULL)
		pppol2tp_seq_tunnel_show(m, pd->tunnel);
	else
		pppol2tp_seq_session_show(m, pd->session);

out:
	return 0;
}

static const struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};

/* Called when our /proc file is opened. We allocate data for use when
 * iterating our tunnel / session contexts and store it in the private
 * data of the seq_file.
 */
static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &pppol2tp_seq_ops,
			    sizeof(struct pppol2tp_seq_data));
}

static const struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif /* CONFIG_PROC_FS */

/*****************************************************************************
 * Network namespace
 *****************************************************************************/

/* Per-namespace init: create the /proc/net/pppol2tp entry. */
static __net_init int pppol2tp_init_net(struct net *net)
{
	struct proc_dir_entry *pde;
	int err = 0;

	pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
	if (!pde) {
		err = -ENOMEM;
		goto out;
	}

out:
	return err;
}

static __net_exit void pppol2tp_exit_net(struct net *net)
{
	proc_net_remove(net, "pppol2tp");
}

static struct pernet_operations pppol2tp_net_ops = {
	.init = pppol2tp_init_net,
	.exit = pppol2tp_exit_net,
	.id   = &pppol2tp_net_id,
};

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

/* Socket operations for PX_PROTO_OL2TP sockets; unsupported calls are
 * wired to the sock_no_* stubs.
 */
static const struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};

static const struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};

#ifdef CONFIG_L2TP_V3

/* Netlink hooks so the generic L2TP netlink API can create/destroy
 * PPP pseudowire sessions.
 */
static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
	.session_create	= pppol2tp_session_create,
	.session_delete	= pppol2tp_session_delete,
};

#endif /*
CONFIG_L2TP_V3 */

/* Module init: register pernet ops, the protocol, the PPPoX protocol
 * handler and (optionally) the L2TPv3 netlink ops — in that order.
 * The error labels below unwind in exactly the reverse order.
 */
static int __init pppol2tp_init(void)
{
	int err;

	err = register_pernet_device(&pppol2tp_net_ops);
	if (err)
		goto out;

	err = proto_register(&pppol2tp_sk_proto, 0);
	if (err)
		goto out_unregister_pppol2tp_pernet;

	err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
	if (err)
		goto out_unregister_pppol2tp_proto;

#ifdef CONFIG_L2TP_V3
	err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
	if (err)
		goto out_unregister_pppox;
#endif

	printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
	       PPPOL2TP_DRV_VERSION);

out:
	return err;

	/* Error unwind: labels live after the success return so the happy
	 * path falls straight through to 'out'.
	 */
#ifdef CONFIG_L2TP_V3
out_unregister_pppox:
	unregister_pppox_proto(PX_PROTO_OL2TP);
#endif
out_unregister_pppol2tp_proto:
	proto_unregister(&pppol2tp_sk_proto);
out_unregister_pppol2tp_pernet:
	unregister_pernet_device(&pppol2tp_net_ops);
	goto out;
}

static void __exit pppol2tp_exit(void)
{
#ifdef CONFIG_L2TP_V3
	l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
#endif
	unregister_pppox_proto(PX_PROTO_OL2TP);
	proto_unregister(&pppol2tp_sk_proto);
	unregister_pernet_device(&pppol2tp_net_ops);
}

module_init(pppol2tp_init);
module_exit(pppol2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("PPP over L2TP over UDP");
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
gpl-2.0
xsilon/linux-xsilon
drivers/power/avs/smartreflex.c
2143
29092
/*
 * OMAP SmartReflex Voltage Control
 *
 * Author: Thara Gopinath	<thara@ti.com>
 *
 * Copyright (C) 2012 Texas Instruments, Inc.
 * Thara Gopinath <thara@ti.com>
 *
 * Copyright (C) 2008 Nokia Corporation
 * Kalle Jokiniemi
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Lesly A M <x0080970@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/power/smartreflex.h>

#define DRIVER_NAME	"smartreflex"
#define SMARTREFLEX_NAME_LEN	32
#define NVALUE_NAME_LEN	40
#define SR_DISABLE_TIMEOUT	200

/* sr_list contains all the instances of smartreflex module */
static LIST_HEAD(sr_list);

static struct omap_sr_class_data *sr_class;
static struct omap_sr_pmic_data *sr_pmic_data;
static struct dentry *sr_dbg_dir;

/* Raw register write at sr->base + offset. */
static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
{
	__raw_writel(value, (sr->base + offset));
}

/* Read-modify-write helper with special handling for the error-config
 * registers (write-1-to-clear status bits).
 */
static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
				 u32 value)
{
	u32 reg_val;

	/*
	 * Smartreflex error config register is special as it contains
	 * certain status bits which if written a 1 into means a clear
	 * of those bits. So in order to make sure no accidental write of
	 * 1 happens to those status bits, do a clear of them in the read
	 * value. This mean this API doesn't rewrite values in these bits
	 * if they are currently set, but does allow the caller to write
	 * those bits.
	 */
	if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
		mask |= ERRCONFIG_STATUS_V1_MASK;
	else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
		mask |= ERRCONFIG_VPBOUNDINTST_V2;

	reg_val = __raw_readl(sr->base + offset);
	reg_val &= ~mask;

	value &= mask;

	reg_val |= value;

	__raw_writel(reg_val, (sr->base + offset));
}

static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
{
	return __raw_readl(sr->base + offset);
}

/* Find the SR instance for a voltage domain, or an ERR_PTR. */
static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
{
	struct omap_sr *sr_info;

	if (!voltdm) {
		pr_err("%s: Null voltage domain passed!\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(sr_info, &sr_list, node) {
		if (voltdm == sr_info->voltdm)
			return sr_info;
	}

	return ERR_PTR(-ENODATA);
}

/* IRQ handler: read + ack the IP-version-specific status register, then
 * forward the raw status to the class driver's notify hook if present.
 */
static irqreturn_t sr_interrupt(int irq, void *data)
{
	struct omap_sr *sr_info = data;
	u32 status = 0;

	switch (sr_info->ip_type) {
	case SR_TYPE_V1:
		/* Read the status bits */
		status = sr_read_reg(sr_info, ERRCONFIG_V1);

		/* Clear them by writing back */
		sr_write_reg(sr_info, ERRCONFIG_V1, status);
		break;
	case SR_TYPE_V2:
		/* Read the status bits */
		status = sr_read_reg(sr_info, IRQSTATUS);

		/* Clear them by writing back */
		sr_write_reg(sr_info, IRQSTATUS, status);
		break;
	default:
		dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n",
			sr_info->ip_type);
		return IRQ_NONE;
	}

	if (sr_class->notify)
		sr_class->notify(sr_info, status);

	return IRQ_HANDLED;
}

/* Derive sr->clk_length from the functional clock rate. On an
 * unsupported rate clk_length is left unchanged and an error is logged.
 */
static void sr_set_clk_length(struct omap_sr *sr)
{
	struct clk *fck;
	u32 fclk_speed;

	fck = clk_get(&sr->pdev->dev, "fck");

	if (IS_ERR(fck)) {
		dev_err(&sr->pdev->dev, "%s: unable to get fck for device %s\n",
			__func__, dev_name(&sr->pdev->dev));
		return;
	}

	fclk_speed = clk_get_rate(fck);
	clk_put(fck);

	switch (fclk_speed) {
	case 12000000:
		sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
		break;
	case 13000000:
		sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
		break;
	case 19200000:
		sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
		break;
	case 26000000:
		sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
		break;
	case 38400000:
		sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Invalid fclk rate: %d\n",
			__func__, fclk_speed);
		break;
	}
}

/* Kick the class driver's enable; mark autocomp active only on success
 * (class enable returning 0).
 */
static void sr_start_vddautocomp(struct omap_sr *sr)
{
	if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
		dev_warn(&sr->pdev->dev,
			 "%s: smartreflex class driver not registered\n",
			 __func__);
		return;
	}

	if (!sr_class->enable(sr))
		sr->autocomp_active = true;
}

static void sr_stop_vddautocomp(struct omap_sr *sr)
{
	if (!sr_class || !(sr_class->disable)) {
		dev_warn(&sr->pdev->dev,
			 "%s: smartreflex class driver not registered\n",
			 __func__);
		return;
	}

	if (sr->autocomp_active) {
		sr_class->disable(sr, 1);
		sr->autocomp_active = false;
	}
}

/*
 * This function handles the intializations which have to be done
 * only when both sr device and class driver regiter has
 * completed. This will be attempted to be called from both sr class
 * driver register and sr device intializtion API's. Only one call
 * will ultimately succeed.
 *
 * Currently this function registers interrupt handler for a particular SR
 * if smartreflex class driver is already registered and has
 * requested for interrupts and the SR interrupt line in present.
 */
static int sr_late_init(struct omap_sr *sr_info)
{
	struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
	int ret = 0;

	if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
		ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq,
				       sr_interrupt, 0, sr_info->name, sr_info);
		if (ret)
			goto error;
		/* IRQ stays disabled until the class driver enables SR */
		disable_irq(sr_info->irq);
	}

	if (pdata && pdata->enable_on_init)
		sr_start_vddautocomp(sr_info);

	return ret;

error:
	list_del(&sr_info->node);
	dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
		"interrupt handler. Smartreflex will"
		"not function as desired\n", __func__);

	return ret;
}

/* Disable the SR v1 IP and wait (polling) for the disable ack. */
static void sr_v1_disable(struct omap_sr *sr)
{
	int timeout = 0;
	int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
			ERRCONFIG_MCUBOUNDINTST;

	/* Enable MCUDisableAcknowledge interrupt */
	sr_modify_reg(sr, ERRCONFIG_V1,
		      ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);

	/* SRCONFIG - disable SR */
	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);

	/* Disable all other SR interrupts and clear the status as needed */
	if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
		errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
	sr_modify_reg(sr, ERRCONFIG_V1,
		      (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
		       ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
		      errconf_val);

	/*
	 * Wait for SR to be disabled.
	 * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
	 */
	sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
			      ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
			     timeout);

	if (timeout >= SR_DISABLE_TIMEOUT)
		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
			 __func__);

	/* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
	sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
		      ERRCONFIG_MCUDISACKINTST);
}

/* Disable the SR v2 IP and wait (polling) for the disable ack. */
static void sr_v2_disable(struct omap_sr *sr)
{
	int timeout = 0;

	/* Enable MCUDisableAcknowledge interrupt */
	sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);

	/* SRCONFIG - disable SR */
	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);

	/*
	 * Disable all other SR interrupts and clear the status
	 * write to status register ONLY on need basis - only if status
	 * is set.
	 */
	if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
		sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
			      ERRCONFIG_VPBOUNDINTST_V2);
	else
		sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
			      0x0);
	sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
			IRQENABLE_MCUVALIDINT |
			IRQENABLE_MCUBOUNDSINT));
	sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
			IRQSTATUS_MCVALIDINT |
			IRQSTATUS_MCBOUNDSINT));

	/*
	 * Wait for SR to be disabled.
	 * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
	 */
	sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) &
			      IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
			     timeout);

	if (timeout >= SR_DISABLE_TIMEOUT)
		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
			 __func__);

	/* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
	sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
	sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
}

/* Linear lookup of the ntarget row matching an efuse offset. */
static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row(
				struct omap_sr *sr, u32 efuse_offs)
{
	int i;

	if (!sr->nvalue_table) {
		dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
			 __func__);
		return NULL;
	}

	for (i = 0; i < sr->nvalue_count; i++) {
		if (sr->nvalue_table[i].efuse_offs == efuse_offs)
			return &sr->nvalue_table[i];
	}

	return NULL;
}

/* Public Functions */

/**
 * sr_configure_errgen() - Configures the SmartReflex to perform AVS using the
 *			 error generator module.
 * @sr:			SR module to be configured.
 *
 * This API is to be called from the smartreflex class driver to
 * configure the error generator module inside the smartreflex module.
 * SR settings if using the ERROR module inside Smartreflex.
 * SR CLASS 3 by default uses only the ERROR module where as
 * SR CLASS 2 can choose between ERROR module and MINMAXAVG
 * module. Returns 0 on success and error value in case of failure.
 */
int sr_configure_errgen(struct omap_sr *sr)
{
	u32 sr_config, sr_errconfig, errconfig_offs;
	u32 vpboundint_en, vpboundint_st;
	u32 senp_en = 0, senn_en = 0;
	u8 senp_shift, senn_shift;

	if (!sr) {
		pr_warn("%s: NULL omap_sr from %pF\n", __func__,
			(void *)_RET_IP_);
		return -EINVAL;
	}

	/* Lazily compute clk_length from the fclk rate on first use */
	if (!sr->clk_length)
		sr_set_clk_length(sr);

	senp_en = sr->senp_mod;
	senn_en = sr->senn_mod;

	sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
		SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;

	/* Register layout and shifts differ between the V1 and V2 IPs */
	switch (sr->ip_type) {
	case SR_TYPE_V1:
		sr_config |= SRCONFIG_DELAYCTRL;
		senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
		errconfig_offs = ERRCONFIG_V1;
		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
		break;
	case SR_TYPE_V2:
		senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
		errconfig_offs = ERRCONFIG_V2;
		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			"module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
	sr_write_reg(sr, SRCONFIG, sr_config);
	sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
		(sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
		(sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);
	sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
		SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
		sr_errconfig);

	/* Enabling the interrupts if the ERROR module is used */
	sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
		      vpboundint_en);

	return 0;
}

/**
 * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
 * @sr:			SR module to be configured.
 *
 * This API is to be called from the smartreflex class driver to
 * disable the error generator module inside the smartreflex module.
 *
 * Returns 0 on success and error value in case of failure.
 */
int sr_disable_errgen(struct omap_sr *sr)
{
	u32 errconfig_offs;
	u32 vpboundint_en, vpboundint_st;

	if (!sr) {
		pr_warn("%s: NULL omap_sr from %pF\n", __func__,
			(void *)_RET_IP_);
		return -EINVAL;
	}

	/* Pick the IP-version-specific error-config register and bits */
	switch (sr->ip_type) {
	case SR_TYPE_V1:
		errconfig_offs = ERRCONFIG_V1;
		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
		break;
	case SR_TYPE_V2:
		errconfig_offs = ERRCONFIG_V2;
		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			"module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	/* Disable the Sensor and errorgen */
	sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);

	/*
	 * Disable the interrupts of ERROR module
	 * NOTE: modify is a read, modify,write - an implicit OCP barrier
	 * which is required is present here - sequencing is critical
	 * at this point (after errgen is disabled, vpboundint disable)
	 */
	sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);

	return 0;
}

/**
 * sr_configure_minmax() - Configures the SmartReflex to perform AVS using the
 *			 minmaxavg module.
 * @sr:			SR module to be configured.
 *
 * This API is to be called from the smartreflex class driver to
 * configure the minmaxavg module inside the smartreflex module.
 * SR settings if using the ERROR module inside Smartreflex.
 * SR CLASS 3 by default uses only the ERROR module where as
 * SR CLASS 2 can choose between ERROR module and MINMAXAVG
 * module. Returns 0 on success and error value in case of failure.
 */
int sr_configure_minmax(struct omap_sr *sr)
{
	u32 sr_config, sr_avgwt;
	u32 senp_en = 0, senn_en = 0;
	u8 senp_shift, senn_shift;

	if (!sr) {
		pr_warn("%s: NULL omap_sr from %pF\n", __func__,
			(void *)_RET_IP_);
		return -EINVAL;
	}

	/* Lazily compute clk_length from the fclk rate on first use */
	if (!sr->clk_length)
		sr_set_clk_length(sr);

	senp_en = sr->senp_mod;
	senn_en = sr->senn_mod;

	sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
		SRCONFIG_SENENABLE |
		(sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);

	/* Register layout and shifts differ between the V1 and V2 IPs */
	switch (sr->ip_type) {
	case SR_TYPE_V1:
		sr_config |= SRCONFIG_DELAYCTRL;
		senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
		break;
	case SR_TYPE_V2:
		senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			"module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
	sr_write_reg(sr, SRCONFIG, sr_config);
	sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
		(sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
	sr_write_reg(sr, AVGWEIGHT, sr_avgwt);

	/*
	 * Enabling the interrupts if MINMAXAVG module is used.
	 * TODO: check if all the interrupts are mandatory
	 */
	switch (sr->ip_type) {
	case SR_TYPE_V1:
		sr_modify_reg(sr, ERRCONFIG_V1,
			      (ERRCONFIG_MCUACCUMINTEN |
			       ERRCONFIG_MCUVALIDINTEN |
			       ERRCONFIG_MCUBOUNDINTEN),
			      (ERRCONFIG_MCUACCUMINTEN |
			       ERRCONFIG_MCUACCUMINTST |
			       ERRCONFIG_MCUVALIDINTEN |
			       ERRCONFIG_MCUVALIDINTST |
			       ERRCONFIG_MCUBOUNDINTEN |
			       ERRCONFIG_MCUBOUNDINTST));
		break;
	case SR_TYPE_V2:
		sr_write_reg(sr, IRQSTATUS,
			     IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
			     IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
		sr_write_reg(sr, IRQENABLE_SET,
			     IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
			     IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			"module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	return 0;
}

/**
 * sr_enable() - Enables the smartreflex module.
 * @sr:		pointer to which the SR module to be configured belongs to.
 * @volt:	The voltage at which the Voltage domain associated with
 *		the smartreflex module is operating at.
 *		This is required only to program the correct Ntarget value.
 *
 * This API is to be called from the smartreflex class driver to
 * enable a smartreflex module. Returns 0 on success. Returns error
 * value if the voltage passed is wrong or if ntarget value is wrong.
*/ int sr_enable(struct omap_sr *sr, unsigned long volt) { struct omap_volt_data *volt_data; struct omap_sr_nvalue_table *nvalue_row; int ret; if (!sr) { pr_warn("%s: NULL omap_sr from %pF\n", __func__, (void *)_RET_IP_); return -EINVAL; } volt_data = omap_voltage_get_voltdata(sr->voltdm, volt); if (IS_ERR(volt_data)) { dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table" "for nominal voltage %ld\n", __func__, volt); return PTR_ERR(volt_data); } nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs); if (!nvalue_row) { dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n", __func__, volt); return -ENODATA; } /* errminlimit is opp dependent and hence linked to voltage */ sr->err_minlimit = nvalue_row->errminlimit; pm_runtime_get_sync(&sr->pdev->dev); /* Check if SR is already enabled. If yes do nothing */ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) return 0; /* Configure SR */ ret = sr_class->configure(sr); if (ret) return ret; sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue); /* SRCONFIG - enable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE); return 0; } /** * sr_disable() - Disables the smartreflex module. * @sr: pointer to which the SR module to be configured belongs to. * * This API is to be called from the smartreflex class driver to * disable a smartreflex module. */ void sr_disable(struct omap_sr *sr) { if (!sr) { pr_warn("%s: NULL omap_sr from %pF\n", __func__, (void *)_RET_IP_); return; } /* Check if SR clocks are already disabled. If yes do nothing */ if (pm_runtime_suspended(&sr->pdev->dev)) return; /* * Disable SR if only it is indeed enabled. Else just * disable the clocks. 
*/ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) { switch (sr->ip_type) { case SR_TYPE_V1: sr_v1_disable(sr); break; case SR_TYPE_V2: sr_v2_disable(sr); break; default: dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n", sr->ip_type); } } pm_runtime_put_sync_suspend(&sr->pdev->dev); } /** * sr_register_class() - API to register a smartreflex class parameters. * @class_data: The structure containing various sr class specific data. * * This API is to be called by the smartreflex class driver to register itself * with the smartreflex driver during init. Returns 0 on success else the * error value. */ int sr_register_class(struct omap_sr_class_data *class_data) { struct omap_sr *sr_info; if (!class_data) { pr_warning("%s:, Smartreflex class data passed is NULL\n", __func__); return -EINVAL; } if (sr_class) { pr_warning("%s: Smartreflex class driver already registered\n", __func__); return -EBUSY; } sr_class = class_data; /* * Call into late init to do intializations that require * both sr driver and sr class driver to be initiallized. */ list_for_each_entry(sr_info, &sr_list, node) sr_late_init(sr_info); return 0; } /** * omap_sr_enable() - API to enable SR clocks and to call into the * registered smartreflex class enable API. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the kernel in order to enable * a particular smartreflex module. This API will do the initial * configurations to turn on the smartreflex module and in turn call * into the registered smartreflex class enable API. 
 */
void omap_sr_enable(struct voltagedomain *voltdm)
{
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for voltdm not found\n",
			   __func__);
		return;
	}

	/* Only act if userspace/platform enabled autocompensation */
	if (!sr->autocomp_active)
		return;

	if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
			 "registered\n", __func__);
		return;
	}

	sr_class->enable(sr);
}

/**
 * omap_sr_disable() - API to disable SR without resetting the voltage
 *			processor voltage
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 *
 * This API is to be called from the kernel in order to disable
 * a particular smartreflex module. This API will in turn call
 * into the registered smartreflex class disable API. This API will tell
 * the smartreflex class disable not to reset the VP voltage after
 * disabling smartreflex.
 */
void omap_sr_disable(struct voltagedomain *voltdm)
{
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for voltdm not found\n",
			   __func__);
		return;
	}

	/* Only act if autocompensation is currently active */
	if (!sr->autocomp_active)
		return;

	if (!sr_class || !(sr_class->disable)) {
		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not"
			 "registered\n", __func__);
		return;
	}

	/* Second argument 0: do not reset the VP voltage */
	sr_class->disable(sr, 0);
}

/**
 * omap_sr_disable_reset_volt() - API to disable SR and reset the
 *				voltage processor voltage
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 *
 * This API is to be called from the kernel in order to disable
 * a particular smartreflex module. This API will in turn call
 * into the registered smartreflex class disable API. This API will tell
 * the smartreflex class disable to reset the VP voltage after
 * disabling smartreflex.
*/ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } if (!sr->autocomp_active) return; if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" "registered\n", __func__); return; } sr_class->disable(sr, 1); } /** * omap_sr_register_pmic() - API to register pmic specific info. * @pmic_data: The structure containing pmic specific data. * * This API is to be called from the PMIC specific code to register with * smartreflex driver pmic specific info. Currently the only info required * is the smartreflex init on the PMIC side. */ void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data) { if (!pmic_data) { pr_warning("%s: Trying to register NULL PMIC data structure" "with smartreflex\n", __func__); return; } sr_pmic_data = pmic_data; } /* PM Debug FS entries to enable and disable smartreflex. */ static int omap_sr_autocomp_show(void *data, u64 *val) { struct omap_sr *sr_info = data; if (!sr_info) { pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } *val = sr_info->autocomp_active; return 0; } static int omap_sr_autocomp_store(void *data, u64 val) { struct omap_sr *sr_info = data; if (!sr_info) { pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } /* Sanity check */ if (val > 1) { pr_warning("%s: Invalid argument %lld\n", __func__, val); return -EINVAL; } /* control enable/disable only if there is a delta in value */ if (sr_info->autocomp_active != val) { if (!val) sr_stop_vddautocomp(sr_info); else sr_start_vddautocomp(sr_info); } return 0; } DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show, omap_sr_autocomp_store, "%llu\n"); static int __init omap_sr_probe(struct platform_device *pdev) { struct omap_sr *sr_info; struct omap_sr_data *pdata = pdev->dev.platform_data; struct resource *mem, *irq; struct dentry *nvalue_dir; 
int i, ret = 0; sr_info = devm_kzalloc(&pdev->dev, sizeof(struct omap_sr), GFP_KERNEL); if (!sr_info) { dev_err(&pdev->dev, "%s: unable to allocate sr_info\n", __func__); return -ENOMEM; } sr_info->name = devm_kzalloc(&pdev->dev, SMARTREFLEX_NAME_LEN, GFP_KERNEL); if (!sr_info->name) { dev_err(&pdev->dev, "%s: unable to allocate SR instance name\n", __func__); return -ENOMEM; } platform_set_drvdata(pdev, sr_info); if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); return -EINVAL; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); sr_info->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(sr_info->base)) { dev_err(&pdev->dev, "%s: ioremap fail\n", __func__); return PTR_ERR(sr_info->base); } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); pm_runtime_enable(&pdev->dev); pm_runtime_irq_safe(&pdev->dev); snprintf(sr_info->name, SMARTREFLEX_NAME_LEN, "%s", pdata->name); sr_info->pdev = pdev; sr_info->srid = pdev->id; sr_info->voltdm = pdata->voltdm; sr_info->nvalue_table = pdata->nvalue_table; sr_info->nvalue_count = pdata->nvalue_count; sr_info->senn_mod = pdata->senn_mod; sr_info->senp_mod = pdata->senp_mod; sr_info->err_weight = pdata->err_weight; sr_info->err_maxlimit = pdata->err_maxlimit; sr_info->accum_data = pdata->accum_data; sr_info->senn_avgweight = pdata->senn_avgweight; sr_info->senp_avgweight = pdata->senp_avgweight; sr_info->autocomp_active = false; sr_info->ip_type = pdata->ip_type; if (irq) sr_info->irq = irq->start; sr_set_clk_length(sr_info); list_add(&sr_info->node, &sr_list); /* * Call into late init to do intializations that require * both sr driver and sr class driver to be initiallized. 
*/ if (sr_class) { ret = sr_late_init(sr_info); if (ret) { pr_warning("%s: Error in SR late init\n", __func__); goto err_list_del; } } dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__); if (!sr_dbg_dir) { sr_dbg_dir = debugfs_create_dir("smartreflex", NULL); if (IS_ERR_OR_NULL(sr_dbg_dir)) { ret = PTR_ERR(sr_dbg_dir); pr_err("%s:sr debugfs dir creation failed(%d)\n", __func__, ret); goto err_list_del; } } sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir); if (IS_ERR_OR_NULL(sr_info->dbg_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", __func__); ret = PTR_ERR(sr_info->dbg_dir); goto err_debugfs; } (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir, (void *)sr_info, &pm_sr_fops); (void) debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir, &sr_info->err_weight); (void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir, &sr_info->err_maxlimit); nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir); if (IS_ERR_OR_NULL(nvalue_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory" "for n-values\n", __func__); ret = PTR_ERR(nvalue_dir); goto err_debugfs; } if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) { dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. 
Cannot create debugfs entries for n-values\n", __func__, sr_info->name); ret = -ENODATA; goto err_debugfs; } for (i = 0; i < sr_info->nvalue_count; i++) { char name[NVALUE_NAME_LEN + 1]; snprintf(name, sizeof(name), "volt_%lu", sr_info->nvalue_table[i].volt_nominal); (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].nvalue)); snprintf(name, sizeof(name), "errminlimit_%lu", sr_info->nvalue_table[i].volt_nominal); (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].errminlimit)); } return ret; err_debugfs: debugfs_remove_recursive(sr_info->dbg_dir); err_list_del: list_del(&sr_info->node); return ret; } static int omap_sr_remove(struct platform_device *pdev) { struct omap_sr_data *pdata = pdev->dev.platform_data; struct omap_sr *sr_info; if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); return -EINVAL; } sr_info = _sr_lookup(pdata->voltdm); if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return PTR_ERR(sr_info); } if (sr_info->autocomp_active) sr_stop_vddautocomp(sr_info); if (sr_info->dbg_dir) debugfs_remove_recursive(sr_info->dbg_dir); pm_runtime_disable(&pdev->dev); list_del(&sr_info->node); return 0; } static void omap_sr_shutdown(struct platform_device *pdev) { struct omap_sr_data *pdata = pdev->dev.platform_data; struct omap_sr *sr_info; if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); return; } sr_info = _sr_lookup(pdata->voltdm); if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return; } if (sr_info->autocomp_active) sr_stop_vddautocomp(sr_info); return; } static struct platform_driver smartreflex_driver = { .remove = omap_sr_remove, .shutdown = omap_sr_shutdown, .driver = { .name = DRIVER_NAME, }, }; static int __init sr_init(void) { int ret = 0; /* * sr_init is a late init. 
If by then a pmic specific API is not * registered either there is no need for anything to be done on * the PMIC side or somebody has forgotten to register a PMIC * handler. Warn for the second condition. */ if (sr_pmic_data && sr_pmic_data->sr_pmic_init) sr_pmic_data->sr_pmic_init(); else pr_warning("%s: No PMIC hook to init smartreflex\n", __func__); ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe); if (ret) { pr_err("%s: platform driver register failed for SR\n", __func__); return ret; } return 0; } late_initcall(sr_init); static void __exit sr_exit(void) { platform_driver_unregister(&smartreflex_driver); } module_exit(sr_exit); MODULE_DESCRIPTION("OMAP Smartreflex Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Texas Instruments Inc");
gpl-2.0
Razdroid/razdroid-kernel
drivers/input/keyboard/mpr121_touchkey.c
2399
8818
/* * Touchkey driver for Freescale MPR121 Controllor * * Copyright (C) 2011 Freescale Semiconductor, Inc. * Author: Zhang Jiejing <jiejing.zhang@freescale.com> * * Based on mcs_touchkey.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/interrupt.h> #include <linux/i2c/mpr121_touchkey.h> /* Register definitions */ #define ELE_TOUCH_STATUS_0_ADDR 0x0 #define ELE_TOUCH_STATUS_1_ADDR 0X1 #define MHD_RISING_ADDR 0x2b #define NHD_RISING_ADDR 0x2c #define NCL_RISING_ADDR 0x2d #define FDL_RISING_ADDR 0x2e #define MHD_FALLING_ADDR 0x2f #define NHD_FALLING_ADDR 0x30 #define NCL_FALLING_ADDR 0x31 #define FDL_FALLING_ADDR 0x32 #define ELE0_TOUCH_THRESHOLD_ADDR 0x41 #define ELE0_RELEASE_THRESHOLD_ADDR 0x42 #define AFE_CONF_ADDR 0x5c #define FILTER_CONF_ADDR 0x5d /* * ELECTRODE_CONF_ADDR: This register configures the number of * enabled capacitance sensing inputs and its run/suspend mode. 
*/ #define ELECTRODE_CONF_ADDR 0x5e #define ELECTRODE_CONF_QUICK_CHARGE 0x80 #define AUTO_CONFIG_CTRL_ADDR 0x7b #define AUTO_CONFIG_USL_ADDR 0x7d #define AUTO_CONFIG_LSL_ADDR 0x7e #define AUTO_CONFIG_TL_ADDR 0x7f /* Threshold of touch/release trigger */ #define TOUCH_THRESHOLD 0x08 #define RELEASE_THRESHOLD 0x05 /* Masks for touch and release triggers */ #define TOUCH_STATUS_MASK 0xfff /* MPR121 has 12 keys */ #define MPR121_MAX_KEY_COUNT 12 struct mpr121_touchkey { struct i2c_client *client; struct input_dev *input_dev; unsigned int key_val; unsigned int statusbits; unsigned int keycount; u16 keycodes[MPR121_MAX_KEY_COUNT]; }; struct mpr121_init_register { int addr; u8 val; }; static const struct mpr121_init_register init_reg_table[] = { { MHD_RISING_ADDR, 0x1 }, { NHD_RISING_ADDR, 0x1 }, { MHD_FALLING_ADDR, 0x1 }, { NHD_FALLING_ADDR, 0x1 }, { NCL_FALLING_ADDR, 0xff }, { FDL_FALLING_ADDR, 0x02 }, { FILTER_CONF_ADDR, 0x04 }, { AFE_CONF_ADDR, 0x0b }, { AUTO_CONFIG_CTRL_ADDR, 0x0b }, }; static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id) { struct mpr121_touchkey *mpr121 = dev_id; struct i2c_client *client = mpr121->client; struct input_dev *input = mpr121->input_dev; unsigned int key_num, key_val, pressed; int reg; reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR); if (reg < 0) { dev_err(&client->dev, "i2c read error [%d]\n", reg); goto out; } reg <<= 8; reg |= i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_0_ADDR); if (reg < 0) { dev_err(&client->dev, "i2c read error [%d]\n", reg); goto out; } reg &= TOUCH_STATUS_MASK; /* use old press bit to figure out which bit changed */ key_num = ffs(reg ^ mpr121->statusbits) - 1; pressed = reg & (1 << key_num); mpr121->statusbits = reg; key_val = mpr121->keycodes[key_num]; input_event(input, EV_MSC, MSC_SCAN, key_num); input_report_key(input, key_val, pressed); input_sync(input); dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val, pressed ? 
"pressed" : "released"); out: return IRQ_HANDLED; } static int mpr121_phys_init(const struct mpr121_platform_data *pdata, struct mpr121_touchkey *mpr121, struct i2c_client *client) { const struct mpr121_init_register *reg; unsigned char usl, lsl, tl, eleconf; int i, t, vdd, ret; /* Set up touch/release threshold for ele0-ele11 */ for (i = 0; i <= MPR121_MAX_KEY_COUNT; i++) { t = ELE0_TOUCH_THRESHOLD_ADDR + (i * 2); ret = i2c_smbus_write_byte_data(client, t, TOUCH_THRESHOLD); if (ret < 0) goto err_i2c_write; ret = i2c_smbus_write_byte_data(client, t + 1, RELEASE_THRESHOLD); if (ret < 0) goto err_i2c_write; } /* Set up init register */ for (i = 0; i < ARRAY_SIZE(init_reg_table); i++) { reg = &init_reg_table[i]; ret = i2c_smbus_write_byte_data(client, reg->addr, reg->val); if (ret < 0) goto err_i2c_write; } /* * Capacitance on sensing input varies and needs to be compensated. * The internal MPR121-auto-configuration can do this if it's * registers are set properly (based on pdata->vdd_uv). */ vdd = pdata->vdd_uv / 1000; usl = ((vdd - 700) * 256) / vdd; lsl = (usl * 65) / 100; tl = (usl * 90) / 100; ret = i2c_smbus_write_byte_data(client, AUTO_CONFIG_USL_ADDR, usl); ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_LSL_ADDR, lsl); ret |= i2c_smbus_write_byte_data(client, AUTO_CONFIG_TL_ADDR, tl); /* * Quick charge bit will let the capacitive charge to ready * state quickly, or the buttons may not function after system * boot. 
*/ eleconf = mpr121->keycount | ELECTRODE_CONF_QUICK_CHARGE; ret |= i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR, eleconf); if (ret != 0) goto err_i2c_write; dev_dbg(&client->dev, "set up with %x keys.\n", mpr121->keycount); return 0; err_i2c_write: dev_err(&client->dev, "i2c write error: %d\n", ret); return ret; } static int mpr_touchkey_probe(struct i2c_client *client, const struct i2c_device_id *id) { const struct mpr121_platform_data *pdata = client->dev.platform_data; struct mpr121_touchkey *mpr121; struct input_dev *input_dev; int error; int i; if (!pdata) { dev_err(&client->dev, "no platform data defined\n"); return -EINVAL; } if (!pdata->keymap || !pdata->keymap_size) { dev_err(&client->dev, "missing keymap data\n"); return -EINVAL; } if (pdata->keymap_size > MPR121_MAX_KEY_COUNT) { dev_err(&client->dev, "too many keys defined\n"); return -EINVAL; } if (!client->irq) { dev_err(&client->dev, "irq number should not be zero\n"); return -EINVAL; } mpr121 = kzalloc(sizeof(struct mpr121_touchkey), GFP_KERNEL); input_dev = input_allocate_device(); if (!mpr121 || !input_dev) { dev_err(&client->dev, "Failed to allocate memory\n"); error = -ENOMEM; goto err_free_mem; } mpr121->client = client; mpr121->input_dev = input_dev; mpr121->keycount = pdata->keymap_size; input_dev->name = "Freescale MPR121 Touchkey"; input_dev->id.bustype = BUS_I2C; input_dev->dev.parent = &client->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_dev->keycode = mpr121->keycodes; input_dev->keycodesize = sizeof(mpr121->keycodes[0]); input_dev->keycodemax = mpr121->keycount; for (i = 0; i < pdata->keymap_size; i++) { input_set_capability(input_dev, EV_KEY, pdata->keymap[i]); mpr121->keycodes[i] = pdata->keymap[i]; } error = mpr121_phys_init(pdata, mpr121, client); if (error) { dev_err(&client->dev, "Failed to init register\n"); goto err_free_mem; } error = request_threaded_irq(client->irq, NULL, mpr_touchkey_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 
client->dev.driver->name, mpr121); if (error) { dev_err(&client->dev, "Failed to register interrupt\n"); goto err_free_mem; } error = input_register_device(input_dev); if (error) goto err_free_irq; i2c_set_clientdata(client, mpr121); device_init_wakeup(&client->dev, pdata->wakeup); return 0; err_free_irq: free_irq(client->irq, mpr121); err_free_mem: input_free_device(input_dev); kfree(mpr121); return error; } static int mpr_touchkey_remove(struct i2c_client *client) { struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client); free_irq(client->irq, mpr121); input_unregister_device(mpr121->input_dev); kfree(mpr121); return 0; } #ifdef CONFIG_PM_SLEEP static int mpr_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); if (device_may_wakeup(&client->dev)) enable_irq_wake(client->irq); i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR, 0x00); return 0; } static int mpr_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct mpr121_touchkey *mpr121 = i2c_get_clientdata(client); if (device_may_wakeup(&client->dev)) disable_irq_wake(client->irq); i2c_smbus_write_byte_data(client, ELECTRODE_CONF_ADDR, mpr121->keycount); return 0; } #endif static SIMPLE_DEV_PM_OPS(mpr121_touchkey_pm_ops, mpr_suspend, mpr_resume); static const struct i2c_device_id mpr121_id[] = { { "mpr121_touchkey", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mpr121_id); static struct i2c_driver mpr_touchkey_driver = { .driver = { .name = "mpr121", .owner = THIS_MODULE, .pm = &mpr121_touchkey_pm_ops, }, .id_table = mpr121_id, .probe = mpr_touchkey_probe, .remove = mpr_touchkey_remove, }; module_i2c_driver(mpr_touchkey_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Zhang Jiejing <jiejing.zhang@freescale.com>"); MODULE_DESCRIPTION("Touch Key driver for Freescale MPR121 Chip");
gpl-2.0
auras76/aur-kernel-XZxx
drivers/video/pxa168fb.c
2399
20917
/* * linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller * * Copyright (C) 2008 Marvell International Ltd. * All rights reserved. * * 2009-02-16 adapted from original version for PXA168/910 * Jun Nie <njun@marvell.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/uaccess.h> #include <video/pxa168fb.h> #include "pxa168fb.h" #define DEFAULT_REFRESH 60 /* Hz */ static int determine_best_pix_fmt(struct fb_var_screeninfo *var) { /* * Pseudocolor mode? */ if (var->bits_per_pixel == 8) return PIX_FMT_PSEUDOCOLOR; /* * Check for 565/1555. */ if (var->bits_per_pixel == 16 && var->red.length <= 5 && var->green.length <= 6 && var->blue.length <= 5) { if (var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB565; else return PIX_FMT_BGR565; } if (var->transp.length == 1 && var->green.length <= 5) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB1555; else return PIX_FMT_BGR1555; } /* fall through */ } /* * Check for 888/A888. 
*/ if (var->bits_per_pixel <= 32 && var->red.length <= 8 && var->green.length <= 8 && var->blue.length <= 8) { if (var->bits_per_pixel == 24 && var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB888PACK; else return PIX_FMT_BGR888PACK; } if (var->bits_per_pixel == 32 && var->transp.length == 8) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGBA888; else return PIX_FMT_BGRA888; } else { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB888UNPACK; else return PIX_FMT_BGR888UNPACK; } /* fall through */ } return -EINVAL; } static void set_pix_fmt(struct fb_var_screeninfo *var, int pix_fmt) { switch (pix_fmt) { case PIX_FMT_RGB565: var->bits_per_pixel = 16; var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_BGR565: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 11; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_RGB1555: var->bits_per_pixel = 16; var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case PIX_FMT_BGR1555: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 10; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case PIX_FMT_RGB888PACK: var->bits_per_pixel = 24; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_BGR888PACK: var->bits_per_pixel = 24; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; 
var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_RGBA888: var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIX_FMT_BGRA888: var->bits_per_pixel = 32; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIX_FMT_PSEUDOCOLOR: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; } } static void set_mode(struct pxa168fb_info *fbi, struct fb_var_screeninfo *var, struct fb_videomode *mode, int pix_fmt, int ystretch) { struct fb_info *info = fbi->info; set_pix_fmt(var, pix_fmt); var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = max(var->xres, var->xres_virtual); if (ystretch) var->yres_virtual = info->fix.smem_len / (var->xres_virtual * (var->bits_per_pixel >> 3)); else var->yres_virtual = max(var->yres, var->yres_virtual); var->grayscale = 0; var->accel_flags = FB_ACCEL_NONE; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = FB_VMODE_NONINTERLACED; var->rotate = FB_ROTATE_UR; } static int pxa168fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; int pix_fmt; /* * Determine which pixel format we're going to use. 
*/ pix_fmt = determine_best_pix_fmt(var); if (pix_fmt < 0) return pix_fmt; set_pix_fmt(var, pix_fmt); fbi->pix_fmt = pix_fmt; /* * Basic geometry sanity checks. */ if (var->xoffset + var->xres > var->xres_virtual) return -EINVAL; if (var->yoffset + var->yres > var->yres_virtual) return -EINVAL; if (var->xres + var->right_margin + var->hsync_len + var->left_margin > 2048) return -EINVAL; if (var->yres + var->lower_margin + var->vsync_len + var->upper_margin > 2048) return -EINVAL; /* * Check size of framebuffer. */ if (var->xres_virtual * var->yres_virtual * (var->bits_per_pixel >> 3) > info->fix.smem_len) return -EINVAL; return 0; } /* * The hardware clock divider has an integer and a fractional * stage: * * clk2 = clk_in / integer_divider * clk_out = clk2 * (1 - (fractional_divider >> 12)) * * Calculate integer and fractional divider for given clk_in * and clk_out. */ static void set_clock_divider(struct pxa168fb_info *fbi, const struct fb_videomode *m) { int divider_int; int needed_pixclk; u64 div_result; u32 x = 0; /* * Notice: The field pixclock is used by linux fb * is in pixel second. E.g. struct fb_videomode & * struct fb_var_screeninfo */ /* * Check input values. */ if (!m || !m->pixclock || !m->refresh) { dev_err(fbi->dev, "Input refresh or pixclock is wrong.\n"); return; } /* * Using PLL/AXI clock. */ x = 0x80000000; /* * Calc divider according to refresh rate. */ div_result = 1000000000000ll; do_div(div_result, m->pixclock); needed_pixclk = (u32)div_result; divider_int = clk_get_rate(fbi->clk) / needed_pixclk; /* check whether divisor is too small. */ if (divider_int < 2) { dev_warn(fbi->dev, "Warning: clock source is too slow." "Try smaller resolution\n"); divider_int = 2; } /* * Set setting to reg. */ x |= divider_int; writel(x, fbi->reg_base + LCD_CFG_SCLK_DIV); } static void set_dma_control0(struct pxa168fb_info *fbi) { u32 x; /* * Set bit to enable graphics DMA. 
*/ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); x &= ~CFG_GRA_ENA_MASK; x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0); /* * If we are in a pseudo-color mode, we need to enable * palette lookup. */ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR) x |= 0x10000000; /* * Configure hardware pixel format. */ x &= ~(0xF << 16); x |= (fbi->pix_fmt >> 1) << 16; /* * Check red and blue pixel swap. * 1. source data swap * 2. panel output data swap */ x &= ~(1 << 12); x |= ((fbi->pix_fmt & 1) ^ (fbi->panel_rbswap)) << 12; writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL0); } static void set_dma_control1(struct pxa168fb_info *fbi, int sync) { u32 x; /* * Configure default bits: vsync triggers DMA, gated clock * enable, power save enable, configure alpha registers to * display 100% graphics, and set pixel command. */ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL1); x |= 0x2032ff81; /* * We trigger DMA on the falling edge of vsync if vsync is * active low, or on the rising edge if vsync is active high. */ if (!(sync & FB_SYNC_VERT_HIGH_ACT)) x |= 0x08000000; writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL1); } static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; int pixel_offset; unsigned long addr; pixel_offset = (yoffset * var->xres_virtual) + xoffset; addr = fbi->fb_start_dma + (pixel_offset * (var->bits_per_pixel >> 3)); writel(addr, fbi->reg_base + LCD_CFG_GRA_START_ADDR0); } static void set_dumb_panel_control(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct pxa168fb_mach_info *mi = fbi->dev->platform_data; u32 x; /* * Preserve enable flag. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL) & 0x00000001; x |= (fbi->is_blanked ? 0x7 : mi->dumb_mode) << 28; x |= mi->gpio_output_data << 20; x |= mi->gpio_output_mask << 12; x |= mi->panel_rgb_reverse_lanes ? 0x00000080 : 0; x |= mi->invert_composite_blank ? 
0x00000040 : 0; x |= (info->var.sync & FB_SYNC_COMP_HIGH_ACT) ? 0x00000020 : 0; x |= mi->invert_pix_val_ena ? 0x00000010 : 0; x |= (info->var.sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 0x00000008; x |= (info->var.sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 0x00000004; x |= mi->invert_pixclock ? 0x00000002 : 0; writel(x, fbi->reg_base + LCD_SPU_DUMB_CTRL); } static void set_dumb_screen_dimensions(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *v = &info->var; int x; int y; x = v->xres + v->right_margin + v->hsync_len + v->left_margin; y = v->yres + v->lower_margin + v->vsync_len + v->upper_margin; writel((y << 16) | x, fbi->reg_base + LCD_SPUT_V_H_TOTAL); } static int pxa168fb_set_par(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; struct fb_videomode mode; u32 x; struct pxa168fb_mach_info *mi; mi = fbi->dev->platform_data; /* * Set additional mode info. */ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR) info->fix.visual = FB_VISUAL_PSEUDOCOLOR; else info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; info->fix.ypanstep = var->yres; /* * Disable panel output while we setup the display. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL); writel(x & ~1, fbi->reg_base + LCD_SPU_DUMB_CTRL); /* * Configure global panel parameters. */ writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_V_H_ACTIVE); /* * convet var to video mode */ fb_var_to_videomode(&mode, &info->var); /* Calculate clock divisor. */ set_clock_divider(fbi, &mode); /* Configure dma ctrl regs. */ set_dma_control0(fbi); set_dma_control1(fbi, info->var.sync); /* * Configure graphics DMA parameters. 
*/ x = readl(fbi->reg_base + LCD_CFG_GRA_PITCH); x = (x & ~0xFFFF) | ((var->xres_virtual * var->bits_per_pixel) >> 3); writel(x, fbi->reg_base + LCD_CFG_GRA_PITCH); writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_GRA_HPXL_VLN); writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_GZM_HPXL_VLN); /* * Configure dumb panel ctrl regs & timings. */ set_dumb_panel_control(info); set_dumb_screen_dimensions(info); writel((var->left_margin << 16) | var->right_margin, fbi->reg_base + LCD_SPU_H_PORCH); writel((var->upper_margin << 16) | var->lower_margin, fbi->reg_base + LCD_SPU_V_PORCH); /* * Re-enable panel output. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL); writel(x | 1, fbi->reg_base + LCD_SPU_DUMB_CTRL); return 0; } static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset; } static u32 to_rgb(u16 red, u16 green, u16 blue) { red >>= 8; green >>= 8; blue >>= 8; return (red << 16) | (green << 8) | blue; } static int pxa168fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int trans, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; u32 val; if (info->var.grayscale) red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) { val = chan_to_field(red, &info->var.red); val |= chan_to_field(green, &info->var.green); val |= chan_to_field(blue , &info->var.blue); fbi->pseudo_palette[regno] = val; } if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) { val = to_rgb(red, green, blue); writel(val, fbi->reg_base + LCD_SPU_SRAM_WRDAT); writel(0x8300 | regno, fbi->reg_base + LCD_SPU_SRAM_CTRL); } return 0; } static int pxa168fb_blank(int blank, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; fbi->is_blanked = (blank == FB_BLANK_UNBLANK) ? 
0 : 1; set_dumb_panel_control(info); return 0; } static int pxa168fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { set_graphics_start(info, var->xoffset, var->yoffset); return 0; } static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id) { struct pxa168fb_info *fbi = dev_id; u32 isr = readl(fbi->reg_base + SPU_IRQ_ISR); if ((isr & GRA_FRAME_IRQ0_ENA_MASK)) { writel(isr & (~GRA_FRAME_IRQ0_ENA_MASK), fbi->reg_base + SPU_IRQ_ISR); return IRQ_HANDLED; } return IRQ_NONE; } static struct fb_ops pxa168fb_ops = { .owner = THIS_MODULE, .fb_check_var = pxa168fb_check_var, .fb_set_par = pxa168fb_set_par, .fb_setcolreg = pxa168fb_setcolreg, .fb_blank = pxa168fb_blank, .fb_pan_display = pxa168fb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int pxa168fb_init_mode(struct fb_info *info, struct pxa168fb_mach_info *mi) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; int ret = 0; u32 total_w, total_h, refresh; u64 div_result; const struct fb_videomode *m; /* * Set default value */ refresh = DEFAULT_REFRESH; /* try to find best video mode. */ m = fb_find_best_mode(&info->var, &info->modelist); if (m) fb_videomode_to_var(&info->var, m); /* Init settings. */ var->xres_virtual = var->xres; var->yres_virtual = info->fix.smem_len / (var->xres_virtual * (var->bits_per_pixel >> 3)); dev_dbg(fbi->dev, "pxa168fb: find best mode: res = %dx%d\n", var->xres, var->yres); /* correct pixclock. 
*/ total_w = var->xres + var->left_margin + var->right_margin + var->hsync_len; total_h = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; div_result = 1000000000000ll; do_div(div_result, total_w * total_h * refresh); var->pixclock = (u32)div_result; return ret; } static int pxa168fb_probe(struct platform_device *pdev) { struct pxa168fb_mach_info *mi; struct fb_info *info = 0; struct pxa168fb_info *fbi = 0; struct resource *res; struct clk *clk; int irq, ret; mi = pdev->dev.platform_data; if (mi == NULL) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } clk = clk_get(&pdev->dev, "LCDCLK"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "unable to get LCDCLK"); return PTR_ERR(clk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no IO memory defined\n"); ret = -ENOENT; goto failed_put_clk; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ defined\n"); ret = -ENOENT; goto failed_put_clk; } info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev); if (info == NULL) { ret = -ENOMEM; goto failed_put_clk; } /* Initialize private data */ fbi = info->par; fbi->info = info; fbi->clk = clk; fbi->dev = info->dev = &pdev->dev; fbi->panel_rbswap = mi->panel_rbswap; fbi->is_blanked = 0; fbi->active = mi->active; /* * Initialise static fb parameters. */ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; info->node = -1; strlcpy(info->fix.id, mi->id, 16); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 0; info->fix.ypanstep = 0; info->fix.ywrapstep = 0; info->fix.mmio_start = res->start; info->fix.mmio_len = resource_size(res); info->fix.accel = FB_ACCEL_NONE; info->fbops = &pxa168fb_ops; info->pseudo_palette = fbi->pseudo_palette; /* * Map LCD controller registers. 
*/ fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start, resource_size(res)); if (fbi->reg_base == NULL) { ret = -ENOMEM; goto failed_free_info; } /* * Allocate framebuffer memory. */ info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE); info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len, &fbi->fb_start_dma, GFP_KERNEL); if (info->screen_base == NULL) { ret = -ENOMEM; goto failed_free_info; } info->fix.smem_start = (unsigned long)fbi->fb_start_dma; set_graphics_start(info, 0, 0); /* * Set video mode according to platform data. */ set_mode(fbi, &info->var, mi->modes, mi->pix_fmt, 1); fb_videomode_to_modelist(mi->modes, mi->num_modes, &info->modelist); /* * init video mode data. */ pxa168fb_init_mode(info, mi); /* * Fill in sane defaults. */ ret = pxa168fb_check_var(&info->var, info); if (ret) goto failed_free_fbmem; /* * enable controller clock */ clk_enable(fbi->clk); pxa168fb_set_par(info); /* * Configure default register values. */ writel(0, fbi->reg_base + LCD_SPU_BLANKCOLOR); writel(mi->io_pin_allocation_mode, fbi->reg_base + SPU_IOPAD_CONTROL); writel(0, fbi->reg_base + LCD_CFG_GRA_START_ADDR1); writel(0, fbi->reg_base + LCD_SPU_GRA_OVSA_HPXL_VLN); writel(0, fbi->reg_base + LCD_SPU_SRAM_PARA0); writel(CFG_CSB_256x32(0x1)|CFG_CSB_256x24(0x1)|CFG_CSB_256x8(0x1), fbi->reg_base + LCD_SPU_SRAM_PARA1); /* * Allocate color map. */ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { ret = -ENOMEM; goto failed_free_clk; } /* * Register irq handler. */ ret = devm_request_irq(&pdev->dev, irq, pxa168fb_handle_irq, IRQF_SHARED, info->fix.id, fbi); if (ret < 0) { dev_err(&pdev->dev, "unable to request IRQ\n"); ret = -ENXIO; goto failed_free_cmap; } /* * Enable GFX interrupt */ writel(GRA_FRAME_IRQ0_ENA(0x1), fbi->reg_base + SPU_IRQ_ENA); /* * Register framebuffer. 
*/ ret = register_framebuffer(info); if (ret < 0) { dev_err(&pdev->dev, "Failed to register pxa168-fb: %d\n", ret); ret = -ENXIO; goto failed_free_cmap; } platform_set_drvdata(pdev, fbi); return 0; failed_free_cmap: fb_dealloc_cmap(&info->cmap); failed_free_clk: clk_disable(fbi->clk); failed_free_fbmem: dma_free_coherent(fbi->dev, info->fix.smem_len, info->screen_base, fbi->fb_start_dma); failed_free_info: kfree(info); failed_put_clk: clk_put(clk); dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret); return ret; } static int pxa168fb_remove(struct platform_device *pdev) { struct pxa168fb_info *fbi = platform_get_drvdata(pdev); struct fb_info *info; int irq; unsigned int data; if (!fbi) return 0; /* disable DMA transfer */ data = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); data &= ~CFG_GRA_ENA_MASK; writel(data, fbi->reg_base + LCD_SPU_DMA_CTRL0); info = fbi->info; unregister_framebuffer(info); writel(GRA_FRAME_IRQ0_ENA(0x0), fbi->reg_base + SPU_IRQ_ENA); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); irq = platform_get_irq(pdev, 0); dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len), info->screen_base, info->fix.smem_start); clk_disable(fbi->clk); clk_put(fbi->clk); framebuffer_release(info); return 0; } static struct platform_driver pxa168fb_driver = { .driver = { .name = "pxa168-fb", .owner = THIS_MODULE, }, .probe = pxa168fb_probe, .remove = pxa168fb_remove, }; module_platform_driver(pxa168fb_driver); MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> " "Green Wan <gwan@marvell.com>"); MODULE_DESCRIPTION("Framebuffer driver for PXA168/910"); MODULE_LICENSE("GPL");
gpl-2.0
NinjahMeh/android_kernel_huawei_angler
drivers/message/i2o/i2o_block.c
3167
31536
/* * Block OSM * * Copyright (C) 1999-2002 Red Hat Software * * Written by Alan Cox, Building Number Three Ltd * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * For the purpose of avoiding doubt the preferred form of the work * for making modifications shall be a standards compliant form such * gzipped tar and not one requiring a proprietary or patent encumbered * tool to unpack. * * Fixes/additions: * Steve Ralston: * Multiple device handling error fixes, * Added a queue depth. * Alan Cox: * FC920 has an rmw bug. Dont or in the end marker. * Removed queue walk, fixed for 64bitness. * Rewrote much of the code over time * Added indirect block lists * Handle 64K limits on many controllers * Don't use indirects on the Promise (breaks) * Heavily chop down the queue depths * Deepak Saxena: * Independent queues per IOP * Support for dynamic device creation/deletion * Code cleanup * Support for larger I/Os through merge* functions * (taken from DAC960 driver) * Boji T Kannanthanam: * Set the I2O Block devices to be detected in increasing * order of TIDs during boot. * Search and set the I2O block device that we boot off * from as the first device to be claimed (as /dev/i2o/hda) * Properly attach/detach I2O gendisk structure from the * system gendisk list. The I2O block devices now appear in * /proc/partitions. * Markus Lidel <Markus.Lidel@shadowconnect.com>: * Minor bugfixes for 2.6. 
*/ #include <linux/module.h> #include <linux/slab.h> #include <linux/i2o.h> #include <linux/mutex.h> #include <linux/mempool.h> #include <linux/genhd.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <scsi/scsi.h> #include "i2o_block.h" #define OSM_NAME "block-osm" #define OSM_VERSION "1.325" #define OSM_DESCRIPTION "I2O Block Device OSM" static DEFINE_MUTEX(i2o_block_mutex); static struct i2o_driver i2o_block_driver; /* global Block OSM request mempool */ static struct i2o_block_mempool i2o_blk_req_pool; /* Block OSM class handling definition */ static struct i2o_class_id i2o_block_class_id[] = { {I2O_CLASS_RANDOM_BLOCK_STORAGE}, {I2O_CLASS_END} }; /** * i2o_block_device_free - free the memory of the I2O Block device * @dev: I2O Block device, which should be cleaned up * * Frees the request queue, gendisk and the i2o_block_device structure. */ static void i2o_block_device_free(struct i2o_block_device *dev) { blk_cleanup_queue(dev->gd->queue); put_disk(dev->gd); kfree(dev); }; /** * i2o_block_remove - remove the I2O Block device from the system again * @dev: I2O Block device which should be removed * * Remove gendisk from system and free all allocated memory. * * Always returns 0. */ static int i2o_block_remove(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(dev); struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev); osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid, i2o_blk_dev->gd->disk_name); i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0); del_gendisk(i2o_blk_dev->gd); dev_set_drvdata(dev, NULL); i2o_device_claim_release(i2o_dev); i2o_block_device_free(i2o_blk_dev); return 0; }; /** * i2o_block_device flush - Flush all dirty data of I2O device dev * @dev: I2O device which should be flushed * * Flushes all dirty data on device dev. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_device_flush(struct i2o_device *dev) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(60 << 16); osm_debug("Flushing...\n"); return i2o_msg_post_wait(dev->iop, msg, 60); }; /** * i2o_block_device_mount - Mount (load) the media of device dev * @dev: I2O device which should receive the mount request * @media_id: Media Identifier * * Load a media into drive. Identifier should be set to -1, because the * spec does not support any other value. * * Returns 0 on success or negative error code on failure. */ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(-1); msg->body[1] = cpu_to_le32(0x00000000); osm_debug("Mounting...\n"); return i2o_msg_post_wait(dev->iop, msg, 2); }; /** * i2o_block_device_lock - Locks the media of device dev * @dev: I2O device which should receive the lock request * @media_id: Media Identifier * * Lock media of device dev to prevent removal. The media identifier * should be set to -1, because the spec does not support any other value. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(-1); osm_debug("Locking...\n"); return i2o_msg_post_wait(dev->iop, msg, 2); }; /** * i2o_block_device_unlock - Unlocks the media of device dev * @dev: I2O device which should receive the unlocked request * @media_id: Media Identifier * * Unlocks the media in device dev. The media identifier should be set to * -1, because the spec does not support any other value. * * Returns 0 on success or negative error code on failure. */ static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) { struct i2o_message *msg; msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev-> lct_data.tid); msg->body[0] = cpu_to_le32(media_id); osm_debug("Unlocking...\n"); return i2o_msg_post_wait(dev->iop, msg, 2); }; /** * i2o_block_device_power - Power management for device dev * @dev: I2O device which should receive the power management request * @op: Operation to send * * Send a power management request to the device dev. * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op) { struct i2o_device *i2o_dev = dev->i2o_dev; struct i2o_controller *c = i2o_dev->iop; struct i2o_message *msg; int rc; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev-> lct_data.tid); msg->body[0] = cpu_to_le32(op << 24); osm_debug("Power...\n"); rc = i2o_msg_post_wait(c, msg, 60); if (!rc) dev->power = op; return rc; }; /** * i2o_block_request_alloc - Allocate an I2O block request struct * * Allocates an I2O block request struct and initialize the list. * * Returns a i2o_block_request pointer on success or negative error code * on failure. */ static inline struct i2o_block_request *i2o_block_request_alloc(void) { struct i2o_block_request *ireq; ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC); if (!ireq) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&ireq->queue); sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS); return ireq; }; /** * i2o_block_request_free - Frees a I2O block request * @ireq: I2O block request which should be freed * * Frees the allocated memory (give it back to the request mempool). */ static inline void i2o_block_request_free(struct i2o_block_request *ireq) { mempool_free(ireq, i2o_blk_req_pool.pool); }; /** * i2o_block_sglist_alloc - Allocate the SG list and map it * @c: I2O controller to which the request belongs * @ireq: I2O block request * @mptr: message body pointer * * Builds the SG list and map it to be accessible by the controller. * * Returns 0 on failure or 1 on success. 
*/ static inline int i2o_block_sglist_alloc(struct i2o_controller *c, struct i2o_block_request *ireq, u32 ** mptr) { int nents; enum dma_data_direction direction; ireq->dev = &c->pdev->dev; nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); if (rq_data_dir(ireq->req) == READ) direction = PCI_DMA_FROMDEVICE; else direction = PCI_DMA_TODEVICE; ireq->sg_nents = nents; return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr); }; /** * i2o_block_sglist_free - Frees the SG list * @ireq: I2O block request from which the SG should be freed * * Frees the SG list from the I2O block request. */ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) { enum dma_data_direction direction; if (rq_data_dir(ireq->req) == READ) direction = PCI_DMA_FROMDEVICE; else direction = PCI_DMA_TODEVICE; dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction); }; /** * i2o_block_prep_req_fn - Allocates I2O block device specific struct * @q: request queue for the request * @req: the request to prepare * * Allocate the necessary i2o_block_request struct and connect it to * the request. This is needed that we not lose the SG list later on. * * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure. 
*/ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) { struct i2o_block_device *i2o_blk_dev = q->queuedata; struct i2o_block_request *ireq; if (unlikely(!i2o_blk_dev)) { osm_err("block device already removed\n"); return BLKPREP_KILL; } /* connect the i2o_block_request to the request */ if (!req->special) { ireq = i2o_block_request_alloc(); if (IS_ERR(ireq)) { osm_debug("unable to allocate i2o_block_request!\n"); return BLKPREP_DEFER; } ireq->i2o_blk_dev = i2o_blk_dev; req->special = ireq; ireq->req = req; } /* do not come back here */ req->cmd_flags |= REQ_DONTPREP; return BLKPREP_OK; }; /** * i2o_block_delayed_request_fn - delayed request queue function * @work: the delayed request with the queue to start * * If the request queue is stopped for a disk, and there is no open * request, a new event is created, which calls this function to start * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never * be started again. */ static void i2o_block_delayed_request_fn(struct work_struct *work) { struct i2o_block_delayed_request *dreq = container_of(work, struct i2o_block_delayed_request, work.work); struct request_queue *q = dreq->queue; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); kfree(dreq); }; /** * i2o_block_end_request - Post-processing of completed commands * @req: request which should be completed * @error: 0 for success, < 0 for error * @nr_bytes: number of bytes to complete * * Mark the request as complete. The lock must not be held when entering. 
* */ static void i2o_block_end_request(struct request *req, int error, int nr_bytes) { struct i2o_block_request *ireq = req->special; struct i2o_block_device *dev = ireq->i2o_blk_dev; struct request_queue *q = req->q; unsigned long flags; if (blk_end_request(req, error, nr_bytes)) if (error) blk_end_request_all(req, -EIO); spin_lock_irqsave(q->queue_lock, flags); if (likely(dev)) { dev->open_queue_depth--; list_del(&ireq->queue); } blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); i2o_block_sglist_free(ireq); i2o_block_request_free(ireq); }; /** * i2o_block_reply - Block OSM reply handler. * @c: I2O controller from which the message arrives * @m: message id of reply * @msg: the actual I2O message reply * * This function gets all the message replies. * */ static int i2o_block_reply(struct i2o_controller *c, u32 m, struct i2o_message *msg) { struct request *req; int error = 0; req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); if (unlikely(!req)) { osm_err("NULL reply received!\n"); return -1; } /* * Lets see what is cooking. We stuffed the * request in the context. */ if ((le32_to_cpu(msg->body[0]) >> 24) != 0) { u32 status = le32_to_cpu(msg->body[0]); /* * Device not ready means two things. One is that the * the thing went offline (but not a removal media) * * The second is that you have a SuperTrak 100 and the * firmware got constipated. Unlike standard i2o card * setups the supertrak returns an error rather than * blocking for the timeout in these cases. 
* * Don't stick a supertrak100 into cache aggressive modes */ osm_err("TID %03x error status: 0x%02x, detailed status: " "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), status >> 24, status & 0xffff); req->errors++; error = -EIO; } i2o_block_end_request(req, error, le32_to_cpu(msg->body[1])); return 1; }; static void i2o_block_event(struct work_struct *work) { struct i2o_event *evt = container_of(work, struct i2o_event, work); osm_debug("event received\n"); kfree(evt); }; /* * SCSI-CAM for ioctl geometry mapping * Duplicated with SCSI - this should be moved into somewhere common * perhaps genhd ? * * LBA -> CHS mapping table taken from: * * "Incorporating the I2O Architecture into BIOS for Intel Architecture * Platforms" * * This is an I2O document that is only available to I2O members, * not developers. * * From my understanding, this is how all the I2O cards do this * * Disk Size | Sectors | Heads | Cylinders * ---------------+---------+-------+------------------- * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) * */ #define BLOCK_SIZE_528M 1081344 #define BLOCK_SIZE_1G 2097152 #define BLOCK_SIZE_21G 4403200 #define BLOCK_SIZE_42G 8806400 #define BLOCK_SIZE_84G 17612800 static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls, unsigned char *hds, unsigned char *secs) { unsigned long heads, sectors, cylinders; sectors = 63L; /* Maximize sectors per track */ if (capacity <= BLOCK_SIZE_528M) heads = 16; else if (capacity <= BLOCK_SIZE_1G) heads = 32; else if (capacity <= BLOCK_SIZE_21G) heads = 64; else if (capacity <= BLOCK_SIZE_42G) heads = 128; else heads = 255; cylinders = (unsigned long)capacity / (heads * sectors); *cyls = (unsigned short)cylinders; /* Stuff return values */ *secs = (unsigned char)sectors; *hds = (unsigned char)heads; } /** * i2o_block_open - Open the block device * @bdev: block device 
being opened * @mode: file open mode * * Power up the device, mount and lock the media. This function is called, * if the block device is opened for access. * * Returns 0 on success or negative error code on failure. */ static int i2o_block_open(struct block_device *bdev, fmode_t mode) { struct i2o_block_device *dev = bdev->bd_disk->private_data; if (!dev->i2o_dev) return -ENODEV; mutex_lock(&i2o_block_mutex); if (dev->power > 0x1f) i2o_block_device_power(dev, 0x02); i2o_block_device_mount(dev->i2o_dev, -1); i2o_block_device_lock(dev->i2o_dev, -1); osm_debug("Ready.\n"); mutex_unlock(&i2o_block_mutex); return 0; }; /** * i2o_block_release - Release the I2O block device * @disk: gendisk device being released * @mode: file open mode * * Unlock and unmount the media, and power down the device. Gets called if * the block device is closed. */ static void i2o_block_release(struct gendisk *disk, fmode_t mode) { struct i2o_block_device *dev = disk->private_data; u8 operation; /* * This is to deal with the case of an application * opening a device and then the device disappears while * it's in use, and then the application tries to release * it. ex: Unmounting a deleted RAID volume at reboot. * If we send messages, it will just cause FAILs since * the TID no longer exists. */ if (!dev->i2o_dev) return; mutex_lock(&i2o_block_mutex); i2o_block_device_flush(dev->i2o_dev); i2o_block_device_unlock(dev->i2o_dev, -1); if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */ operation = 0x21; else operation = 0x24; i2o_block_device_power(dev, operation); mutex_unlock(&i2o_block_mutex); } static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) { i2o_block_biosparam(get_capacity(bdev->bd_disk), &geo->cylinders, &geo->heads, &geo->sectors); return 0; } /** * i2o_block_ioctl - Issue device specific ioctl calls. * @bdev: block device being opened * @mode: file open mode * @cmd: ioctl command * @arg: arg * * Handles ioctl request for the block device. 
* * Return 0 on success or negative error on failure. */ static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; struct i2o_block_device *dev = disk->private_data; int ret = -ENOTTY; /* Anyone capable of this syscall can do *real bad* things */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&i2o_block_mutex); switch (cmd) { case BLKI2OGRSTRAT: ret = put_user(dev->rcache, (int __user *)arg); break; case BLKI2OGWSTRAT: ret = put_user(dev->wcache, (int __user *)arg); break; case BLKI2OSRSTRAT: ret = -EINVAL; if (arg < 0 || arg > CACHE_SMARTFETCH) break; dev->rcache = arg; ret = 0; break; case BLKI2OSWSTRAT: ret = -EINVAL; if (arg != 0 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK)) break; dev->wcache = arg; ret = 0; break; } mutex_unlock(&i2o_block_mutex); return ret; }; /** * i2o_block_check_events - Have we seen a media change? * @disk: gendisk which should be verified * @clearing: events being cleared * * Verifies if the media has changed. * * Returns 1 if the media was changed or 0 otherwise. */ static unsigned int i2o_block_check_events(struct gendisk *disk, unsigned int clearing) { struct i2o_block_device *p = disk->private_data; if (p->media_change_flag) { p->media_change_flag = 0; return DISK_EVENT_MEDIA_CHANGE; } return 0; } /** * i2o_block_transfer - Transfer a request to/from the I2O controller * @req: the request which should be transferred * * This function converts the request into a I2O message. The necessary * DMA buffers are allocated and after everything is setup post the message * to the I2O controller. No cleanup is done by this function. It is done * on the interrupt side when the reply arrives. * * Return 0 on success or negative error code on failure. 
*/ static int i2o_block_transfer(struct request *req) { struct i2o_block_device *dev = req->rq_disk->private_data; struct i2o_controller *c; u32 tid; struct i2o_message *msg; u32 *mptr; struct i2o_block_request *ireq = req->special; u32 tcntxt; u32 sgl_offset = SGL_OFFSET_8; u32 ctl_flags = 0x00000000; int rc; u32 cmd; if (unlikely(!dev->i2o_dev)) { osm_err("transfer to removed drive\n"); rc = -ENODEV; goto exit; } tid = dev->i2o_dev->lct_data.tid; c = dev->i2o_dev->iop; msg = i2o_msg_get(c); if (IS_ERR(msg)) { rc = PTR_ERR(msg); goto exit; } tcntxt = i2o_cntxt_list_add(c, req); if (!tcntxt) { rc = -ENOMEM; goto nop_msg; } msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context); msg->u.s.tcntxt = cpu_to_le32(tcntxt); mptr = &msg->body[0]; if (rq_data_dir(req) == READ) { cmd = I2O_CMD_BLOCK_READ << 24; switch (dev->rcache) { case CACHE_PREFETCH: ctl_flags = 0x201F0008; break; case CACHE_SMARTFETCH: if (blk_rq_sectors(req) > 16) ctl_flags = 0x201F0008; else ctl_flags = 0x001F0000; break; default: break; } } else { cmd = I2O_CMD_BLOCK_WRITE << 24; switch (dev->wcache) { case CACHE_WRITETHROUGH: ctl_flags = 0x001F0008; break; case CACHE_WRITEBACK: ctl_flags = 0x001F0010; break; case CACHE_SMARTBACK: if (blk_rq_sectors(req) > 16) ctl_flags = 0x001F0004; else ctl_flags = 0x001F0010; break; case CACHE_SMARTTHROUGH: if (blk_rq_sectors(req) > 16) ctl_flags = 0x001F0004; else ctl_flags = 0x001F0010; default: break; } } #ifdef CONFIG_I2O_EXT_ADAPTEC if (c->adaptec) { u8 cmd[10]; u32 scsi_flags; u16 hwsec; hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT; memset(cmd, 0, 10); sgl_offset = SGL_OFFSET_12; msg->u.head[1] = cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid); *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); *mptr++ = cpu_to_le32(tid); /* * ENABLE_DISCONNECT * SIMPLE_TAG * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME */ if (rq_data_dir(req) == READ) { cmd[0] = READ_10; scsi_flags = 0x60a0000a; } else { cmd[0] = WRITE_10; scsi_flags = 
0xa0a0000a; } *mptr++ = cpu_to_le32(scsi_flags); *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec); *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec); memcpy(mptr, cmd, 10); mptr += 4; *mptr++ = cpu_to_le32(blk_rq_bytes(req)); } else #endif { msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); *mptr++ = cpu_to_le32(ctl_flags); *mptr++ = cpu_to_le32(blk_rq_bytes(req)); *mptr++ = cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT)); *mptr++ = cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT)); } if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { rc = -ENOMEM; goto context_remove; } msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); list_add_tail(&ireq->queue, &dev->open_queue); dev->open_queue_depth++; i2o_msg_post(c, msg); return 0; context_remove: i2o_cntxt_list_remove(c, req); nop_msg: i2o_msg_nop(c, msg); exit: return rc; }; /** * i2o_block_request_fn - request queue handling function * @q: request queue from which the request could be fetched * * Takes the next request from the queue, transfers it and if no error * occurs dequeue it from the queue. On arrival of the reply the message * will be processed further. If an error occurs requeue the request. 
*/ static void i2o_block_request_fn(struct request_queue *q) { struct request *req; while ((req = blk_peek_request(q)) != NULL) { if (req->cmd_type == REQ_TYPE_FS) { struct i2o_block_delayed_request *dreq; struct i2o_block_request *ireq = req->special; unsigned int queue_depth; queue_depth = ireq->i2o_blk_dev->open_queue_depth; if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { if (!i2o_block_transfer(req)) { blk_start_request(req); continue; } else osm_info("transfer error\n"); } if (queue_depth) break; /* stop the queue and retry later */ dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC); if (!dreq) continue; dreq->queue = q; INIT_DELAYED_WORK(&dreq->work, i2o_block_delayed_request_fn); if (!queue_delayed_work(i2o_block_driver.event_queue, &dreq->work, I2O_BLOCK_RETRY_TIME)) kfree(dreq); else { blk_stop_queue(q); break; } } else { blk_start_request(req); __blk_end_request_all(req, -EIO); } } }; /* I2O Block device operations definition */ static const struct block_device_operations i2o_block_fops = { .owner = THIS_MODULE, .open = i2o_block_open, .release = i2o_block_release, .ioctl = i2o_block_ioctl, .compat_ioctl = i2o_block_ioctl, .getgeo = i2o_block_getgeo, .check_events = i2o_block_check_events, }; /** * i2o_block_device_alloc - Allocate memory for a I2O Block device * * Allocate memory for the i2o_block_device struct, gendisk and request * queue and initialize them as far as no additional information is needed. * * Returns a pointer to the allocated I2O Block device on success or a * negative error code on failure. 
*/ static struct i2o_block_device *i2o_block_device_alloc(void) { struct i2o_block_device *dev; struct gendisk *gd; struct request_queue *queue; int rc; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { osm_err("Insufficient memory to allocate I2O Block disk.\n"); rc = -ENOMEM; goto exit; } INIT_LIST_HEAD(&dev->open_queue); spin_lock_init(&dev->lock); dev->rcache = CACHE_PREFETCH; dev->wcache = CACHE_WRITEBACK; /* allocate a gendisk with 16 partitions */ gd = alloc_disk(16); if (!gd) { osm_err("Insufficient memory to allocate gendisk.\n"); rc = -ENOMEM; goto cleanup_dev; } /* initialize the request queue */ queue = blk_init_queue(i2o_block_request_fn, &dev->lock); if (!queue) { osm_err("Insufficient memory to allocate request queue.\n"); rc = -ENOMEM; goto cleanup_queue; } blk_queue_prep_rq(queue, i2o_block_prep_req_fn); gd->major = I2O_MAJOR; gd->queue = queue; gd->fops = &i2o_block_fops; gd->private_data = dev; dev->gd = gd; return dev; cleanup_queue: put_disk(gd); cleanup_dev: kfree(dev); exit: return ERR_PTR(rc); }; /** * i2o_block_probe - verify if dev is a I2O Block device and install it * @dev: device to verify if it is a I2O Block device * * We only verify if the user_tid of the device is 0xfff and then install * the device. Otherwise it is used by some other device (e. g. RAID). * * Returns 0 on success or negative error code on failure. 
*/ static int i2o_block_probe(struct device *dev) { struct i2o_device *i2o_dev = to_i2o_device(dev); struct i2o_controller *c = i2o_dev->iop; struct i2o_block_device *i2o_blk_dev; struct gendisk *gd; struct request_queue *queue; static int unit = 0; int rc; u64 size; u32 blocksize; u16 body_size = 4; u16 power; unsigned short max_sectors; #ifdef CONFIG_I2O_EXT_ADAPTEC if (c->adaptec) body_size = 8; #endif if (c->limit_sectors) max_sectors = I2O_MAX_SECTORS_LIMITED; else max_sectors = I2O_MAX_SECTORS; /* skip devices which are used by IOP */ if (i2o_dev->lct_data.user_tid != 0xfff) { osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid); return -ENODEV; } if (i2o_device_claim(i2o_dev)) { osm_warn("Unable to claim device. Installation aborted\n"); rc = -EFAULT; goto exit; } i2o_blk_dev = i2o_block_device_alloc(); if (IS_ERR(i2o_blk_dev)) { osm_err("could not alloc a new I2O block device"); rc = PTR_ERR(i2o_blk_dev); goto claim_release; } i2o_blk_dev->i2o_dev = i2o_dev; dev_set_drvdata(dev, i2o_blk_dev); /* setup gendisk */ gd = i2o_blk_dev->gd; gd->first_minor = unit << 4; sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit); gd->driverfs_dev = &i2o_dev->device; /* setup request queue */ queue = gd->queue; queue->queuedata = i2o_blk_dev; blk_queue_max_hw_sectors(queue, max_sectors); blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size)); osm_debug("max sectors = %d\n", queue->max_sectors); osm_debug("phys segments = %d\n", queue->max_phys_segments); osm_debug("max hw segments = %d\n", queue->max_hw_segments); /* * Ask for the current media data. 
If that isn't supported * then we ask for the device capacity data */ if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); } else osm_warn("unable to get blocksize of %s\n", gd->disk_name); if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT); } else osm_warn("could not get size of %s\n", gd->disk_name); if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) i2o_blk_dev->power = power; i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); add_disk(gd); unit++; osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid, i2o_blk_dev->gd->disk_name); return 0; claim_release: i2o_device_claim_release(i2o_dev); exit: return rc; }; /* Block OSM driver struct */ static struct i2o_driver i2o_block_driver = { .name = OSM_NAME, .event = i2o_block_event, .reply = i2o_block_reply, .classes = i2o_block_class_id, .driver = { .probe = i2o_block_probe, .remove = i2o_block_remove, }, }; /** * i2o_block_init - Block OSM initialization function * * Allocate the slab and mempool for request structs, registers i2o_block * block device and finally register the Block OSM in the I2O core. * * Returns 0 on success or negative error code on failure. 
*/ static int __init i2o_block_init(void) { int rc; int size; printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); /* Allocate request mempool and slab */ size = sizeof(struct i2o_block_request); i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!i2o_blk_req_pool.slab) { osm_err("can't init request slab\n"); rc = -ENOMEM; goto exit; } i2o_blk_req_pool.pool = mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE, i2o_blk_req_pool.slab); if (!i2o_blk_req_pool.pool) { osm_err("can't init request mempool\n"); rc = -ENOMEM; goto free_slab; } /* Register the block device interfaces */ rc = register_blkdev(I2O_MAJOR, "i2o_block"); if (rc) { osm_err("unable to register block device\n"); goto free_mempool; } #ifdef MODULE osm_info("registered device at major %d\n", I2O_MAJOR); #endif /* Register Block OSM into I2O core */ rc = i2o_driver_register(&i2o_block_driver); if (rc) { osm_err("Could not register Block driver\n"); goto unregister_blkdev; } return 0; unregister_blkdev: unregister_blkdev(I2O_MAJOR, "i2o_block"); free_mempool: mempool_destroy(i2o_blk_req_pool.pool); free_slab: kmem_cache_destroy(i2o_blk_req_pool.slab); exit: return rc; }; /** * i2o_block_exit - Block OSM exit function * * Unregisters Block OSM from I2O core, unregisters i2o_block block device * and frees the mempool and slab. */ static void __exit i2o_block_exit(void) { /* Unregister I2O Block OSM from I2O core */ i2o_driver_unregister(&i2o_block_driver); /* Unregister block device */ unregister_blkdev(I2O_MAJOR, "i2o_block"); /* Free request mempool and slab */ mempool_destroy(i2o_blk_req_pool.pool); kmem_cache_destroy(i2o_blk_req_pool.slab); }; MODULE_AUTHOR("Red Hat"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(OSM_DESCRIPTION); MODULE_VERSION(OSM_VERSION); module_init(i2o_block_init); module_exit(i2o_block_exit);
gpl-2.0
imoseyon/leanKernel-i500-gingerbread
sound/core/seq/oss/seq_oss_init.c
3935
12273
/* * OSS compatible sequencer driver * * open/close and reset interface * * Copyright (C) 1998-1999 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "seq_oss_device.h" #include "seq_oss_synth.h" #include "seq_oss_midi.h" #include "seq_oss_writeq.h" #include "seq_oss_readq.h" #include "seq_oss_timer.h" #include "seq_oss_event.h" #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/slab.h> /* * common variables */ static int maxqlen = SNDRV_SEQ_OSS_MAX_QLEN; module_param(maxqlen, int, 0444); MODULE_PARM_DESC(maxqlen, "maximum queue length"); static int system_client = -1; /* ALSA sequencer client number */ static int system_port = -1; static int num_clients; static struct seq_oss_devinfo *client_table[SNDRV_SEQ_OSS_MAX_CLIENTS]; /* * prototypes */ static int receive_announce(struct snd_seq_event *ev, int direct, void *private, int atomic, int hop); static int translate_mode(struct file *file); static int create_port(struct seq_oss_devinfo *dp); static int delete_port(struct seq_oss_devinfo *dp); static int alloc_seq_queue(struct seq_oss_devinfo *dp); static int delete_seq_queue(int queue); static void free_devinfo(void *private); #define call_ctl(type,rec) snd_seq_kernel_client_ctl(system_client, type, rec) /* * create sequencer client for OSS sequencer */ int __init 
snd_seq_oss_create_client(void) { int rc; struct snd_seq_port_info *port; struct snd_seq_port_callback port_callback; port = kmalloc(sizeof(*port), GFP_KERNEL); if (!port) { rc = -ENOMEM; goto __error; } /* create ALSA client */ rc = snd_seq_create_kernel_client(NULL, SNDRV_SEQ_CLIENT_OSS, "OSS sequencer"); if (rc < 0) goto __error; system_client = rc; debug_printk(("new client = %d\n", rc)); /* look up midi devices */ snd_seq_oss_midi_lookup_ports(system_client); /* create annoucement receiver port */ memset(port, 0, sizeof(*port)); strcpy(port->name, "Receiver"); port->addr.client = system_client; port->capability = SNDRV_SEQ_PORT_CAP_WRITE; /* receive only */ port->type = 0; memset(&port_callback, 0, sizeof(port_callback)); /* don't set port_callback.owner here. otherwise the module counter * is incremented and we can no longer release the module.. */ port_callback.event_input = receive_announce; port->kernel = &port_callback; call_ctl(SNDRV_SEQ_IOCTL_CREATE_PORT, port); if ((system_port = port->addr.port) >= 0) { struct snd_seq_port_subscribe subs; memset(&subs, 0, sizeof(subs)); subs.sender.client = SNDRV_SEQ_CLIENT_SYSTEM; subs.sender.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE; subs.dest.client = system_client; subs.dest.port = system_port; call_ctl(SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, &subs); } rc = 0; __error: kfree(port); return rc; } /* * receive annoucement from system port, and check the midi device */ static int receive_announce(struct snd_seq_event *ev, int direct, void *private, int atomic, int hop) { struct snd_seq_port_info pinfo; if (atomic) return 0; /* it must not happen */ switch (ev->type) { case SNDRV_SEQ_EVENT_PORT_START: case SNDRV_SEQ_EVENT_PORT_CHANGE: if (ev->data.addr.client == system_client) break; /* ignore myself */ memset(&pinfo, 0, sizeof(pinfo)); pinfo.addr = ev->data.addr; if (call_ctl(SNDRV_SEQ_IOCTL_GET_PORT_INFO, &pinfo) >= 0) snd_seq_oss_midi_check_new_port(&pinfo); break; case SNDRV_SEQ_EVENT_PORT_EXIT: if (ev->data.addr.client == 
system_client) break; /* ignore myself */ snd_seq_oss_midi_check_exit_port(ev->data.addr.client, ev->data.addr.port); break; } return 0; } /* * delete OSS sequencer client */ int snd_seq_oss_delete_client(void) { if (system_client >= 0) snd_seq_delete_kernel_client(system_client); snd_seq_oss_midi_clear_all(); return 0; } /* * open sequencer device */ int snd_seq_oss_open(struct file *file, int level) { int i, rc; struct seq_oss_devinfo *dp; dp = kzalloc(sizeof(*dp), GFP_KERNEL); if (!dp) { snd_printk(KERN_ERR "can't malloc device info\n"); return -ENOMEM; } debug_printk(("oss_open: dp = %p\n", dp)); dp->cseq = system_client; dp->port = -1; dp->queue = -1; for (i = 0; i < SNDRV_SEQ_OSS_MAX_CLIENTS; i++) { if (client_table[i] == NULL) break; } dp->index = i; if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) { snd_printk(KERN_ERR "too many applications\n"); rc = -ENOMEM; goto _error; } /* look up synth and midi devices */ snd_seq_oss_synth_setup(dp); snd_seq_oss_midi_setup(dp); if (dp->synth_opened == 0 && dp->max_mididev == 0) { /* snd_printk(KERN_ERR "no device found\n"); */ rc = -ENODEV; goto _error; } /* create port */ debug_printk(("create new port\n")); rc = create_port(dp); if (rc < 0) { snd_printk(KERN_ERR "can't create port\n"); goto _error; } /* allocate queue */ debug_printk(("allocate queue\n")); rc = alloc_seq_queue(dp); if (rc < 0) goto _error; /* set address */ dp->addr.client = dp->cseq; dp->addr.port = dp->port; /*dp->addr.queue = dp->queue;*/ /*dp->addr.channel = 0;*/ dp->seq_mode = level; /* set up file mode */ dp->file_mode = translate_mode(file); /* initialize read queue */ debug_printk(("initialize read queue\n")); if (is_read_mode(dp->file_mode)) { dp->readq = snd_seq_oss_readq_new(dp, maxqlen); if (!dp->readq) { rc = -ENOMEM; goto _error; } } /* initialize write queue */ debug_printk(("initialize write queue\n")); if (is_write_mode(dp->file_mode)) { dp->writeq = snd_seq_oss_writeq_new(dp, maxqlen); if (!dp->writeq) { rc = -ENOMEM; goto _error; } } /* 
initialize timer */ debug_printk(("initialize timer\n")); dp->timer = snd_seq_oss_timer_new(dp); if (!dp->timer) { snd_printk(KERN_ERR "can't alloc timer\n"); rc = -ENOMEM; goto _error; } debug_printk(("timer initialized\n")); /* set private data pointer */ file->private_data = dp; /* set up for mode2 */ if (level == SNDRV_SEQ_OSS_MODE_MUSIC) snd_seq_oss_synth_setup_midi(dp); else if (is_read_mode(dp->file_mode)) snd_seq_oss_midi_open_all(dp, SNDRV_SEQ_OSS_FILE_READ); client_table[dp->index] = dp; num_clients++; debug_printk(("open done\n")); return 0; _error: snd_seq_oss_synth_cleanup(dp); snd_seq_oss_midi_cleanup(dp); delete_seq_queue(dp->queue); delete_port(dp); return rc; } /* * translate file flags to private mode */ static int translate_mode(struct file *file) { int file_mode = 0; if ((file->f_flags & O_ACCMODE) != O_RDONLY) file_mode |= SNDRV_SEQ_OSS_FILE_WRITE; if ((file->f_flags & O_ACCMODE) != O_WRONLY) file_mode |= SNDRV_SEQ_OSS_FILE_READ; if (file->f_flags & O_NONBLOCK) file_mode |= SNDRV_SEQ_OSS_FILE_NONBLOCK; return file_mode; } /* * create sequencer port */ static int create_port(struct seq_oss_devinfo *dp) { int rc; struct snd_seq_port_info port; struct snd_seq_port_callback callback; memset(&port, 0, sizeof(port)); port.addr.client = dp->cseq; sprintf(port.name, "Sequencer-%d", dp->index); port.capability = SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_WRITE; /* no subscription */ port.type = SNDRV_SEQ_PORT_TYPE_SPECIFIC; port.midi_channels = 128; port.synth_voices = 128; memset(&callback, 0, sizeof(callback)); callback.owner = THIS_MODULE; callback.private_data = dp; callback.event_input = snd_seq_oss_event_input; callback.private_free = free_devinfo; port.kernel = &callback; rc = call_ctl(SNDRV_SEQ_IOCTL_CREATE_PORT, &port); if (rc < 0) return rc; dp->port = port.addr.port; debug_printk(("new port = %d\n", port.addr.port)); return 0; } /* * delete ALSA port */ static int delete_port(struct seq_oss_devinfo *dp) { if (dp->port < 0) { kfree(dp); return 
0; } debug_printk(("delete_port %i\n", dp->port)); return snd_seq_event_port_detach(dp->cseq, dp->port); } /* * allocate a queue */ static int alloc_seq_queue(struct seq_oss_devinfo *dp) { struct snd_seq_queue_info qinfo; int rc; memset(&qinfo, 0, sizeof(qinfo)); qinfo.owner = system_client; qinfo.locked = 1; strcpy(qinfo.name, "OSS Sequencer Emulation"); if ((rc = call_ctl(SNDRV_SEQ_IOCTL_CREATE_QUEUE, &qinfo)) < 0) return rc; dp->queue = qinfo.queue; return 0; } /* * release queue */ static int delete_seq_queue(int queue) { struct snd_seq_queue_info qinfo; int rc; if (queue < 0) return 0; memset(&qinfo, 0, sizeof(qinfo)); qinfo.queue = queue; rc = call_ctl(SNDRV_SEQ_IOCTL_DELETE_QUEUE, &qinfo); if (rc < 0) printk(KERN_ERR "seq-oss: unable to delete queue %d (%d)\n", queue, rc); return rc; } /* * free device informations - private_free callback of port */ static void free_devinfo(void *private) { struct seq_oss_devinfo *dp = (struct seq_oss_devinfo *)private; if (dp->timer) snd_seq_oss_timer_delete(dp->timer); if (dp->writeq) snd_seq_oss_writeq_delete(dp->writeq); if (dp->readq) snd_seq_oss_readq_delete(dp->readq); kfree(dp); } /* * close sequencer device */ void snd_seq_oss_release(struct seq_oss_devinfo *dp) { int queue; client_table[dp->index] = NULL; num_clients--; debug_printk(("resetting..\n")); snd_seq_oss_reset(dp); debug_printk(("cleaning up..\n")); snd_seq_oss_synth_cleanup(dp); snd_seq_oss_midi_cleanup(dp); /* clear slot */ debug_printk(("releasing resource..\n")); queue = dp->queue; if (dp->port >= 0) delete_port(dp); delete_seq_queue(queue); debug_printk(("release done\n")); } /* * Wait until the queue is empty (if we don't have nonblock) */ void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp) { if (! 
dp->timer->running) return; if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) && dp->writeq) { debug_printk(("syncing..\n")); while (snd_seq_oss_writeq_sync(dp->writeq)) ; } } /* * reset sequencer devices */ void snd_seq_oss_reset(struct seq_oss_devinfo *dp) { int i; /* reset all synth devices */ for (i = 0; i < dp->max_synthdev; i++) snd_seq_oss_synth_reset(dp, i); /* reset all midi devices */ if (dp->seq_mode != SNDRV_SEQ_OSS_MODE_MUSIC) { for (i = 0; i < dp->max_mididev; i++) snd_seq_oss_midi_reset(dp, i); } /* remove queues */ if (dp->readq) snd_seq_oss_readq_clear(dp->readq); if (dp->writeq) snd_seq_oss_writeq_clear(dp->writeq); /* reset timer */ snd_seq_oss_timer_stop(dp->timer); } #ifdef CONFIG_PROC_FS /* * misc. functions for proc interface */ char * enabled_str(int bool) { return bool ? "enabled" : "disabled"; } static char * filemode_str(int val) { static char *str[] = { "none", "read", "write", "read/write", }; return str[val & SNDRV_SEQ_OSS_FILE_ACMODE]; } /* * proc interface */ void snd_seq_oss_system_info_read(struct snd_info_buffer *buf) { int i; struct seq_oss_devinfo *dp; snd_iprintf(buf, "ALSA client number %d\n", system_client); snd_iprintf(buf, "ALSA receiver port %d\n", system_port); snd_iprintf(buf, "\nNumber of applications: %d\n", num_clients); for (i = 0; i < num_clients; i++) { snd_iprintf(buf, "\nApplication %d: ", i); if ((dp = client_table[i]) == NULL) { snd_iprintf(buf, "*empty*\n"); continue; } snd_iprintf(buf, "port %d : queue %d\n", dp->port, dp->queue); snd_iprintf(buf, " sequencer mode = %s : file open mode = %s\n", (dp->seq_mode ? "music" : "synth"), filemode_str(dp->file_mode)); if (dp->seq_mode) snd_iprintf(buf, " timer tempo = %d, timebase = %d\n", dp->timer->oss_tempo, dp->timer->oss_timebase); snd_iprintf(buf, " max queue length %d\n", maxqlen); if (is_read_mode(dp->file_mode) && dp->readq) snd_seq_oss_readq_info_read(dp->readq, buf); } } #endif /* CONFIG_PROC_FS */
gpl-2.0
losfair/MiracleKernel
drivers/ide/umc8672.c
4191
5086
/* * Copyright (C) 1995-1996 Linus Torvalds & author (see below) */ /* * Principal Author/Maintainer: PODIEN@hml2.atlas.de (Wolfram Podien) * * This file provides support for the advanced features * of the UMC 8672 IDE interface. * * Version 0.01 Initial version, hacked out of ide.c, * and #include'd rather than compiled separately. * This will get cleaned up in a subsequent release. * * Version 0.02 now configs/compiles separate from ide.c -ml * Version 0.03 enhanced auto-tune, fix display bug * Version 0.05 replace sti() with restore_flags() -ml * add detection of possible race condition -ml */ /* * VLB Controller Support from * Wolfram Podien * Rohoefe 3 * D28832 Achim * Germany * * To enable UMC8672 support there must a lilo line like * append="ide0=umc8672"... * To set the speed according to the abilities of the hardware there must be a * line like * #define UMC_DRIVE0 11 * in the beginning of the driver, which sets the speed of drive 0 to 11 (there * are some lines present). 0 - 11 are allowed speed values. These values are * the results from the DOS speed test program supplied from UMC. 11 is the * highest speed (about PIO mode 3) */ #define REALLY_SLOW_IO /* some systems can safely undef this */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "umc8672" /* * Default speeds. These can be changed with "auto-tune" and/or hdparm. 
*/ #define UMC_DRIVE0 1 /* DOS measured drive speeds */ #define UMC_DRIVE1 1 /* 0 to 11 allowed */ #define UMC_DRIVE2 1 /* 11 = Fastest Speed */ #define UMC_DRIVE3 1 /* In case of crash reduce speed */ static u8 current_speeds[4] = {UMC_DRIVE0, UMC_DRIVE1, UMC_DRIVE2, UMC_DRIVE3}; static const u8 pio_to_umc [5] = {0, 3, 7, 10, 11}; /* rough guesses */ /* 0 1 2 3 4 5 6 7 8 9 10 11 */ static const u8 speedtab [3][12] = { {0x0f, 0x0b, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1}, {0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1}, {0xff, 0xcb, 0xc0, 0x58, 0x36, 0x33, 0x23, 0x22, 0x21, 0x11, 0x10, 0x0} }; static void out_umc(char port, char wert) { outb_p(port, 0x108); outb_p(wert, 0x109); } static inline u8 in_umc(char port) { outb_p(port, 0x108); return inb_p(0x109); } static void umc_set_speeds(u8 speeds[]) { int i, tmp; outb_p(0x5A, 0x108); /* enable umc */ out_umc(0xd7, (speedtab[0][speeds[2]] | (speedtab[0][speeds[3]]<<4))); out_umc(0xd6, (speedtab[0][speeds[0]] | (speedtab[0][speeds[1]]<<4))); tmp = 0; for (i = 3; i >= 0; i--) tmp = (tmp << 2) | speedtab[1][speeds[i]]; out_umc(0xdc, tmp); for (i = 0; i < 4; i++) { out_umc(0xd0 + i, speedtab[2][speeds[i]]); out_umc(0xd8 + i, speedtab[2][speeds[i]]); } outb_p(0xa5, 0x108); /* disable umc */ printk("umc8672: drive speeds [0 to 11]: %d %d %d %d\n", speeds[0], speeds[1], speeds[2], speeds[3]); } static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { ide_hwif_t *mate = hwif->mate; unsigned long uninitialized_var(flags); const u8 pio = drive->pio_mode - XFER_PIO_0; printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", drive->name, pio, pio_to_umc[pio]); if (mate) spin_lock_irqsave(&mate->lock, flags); if (mate && mate->handler) { printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n"); } else { current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio]; umc_set_speeds(current_speeds); } if (mate) spin_unlock_irqrestore(&mate->lock, flags); } static const 
struct ide_port_ops umc8672_port_ops = { .set_pio_mode = umc_set_pio_mode, }; static const struct ide_port_info umc8672_port_info __initdata = { .name = DRV_NAME, .chipset = ide_umc8672, .port_ops = &umc8672_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, }; static int __init umc8672_probe(void) { unsigned long flags; if (!request_region(0x108, 2, "umc8672")) { printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n"); return 1; } local_irq_save(flags); outb_p(0x5A, 0x108); /* enable umc */ if (in_umc (0xd5) != 0xa0) { local_irq_restore(flags); printk(KERN_ERR "umc8672: not found\n"); release_region(0x108, 2); return 1; } outb_p(0xa5, 0x108); /* disable umc */ umc_set_speeds(current_speeds); local_irq_restore(flags); return ide_legacy_device_add(&umc8672_port_info, 0); } static int probe_umc8672; module_param_named(probe, probe_umc8672, bool, 0); MODULE_PARM_DESC(probe, "probe for UMC8672 chipset"); static int __init umc8672_init(void) { if (probe_umc8672 == 0) goto out; if (umc8672_probe() == 0) return 0; out: return -ENODEV; } module_init(umc8672_init); MODULE_AUTHOR("Wolfram Podien"); MODULE_DESCRIPTION("Support for UMC 8672 IDE chipset"); MODULE_LICENSE("GPL");
gpl-2.0
omnirom/android_kernel_samsung_n1
drivers/watchdog/nuc900_wdt.c
4191
8392
/* * Copyright (c) 2009 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/watchdog.h> #include <linux/uaccess.h> #define REG_WTCR 0x1c #define WTCLK (0x01 << 10) #define WTE (0x01 << 7) /*wdt enable*/ #define WTIS (0x03 << 4) #define WTIF (0x01 << 3) #define WTRF (0x01 << 2) #define WTRE (0x01 << 1) #define WTR (0x01 << 0) /* * The watchdog time interval can be calculated via following formula: * WTIS real time interval (formula) * 0x00 ((2^ 14 ) * ((external crystal freq) / 256))seconds * 0x01 ((2^ 16 ) * ((external crystal freq) / 256))seconds * 0x02 ((2^ 18 ) * ((external crystal freq) / 256))seconds * 0x03 ((2^ 20 ) * ((external crystal freq) / 256))seconds * * The external crystal freq is 15Mhz in the nuc900 evaluation board. * So 0x00 = +-0.28 seconds, 0x01 = +-1.12 seconds, 0x02 = +-4.48 seconds, * 0x03 = +- 16.92 seconds.. */ #define WDT_HW_TIMEOUT 0x02 #define WDT_TIMEOUT (HZ/2) #define WDT_HEARTBEAT 15 static int heartbeat = WDT_HEARTBEAT; module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. 
" "(default = " __MODULE_STRING(WDT_HEARTBEAT) ")"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); struct nuc900_wdt { struct resource *res; struct clk *wdt_clock; struct platform_device *pdev; void __iomem *wdt_base; char expect_close; struct timer_list timer; spinlock_t wdt_lock; unsigned long next_heartbeat; }; static unsigned long nuc900wdt_busy; struct nuc900_wdt *nuc900_wdt; static inline void nuc900_wdt_keepalive(void) { unsigned int val; spin_lock(&nuc900_wdt->wdt_lock); val = __raw_readl(nuc900_wdt->wdt_base + REG_WTCR); val |= (WTR | WTIF); __raw_writel(val, nuc900_wdt->wdt_base + REG_WTCR); spin_unlock(&nuc900_wdt->wdt_lock); } static inline void nuc900_wdt_start(void) { unsigned int val; spin_lock(&nuc900_wdt->wdt_lock); val = __raw_readl(nuc900_wdt->wdt_base + REG_WTCR); val |= (WTRE | WTE | WTR | WTCLK | WTIF); val &= ~WTIS; val |= (WDT_HW_TIMEOUT << 0x04); __raw_writel(val, nuc900_wdt->wdt_base + REG_WTCR); spin_unlock(&nuc900_wdt->wdt_lock); nuc900_wdt->next_heartbeat = jiffies + heartbeat * HZ; mod_timer(&nuc900_wdt->timer, jiffies + WDT_TIMEOUT); } static inline void nuc900_wdt_stop(void) { unsigned int val; del_timer(&nuc900_wdt->timer); spin_lock(&nuc900_wdt->wdt_lock); val = __raw_readl(nuc900_wdt->wdt_base + REG_WTCR); val &= ~WTE; __raw_writel(val, nuc900_wdt->wdt_base + REG_WTCR); spin_unlock(&nuc900_wdt->wdt_lock); } static inline void nuc900_wdt_ping(void) { nuc900_wdt->next_heartbeat = jiffies + heartbeat * HZ; } static int nuc900_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &nuc900wdt_busy)) return -EBUSY; nuc900_wdt_start(); return nonseekable_open(inode, file); } static int nuc900_wdt_close(struct inode *inode, struct file *file) { if (nuc900_wdt->expect_close == 42) nuc900_wdt_stop(); else { dev_crit(&nuc900_wdt->pdev->dev, "Unexpected close, not stopping 
watchdog!\n"); nuc900_wdt_ping(); } nuc900_wdt->expect_close = 0; clear_bit(0, &nuc900wdt_busy); return 0; } static const struct watchdog_info nuc900_wdt_info = { .identity = "nuc900 watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; static long nuc900_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; int new_value; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &nuc900_wdt_info, sizeof(nuc900_wdt_info)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_KEEPALIVE: nuc900_wdt_ping(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_value, p)) return -EFAULT; heartbeat = new_value; nuc900_wdt_ping(); return put_user(new_value, p); case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); default: return -ENOTTY; } } static ssize_t nuc900_wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { if (!len) return 0; /* Scan for magic character */ if (!nowayout) { size_t i; nuc900_wdt->expect_close = 0; for (i = 0; i < len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') { nuc900_wdt->expect_close = 42; break; } } } nuc900_wdt_ping(); return len; } static void nuc900_wdt_timer_ping(unsigned long data) { if (time_before(jiffies, nuc900_wdt->next_heartbeat)) { nuc900_wdt_keepalive(); mod_timer(&nuc900_wdt->timer, jiffies + WDT_TIMEOUT); } else dev_warn(&nuc900_wdt->pdev->dev, "Will reset the machine !\n"); } static const struct file_operations nuc900wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = nuc900_wdt_ioctl, .open = nuc900_wdt_open, .release = nuc900_wdt_close, .write = nuc900_wdt_write, }; static struct miscdevice nuc900wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &nuc900wdt_fops, }; static int __devinit nuc900wdt_probe(struct platform_device *pdev) { int ret = 0; nuc900_wdt = kzalloc(sizeof(struct 
nuc900_wdt), GFP_KERNEL); if (!nuc900_wdt) return -ENOMEM; nuc900_wdt->pdev = pdev; spin_lock_init(&nuc900_wdt->wdt_lock); nuc900_wdt->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (nuc900_wdt->res == NULL) { dev_err(&pdev->dev, "no memory resource specified\n"); ret = -ENOENT; goto err_get; } if (!request_mem_region(nuc900_wdt->res->start, resource_size(nuc900_wdt->res), pdev->name)) { dev_err(&pdev->dev, "failed to get memory region\n"); ret = -ENOENT; goto err_get; } nuc900_wdt->wdt_base = ioremap(nuc900_wdt->res->start, resource_size(nuc900_wdt->res)); if (nuc900_wdt->wdt_base == NULL) { dev_err(&pdev->dev, "failed to ioremap() region\n"); ret = -EINVAL; goto err_req; } nuc900_wdt->wdt_clock = clk_get(&pdev->dev, NULL); if (IS_ERR(nuc900_wdt->wdt_clock)) { dev_err(&pdev->dev, "failed to find watchdog clock source\n"); ret = PTR_ERR(nuc900_wdt->wdt_clock); goto err_map; } clk_enable(nuc900_wdt->wdt_clock); setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0); if (misc_register(&nuc900wdt_miscdev)) { dev_err(&pdev->dev, "err register miscdev on minor=%d (%d)\n", WATCHDOG_MINOR, ret); goto err_clk; } return 0; err_clk: clk_disable(nuc900_wdt->wdt_clock); clk_put(nuc900_wdt->wdt_clock); err_map: iounmap(nuc900_wdt->wdt_base); err_req: release_mem_region(nuc900_wdt->res->start, resource_size(nuc900_wdt->res)); err_get: kfree(nuc900_wdt); return ret; } static int __devexit nuc900wdt_remove(struct platform_device *pdev) { misc_deregister(&nuc900wdt_miscdev); clk_disable(nuc900_wdt->wdt_clock); clk_put(nuc900_wdt->wdt_clock); iounmap(nuc900_wdt->wdt_base); release_mem_region(nuc900_wdt->res->start, resource_size(nuc900_wdt->res)); kfree(nuc900_wdt); return 0; } static struct platform_driver nuc900wdt_driver = { .probe = nuc900wdt_probe, .remove = __devexit_p(nuc900wdt_remove), .driver = { .name = "nuc900-wdt", .owner = THIS_MODULE, }, }; static int __init nuc900_wdt_init(void) { return platform_driver_register(&nuc900wdt_driver); } static void __exit 
nuc900_wdt_exit(void) { platform_driver_unregister(&nuc900wdt_driver); } module_init(nuc900_wdt_init); module_exit(nuc900_wdt_exit); MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("Watchdog driver for NUC900"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:nuc900-wdt");
gpl-2.0
yaymalaga/hellsPrime_kernel
arch/tile/kernel/pci.c
4447
16534
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/bootmem.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/byteorder.h> #include <asm/hv_driver.h> #include <hv/drv_pcie_rc_intf.h> /* * Initialization flow and process * ------------------------------- * * This files contains the routines to search for PCI buses, * enumerate the buses, and configure any attached devices. * * There are two entry points here: * 1) tile_pci_init * This sets up the pci_controller structs, and opens the * FDs to the hypervisor. This is called from setup_arch() early * in the boot process. * 2) pcibios_init * This probes the PCI bus(es) for any attached hardware. It's * called by subsys_initcall. All of the real work is done by the * generic Linux PCI layer. * */ /* * This flag tells if the platform is TILEmpower that needs * special configuration for the PLX switch chip. */ int __write_once tile_plx_gen1; static struct pci_controller controllers[TILE_NUM_PCIE]; static int num_controllers; static int pci_scan_flags[TILE_NUM_PCIE]; static struct pci_ops tile_cfg_ops; /* * We don't need to worry about the alignment of resources. 
*/ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; } EXPORT_SYMBOL(pcibios_align_resource); /* * Open a FD to the hypervisor PCI device. * * controller_id is the controller number, config type is 0 or 1 for * config0 or config1 operations. */ static int __devinit tile_pcie_open(int controller_id, int config_type) { char filename[32]; int fd; sprintf(filename, "pcie/%d/config%d", controller_id, config_type); fd = hv_dev_open((HV_VirtAddr)filename, 0); return fd; } /* * Get the IRQ numbers from the HV and set up the handlers for them. */ static int __devinit tile_init_irqs(int controller_id, struct pci_controller *controller) { char filename[32]; int fd; int ret; int x; struct pcie_rc_config rc_config; sprintf(filename, "pcie/%d/ctl", controller_id); fd = hv_dev_open((HV_VirtAddr)filename, 0); if (fd < 0) { pr_err("PCI: hv_dev_open(%s) failed\n", filename); return -1; } ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config), sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF); hv_dev_close(fd); if (ret != sizeof(rc_config)) { pr_err("PCI: wanted %zd bytes, got %d\n", sizeof(rc_config), ret); return -1; } /* Record irq_base so that we can map INTx to IRQ # later. */ controller->irq_base = rc_config.intr; for (x = 0; x < 4; x++) tile_irq_activate(rc_config.intr + x, TILE_IRQ_HW_CLEAR); if (rc_config.plx_gen1) controller->plx_gen1 = 1; return 0; } /* * First initialization entry point, called from setup_arch(). * * Find valid controllers and fill in pci_controller structs for each * of them. * * Returns the number of controllers discovered. */ int __init tile_pci_init(void) { int i; pr_info("PCI: Searching for controllers...\n"); /* Re-init number of PCIe controllers to support hot-plug feature. 
*/ num_controllers = 0; /* Do any configuration we need before using the PCIe */ for (i = 0; i < TILE_NUM_PCIE; i++) { /* * To see whether we need a real config op based on * the results of pcibios_init(), to support PCIe hot-plug. */ if (pci_scan_flags[i] == 0) { int hv_cfg_fd0 = -1; int hv_cfg_fd1 = -1; int hv_mem_fd = -1; char name[32]; struct pci_controller *controller; /* * Open the fd to the HV. If it fails then this * device doesn't exist. */ hv_cfg_fd0 = tile_pcie_open(i, 0); if (hv_cfg_fd0 < 0) continue; hv_cfg_fd1 = tile_pcie_open(i, 1); if (hv_cfg_fd1 < 0) { pr_err("PCI: Couldn't open config fd to HV " "for controller %d\n", i); goto err_cont; } sprintf(name, "pcie/%d/mem", i); hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0); if (hv_mem_fd < 0) { pr_err("PCI: Could not open mem fd to HV!\n"); goto err_cont; } pr_info("PCI: Found PCI controller #%d\n", i); controller = &controllers[i]; controller->index = i; controller->hv_cfg_fd[0] = hv_cfg_fd0; controller->hv_cfg_fd[1] = hv_cfg_fd1; controller->hv_mem_fd = hv_mem_fd; controller->first_busno = 0; controller->last_busno = 0xff; controller->ops = &tile_cfg_ops; num_controllers++; continue; err_cont: if (hv_cfg_fd0 >= 0) hv_dev_close(hv_cfg_fd0); if (hv_cfg_fd1 >= 0) hv_dev_close(hv_cfg_fd1); if (hv_mem_fd >= 0) hv_dev_close(hv_mem_fd); continue; } } /* * Before using the PCIe, see if we need to do any platform-specific * configuration, such as the PLX switch Gen 1 issue on TILEmpower. */ for (i = 0; i < num_controllers; i++) { struct pci_controller *controller = &controllers[i]; if (controller->plx_gen1) tile_plx_gen1 = 1; } return num_controllers; } /* * (pin - 1) converts from the PCI standard's [1:4] convention to * a normal [0:3] range. 
*/ static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *controller = (struct pci_controller *)dev->sysdata; return (pin - 1) + controller->irq_base; } static void __devinit fixup_read_and_payload_sizes(void) { struct pci_dev *dev = NULL; int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ int max_read_size = 0x2; /* Limit to 512 byte reads. */ u16 new_values; /* Scan for the smallest maximum payload size. */ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { int pcie_caps_offset; u32 devcap; int max_payload; pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); if (pcie_caps_offset == 0) continue; pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP, &devcap); max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; if (max_payload < smallest_max_payload) smallest_max_payload = max_payload; } /* Now, set the max_payload_size for all devices to that value. */ new_values = (max_read_size << 12) | (smallest_max_payload << 5); while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { int pcie_caps_offset; u16 devctl; pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); if (pcie_caps_offset == 0) continue; pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, &devctl); devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); devctl |= new_values; pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl); } } /* * Second PCI initialization entry point, called by subsys_initcall. * * The controllers have been set up by the time we get here, by a call to * tile_pci_init. */ int __init pcibios_init(void) { int i; pr_info("PCI: Probing PCI hardware\n"); /* * Delay a bit in case devices aren't ready. Some devices are * known to require at least 20ms here, but we use a more * conservative value. */ mdelay(250); /* Scan all of the recorded PCI controllers. 
*/ for (i = 0; i < TILE_NUM_PCIE; i++) { /* * Do real pcibios init ops if the controller is initialized * by tile_pci_init() successfully and not initialized by * pcibios_init() yet to support PCIe hot-plug. */ if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) { struct pci_controller *controller = &controllers[i]; struct pci_bus *bus; if (tile_init_irqs(i, controller)) { pr_err("PCI: Could not initialize IRQs\n"); continue; } pr_info("PCI: initializing controller #%d\n", i); /* * This comes from the generic Linux PCI driver. * * It reads the PCI tree for this bus into the Linux * data structures. * * This is inlined in linux/pci.h and calls into * pci_scan_bus_parented() in probe.c. */ bus = pci_scan_bus(0, controller->ops, controller); controller->root_bus = bus; controller->last_busno = bus->subordinate; } } /* Do machine dependent PCI interrupt routing */ pci_fixup_irqs(pci_common_swizzle, tile_map_irq); /* * This comes from the generic Linux PCI driver. * * It allocates all of the resources (I/O memory, etc) * associated with the devices read in above. */ pci_assign_unassigned_resources(); /* Configure the max_read_size and max_payload_size values. */ fixup_read_and_payload_sizes(); /* Record the I/O resources in the PCI controller structure. */ for (i = 0; i < TILE_NUM_PCIE; i++) { /* * Do real pcibios init ops if the controller is initialized * by tile_pci_init() successfully and not initialized by * pcibios_init() yet to support PCIe hot-plug. */ if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) { struct pci_bus *root_bus = controllers[i].root_bus; struct pci_bus *next_bus; struct pci_dev *dev; list_for_each_entry(dev, &root_bus->devices, bus_list) { /* * Find the PCI host controller, ie. the 1st * bridge. 
*/ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && (PCI_SLOT(dev->devfn) == 0)) { next_bus = dev->subordinate; controllers[i].mem_resources[0] = *next_bus->resource[0]; controllers[i].mem_resources[1] = *next_bus->resource[1]; controllers[i].mem_resources[2] = *next_bus->resource[2]; /* Setup flags. */ pci_scan_flags[i] = 1; break; } } } } return 0; } subsys_initcall(pcibios_init); /* * No bus fixups needed. */ void __devinit pcibios_fixup_bus(struct pci_bus *bus) { /* Nothing needs to be done. */ } void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling. */ } /* * This can be called from the generic PCI layer, but doesn't need to * do anything. */ char __devinit *pcibios_setup(char *str) { /* Nothing needs to be done. */ return str; } /* * This is called from the generic Linux layer. */ void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } /* * Enable memory and/or address decoding, as appropriate, for the * device described by the 'dev' struct. * * This is called from the generic PCI layer, and can be called * for bridges or endpoints. */ int pcibios_enable_device(struct pci_dev *dev, int mask) { u16 cmd, old_cmd; u8 header_type; int i; struct resource *r; pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* * For bridges, we enable both memory and I/O decoding * in call cases. */ cmd |= PCI_COMMAND_IO; cmd |= PCI_COMMAND_MEMORY; } else { /* * For endpoints, we enable memory and/or I/O decoding * only if they have a memory resource of that type. 
*/ for (i = 0; i < 6; i++) { r = &dev->resource[i]; if (r->flags & IORESOURCE_UNSET) { pr_err("PCI: Device %s not available " "because of resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } } /* * We only write the command if it changed. */ if (cmd != old_cmd) pci_write_config_word(dev, PCI_COMMAND, cmd); return 0; } /**************************************************************** * * Tile PCI config space read/write routines * ****************************************************************/ /* * These are the normal read and write ops * These are expanded with macros from pci_bus_read_config_byte() etc. * * devfn is the combined PCI slot & function. * * offset is in bytes, from the start of config space for the * specified bus & slot. */ static int __devinit tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 *val) { struct pci_controller *controller = bus->sysdata; int busnum = bus->number & 0xff; int slot = (devfn >> 3) & 0x1f; int function = devfn & 0x7; u32 addr; int config_mode = 1; /* * There is no bridge between the Tile and bus 0, so we * use config0 to talk to bus 0. * * If we're talking to a bus other than zero then we * must have found a bridge. */ if (busnum == 0) { /* * We fake an empty slot for (busnum == 0) && (slot > 0), * since there is only one slot on bus 0. */ if (slot) { *val = 0xFFFFFFFF; return 0; } config_mode = 0; } addr = busnum << 20; /* Bus in 27:20 */ addr |= slot << 15; /* Slot (device) in 19:15 */ addr |= function << 12; /* Function is in 14:12 */ addr |= (offset & 0xFFF); /* byte address in 0:11 */ return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0, (HV_VirtAddr)(val), size, addr); } /* * See tile_cfg_read() for relevant comments. * Note that "val" is the value to write, not a pointer to that value. 
*/ static int __devinit tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 val) { struct pci_controller *controller = bus->sysdata; int busnum = bus->number & 0xff; int slot = (devfn >> 3) & 0x1f; int function = devfn & 0x7; u32 addr; int config_mode = 1; HV_VirtAddr valp = (HV_VirtAddr)&val; /* * For bus 0 slot 0 we use config 0 accesses. */ if (busnum == 0) { /* * We fake an empty slot for (busnum == 0) && (slot > 0), * since there is only one slot on bus 0. */ if (slot) return 0; config_mode = 0; } addr = busnum << 20; /* Bus in 27:20 */ addr |= slot << 15; /* Slot (device) in 19:15 */ addr |= function << 12; /* Function is in 14:12 */ addr |= (offset & 0xFFF); /* byte address in 0:11 */ #ifdef __BIG_ENDIAN /* Point to the correct part of the 32-bit "val". */ valp += 4 - size; #endif return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0, valp, size, addr); } static struct pci_ops tile_cfg_ops = { .read = tile_cfg_read, .write = tile_cfg_write, }; /* * In the following, each PCI controller's mem_resources[1] * represents its (non-prefetchable) PCI memory resource. * mem_resources[0] and mem_resources[2] refer to its PCI I/O and * prefetchable PCI memory resources, respectively. * For more details, see pci_setup_bridge() in setup-bus.c. * By comparing the target PCI memory address against the * end address of controller 0, we can determine the controller * that should accept the PCI memory access. 
*/ #define TILE_READ(size, type) \ type _tile_read##size(unsigned long addr) \ { \ type val; \ int idx = 0; \ if (addr > controllers[0].mem_resources[1].end && \ addr > controllers[0].mem_resources[2].end) \ idx = 1; \ if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \ (HV_VirtAddr)(&val), sizeof(type), addr)) \ pr_err("PCI: read %zd bytes at 0x%lX failed\n", \ sizeof(type), addr); \ return val; \ } \ EXPORT_SYMBOL(_tile_read##size) TILE_READ(b, u8); TILE_READ(w, u16); TILE_READ(l, u32); TILE_READ(q, u64); #define TILE_WRITE(size, type) \ void _tile_write##size(type val, unsigned long addr) \ { \ int idx = 0; \ if (addr > controllers[0].mem_resources[1].end && \ addr > controllers[0].mem_resources[2].end) \ idx = 1; \ if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \ (HV_VirtAddr)(&val), sizeof(type), addr)) \ pr_err("PCI: write %zd bytes at 0x%lX failed\n", \ sizeof(type), addr); \ } \ EXPORT_SYMBOL(_tile_write##size) TILE_WRITE(b, u8); TILE_WRITE(w, u16); TILE_WRITE(l, u32); TILE_WRITE(q, u64);
gpl-2.0
aatjitra/cm12
arch/tile/kernel/pci.c
4447
16534
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/bootmem.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/export.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/byteorder.h> #include <asm/hv_driver.h> #include <hv/drv_pcie_rc_intf.h> /* * Initialization flow and process * ------------------------------- * * This files contains the routines to search for PCI buses, * enumerate the buses, and configure any attached devices. * * There are two entry points here: * 1) tile_pci_init * This sets up the pci_controller structs, and opens the * FDs to the hypervisor. This is called from setup_arch() early * in the boot process. * 2) pcibios_init * This probes the PCI bus(es) for any attached hardware. It's * called by subsys_initcall. All of the real work is done by the * generic Linux PCI layer. * */ /* * This flag tells if the platform is TILEmpower that needs * special configuration for the PLX switch chip. */ int __write_once tile_plx_gen1; static struct pci_controller controllers[TILE_NUM_PCIE]; static int num_controllers; static int pci_scan_flags[TILE_NUM_PCIE]; static struct pci_ops tile_cfg_ops; /* * We don't need to worry about the alignment of resources. 
*/ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; } EXPORT_SYMBOL(pcibios_align_resource); /* * Open a FD to the hypervisor PCI device. * * controller_id is the controller number, config type is 0 or 1 for * config0 or config1 operations. */ static int __devinit tile_pcie_open(int controller_id, int config_type) { char filename[32]; int fd; sprintf(filename, "pcie/%d/config%d", controller_id, config_type); fd = hv_dev_open((HV_VirtAddr)filename, 0); return fd; } /* * Get the IRQ numbers from the HV and set up the handlers for them. */ static int __devinit tile_init_irqs(int controller_id, struct pci_controller *controller) { char filename[32]; int fd; int ret; int x; struct pcie_rc_config rc_config; sprintf(filename, "pcie/%d/ctl", controller_id); fd = hv_dev_open((HV_VirtAddr)filename, 0); if (fd < 0) { pr_err("PCI: hv_dev_open(%s) failed\n", filename); return -1; } ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config), sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF); hv_dev_close(fd); if (ret != sizeof(rc_config)) { pr_err("PCI: wanted %zd bytes, got %d\n", sizeof(rc_config), ret); return -1; } /* Record irq_base so that we can map INTx to IRQ # later. */ controller->irq_base = rc_config.intr; for (x = 0; x < 4; x++) tile_irq_activate(rc_config.intr + x, TILE_IRQ_HW_CLEAR); if (rc_config.plx_gen1) controller->plx_gen1 = 1; return 0; } /* * First initialization entry point, called from setup_arch(). * * Find valid controllers and fill in pci_controller structs for each * of them. * * Returns the number of controllers discovered. */ int __init tile_pci_init(void) { int i; pr_info("PCI: Searching for controllers...\n"); /* Re-init number of PCIe controllers to support hot-plug feature. 
*/ num_controllers = 0; /* Do any configuration we need before using the PCIe */ for (i = 0; i < TILE_NUM_PCIE; i++) { /* * To see whether we need a real config op based on * the results of pcibios_init(), to support PCIe hot-plug. */ if (pci_scan_flags[i] == 0) { int hv_cfg_fd0 = -1; int hv_cfg_fd1 = -1; int hv_mem_fd = -1; char name[32]; struct pci_controller *controller; /* * Open the fd to the HV. If it fails then this * device doesn't exist. */ hv_cfg_fd0 = tile_pcie_open(i, 0); if (hv_cfg_fd0 < 0) continue; hv_cfg_fd1 = tile_pcie_open(i, 1); if (hv_cfg_fd1 < 0) { pr_err("PCI: Couldn't open config fd to HV " "for controller %d\n", i); goto err_cont; } sprintf(name, "pcie/%d/mem", i); hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0); if (hv_mem_fd < 0) { pr_err("PCI: Could not open mem fd to HV!\n"); goto err_cont; } pr_info("PCI: Found PCI controller #%d\n", i); controller = &controllers[i]; controller->index = i; controller->hv_cfg_fd[0] = hv_cfg_fd0; controller->hv_cfg_fd[1] = hv_cfg_fd1; controller->hv_mem_fd = hv_mem_fd; controller->first_busno = 0; controller->last_busno = 0xff; controller->ops = &tile_cfg_ops; num_controllers++; continue; err_cont: if (hv_cfg_fd0 >= 0) hv_dev_close(hv_cfg_fd0); if (hv_cfg_fd1 >= 0) hv_dev_close(hv_cfg_fd1); if (hv_mem_fd >= 0) hv_dev_close(hv_mem_fd); continue; } } /* * Before using the PCIe, see if we need to do any platform-specific * configuration, such as the PLX switch Gen 1 issue on TILEmpower. */ for (i = 0; i < num_controllers; i++) { struct pci_controller *controller = &controllers[i]; if (controller->plx_gen1) tile_plx_gen1 = 1; } return num_controllers; } /* * (pin - 1) converts from the PCI standard's [1:4] convention to * a normal [0:3] range. 
*/ static int tile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *controller = (struct pci_controller *)dev->sysdata; return (pin - 1) + controller->irq_base; } static void __devinit fixup_read_and_payload_sizes(void) { struct pci_dev *dev = NULL; int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */ int max_read_size = 0x2; /* Limit to 512 byte reads. */ u16 new_values; /* Scan for the smallest maximum payload size. */ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { int pcie_caps_offset; u32 devcap; int max_payload; pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); if (pcie_caps_offset == 0) continue; pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP, &devcap); max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; if (max_payload < smallest_max_payload) smallest_max_payload = max_payload; } /* Now, set the max_payload_size for all devices to that value. */ new_values = (max_read_size << 12) | (smallest_max_payload << 5); while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { int pcie_caps_offset; u16 devctl; pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP); if (pcie_caps_offset == 0) continue; pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, &devctl); devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); devctl |= new_values; pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl); } } /* * Second PCI initialization entry point, called by subsys_initcall. * * The controllers have been set up by the time we get here, by a call to * tile_pci_init. */ int __init pcibios_init(void) { int i; pr_info("PCI: Probing PCI hardware\n"); /* * Delay a bit in case devices aren't ready. Some devices are * known to require at least 20ms here, but we use a more * conservative value. */ mdelay(250); /* Scan all of the recorded PCI controllers. 
*/ for (i = 0; i < TILE_NUM_PCIE; i++) { /* * Do real pcibios init ops if the controller is initialized * by tile_pci_init() successfully and not initialized by * pcibios_init() yet to support PCIe hot-plug. */ if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) { struct pci_controller *controller = &controllers[i]; struct pci_bus *bus; if (tile_init_irqs(i, controller)) { pr_err("PCI: Could not initialize IRQs\n"); continue; } pr_info("PCI: initializing controller #%d\n", i); /* * This comes from the generic Linux PCI driver. * * It reads the PCI tree for this bus into the Linux * data structures. * * This is inlined in linux/pci.h and calls into * pci_scan_bus_parented() in probe.c. */ bus = pci_scan_bus(0, controller->ops, controller); controller->root_bus = bus; controller->last_busno = bus->subordinate; } } /* Do machine dependent PCI interrupt routing */ pci_fixup_irqs(pci_common_swizzle, tile_map_irq); /* * This comes from the generic Linux PCI driver. * * It allocates all of the resources (I/O memory, etc) * associated with the devices read in above. */ pci_assign_unassigned_resources(); /* Configure the max_read_size and max_payload_size values. */ fixup_read_and_payload_sizes(); /* Record the I/O resources in the PCI controller structure. */ for (i = 0; i < TILE_NUM_PCIE; i++) { /* * Do real pcibios init ops if the controller is initialized * by tile_pci_init() successfully and not initialized by * pcibios_init() yet to support PCIe hot-plug. */ if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) { struct pci_bus *root_bus = controllers[i].root_bus; struct pci_bus *next_bus; struct pci_dev *dev; list_for_each_entry(dev, &root_bus->devices, bus_list) { /* * Find the PCI host controller, ie. the 1st * bridge. 
*/ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && (PCI_SLOT(dev->devfn) == 0)) { next_bus = dev->subordinate; controllers[i].mem_resources[0] = *next_bus->resource[0]; controllers[i].mem_resources[1] = *next_bus->resource[1]; controllers[i].mem_resources[2] = *next_bus->resource[2]; /* Setup flags. */ pci_scan_flags[i] = 1; break; } } } } return 0; } subsys_initcall(pcibios_init); /* * No bus fixups needed. */ void __devinit pcibios_fixup_bus(struct pci_bus *bus) { /* Nothing needs to be done. */ } void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling. */ } /* * This can be called from the generic PCI layer, but doesn't need to * do anything. */ char __devinit *pcibios_setup(char *str) { /* Nothing needs to be done. */ return str; } /* * This is called from the generic Linux layer. */ void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } /* * Enable memory and/or address decoding, as appropriate, for the * device described by the 'dev' struct. * * This is called from the generic PCI layer, and can be called * for bridges or endpoints. */ int pcibios_enable_device(struct pci_dev *dev, int mask) { u16 cmd, old_cmd; u8 header_type; int i; struct resource *r; pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* * For bridges, we enable both memory and I/O decoding * in call cases. */ cmd |= PCI_COMMAND_IO; cmd |= PCI_COMMAND_MEMORY; } else { /* * For endpoints, we enable memory and/or I/O decoding * only if they have a memory resource of that type. 
*/ for (i = 0; i < 6; i++) { r = &dev->resource[i]; if (r->flags & IORESOURCE_UNSET) { pr_err("PCI: Device %s not available " "because of resource collisions\n", pci_name(dev)); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } } /* * We only write the command if it changed. */ if (cmd != old_cmd) pci_write_config_word(dev, PCI_COMMAND, cmd); return 0; } /**************************************************************** * * Tile PCI config space read/write routines * ****************************************************************/ /* * These are the normal read and write ops * These are expanded with macros from pci_bus_read_config_byte() etc. * * devfn is the combined PCI slot & function. * * offset is in bytes, from the start of config space for the * specified bus & slot. */ static int __devinit tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 *val) { struct pci_controller *controller = bus->sysdata; int busnum = bus->number & 0xff; int slot = (devfn >> 3) & 0x1f; int function = devfn & 0x7; u32 addr; int config_mode = 1; /* * There is no bridge between the Tile and bus 0, so we * use config0 to talk to bus 0. * * If we're talking to a bus other than zero then we * must have found a bridge. */ if (busnum == 0) { /* * We fake an empty slot for (busnum == 0) && (slot > 0), * since there is only one slot on bus 0. */ if (slot) { *val = 0xFFFFFFFF; return 0; } config_mode = 0; } addr = busnum << 20; /* Bus in 27:20 */ addr |= slot << 15; /* Slot (device) in 19:15 */ addr |= function << 12; /* Function is in 14:12 */ addr |= (offset & 0xFFF); /* byte address in 0:11 */ return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0, (HV_VirtAddr)(val), size, addr); } /* * See tile_cfg_read() for relevant comments. * Note that "val" is the value to write, not a pointer to that value. 
*/ static int __devinit tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset, int size, u32 val) { struct pci_controller *controller = bus->sysdata; int busnum = bus->number & 0xff; int slot = (devfn >> 3) & 0x1f; int function = devfn & 0x7; u32 addr; int config_mode = 1; HV_VirtAddr valp = (HV_VirtAddr)&val; /* * For bus 0 slot 0 we use config 0 accesses. */ if (busnum == 0) { /* * We fake an empty slot for (busnum == 0) && (slot > 0), * since there is only one slot on bus 0. */ if (slot) return 0; config_mode = 0; } addr = busnum << 20; /* Bus in 27:20 */ addr |= slot << 15; /* Slot (device) in 19:15 */ addr |= function << 12; /* Function is in 14:12 */ addr |= (offset & 0xFFF); /* byte address in 0:11 */ #ifdef __BIG_ENDIAN /* Point to the correct part of the 32-bit "val". */ valp += 4 - size; #endif return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0, valp, size, addr); } static struct pci_ops tile_cfg_ops = { .read = tile_cfg_read, .write = tile_cfg_write, }; /* * In the following, each PCI controller's mem_resources[1] * represents its (non-prefetchable) PCI memory resource. * mem_resources[0] and mem_resources[2] refer to its PCI I/O and * prefetchable PCI memory resources, respectively. * For more details, see pci_setup_bridge() in setup-bus.c. * By comparing the target PCI memory address against the * end address of controller 0, we can determine the controller * that should accept the PCI memory access. 
*/ #define TILE_READ(size, type) \ type _tile_read##size(unsigned long addr) \ { \ type val; \ int idx = 0; \ if (addr > controllers[0].mem_resources[1].end && \ addr > controllers[0].mem_resources[2].end) \ idx = 1; \ if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \ (HV_VirtAddr)(&val), sizeof(type), addr)) \ pr_err("PCI: read %zd bytes at 0x%lX failed\n", \ sizeof(type), addr); \ return val; \ } \ EXPORT_SYMBOL(_tile_read##size) TILE_READ(b, u8); TILE_READ(w, u16); TILE_READ(l, u32); TILE_READ(q, u64); #define TILE_WRITE(size, type) \ void _tile_write##size(type val, unsigned long addr) \ { \ int idx = 0; \ if (addr > controllers[0].mem_resources[1].end && \ addr > controllers[0].mem_resources[2].end) \ idx = 1; \ if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \ (HV_VirtAddr)(&val), sizeof(type), addr)) \ pr_err("PCI: write %zd bytes at 0x%lX failed\n", \ sizeof(type), addr); \ } \ EXPORT_SYMBOL(_tile_write##size) TILE_WRITE(b, u8); TILE_WRITE(w, u16); TILE_WRITE(l, u32); TILE_WRITE(q, u64);
gpl-2.0
nc543/linux-stable
net/netfilter/nfnetlink_queue.c
4703
24751
/* * This is a module which is used for queueing packets and communicating with * userspace via nfnetlink. * * (C) 2005 by Harald Welte <laforge@netfilter.org> * (C) 2007 by Patrick McHardy <kaber@trash.net> * * Based on the old ipv4-only ip_queue.c: * (C) 2000-2002 James Morris <jmorris@intercode.com.au> * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/netfilter.h> #include <linux/proc_fs.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_queue.h> #include <linux/list.h> #include <net/sock.h> #include <net/netfilter/nf_queue.h> #include <linux/atomic.h> #ifdef CONFIG_BRIDGE_NETFILTER #include "../bridge/br_private.h" #endif #define NFQNL_QMAX_DEFAULT 1024 struct nfqnl_instance { struct hlist_node hlist; /* global list of queues */ struct rcu_head rcu; int peer_pid; unsigned int queue_maxlen; unsigned int copy_range; unsigned int queue_dropped; unsigned int queue_user_dropped; u_int16_t queue_num; /* number of this queue */ u_int8_t copy_mode; /* * Following fields are dirtied for each queued packet, * keep them in same cache line if possible. 
*/ spinlock_t lock; unsigned int queue_total; unsigned int id_sequence; /* 'sequence' of pkt ids */ struct list_head queue_list; /* packets in queue */ }; typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); static DEFINE_SPINLOCK(instances_lock); #define INSTANCE_BUCKETS 16 static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly; static inline u_int8_t instance_hashfn(u_int16_t queue_num) { return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS; } static struct nfqnl_instance * instance_lookup(u_int16_t queue_num) { struct hlist_head *head; struct hlist_node *pos; struct nfqnl_instance *inst; head = &instance_table[instance_hashfn(queue_num)]; hlist_for_each_entry_rcu(inst, pos, head, hlist) { if (inst->queue_num == queue_num) return inst; } return NULL; } static struct nfqnl_instance * instance_create(u_int16_t queue_num, int pid) { struct nfqnl_instance *inst; unsigned int h; int err; spin_lock(&instances_lock); if (instance_lookup(queue_num)) { err = -EEXIST; goto out_unlock; } inst = kzalloc(sizeof(*inst), GFP_ATOMIC); if (!inst) { err = -ENOMEM; goto out_unlock; } inst->queue_num = queue_num; inst->peer_pid = pid; inst->queue_maxlen = NFQNL_QMAX_DEFAULT; inst->copy_range = 0xfffff; inst->copy_mode = NFQNL_COPY_NONE; spin_lock_init(&inst->lock); INIT_LIST_HEAD(&inst->queue_list); if (!try_module_get(THIS_MODULE)) { err = -EAGAIN; goto out_free; } h = instance_hashfn(queue_num); hlist_add_head_rcu(&inst->hlist, &instance_table[h]); spin_unlock(&instances_lock); return inst; out_free: kfree(inst); out_unlock: spin_unlock(&instances_lock); return ERR_PTR(err); } static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data); static void instance_destroy_rcu(struct rcu_head *head) { struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, rcu); nfqnl_flush(inst, NULL, 0); kfree(inst); module_put(THIS_MODULE); } static void __instance_destroy(struct nfqnl_instance *inst) { 
hlist_del_rcu(&inst->hlist); call_rcu(&inst->rcu, instance_destroy_rcu); } static void instance_destroy(struct nfqnl_instance *inst) { spin_lock(&instances_lock); __instance_destroy(inst); spin_unlock(&instances_lock); } static inline void __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) { list_add_tail(&entry->list, &queue->queue_list); queue->queue_total++; } static void __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) { list_del(&entry->list); queue->queue_total--; } static struct nf_queue_entry * find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) { struct nf_queue_entry *entry = NULL, *i; spin_lock_bh(&queue->lock); list_for_each_entry(i, &queue->queue_list, list) { if (i->id == id) { entry = i; break; } } if (entry) __dequeue_entry(queue, entry); spin_unlock_bh(&queue->lock); return entry; } static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) { struct nf_queue_entry *entry, *next; spin_lock_bh(&queue->lock); list_for_each_entry_safe(entry, next, &queue->queue_list, list) { if (!cmpfn || cmpfn(entry, data)) { list_del(&entry->list); queue->queue_total--; nf_reinject(entry, NF_DROP); } } spin_unlock_bh(&queue->lock); } static struct sk_buff * nfqnl_build_packet_message(struct nfqnl_instance *queue, struct nf_queue_entry *entry, __be32 **packet_id_ptr) { sk_buff_data_t old_tail; size_t size; size_t data_len = 0; struct sk_buff *skb; struct nlattr *nla; struct nfqnl_msg_packet_hdr *pmsg; struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; struct sk_buff *entskb = entry->skb; struct net_device *indev; struct net_device *outdev; size = NLMSG_SPACE(sizeof(struct nfgenmsg)) + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #ifdef CONFIG_BRIDGE_NETFILTER + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #endif + 
nla_total_size(sizeof(u_int32_t)) /* mark */ + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); outdev = entry->outdev; switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { case NFQNL_COPY_META: case NFQNL_COPY_NONE: break; case NFQNL_COPY_PACKET: if (entskb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(entskb)) return NULL; data_len = ACCESS_ONCE(queue->copy_range); if (data_len == 0 || data_len > entskb->len) data_len = entskb->len; size += nla_total_size(data_len); break; } skb = alloc_skb(size, GFP_ATOMIC); if (!skb) goto nlmsg_failure; old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, sizeof(struct nfgenmsg)); nfmsg = NLMSG_DATA(nlh); nfmsg->nfgen_family = entry->pf; nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(queue->queue_num); nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg)); pmsg = nla_data(nla); pmsg->hw_protocol = entskb->protocol; pmsg->hook = entry->hook; *packet_id_ptr = &pmsg->packet_id; indev = entry->indev; if (indev) { #ifndef CONFIG_BRIDGE_NETFILTER NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); #else if (entry->pf == PF_BRIDGE) { /* Case 1: indev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, htonl(indev->ifindex)); /* this is the bridge group "brX" */ /* rcu_read_lock()ed by __nf_queue */ NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(br_port_get_rcu(indev)->br->dev->ifindex)); } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); if (entskb->nf_bridge && entskb->nf_bridge->physindev) NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, htonl(entskb->nf_bridge->physindev->ifindex)); } #endif } if (outdev) { #ifndef CONFIG_BRIDGE_NETFILTER NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, 
htonl(outdev->ifindex)); #else if (entry->pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * netfilter_bridge) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, htonl(outdev->ifindex)); /* this is the bridge group "brX" */ /* rcu_read_lock()ed by __nf_queue */ NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); } else { /* Case 2: outdev is bridge group, we need to look for * physical output device (when called from ipv4) */ NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, htonl(entskb->nf_bridge->physoutdev->ifindex)); } #endif } if (entskb->mark) NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); if (indev && entskb->dev && entskb->mac_header != entskb->network_header) { struct nfqnl_msg_packet_hw phw; int len = dev_parse_header(entskb, phw.hw_addr); if (len) { phw.hw_addrlen = htons(len); NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); } } if (entskb->tstamp.tv64) { struct nfqnl_msg_packet_timestamp ts; struct timeval tv = ktime_to_timeval(entskb->tstamp); ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = cpu_to_be64(tv.tv_usec); NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); } if (data_len) { struct nlattr *nla; int sz = nla_attr_size(data_len); if (skb_tailroom(skb) < nla_total_size(data_len)) { printk(KERN_WARNING "nf_queue: no tailroom!\n"); goto nlmsg_failure; } nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); nla->nla_type = NFQA_PAYLOAD; nla->nla_len = sz; if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) BUG(); } nlh->nlmsg_len = skb->tail - old_tail; return skb; nlmsg_failure: nla_put_failure: if (skb) kfree_skb(skb); if (net_ratelimit()) printk(KERN_ERR "nf_queue: error creating packet message\n"); return NULL; } static int nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) { struct sk_buff *nskb; struct 
nfqnl_instance *queue; int err = -ENOBUFS; __be32 *packet_id_ptr; /* rcu_read_lock()ed by nf_hook_slow() */ queue = instance_lookup(queuenum); if (!queue) { err = -ESRCH; goto err_out; } if (queue->copy_mode == NFQNL_COPY_NONE) { err = -EINVAL; goto err_out; } nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr); if (nskb == NULL) { err = -ENOMEM; goto err_out; } spin_lock_bh(&queue->lock); if (!queue->peer_pid) { err = -EINVAL; goto err_out_free_nskb; } if (queue->queue_total >= queue->queue_maxlen) { queue->queue_dropped++; if (net_ratelimit()) printk(KERN_WARNING "nf_queue: full at %d entries, " "dropping packets(s).\n", queue->queue_total); goto err_out_free_nskb; } entry->id = ++queue->id_sequence; *packet_id_ptr = htonl(entry->id); /* nfnetlink_unicast will either free the nskb or add it to a socket */ err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); if (err < 0) { queue->queue_user_dropped++; goto err_out_unlock; } __enqueue_entry(queue, entry); spin_unlock_bh(&queue->lock); return 0; err_out_free_nskb: kfree_skb(nskb); err_out_unlock: spin_unlock_bh(&queue->lock); err_out: return err; } static int nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) { struct sk_buff *nskb; int diff; diff = data_len - e->skb->len; if (diff < 0) { if (pskb_trim(e->skb, data_len)) return -ENOMEM; } else if (diff > 0) { if (data_len > 0xFFFF) return -EINVAL; if (diff > skb_tailroom(e->skb)) { nskb = skb_copy_expand(e->skb, skb_headroom(e->skb), diff, GFP_ATOMIC); if (!nskb) { printk(KERN_WARNING "nf_queue: OOM " "in mangle, dropping packet\n"); return -ENOMEM; } kfree_skb(e->skb); e->skb = nskb; } skb_put(e->skb, diff); } if (!skb_make_writable(e->skb, data_len)) return -ENOMEM; skb_copy_to_linear_data(e->skb, data, data_len); e->skb->ip_summed = CHECKSUM_NONE; return 0; } static int nfqnl_set_mode(struct nfqnl_instance *queue, unsigned char mode, unsigned int range) { int status = 0; spin_lock_bh(&queue->lock); switch (mode) { 
case NFQNL_COPY_NONE: case NFQNL_COPY_META: queue->copy_mode = mode; queue->copy_range = 0; break; case NFQNL_COPY_PACKET: queue->copy_mode = mode; /* we're using struct nlattr which has 16bit nla_len */ if (range > 0xffff) queue->copy_range = 0xffff; else queue->copy_range = range; break; default: status = -EINVAL; } spin_unlock_bh(&queue->lock); return status; } static int dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex) { if (entry->indev) if (entry->indev->ifindex == ifindex) return 1; if (entry->outdev) if (entry->outdev->ifindex == ifindex) return 1; #ifdef CONFIG_BRIDGE_NETFILTER if (entry->skb->nf_bridge) { if (entry->skb->nf_bridge->physindev && entry->skb->nf_bridge->physindev->ifindex == ifindex) return 1; if (entry->skb->nf_bridge->physoutdev && entry->skb->nf_bridge->physoutdev->ifindex == ifindex) return 1; } #endif return 0; } /* drop all packets with either indev or outdev == ifindex from all queue * instances */ static void nfqnl_dev_drop(int ifindex) { int i; rcu_read_lock(); for (i = 0; i < INSTANCE_BUCKETS; i++) { struct hlist_node *tmp; struct nfqnl_instance *inst; struct hlist_head *head = &instance_table[i]; hlist_for_each_entry_rcu(inst, tmp, head, hlist) nfqnl_flush(inst, dev_cmp, ifindex); } rcu_read_unlock(); } #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) static int nfqnl_rcv_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; /* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) nfqnl_dev_drop(dev->ifindex); return NOTIFY_DONE; } static struct notifier_block nfqnl_dev_notifier = { .notifier_call = nfqnl_rcv_dev_event, }; static int nfqnl_rcv_nl_event(struct notifier_block *this, unsigned long event, void *ptr) { struct netlink_notify *n = ptr; if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { int i; /* destroy all instances for this pid 
*/ spin_lock(&instances_lock); for (i = 0; i < INSTANCE_BUCKETS; i++) { struct hlist_node *tmp, *t2; struct nfqnl_instance *inst; struct hlist_head *head = &instance_table[i]; hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { if ((n->net == &init_net) && (n->pid == inst->peer_pid)) __instance_destroy(inst); } } spin_unlock(&instances_lock); } return NOTIFY_DONE; } static struct notifier_block nfqnl_rtnl_notifier = { .notifier_call = nfqnl_rcv_nl_event, }; static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, [NFQA_MARK] = { .type = NLA_U32 }, [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, }; static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = { [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, [NFQA_MARK] = { .type = NLA_U32 }, }; static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid) { struct nfqnl_instance *queue; queue = instance_lookup(queue_num); if (!queue) return ERR_PTR(-ENODEV); if (queue->peer_pid != nlpid) return ERR_PTR(-EPERM); return queue; } static struct nfqnl_msg_verdict_hdr* verdicthdr_get(const struct nlattr * const nfqa[]) { struct nfqnl_msg_verdict_hdr *vhdr; unsigned int verdict; if (!nfqa[NFQA_VERDICT_HDR]) return NULL; vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK; if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN) return NULL; return vhdr; } static int nfq_id_after(unsigned int id, unsigned int max) { return (int)(id - max) > 0; } static int nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); struct nf_queue_entry *entry, *tmp; unsigned int verdict, maxid; struct nfqnl_msg_verdict_hdr *vhdr; struct nfqnl_instance *queue; LIST_HEAD(batch_list); u16 queue_num = ntohs(nfmsg->res_id); queue = verdict_instance_lookup(queue_num, 
NETLINK_CB(skb).pid); if (IS_ERR(queue)) return PTR_ERR(queue); vhdr = verdicthdr_get(nfqa); if (!vhdr) return -EINVAL; verdict = ntohl(vhdr->verdict); maxid = ntohl(vhdr->id); spin_lock_bh(&queue->lock); list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { if (nfq_id_after(entry->id, maxid)) break; __dequeue_entry(queue, entry); list_add_tail(&entry->list, &batch_list); } spin_unlock_bh(&queue->lock); if (list_empty(&batch_list)) return -ENOENT; list_for_each_entry_safe(entry, tmp, &batch_list, list) { if (nfqa[NFQA_MARK]) entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); nf_reinject(entry, verdict); } return 0; } static int nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); struct nfqnl_msg_verdict_hdr *vhdr; struct nfqnl_instance *queue; unsigned int verdict; struct nf_queue_entry *entry; queue = instance_lookup(queue_num); if (!queue) queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid); if (IS_ERR(queue)) return PTR_ERR(queue); vhdr = verdicthdr_get(nfqa); if (!vhdr) return -EINVAL; verdict = ntohl(vhdr->verdict); entry = find_dequeue_entry(queue, ntohl(vhdr->id)); if (entry == NULL) return -ENOENT; if (nfqa[NFQA_PAYLOAD]) { if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) verdict = NF_DROP; } if (nfqa[NFQA_MARK]) entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK])); nf_reinject(entry, verdict); return 0; } static int nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[]) { return -ENOTSUPP; } static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, }; static const struct nf_queue_handler nfqh = { .name = "nf_queue", .outfn 
= &nfqnl_enqueue_packet, }; static int nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); struct nfqnl_instance *queue; struct nfqnl_msg_config_cmd *cmd = NULL; int ret = 0; if (nfqa[NFQA_CFG_CMD]) { cmd = nla_data(nfqa[NFQA_CFG_CMD]); /* Commands without queue context - might sleep */ switch (cmd->command) { case NFQNL_CFG_CMD_PF_BIND: return nf_register_queue_handler(ntohs(cmd->pf), &nfqh); case NFQNL_CFG_CMD_PF_UNBIND: return nf_unregister_queue_handler(ntohs(cmd->pf), &nfqh); } } rcu_read_lock(); queue = instance_lookup(queue_num); if (queue && queue->peer_pid != NETLINK_CB(skb).pid) { ret = -EPERM; goto err_out_unlock; } if (cmd != NULL) { switch (cmd->command) { case NFQNL_CFG_CMD_BIND: if (queue) { ret = -EBUSY; goto err_out_unlock; } queue = instance_create(queue_num, NETLINK_CB(skb).pid); if (IS_ERR(queue)) { ret = PTR_ERR(queue); goto err_out_unlock; } break; case NFQNL_CFG_CMD_UNBIND: if (!queue) { ret = -ENODEV; goto err_out_unlock; } instance_destroy(queue); break; case NFQNL_CFG_CMD_PF_BIND: case NFQNL_CFG_CMD_PF_UNBIND: break; default: ret = -ENOTSUPP; break; } } if (nfqa[NFQA_CFG_PARAMS]) { struct nfqnl_msg_config_params *params; if (!queue) { ret = -ENODEV; goto err_out_unlock; } params = nla_data(nfqa[NFQA_CFG_PARAMS]); nfqnl_set_mode(queue, params->copy_mode, ntohl(params->copy_range)); } if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { __be32 *queue_maxlen; if (!queue) { ret = -ENODEV; goto err_out_unlock; } queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); spin_lock_bh(&queue->lock); queue->queue_maxlen = ntohl(*queue_maxlen); spin_unlock_bh(&queue->lock); } err_out_unlock: rcu_read_unlock(); return ret; } static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { [NFQNL_MSG_PACKET] = { .call_rcu = nfqnl_recv_unsupp, .attr_count = NFQA_MAX, }, [NFQNL_MSG_VERDICT] = { .call_rcu = 
nfqnl_recv_verdict, .attr_count = NFQA_MAX, .policy = nfqa_verdict_policy }, [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, .attr_count = NFQA_CFG_MAX, .policy = nfqa_cfg_policy }, [NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch, .attr_count = NFQA_MAX, .policy = nfqa_verdict_batch_policy }, }; static const struct nfnetlink_subsystem nfqnl_subsys = { .name = "nf_queue", .subsys_id = NFNL_SUBSYS_QUEUE, .cb_count = NFQNL_MSG_MAX, .cb = nfqnl_cb, }; #ifdef CONFIG_PROC_FS struct iter_state { unsigned int bucket; }; static struct hlist_node *get_first(struct seq_file *seq) { struct iter_state *st = seq->private; if (!st) return NULL; for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { if (!hlist_empty(&instance_table[st->bucket])) return instance_table[st->bucket].first; } return NULL; } static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) { struct iter_state *st = seq->private; h = h->next; while (!h) { if (++st->bucket >= INSTANCE_BUCKETS) return NULL; h = instance_table[st->bucket].first; } return h; } static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos) { struct hlist_node *head; head = get_first(seq); if (head) while (pos && (head = get_next(seq, head))) pos--; return pos ? 
NULL : head; } static void *seq_start(struct seq_file *seq, loff_t *pos) __acquires(instances_lock) { spin_lock(&instances_lock); return get_idx(seq, *pos); } static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { (*pos)++; return get_next(s, v); } static void seq_stop(struct seq_file *s, void *v) __releases(instances_lock) { spin_unlock(&instances_lock); } static int seq_show(struct seq_file *s, void *v) { const struct nfqnl_instance *inst = v; return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n", inst->queue_num, inst->peer_pid, inst->queue_total, inst->copy_mode, inst->copy_range, inst->queue_dropped, inst->queue_user_dropped, inst->id_sequence, 1); } static const struct seq_operations nfqnl_seq_ops = { .start = seq_start, .next = seq_next, .stop = seq_stop, .show = seq_show, }; static int nfqnl_open(struct inode *inode, struct file *file) { return seq_open_private(file, &nfqnl_seq_ops, sizeof(struct iter_state)); } static const struct file_operations nfqnl_file_ops = { .owner = THIS_MODULE, .open = nfqnl_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif /* PROC_FS */ static int __init nfnetlink_queue_init(void) { int i, status = -ENOMEM; for (i = 0; i < INSTANCE_BUCKETS; i++) INIT_HLIST_HEAD(&instance_table[i]); netlink_register_notifier(&nfqnl_rtnl_notifier); status = nfnetlink_subsys_register(&nfqnl_subsys); if (status < 0) { printk(KERN_ERR "nf_queue: failed to create netlink socket\n"); goto cleanup_netlink_notifier; } #ifdef CONFIG_PROC_FS if (!proc_create("nfnetlink_queue", 0440, proc_net_netfilter, &nfqnl_file_ops)) goto cleanup_subsys; #endif register_netdevice_notifier(&nfqnl_dev_notifier); return status; #ifdef CONFIG_PROC_FS cleanup_subsys: nfnetlink_subsys_unregister(&nfqnl_subsys); #endif cleanup_netlink_notifier: netlink_unregister_notifier(&nfqnl_rtnl_notifier); return status; } static void __exit nfnetlink_queue_fini(void) { nf_unregister_queue_handlers(&nfqh); 
unregister_netdevice_notifier(&nfqnl_dev_notifier); #ifdef CONFIG_PROC_FS remove_proc_entry("nfnetlink_queue", proc_net_netfilter); #endif nfnetlink_subsys_unregister(&nfqnl_subsys); netlink_unregister_notifier(&nfqnl_rtnl_notifier); rcu_barrier(); /* Wait for completion of call_rcu()'s */ } MODULE_DESCRIPTION("netfilter packet queue handler"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE); module_init(nfnetlink_queue_init); module_exit(nfnetlink_queue_fini);
gpl-2.0
Rashed97/android_kernel_samsung_lt03lte
arch/arm/mach-iop13xx/setup.c
4959
15393
/* * iop13xx platform Initialization * Copyright (c) 2005-2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/dma-mapping.h> #include <linux/serial_8250.h> #include <linux/io.h> #ifdef CONFIG_MTD_PHYSMAP #include <linux/mtd/physmap.h> #endif #include <asm/mach/map.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/hardware/iop_adma.h> #define IOP13XX_UART_XTAL 33334000 #define IOP13XX_SETUP_DEBUG 0 #define PRINTK(x...) 
((void)(IOP13XX_SETUP_DEBUG && printk(x))) /* Standard IO mapping for all IOP13XX based systems */ static struct map_desc iop13xx_std_desc[] __initdata = { { /* mem mapped registers */ .virtual = IOP13XX_PMMR_VIRT_MEM_BASE, .pfn = __phys_to_pfn(IOP13XX_PMMR_PHYS_MEM_BASE), .length = IOP13XX_PMMR_SIZE, .type = MT_DEVICE, }, { /* PCIE IO space */ .virtual = IOP13XX_PCIE_LOWER_IO_VA, .pfn = __phys_to_pfn(IOP13XX_PCIE_LOWER_IO_PA), .length = IOP13XX_PCIX_IO_WINDOW_SIZE, .type = MT_DEVICE, }, { /* PCIX IO space */ .virtual = IOP13XX_PCIX_LOWER_IO_VA, .pfn = __phys_to_pfn(IOP13XX_PCIX_LOWER_IO_PA), .length = IOP13XX_PCIX_IO_WINDOW_SIZE, .type = MT_DEVICE, }, }; static struct resource iop13xx_uart0_resources[] = { [0] = { .start = IOP13XX_UART0_PHYS, .end = IOP13XX_UART0_PHYS + 0x3f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_UART0, .end = IRQ_IOP13XX_UART0, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_uart1_resources[] = { [0] = { .start = IOP13XX_UART1_PHYS, .end = IOP13XX_UART1_PHYS + 0x3f, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_UART1, .end = IRQ_IOP13XX_UART1, .flags = IORESOURCE_IRQ } }; static struct plat_serial8250_port iop13xx_uart0_data[] = { { .membase = (char*)(IOP13XX_UART0_VIRT), .mapbase = (IOP13XX_UART0_PHYS), .irq = IRQ_IOP13XX_UART0, .uartclk = IOP13XX_UART_XTAL, .regshift = 2, .iotype = UPIO_MEM, .flags = UPF_SKIP_TEST, }, { }, }; static struct plat_serial8250_port iop13xx_uart1_data[] = { { .membase = (char*)(IOP13XX_UART1_VIRT), .mapbase = (IOP13XX_UART1_PHYS), .irq = IRQ_IOP13XX_UART1, .uartclk = IOP13XX_UART_XTAL, .regshift = 2, .iotype = UPIO_MEM, .flags = UPF_SKIP_TEST, }, { }, }; /* The ids are fixed up later in iop13xx_platform_init */ static struct platform_device iop13xx_uart0 = { .name = "serial8250", .id = 0, .dev.platform_data = iop13xx_uart0_data, .num_resources = 2, .resource = iop13xx_uart0_resources, }; static struct platform_device iop13xx_uart1 = { .name = "serial8250", .id = 0, 
.dev.platform_data = iop13xx_uart1_data, .num_resources = 2, .resource = iop13xx_uart1_resources }; static struct resource iop13xx_i2c_0_resources[] = { [0] = { .start = IOP13XX_I2C0_PHYS, .end = IOP13XX_I2C0_PHYS + 0x18, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_I2C_0, .end = IRQ_IOP13XX_I2C_0, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_i2c_1_resources[] = { [0] = { .start = IOP13XX_I2C1_PHYS, .end = IOP13XX_I2C1_PHYS + 0x18, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_I2C_1, .end = IRQ_IOP13XX_I2C_1, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_i2c_2_resources[] = { [0] = { .start = IOP13XX_I2C2_PHYS, .end = IOP13XX_I2C2_PHYS + 0x18, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_I2C_2, .end = IRQ_IOP13XX_I2C_2, .flags = IORESOURCE_IRQ } }; /* I2C controllers. The IOP13XX uses the same block as the IOP3xx, so * we just use the same device name. */ /* The ids are fixed up later in iop13xx_platform_init */ static struct platform_device iop13xx_i2c_0_controller = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = iop13xx_i2c_0_resources }; static struct platform_device iop13xx_i2c_1_controller = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = iop13xx_i2c_1_resources }; static struct platform_device iop13xx_i2c_2_controller = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = iop13xx_i2c_2_resources }; #ifdef CONFIG_MTD_PHYSMAP /* PBI Flash Device */ static struct physmap_flash_data iq8134x_flash_data = { .width = 2, }; static struct resource iq8134x_flash_resource = { .start = IQ81340_FLASHBASE, .end = 0, .flags = IORESOURCE_MEM, }; static struct platform_device iq8134x_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &iq8134x_flash_data, }, .num_resources = 1, .resource = &iq8134x_flash_resource, }; static unsigned long iq8134x_probe_flash_size(void) { uint8_t __iomem *flash_addr = ioremap(IQ81340_FLASHBASE, PAGE_SIZE); int i; char 
query[3]; unsigned long size = 0; int width = iq8134x_flash_data.width; if (flash_addr) { /* send CFI 'query' command */ writew(0x98, flash_addr); /* check for CFI compliance */ for (i = 0; i < 3 * width; i += width) query[i / width] = readb(flash_addr + (0x10 * width) + i); /* read the size */ if (memcmp(query, "QRY", 3) == 0) size = 1 << readb(flash_addr + (0x27 * width)); /* send CFI 'read array' command */ writew(0xff, flash_addr); iounmap(flash_addr); } return size; } #endif /* ADMA Channels */ static struct resource iop13xx_adma_0_resources[] = { [0] = { .start = IOP13XX_ADMA_PHYS_BASE(0), .end = IOP13XX_ADMA_UPPER_PA(0), .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_ADMA0_EOT, .end = IRQ_IOP13XX_ADMA0_EOT, .flags = IORESOURCE_IRQ }, [2] = { .start = IRQ_IOP13XX_ADMA0_EOC, .end = IRQ_IOP13XX_ADMA0_EOC, .flags = IORESOURCE_IRQ }, [3] = { .start = IRQ_IOP13XX_ADMA0_ERR, .end = IRQ_IOP13XX_ADMA0_ERR, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_adma_1_resources[] = { [0] = { .start = IOP13XX_ADMA_PHYS_BASE(1), .end = IOP13XX_ADMA_UPPER_PA(1), .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_ADMA1_EOT, .end = IRQ_IOP13XX_ADMA1_EOT, .flags = IORESOURCE_IRQ }, [2] = { .start = IRQ_IOP13XX_ADMA1_EOC, .end = IRQ_IOP13XX_ADMA1_EOC, .flags = IORESOURCE_IRQ }, [3] = { .start = IRQ_IOP13XX_ADMA1_ERR, .end = IRQ_IOP13XX_ADMA1_ERR, .flags = IORESOURCE_IRQ } }; static struct resource iop13xx_adma_2_resources[] = { [0] = { .start = IOP13XX_ADMA_PHYS_BASE(2), .end = IOP13XX_ADMA_UPPER_PA(2), .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IOP13XX_ADMA2_EOT, .end = IRQ_IOP13XX_ADMA2_EOT, .flags = IORESOURCE_IRQ }, [2] = { .start = IRQ_IOP13XX_ADMA2_EOC, .end = IRQ_IOP13XX_ADMA2_EOC, .flags = IORESOURCE_IRQ }, [3] = { .start = IRQ_IOP13XX_ADMA2_ERR, .end = IRQ_IOP13XX_ADMA2_ERR, .flags = IORESOURCE_IRQ } }; static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64); static struct iop_adma_platform_data iop13xx_adma_0_data = { .hw_id = 0, .pool_size = 
PAGE_SIZE, }; static struct iop_adma_platform_data iop13xx_adma_1_data = { .hw_id = 1, .pool_size = PAGE_SIZE, }; static struct iop_adma_platform_data iop13xx_adma_2_data = { .hw_id = 2, .pool_size = PAGE_SIZE, }; /* The ids are fixed up later in iop13xx_platform_init */ static struct platform_device iop13xx_adma_0_channel = { .name = "iop-adma", .id = 0, .num_resources = 4, .resource = iop13xx_adma_0_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(64), .platform_data = (void *) &iop13xx_adma_0_data, }, }; static struct platform_device iop13xx_adma_1_channel = { .name = "iop-adma", .id = 0, .num_resources = 4, .resource = iop13xx_adma_1_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(64), .platform_data = (void *) &iop13xx_adma_1_data, }, }; static struct platform_device iop13xx_adma_2_channel = { .name = "iop-adma", .id = 0, .num_resources = 4, .resource = iop13xx_adma_2_resources, .dev = { .dma_mask = &iop13xx_adma_dmamask, .coherent_dma_mask = DMA_BIT_MASK(64), .platform_data = (void *) &iop13xx_adma_2_data, }, }; void __init iop13xx_map_io(void) { /* Initialize the Static Page Table maps */ iotable_init(iop13xx_std_desc, ARRAY_SIZE(iop13xx_std_desc)); } static int init_uart; static int init_i2c; static int init_adma; void __init iop13xx_platform_init(void) { int i; u32 uart_idx, i2c_idx, adma_idx, plat_idx; struct platform_device *iop13xx_devices[IQ81340_MAX_PLAT_DEVICES]; /* set the bases so we can read the device id */ iop13xx_set_atu_mmr_bases(); memset(iop13xx_devices, 0, sizeof(iop13xx_devices)); if (init_uart == IOP13XX_INIT_UART_DEFAULT) { switch (iop13xx_dev_id()) { /* enable both uarts on iop341 */ case 0x3380: case 0x3384: case 0x3388: case 0x338c: init_uart |= IOP13XX_INIT_UART_0; init_uart |= IOP13XX_INIT_UART_1; break; /* only enable uart 1 */ default: init_uart |= IOP13XX_INIT_UART_1; } } if (init_i2c == IOP13XX_INIT_I2C_DEFAULT) { switch (iop13xx_dev_id()) { /* 
enable all i2c units on iop341 and iop342 */ case 0x3380: case 0x3384: case 0x3388: case 0x338c: case 0x3382: case 0x3386: case 0x338a: case 0x338e: init_i2c |= IOP13XX_INIT_I2C_0; init_i2c |= IOP13XX_INIT_I2C_1; init_i2c |= IOP13XX_INIT_I2C_2; break; /* only enable i2c 1 and 2 */ default: init_i2c |= IOP13XX_INIT_I2C_1; init_i2c |= IOP13XX_INIT_I2C_2; } } if (init_adma == IOP13XX_INIT_ADMA_DEFAULT) { init_adma |= IOP13XX_INIT_ADMA_0; init_adma |= IOP13XX_INIT_ADMA_1; init_adma |= IOP13XX_INIT_ADMA_2; } plat_idx = 0; uart_idx = 0; i2c_idx = 0; /* uart 1 (if enabled) is ttyS0 */ if (init_uart & IOP13XX_INIT_UART_1) { PRINTK("Adding uart1 to platform device list\n"); iop13xx_uart1.id = uart_idx++; iop13xx_devices[plat_idx++] = &iop13xx_uart1; } if (init_uart & IOP13XX_INIT_UART_0) { PRINTK("Adding uart0 to platform device list\n"); iop13xx_uart0.id = uart_idx++; iop13xx_devices[plat_idx++] = &iop13xx_uart0; } for(i = 0; i < IQ81340_NUM_I2C; i++) { if ((init_i2c & (1 << i)) && IOP13XX_SETUP_DEBUG) printk("Adding i2c%d to platform device list\n", i); switch(init_i2c & (1 << i)) { case IOP13XX_INIT_I2C_0: iop13xx_i2c_0_controller.id = i2c_idx++; iop13xx_devices[plat_idx++] = &iop13xx_i2c_0_controller; break; case IOP13XX_INIT_I2C_1: iop13xx_i2c_1_controller.id = i2c_idx++; iop13xx_devices[plat_idx++] = &iop13xx_i2c_1_controller; break; case IOP13XX_INIT_I2C_2: iop13xx_i2c_2_controller.id = i2c_idx++; iop13xx_devices[plat_idx++] = &iop13xx_i2c_2_controller; break; } } /* initialize adma channel ids and capabilities */ adma_idx = 0; for (i = 0; i < IQ81340_NUM_ADMA; i++) { struct iop_adma_platform_data *plat_data; if ((init_adma & (1 << i)) && IOP13XX_SETUP_DEBUG) printk(KERN_INFO "Adding adma%d to platform device list\n", i); switch (init_adma & (1 << i)) { case IOP13XX_INIT_ADMA_0: iop13xx_adma_0_channel.id = adma_idx++; iop13xx_devices[plat_idx++] = &iop13xx_adma_0_channel; plat_data = &iop13xx_adma_0_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); 
dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_1: iop13xx_adma_1_channel.id = adma_idx++; iop13xx_devices[plat_idx++] = &iop13xx_adma_1_channel; plat_data = &iop13xx_adma_1_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_2: iop13xx_adma_2_channel.id = adma_idx++; iop13xx_devices[plat_idx++] = &iop13xx_adma_2_channel; plat_data = &iop13xx_adma_2_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_PQ, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); break; } } #ifdef CONFIG_MTD_PHYSMAP iq8134x_flash_resource.end = iq8134x_flash_resource.start + iq8134x_probe_flash_size() - 1; if (iq8134x_flash_resource.end > iq8134x_flash_resource.start) iop13xx_devices[plat_idx++] = &iq8134x_flash; else printk(KERN_ERR "%s: Failed to probe flash size\n", __func__); #endif platform_add_devices(iop13xx_devices, plat_idx); } static int __init iop13xx_init_uart_setup(char *str) { if (str) { while (*str != '\0') { switch(*str) { case '0': init_uart |= IOP13XX_INIT_UART_0; break; case '1': init_uart |= IOP13XX_INIT_UART_1; break; case ',': case '=': break; default: PRINTK("\"iop13xx_init_uart\" malformed" " at character: \'%c\'", *str); *(str + 1) = '\0'; init_uart = IOP13XX_INIT_UART_DEFAULT; } str++; } } return 1; } static int __init iop13xx_init_i2c_setup(char *str) { if (str) { while (*str != '\0') { switch(*str) { case '0': init_i2c |= IOP13XX_INIT_I2C_0; break; case '1': init_i2c |= 
IOP13XX_INIT_I2C_1; break; case '2': init_i2c |= IOP13XX_INIT_I2C_2; break; case ',': case '=': break; default: PRINTK("\"iop13xx_init_i2c\" malformed" " at character: \'%c\'", *str); *(str + 1) = '\0'; init_i2c = IOP13XX_INIT_I2C_DEFAULT; } str++; } } return 1; } static int __init iop13xx_init_adma_setup(char *str) { if (str) { while (*str != '\0') { switch (*str) { case '0': init_adma |= IOP13XX_INIT_ADMA_0; break; case '1': init_adma |= IOP13XX_INIT_ADMA_1; break; case '2': init_adma |= IOP13XX_INIT_ADMA_2; break; case ',': case '=': break; default: PRINTK("\"iop13xx_init_adma\" malformed" " at character: \'%c\'", *str); *(str + 1) = '\0'; init_adma = IOP13XX_INIT_ADMA_DEFAULT; } str++; } } return 1; } __setup("iop13xx_init_adma", iop13xx_init_adma_setup); __setup("iop13xx_init_uart", iop13xx_init_uart_setup); __setup("iop13xx_init_i2c", iop13xx_init_i2c_setup); void iop13xx_restart(char mode, const char *cmd) { /* * Reset the internal bus (warning both cores are reset) */ write_wdtcr(IOP_WDTCR_EN_ARM); write_wdtcr(IOP_WDTCR_EN); write_wdtsr(IOP13XX_WDTSR_WRITE_EN | IOP13XX_WDTCR_IB_RESET); write_wdtcr(0x1000); }
gpl-2.0
mightyme/linux-3.0.39-m04x
net/ipv4/inet_fragment.c
5727
6507
/* * inet fragments management * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Pavel Emelyanov <xemul@openvz.org> * Started as consolidation of ipv4/ip_fragment.c, * ipv6/reassembly. and ipv6 nf conntrack reassembly */ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <net/inet_frag.h> static void inet_frag_secret_rebuild(unsigned long dummy) { struct inet_frags *f = (struct inet_frags *)dummy; unsigned long now = jiffies; int i; write_lock(&f->lock); get_random_bytes(&f->rnd, sizeof(u32)); for (i = 0; i < INETFRAGS_HASHSZ; i++) { struct inet_frag_queue *q; struct hlist_node *p, *n; hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) { unsigned int hval = f->hashfn(q); if (hval != i) { hlist_del(&q->list); /* Relink to new hash chain. 
*/ hlist_add_head(&q->list, &f->hash[hval]); } } } write_unlock(&f->lock); mod_timer(&f->secret_timer, now + f->secret_interval); } void inet_frags_init(struct inet_frags *f) { int i; for (i = 0; i < INETFRAGS_HASHSZ; i++) INIT_HLIST_HEAD(&f->hash[i]); rwlock_init(&f->lock); f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ (jiffies ^ (jiffies >> 6))); setup_timer(&f->secret_timer, inet_frag_secret_rebuild, (unsigned long)f); f->secret_timer.expires = jiffies + f->secret_interval; add_timer(&f->secret_timer); } EXPORT_SYMBOL(inet_frags_init); void inet_frags_init_net(struct netns_frags *nf) { nf->nqueues = 0; atomic_set(&nf->mem, 0); INIT_LIST_HEAD(&nf->lru_list); } EXPORT_SYMBOL(inet_frags_init_net); void inet_frags_fini(struct inet_frags *f) { del_timer(&f->secret_timer); } EXPORT_SYMBOL(inet_frags_fini); void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f) { nf->low_thresh = 0; local_bh_disable(); inet_frag_evictor(nf, f); local_bh_enable(); } EXPORT_SYMBOL(inet_frags_exit_net); static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) { write_lock(&f->lock); hlist_del(&fq->list); list_del(&fq->lru_list); fq->net->nqueues--; write_unlock(&f->lock); } void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f) { if (del_timer(&fq->timer)) atomic_dec(&fq->refcnt); if (!(fq->last_in & INET_FRAG_COMPLETE)) { fq_unlink(fq, f); atomic_dec(&fq->refcnt); fq->last_in |= INET_FRAG_COMPLETE; } } EXPORT_SYMBOL(inet_frag_kill); static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f, struct sk_buff *skb, int *work) { if (work) *work -= skb->truesize; atomic_sub(skb->truesize, &nf->mem); if (f->skb_free) f->skb_free(skb); kfree_skb(skb); } void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f, int *work) { struct sk_buff *fp; struct netns_frags *nf; WARN_ON(!(q->last_in & INET_FRAG_COMPLETE)); WARN_ON(del_timer(&q->timer) != 0); /* Release all fragment data. 
*/ fp = q->fragments; nf = q->net; while (fp) { struct sk_buff *xp = fp->next; frag_kfree_skb(nf, f, fp, work); fp = xp; } if (work) *work -= f->qsize; atomic_sub(f->qsize, &nf->mem); if (f->destructor) f->destructor(q); kfree(q); } EXPORT_SYMBOL(inet_frag_destroy); int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f) { struct inet_frag_queue *q; int work, evicted = 0; work = atomic_read(&nf->mem) - nf->low_thresh; while (work > 0) { read_lock(&f->lock); if (list_empty(&nf->lru_list)) { read_unlock(&f->lock); break; } q = list_first_entry(&nf->lru_list, struct inet_frag_queue, lru_list); atomic_inc(&q->refcnt); read_unlock(&f->lock); spin_lock(&q->lock); if (!(q->last_in & INET_FRAG_COMPLETE)) inet_frag_kill(q, f); spin_unlock(&q->lock); if (atomic_dec_and_test(&q->refcnt)) inet_frag_destroy(q, f, &work); evicted++; } return evicted; } EXPORT_SYMBOL(inet_frag_evictor); static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, struct inet_frag_queue *qp_in, struct inet_frags *f, void *arg) { struct inet_frag_queue *qp; #ifdef CONFIG_SMP struct hlist_node *n; #endif unsigned int hash; write_lock(&f->lock); /* * While we stayed w/o the lock other CPU could update * the rnd seed, so we need to re-calculate the hash * chain. Fortunatelly the qp_in can be used to get one. */ hash = f->hashfn(qp_in); #ifdef CONFIG_SMP /* With SMP race we have to recheck hash table, because * such entry could be created on other cpu, while we * promoted read lock to write lock. 
*/ hlist_for_each_entry(qp, n, &f->hash[hash], list) { if (qp->net == nf && f->match(qp, arg)) { atomic_inc(&qp->refcnt); write_unlock(&f->lock); qp_in->last_in |= INET_FRAG_COMPLETE; inet_frag_put(qp_in, f); return qp; } } #endif qp = qp_in; if (!mod_timer(&qp->timer, jiffies + nf->timeout)) atomic_inc(&qp->refcnt); atomic_inc(&qp->refcnt); hlist_add_head(&qp->list, &f->hash[hash]); list_add_tail(&qp->lru_list, &nf->lru_list); nf->nqueues++; write_unlock(&f->lock); return qp; } static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, struct inet_frags *f, void *arg) { struct inet_frag_queue *q; q = kzalloc(f->qsize, GFP_ATOMIC); if (q == NULL) return NULL; f->constructor(q, arg); atomic_add(f->qsize, &nf->mem); setup_timer(&q->timer, f->frag_expire, (unsigned long)q); spin_lock_init(&q->lock); atomic_set(&q->refcnt, 1); q->net = nf; return q; } static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, struct inet_frags *f, void *arg) { struct inet_frag_queue *q; q = inet_frag_alloc(nf, f, arg); if (q == NULL) return NULL; return inet_frag_intern(nf, q, f, arg); } struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, struct inet_frags *f, void *key, unsigned int hash) __releases(&f->lock) { struct inet_frag_queue *q; struct hlist_node *n; hlist_for_each_entry(q, n, &f->hash[hash], list) { if (q->net == nf && f->match(q, key)) { atomic_inc(&q->refcnt); read_unlock(&f->lock); return q; } } read_unlock(&f->lock); return inet_frag_create(nf, f, key); } EXPORT_SYMBOL(inet_frag_find);
gpl-2.0
spock1104/android_kernel_zte_msm8930
net/ipx/af_ipx.c
7007
51483
/* * Implements an IPX socket layer. * * This code is derived from work by * Ross Biro : Writing the original IP stack * Fred Van Kempen : Tidying up the TCP/IP * * Many thanks go to Keith Baker, Institute For Industrial Information * Technology Ltd, Swansea University for allowing me to work on this * in my own time even though it was in some ways related to commercial * work I am currently employed to do there. * * All the material in this file is subject to the Gnu license version 2. * Neither Alan Cox nor the Swansea University Computer Society admit * liability nor provide warranty for any of this software. This material * is provided as is and at no charge. * * Portions Copyright (c) 2000-2003 Conectiva, Inc. <acme@conectiva.com.br> * Neither Arnaldo Carvalho de Melo nor Conectiva, Inc. admit liability nor * provide warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * Portions Copyright (c) 1995 Caldera, Inc. <greg@caldera.com> * Neither Greg Page nor Caldera, Inc. admit liability nor provide * warranty for any of this software. This material is provided * "AS-IS" and at no charge. * * See net/ipx/ChangeLog. 
*/ #include <linux/capability.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/init.h> #include <linux/ipx.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/uio.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/string.h> #include <linux/types.h> #include <linux/termios.h> #include <net/ipx.h> #include <net/p8022.h> #include <net/psnap.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #ifdef CONFIG_SYSCTL extern void ipx_register_sysctl(void); extern void ipx_unregister_sysctl(void); #else #define ipx_register_sysctl() #define ipx_unregister_sysctl() #endif /* Configuration Variables */ static unsigned char ipxcfg_max_hops = 16; static char ipxcfg_auto_select_primary; static char ipxcfg_auto_create_interfaces; int sysctl_ipx_pprop_broadcasting = 1; /* Global Variables */ static struct datalink_proto *p8022_datalink; static struct datalink_proto *pEII_datalink; static struct datalink_proto *p8023_datalink; static struct datalink_proto *pSNAP_datalink; static const struct proto_ops ipx_dgram_ops; LIST_HEAD(ipx_interfaces); DEFINE_SPINLOCK(ipx_interfaces_lock); struct ipx_interface *ipx_primary_net; struct ipx_interface *ipx_internal_net; extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, unsigned char *node); extern void ipxrtr_del_routes(struct ipx_interface *intrfc); extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, struct iovec *iov, size_t len, int noblock); extern int ipxrtr_route_skb(struct sk_buff *skb); extern struct ipx_route *ipxrtr_lookup(__be32 net); extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg); struct ipx_interface *ipx_interfaces_head(void) { struct ipx_interface *rc = NULL; if (!list_empty(&ipx_interfaces)) rc = list_entry(ipx_interfaces.next, 
struct ipx_interface, node); return rc; } static void ipxcfg_set_auto_select(char val) { ipxcfg_auto_select_primary = val; if (val && !ipx_primary_net) ipx_primary_net = ipx_interfaces_head(); } static int ipxcfg_get_config_data(struct ipx_config_data __user *arg) { struct ipx_config_data vals; vals.ipxcfg_auto_create_interfaces = ipxcfg_auto_create_interfaces; vals.ipxcfg_auto_select_primary = ipxcfg_auto_select_primary; return copy_to_user(arg, &vals, sizeof(vals)) ? -EFAULT : 0; } /* * Note: Sockets may not be removed _during_ an interrupt or inet_bh * handler using this technique. They can be added although we do not * use this facility. */ static void ipx_remove_socket(struct sock *sk) { /* Determine interface with which socket is associated */ struct ipx_interface *intrfc = ipx_sk(sk)->intrfc; if (!intrfc) goto out; ipxitf_hold(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); sk_del_node_init(sk); spin_unlock_bh(&intrfc->if_sklist_lock); ipxitf_put(intrfc); out: return; } static void ipx_destroy_socket(struct sock *sk) { ipx_remove_socket(sk); skb_queue_purge(&sk->sk_receive_queue); sk_refcnt_debug_dec(sk); } /* * The following code is used to support IPX Interfaces (IPXITF). An * IPX interface is defined by a physical device and a frame type. 
*/

/* ipxitf_clear_primary_net has to be called with ipx_interfaces_lock held */
static void ipxitf_clear_primary_net(void)
{
	ipx_primary_net = NULL;
	if (ipxcfg_auto_select_primary)
		ipx_primary_net = ipx_interfaces_head();
}

/* Unlocked lookup: find the interface bound to @dev with frame type
 * @datalink.  Caller must hold ipx_interfaces_lock; no reference is
 * taken on the returned interface.  Returns NULL if not found. */
static struct ipx_interface *__ipxitf_find_using_phys(struct net_device *dev,
						      __be16 datalink)
{
	struct ipx_interface *i;

	list_for_each_entry(i, &ipx_interfaces, node)
		if (i->if_dev == dev && i->if_dlink_type == datalink)
			goto out;
	i = NULL;
out:
	return i;
}

/* Locked wrapper around __ipxitf_find_using_phys().  On success the
 * returned interface has its refcount bumped; the caller must drop it
 * with ipxitf_put(). */
static struct ipx_interface *ipxitf_find_using_phys(struct net_device *dev,
						    __be16 datalink)
{
	struct ipx_interface *i;

	spin_lock_bh(&ipx_interfaces_lock);
	i = __ipxitf_find_using_phys(dev, datalink);
	if (i)
		ipxitf_hold(i);
	spin_unlock_bh(&ipx_interfaces_lock);
	return i;
}

/* Find the interface for network number @net; @net == 0 selects the
 * primary network, if one is configured.  A reference is taken on the
 * result (drop with ipxitf_put()); NULL if nothing matches.
 *
 * Note the unusual placement of the "hold:" label on the if() body:
 * both the list-search hit and the primary-net path funnel through the
 * same ipxitf_hold() call. */
struct ipx_interface *ipxitf_find_using_net(__be32 net)
{
	struct ipx_interface *i;

	spin_lock_bh(&ipx_interfaces_lock);
	if (net) {
		list_for_each_entry(i, &ipx_interfaces, node)
			if (i->if_netnum == net)
				goto hold;
		i = NULL;
		goto unlock;
	}
	i = ipx_primary_net;
	if (i)
hold:
		ipxitf_hold(i);
unlock:
	spin_unlock_bh(&ipx_interfaces_lock);
	return i;
}

/* Sockets are bound to a particular IPX interface.
*/ static void ipxitf_insert_socket(struct ipx_interface *intrfc, struct sock *sk) { ipxitf_hold(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); ipx_sk(sk)->intrfc = intrfc; sk_add_node(sk, &intrfc->if_sklist); spin_unlock_bh(&intrfc->if_sklist_lock); ipxitf_put(intrfc); } /* caller must hold intrfc->if_sklist_lock */ static struct sock *__ipxitf_find_socket(struct ipx_interface *intrfc, __be16 port) { struct sock *s; struct hlist_node *node; sk_for_each(s, node, &intrfc->if_sklist) if (ipx_sk(s)->port == port) goto found; s = NULL; found: return s; } /* caller must hold a reference to intrfc */ static struct sock *ipxitf_find_socket(struct ipx_interface *intrfc, __be16 port) { struct sock *s; spin_lock_bh(&intrfc->if_sklist_lock); s = __ipxitf_find_socket(intrfc, port); if (s) sock_hold(s); spin_unlock_bh(&intrfc->if_sklist_lock); return s; } #ifdef CONFIG_IPX_INTERN static struct sock *ipxitf_find_internal_socket(struct ipx_interface *intrfc, unsigned char *ipx_node, __be16 port) { struct sock *s; struct hlist_node *node; ipxitf_hold(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); sk_for_each(s, node, &intrfc->if_sklist) { struct ipx_sock *ipxs = ipx_sk(s); if (ipxs->port == port && !memcmp(ipx_node, ipxs->node, IPX_NODE_LEN)) goto found; } s = NULL; found: spin_unlock_bh(&intrfc->if_sklist_lock); ipxitf_put(intrfc); return s; } #endif static void __ipxitf_down(struct ipx_interface *intrfc) { struct sock *s; struct hlist_node *node, *t; /* Delete all routes associated with this interface */ ipxrtr_del_routes(intrfc); spin_lock_bh(&intrfc->if_sklist_lock); /* error sockets */ sk_for_each_safe(s, node, t, &intrfc->if_sklist) { struct ipx_sock *ipxs = ipx_sk(s); s->sk_err = ENOLINK; s->sk_error_report(s); ipxs->intrfc = NULL; ipxs->port = 0; sock_set_flag(s, SOCK_ZAPPED); /* Indicates it is no longer bound */ sk_del_node_init(s); } INIT_HLIST_HEAD(&intrfc->if_sklist); spin_unlock_bh(&intrfc->if_sklist_lock); /* remove this interface from list */ 
list_del(&intrfc->node);

	/* remove this interface from *special* networks */
	if (intrfc == ipx_primary_net)
		ipxitf_clear_primary_net();
	if (intrfc == ipx_internal_net)
		ipx_internal_net = NULL;

	if (intrfc->if_dev)
		dev_put(intrfc->if_dev);
	kfree(intrfc);
}

/* Take ipx_interfaces_lock and tear the interface down.  Note that
 * __ipxitf_down() frees @intrfc, so the pointer must not be used after
 * this returns. */
void ipxitf_down(struct ipx_interface *intrfc)
{
	spin_lock_bh(&ipx_interfaces_lock);
	__ipxitf_down(intrfc);
	spin_unlock_bh(&ipx_interfaces_lock);
}

/* Drop one reference; the last reference destroys the interface via
 * __ipxitf_down(), so the caller must already hold
 * ipx_interfaces_lock. */
static __inline__ void __ipxitf_put(struct ipx_interface *intrfc)
{
	if (atomic_dec_and_test(&intrfc->refcnt))
		__ipxitf_down(intrfc);
}

/* Netdevice notifier: on NETDEV_UP take an extra reference on every
 * IPX interface riding on @dev; on NETDEV_DOWN drop one, which may
 * destroy the interface.  Only the init network namespace is
 * handled. */
static int ipxitf_device_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct ipx_interface *i, *tmp;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (event != NETDEV_DOWN && event != NETDEV_UP)
		goto out;

	spin_lock_bh(&ipx_interfaces_lock);
	list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
		if (i->if_dev == dev) {
			if (event == NETDEV_UP)
				ipxitf_hold(i);
			else
				__ipxitf_put(i);
		}
	spin_unlock_bh(&ipx_interfaces_lock);
out:
	return NOTIFY_DONE;
}

/* Module unload: drop the initial reference of every remaining
 * interface, destroying them all. */
static __exit void ipxitf_cleanup(void)
{
	struct ipx_interface *i, *tmp;

	spin_lock_bh(&ipx_interfaces_lock);
	list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
		__ipxitf_put(i);
	spin_unlock_bh(&ipx_interfaces_lock);
}

/* Default delivery: queue @skb on @sock's receive queue, freeing the
 * skb if the socket will not accept it. */
static void ipxitf_def_skb_handler(struct sock *sock, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sock, skb) < 0)
		kfree_skb(skb);
}

/*
 * On input skb->sk is NULL. Nobody is charged for the memory.
*/ /* caller must hold a reference to intrfc */ #ifdef CONFIG_IPX_INTERN static int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy) { struct ipxhdr *ipx = ipx_hdr(skb); int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node, IPX_NODE_LEN); struct sock *s; struct hlist_node *node; int rc; spin_lock_bh(&intrfc->if_sklist_lock); sk_for_each(s, node, &intrfc->if_sklist) { struct ipx_sock *ipxs = ipx_sk(s); if (ipxs->port == ipx->ipx_dest.sock && (is_broadcast || !memcmp(ipx->ipx_dest.node, ipxs->node, IPX_NODE_LEN))) { /* We found a socket to which to send */ struct sk_buff *skb1; if (copy) { skb1 = skb_clone(skb, GFP_ATOMIC); rc = -ENOMEM; if (!skb1) goto out; } else { skb1 = skb; copy = 1; /* skb may only be used once */ } ipxitf_def_skb_handler(s, skb1); /* On an external interface, one socket can listen */ if (intrfc != ipx_internal_net) break; } } /* skb was solely for us, and we did not make a copy, so free it. */ if (!copy) kfree_skb(skb); rc = 0; out: spin_unlock_bh(&intrfc->if_sklist_lock); return rc; } #else static struct sock *ncp_connection_hack(struct ipx_interface *intrfc, struct ipxhdr *ipx) { /* The packet's target is a NCP connection handler. We want to hand it * to the correct socket directly within the kernel, so that the * mars_nwe packet distribution process does not have to do it. Here we * only care about NCP and BURST packets. * * You might call this a hack, but believe me, you do not want a * complete NCP layer in the kernel, and this is VERY fast as well. 
*/ struct sock *sk = NULL; int connection = 0; u8 *ncphdr = (u8 *)(ipx + 1); if (*ncphdr == 0x22 && *(ncphdr + 1) == 0x22) /* NCP request */ connection = (((int) *(ncphdr + 5)) << 8) | (int) *(ncphdr + 3); else if (*ncphdr == 0x77 && *(ncphdr + 1) == 0x77) /* BURST packet */ connection = (((int) *(ncphdr + 9)) << 8) | (int) *(ncphdr + 8); if (connection) { struct hlist_node *node; /* Now we have to look for a special NCP connection handling * socket. Only these sockets have ipx_ncp_conn != 0, set by * SIOCIPXNCPCONN. */ spin_lock_bh(&intrfc->if_sklist_lock); sk_for_each(sk, node, &intrfc->if_sklist) if (ipx_sk(sk)->ipx_ncp_conn == connection) { sock_hold(sk); goto found; } sk = NULL; found: spin_unlock_bh(&intrfc->if_sklist_lock); } return sk; } static int ipxitf_demux_socket(struct ipx_interface *intrfc, struct sk_buff *skb, int copy) { struct ipxhdr *ipx = ipx_hdr(skb); struct sock *sock1 = NULL, *sock2 = NULL; struct sk_buff *skb1 = NULL, *skb2 = NULL; int rc; if (intrfc == ipx_primary_net && ntohs(ipx->ipx_dest.sock) == 0x451) sock1 = ncp_connection_hack(intrfc, ipx); if (!sock1) /* No special socket found, forward the packet the normal way */ sock1 = ipxitf_find_socket(intrfc, ipx->ipx_dest.sock); /* * We need to check if there is a primary net and if * this is addressed to one of the *SPECIAL* sockets because * these need to be propagated to the primary net. * The *SPECIAL* socket list contains: 0x452(SAP), 0x453(RIP) and * 0x456(Diagnostic). */ if (ipx_primary_net && intrfc != ipx_primary_net) { const int dsock = ntohs(ipx->ipx_dest.sock); if (dsock == 0x452 || dsock == 0x453 || dsock == 0x456) /* The appropriate thing to do here is to dup the * packet and route to the primary net interface via * ipxitf_send; however, we'll cheat and just demux it * here. */ sock2 = ipxitf_find_socket(ipx_primary_net, ipx->ipx_dest.sock); } /* * If there is nothing to do return. The kfree will cancel any charging. 
*/ rc = 0; if (!sock1 && !sock2) { if (!copy) kfree_skb(skb); goto out; } /* * This next segment of code is a little awkward, but it sets it up * so that the appropriate number of copies of the SKB are made and * that skb1 and skb2 point to it (them) so that it (they) can be * demuxed to sock1 and/or sock2. If we are unable to make enough * copies, we do as much as is possible. */ if (copy) skb1 = skb_clone(skb, GFP_ATOMIC); else skb1 = skb; rc = -ENOMEM; if (!skb1) goto out_put; /* Do we need 2 SKBs? */ if (sock1 && sock2) skb2 = skb_clone(skb1, GFP_ATOMIC); else skb2 = skb1; if (sock1) ipxitf_def_skb_handler(sock1, skb1); if (!skb2) goto out_put; if (sock2) ipxitf_def_skb_handler(sock2, skb2); rc = 0; out_put: if (sock1) sock_put(sock1); if (sock2) sock_put(sock2); out: return rc; } #endif /* CONFIG_IPX_INTERN */ static struct sk_buff *ipxitf_adjust_skbuff(struct ipx_interface *intrfc, struct sk_buff *skb) { struct sk_buff *skb2; int in_offset = (unsigned char *)ipx_hdr(skb) - skb->head; int out_offset = intrfc->if_ipx_offset; int len; /* Hopefully, most cases */ if (in_offset >= out_offset) return skb; /* Need new SKB */ len = skb->len + out_offset; skb2 = alloc_skb(len, GFP_ATOMIC); if (skb2) { skb_reserve(skb2, out_offset); skb_reset_network_header(skb2); skb_reset_transport_header(skb2); skb_put(skb2, skb->len); memcpy(ipx_hdr(skb2), ipx_hdr(skb), skb->len); memcpy(skb2->cb, skb->cb, sizeof(skb->cb)); } kfree_skb(skb); return skb2; } /* caller must hold a reference to intrfc and the skb has to be unshared */ int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node) { struct ipxhdr *ipx = ipx_hdr(skb); struct net_device *dev = intrfc->if_dev; struct datalink_proto *dl = intrfc->if_dlink; char dest_node[IPX_NODE_LEN]; int send_to_wire = 1; int addr_len; ipx->ipx_tctrl = IPX_SKB_CB(skb)->ipx_tctrl; ipx->ipx_dest.net = IPX_SKB_CB(skb)->ipx_dest_net; ipx->ipx_source.net = IPX_SKB_CB(skb)->ipx_source_net; /* see if we need to include the netnum 
in the route list */ if (IPX_SKB_CB(skb)->last_hop.index >= 0) { __be32 *last_hop = (__be32 *)(((u8 *) skb->data) + sizeof(struct ipxhdr) + IPX_SKB_CB(skb)->last_hop.index * sizeof(__be32)); *last_hop = IPX_SKB_CB(skb)->last_hop.netnum; IPX_SKB_CB(skb)->last_hop.index = -1; } /* * We need to know how many skbuffs it will take to send out this * packet to avoid unnecessary copies. */ if (!dl || !dev || dev->flags & IFF_LOOPBACK) send_to_wire = 0; /* No non looped */ /* * See if this should be demuxed to sockets on this interface * * We want to ensure the original was eaten or that we only use * up clones. */ if (ipx->ipx_dest.net == intrfc->if_netnum) { /* * To our own node, loop and free the original. * The internal net will receive on all node address. */ if (intrfc == ipx_internal_net || !memcmp(intrfc->if_node, node, IPX_NODE_LEN)) { /* Don't charge sender */ skb_orphan(skb); /* Will charge receiver */ return ipxitf_demux_socket(intrfc, skb, 0); } /* Broadcast, loop and possibly keep to send on. */ if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) { if (!send_to_wire) skb_orphan(skb); ipxitf_demux_socket(intrfc, skb, send_to_wire); if (!send_to_wire) goto out; } } /* * If the originating net is not equal to our net; this is routed * We are still charging the sender. Which is right - the driver * free will handle this fairly. 
*/ if (ipx->ipx_source.net != intrfc->if_netnum) { /* * Unshare the buffer before modifying the count in * case it's a flood or tcpdump */ skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) goto out; if (++ipx->ipx_tctrl > ipxcfg_max_hops) send_to_wire = 0; } if (!send_to_wire) { kfree_skb(skb); goto out; } /* Determine the appropriate hardware address */ addr_len = dev->addr_len; if (!memcmp(ipx_broadcast_node, node, IPX_NODE_LEN)) memcpy(dest_node, dev->broadcast, addr_len); else memcpy(dest_node, &(node[IPX_NODE_LEN-addr_len]), addr_len); /* Make any compensation for differing physical/data link size */ skb = ipxitf_adjust_skbuff(intrfc, skb); if (!skb) goto out; /* set up data link and physical headers */ skb->dev = dev; skb->protocol = htons(ETH_P_IPX); /* Send it out */ dl->request(dl, skb, dest_node); out: return 0; } static int ipxitf_add_local_route(struct ipx_interface *intrfc) { return ipxrtr_add_route(intrfc->if_netnum, intrfc, NULL); } static void ipxitf_discover_netnum(struct ipx_interface *intrfc, struct sk_buff *skb); static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb); static int ipxitf_rcv(struct ipx_interface *intrfc, struct sk_buff *skb) { struct ipxhdr *ipx = ipx_hdr(skb); int rc = 0; ipxitf_hold(intrfc); /* See if we should update our network number */ if (!intrfc->if_netnum) /* net number of intrfc not known yet */ ipxitf_discover_netnum(intrfc, skb); IPX_SKB_CB(skb)->last_hop.index = -1; if (ipx->ipx_type == IPX_TYPE_PPROP) { rc = ipxitf_pprop(intrfc, skb); if (rc) goto out_free_skb; } /* local processing follows */ if (!IPX_SKB_CB(skb)->ipx_dest_net) IPX_SKB_CB(skb)->ipx_dest_net = intrfc->if_netnum; if (!IPX_SKB_CB(skb)->ipx_source_net) IPX_SKB_CB(skb)->ipx_source_net = intrfc->if_netnum; /* it doesn't make sense to route a pprop packet, there's no meaning * in the ipx_dest_net for such packets */ if (ipx->ipx_type != IPX_TYPE_PPROP && intrfc->if_netnum != IPX_SKB_CB(skb)->ipx_dest_net) { /* We only route point-to-point 
packets. */ if (skb->pkt_type == PACKET_HOST) { skb = skb_unshare(skb, GFP_ATOMIC); if (skb) rc = ipxrtr_route_skb(skb); goto out_intrfc; } goto out_free_skb; } /* see if we should keep it */ if (!memcmp(ipx_broadcast_node, ipx->ipx_dest.node, IPX_NODE_LEN) || !memcmp(intrfc->if_node, ipx->ipx_dest.node, IPX_NODE_LEN)) { rc = ipxitf_demux_socket(intrfc, skb, 0); goto out_intrfc; } /* we couldn't pawn it off so unload it */ out_free_skb: kfree_skb(skb); out_intrfc: ipxitf_put(intrfc); return rc; } static void ipxitf_discover_netnum(struct ipx_interface *intrfc, struct sk_buff *skb) { const struct ipx_cb *cb = IPX_SKB_CB(skb); /* see if this is an intra packet: source_net == dest_net */ if (cb->ipx_source_net == cb->ipx_dest_net && cb->ipx_source_net) { struct ipx_interface *i = ipxitf_find_using_net(cb->ipx_source_net); /* NB: NetWare servers lie about their hop count so we * dropped the test based on it. This is the best way * to determine this is a 0 hop count packet. */ if (!i) { intrfc->if_netnum = cb->ipx_source_net; ipxitf_add_local_route(intrfc); } else { printk(KERN_WARNING "IPX: Network number collision " "%lx\n %s %s and %s %s\n", (unsigned long) ntohl(cb->ipx_source_net), ipx_device_name(i), ipx_frame_name(i->if_dlink_type), ipx_device_name(intrfc), ipx_frame_name(intrfc->if_dlink_type)); ipxitf_put(i); } } } /** * ipxitf_pprop - Process packet propagation IPX packet type 0x14, used for * NetBIOS broadcasts * @intrfc: IPX interface receiving this packet * @skb: Received packet * * Checks if packet is valid: if its more than %IPX_MAX_PPROP_HOPS hops or if it * is smaller than a IPX header + the room for %IPX_MAX_PPROP_HOPS hops we drop * it, not even processing it locally, if it has exact %IPX_MAX_PPROP_HOPS we * don't broadcast it, but process it locally. See chapter 5 of Novell's "IPX * RIP and SAP Router Specification", Part Number 107-000029-001. 
* * If it is valid, check if we have pprop broadcasting enabled by the user, * if not, just return zero for local processing. * * If it is enabled check the packet and don't broadcast it if we have already * seen this packet. * * Broadcast: send it to the interfaces that aren't on the packet visited nets * array, just after the IPX header. * * Returns -EINVAL for invalid packets, so that the calling function drops * the packet without local processing. 0 if packet is to be locally processed. */ static int ipxitf_pprop(struct ipx_interface *intrfc, struct sk_buff *skb) { struct ipxhdr *ipx = ipx_hdr(skb); int i, rc = -EINVAL; struct ipx_interface *ifcs; char *c; __be32 *l; /* Illegal packet - too many hops or too short */ /* We decide to throw it away: no broadcasting, no local processing. * NetBIOS unaware implementations route them as normal packets - * tctrl <= 15, any data payload... */ if (IPX_SKB_CB(skb)->ipx_tctrl > IPX_MAX_PPROP_HOPS || ntohs(ipx->ipx_pktsize) < sizeof(struct ipxhdr) + IPX_MAX_PPROP_HOPS * sizeof(u32)) goto out; /* are we broadcasting this damn thing? */ rc = 0; if (!sysctl_ipx_pprop_broadcasting) goto out; /* We do broadcast packet on the IPX_MAX_PPROP_HOPS hop, but we * process it locally. All previous hops broadcasted it, and process it * locally. */ if (IPX_SKB_CB(skb)->ipx_tctrl == IPX_MAX_PPROP_HOPS) goto out; c = ((u8 *) ipx) + sizeof(struct ipxhdr); l = (__be32 *) c; /* Don't broadcast packet if already seen this net */ for (i = 0; i < IPX_SKB_CB(skb)->ipx_tctrl; i++) if (*l++ == intrfc->if_netnum) goto out; /* < IPX_MAX_PPROP_HOPS hops && input interface not in list. Save the * position where we will insert recvd netnum into list, later on, * in ipxitf_send */ IPX_SKB_CB(skb)->last_hop.index = i; IPX_SKB_CB(skb)->last_hop.netnum = intrfc->if_netnum; /* xmit on all other interfaces... 
*/
	spin_lock_bh(&ipx_interfaces_lock);
	list_for_each_entry(ifcs, &ipx_interfaces, node) {
		/* Except unconfigured interfaces */
		if (!ifcs->if_netnum)
			continue;

		/* That aren't in the list */
		if (ifcs == intrfc)
			continue;
		l = (__be32 *) c;
		/* don't consider the last entry in the packet list,
		 * it is our netnum, and it is not there yet */
		for (i = 0; i < IPX_SKB_CB(skb)->ipx_tctrl; i++)
			if (ifcs->if_netnum == *l++)
				break;
		if (i == IPX_SKB_CB(skb)->ipx_tctrl) {
			struct sk_buff *s = skb_copy(skb, GFP_ATOMIC);

			if (s) {
				IPX_SKB_CB(s)->ipx_dest_net = ifcs->if_netnum;
				ipxrtr_route_skb(s);
			}
		}
	}
	spin_unlock_bh(&ipx_interfaces_lock);
out:
	return rc;
}

/* Link a new interface into the global list; it becomes the primary
 * network if auto-selection is on and no primary exists yet. */
static void ipxitf_insert(struct ipx_interface *intrfc)
{
	spin_lock_bh(&ipx_interfaces_lock);
	list_add_tail(&intrfc->node, &ipx_interfaces);
	spin_unlock_bh(&ipx_interfaces_lock);

	if (ipxcfg_auto_select_primary && !ipx_primary_net)
		ipx_primary_net = intrfc;
}

/* Allocate and initialize an interface descriptor: refcount starts at
 * 1 and the socket list is empty.  GFP_ATOMIC because this may run in
 * packet-receive context (via ipxitf_auto_create()).  Returns NULL on
 * allocation failure. */
static struct ipx_interface *ipxitf_alloc(struct net_device *dev, __be32 netnum,
					  __be16 dlink_type,
					  struct datalink_proto *dlink,
					  unsigned char internal,
					  int ipx_offset)
{
	struct ipx_interface *intrfc = kmalloc(sizeof(*intrfc), GFP_ATOMIC);

	if (intrfc) {
		intrfc->if_dev = dev;
		intrfc->if_netnum = netnum;
		intrfc->if_dlink_type = dlink_type;
		intrfc->if_dlink = dlink;
		intrfc->if_internal = internal;
		intrfc->if_ipx_offset = ipx_offset;
		intrfc->if_sknum = IPX_MIN_EPHEMERAL_SOCKET;
		INIT_HLIST_HEAD(&intrfc->if_sklist);
		atomic_set(&intrfc->refcnt, 1);
		spin_lock_init(&intrfc->if_sklist_lock);
	}
	return intrfc;
}

/* Create the internal (server) network: only one is allowed and it
 * also becomes the primary network. */
static int ipxitf_create_internal(struct ipx_interface_definition *idef)
{
	struct ipx_interface *intrfc;
	int rc = -EEXIST;

	/* Only one primary network allowed */
	if (ipx_primary_net)
		goto out;

	/* Must have a valid network number */
	rc = -EADDRNOTAVAIL;
	if (!idef->ipx_network)
		goto out;
	intrfc = ipxitf_find_using_net(idef->ipx_network);
	rc = -EADDRINUSE;
	if (intrfc) {
		ipxitf_put(intrfc);
		goto out;
	}
	intrfc = ipxitf_alloc(NULL, idef->ipx_network, 0, NULL, 1, 0);
	rc = -EAGAIN;
	if
(!intrfc) goto out; memcpy((char *)&(intrfc->if_node), idef->ipx_node, IPX_NODE_LEN); ipx_internal_net = ipx_primary_net = intrfc; ipxitf_hold(intrfc); ipxitf_insert(intrfc); rc = ipxitf_add_local_route(intrfc); ipxitf_put(intrfc); out: return rc; } static __be16 ipx_map_frame_type(unsigned char type) { __be16 rc = 0; switch (type) { case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break; case IPX_FRAME_8022: rc = htons(ETH_P_802_2); break; case IPX_FRAME_SNAP: rc = htons(ETH_P_SNAP); break; case IPX_FRAME_8023: rc = htons(ETH_P_802_3); break; } return rc; } static int ipxitf_create(struct ipx_interface_definition *idef) { struct net_device *dev; __be16 dlink_type = 0; struct datalink_proto *datalink = NULL; struct ipx_interface *intrfc; int rc; if (idef->ipx_special == IPX_INTERNAL) { rc = ipxitf_create_internal(idef); goto out; } rc = -EEXIST; if (idef->ipx_special == IPX_PRIMARY && ipx_primary_net) goto out; intrfc = ipxitf_find_using_net(idef->ipx_network); rc = -EADDRINUSE; if (idef->ipx_network && intrfc) { ipxitf_put(intrfc); goto out; } if (intrfc) ipxitf_put(intrfc); dev = dev_get_by_name(&init_net, idef->ipx_device); rc = -ENODEV; if (!dev) goto out; switch (idef->ipx_dlink_type) { case IPX_FRAME_TR_8022: printk(KERN_WARNING "IPX frame type 802.2TR is " "obsolete Use 802.2 instead.\n"); /* fall through */ case IPX_FRAME_8022: dlink_type = htons(ETH_P_802_2); datalink = p8022_datalink; break; case IPX_FRAME_ETHERII: if (dev->type != ARPHRD_IEEE802) { dlink_type = htons(ETH_P_IPX); datalink = pEII_datalink; break; } else printk(KERN_WARNING "IPX frame type EtherII over " "token-ring is obsolete. 
Use SNAP " "instead.\n"); /* fall through */ case IPX_FRAME_SNAP: dlink_type = htons(ETH_P_SNAP); datalink = pSNAP_datalink; break; case IPX_FRAME_8023: dlink_type = htons(ETH_P_802_3); datalink = p8023_datalink; break; case IPX_FRAME_NONE: default: rc = -EPROTONOSUPPORT; goto out_dev; } rc = -ENETDOWN; if (!(dev->flags & IFF_UP)) goto out_dev; /* Check addresses are suitable */ rc = -EINVAL; if (dev->addr_len > IPX_NODE_LEN) goto out_dev; intrfc = ipxitf_find_using_phys(dev, dlink_type); if (!intrfc) { /* Ok now create */ intrfc = ipxitf_alloc(dev, idef->ipx_network, dlink_type, datalink, 0, dev->hard_header_len + datalink->header_length); rc = -EAGAIN; if (!intrfc) goto out_dev; /* Setup primary if necessary */ if (idef->ipx_special == IPX_PRIMARY) ipx_primary_net = intrfc; if (!memcmp(idef->ipx_node, "\000\000\000\000\000\000", IPX_NODE_LEN)) { memset(intrfc->if_node, 0, IPX_NODE_LEN); memcpy(intrfc->if_node + IPX_NODE_LEN - dev->addr_len, dev->dev_addr, dev->addr_len); } else memcpy(intrfc->if_node, idef->ipx_node, IPX_NODE_LEN); ipxitf_hold(intrfc); ipxitf_insert(intrfc); } /* If the network number is known, add a route */ rc = 0; if (!intrfc->if_netnum) goto out_intrfc; rc = ipxitf_add_local_route(intrfc); out_intrfc: ipxitf_put(intrfc); goto out; out_dev: dev_put(dev); out: return rc; } static int ipxitf_delete(struct ipx_interface_definition *idef) { struct net_device *dev = NULL; __be16 dlink_type = 0; struct ipx_interface *intrfc; int rc = 0; spin_lock_bh(&ipx_interfaces_lock); if (idef->ipx_special == IPX_INTERNAL) { if (ipx_internal_net) { __ipxitf_put(ipx_internal_net); goto out; } rc = -ENOENT; goto out; } dlink_type = ipx_map_frame_type(idef->ipx_dlink_type); rc = -EPROTONOSUPPORT; if (!dlink_type) goto out; dev = __dev_get_by_name(&init_net, idef->ipx_device); rc = -ENODEV; if (!dev) goto out; intrfc = __ipxitf_find_using_phys(dev, dlink_type); rc = -EINVAL; if (!intrfc) goto out; __ipxitf_put(intrfc); rc = 0; out: 
spin_unlock_bh(&ipx_interfaces_lock); return rc; } static struct ipx_interface *ipxitf_auto_create(struct net_device *dev, __be16 dlink_type) { struct ipx_interface *intrfc = NULL; struct datalink_proto *datalink; if (!dev) goto out; /* Check addresses are suitable */ if (dev->addr_len > IPX_NODE_LEN) goto out; switch (ntohs(dlink_type)) { case ETH_P_IPX: datalink = pEII_datalink; break; case ETH_P_802_2: datalink = p8022_datalink; break; case ETH_P_SNAP: datalink = pSNAP_datalink; break; case ETH_P_802_3: datalink = p8023_datalink; break; default: goto out; } intrfc = ipxitf_alloc(dev, 0, dlink_type, datalink, 0, dev->hard_header_len + datalink->header_length); if (intrfc) { memset(intrfc->if_node, 0, IPX_NODE_LEN); memcpy((char *)&(intrfc->if_node[IPX_NODE_LEN-dev->addr_len]), dev->dev_addr, dev->addr_len); spin_lock_init(&intrfc->if_sklist_lock); atomic_set(&intrfc->refcnt, 1); ipxitf_insert(intrfc); dev_hold(dev); } out: return intrfc; } static int ipxitf_ioctl(unsigned int cmd, void __user *arg) { int rc = -EINVAL; struct ifreq ifr; int val; switch (cmd) { case SIOCSIFADDR: { struct sockaddr_ipx *sipx; struct ipx_interface_definition f; rc = -EFAULT; if (copy_from_user(&ifr, arg, sizeof(ifr))) break; sipx = (struct sockaddr_ipx *)&ifr.ifr_addr; rc = -EINVAL; if (sipx->sipx_family != AF_IPX) break; f.ipx_network = sipx->sipx_network; memcpy(f.ipx_device, ifr.ifr_name, sizeof(f.ipx_device)); memcpy(f.ipx_node, sipx->sipx_node, IPX_NODE_LEN); f.ipx_dlink_type = sipx->sipx_type; f.ipx_special = sipx->sipx_special; if (sipx->sipx_action == IPX_DLTITF) rc = ipxitf_delete(&f); else rc = ipxitf_create(&f); break; } case SIOCGIFADDR: { struct sockaddr_ipx *sipx; struct ipx_interface *ipxif; struct net_device *dev; rc = -EFAULT; if (copy_from_user(&ifr, arg, sizeof(ifr))) break; sipx = (struct sockaddr_ipx *)&ifr.ifr_addr; dev = __dev_get_by_name(&init_net, ifr.ifr_name); rc = -ENODEV; if (!dev) break; ipxif = ipxitf_find_using_phys(dev, 
ipx_map_frame_type(sipx->sipx_type)); rc = -EADDRNOTAVAIL; if (!ipxif) break; sipx->sipx_family = AF_IPX; sipx->sipx_network = ipxif->if_netnum; memcpy(sipx->sipx_node, ipxif->if_node, sizeof(sipx->sipx_node)); rc = -EFAULT; if (copy_to_user(arg, &ifr, sizeof(ifr))) break; ipxitf_put(ipxif); rc = 0; break; } case SIOCAIPXITFCRT: rc = -EFAULT; if (get_user(val, (unsigned char __user *) arg)) break; rc = 0; ipxcfg_auto_create_interfaces = val; break; case SIOCAIPXPRISLT: rc = -EFAULT; if (get_user(val, (unsigned char __user *) arg)) break; rc = 0; ipxcfg_set_auto_select(val); break; } return rc; } /* * Checksum routine for IPX */ /* Note: We assume ipx_tctrl==0 and htons(length)==ipx_pktsize */ /* This functions should *not* mess with packet contents */ __be16 ipx_cksum(struct ipxhdr *packet, int length) { /* * NOTE: sum is a net byte order quantity, which optimizes the * loop. This only works on big and little endian machines. (I * don't know of a machine that isn't.) */ /* handle the first 3 words separately; checksum should be skipped * and ipx_tctrl masked out */ __u16 *p = (__u16 *)packet; __u32 sum = p[1] + (p[2] & (__force u16)htons(0x00ff)); __u32 i = (length >> 1) - 3; /* Number of remaining complete words */ /* Loop through them */ p += 3; while (i--) sum += *p++; /* Add on the last part word if it exists */ if (packet->ipx_pktsize & htons(1)) sum += (__force u16)htons(0xff00) & *p; /* Do final fixup */ sum = (sum & 0xffff) + (sum >> 16); /* It's a pity there's no concept of carry in C */ if (sum >= 0x10000) sum++; /* * Leave 0 alone; we don't want 0xffff here. 
Note that we can't get * here with 0x10000, so this check is the same as ((__u16)sum) */ if (sum) sum = ~sum; return (__force __be16)sum; } const char *ipx_frame_name(__be16 frame) { char* rc = "None"; switch (ntohs(frame)) { case ETH_P_IPX: rc = "EtherII"; break; case ETH_P_802_2: rc = "802.2"; break; case ETH_P_SNAP: rc = "SNAP"; break; case ETH_P_802_3: rc = "802.3"; break; case ETH_P_TR_802_2: rc = "802.2TR"; break; } return rc; } const char *ipx_device_name(struct ipx_interface *intrfc) { return intrfc->if_internal ? "Internal" : intrfc->if_dev ? intrfc->if_dev->name : "Unknown"; } /* Handling for system calls applied via the various interfaces to an IPX * socket object. */ static int ipx_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; int opt; int rc = -EINVAL; lock_sock(sk); if (optlen != sizeof(int)) goto out; rc = -EFAULT; if (get_user(opt, (unsigned int __user *)optval)) goto out; rc = -ENOPROTOOPT; if (!(level == SOL_IPX && optname == IPX_TYPE)) goto out; ipx_sk(sk)->type = opt; rc = 0; out: release_sock(sk); return rc; } static int ipx_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int val = 0; int len; int rc = -ENOPROTOOPT; lock_sock(sk); if (!(level == SOL_IPX && optname == IPX_TYPE)) goto out; val = ipx_sk(sk)->type; rc = -EFAULT; if (get_user(len, optlen)) goto out; len = min_t(unsigned int, len, sizeof(int)); rc = -EINVAL; if(len < 0) goto out; rc = -EFAULT; if (put_user(len, optlen) || copy_to_user(optval, &val, len)) goto out; rc = 0; out: release_sock(sk); return rc; } static struct proto ipx_proto = { .name = "IPX", .owner = THIS_MODULE, .obj_size = sizeof(struct ipx_sock), }; static int ipx_create(struct net *net, struct socket *sock, int protocol, int kern) { int rc = -ESOCKTNOSUPPORT; struct sock *sk; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; /* * SPX support is not anymore 
in the kernel sources. If you want to * ressurrect it, completing it and making it understand shared skbs, * be fully multithreaded, etc, grab the sources in an early 2.5 kernel * tree. */ if (sock->type != SOCK_DGRAM) goto out; rc = -ENOMEM; sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto); if (!sk) goto out; sk_refcnt_debug_inc(sk); sock_init_data(sock, sk); sk->sk_no_check = 1; /* Checksum off by default */ sock->ops = &ipx_dgram_ops; rc = 0; out: return rc; } static int ipx_release(struct socket *sock) { struct sock *sk = sock->sk; if (!sk) goto out; lock_sock(sk); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock->sk = NULL; sk_refcnt_debug_release(sk); ipx_destroy_socket(sk); release_sock(sk); sock_put(sk); out: return 0; } /* caller must hold a reference to intrfc */ static __be16 ipx_first_free_socketnum(struct ipx_interface *intrfc) { unsigned short socketNum = intrfc->if_sknum; spin_lock_bh(&intrfc->if_sklist_lock); if (socketNum < IPX_MIN_EPHEMERAL_SOCKET) socketNum = IPX_MIN_EPHEMERAL_SOCKET; while (__ipxitf_find_socket(intrfc, htons(socketNum))) if (socketNum > IPX_MAX_EPHEMERAL_SOCKET) socketNum = IPX_MIN_EPHEMERAL_SOCKET; else socketNum++; spin_unlock_bh(&intrfc->if_sklist_lock); intrfc->if_sknum = socketNum; return htons(socketNum); } static int __ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct ipx_interface *intrfc; struct sockaddr_ipx *addr = (struct sockaddr_ipx *)uaddr; int rc = -EINVAL; if (!sock_flag(sk, SOCK_ZAPPED) || addr_len != sizeof(struct sockaddr_ipx)) goto out; intrfc = ipxitf_find_using_net(addr->sipx_network); rc = -EADDRNOTAVAIL; if (!intrfc) goto out; if (!addr->sipx_port) { addr->sipx_port = ipx_first_free_socketnum(intrfc); rc = -EINVAL; if (!addr->sipx_port) goto out_put; } /* protect IPX system stuff like routing/sap */ rc = -EACCES; if (ntohs(addr->sipx_port) < IPX_MIN_EPHEMERAL_SOCKET && 
!capable(CAP_NET_ADMIN)) goto out_put; ipxs->port = addr->sipx_port; #ifdef CONFIG_IPX_INTERN if (intrfc == ipx_internal_net) { /* The source address is to be set explicitly if the * socket is to be bound on the internal network. If a * node number 0 was specified, the default is used. */ rc = -EINVAL; if (!memcmp(addr->sipx_node, ipx_broadcast_node, IPX_NODE_LEN)) goto out_put; if (!memcmp(addr->sipx_node, ipx_this_node, IPX_NODE_LEN)) memcpy(ipxs->node, intrfc->if_node, IPX_NODE_LEN); else memcpy(ipxs->node, addr->sipx_node, IPX_NODE_LEN); rc = -EADDRINUSE; if (ipxitf_find_internal_socket(intrfc, ipxs->node, ipxs->port)) { SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n", ntohs(addr->sipx_port)); goto out_put; } } else { /* Source addresses are easy. It must be our * network:node pair for an interface routed to IPX * with the ipx routing ioctl() */ memcpy(ipxs->node, intrfc->if_node, IPX_NODE_LEN); rc = -EADDRINUSE; if (ipxitf_find_socket(intrfc, addr->sipx_port)) { SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n", ntohs(addr->sipx_port)); goto out_put; } } #else /* !def CONFIG_IPX_INTERN */ /* Source addresses are easy. 
It must be our network:node pair for an interface routed to IPX with the ipx routing ioctl() */ rc = -EADDRINUSE; if (ipxitf_find_socket(intrfc, addr->sipx_port)) { SOCK_DEBUG(sk, "IPX: bind failed because port %X in use.\n", ntohs((int)addr->sipx_port)); goto out_put; } #endif /* CONFIG_IPX_INTERN */ ipxitf_insert_socket(intrfc, sk); sock_reset_flag(sk, SOCK_ZAPPED); rc = 0; out_put: ipxitf_put(intrfc); out: return rc; } static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; int rc; lock_sock(sk); rc = __ipx_bind(sock, uaddr, addr_len); release_sock(sk); return rc; } static int ipx_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *addr; int rc = -EINVAL; struct ipx_route *rt; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; lock_sock(sk); if (addr_len != sizeof(*addr)) goto out; addr = (struct sockaddr_ipx *)uaddr; /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } /* We can either connect to primary network or somewhere * we can route to */ rt = ipxrtr_lookup(addr->sipx_network); rc = -ENETUNREACH; if (!rt && !(!addr->sipx_network && ipx_primary_net)) goto out; ipxs->dest_addr.net = addr->sipx_network; ipxs->dest_addr.sock = addr->sipx_port; memcpy(ipxs->dest_addr.node, addr->sipx_node, IPX_NODE_LEN); ipxs->type = addr->sipx_type; if (sock->type == SOCK_DGRAM) { sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; } if (rt) ipxrtr_put(rt); rc = 0; out: release_sock(sk); return rc; } static int ipx_getname(struct socket *sock, 
struct sockaddr *uaddr, int *uaddr_len, int peer) { struct ipx_address *addr; struct sockaddr_ipx sipx; struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); int rc; *uaddr_len = sizeof(struct sockaddr_ipx); lock_sock(sk); if (peer) { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; addr = &ipxs->dest_addr; sipx.sipx_network = addr->net; sipx.sipx_port = addr->sock; memcpy(sipx.sipx_node, addr->node, IPX_NODE_LEN); } else { if (ipxs->intrfc) { sipx.sipx_network = ipxs->intrfc->if_netnum; #ifdef CONFIG_IPX_INTERN memcpy(sipx.sipx_node, ipxs->node, IPX_NODE_LEN); #else memcpy(sipx.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ } else { sipx.sipx_network = 0; memset(sipx.sipx_node, '\0', IPX_NODE_LEN); } sipx.sipx_port = ipxs->port; } sipx.sipx_family = AF_IPX; sipx.sipx_type = ipxs->type; sipx.sipx_zero = 0; memcpy(uaddr, &sipx, sizeof(sipx)); rc = 0; out: release_sock(sk); return rc; } static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { /* NULL here for pt means the packet was looped back */ struct ipx_interface *intrfc; struct ipxhdr *ipx; u16 ipx_pktsize; int rc = 0; if (!net_eq(dev_net(dev), &init_net)) goto drop; /* Not ours */ if (skb->pkt_type == PACKET_OTHERHOST) goto drop; if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) goto out; if (!pskb_may_pull(skb, sizeof(struct ipxhdr))) goto drop; ipx_pktsize = ntohs(ipx_hdr(skb)->ipx_pktsize); /* Too small or invalid header? 
*/ if (ipx_pktsize < sizeof(struct ipxhdr) || !pskb_may_pull(skb, ipx_pktsize)) goto drop; ipx = ipx_hdr(skb); if (ipx->ipx_checksum != IPX_NO_CHECKSUM && ipx->ipx_checksum != ipx_cksum(ipx, ipx_pktsize)) goto drop; IPX_SKB_CB(skb)->ipx_tctrl = ipx->ipx_tctrl; IPX_SKB_CB(skb)->ipx_dest_net = ipx->ipx_dest.net; IPX_SKB_CB(skb)->ipx_source_net = ipx->ipx_source.net; /* Determine what local ipx endpoint this is */ intrfc = ipxitf_find_using_phys(dev, pt->type); if (!intrfc) { if (ipxcfg_auto_create_interfaces && IPX_SKB_CB(skb)->ipx_dest_net) { intrfc = ipxitf_auto_create(dev, pt->type); if (intrfc) ipxitf_hold(intrfc); } if (!intrfc) /* Not one of ours */ /* or invalid packet for auto creation */ goto drop; } rc = ipxitf_rcv(intrfc, skb); ipxitf_put(intrfc); goto out; drop: kfree_skb(skb); out: return rc; } static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *usipx = (struct sockaddr_ipx *)msg->msg_name; struct sockaddr_ipx local_sipx; int rc = -EINVAL; int flags = msg->msg_flags; lock_sock(sk); /* Socket gets bound below anyway */ /* if (sk->sk_zapped) return -EIO; */ /* Socket not bound */ if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) goto out; /* Max possible packet size limited by 16 bit pktsize in header */ if (len >= 65535 - sizeof(struct ipxhdr)) goto out; if (usipx) { if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -EINVAL; if (msg->msg_namelen < sizeof(*usipx) || usipx->sipx_family != AF_IPX) goto out; } else { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; usipx = &local_sipx; usipx->sipx_family = 
AF_IPX; usipx->sipx_type = ipxs->type; usipx->sipx_port = ipxs->dest_addr.sock; usipx->sipx_network = ipxs->dest_addr.net; memcpy(usipx->sipx_node, ipxs->dest_addr.node, IPX_NODE_LEN); } rc = ipxrtr_route_packet(sk, usipx, msg->msg_iov, len, flags & MSG_DONTWAIT); if (rc >= 0) rc = len; out: release_sock(sk); return rc; } static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct ipx_sock *ipxs = ipx_sk(sk); struct sockaddr_ipx *sipx = (struct sockaddr_ipx *)msg->msg_name; struct ipxhdr *ipx = NULL; struct sk_buff *skb; int copied, rc; lock_sock(sk); /* put the autobinding in */ if (!ipxs->port) { struct sockaddr_ipx uaddr; uaddr.sipx_port = 0; uaddr.sipx_network = 0; #ifdef CONFIG_IPX_INTERN rc = -ENETDOWN; if (!ipxs->intrfc) goto out; /* Someone zonked the iface */ memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN); #endif /* CONFIG_IPX_INTERN */ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr, sizeof(struct sockaddr_ipx)); if (rc) goto out; } rc = -ENOTCONN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &rc); if (!skb) goto out; ipx = ipx_hdr(skb); copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr); if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov, copied); if (rc) goto out_free; if (skb->tstamp.tv64) sk->sk_stamp = skb->tstamp; msg->msg_namelen = sizeof(*sipx); if (sipx) { sipx->sipx_family = AF_IPX; sipx->sipx_port = ipx->ipx_source.sock; memcpy(sipx->sipx_node, ipx->ipx_source.node, IPX_NODE_LEN); sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net; sipx->sipx_type = ipx->ipx_type; sipx->sipx_zero = 0; } rc = copied; out_free: skb_free_datagram(sk, skb); out: release_sock(sk); return rc; } static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int rc = 0; long amount = 0; struct 
sock *sk = sock->sk; void __user *argp = (void __user *)arg; lock_sock(sk); switch (cmd) { case TIOCOUTQ: amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; rc = put_user(amount, (int __user *)argp); break; case TIOCINQ: { struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); /* These two are safe on a single CPU system as only * user tasks fiddle here */ if (skb) amount = skb->len - sizeof(struct ipxhdr); rc = put_user(amount, (int __user *)argp); break; } case SIOCADDRT: case SIOCDELRT: rc = -EPERM; if (capable(CAP_NET_ADMIN)) rc = ipxrtr_ioctl(cmd, argp); break; case SIOCSIFADDR: case SIOCAIPXITFCRT: case SIOCAIPXPRISLT: rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; case SIOCGIFADDR: rc = ipxitf_ioctl(cmd, argp); break; case SIOCIPXCFGDATA: rc = ipxcfg_get_config_data(argp); break; case SIOCIPXNCPCONN: /* * This socket wants to take care of the NCP connection * handed to us in arg. */ rc = -EPERM; if (!capable(CAP_NET_ADMIN)) break; rc = get_user(ipx_sk(sk)->ipx_ncp_conn, (const unsigned short __user *)argp); break; case SIOCGSTAMP: rc = -EINVAL; if (sk) rc = sock_get_timestamp(sk, argp); break; case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: rc = -EINVAL; break; default: rc = -ENOIOCTLCMD; break; } release_sock(sk); return rc; } #ifdef CONFIG_COMPAT static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { /* * These 4 commands use same structure on 32bit and 64bit. Rest of IPX * commands is handled by generic ioctl code. As these commands are * SIOCPROTOPRIVATE..SIOCPROTOPRIVATE+3, they cannot be handled by generic * code. 
*/ switch (cmd) { case SIOCAIPXITFCRT: case SIOCAIPXPRISLT: case SIOCIPXCFGDATA: case SIOCIPXNCPCONN: return ipx_ioctl(sock, cmd, arg); default: return -ENOIOCTLCMD; } } #endif /* * Socket family declarations */ static const struct net_proto_family ipx_family_ops = { .family = PF_IPX, .create = ipx_create, .owner = THIS_MODULE, }; static const struct proto_ops ipx_dgram_ops = { .family = PF_IPX, .owner = THIS_MODULE, .release = ipx_release, .bind = ipx_bind, .connect = ipx_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = ipx_getname, .poll = datagram_poll, .ioctl = ipx_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ipx_compat_ioctl, #endif .listen = sock_no_listen, .shutdown = sock_no_shutdown, /* FIXME: support shutdown */ .setsockopt = ipx_setsockopt, .getsockopt = ipx_getsockopt, .sendmsg = ipx_sendmsg, .recvmsg = ipx_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct packet_type ipx_8023_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_802_3), .func = ipx_rcv, }; static struct packet_type ipx_dix_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IPX), .func = ipx_rcv, }; static struct notifier_block ipx_dev_notifier = { .notifier_call = ipxitf_device_event, }; extern struct datalink_proto *make_EII_client(void); extern void destroy_EII_client(struct datalink_proto *); static const unsigned char ipx_8022_type = 0xE0; static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 }; static const char ipx_EII_err_msg[] __initconst = KERN_CRIT "IPX: Unable to register with Ethernet II\n"; static const char ipx_8023_err_msg[] __initconst = KERN_CRIT "IPX: Unable to register with 802.3\n"; static const char ipx_llc_err_msg[] __initconst = KERN_CRIT "IPX: Unable to register with 802.2\n"; static const char ipx_snap_err_msg[] __initconst = KERN_CRIT "IPX: Unable to register with SNAP\n"; static int __init ipx_init(void) { int rc = proto_register(&ipx_proto, 1); if (rc != 0) goto out; 
sock_register(&ipx_family_ops); pEII_datalink = make_EII_client(); if (pEII_datalink) dev_add_pack(&ipx_dix_packet_type); else printk(ipx_EII_err_msg); p8023_datalink = make_8023_client(); if (p8023_datalink) dev_add_pack(&ipx_8023_packet_type); else printk(ipx_8023_err_msg); p8022_datalink = register_8022_client(ipx_8022_type, ipx_rcv); if (!p8022_datalink) printk(ipx_llc_err_msg); pSNAP_datalink = register_snap_client(ipx_snap_id, ipx_rcv); if (!pSNAP_datalink) printk(ipx_snap_err_msg); register_netdevice_notifier(&ipx_dev_notifier); ipx_register_sysctl(); ipx_proc_init(); out: return rc; } static void __exit ipx_proto_finito(void) { ipx_proc_exit(); ipx_unregister_sysctl(); unregister_netdevice_notifier(&ipx_dev_notifier); ipxitf_cleanup(); if (pSNAP_datalink) { unregister_snap_client(pSNAP_datalink); pSNAP_datalink = NULL; } if (p8022_datalink) { unregister_8022_client(p8022_datalink); p8022_datalink = NULL; } dev_remove_pack(&ipx_8023_packet_type); if (p8023_datalink) { destroy_8023_client(p8023_datalink); p8023_datalink = NULL; } dev_remove_pack(&ipx_dix_packet_type); if (pEII_datalink) { destroy_EII_client(pEII_datalink); pEII_datalink = NULL; } proto_unregister(&ipx_proto); sock_unregister(ipx_family_ops.family); } module_init(ipx_init); module_exit(ipx_proto_finito); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_IPX);
gpl-2.0
czobor/phablet_kernel_samsung_msm7x30-common
drivers/ide/ide.c
10847
10698
/* * Copyright (C) 1994-1998 Linus Torvalds & authors (see below) * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz */ /* * Mostly written by Mark Lord <mlord@pobox.com> * and Gadi Oxman <gadio@netvision.net.il> * and Andre Hedrick <andre@linux-ide.org> * * See linux/MAINTAINERS for address of current maintainer. * * This is the multiple IDE interface driver, as evolved from hd.c. * It supports up to MAX_HWIFS IDE interfaces, on one or more IRQs * (usually 14 & 15). * There can be up to two drives per interface, as per the ATA-2 spec. * * ... * * From hd.c: * | * | It traverses the request-list, using interrupts to jump between functions. * | As nearly all functions can be called within interrupts, we may not sleep. * | Special care is recommended. Have Fun! * | * | modified by Drew Eckhardt to check nr of hd's from the CMOS. * | * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug * | in the early extended-partition checks and added DM partitions. * | * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI). * | * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads", * | and general streamlining by Mark Lord (mlord@pobox.com). * * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by: * * Mark Lord (mlord@pobox.com) (IDE Perf.Pkg) * Delman Lee (delman@ieee.org) ("Mr. atdisk2") * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom) * * This was a rewrite of just about everything from hd.c, though some original * code is still sprinkled about. Think of it as a major evolution, with * inspiration from lots of linux users, esp. 
hamish@zot.apana.org.au */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/genhd.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/hdreg.h> #include <linux/completion.h> #include <linux/device.h> struct class *ide_port_class; /** * ide_device_get - get an additional reference to a ide_drive_t * @drive: device to get a reference to * * Gets a reference to the ide_drive_t and increments the use count of the * underlying LLDD module. */ int ide_device_get(ide_drive_t *drive) { struct device *host_dev; struct module *module; if (!get_device(&drive->gendev)) return -ENXIO; host_dev = drive->hwif->host->dev[0]; module = host_dev ? host_dev->driver->owner : NULL; if (module && !try_module_get(module)) { put_device(&drive->gendev); return -ENXIO; } return 0; } EXPORT_SYMBOL_GPL(ide_device_get); /** * ide_device_put - release a reference to a ide_drive_t * @drive: device to release a reference on * * Release a reference to the ide_drive_t and decrements the use count of * the underlying LLDD module. */ void ide_device_put(ide_drive_t *drive) { #ifdef CONFIG_MODULE_UNLOAD struct device *host_dev = drive->hwif->host->dev[0]; struct module *module = host_dev ? 
host_dev->driver->owner : NULL; if (module) module_put(module); #endif put_device(&drive->gendev); } EXPORT_SYMBOL_GPL(ide_device_put); static int ide_bus_match(struct device *dev, struct device_driver *drv) { return 1; } static int ide_uevent(struct device *dev, struct kobj_uevent_env *env) { ide_drive_t *drive = to_ide_device(dev); add_uevent_var(env, "MEDIA=%s", ide_media_string(drive)); add_uevent_var(env, "DRIVENAME=%s", drive->name); add_uevent_var(env, "MODALIAS=ide:m-%s", ide_media_string(drive)); return 0; } static int generic_ide_probe(struct device *dev) { ide_drive_t *drive = to_ide_device(dev); struct ide_driver *drv = to_ide_driver(dev->driver); return drv->probe ? drv->probe(drive) : -ENODEV; } static int generic_ide_remove(struct device *dev) { ide_drive_t *drive = to_ide_device(dev); struct ide_driver *drv = to_ide_driver(dev->driver); if (drv->remove) drv->remove(drive); return 0; } static void generic_ide_shutdown(struct device *dev) { ide_drive_t *drive = to_ide_device(dev); struct ide_driver *drv = to_ide_driver(dev->driver); if (dev->driver && drv->shutdown) drv->shutdown(drive); } struct bus_type ide_bus_type = { .name = "ide", .match = ide_bus_match, .uevent = ide_uevent, .probe = generic_ide_probe, .remove = generic_ide_remove, .shutdown = generic_ide_shutdown, .dev_attrs = ide_dev_attrs, .suspend = generic_ide_suspend, .resume = generic_ide_resume, }; EXPORT_SYMBOL_GPL(ide_bus_type); int ide_vlb_clk; EXPORT_SYMBOL_GPL(ide_vlb_clk); module_param_named(vlb_clock, ide_vlb_clk, int, 0); MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)"); int ide_pci_clk; EXPORT_SYMBOL_GPL(ide_pci_clk); module_param_named(pci_clock, ide_pci_clk, int, 0); MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)"); static int ide_set_dev_param_mask(const char *s, const struct kernel_param *kp) { int a, b, i, j = 1; unsigned int *dev_param_mask = (unsigned int *)kp->arg; /* controller . 
device (0 or 1) [ : 1 (set) | 0 (clear) ] */ if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 && sscanf(s, "%d.%d", &a, &b) != 2) return -EINVAL; i = a * MAX_DRIVES + b; if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1) return -EINVAL; if (j) *dev_param_mask |= (1 << i); else *dev_param_mask &= ~(1 << i); return 0; } static struct kernel_param_ops param_ops_ide_dev_mask = { .set = ide_set_dev_param_mask }; #define param_check_ide_dev_mask(name, p) param_check_uint(name, p) static unsigned int ide_nodma; module_param_named(nodma, ide_nodma, ide_dev_mask, 0); MODULE_PARM_DESC(nodma, "disallow DMA for a device"); static unsigned int ide_noflush; module_param_named(noflush, ide_noflush, ide_dev_mask, 0); MODULE_PARM_DESC(noflush, "disable flush requests for a device"); static unsigned int ide_nohpa; module_param_named(nohpa, ide_nohpa, ide_dev_mask, 0); MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device"); static unsigned int ide_noprobe; module_param_named(noprobe, ide_noprobe, ide_dev_mask, 0); MODULE_PARM_DESC(noprobe, "skip probing for a device"); static unsigned int ide_nowerr; module_param_named(nowerr, ide_nowerr, ide_dev_mask, 0); MODULE_PARM_DESC(nowerr, "ignore the ATA_DF bit for a device"); static unsigned int ide_cdroms; module_param_named(cdrom, ide_cdroms, ide_dev_mask, 0); MODULE_PARM_DESC(cdrom, "force device as a CD-ROM"); struct chs_geom { unsigned int cyl; u8 head; u8 sect; }; static unsigned int ide_disks; static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES]; static int ide_set_disk_chs(const char *str, struct kernel_param *kp) { int a, b, c = 0, h = 0, s = 0, i, j = 1; /* controller . device (0 or 1) : Cylinders , Heads , Sectors */ /* controller . 
device (0 or 1) : 1 (use CHS) | 0 (ignore CHS) */ if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 && sscanf(str, "%d.%d:%d", &a, &b, &j) != 3) return -EINVAL; i = a * MAX_DRIVES + b; if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1) return -EINVAL; if (c > INT_MAX || h > 255 || s > 255) return -EINVAL; if (j) ide_disks |= (1 << i); else ide_disks &= ~(1 << i); ide_disks_chs[i].cyl = c; ide_disks_chs[i].head = h; ide_disks_chs[i].sect = s; return 0; } module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0); MODULE_PARM_DESC(chs, "force device as a disk (using CHS)"); static void ide_dev_apply_params(ide_drive_t *drive, u8 unit) { int i = drive->hwif->index * MAX_DRIVES + unit; if (ide_nodma & (1 << i)) { printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name); drive->dev_flags |= IDE_DFLAG_NODMA; } if (ide_noflush & (1 << i)) { printk(KERN_INFO "ide: disabling flush requests for %s\n", drive->name); drive->dev_flags |= IDE_DFLAG_NOFLUSH; } if (ide_nohpa & (1 << i)) { printk(KERN_INFO "ide: disabling Host Protected Area for %s\n", drive->name); drive->dev_flags |= IDE_DFLAG_NOHPA; } if (ide_noprobe & (1 << i)) { printk(KERN_INFO "ide: skipping probe for %s\n", drive->name); drive->dev_flags |= IDE_DFLAG_NOPROBE; } if (ide_nowerr & (1 << i)) { printk(KERN_INFO "ide: ignoring the ATA_DF bit for %s\n", drive->name); drive->bad_wstat = BAD_R_STAT; } if (ide_cdroms & (1 << i)) { printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name); drive->dev_flags |= IDE_DFLAG_PRESENT; drive->media = ide_cdrom; /* an ATAPI device ignores DRDY */ drive->ready_stat = 0; } if (ide_disks & (1 << i)) { drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl; drive->head = drive->bios_head = ide_disks_chs[i].head; drive->sect = drive->bios_sect = ide_disks_chs[i].sect; printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n", drive->name, drive->cyl, drive->head, drive->sect); drive->dev_flags |= IDE_DFLAG_FORCED_GEOM | IDE_DFLAG_PRESENT; drive->media = ide_disk; 
drive->ready_stat = ATA_DRDY; } } static unsigned int ide_ignore_cable; static int ide_set_ignore_cable(const char *s, struct kernel_param *kp) { int i, j = 1; /* controller (ignore) */ /* controller : 1 (ignore) | 0 (use) */ if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1) return -EINVAL; if (i >= MAX_HWIFS || j < 0 || j > 1) return -EINVAL; if (j) ide_ignore_cable |= (1 << i); else ide_ignore_cable &= ~(1 << i); return 0; } module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0); MODULE_PARM_DESC(ignore_cable, "ignore cable detection"); void ide_port_apply_params(ide_hwif_t *hwif) { ide_drive_t *drive; int i; if (ide_ignore_cable & (1 << hwif->index)) { printk(KERN_INFO "ide: ignoring cable detection for %s\n", hwif->name); hwif->cbl = ATA_CBL_PATA40_SHORT; } ide_port_for_each_dev(i, drive, hwif) ide_dev_apply_params(drive, i); } /* * This is gets invoked once during initialization, to set *everything* up */ static int __init ide_init(void) { int ret; printk(KERN_INFO "Uniform Multi-Platform E-IDE driver\n"); ret = bus_register(&ide_bus_type); if (ret < 0) { printk(KERN_WARNING "IDE: bus_register error: %d\n", ret); return ret; } ide_port_class = class_create(THIS_MODULE, "ide_port"); if (IS_ERR(ide_port_class)) { ret = PTR_ERR(ide_port_class); goto out_port_class; } ide_acpi_init(); proc_ide_create(); return 0; out_port_class: bus_unregister(&ide_bus_type); return ret; } static void __exit ide_exit(void) { proc_ide_destroy(); class_destroy(ide_port_class); bus_unregister(&ide_bus_type); } module_init(ide_init); module_exit(ide_exit); MODULE_LICENSE("GPL");
gpl-2.0
Team-Blackout/Rezound_ION
drivers/media/video/ivtv/ivtv-routing.c
14175
3640
/* Audio/video-routing-related ivtv functions. Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-i2c.h" #include "ivtv-cards.h" #include "ivtv-gpio.h" #include "ivtv-routing.h" #include <media/msp3400.h> #include <media/m52790.h> #include <media/upd64031a.h> #include <media/upd64083.h> /* Selects the audio input and output according to the current settings. */ void ivtv_audio_set_io(struct ivtv *itv) { const struct ivtv_card_audio_input *in; u32 input, output = 0; /* Determine which input to use */ if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) in = &itv->card->radio_input; else in = &itv->card->audio_inputs[itv->audio_input]; /* handle muxer chips */ input = in->muxer_input; if (itv->card->hw_muxer & IVTV_HW_M52790) output = M52790_OUT_STEREO; v4l2_subdev_call(itv->sd_muxer, audio, s_routing, input, output, 0); input = in->audio_input; output = 0; if (itv->card->hw_audio & IVTV_HW_MSP34XX) output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1); ivtv_call_hw(itv, itv->card->hw_audio, audio, s_routing, input, output, 0); } /* Selects the video input and output according to the current settings. 
*/ void ivtv_video_set_io(struct ivtv *itv) { int inp = itv->active_input; u32 input; u32 type; v4l2_subdev_call(itv->sd_video, video, s_routing, itv->card->video_inputs[inp].video_input, 0, 0); type = itv->card->video_inputs[inp].video_type; if (type == IVTV_CARD_INPUT_VID_TUNER) { input = 0; /* Tuner */ } else if (type < IVTV_CARD_INPUT_COMPOSITE1) { input = 2; /* S-Video */ } else { input = 1; /* Composite */ } if (itv->card->hw_video & IVTV_HW_GPIO) ivtv_call_hw(itv, IVTV_HW_GPIO, video, s_routing, input, 0, 0); if (itv->card->hw_video & IVTV_HW_UPD64031A) { if (type == IVTV_CARD_INPUT_VID_TUNER || type >= IVTV_CARD_INPUT_COMPOSITE1) { /* Composite: GR on, connect to 3DYCS */ input = UPD64031A_GR_ON | UPD64031A_3DYCS_COMPOSITE; } else { /* S-Video: GR bypassed, turn it off */ input = UPD64031A_GR_OFF | UPD64031A_3DYCS_DISABLE; } input |= itv->card->gr_config; ivtv_call_hw(itv, IVTV_HW_UPD64031A, video, s_routing, input, 0, 0); } if (itv->card->hw_video & IVTV_HW_UPD6408X) { input = UPD64083_YCS_MODE; if (type > IVTV_CARD_INPUT_VID_TUNER && type < IVTV_CARD_INPUT_COMPOSITE1) { /* S-Video uses YCNR mode and internal Y-ADC, the upd64031a is not used. */ input |= UPD64083_YCNR_MODE; } else if (itv->card->hw_video & IVTV_HW_UPD64031A) { /* Use upd64031a output for tuner and composite(CX23416GYC only) inputs */ if (type == IVTV_CARD_INPUT_VID_TUNER || itv->card->type == IVTV_CARD_CX23416GYC) { input |= UPD64083_EXT_Y_ADC; } } ivtv_call_hw(itv, IVTV_HW_UPD6408X, video, s_routing, input, 0, 0); } }
gpl-2.0
heechul/linux
drivers/net/wireless/ath/ath9k/ani.c
96
16378
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/kernel.h> #include <linux/export.h> #include "hw.h" #include "hw-ops.h" struct ani_ofdm_level_entry { int spur_immunity_level; int fir_step_level; int ofdm_weak_signal_on; }; /* values here are relative to the INI */ /* * Legend: * * SI: Spur immunity * FS: FIR Step * WS: OFDM / CCK Weak Signal detection * MRC-CCK: Maximal Ratio Combining for CCK */ static const struct ani_ofdm_level_entry ofdm_level_table[] = { /* SI FS WS */ { 0, 0, 1 }, /* lvl 0 */ { 1, 1, 1 }, /* lvl 1 */ { 2, 2, 1 }, /* lvl 2 */ { 3, 2, 1 }, /* lvl 3 (default) */ { 4, 3, 1 }, /* lvl 4 */ { 5, 4, 1 }, /* lvl 5 */ { 6, 5, 1 }, /* lvl 6 */ { 7, 6, 1 }, /* lvl 7 */ { 7, 6, 0 }, /* lvl 8 */ { 7, 7, 0 } /* lvl 9 */ }; #define ATH9K_ANI_OFDM_NUM_LEVEL \ ARRAY_SIZE(ofdm_level_table) #define ATH9K_ANI_OFDM_MAX_LEVEL \ (ATH9K_ANI_OFDM_NUM_LEVEL-1) #define ATH9K_ANI_OFDM_DEF_LEVEL \ 3 /* default level - matches the INI settings */ /* * MRC (Maximal Ratio Combining) has always been used with multi-antenna ofdm. * With OFDM for single stream you just add up all antenna inputs, you're * only interested in what you get after FFT. 
Signal aligment is also not * required for OFDM because any phase difference adds up in the frequency * domain. * * MRC requires extra work for use with CCK. You need to align the antenna * signals from the different antenna before you can add the signals together. * You need aligment of signals as CCK is in time domain, so addition can cancel * your signal completely if phase is 180 degrees (think of adding sine waves). * You also need to remove noise before the addition and this is where ANI * MRC CCK comes into play. One of the antenna inputs may be stronger but * lower SNR, so just adding after alignment can be dangerous. * * Regardless of alignment in time, the antenna signals add constructively after * FFT and improve your reception. For more information: * * http://en.wikipedia.org/wiki/Maximal-ratio_combining */ struct ani_cck_level_entry { int fir_step_level; int mrc_cck_on; }; static const struct ani_cck_level_entry cck_level_table[] = { /* FS MRC-CCK */ { 0, 1 }, /* lvl 0 */ { 1, 1 }, /* lvl 1 */ { 2, 1 }, /* lvl 2 (default) */ { 3, 1 }, /* lvl 3 */ { 4, 0 }, /* lvl 4 */ { 5, 0 }, /* lvl 5 */ { 6, 0 }, /* lvl 6 */ { 6, 0 }, /* lvl 7 (only for high rssi) */ { 7, 0 } /* lvl 8 (only for high rssi) */ }; #define ATH9K_ANI_CCK_NUM_LEVEL \ ARRAY_SIZE(cck_level_table) #define ATH9K_ANI_CCK_MAX_LEVEL \ (ATH9K_ANI_CCK_NUM_LEVEL-1) #define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \ (ATH9K_ANI_CCK_NUM_LEVEL-3) #define ATH9K_ANI_CCK_DEF_LEVEL \ 2 /* default level - matches the INI settings */ static void ath9k_hw_update_mibstats(struct ath_hw *ah, struct ath9k_mib_stats *stats) { stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL); stats->rts_bad += REG_READ(ah, AR_RTS_FAIL); stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL); stats->rts_good += REG_READ(ah, AR_RTS_OK); stats->beacons += REG_READ(ah, AR_BEACON_CNT); } static void ath9k_ani_restart(struct ath_hw *ah) { struct ar5416AniState *aniState; if (!DO_ANI(ah)) return; aniState = &ah->curchan->ani; aniState->listenTime = 0; 
ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_PHY_ERR_1, 0); REG_WRITE(ah, AR_PHY_ERR_2, 0); REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); REGWRITE_BUFFER_FLUSH(ah); ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); aniState->ofdmPhyErrCount = 0; aniState->cckPhyErrCount = 0; } /* Adjust the OFDM Noise Immunity Level */ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, bool scan) { struct ar5416AniState *aniState = &ah->curchan->ani; struct ath_common *common = ath9k_hw_common(ah); const struct ani_ofdm_level_entry *entry_ofdm; const struct ani_cck_level_entry *entry_cck; bool weak_sig; ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", aniState->ofdmNoiseImmunityLevel, immunityLevel, BEACON_RSSI(ah), aniState->rssiThrLow, aniState->rssiThrHigh); if (!scan) aniState->ofdmNoiseImmunityLevel = immunityLevel; entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; if (aniState->spurImmunityLevel != entry_ofdm->spur_immunity_level) ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, entry_ofdm->spur_immunity_level); if (aniState->firstepLevel != entry_ofdm->fir_step_level && entry_ofdm->fir_step_level >= entry_cck->fir_step_level) ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, entry_ofdm->fir_step_level); weak_sig = entry_ofdm->ofdm_weak_signal_on; if (ah->opmode == NL80211_IFTYPE_STATION && BEACON_RSSI(ah) <= aniState->rssiThrHigh) weak_sig = true; if (aniState->ofdmWeakSigDetect != weak_sig) ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, entry_ofdm->ofdm_weak_signal_on); if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) { ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI; } else { ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI; ah->config.ofdm_trig_low = 
ATH9K_ANI_OFDM_TRIG_LOW; } } static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) { struct ar5416AniState *aniState; if (!DO_ANI(ah)) return; aniState = &ah->curchan->ani; if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false); } /* * Set the ANI settings to match an CCK level. */ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel, bool scan) { struct ar5416AniState *aniState = &ah->curchan->ani; struct ath_common *common = ath9k_hw_common(ah); const struct ani_ofdm_level_entry *entry_ofdm; const struct ani_cck_level_entry *entry_cck; ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", aniState->cckNoiseImmunityLevel, immunityLevel, BEACON_RSSI(ah), aniState->rssiThrLow, aniState->rssiThrHigh); if (ah->opmode == NL80211_IFTYPE_STATION && BEACON_RSSI(ah) <= aniState->rssiThrLow && immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; if (!scan) aniState->cckNoiseImmunityLevel = immunityLevel; entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; if (aniState->firstepLevel != entry_cck->fir_step_level && entry_cck->fir_step_level >= entry_ofdm->fir_step_level) ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, entry_cck->fir_step_level); /* Skip MRC CCK for pre AR9003 families */ if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah)) return; if (aniState->mrcCCK != entry_cck->mrc_cck_on) ath9k_hw_ani_control(ah, ATH9K_ANI_MRC_CCK, entry_cck->mrc_cck_on); } static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) { struct ar5416AniState *aniState; if (!DO_ANI(ah)) return; aniState = &ah->curchan->ani; if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1, false); } /* * only lower either OFDM or CCK errors per turn * we lower the other one next time 
*/ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) { struct ar5416AniState *aniState; aniState = &ah->curchan->ani; /* lower OFDM noise immunity */ if (aniState->ofdmNoiseImmunityLevel > 0 && (aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) { ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1, false); return; } /* lower CCK noise immunity */ if (aniState->cckNoiseImmunityLevel > 0) ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1, false); } /* * Restore the ANI parameters in the HAL and reset the statistics. * This routine should be called for every hardware reset and for * every channel change. */ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) { struct ar5416AniState *aniState = &ah->curchan->ani; struct ath9k_channel *chan = ah->curchan; struct ath_common *common = ath9k_hw_common(ah); int ofdm_nil, cck_nil; if (!DO_ANI(ah)) return; BUG_ON(aniState == NULL); ah->stats.ast_ani_reset++; /* only allow a subset of functions in AP mode */ if (ah->opmode == NL80211_IFTYPE_AP) { if (IS_CHAN_2GHZ(chan)) { ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | ATH9K_ANI_FIRSTEP_LEVEL); if (AR_SREV_9300_20_OR_LATER(ah)) ah->ani_function |= ATH9K_ANI_MRC_CCK; } else ah->ani_function = 0; } /* always allow mode (on/off) to be controlled */ ah->ani_function |= ATH9K_ANI_MODE; ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL, aniState->ofdmNoiseImmunityLevel); cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL, aniState->cckNoiseImmunityLevel); if (is_scanning || (ah->opmode != NL80211_IFTYPE_STATION && ah->opmode != NL80211_IFTYPE_ADHOC)) { /* * If we're scanning or in AP mode, the defaults (ini) * should be in place. For an AP we assume the historical * levels for this channel are probably outdated so start * from defaults instead. 
*/ if (aniState->ofdmNoiseImmunityLevel != ATH9K_ANI_OFDM_DEF_LEVEL || aniState->cckNoiseImmunityLevel != ATH9K_ANI_CCK_DEF_LEVEL) { ath_dbg(common, ANI, "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", ah->opmode, chan->channel, chan->channelFlags, is_scanning, aniState->ofdmNoiseImmunityLevel, aniState->cckNoiseImmunityLevel); ofdm_nil = ATH9K_ANI_OFDM_DEF_LEVEL; cck_nil = ATH9K_ANI_CCK_DEF_LEVEL; } } else { /* * restore historical levels for this channel */ ath_dbg(common, ANI, "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n", ah->opmode, chan->channel, chan->channelFlags, is_scanning, aniState->ofdmNoiseImmunityLevel, aniState->cckNoiseImmunityLevel); } ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning); ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning); /* * enable phy counters if hw supports or if not, enable phy * interrupts (so we can count each one) */ ath9k_ani_restart(ah); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); REGWRITE_BUFFER_FLUSH(ah); } static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ar5416AniState *aniState = &ah->curchan->ani; u32 phyCnt1, phyCnt2; int32_t listenTime; ath_hw_cycle_counters_update(common); listenTime = ath_hw_get_listen_time(common); if (listenTime <= 0) { ah->stats.ast_ani_lneg_or_lzero++; ath9k_ani_restart(ah); return false; } aniState->listenTime += listenTime; ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); ah->stats.ast_ani_ofdmerrs += phyCnt1 - aniState->ofdmPhyErrCount; aniState->ofdmPhyErrCount = phyCnt1; ah->stats.ast_ani_cckerrs += phyCnt2 - aniState->cckPhyErrCount; aniState->cckPhyErrCount = phyCnt2; return true; } void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar5416AniState *aniState; 
struct ath_common *common = ath9k_hw_common(ah); u32 ofdmPhyErrRate, cckPhyErrRate; if (!DO_ANI(ah)) return; aniState = &ah->curchan->ani; if (WARN_ON(!aniState)) return; if (!ath9k_hw_ani_read_counters(ah)) return; ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 / aniState->listenTime; cckPhyErrRate = aniState->cckPhyErrCount * 1000 / aniState->listenTime; ath_dbg(common, ANI, "listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n", aniState->listenTime, aniState->ofdmNoiseImmunityLevel, ofdmPhyErrRate, aniState->cckNoiseImmunityLevel, cckPhyErrRate, aniState->ofdmsTurn); if (aniState->listenTime > ah->aniperiod) { if (cckPhyErrRate < ah->config.cck_trig_low && ofdmPhyErrRate < ah->config.ofdm_trig_low) { ath9k_hw_ani_lower_immunity(ah); aniState->ofdmsTurn = !aniState->ofdmsTurn; } else if (ofdmPhyErrRate > ah->config.ofdm_trig_high) { ath9k_hw_ani_ofdm_err_trigger(ah); aniState->ofdmsTurn = false; } else if (cckPhyErrRate > ah->config.cck_trig_high) { ath9k_hw_ani_cck_err_trigger(ah); aniState->ofdmsTurn = true; } ath9k_ani_restart(ah); } } EXPORT_SYMBOL(ath9k_hw_ani_monitor); void ath9k_enable_mib_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); ath_dbg(common, ANI, "Enable MIB counters\n"); ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_FILT_OFDM, 0); REG_WRITE(ah, AR_FILT_CCK, 0); REG_WRITE(ah, AR_MIBC, ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS) & 0x0f); REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); REGWRITE_BUFFER_FLUSH(ah); } /* Freeze the MIB counters, get the stats and then clear them */ void ath9k_hw_disable_mib_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); ath_dbg(common, ANI, "Disable MIB counters\n"); REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC); ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC); REG_WRITE(ah, AR_FILT_OFDM, 
0); REG_WRITE(ah, AR_FILT_CCK, 0); } EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); void ath9k_hw_ani_setup(struct ath_hw *ah) { int i; static const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; static const int coarseHigh[] = { -14, -14, -14, -14, -12 }; static const int coarseLow[] = { -64, -64, -64, -64, -70 }; static const int firpwr[] = { -78, -78, -78, -78, -80 }; for (i = 0; i < 5; i++) { ah->totalSizeDesired[i] = totalSizeDesired[i]; ah->coarse_high[i] = coarseHigh[i]; ah->coarse_low[i] = coarseLow[i]; ah->firpwr[i] = firpwr[i]; } } void ath9k_hw_ani_init(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); int i; ath_dbg(common, ANI, "Initialize ANI\n"); ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH; ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW; ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH; ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW; for (i = 0; i < ARRAY_SIZE(ah->channels); i++) { struct ath9k_channel *chan = &ah->channels[i]; struct ar5416AniState *ani = &chan->ani; ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL; ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false; ani->ofdmsTurn = true; ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG; ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; } /* * since we expect some ongoing maintenance on the tables, let's sanity * check here default level should not modify INI setting. */ ah->aniperiod = ATH9K_ANI_PERIOD; ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL; if (ah->config.enable_ani) ah->proc_phyerr |= HAL_PROCESS_ANI; ath9k_ani_restart(ah); ath9k_enable_mib_counters(ah); }
gpl-2.0
VanirAOSP/kernel_motorola_msm8226
block/bfq-sched.c
96
34332
/* * BFQ: Hierarchical B-WF2Q+ scheduler. * * Based on ideas and code from CFQ: * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> * * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> * Paolo Valente <paolo.valente@unimore.it> * * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it> */ #ifdef CONFIG_CGROUP_BFQIO #define for_each_entity(entity) \ for (; entity != NULL; entity = entity->parent) #define for_each_entity_safe(entity, parent) \ for (; entity && ({ parent = entity->parent; 1; }); entity = parent) static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, int extract, struct bfq_data *bfqd); static inline void bfq_update_budget(struct bfq_entity *next_active) { struct bfq_entity *bfqg_entity; struct bfq_group *bfqg; struct bfq_sched_data *group_sd; BUG_ON(next_active == NULL); group_sd = next_active->sched_data; bfqg = container_of(group_sd, struct bfq_group, sched_data); /* * bfq_group's my_entity field is not NULL only if the group * is not the root group. We must not touch the root entity * as it must never become an active entity. */ bfqg_entity = bfqg->my_entity; if (bfqg_entity != NULL) bfqg_entity->budget = next_active->budget; } static int bfq_update_next_active(struct bfq_sched_data *sd) { struct bfq_entity *next_active; if (sd->active_entity != NULL) /* will update/requeue at the end of service */ return 0; /* * NOTE: this can be improved in many ways, such as returning * 1 (and thus propagating upwards the update) only when the * budget changes, or caching the bfqq that will be scheduled * next from this subtree. By now we worry more about * correctness than about performance... 
*/ next_active = bfq_lookup_next_entity(sd, 0, NULL); sd->next_active = next_active; if (next_active != NULL) bfq_update_budget(next_active); return 1; } static inline void bfq_check_next_active(struct bfq_sched_data *sd, struct bfq_entity *entity) { BUG_ON(sd->next_active != entity); } #else #define for_each_entity(entity) \ for (; entity != NULL; entity = NULL) #define for_each_entity_safe(entity, parent) \ for (parent = NULL; entity != NULL; entity = parent) static inline int bfq_update_next_active(struct bfq_sched_data *sd) { return 0; } static inline void bfq_check_next_active(struct bfq_sched_data *sd, struct bfq_entity *entity) { } static inline void bfq_update_budget(struct bfq_entity *next_active) { } #endif /* * Shift for timestamp calculations. This actually limits the maximum * service allowed in one timestamp delta (small shift values increase it), * the maximum total weight that can be used for the queues in the system * (big shift values increase it), and the period of virtual time wraparounds. */ #define WFQ_SERVICE_SHIFT 22 /** * bfq_gt - compare two timestamps. * @a: first ts. * @b: second ts. * * Return @a > @b, dealing with wrapping correctly. */ static inline int bfq_gt(u64 a, u64 b) { return (s64)(a - b) > 0; } static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) { struct bfq_queue *bfqq = NULL; BUG_ON(entity == NULL); if (entity->my_sched_data == NULL) bfqq = container_of(entity, struct bfq_queue, entity); return bfqq; } /** * bfq_delta - map service into the virtual time domain. * @service: amount of service. * @weight: scale factor (weight of an entity or weight sum). */ static inline u64 bfq_delta(unsigned long service, unsigned long weight) { u64 d = (u64)service << WFQ_SERVICE_SHIFT; do_div(d, weight); return d; } /** * bfq_calc_finish - assign the finish time to an entity. * @entity: the entity to act upon. * @service: the service to be charged to the entity. 
*/ static inline void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); BUG_ON(entity->weight == 0); entity->finish = entity->start + bfq_delta(service, entity->weight); if (bfqq != NULL) { bfq_log_bfqq(bfqq->bfqd, bfqq, "calc_finish: serv %lu, w %d", service, entity->weight); bfq_log_bfqq(bfqq->bfqd, bfqq, "calc_finish: start %llu, finish %llu, delta %llu", entity->start, entity->finish, bfq_delta(service, entity->weight)); } } /** * bfq_entity_of - get an entity from a node. * @node: the node field of the entity. * * Convert a node pointer to the relative entity. This is used only * to simplify the logic of some functions and not as the generic * conversion mechanism because, e.g., in the tree walking functions, * the check for a %NULL value would be redundant. */ static inline struct bfq_entity *bfq_entity_of(struct rb_node *node) { struct bfq_entity *entity = NULL; if (node != NULL) entity = rb_entry(node, struct bfq_entity, rb_node); return entity; } /** * bfq_extract - remove an entity from a tree. * @root: the tree root. * @entity: the entity to remove. */ static inline void bfq_extract(struct rb_root *root, struct bfq_entity *entity) { BUG_ON(entity->tree != root); entity->tree = NULL; rb_erase(&entity->rb_node, root); } /** * bfq_idle_extract - extract an entity from the idle tree. * @st: the service tree of the owning @entity. * @entity: the entity being removed. */ static void bfq_idle_extract(struct bfq_service_tree *st, struct bfq_entity *entity) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); struct rb_node *next; BUG_ON(entity->tree != &st->idle); if (entity == st->first_idle) { next = rb_next(&entity->rb_node); st->first_idle = bfq_entity_of(next); } if (entity == st->last_idle) { next = rb_prev(&entity->rb_node); st->last_idle = bfq_entity_of(next); } bfq_extract(&st->idle, entity); if (bfqq != NULL) list_del(&bfqq->bfqq_list); } /** * bfq_insert - generic tree insertion. 
* @root: tree root. * @entity: entity to insert. * * This is used for the idle and the active tree, since they are both * ordered by finish time. */ static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) { struct bfq_entity *entry; struct rb_node **node = &root->rb_node; struct rb_node *parent = NULL; BUG_ON(entity->tree != NULL); while (*node != NULL) { parent = *node; entry = rb_entry(parent, struct bfq_entity, rb_node); if (bfq_gt(entry->finish, entity->finish)) node = &parent->rb_left; else node = &parent->rb_right; } rb_link_node(&entity->rb_node, parent, node); rb_insert_color(&entity->rb_node, root); entity->tree = root; } /** * bfq_update_min - update the min_start field of a entity. * @entity: the entity to update. * @node: one of its children. * * This function is called when @entity may store an invalid value for * min_start due to updates to the active tree. The function assumes * that the subtree rooted at @node (which may be its left or its right * child) has a valid min_start value. */ static inline void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) { struct bfq_entity *child; if (node != NULL) { child = rb_entry(node, struct bfq_entity, rb_node); if (bfq_gt(entity->min_start, child->min_start)) entity->min_start = child->min_start; } } /** * bfq_update_active_node - recalculate min_start. * @node: the node to update. * * @node may have changed position or one of its children may have moved, * this function updates its min_start value. The left and right subtrees * are assumed to hold a correct min_start value. */ static inline void bfq_update_active_node(struct rb_node *node) { struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); entity->min_start = entity->start; bfq_update_min(entity, node->rb_right); bfq_update_min(entity, node->rb_left); } /** * bfq_update_active_tree - update min_start for the whole active tree. * @node: the starting node. 
* * @node must be the deepest modified node after an update. This function * updates its min_start using the values held by its children, assuming * that they did not change, and then updates all the nodes that may have * changed in the path to the root. The only nodes that may have changed * are the ones in the path or their siblings. */ static void bfq_update_active_tree(struct rb_node *node) { struct rb_node *parent; up: bfq_update_active_node(node); parent = rb_parent(node); if (parent == NULL) return; if (node == parent->rb_left && parent->rb_right != NULL) bfq_update_active_node(parent->rb_right); else if (parent->rb_left != NULL) bfq_update_active_node(parent->rb_left); node = parent; goto up; } /** * bfq_active_insert - insert an entity in the active tree of its group/device. * @st: the service tree of the entity. * @entity: the entity being inserted. * * The active tree is ordered by finish time, but an extra key is kept * per each node, containing the minimum value for the start times of * its children (and the node itself), so it's possible to search for * the eligible node with the lowest finish time in logarithmic time. */ static void bfq_active_insert(struct bfq_service_tree *st, struct bfq_entity *entity) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); struct rb_node *node = &entity->rb_node; bfq_insert(&st->active, entity); if (node->rb_left != NULL) node = node->rb_left; else if (node->rb_right != NULL) node = node->rb_right; bfq_update_active_tree(node); if (bfqq != NULL) list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); } /** * bfq_ioprio_to_weight - calc a weight from an ioprio. * @ioprio: the ioprio value to convert. */ static unsigned short bfq_ioprio_to_weight(int ioprio) { WARN_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); return IOPRIO_BE_NR - ioprio; } /** * bfq_weight_to_ioprio - calc an ioprio from a weight. * @weight: the weight value to convert. 
* * To preserve as mush as possible the old only-ioprio user interface, * 0 is used as an escape ioprio value for weights (numerically) equal or * larger than IOPRIO_BE_NR */ static unsigned short bfq_weight_to_ioprio(int weight) { WARN_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT); return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight; } static inline void bfq_get_entity(struct bfq_entity *entity) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); struct bfq_sched_data *sd; if (bfqq != NULL) { sd = entity->sched_data; atomic_inc(&bfqq->ref); bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", bfqq, atomic_read(&bfqq->ref)); } } /** * bfq_find_deepest - find the deepest node that an extraction can modify. * @node: the node being removed. * * Do the first step of an extraction in an rb tree, looking for the * node that will replace @node, and returning the deepest node that * the following modifications to the tree can touch. If @node is the * last node in the tree return %NULL. */ static struct rb_node *bfq_find_deepest(struct rb_node *node) { struct rb_node *deepest; if (node->rb_right == NULL && node->rb_left == NULL) deepest = rb_parent(node); else if (node->rb_right == NULL) deepest = node->rb_left; else if (node->rb_left == NULL) deepest = node->rb_right; else { deepest = rb_next(node); if (deepest->rb_right != NULL) deepest = deepest->rb_right; else if (rb_parent(deepest) != node) deepest = rb_parent(deepest); } return deepest; } /** * bfq_active_extract - remove an entity from the active tree. * @st: the service_tree containing the tree. * @entity: the entity being removed. 
*/ static void bfq_active_extract(struct bfq_service_tree *st, struct bfq_entity *entity) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); struct rb_node *node; node = bfq_find_deepest(&entity->rb_node); bfq_extract(&st->active, entity); if (node != NULL) bfq_update_active_tree(node); if (bfqq != NULL) list_del(&bfqq->bfqq_list); } /** * bfq_idle_insert - insert an entity into the idle tree. * @st: the service tree containing the tree. * @entity: the entity to insert. */ static void bfq_idle_insert(struct bfq_service_tree *st, struct bfq_entity *entity) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); struct bfq_entity *first_idle = st->first_idle; struct bfq_entity *last_idle = st->last_idle; if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish)) st->first_idle = entity; if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish)) st->last_idle = entity; bfq_insert(&st->idle, entity); if (bfqq != NULL) list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); } /** * bfq_forget_entity - remove an entity from the wfq trees. * @st: the service tree. * @entity: the entity being removed. * * Update the device status and forget everything about @entity, putting * the device reference to it, if it is a queue. Entities belonging to * groups are not refcounted. */ static void bfq_forget_entity(struct bfq_service_tree *st, struct bfq_entity *entity) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); struct bfq_sched_data *sd; BUG_ON(!entity->on_st); entity->on_st = 0; st->wsum -= entity->weight; if (bfqq != NULL) { sd = entity->sched_data; bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", bfqq, atomic_read(&bfqq->ref)); bfq_put_queue(bfqq); } } /** * bfq_put_idle_entity - release the idle tree ref of an entity. * @st: service tree for the entity. * @entity: the entity being released. 
*/ static void bfq_put_idle_entity(struct bfq_service_tree *st, struct bfq_entity *entity) { bfq_idle_extract(st, entity); bfq_forget_entity(st, entity); } /** * bfq_forget_idle - update the idle tree if necessary. * @st: the service tree to act upon. * * To preserve the global O(log N) complexity we only remove one entry here; * as the idle tree will not grow indefinitely this can be done safely. */ static void bfq_forget_idle(struct bfq_service_tree *st) { struct bfq_entity *first_idle = st->first_idle; struct bfq_entity *last_idle = st->last_idle; if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL && !bfq_gt(last_idle->finish, st->vtime)) { /* * Forget the whole idle tree, increasing the vtime past * the last finish time of idle entities. */ st->vtime = last_idle->finish; } if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime)) bfq_put_idle_entity(st, first_idle); } static struct bfq_service_tree * __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, struct bfq_entity *entity) { struct bfq_service_tree *new_st = old_st; if (entity->ioprio_changed) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); BUG_ON(old_st->wsum < entity->weight); old_st->wsum -= entity->weight; if (entity->new_weight != entity->orig_weight) { entity->orig_weight = entity->new_weight; entity->ioprio = bfq_weight_to_ioprio(entity->orig_weight); } else if (entity->new_ioprio != entity->ioprio) { entity->ioprio = entity->new_ioprio; entity->orig_weight = bfq_ioprio_to_weight(entity->ioprio); } else entity->new_weight = entity->orig_weight = bfq_ioprio_to_weight(entity->ioprio); entity->ioprio_class = entity->new_ioprio_class; entity->ioprio_changed = 0; /* * NOTE: here we may be changing the weight too early, * this will cause unfairness. The correct approach * would have required additional complexity to defer * weight changes to the proper time instants (i.e., * when entity->finish <= old_st->vtime). 
*/ new_st = bfq_entity_service_tree(entity); entity->weight = entity->orig_weight * (bfqq != NULL ? bfqq->raising_coeff : 1); new_st->wsum += entity->weight; if (new_st != old_st) entity->start = new_st->vtime; } return new_st; } /** * bfq_bfqq_served - update the scheduler status after selection for service. * @bfqq: the queue being served. * @served: bytes to transfer. * * NOTE: this can be optimized, as the timestamps of upper level entities * are synchronized every time a new bfqq is selected for service. By now, * we keep it to better check consistency. */ static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served) { struct bfq_entity *entity = &bfqq->entity; struct bfq_service_tree *st; for_each_entity(entity) { st = bfq_entity_service_tree(entity); entity->service += served; BUG_ON(entity->service > entity->budget); BUG_ON(st->wsum == 0); st->vtime += bfq_delta(served, st->wsum); bfq_forget_idle(st); } bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served); } /** * bfq_bfqq_charge_full_budget - set the service to the entity budget. * @bfqq: the queue that needs a service update. * * When it's not possible to be fair in the service domain, because * a queue is not consuming its budget fast enough (the meaning of * fast depends on the timeout parameter), we charge it a full * budget. In this way we should obtain a sort of time-domain * fairness among all the seeky/slow queues. */ static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq) { struct bfq_entity *entity = &bfqq->entity; bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); bfq_bfqq_served(bfqq, entity->budget - entity->service); } /** * __bfq_activate_entity - activate an entity. * @entity: the entity being activated. * * Called whenever an entity is activated, i.e., it is not active and one * of its children receives a new request, or has to be reactivated due to * budget exhaustion. 
It uses the current budget of the entity (and the * service received if @entity is active) of the queue to calculate its * timestamps. */ static void __bfq_activate_entity(struct bfq_entity *entity) { struct bfq_sched_data *sd = entity->sched_data; struct bfq_service_tree *st = bfq_entity_service_tree(entity); if (entity == sd->active_entity) { BUG_ON(entity->tree != NULL); /* * If we are requeueing the current entity we have * to take care of not charging to it service it has * not received. */ bfq_calc_finish(entity, entity->service); entity->start = entity->finish; sd->active_entity = NULL; } else if (entity->tree == &st->active) { /* * Requeueing an entity due to a change of some * next_active entity below it. We reuse the old * start time. */ bfq_active_extract(st, entity); } else if (entity->tree == &st->idle) { /* * Must be on the idle tree, bfq_idle_extract() will * check for that. */ bfq_idle_extract(st, entity); entity->start = bfq_gt(st->vtime, entity->finish) ? st->vtime : entity->finish; } else { /* * The finish time of the entity may be invalid, and * it is in the past for sure, otherwise the queue * would have been on the idle tree. */ entity->start = st->vtime; st->wsum += entity->weight; bfq_get_entity(entity); BUG_ON(entity->on_st); entity->on_st = 1; } st = __bfq_entity_update_weight_prio(st, entity); bfq_calc_finish(entity, entity->budget); bfq_active_insert(st, entity); } /** * bfq_activate_entity - activate an entity and its ancestors if necessary. * @entity: the entity to activate. * * Activate @entity and all the entities on the path from it to the root. */ static void bfq_activate_entity(struct bfq_entity *entity) { struct bfq_sched_data *sd; for_each_entity(entity) { __bfq_activate_entity(entity); sd = entity->sched_data; if (!bfq_update_next_active(sd)) /* * No need to propagate the activation to the * upper entities, as they will be updated when * the active entity is rescheduled. 
*/ break; } } /** * __bfq_deactivate_entity - deactivate an entity from its service tree. * @entity: the entity to deactivate. * @requeue: if false, the entity will not be put into the idle tree. * * Deactivate an entity, independently from its previous state. If the * entity was not on a service tree just return, otherwise if it is on * any scheduler tree, extract it from that tree, and if necessary * and if the caller did not specify @requeue, put it on the idle tree. * * Return %1 if the caller should update the entity hierarchy, i.e., * if the entity was under service or if it was the next_active for * its sched_data; return %0 otherwise. */ static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue) { struct bfq_sched_data *sd = entity->sched_data; struct bfq_service_tree *st = bfq_entity_service_tree(entity); int was_active = entity == sd->active_entity; int ret = 0; if (!entity->on_st) return 0; BUG_ON(was_active && entity->tree != NULL); if (was_active) { bfq_calc_finish(entity, entity->service); sd->active_entity = NULL; } else if (entity->tree == &st->active) bfq_active_extract(st, entity); else if (entity->tree == &st->idle) bfq_idle_extract(st, entity); else if (entity->tree != NULL) BUG(); if (was_active || sd->next_active == entity) ret = bfq_update_next_active(sd); if (!requeue || !bfq_gt(entity->finish, st->vtime)) bfq_forget_entity(st, entity); else bfq_idle_insert(st, entity); BUG_ON(sd->active_entity == entity); BUG_ON(sd->next_active == entity); return ret; } /** * bfq_deactivate_entity - deactivate an entity. * @entity: the entity to deactivate. 
* @requeue: true if the entity can be put on the idle tree */ static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) { struct bfq_sched_data *sd; struct bfq_entity *parent; for_each_entity_safe(entity, parent) { sd = entity->sched_data; if (!__bfq_deactivate_entity(entity, requeue)) /* * The parent entity is still backlogged, and * we don't need to update it as it is still * under service. */ break; if (sd->next_active != NULL) /* * The parent entity is still backlogged and * the budgets on the path towards the root * need to be updated. */ goto update; /* * If we reach there the parent is no more backlogged and * we want to propagate the dequeue upwards. */ requeue = 1; } return; update: entity = parent; for_each_entity(entity) { __bfq_activate_entity(entity); sd = entity->sched_data; if (!bfq_update_next_active(sd)) break; } } /** * bfq_update_vtime - update vtime if necessary. * @st: the service tree to act upon. * * If necessary update the service tree vtime to have at least one * eligible entity, skipping to its start time. Assumes that the * active tree of the device is not empty. * * NOTE: this hierarchical implementation updates vtimes quite often, * we may end up with reactivated tasks getting timestamps after a * vtime skip done because we needed a ->first_active entity on some * intermediate node. */ static void bfq_update_vtime(struct bfq_service_tree *st) { struct bfq_entity *entry; struct rb_node *node = st->active.rb_node; entry = rb_entry(node, struct bfq_entity, rb_node); if (bfq_gt(entry->min_start, st->vtime)) { st->vtime = entry->min_start; bfq_forget_idle(st); } } /** * bfq_first_active - find the eligible entity with the smallest finish time * @st: the service tree to select from. * * This function searches the first schedulable entity, starting from the * root of the tree and going on the left every time on this side there is * a subtree with at least one eligible (start >= vtime) entity. 
The path * on the right is followed only if a) the left subtree contains no eligible * entities and b) no eligible entity has been found yet. */ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) { struct bfq_entity *entry, *first = NULL; struct rb_node *node = st->active.rb_node; while (node != NULL) { entry = rb_entry(node, struct bfq_entity, rb_node); left: if (!bfq_gt(entry->start, st->vtime)) first = entry; BUG_ON(bfq_gt(entry->min_start, st->vtime)); if (node->rb_left != NULL) { entry = rb_entry(node->rb_left, struct bfq_entity, rb_node); if (!bfq_gt(entry->min_start, st->vtime)) { node = node->rb_left; goto left; } } if (first != NULL) break; node = node->rb_right; } BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active)); return first; } /** * __bfq_lookup_next_entity - return the first eligible entity in @st. * @st: the service tree. * * Update the virtual time in @st and return the first eligible entity * it contains. */ static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, bool force) { struct bfq_entity *entity, *new_next_active = NULL; if (RB_EMPTY_ROOT(&st->active)) return NULL; bfq_update_vtime(st); entity = bfq_first_active_entity(st); BUG_ON(bfq_gt(entity->start, st->vtime)); /* * If the chosen entity does not match with the sched_data's * next_active and we are forcedly serving the IDLE priority * class tree, bubble up budget update. */ if (unlikely(force && entity != entity->sched_data->next_active)) { new_next_active = entity; for_each_entity(new_next_active) bfq_update_budget(new_next_active); } return entity; } /** * bfq_lookup_next_entity - return the first eligible entity in @sd. * @sd: the sched_data. * @extract: if true the returned entity will be also extracted from @sd. 
* * NOTE: since we cache the next_active entity at each level of the * hierarchy, the complexity of the lookup can be decreased with * absolutely no effort just returning the cached next_active value; * we prefer to do full lookups to test the consistency of * the data * structures. */ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, int extract, struct bfq_data *bfqd) { struct bfq_service_tree *st = sd->service_tree; struct bfq_entity *entity; int i=0; BUG_ON(sd->active_entity != NULL); if (bfqd != NULL && jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, true); if (entity != NULL) { i = BFQ_IOPRIO_CLASSES - 1; bfqd->bfq_class_idle_last_service = jiffies; sd->next_active = entity; } } for (; i < BFQ_IOPRIO_CLASSES; i++) { entity = __bfq_lookup_next_entity(st + i, false); if (entity != NULL) { if (extract) { bfq_check_next_active(sd, entity); bfq_active_extract(st + i, entity); sd->active_entity = entity; sd->next_active = NULL; } break; } } return entity; } /* * Get next queue for service. 
*/ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) { struct bfq_entity *entity = NULL; struct bfq_sched_data *sd; struct bfq_queue *bfqq; BUG_ON(bfqd->active_queue != NULL); if (bfqd->busy_queues == 0) return NULL; sd = &bfqd->root_group->sched_data; for (; sd != NULL; sd = entity->my_sched_data) { entity = bfq_lookup_next_entity(sd, 1, bfqd); BUG_ON(entity == NULL); entity->service = 0; } bfqq = bfq_entity_to_bfqq(entity); BUG_ON(bfqq == NULL); return bfqq; } static void __bfq_bfqd_reset_active(struct bfq_data *bfqd) { if (bfqd->active_bic != NULL) { put_io_context(bfqd->active_bic->icq.ioc); bfqd->active_bic = NULL; } bfqd->active_queue = NULL; del_timer(&bfqd->idle_slice_timer); } static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, int requeue) { struct bfq_entity *entity = &bfqq->entity; if (bfqq == bfqd->active_queue) __bfq_bfqd_reset_active(bfqd); bfq_deactivate_entity(entity, requeue); } static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) { struct bfq_entity *entity = &bfqq->entity; bfq_activate_entity(entity); } /* * Called when the bfqq no longer has requests pending, remove it from * the service tree. */ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, int requeue) { BUG_ON(!bfq_bfqq_busy(bfqq)); BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); bfq_log_bfqq(bfqd, bfqq, "del from busy"); bfq_clear_bfqq_busy(bfqq); BUG_ON(bfqd->busy_queues == 0); bfqd->busy_queues--; bfq_deactivate_bfqq(bfqd, bfqq, requeue); } /* * Called when an inactive queue receives a new request. */ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) { BUG_ON(bfq_bfqq_busy(bfqq)); BUG_ON(bfqq == bfqd->active_queue); bfq_log_bfqq(bfqd, bfqq, "add to busy"); bfq_activate_bfqq(bfqd, bfqq); bfq_mark_bfqq_busy(bfqq); bfqd->busy_queues++; }
gpl-2.0
sumitn/linux-2.6.24
arch/sparc64/kernel/visemul.c
96
19895
/* visemul.c: Emulation of VIS instructions. * * Copyright (C) 2006 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/thread_info.h> #include <asm/ptrace.h> #include <asm/pstate.h> #include <asm/system.h> #include <asm/fpumacro.h> #include <asm/uaccess.h> /* OPF field of various VIS instructions. */ /* 000111011 - four 16-bit packs */ #define FPACK16_OPF 0x03b /* 000111010 - two 32-bit packs */ #define FPACK32_OPF 0x03a /* 000111101 - four 16-bit packs */ #define FPACKFIX_OPF 0x03d /* 001001101 - four 16-bit expands */ #define FEXPAND_OPF 0x04d /* 001001011 - two 32-bit merges */ #define FPMERGE_OPF 0x04b /* 000110001 - 8-by-16-bit partitoned product */ #define FMUL8x16_OPF 0x031 /* 000110011 - 8-by-16-bit upper alpha partitioned product */ #define FMUL8x16AU_OPF 0x033 /* 000110101 - 8-by-16-bit lower alpha partitioned product */ #define FMUL8x16AL_OPF 0x035 /* 000110110 - upper 8-by-16-bit partitioned product */ #define FMUL8SUx16_OPF 0x036 /* 000110111 - lower 8-by-16-bit partitioned product */ #define FMUL8ULx16_OPF 0x037 /* 000111000 - upper 8-by-16-bit partitioned product */ #define FMULD8SUx16_OPF 0x038 /* 000111001 - lower unsigned 8-by-16-bit partitioned product */ #define FMULD8ULx16_OPF 0x039 /* 000101000 - four 16-bit compare; set rd if src1 > src2 */ #define FCMPGT16_OPF 0x028 /* 000101100 - two 32-bit compare; set rd if src1 > src2 */ #define FCMPGT32_OPF 0x02c /* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ #define FCMPLE16_OPF 0x020 /* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ #define FCMPLE32_OPF 0x024 /* 000100010 - four 16-bit compare; set rd if src1 != src2 */ #define FCMPNE16_OPF 0x022 /* 000100110 - two 32-bit compare; set rd if src1 != src2 */ #define FCMPNE32_OPF 0x026 /* 000101010 - four 16-bit compare; set rd if src1 == src2 */ #define FCMPEQ16_OPF 0x02a /* 000101110 - two 32-bit compare; set rd if src1 == src2 */ #define FCMPEQ32_OPF 0x02e /* 000000000 
- Eight 8-bit edge boundary processing */ #define EDGE8_OPF 0x000 /* 000000001 - Eight 8-bit edge boundary processing, no CC */ #define EDGE8N_OPF 0x001 /* 000000010 - Eight 8-bit edge boundary processing, little-endian */ #define EDGE8L_OPF 0x002 /* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ #define EDGE8LN_OPF 0x003 /* 000000100 - Four 16-bit edge boundary processing */ #define EDGE16_OPF 0x004 /* 000000101 - Four 16-bit edge boundary processing, no CC */ #define EDGE16N_OPF 0x005 /* 000000110 - Four 16-bit edge boundary processing, little-endian */ #define EDGE16L_OPF 0x006 /* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ #define EDGE16LN_OPF 0x007 /* 000001000 - Two 32-bit edge boundary processing */ #define EDGE32_OPF 0x008 /* 000001001 - Two 32-bit edge boundary processing, no CC */ #define EDGE32N_OPF 0x009 /* 000001010 - Two 32-bit edge boundary processing, little-endian */ #define EDGE32L_OPF 0x00a /* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ #define EDGE32LN_OPF 0x00b /* 000111110 - distance between 8 8-bit components */ #define PDIST_OPF 0x03e /* 000010000 - convert 8-bit 3-D address to blocked byte address */ #define ARRAY8_OPF 0x010 /* 000010010 - convert 16-bit 3-D address to blocked byte address */ #define ARRAY16_OPF 0x012 /* 000010100 - convert 32-bit 3-D address to blocked byte address */ #define ARRAY32_OPF 0x014 /* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ #define BMASK_OPF 0x019 /* 001001100 - Permute bytes as specified by GSR.MASK */ #define BSHUFFLE_OPF 0x04c #define VIS_OPF_SHIFT 5 #define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) #define RS1(INSN) (((INSN) >> 24) & 0x1f) #define RS2(INSN) (((INSN) >> 0) & 0x1f) #define RD(INSN) (((INSN) >> 25) & 0x1f) static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd, int from_kernel) { if (rs2 >= 16 || rs1 >= 16 || rd >= 16) { if (from_kernel != 0) __asm__ 
__volatile__("flushw"); else flushw_user(); } } static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { unsigned long value; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); value = win->locals[reg - 16]; } else if (test_thread_flag(TIF_32BIT)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; } static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, struct pt_regs *regs) { BUG_ON(reg < 16); BUG_ON(regs->tstate & TSTATE_PRIV); if (test_thread_flag(TIF_32BIT)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); return (unsigned long __user *)&win32->locals[reg - 16]; } else { struct reg_window __user *win; win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); return &win->locals[reg - 16]; } } static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg, struct pt_regs *regs) { BUG_ON(reg >= 16); BUG_ON(regs->tstate & TSTATE_PRIV); return &regs->u_regs[reg]; } static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) { if (rd < 16) { unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs); *rd_kern = val; } else { unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); if (test_thread_flag(TIF_32BIT)) __put_user((u32)val, (u32 __user *)rd_user); else __put_user(val, rd_user); } } static inline unsigned long fpd_regval(struct fpustate *f, unsigned int insn_regnum) { insn_regnum = (((insn_regnum & 1) << 5) | (insn_regnum & 0x1e)); return *(unsigned long *) &f->regs[insn_regnum]; } static inline unsigned 
long *fpd_regaddr(struct fpustate *f, unsigned int insn_regnum) { insn_regnum = (((insn_regnum & 1) << 5) | (insn_regnum & 0x1e)); return (unsigned long *) &f->regs[insn_regnum]; } static inline unsigned int fps_regval(struct fpustate *f, unsigned int insn_regnum) { return f->regs[insn_regnum]; } static inline unsigned int *fps_regaddr(struct fpustate *f, unsigned int insn_regnum) { return &f->regs[insn_regnum]; } struct edge_tab { u16 left, right; }; struct edge_tab edge8_tab[8] = { { 0xff, 0x80 }, { 0x7f, 0xc0 }, { 0x3f, 0xe0 }, { 0x1f, 0xf0 }, { 0x0f, 0xf8 }, { 0x07, 0xfc }, { 0x03, 0xfe }, { 0x01, 0xff }, }; struct edge_tab edge8_tab_l[8] = { { 0xff, 0x01 }, { 0xfe, 0x03 }, { 0xfc, 0x07 }, { 0xf8, 0x0f }, { 0xf0, 0x1f }, { 0xe0, 0x3f }, { 0xc0, 0x7f }, { 0x80, 0xff }, }; struct edge_tab edge16_tab[4] = { { 0xf, 0x8 }, { 0x7, 0xc }, { 0x3, 0xe }, { 0x1, 0xf }, }; struct edge_tab edge16_tab_l[4] = { { 0xf, 0x1 }, { 0xe, 0x3 }, { 0xc, 0x7 }, { 0x8, 0xf }, }; struct edge_tab edge32_tab[2] = { { 0x3, 0x2 }, { 0x1, 0x3 }, }; struct edge_tab edge32_tab_l[2] = { { 0x3, 0x1 }, { 0x2, 0x3 }, }; static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) { unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val; u16 left, right; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); orig_rs1 = rs1 = fetch_reg(RS1(insn), regs); orig_rs2 = rs2 = fetch_reg(RS2(insn), regs); if (test_thread_flag(TIF_32BIT)) { rs1 = rs1 & 0xffffffff; rs2 = rs2 & 0xffffffff; } switch (opf) { default: case EDGE8_OPF: case EDGE8N_OPF: left = edge8_tab[rs1 & 0x7].left; right = edge8_tab[rs2 & 0x7].right; break; case EDGE8L_OPF: case EDGE8LN_OPF: left = edge8_tab_l[rs1 & 0x7].left; right = edge8_tab_l[rs2 & 0x7].right; break; case EDGE16_OPF: case EDGE16N_OPF: left = edge16_tab[(rs1 >> 1) & 0x3].left; right = edge16_tab[(rs2 >> 1) & 0x3].right; break; case EDGE16L_OPF: case EDGE16LN_OPF: left = edge16_tab_l[(rs1 >> 1) & 0x3].left; right = edge16_tab_l[(rs2 >> 1) & 0x3].right; break; 
case EDGE32_OPF: case EDGE32N_OPF: left = edge32_tab[(rs1 >> 2) & 0x1].left; right = edge32_tab[(rs2 >> 2) & 0x1].right; break; case EDGE32L_OPF: case EDGE32LN_OPF: left = edge32_tab_l[(rs1 >> 2) & 0x1].left; right = edge32_tab_l[(rs2 >> 2) & 0x1].right; break; }; if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) rd_val = right & left; else rd_val = left; store_reg(regs, rd_val, RD(insn)); switch (opf) { case EDGE8_OPF: case EDGE8L_OPF: case EDGE16_OPF: case EDGE16L_OPF: case EDGE32_OPF: case EDGE32L_OPF: { unsigned long ccr, tstate; __asm__ __volatile__("subcc %1, %2, %%g0\n\t" "rd %%ccr, %0" : "=r" (ccr) : "r" (orig_rs1), "r" (orig_rs2) : "cc"); tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); regs->tstate = tstate | (ccr << 32UL); } }; } static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) { unsigned long rs1, rs2, rd_val; unsigned int bits, bits_mask; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); rs1 = fetch_reg(RS1(insn), regs); rs2 = fetch_reg(RS2(insn), regs); bits = (rs2 > 5 ? 
5 : rs2); bits_mask = (1UL << bits) - 1UL; rd_val = ((((rs1 >> 11) & 0x3) << 0) | (((rs1 >> 33) & 0x3) << 2) | (((rs1 >> 55) & 0x1) << 4) | (((rs1 >> 13) & 0xf) << 5) | (((rs1 >> 35) & 0xf) << 9) | (((rs1 >> 56) & 0xf) << 13) | (((rs1 >> 17) & bits_mask) << 17) | (((rs1 >> 39) & bits_mask) << (17 + bits)) | (((rs1 >> 60) & 0xf) << (17 + (2*bits)))); switch (opf) { case ARRAY16_OPF: rd_val <<= 1; break; case ARRAY32_OPF: rd_val <<= 2; }; store_reg(regs, rd_val, RD(insn)); } static void bmask(struct pt_regs *regs, unsigned int insn) { unsigned long rs1, rs2, rd_val, gsr; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); rs1 = fetch_reg(RS1(insn), regs); rs2 = fetch_reg(RS2(insn), regs); rd_val = rs1 + rs2; store_reg(regs, rd_val, RD(insn)); gsr = current_thread_info()->gsr[0] & 0xffffffff; gsr |= rd_val << 32UL; current_thread_info()->gsr[0] = gsr; } static void bshuffle(struct pt_regs *regs, unsigned int insn) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val; unsigned long bmask, i; bmask = current_thread_info()->gsr[0] >> 32UL; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0UL; for (i = 0; i < 8; i++) { unsigned long which = (bmask >> (i * 4)) & 0xf; unsigned long byte; if (which < 8) byte = (rs1 >> (which * 8)) & 0xff; else byte = (rs2 >> ((which-8)*8)) & 0xff; rd_val |= (byte << (i * 8)); } *fpd_regaddr(f, RD(insn)) = rd_val; } static void pdist(struct pt_regs *regs, unsigned int insn) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, *rd, rd_val; unsigned long i; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS1(insn)); rd = fpd_regaddr(f, RD(insn)); rd_val = *rd; for (i = 0; i < 8; i++) { s16 s1, s2; s1 = (rs1 >> (56 - (i * 8))) & 0xff; s2 = (rs2 >> (56 - (i * 8))) & 0xff; /* Absolute value of difference. 
*/ s1 -= s2; if (s1 < 0) s1 = ~s1 + 1; rd_val += s1; } *rd = rd_val; } static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, gsr, scale, rd_val; gsr = current_thread_info()->gsr[0]; scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); switch (opf) { case FPACK16_OPF: { unsigned long byte; rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { unsigned int val; s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; int scaled = src << scale; int from_fixed = scaled >> 7; val = ((from_fixed < 0) ? 0 : (from_fixed > 255) ? 255 : from_fixed); rd_val |= (val << (8 * byte)); } *fps_regaddr(f, RD(insn)) = rd_val; break; } case FPACK32_OPF: { unsigned long word; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); for (word = 0; word < 2; word++) { unsigned long val; s32 src = (rs2 >> (word * 32UL)); s64 scaled = src << scale; s64 from_fixed = scaled >> 23; val = ((from_fixed < 0) ? 0 : (from_fixed > 255) ? 255 : from_fixed); rd_val |= (val << (32 * word)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FPACKFIX_OPF: { unsigned long word; rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (word = 0; word < 2; word++) { long val; s32 src = (rs2 >> (word * 32UL)); s64 scaled = src << scale; s64 from_fixed = scaled >> 16; val = ((from_fixed < -32768) ? -32768 : (from_fixed > 32767) ? 
32767 : from_fixed); rd_val |= ((val & 0xffff) << (word * 16)); } *fps_regaddr(f, RD(insn)) = rd_val; break; } case FEXPAND_OPF: { unsigned long byte; rs2 = fps_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { unsigned long val; u8 src = (rs2 >> (byte * 8)) & 0xff; val = src << 4; rd_val |= (val << (byte * 16)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FPMERGE_OPF: { rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = (((rs2 & 0x000000ff) << 0) | ((rs1 & 0x000000ff) << 8) | ((rs2 & 0x0000ff00) << 8) | ((rs1 & 0x0000ff00) << 16) | ((rs2 & 0x00ff0000) << 16) | ((rs1 & 0x00ff0000) << 24) | ((rs2 & 0xff000000) << 24) | ((rs1 & 0xff000000) << 32)); *fpd_regaddr(f, RD(insn)) = rd_val; break; } }; } static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val; switch (opf) { case FMUL8x16_OPF: { unsigned long byte; rs1 = fps_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; s16 src2 = (rs2 >> (byte * 16)) & 0xffff; u32 prod = src1 * src2; u16 scaled = ((prod & 0x00ffff00) >> 8); /* Round up. */ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMUL8x16AU_OPF: case FMUL8x16AL_OPF: { unsigned long byte; s16 src2; rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = 0; src2 = (rs2 >> (opf == FMUL8x16AU_OPF) ? 16 : 0); for (byte = 0; byte < 4; byte++) { u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; u32 prod = src1 * src2; u16 scaled = ((prod & 0x00ffff00) >> 8); /* Round up. 
*/ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMUL8SUx16_OPF: case FMUL8ULx16_OPF: { unsigned long byte, ushift; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; for (byte = 0; byte < 4; byte++) { u16 src1; s16 src2; u32 prod; u16 scaled; src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); src2 = ((rs2 >> (16 * byte)) & 0xffff); prod = src1 * src2; scaled = ((prod & 0x00ffff00) >> 8); /* Round up. */ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMULD8SUx16_OPF: case FMULD8ULx16_OPF: { unsigned long byte, ushift; rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = 0; ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; for (byte = 0; byte < 2; byte++) { u16 src1; s16 src2; u32 prod; u16 scaled; src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); src2 = ((rs2 >> (16 * byte)) & 0xffff); prod = src1 * src2; scaled = ((prod & 0x00ffff00) >> 8); /* Round up. 
*/ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << ((byte * 32UL) + 7UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } }; } static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val, i; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; switch (opf) { case FCMPGT16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a > b) rd_val |= 1 << i; } break; case FCMPGT32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffff; s32 b = (rs2 >> (i * 32)) & 0xffff; if (a > b) rd_val |= 1 << i; } break; case FCMPLE16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a <= b) rd_val |= 1 << i; } break; case FCMPLE32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffff; s32 b = (rs2 >> (i * 32)) & 0xffff; if (a <= b) rd_val |= 1 << i; } break; case FCMPNE16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a != b) rd_val |= 1 << i; } break; case FCMPNE32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffff; s32 b = (rs2 >> (i * 32)) & 0xffff; if (a != b) rd_val |= 1 << i; } break; case FCMPEQ16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a == b) rd_val |= 1 << i; } break; case FCMPEQ32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffff; s32 b = (rs2 >> (i * 32)) & 0xffff; if (a == b) rd_val |= 1 << i; } break; }; maybe_flush_windows(0, 0, RD(insn), 0); store_reg(regs, rd_val, RD(insn)); } /* Emulate the VIS instructions which are not implemented in * hardware on Niagara. 
*/ int vis_emul(struct pt_regs *regs, unsigned int insn) { unsigned long pc = regs->tpc; unsigned int opf; BUG_ON(regs->tstate & TSTATE_PRIV); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc)) return -EFAULT; opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; switch (opf) { default: return -EINVAL; /* Pixel Formatting Instructions. */ case FPACK16_OPF: case FPACK32_OPF: case FPACKFIX_OPF: case FEXPAND_OPF: case FPMERGE_OPF: pformat(regs, insn, opf); break; /* Partitioned Multiply Instructions */ case FMUL8x16_OPF: case FMUL8x16AU_OPF: case FMUL8x16AL_OPF: case FMUL8SUx16_OPF: case FMUL8ULx16_OPF: case FMULD8SUx16_OPF: case FMULD8ULx16_OPF: pmul(regs, insn, opf); break; /* Pixel Compare Instructions */ case FCMPGT16_OPF: case FCMPGT32_OPF: case FCMPLE16_OPF: case FCMPLE32_OPF: case FCMPNE16_OPF: case FCMPNE32_OPF: case FCMPEQ16_OPF: case FCMPEQ32_OPF: pcmp(regs, insn, opf); break; /* Edge Handling Instructions */ case EDGE8_OPF: case EDGE8N_OPF: case EDGE8L_OPF: case EDGE8LN_OPF: case EDGE16_OPF: case EDGE16N_OPF: case EDGE16L_OPF: case EDGE16LN_OPF: case EDGE32_OPF: case EDGE32N_OPF: case EDGE32L_OPF: case EDGE32LN_OPF: edge(regs, insn, opf); break; /* Pixel Component Distance */ case PDIST_OPF: pdist(regs, insn); break; /* Three-Dimensional Array Addressing Instructions */ case ARRAY8_OPF: case ARRAY16_OPF: case ARRAY32_OPF: array(regs, insn, opf); break; /* Byte Mask and Shuffle Instructions */ case BMASK_OPF: bmask(regs, insn); break; case BSHUFFLE_OPF: bshuffle(regs, insn); break; }; regs->tpc = regs->tnpc; regs->tnpc += 4; return 0; }
gpl-2.0
Kenthliu/linux
kernel/bpf/helpers.c
96
4946
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/bpf.h> #include <linux/rcupdate.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/ktime.h> #include <linux/sched.h> #include <linux/uidgid.h> /* If kernel subsystem is allowing eBPF programs to call this function, * inside its own verifier_ops->get_func_proto() callback it should return * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments * * Different map implementations will rely on rcu in map methods * lookup/update/delete, therefore eBPF programs must run under rcu lock * if program is allowed to access maps, so check rcu_read_lock_held in * all three functions. 
*/ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { /* verifier checked that R1 contains a valid pointer to bpf_map * and R2 points to a program stack and map->key_size bytes were * initialized */ struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; void *key = (void *) (unsigned long) r2; void *value; WARN_ON_ONCE(!rcu_read_lock_held()); value = map->ops->map_lookup_elem(map, key); /* lookup() returns either pointer to element value or NULL * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type */ return (unsigned long) value; } const struct bpf_func_proto bpf_map_lookup_elem_proto = { .func = bpf_map_lookup_elem, .gpl_only = false, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, }; static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; void *key = (void *) (unsigned long) r2; void *value = (void *) (unsigned long) r3; WARN_ON_ONCE(!rcu_read_lock_held()); return map->ops->map_update_elem(map, key, value, r4); } const struct bpf_func_proto bpf_map_update_elem_proto = { .func = bpf_map_update_elem, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, .arg3_type = ARG_PTR_TO_MAP_VALUE, .arg4_type = ARG_ANYTHING, }; static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; void *key = (void *) (unsigned long) r2; WARN_ON_ONCE(!rcu_read_lock_held()); return map->ops->map_delete_elem(map, key); } const struct bpf_func_proto bpf_map_delete_elem_proto = { .func = bpf_map_delete_elem, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, }; const struct bpf_func_proto bpf_get_prandom_u32_proto = { .func = bpf_user_rnd_u32, .gpl_only = false, .ret_type = RET_INTEGER, }; static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, 
u64 r3, u64 r4, u64 r5) { return raw_smp_processor_id(); } const struct bpf_func_proto bpf_get_smp_processor_id_proto = { .func = bpf_get_smp_processor_id, .gpl_only = false, .ret_type = RET_INTEGER, }; static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { /* NMI safe access to clock monotonic */ return ktime_get_mono_fast_ns(); } const struct bpf_func_proto bpf_ktime_get_ns_proto = { .func = bpf_ktime_get_ns, .gpl_only = true, .ret_type = RET_INTEGER, }; static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct task_struct *task = current; if (!task) return -EINVAL; return (u64) task->tgid << 32 | task->pid; } const struct bpf_func_proto bpf_get_current_pid_tgid_proto = { .func = bpf_get_current_pid_tgid, .gpl_only = false, .ret_type = RET_INTEGER, }; static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct task_struct *task = current; kuid_t uid; kgid_t gid; if (!task) return -EINVAL; current_uid_gid(&uid, &gid); return (u64) from_kgid(&init_user_ns, gid) << 32 | from_kuid(&init_user_ns, uid); } const struct bpf_func_proto bpf_get_current_uid_gid_proto = { .func = bpf_get_current_uid_gid, .gpl_only = false, .ret_type = RET_INTEGER, }; static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5) { struct task_struct *task = current; char *buf = (char *) (long) r1; if (!task) return -EINVAL; strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm))); return 0; } const struct bpf_func_proto bpf_get_current_comm_proto = { .func = bpf_get_current_comm, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_STACK, .arg2_type = ARG_CONST_STACK_SIZE, };
gpl-2.0
Pingmin/linux
arch/powerpc/platforms/embedded6xx/gamecube.c
352
1819
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/embedded6xx/gamecube.c * * Nintendo GameCube board-specific support * Copyright (C) 2004-2009 The GameCube Linux Team * Copyright (C) 2007,2008,2009 Albert Herranz */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/kexec.h> #include <linux/seq_file.h> #include <linux/of_platform.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/prom.h> #include <asm/time.h> #include <asm/udbg.h> #include "flipper-pic.h" #include "usbgecko_udbg.h" static void __noreturn gamecube_spin(void) { /* spin until power button pressed */ for (;;) cpu_relax(); } static void __noreturn gamecube_restart(char *cmd) { local_irq_disable(); flipper_platform_reset(); gamecube_spin(); } static void gamecube_power_off(void) { local_irq_disable(); gamecube_spin(); } static void __noreturn gamecube_halt(void) { gamecube_restart(NULL); } static int __init gamecube_probe(void) { if (!of_machine_is_compatible("nintendo,gamecube")) return 0; pm_power_off = gamecube_power_off; ug_udbg_init(); return 1; } static void gamecube_shutdown(void) { flipper_quiesce(); } define_machine(gamecube) { .name = "gamecube", .probe = gamecube_probe, .restart = gamecube_restart, .halt = gamecube_halt, .init_IRQ = flipper_pic_probe, .get_irq = flipper_pic_get_irq, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, .machine_shutdown = gamecube_shutdown, }; static const struct of_device_id gamecube_of_bus[] = { { .compatible = "nintendo,flipper", }, { }, }; static int __init gamecube_device_probe(void) { if (!machine_is(gamecube)) return 0; of_platform_bus_probe(NULL, gamecube_of_bus, NULL); return 0; } device_initcall(gamecube_device_probe);
gpl-2.0
bl4ckic3/linux
drivers/net/wireless/ath/ath9k/ar9003_mci.c
864
43558
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/export.h> #include "hw.h" #include "hw-ops.h" #include "ar9003_phy.h" #include "ar9003_mci.h" #include "ar9003_aic.h" static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah) { REG_RMW_FIELD(ah, AR_MCI_COMMAND2, AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1); udelay(1); REG_RMW_FIELD(ah, AR_MCI_COMMAND2, AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0); } static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address, u32 bit_position, int time_out) { struct ath_common *common = ath9k_hw_common(ah); while (time_out) { if (!(REG_READ(ah, address) & bit_position)) { udelay(10); time_out -= 10; if (time_out < 0) break; else continue; } REG_WRITE(ah, address, bit_position); if (address != AR_MCI_INTERRUPT_RX_MSG_RAW) break; if (bit_position & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) ar9003_mci_reset_req_wakeup(ah); if (bit_position & (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING | AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE); REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_RX_MSG); break; } if (time_out <= 0) { ath_dbg(common, MCI, "MCI Wait for Reg 0x%08x = 0x%08x timeout\n", address, bit_position); ath_dbg(common, MCI, "MCI INT_RAW = 0x%08x, 
RX_MSG_RAW = 0x%08x\n", REG_READ(ah, AR_MCI_INTERRUPT_RAW), REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW)); time_out = 0; } return time_out; }
/* Send MCI_REMOTE_RESET (fixed pattern) to reset BT's MCI state, then settle 5us. */
static void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done) { u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00}; ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16, wait_done, false); udelay(5); }
/* Hand the shared LNA over to BT (payload 0). */
static void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done) { u32 payload = 0x00000000; ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1, wait_done, false); }
/* Ask BT to wake up (no payload, timestamp disabled), then settle 5us. */
static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done) { ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP, NULL, 0, wait_done, false); udelay(5); }
/* Announce to BT that WLAN is awake. */
static void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done) { ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP, NULL, 0, wait_done, false); }
/* Take the shared LNA back from BT (payload 0x70000000). */
static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done) { u32 payload = 0x70000000; ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1, wait_done, false); }
/* Announce to BT that WLAN is going to sleep. */
static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done) { ar9003_mci_send_message(ah, MCI_SYS_SLEEPING, MCI_FLAG_DISABLE_TIMESTAMP, NULL, 0, wait_done, false); }
/* GPM: ask BT for its coex version, unless already known or BT is asleep. */
static void ar9003_mci_send_coex_version_query(struct ath_hw *ah, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; if (mci->bt_version_known || (mci->bt_state == MCI_BT_SLEEP)) return; MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY); ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); }
/* GPM: report the WLAN coex version (major/minor bytes) to BT. */
static void ar9003_mci_send_coex_version_response(struct ath_hw *ah, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_RESPONSE); *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) = mci->wlan_ver_major; *(((u8
*)payload) + MCI_GPM_COEX_B_MINOR_VERSION) = mci->wlan_ver_minor; ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); }
/* GPM: push the cached WLAN channel map to BT, then mark the buffer consumed (0xff/0xff). */
static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 *payload = &mci->wlan_channels[0]; if (!mci->wlan_channels_update || (mci->bt_state == MCI_BT_SLEEP)) return; MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS); ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff); }
/* GPM: query BT status; query_btinfo tracks the full-info/topology variants. */
static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah, bool wait_done, u8 query_type) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; bool query_btinfo; if (mci->bt_state == MCI_BT_SLEEP) return; query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO | MCI_GPM_COEX_QUERY_BT_TOPOLOGY)); MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY); *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type; /* * If bt_status_query message is not sent successfully, * then need_flush_btinfo should be set again.
*/ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true)) { if (query_btinfo) mci->need_flush_btinfo = true; } if (query_btinfo) mci->query_bt = false; }
/* GPM: halt or unhalt BT's GPM traffic, updating the local bookkeeping flags. */
static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM); if (halt) { mci->query_bt = true; /* Send next unhalt no matter halt sent or not */ mci->unhalt_bt_gpm = true; mci->need_flush_btinfo = true; *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) = MCI_GPM_COEX_BT_GPM_HALT; } else *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) = MCI_GPM_COEX_BT_GPM_UNHALT; ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true); }
/* Bring up the WLAN<->BT handshake with MCI interrupts masked: remote-reset BT, wake it, exchange SYS_WAKING, mask BT priority interrupts and (shared-antenna 2G) hand over the LNA. */
static void ar9003_mci_prep_interface(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 saved_mci_int_en; u32 mci_timeout = 150; mci->bt_state = MCI_BT_SLEEP; saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN); REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW)); REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, REG_READ(ah, AR_MCI_INTERRUPT_RAW)); ar9003_mci_remote_reset(ah, true); ar9003_mci_send_req_wake(ah, true); if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) goto clear_redunt; mci->bt_state = MCI_BT_AWAKE; /* * we don't need to send more remote_reset at this moment. * If BT receive first remote_reset, then BT HW will * be cleaned up and will be able to receive req_wake * and BT HW will respond sys_waking. * In this case, WLAN will receive BT's HW sys_waking. * Otherwise, if BT SW missed initial remote_reset, * that remote_reset will still clean up BT MCI RX, * and the req_wake will wake BT up, * and BT SW will respond this req_wake with a remote_reset and * sys_waking.
In this case, WLAN will receive BT's SW * sys_waking. In either case, BT's RX is cleaned up. So we * don't need to reply BT's remote_reset now, if any. * Similarly, if in any case, WLAN can receive BT's sys_waking, * that means WLAN's RX is also fine. */ ar9003_mci_send_sys_waking(ah, true); udelay(10); /* * Set BT priority interrupt value to be 0xff to * avoid having too many BT PRIORITY interrupts. */ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF); REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF); REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF); REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF); REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF); /* * A contention reset will be received after send out * sys_waking. Also BT priority interrupt bits will be set. * Clear those bits before the next step. */ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_CONT_RST); REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI); if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) { ar9003_mci_send_lna_transfer(ah, true); udelay(5); } if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) { if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_LNA_INFO, mci_timeout)) ath_dbg(common, MCI, "MCI WLAN has control over the LNA & BT obeys it\n"); else ath_dbg(common, MCI, "MCI BT didn't respond to LNA_TRANS\n"); } clear_redunt: /* Clear the extra redundant SYS_WAKING from BT */ if ((mci->bt_state == MCI_BT_AWAKE) && (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) && (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) { REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING); REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE); } REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en); }
/* Before WLAN full sleep: halt BT GPM if BT is still awake, then mark MCI not ready. */
void ar9003_mci_set_full_sleep(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; if (ar9003_mci_state(ah,
MCI_STATE_ENABLE) && (mci->bt_state != MCI_BT_SLEEP) && !mci->halted_bt_gpm) { ar9003_mci_send_coex_halt_bt_gpm(ah, true, true); } mci->ready = false; }
/* Mask both MCI interrupt enable registers. */
static void ar9003_mci_disable_interrupt(struct ath_hw *ah) { REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0); }
/* Restore the default MCI interrupt masks. */
static void ar9003_mci_enable_interrupt(struct ath_hw *ah) { REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT); REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, AR_MCI_INTERRUPT_RX_MSG_DEFAULT); }
/* True only when ALL bits in 'ints' are set in RX_MSG_RAW. */
static bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints) { u32 intr; intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW); return ((intr & ints) == ints); }
/* Hand the cached interrupt causes to the caller and clear the cache. */
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, u32 *rx_msg_intr) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; *raw_intr = mci->raw_intr; *rx_msg_intr = mci->rx_msg_intr; /* Clean int bits after the values are read. */ mci->raw_intr = 0; mci->rx_msg_intr = 0; } EXPORT_SYMBOL(ar9003_mci_get_interrupt);
/* ISR path: read+ack MCI interrupt causes into the cache; 0xdeadbeef means the register read itself failed, so nothing is acked. */
void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 raw_intr, rx_msg_intr; rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW); raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW); if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef)) { ath_dbg(common, MCI, "MCI gets 0xdeadbeef during int processing\n"); } else { mci->rx_msg_intr |= rx_msg_intr; mci->raw_intr |= raw_intr; *masked |= ATH9K_INT_MCI; if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) mci->cont_status = REG_READ(ah, AR_MCI_CONT_STATUS); REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr); REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr); } }
/* Record the current band; flag update_2g5g only when the band actually changed. */
static void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; if (!mci->update_2g5g && (mci->is_2g != is_2g)) mci->update_2g5g = true; mci->is_2g = is_2g; }
/* A GPM slot (16 bytes each, index<<4 into gpm_buf) is valid unless it holds the reserved recycle pattern. */
static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32
msg_index) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 *payload; u32 recv_type, offset; if (msg_index == MCI_GPM_INVALID) return false; offset = msg_index << 4; payload = (u32 *)(mci->gpm_buf + offset); recv_type = MCI_GPM_TYPE(payload); if (recv_type == MCI_GPM_RSVD_PATTERN) return false; return true; }
/* Route MCI/BT observation signals onto GPIOs per the configured observation mode, then select the debug/observation buses. */
static void ar9003_mci_observation_set_up(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) { ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA); ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK); ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) { ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX); ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX); ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) { ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX); ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX); ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA); ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK); } else return; REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_DS_JTAG_DISABLE, 1); REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_WLAN_UART_INTF_EN, 0); REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL, ATH_MCI_CONFIG_MCI_OBS_GPIO); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1); REG_WRITE(ah, AR_OBS, 0x4b); REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03); REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01); REG_RMW_FIELD(ah, AR_MACMISC,
AR_MACMISC_MISC_OBS_BUS_LSB, 0x02); REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03); REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07); }
/* GPM: send BT_UPDATE_FLAGS carrying opcode + a 32-bit flag word spread over 4 little-endian payload bytes. */
static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done, u8 opcode, u32 bt_flags) { u32 pld[4] = {0, 0, 0, 0}; MCI_GPM_SET_TYPE_OPCODE(pld, MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS); *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode; *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF; *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF; *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF; *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF; return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, wait_done, true); }
/* Re-sync cached bt_state from HW; if BT is awake, re-send version query, channel map, and any pending unhalt. */
static void ar9003_mci_sync_bt_state(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 cur_bt_state; cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP); if (mci->bt_state != cur_bt_state) mci->bt_state = cur_bt_state; if (mci->bt_state != MCI_BT_SLEEP) { ar9003_mci_send_coex_version_query(ah, true); ar9003_mci_send_coex_wlan_channels(ah, true); if (mci->unhalt_bt_gpm == true) ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); } }
/* Periodic check: re-sync BT state, re-run the 2G/5G switch, and flag a BT-info flush if BT is awake and queried. */
void ar9003_mci_check_bt(struct ath_hw *ah) { struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; if (!mci_hw->ready) return; /* * check BT state again to make * sure it's not changed.
*/ ar9003_mci_sync_bt_state(ah); ar9003_mci_2g5g_switch(ah, true); if ((mci_hw->bt_state == MCI_BT_AWAKE) && (mci_hw->query_bt == true)) { mci_hw->need_flush_btinfo = true; } }
/* Handle out-of-band COEX_AGENT GPMs received while waiting for something else (version query/response, status query, profile/status updates). */
static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type, u8 gpm_opcode, u32 *p_gpm) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u8 *p_data = (u8 *) p_gpm; if (gpm_type != MCI_GPM_COEX_AGENT) return; switch (gpm_opcode) { case MCI_GPM_COEX_VERSION_QUERY: ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n"); ar9003_mci_send_coex_version_response(ah, true); break; case MCI_GPM_COEX_VERSION_RESPONSE: ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n"); mci->bt_ver_major = *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION); mci->bt_ver_minor = *(p_data + MCI_GPM_COEX_B_MINOR_VERSION); mci->bt_version_known = true; ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n", mci->bt_ver_major, mci->bt_ver_minor); break; case MCI_GPM_COEX_STATUS_QUERY: ath_dbg(common, MCI, "MCI Recv GPM COEX Status Query = 0x%02X\n", *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP)); mci->wlan_channels_update = true; ar9003_mci_send_coex_wlan_channels(ah, true); break; case MCI_GPM_COEX_BT_PROFILE_INFO: mci->query_bt = true; ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n"); break; case MCI_GPM_COEX_BT_STATUS_UPDATE: mci->query_bt = true; ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n", *(p_gpm + 3)); break; default: break; } }
/* Wait (up to time_out us) for a specific GPM type/opcode; recycles slots, routes mismatches to process_gpm_extra, handles the BT cal-request handshake, then drains the remaining queue. Returns remaining time, 0 on timeout. */
static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, u8 gpm_opcode, int time_out) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 *p_gpm = NULL, mismatch = 0, more_data; u32 offset; u8 recv_type = 0, recv_opcode = 0; bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE); more_data = time_out ?
MCI_GPM_NOMORE : MCI_GPM_MORE; while (time_out > 0) { if (p_gpm) { MCI_GPM_RECYCLE(p_gpm); p_gpm = NULL; } if (more_data != MCI_GPM_MORE) time_out = ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_GPM, time_out); if (!time_out) break; offset = ar9003_mci_get_next_gpm_offset(ah, &more_data); if (offset == MCI_GPM_INVALID) continue; p_gpm = (u32 *) (mci->gpm_buf + offset); recv_type = MCI_GPM_TYPE(p_gpm); recv_opcode = MCI_GPM_OPCODE(p_gpm); if (MCI_GPM_IS_CAL_TYPE(recv_type)) { if (recv_type == gpm_type) { if ((gpm_type == MCI_GPM_BT_CAL_DONE) && !b_is_bt_cal_done) { gpm_type = MCI_GPM_BT_CAL_GRANT; continue; } break; } } else if ((recv_type == gpm_type) && (recv_opcode == gpm_opcode)) break; /* * check if it's cal_grant * * When we're waiting for cal_grant in reset routine, * it's possible that BT sends out cal_request at the * same time. Since BT's calibration doesn't happen * that often, we'll let BT completes calibration then * we continue to wait for cal_grant from BT. * Orginal: Wait BT_CAL_GRANT. * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait * BT_CAL_DONE -> Wait BT_CAL_GRANT.
*/ if ((gpm_type == MCI_GPM_BT_CAL_GRANT) && (recv_type == MCI_GPM_BT_CAL_REQ)) { u32 payload[4] = {0, 0, 0, 0}; gpm_type = MCI_GPM_BT_CAL_DONE; MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT); ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, false, false); continue; } else { ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n", *(p_gpm + 1)); mismatch++; ar9003_mci_process_gpm_extra(ah, recv_type, recv_opcode, p_gpm); } } if (p_gpm) { MCI_GPM_RECYCLE(p_gpm); p_gpm = NULL; } if (time_out <= 0) time_out = 0; while (more_data == MCI_GPM_MORE) { offset = ar9003_mci_get_next_gpm_offset(ah, &more_data); if (offset == MCI_GPM_INVALID) break; p_gpm = (u32 *) (mci->gpm_buf + offset); recv_type = MCI_GPM_TYPE(p_gpm); recv_opcode = MCI_GPM_OPCODE(p_gpm); if (!MCI_GPM_IS_CAL_TYPE(recv_type)) ar9003_mci_process_gpm_extra(ah, recv_type, recv_opcode, p_gpm); MCI_GPM_RECYCLE(p_gpm); } return time_out; }
/* Reset entry point: if BT requested calibration (MCI_BT_CAL_START), grant it and wait up to 25ms for BT_CAL_DONE with MCI interrupts masked. Returns true when the cal handshake was performed. */
bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan)); if (mci_hw->bt_state != MCI_BT_CAL_START) return false; mci_hw->bt_state = MCI_BT_CAL; /* * MCI FIX: disable mci interrupt here. This is to avoid * SW_MSG_DONE or RX_MSG bits to trigger MCI_INT and * lead to mci_intr reentry.
*/ ar9003_mci_disable_interrupt(ah); MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT); ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, true, false); /* Wait BT calibration to be completed for 25ms */ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE, 0, 25000)) ath_dbg(common, MCI, "MCI BT_CAL_DONE received\n"); else ath_dbg(common, MCI, "MCI BT_CAL_DONE not received\n"); mci_hw->bt_state = MCI_BT_AWAKE; /* MCI FIX: enable mci interrupt here */ ar9003_mci_enable_interrupt(ah); return true; }
/* Reset exit point: if BT woke up during a 2G calibration, redo the wake handshake and recalibrate. Returns 0 on success, -EIO when recalibration fails. */
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, struct ath9k_hw_cal_data *caldata) { struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; if (!mci_hw->ready) return 0; if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP)) goto exit; if (!ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) && !ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) goto exit; /* * BT is sleeping. Check if BT wakes up during * WLAN calibration. If BT wakes up during * WLAN calibration, need to go through all * message exchanges again and recal.
*/ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET | AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)); ar9003_mci_remote_reset(ah, true); ar9003_mci_send_sys_waking(ah, true); udelay(1); if (IS_CHAN_2GHZ(chan)) ar9003_mci_send_lna_transfer(ah, true); mci_hw->bt_state = MCI_BT_AWAKE; REG_CLR_BIT(ah, AR_PHY_TIMING4, 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT); if (caldata) { clear_bit(TXIQCAL_DONE, &caldata->cal_flags); clear_bit(TXCLCAL_DONE, &caldata->cal_flags); clear_bit(RTT_DONE, &caldata->cal_flags); } if (!ath9k_hw_init_cal(ah, chan)) return -EIO; REG_SET_BIT(ah, AR_PHY_TIMING4, 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT); exit: ar9003_mci_enable_interrupt(ah); return 0; }
/* Silence BT before resetting MCI RX: disable all MCI messages/weights, freeze LNA updates, then (shared antenna) take the LNA and announce sleep. */
static void ar9003_mci_mute_bt(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; /* disable all MCI messages */ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000); REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff); REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff); REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff); REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff); REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); /* wait pending HW messages to flush out */ udelay(10); /* * Send LNA_TAKE and SYS_SLEEPING when * 1. reset not after resuming from full sleep * 2.
before reset MCI RX, to quiet BT and avoid MCI RX misalignment */ if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) { ar9003_mci_send_lna_take(ah, true); udelay(5); } ar9003_mci_send_sys_sleeping(ah, true); }
/* Enable/disable one-step-look-ahead scheduling; when enabling, also program the optional aggregation threshold from mci->config. */
static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 thresh; if (!enable) { REG_CLR_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); return; } REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_HW_BASED, 1); REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_MEM_BASED, 1); if (AR_SREV_9565(ah)) REG_RMW_FIELD(ah, AR_MCI_MISC, AR_MCI_MISC_HW_FIX_EN, 1); if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) { thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_AGGR_THRESH, thresh); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1); } else REG_RMW_FIELD(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1); }
/* AR9565 only: enable/disable the MCI debug statistics counters per config. */
static void ar9003_mci_stat_setup(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; if (!AR_SREV_9565(ah)) return; if (mci->config & ATH_MCI_CONFIG_MCI_STAT_DBG) { REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL, AR_MCI_DBG_CNT_CTRL_ENABLE, 1); REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL, AR_MCI_DBG_CNT_CTRL_BT_LINKID, MCI_STAT_ALL_BT_LINKID); } else { REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL, AR_MCI_DBG_CNT_CTRL_ENABLE, 0); } }
/* AR9565, one shared antenna: PA/LNA shared, one chain. */
static void ar9003_mci_set_btcoex_ctrl_9565_1ANT(struct ath_hw *ah) { u32 regval; regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | SM(1, AR_BTCOEX_CTRL_PA_SHARED) | SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) | SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_TX_CHAIN_MASK,
0x1); REG_WRITE(ah, AR_BTCOEX_CTRL, regval); }
/* AR9565, two antennas: nothing shared with BT. */
static void ar9003_mci_set_btcoex_ctrl_9565_2ANT(struct ath_hw *ah) { u32 regval; regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | SM(0, AR_BTCOEX_CTRL_PA_SHARED) | SM(0, AR_BTCOEX_CTRL_LNA_SHARED) | SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x0); REG_WRITE(ah, AR_BTCOEX_CTRL, regval); }
/* AR9462: shared PA/LNA, two antennas, both RX chains. */
static void ar9003_mci_set_btcoex_ctrl_9462(struct ath_hw *ah) { u32 regval; regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) | SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | SM(1, AR_BTCOEX_CTRL_PA_SHARED) | SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) | SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) | SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); REG_WRITE(ah, AR_BTCOEX_CTRL, regval); }
/* Full MCI bring-up: program GPM/sched DMA addresses, coex control per chip/antenna config, OSLA, timeouts, TX/RX path reset, GPM offset init, then prep the BT interface. Returns 0 or -EINVAL if BTCOEX_CTRL reads back dead. */
int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, bool is_full_sleep) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 regval, i; ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n", is_full_sleep, is_2g); if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) { ath_err(common, "BTCOEX control register is dead\n"); return -EINVAL; } /* Program MCI DMA related registers */ REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr); REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len); REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr); /* * To avoid MCI state machine be affected by incoming remote MCI msgs, * MCI mode will be enabled later, right before reset the MCI TX and RX.
*/ if (AR_SREV_9565(ah)) { u8 ant = MS(mci->config, ATH_MCI_CONFIG_ANT_ARCH); if (ant == ATH_MCI_ANT_ARCH_1_ANT_PA_LNA_SHARED) ar9003_mci_set_btcoex_ctrl_9565_1ANT(ah); else ar9003_mci_set_btcoex_ctrl_9565_2ANT(ah); } else { ar9003_mci_set_btcoex_ctrl_9462(ah); } if (is_2g && !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) ar9003_mci_osla_setup(ah, true); else ar9003_mci_osla_setup(ah, false); REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_SPDT_ENABLE); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3, AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 0); REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); /* Set the time out to 3.125ms (5 BT slots) */ REG_RMW_FIELD(ah, AR_BTCOEX_WL_LNA, AR_BTCOEX_WL_LNA_TIMEOUT, 0x3D090); /* concurrent tx priority */ if (mci->config & ATH_MCI_CONFIG_CONCUR_TX) { REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE, 0); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_TXPWR_THRESH, 0x7f); REG_RMW_FIELD(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_REDUCE_TXPWR, 0); for (i = 0; i < 8; i++) REG_WRITE(ah, AR_BTCOEX_MAX_TXPWR(i), 0x7f7f7f7f); } regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV); REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval); REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN); /* Resetting the Rx and Tx paths of MCI */ regval = REG_READ(ah, AR_MCI_COMMAND2); regval |= SM(1, AR_MCI_COMMAND2_RESET_TX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); udelay(1); regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); if (is_full_sleep) { ar9003_mci_mute_bt(ah); udelay(100); } /* Check pending GPM msg before MCI Reset Rx */ ar9003_mci_check_gpm_offset(ah); regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); udelay(1); regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); /* Init GPM offset after MCI Reset Rx */ ar9003_mci_state(ah,
MCI_STATE_INIT_GPM_OFFSET); REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM))); if (MCI_ANT_ARCH_PA_LNA_SHARED(mci)) REG_CLR_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); else REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); ar9003_mci_observation_set_up(ah); mci->ready = true; ar9003_mci_prep_interface(ah); ar9003_mci_stat_setup(ah); if (en_int) ar9003_mci_enable_interrupt(ah); if (ath9k_hw_is_aic_enabled(ah)) ar9003_aic_start_normal(ah); return 0; }
/* Tear down the BT side: mask MCI interrupts, optionally mute BT and kill BTCOEX_CTRL, and mark BT asleep / MCI not ready. */
void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep) { struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; ar9003_mci_disable_interrupt(ah); if (mci_hw->ready && !save_fullsleep) { ar9003_mci_mute_bt(ah); udelay(20); REG_WRITE(ah, AR_BTCOEX_CTRL, 0); } mci_hw->bt_state = MCI_BT_SLEEP; mci_hw->ready = false; }
/* Send the pending 2G/5G coex flag deltas (clear mask then set mask) to BT. */
static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 new_flags, to_set, to_clear; if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP)) return; if (mci->is_2g) { new_flags = MCI_2G_FLAGS; to_clear = MCI_2G_FLAGS_CLEAR_MASK; to_set = MCI_2G_FLAGS_SET_MASK; } else { new_flags = MCI_5G_FLAGS; to_clear = MCI_5G_FLAGS_CLEAR_MASK; to_set = MCI_5G_FLAGS_SET_MASK; } if (to_clear) ar9003_mci_send_coex_bt_flags(ah, wait_done, MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear); if (to_set) ar9003_mci_send_coex_bt_flags(ah, wait_done, MCI_GPM_COEX_BT_FLAGS_SET, to_set); }
/* Track whether a COEX GPM failed to send (queue=true) or went out (queue=false), so the matching update flag can be replayed later. */
static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header, u32 *payload, bool queue) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u8 type, opcode; /* check if the message is to be queued */ if (header != MCI_GPM) return; type = MCI_GPM_TYPE(payload); opcode = MCI_GPM_OPCODE(payload); if (type != MCI_GPM_COEX_AGENT) return; switch (opcode) { case MCI_GPM_COEX_BT_UPDATE_FLAGS: if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
MCI_GPM_COEX_BT_FLAGS_READ) break; mci->update_2g5g = queue; break; case MCI_GPM_COEX_WLAN_CHANNELS: mci->wlan_channels_update = queue; break; case MCI_GPM_COEX_HALT_BT_GPM: if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) == MCI_GPM_COEX_BT_GPM_UNHALT) { mci->unhalt_bt_gpm = queue; if (!queue) mci->halted_bt_gpm = false; } if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) == MCI_GPM_COEX_BT_GPM_HALT) { mci->halted_bt_gpm = !queue; } break; default: break; } }
/* Apply the pending band change: on 2G give BT the LNA and re-enable LNA updates/OSLA; on 5G take the LNA back and disable them. */
void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; if (!mci->update_2g5g && !force) return; if (mci->is_2g) { ar9003_mci_send_2g5g_status(ah, true); ar9003_mci_send_lna_transfer(ah, true); udelay(5); REG_CLR_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) ar9003_mci_osla_setup(ah, true); if (AR_SREV_9462(ah)) REG_WRITE(ah, AR_SELFGEN_MASK, 0x02); } else { ar9003_mci_send_lna_take(ah, true); udelay(5); REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); ar9003_mci_osla_setup(ah, false); ar9003_mci_send_2g5g_status(ah, true); } }
/* Core TX path: write payload words + COMMAND0, optionally wait for SW_MSG_DONE. Messages that cannot go out (MCI off, BT asleep, timeout) are queued for replay. Returns true when the message was sent. */
bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, u32 *payload, u8 len, bool wait_done, bool check_bt) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; bool msg_sent = false; u32 regval; u32 saved_mci_int_en; int i; saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN); regval = REG_READ(ah, AR_BTCOEX_CTRL); if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) { ath_dbg(common, MCI, "MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n", header, (ah->power_mode == ATH9K_PM_FULL_SLEEP) ?
1 : 0); ar9003_mci_queue_unsent_gpm(ah, header, payload, true); return false; } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) { ath_dbg(common, MCI, "MCI Don't send message 0x%x. BT is in sleep state\n", header); ar9003_mci_queue_unsent_gpm(ah, header, payload, true); return false; } if (wait_done) REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0); /* Need to clear SW_MSG_DONE raw bit before wait */ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, (AR_MCI_INTERRUPT_SW_MSG_DONE | AR_MCI_INTERRUPT_MSG_FAIL_MASK)); if (payload) { for (i = 0; (i * 4) < len; i++) REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4), *(payload + i)); } REG_WRITE(ah, AR_MCI_COMMAND0, (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP), AR_MCI_COMMAND0_DISABLE_TIMESTAMP) | SM(len, AR_MCI_COMMAND0_LEN) | SM(header, AR_MCI_COMMAND0_HEADER))); if (wait_done && !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_SW_MSG_DONE, 500))) ar9003_mci_queue_unsent_gpm(ah, header, payload, true); else { ar9003_mci_queue_unsent_gpm(ah, header, payload, false); msg_sent = true; } if (wait_done) REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en); return msg_sent; } EXPORT_SYMBOL(ar9003_mci_send_message);
/* WLAN calibration start: send WLAN_CAL_REQ and wait up to 50ms for BT_CAL_GRANT; no grant makes the calibration non-reusable. */
void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 pld[4] = {0, 0, 0, 0}; if ((mci_hw->bt_state != MCI_BT_AWAKE) || (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) return; MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ); pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++; ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) { ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n"); } else { *is_reusable = false; ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n"); } }
/* WLAN calibration end: notify BT with WLAN_CAL_DONE and the next sequence number. */
void ar9003_mci_init_cal_done(struct ath_hw *ah) { struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 pld[4] = {0, 0, 0, 0}; if ((mci_hw->bt_state !=
MCI_BT_AWAKE) || (mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) return; MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE); pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++; ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false); }
/* Record the GPM buffer / schedule table DMA addresses, then run a full MCI reset. */
int ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, u16 len, u32 sched_addr) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; mci->gpm_addr = gpm_addr; mci->gpm_buf = gpm_buf; mci->gpm_len = len; mci->sched_addr = sched_addr; return ar9003_mci_reset(ah, true, true, true); } EXPORT_SYMBOL(ar9003_mci_setup);
/* Disable MCI/Jupiter mode and mask all MCI interrupts. */
void ar9003_mci_cleanup(struct ath_hw *ah) { /* Turn off MCI and Jupiter mode. */ REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00); ar9003_mci_disable_interrupt(ah); } EXPORT_SYMBOL(ar9003_mci_cleanup);
/* Multiplexed state query/command entry point, dispatched on state_type; most cases either read HW state or kick a GPM exchange. Returns a case-specific value (0 when the case returns nothing). */
u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 value = 0, tsf; u8 query_type; switch (state_type) { case MCI_STATE_ENABLE: if (mci->ready) { value = REG_READ(ah, AR_BTCOEX_CTRL); if ((value == 0xdeadbeef) || (value == 0xffffffff)) value = 0; } value &= AR_BTCOEX_CTRL_MCI_MODE_EN; break; case MCI_STATE_INIT_GPM_OFFSET: value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); if (value < mci->gpm_len) mci->gpm_idx = value; else mci->gpm_idx = 0; break; case MCI_STATE_LAST_SCHD_MSG_OFFSET: value = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_LAST_SCHD_MSG_INDEX); /* Make it in bytes */ value <<= 4; break; case MCI_STATE_REMOTE_SLEEP: value = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP) ?
MCI_BT_SLEEP : MCI_BT_AWAKE; break; case MCI_STATE_SET_BT_AWAKE: mci->bt_state = MCI_BT_AWAKE; ar9003_mci_send_coex_version_query(ah, true); ar9003_mci_send_coex_wlan_channels(ah, true); if (mci->unhalt_bt_gpm) ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); ar9003_mci_2g5g_switch(ah, false); break; case MCI_STATE_RESET_REQ_WAKE: ar9003_mci_reset_req_wakeup(ah); mci->update_2g5g = true; if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK) { /* Check if we still have control of the GPIOs */ if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) & ATH_MCI_CONFIG_MCI_OBS_GPIO) != ATH_MCI_CONFIG_MCI_OBS_GPIO) { ar9003_mci_observation_set_up(ah); } } break; case MCI_STATE_SEND_WLAN_COEX_VERSION: ar9003_mci_send_coex_version_response(ah, true); break; case MCI_STATE_SEND_VERSION_QUERY: ar9003_mci_send_coex_version_query(ah, true); break; case MCI_STATE_SEND_STATUS_QUERY: query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY; ar9003_mci_send_coex_bt_status_query(ah, true, query_type); break; case MCI_STATE_RECOVER_RX: tsf = ath9k_hw_gettsf32(ah); if ((tsf - mci->last_recovery) <= MCI_RECOVERY_DUR_TSF) { ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) ignore Rx recovery\n"); break; } ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) RECOVER RX\n"); mci->last_recovery = tsf; ar9003_mci_prep_interface(ah); mci->query_bt = true; mci->need_flush_btinfo = true; ar9003_mci_send_coex_wlan_channels(ah, true); ar9003_mci_2g5g_switch(ah, false); break; case MCI_STATE_NEED_FTP_STOMP: value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); break; case MCI_STATE_NEED_FLUSH_BT_INFO: value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ?
1 : 0; mci->need_flush_btinfo = false; break; case MCI_STATE_AIC_CAL: if (ath9k_hw_is_aic_enabled(ah)) value = ar9003_aic_calibration(ah); break; case MCI_STATE_AIC_START: if (ath9k_hw_is_aic_enabled(ah)) ar9003_aic_start_normal(ah); break; case MCI_STATE_AIC_CAL_RESET: if (ath9k_hw_is_aic_enabled(ah)) value = ar9003_aic_cal_reset(ah); break; case MCI_STATE_AIC_CAL_SINGLE: if (ath9k_hw_is_aic_enabled(ah)) value = ar9003_aic_calibration_single(ah); break; default: break; } return value; } EXPORT_SYMBOL(ar9003_mci_state);
/* Give LNA and SPDT control to BT and force a 2g5g status update (and another at next scan). */
void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; ath_dbg(common, MCI, "Give LNA and SPDT control to BT\n"); ar9003_mci_send_lna_take(ah, true); udelay(50); REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); mci->is_2g = false; mci->update_2g5g = true; ar9003_mci_send_2g5g_status(ah, true); /* Force another 2g5g update at next scanning */ mci->update_2g5g = true; }
/* Power-up path: poll CTRL2/DIAG_SW until readable (0xdeadbeef = dead read), temporarily set the wake bits, sample LNA control + BT sleep, restore registers, then pulse BTCOEX_RC if BT sleeps while LNA ctrl is 2. */
void ar9003_mci_set_power_awake(struct ath_hw *ah) { u32 btcoex_ctrl2, diag_sw; int i; u8 lna_ctrl, bt_sleep; for (i = 0; i < AH_WAIT_TIMEOUT; i++) { btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2); if (btcoex_ctrl2 != 0xdeadbeef) break; udelay(AH_TIME_QUANTUM); } REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23))); for (i = 0; i < AH_WAIT_TIMEOUT; i++) { diag_sw = REG_READ(ah, AR_DIAG_SW); if (diag_sw != 0xdeadbeef) break; udelay(AH_TIME_QUANTUM); } REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18))); lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3; bt_sleep = MS(REG_READ(ah, AR_MCI_RX_STATUS), AR_MCI_RX_REMOTE_SLEEP); REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2); REG_WRITE(ah, AR_DIAG_SW, diag_sw); if (bt_sleep && (lna_ctrl == 2)) { REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1); REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1); udelay(50); } }
/* Detect a stale cached GPM write pointer (before MAC warm reset / MCI Reset Rx) and force a flush + re-query when it mismatches HW. */
void ar9003_mci_check_gpm_offset(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci =
&ah->btcoex_hw.mci; u32 offset; /* * This should only be called before "MAC Warm Reset" or "MCI Reset Rx". */ offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); if (mci->gpm_idx == offset) return; ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n", mci->gpm_idx, offset); mci->query_bt = true; mci->need_flush_btinfo = true; mci->gpm_idx = 0; }
/* Walk the circular GPM buffer from the cached read index toward the HW write pointer, skipping recycled slots. Returns a byte offset (index<<4) or MCI_GPM_INVALID; *more says whether messages remain. */
u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, u32 *more) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 offset, more_gpm = 0, gpm_ptr; /* * This could be useful to avoid new GPM message interrupt which * may lead to spurious interrupt after power sleep, or multiple * entry of ath_mci_intr(). * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can * alleviate this effect, but clearing GPM RX interrupt bit is * safe, because whether this is called from hw or driver code * there must be an interrupt bit set/triggered initially */ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, AR_MCI_INTERRUPT_RX_MSG_GPM); gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); offset = gpm_ptr; if (!offset) offset = mci->gpm_len - 1; else if (offset >= mci->gpm_len) { if (offset != 0xFFFF) offset = 0; } else { offset--; } if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) { offset = MCI_GPM_INVALID; more_gpm = MCI_GPM_NOMORE; goto out; } for (;;) { u32 temp_index; /* skip reserved GPM if any */ if (offset != mci->gpm_idx) more_gpm = MCI_GPM_MORE; else more_gpm = MCI_GPM_NOMORE; temp_index = mci->gpm_idx; if (temp_index >= mci->gpm_len) temp_index = 0; mci->gpm_idx++; if (mci->gpm_idx >= mci->gpm_len) mci->gpm_idx = 0; if (ar9003_mci_is_gpm_valid(ah, temp_index)) { offset = temp_index; break; } if (more_gpm == MCI_GPM_NOMORE) { offset = MCI_GPM_INVALID; break; } } if (offset != MCI_GPM_INVALID) offset <<= 4; out: if (more) *more = more_gpm; return offset; } EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
/* Record a BT coex version reported from outside the GPM path. */
void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor) { struct ath9k_hw_mci
*mci = &ah->btcoex_hw.mci; mci->bt_ver_major = major; mci->bt_ver_minor = minor; mci->bt_version_known = true; ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n", mci->bt_ver_major, mci->bt_ver_minor); } EXPORT_SYMBOL(ar9003_mci_set_bt_version);
/* Force-push the WLAN channel map to BT. */
void ar9003_mci_send_wlan_channels(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; mci->wlan_channels_update = true; ar9003_mci_send_coex_wlan_channels(ah, true); } EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
/* TX power cap for concurrent TX: HT20/HT40 limits when concur_tx is on, otherwise -1 (no cap). */
u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode) { if (!ah->btcoex_hw.mci.concur_tx) goto out; if (ctlmode == CTL_2GHT20) return ATH_BTCOEX_HT20_MAX_TXPOWER; else if (ctlmode == CTL_2GHT40) return ATH_BTCOEX_HT40_MAX_TXPOWER; out: return -1; }
gpl-2.0
Huexxx/diana
arch/x86/kernel/apic/probe_32.c
1120
7169
/*
 * Default generic APIC driver. This handles up to 8 CPUs.
 *
 * Copyright 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Generic x86 APIC driver probe layer.
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/setup.h>

#include <linux/smp.h>
#include <asm/ipi.h>

#include <linux/interrupt.h>
#include <asm/acpi.h>
#include <asm/e820.h>

/* With CPU hotplug, default to shortcut-less IPIs (see no_broadcast below). */
#ifdef CONFIG_HOTPLUG_CPU
#define DEFAULT_SEND_IPI	(1)
#else
#define DEFAULT_SEND_IPI	(0)
#endif

/* Non-zero: do not use APIC IPI broadcast shortcuts. Overridable on the command line. */
int no_broadcast = DEFAULT_SEND_IPI;

/* Parse the "no_ipi_broadcast=" kernel parameter into no_broadcast. */
static __init int no_ipi_broadcast(char *str)
{
	get_option(&str, &no_broadcast);
	pr_info("Using %s mode\n",
		no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
	return 1;
}
__setup("no_ipi_broadcast=", no_ipi_broadcast);

/* Report the chosen IPI mode late in boot, once printk is fully up. */
static int __init print_ipi_mode(void)
{
	pr_info("Using IPI %s mode\n",
		no_broadcast ? "No-Shortcut" : "Shortcut");
	return 0;
}
late_initcall(print_ipi_mode);

/*
 * Decide whether to fall back to the bigsmp driver (physical-mode APIC)
 * when more than 8 CPUs are possible, then let the selected driver do
 * its own routing setup.
 */
void __init default_setup_apic_routing(void)
{
	int version = apic_version[boot_cpu_physical_apicid];

	if (num_possible_cpus() > 8) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			/* Pre-xAPIC Intel parts cannot use bigsmp mode. */
			if (!APIC_XAPIC(version)) {
				def_to_bigsmp = 0;
				break;
			}
			/* If P4 and above fall through */
		case X86_VENDOR_AMD:
			def_to_bigsmp = 1;
		}
	}

#ifdef CONFIG_X86_BIGSMP
	generic_bigsmp_probe();
#endif

	if (apic->setup_apic_routing)
		apic->setup_apic_routing();
}

/* Routing setup for the default (flat logical) driver: just log the mode. */
static void setup_apic_flat_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
	printk(KERN_INFO
		"Enabling APIC mode: Flat. Using %d I/O APICs\n",
		nr_ioapics);
#endif
}

/* Allow vectors for @cpu to be delivered to any CPU (mask = APIC_ALL_CPUS). */
static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/*
	 * Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_clear(retmask);
	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/* should be called last: the default driver always matches. */
static int probe_default(void)
{
	return 1;
}

/* The default APIC driver: flat logical destination mode, up to 8 CPUs. */
struct apic apic_default = {

	.name				= "default",
	.probe				= probe_default,
	.acpi_madt_oem_check		= NULL,
	.apic_id_registered		= default_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	/* logical delivery broadcast to all CPUs: */
	.irq_dest_mode			= 1,

	.target_cpus			= default_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= default_check_apicid_used,
	.check_apicid_present		= default_check_apicid_present,

	.vector_allocation_domain	= default_vector_allocation_domain,
	.init_apic_ldr			= default_init_apic_ldr,

	.ioapic_phys_id_map		= default_ioapic_phys_id_map,
	.setup_apic_routing		= setup_apic_flat_routing,
	.multi_timer_check		= NULL,
	.apicid_to_node			= default_apicid_to_node,
	.cpu_to_logical_apicid		= default_cpu_to_logical_apicid,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= physid_set_mask_of_physid,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= default_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= default_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0x0F << 24,

	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

	.send_IPI_mask			= default_send_IPI_mask_logical,
	.send_IPI_mask_allbutself	= default_send_IPI_mask_allbutself_logical,
	.send_IPI_allbutself		= default_send_IPI_allbutself,
	.send_IPI_all			= default_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert		= default_wait_for_init_deassert,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};

extern struct apic apic_numaq;
extern struct apic apic_summit;
extern struct apic apic_bigsmp;
extern struct apic apic_es7000;
extern struct apic apic_es7000_cluster;

/* The APIC driver currently in use; starts as the default driver. */
struct apic *apic = &apic_default;
EXPORT_SYMBOL_GPL(apic);

/* Candidate drivers, probed in order; apic_default always matches last. */
static struct apic *apic_probe[] __initdata = {
#ifdef CONFIG_X86_NUMAQ
	&apic_numaq,
#endif
#ifdef CONFIG_X86_SUMMIT
	&apic_summit,
#endif
#ifdef CONFIG_X86_BIGSMP
	&apic_bigsmp,
#endif
#ifdef CONFIG_X86_ES7000
	&apic_es7000,
	&apic_es7000_cluster,
#endif
	&apic_default,	/* must be last */
	NULL,
};

/* Set when the user forced a driver via the "apic=" command-line option. */
static int cmdline_apic __initdata;

/* Handle "apic=<name>": select the named driver and pin it against overrides. */
static int __init parse_apic(char *arg)
{
	int i;

	if (!arg)
		return -EINVAL;

	for (i = 0; apic_probe[i]; i++) {
		if (!strcmp(apic_probe[i]->name, arg)) {
			apic = apic_probe[i];
			cmdline_apic = 1;
			return 0;
		}
	}

	/* Parsed again by __setup for debug/verbose */
	return 0;
}
early_param("apic", parse_apic);

/*
 * Switch to the bigsmp driver if it claims the system, unless the user
 * explicitly chose a driver on the command line.
 */
void __init generic_bigsmp_probe(void)
{
#ifdef CONFIG_X86_BIGSMP
	/*
	 * This routine is used to switch to bigsmp mode when
	 * - There is no apic= option specified by the user
	 * - generic_apic_probe() has chosen apic_default as the sub_arch
	 * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
	 */

	if (!cmdline_apic && apic == &apic_default) {
		if (apic_bigsmp.probe()) {
			apic = &apic_bigsmp;
			printk(KERN_INFO "Overriding APIC driver with %s\n",
			       apic->name);
		}
	}
#endif
}

/* Run the probe table (unless "apic=" pinned a driver) and report the result. */
void __init generic_apic_probe(void)
{
	if (!cmdline_apic) {
		int i;
		for (i = 0; apic_probe[i]; i++) {
			if (apic_probe[i]->probe()) {
				apic = apic_probe[i];
				break;
			}
		}
		/* Not visible without early console */
		if (!apic_probe[i])
			panic("Didn't find an APIC driver");
	}
	printk(KERN_INFO "Using APIC driver %s\n", apic->name);
}

/* These functions can switch the APIC even after the initial ->probe() */

/*
 * Let each driver's mps_oem_check() inspect the MP table; switch to the
 * first driver that claims it (unless the user pinned one). Returns 1 on
 * a match, 0 otherwise.
 */
int __init generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
{
	int i;
	for (i = 0; apic_probe[i]; ++i) {
		if (!apic_probe[i]->mps_oem_check)
			continue;
		if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
			continue;

		if (!cmdline_apic) {
			apic = apic_probe[i];
			printk(KERN_INFO "Switched to APIC driver `%s'.\n",
			       apic->name);
		}
		return 1;
	}
	return 0;
}

/*
 * Same idea as generic_mps_oem_check(), but driven by the ACPI MADT OEM
 * identifiers. Returns 1 if a driver claimed the table, 0 otherwise.
 */
int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int i;

	for (i = 0; apic_probe[i]; ++i) {
		if (!apic_probe[i]->acpi_madt_oem_check)
			continue;
		if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
			continue;

		if (!cmdline_apic) {
			apic = apic_probe[i];
			printk(KERN_INFO "Switched to APIC driver `%s'.\n",
			       apic->name);
		}
		return 1;
	}
	return 0;
}
gpl-2.0
morfic/dow-t959
arch/arm/kernel/machine_kexec.c
1376
2055
/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>

/* Relocation stub (assembly) copied into the control page before reset. */
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;

extern void setup_mm_for_reboot(char mode);

/* Parameters consumed by the relocation stub; filled in by machine_kexec(). */
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
extern unsigned long kexec_boot_atags;

/*
 * Provide a dummy crash_notes definition while crash dump arrives to arm.
 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
 */

/* Nothing to validate on ARM; always accept the image. */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

/* No per-image state is allocated, so nothing to tear down. */
void machine_kexec_cleanup(struct kimage *image)
{
}

void machine_shutdown(void)
{
}

/* Crash-dump support has not arrived on ARM yet; intentionally empty. */
void machine_crash_shutdown(struct pt_regs *regs)
{
}

/*
 * Boot the loaded kexec image: stage the relocation stub and its
 * parameters in the control page, flush the icache over it, shut the
 * MMU/caches down and jump to the stub at its physical address.
 * Does not return.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;

	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer */
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	/* ATAGs are assumed to sit at a fixed offset below the zImage load address. */
	kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;

	/* copy our kernel relocation code to the control code page */
	memcpy(reboot_code_buffer,
	       relocate_new_kernel, relocate_new_kernel_size);

	/* The stub is executed from this page: make the icache coherent first. */
	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	cpu_proc_fin();
	setup_mm_for_reboot(0); /* mode is not used, so just pass 0 */
	cpu_reset(reboot_code_buffer_phys);
}
gpl-2.0
PRJosh/kernel.org
drivers/clocksource/samsung_pwm_timer.c
1376
12879
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * samsung - Common hr-timer support (s3c and s5p)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>

#include <clocksource/samsung_pwm.h>

/*
 * Clocksource driver
 */

/* PWM timer block register offsets. */
#define REG_TCFG0			0x00
#define REG_TCFG1			0x04
#define REG_TCON			0x08
#define REG_TINT_CSTAT			0x44

#define REG_TCNTB(chan)			(0x0c + 12 * (chan))
#define REG_TCMPB(chan)			(0x10 + 12 * (chan))

#define TCFG0_PRESCALER_MASK		0xff
#define TCFG0_PRESCALER1_SHIFT		8

#define TCFG1_SHIFT(x)			((x) * 4)
#define TCFG1_MUX_MASK			0xf

/*
 * Each channel occupies 4 bits in TCON register, but there is a gap of 4
 * bits (one channel) after channel 0, so channels have different numbering
 * when accessing TCON register.
 *
 * In addition, the location of autoreload bit for channel 4 (TCON channel 5)
 * in its set of bits is 2 as opposed to 3 for other channels.
 */
#define TCON_START(chan)		(1 << (4 * (chan) + 0))
#define TCON_MANUALUPDATE(chan)		(1 << (4 * (chan) + 1))
#define TCON_INVERT(chan)		(1 << (4 * (chan) + 2))
#define _TCON_AUTORELOAD(chan)		(1 << (4 * (chan) + 3))
#define _TCON_AUTORELOAD4(chan)		(1 << (4 * (chan) + 2))
#define TCON_AUTORELOAD(chan)		\
	((chan < 5) ? _TCON_AUTORELOAD(chan) : _TCON_AUTORELOAD4(chan))

/* Shared with the Samsung PWM output driver, which touches the same registers. */
DEFINE_SPINLOCK(samsung_pwm_lock);
EXPORT_SYMBOL(samsung_pwm_lock);

struct samsung_pwm_clocksource {
	void __iomem *base;		/* mapped PWM timer register block */
	void __iomem *source_reg;	/* count-observation register of the source channel */
	unsigned int irq[SAMSUNG_PWM_NUM];
	struct samsung_pwm_variant variant;

	struct clk *timerclk;

	unsigned int event_id;		/* channel used as clock event device */
	unsigned int source_id;		/* channel used as clocksource */
	unsigned int tcnt_max;		/* maximum counter value for variant.bits */
	unsigned int tscaler_div;	/* TCFG0 prescaler value */
	unsigned int tdiv;		/* TCFG1 mux divider */

	unsigned long clock_count_per_tick;
};

/* Single driver instance; only one PWM timer block acts as system timer. */
static struct samsung_pwm_clocksource pwm;

/*
 * Program the TCFG0 prescaler feeding @channel. Channels 0-1 and 2-4
 * share two separate prescaler fields.
 */
static void samsung_timer_set_prescale(unsigned int channel, u16 prescale)
{
	unsigned long flags;
	u8 shift = 0;
	u32 reg;

	if (channel >= 2)
		shift = TCFG0_PRESCALER1_SHIFT;

	spin_lock_irqsave(&samsung_pwm_lock, flags);

	reg = readl(pwm.base + REG_TCFG0);
	reg &= ~(TCFG0_PRESCALER_MASK << shift);
	reg |= (prescale - 1) << shift;
	writel(reg, pwm.base + REG_TCFG0);

	spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}

/* Program the TCFG1 divider mux for @channel; @divisor must be a power of two. */
static void samsung_timer_set_divisor(unsigned int channel, u8 divisor)
{
	u8 shift = TCFG1_SHIFT(channel);
	unsigned long flags;
	u32 reg;
	u8 bits;

	/* Convert the divisor to the mux field value, offset by the variant base. */
	bits = (fls(divisor) - 1) - pwm.variant.div_base;

	spin_lock_irqsave(&samsung_pwm_lock, flags);

	reg = readl(pwm.base + REG_TCFG1);
	reg &= ~(TCFG1_MUX_MASK << shift);
	reg |= bits << shift;
	writel(reg, pwm.base + REG_TCFG1);

	spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}

/* Clear the start bit for @channel (after mapping to TCON channel numbering). */
static void samsung_time_stop(unsigned int channel)
{
	unsigned long tcon;
	unsigned long flags;

	/* Skip the 4-bit gap after channel 0 in TCON (see comment above). */
	if (channel > 0)
		++channel;

	spin_lock_irqsave(&samsung_pwm_lock, flags);

	tcon = __raw_readl(pwm.base + REG_TCON);
	tcon &= ~TCON_START(channel);
	__raw_writel(tcon, pwm.base + REG_TCON);

	spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}

/*
 * Load @tcnt into the count/compare buffer registers of @channel and flag
 * a manual update so the value is latched on the next start.
 */
static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
{
	unsigned long tcon;
	unsigned long flags;
	unsigned int tcon_chan = channel;

	/* TCON uses shifted channel numbers; data registers use the raw index. */
	if (tcon_chan > 0)
		++tcon_chan;

	spin_lock_irqsave(&samsung_pwm_lock, flags);

	tcon = __raw_readl(pwm.base + REG_TCON);

	tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
	tcon |= TCON_MANUALUPDATE(tcon_chan);

	__raw_writel(tcnt, pwm.base + REG_TCNTB(channel));
	__raw_writel(tcnt, pwm.base + REG_TCMPB(channel));
	__raw_writel(tcon, pwm.base + REG_TCON);

	spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}

/* Start @channel, optionally with autoreload for periodic operation. */
static void samsung_time_start(unsigned int channel, bool periodic)
{
	unsigned long tcon;
	unsigned long flags;

	/* Map to TCON channel numbering (gap after channel 0). */
	if (channel > 0)
		++channel;

	spin_lock_irqsave(&samsung_pwm_lock, flags);

	tcon = __raw_readl(pwm.base + REG_TCON);

	tcon &= ~TCON_MANUALUPDATE(channel);
	tcon |= TCON_START(channel);

	if (periodic)
		tcon |= TCON_AUTORELOAD(channel);
	else
		tcon &= ~TCON_AUTORELOAD(channel);

	__raw_writel(tcon, pwm.base + REG_TCON);

	spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}

/* clock_event_device callback: arm a one-shot interrupt @cycles ticks from now. */
static int samsung_set_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	/*
	 * This check is needed to account for internal rounding
	 * errors inside clockevents core, which might result in
	 * passing cycles = 0, which in turn would not generate any
	 * timer interrupt and hang the system.
	 *
	 * Another solution would be to set up the clockevent device
	 * with min_delta = 2, but this would unnecessarily increase
	 * the minimum sleep period.
	 */
	if (!cycles)
		cycles = 1;

	samsung_time_setup(pwm.event_id, cycles);
	samsung_time_start(pwm.event_id, false);

	return 0;
}

/* clock_event_device callback: switch between periodic and one-shot modes. */
static void samsung_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	samsung_time_stop(pwm.event_id);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
		samsung_time_start(pwm.event_id, true);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		/* Nothing to do here; set_next_event() programs each shot. */
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* Re-program prescaler/divisor and re-enable the interrupt after resume. */
static void samsung_clockevent_resume(struct clock_event_device *cev)
{
	samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div);
	samsung_timer_set_divisor(pwm.event_id, pwm.tdiv);

	if (pwm.variant.has_tint_cstat) {
		u32 mask = (1 << pwm.event_id);
		/* Low 5 bits enable, high 5 bits acknowledge (write-to-clear). */
		writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
	}
}

static struct clock_event_device time_event_device = {
	.name		= "samsung_event_timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_next_event	= samsung_set_next_event,
	.set_mode	= samsung_set_mode,
	.resume		= samsung_clockevent_resume,
};

/* Timer interrupt: acknowledge (if the variant has TINT_CSTAT) and dispatch. */
static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	if (pwm.variant.has_tint_cstat) {
		u32 mask = (1 << pwm.event_id);
		writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
	}

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction samsung_clock_event_irq = {
	.name		= "samsung_time_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= samsung_clock_event_isr,
	.dev_id		= &time_event_device,
};

/* Configure and register the clock event channel and its interrupt. */
static void __init samsung_clockevent_init(void)
{
	unsigned long pclk;
	unsigned long clock_rate;
	unsigned int irq_number;

	pclk = clk_get_rate(pwm.timerclk);

	samsung_timer_set_prescale(pwm.event_id, pwm.tscaler_div);
	samsung_timer_set_divisor(pwm.event_id, pwm.tdiv);

	clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv);
	pwm.clock_count_per_tick = clock_rate / HZ;

	time_event_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&time_event_device,
						clock_rate, 1, pwm.tcnt_max);

	irq_number = pwm.irq[pwm.event_id];
	setup_irq(irq_number, &samsung_clock_event_irq);

	if (pwm.variant.has_tint_cstat) {
		u32 mask = (1 << pwm.event_id);
		writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
	}
}

static void samsung_clocksource_suspend(struct clocksource *cs)
{
	samsung_time_stop(pwm.source_id);
}

/* Restore the free-running source channel after suspend. */
static void samsung_clocksource_resume(struct clocksource *cs)
{
	samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div);
	samsung_timer_set_divisor(pwm.source_id, pwm.tdiv);

	samsung_time_setup(pwm.source_id, pwm.tcnt_max);
	samsung_time_start(pwm.source_id, true);
}

/* The hardware counts down; invert so the clocksource counts up. */
static cycle_t samsung_clocksource_read(struct clocksource *c)
{
	return ~readl_relaxed(pwm.source_reg);
}

static struct clocksource samsung_clocksource = {
	.name		= "samsung_clocksource_timer",
	.rating		= 250,
	.read		= samsung_clocksource_read,
	.suspend	= samsung_clocksource_suspend,
	.resume		= samsung_clocksource_resume,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
 * Override the global weak sched_clock symbol with this
 * local implementation which uses the clocksource to get some
 * better resolution when scheduling the kernel. We accept that
 * this wraps around for now, since it is just a relative time
 * stamp. (Inspired by U300 implementation.)
 */
static u64 notrace samsung_read_sched_clock(void)
{
	return samsung_clocksource_read(NULL);
}

/* Start the source channel free-running and register it as clocksource + sched_clock. */
static void __init samsung_clocksource_init(void)
{
	unsigned long pclk;
	unsigned long clock_rate;
	int ret;

	pclk = clk_get_rate(pwm.timerclk);

	samsung_timer_set_prescale(pwm.source_id, pwm.tscaler_div);
	samsung_timer_set_divisor(pwm.source_id, pwm.tdiv);

	clock_rate = pclk / (pwm.tscaler_div * pwm.tdiv);

	samsung_time_setup(pwm.source_id, pwm.tcnt_max);
	samsung_time_start(pwm.source_id, true);

	/* Channel 4 has its count-observation register at a fixed offset. */
	if (pwm.source_id == 4)
		pwm.source_reg = pwm.base + 0x40;
	else
		pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;

	sched_clock_register(samsung_read_sched_clock, pwm.variant.bits,
						clock_rate);

	samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
	ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
	if (ret)
		panic("samsung_clocksource_timer: can't register clocksource\n");
}

/* Enable the timer clock and pick divider settings based on counter width. */
static void __init samsung_timer_resources(void)
{
	clk_prepare_enable(pwm.timerclk);

	pwm.tcnt_max = (1UL << pwm.variant.bits) - 1;
	if (pwm.variant.bits == 16) {
		pwm.tscaler_div = 25;
		pwm.tdiv = 2;
	} else {
		pwm.tscaler_div = 2;
		pwm.tdiv = 1;
	}
}

/*
 * PWM master driver
 */

/*
 * Common initialization: pick the two highest-numbered channels not
 * reserved for PWM output as clocksource and clock event, then bring
 * both up.
 */
static void __init _samsung_pwm_clocksource_init(void)
{
	u8 mask;
	int channel;

	mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
	channel = fls(mask) - 1;
	if (channel < 0)
		panic("failed to find PWM channel for clocksource");
	pwm.source_id = channel;

	mask &= ~(1 << channel);
	channel = fls(mask) - 1;
	if (channel < 0)
		panic("failed to find PWM channel for clock event");
	pwm.event_id = channel;

	samsung_timer_resources();
	samsung_clockevent_init();
	samsung_clocksource_init();
}

/* Legacy (non-DT) entry point: caller supplies base, IRQs and variant data. */
void __init samsung_pwm_clocksource_init(void __iomem *base,
			unsigned int *irqs, struct samsung_pwm_variant *variant)
{
	pwm.base = base;
	memcpy(&pwm.variant, variant, sizeof(pwm.variant));
	memcpy(pwm.irq, irqs, SAMSUNG_PWM_NUM * sizeof(*irqs));

	pwm.timerclk = clk_get(NULL, "timers");
	if (IS_ERR(pwm.timerclk))
		panic("failed to get timers clock for timer");

	_samsung_pwm_clocksource_init();
}

#ifdef CONFIG_CLKSRC_OF
/* Device-tree entry point: gather IRQs, output mask, registers and clock from @np. */
static void __init samsung_pwm_alloc(struct device_node *np,
				     const struct samsung_pwm_variant *variant)
{
	struct property *prop;
	const __be32 *cur;
	u32 val;
	int i;

	memcpy(&pwm.variant, variant, sizeof(pwm.variant));
	for (i = 0; i < SAMSUNG_PWM_NUM; ++i)
		pwm.irq[i] = irq_of_parse_and_map(np, i);

	/* Channels listed in "samsung,pwm-outputs" stay reserved for PWM output. */
	of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
		if (val >= SAMSUNG_PWM_NUM) {
			pr_warning("%s: invalid channel index in samsung,pwm-outputs property\n",
								__func__);
			continue;
		}
		pwm.variant.output_mask |= 1 << val;
	}

	pwm.base = of_iomap(np, 0);
	if (!pwm.base) {
		pr_err("%s: failed to map PWM registers\n", __func__);
		return;
	}

	pwm.timerclk = of_clk_get_by_name(np, "timers");
	if (IS_ERR(pwm.timerclk))
		panic("failed to get timers clock for timer");

	_samsung_pwm_clocksource_init();
}

static const struct samsung_pwm_variant s3c24xx_variant = {
	.bits		= 16,
	.div_base	= 1,
	.has_tint_cstat	= false,
	.tclk_mask	= (1 << 4),
};

static void __init s3c2410_pwm_clocksource_init(struct device_node *np)
{
	samsung_pwm_alloc(np, &s3c24xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);

static const struct samsung_pwm_variant s3c64xx_variant = {
	.bits		= 32,
	.div_base	= 0,
	.has_tint_cstat	= true,
	.tclk_mask	= (1 << 7) | (1 << 6) | (1 << 5),
};

static void __init s3c64xx_pwm_clocksource_init(struct device_node *np)
{
	samsung_pwm_alloc(np, &s3c64xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);

static const struct samsung_pwm_variant s5p64x0_variant = {
	.bits		= 32,
	.div_base	= 0,
	.has_tint_cstat	= true,
	.tclk_mask	= 0,
};

static void __init s5p64x0_pwm_clocksource_init(struct device_node *np)
{
	samsung_pwm_alloc(np, &s5p64x0_variant);
}
CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);

static const struct samsung_pwm_variant s5p_variant = {
	.bits		= 32,
	.div_base	= 0,
	.has_tint_cstat	= true,
	.tclk_mask	= (1 << 5),
};

static void __init s5p_pwm_clocksource_init(struct device_node *np)
{
	samsung_pwm_alloc(np, &s5p_variant);
}
CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
#endif
gpl-2.0
malsony/linux
drivers/media/i2c/saa7127.c
1632
26094
/* * saa7127 - Philips SAA7127/SAA7129 video encoder driver * * Copyright (C) 2003 Roy Bulter <rbulter@hetnet.nl> * * Based on SAA7126 video encoder driver by Gillem & Andreas Oberritter * * Copyright (C) 2000-2001 Gillem <htoa@gmx.net> * Copyright (C) 2002 Andreas Oberritter <obi@saftware.de> * * Based on Stadis 4:2:2 MPEG-2 Decoder Driver by Nathan Laredo * * Copyright (C) 1999 Nathan Laredo <laredo@gnu.org> * * This driver is designed for the Hauppauge 250/350 Linux driver * from the ivtv Project * * Copyright (C) 2003 Kevin Thayer <nufan_wfk@yahoo.com> * * Dual output support: * Copyright (C) 2004 Eric Varsanyi * * NTSC Tuning and 7.5 IRE Setup * Copyright (C) 2004 Chris Kennedy <c@groovy.org> * * VBI additions & cleanup: * Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@xs4all.nl> * * Note: the saa7126 is identical to the saa7127, and the saa7128 is * identical to the saa7129, except that the saa7126 and saa7128 have * macrovision anti-taping support. This driver will almost certainly * work fine for those chips, except of course for the missing anti-taping * support. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/saa7127.h> static int debug; static int test_image; MODULE_DESCRIPTION("Philips SAA7127/9 video encoder driver"); MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil"); MODULE_LICENSE("GPL"); module_param(debug, int, 0644); module_param(test_image, int, 0644); MODULE_PARM_DESC(debug, "debug level (0-2)"); MODULE_PARM_DESC(test_image, "test_image (0-1)"); /* * SAA7127 registers */ #define SAA7127_REG_STATUS 0x00 #define SAA7127_REG_WIDESCREEN_CONFIG 0x26 #define SAA7127_REG_WIDESCREEN_ENABLE 0x27 #define SAA7127_REG_BURST_START 0x28 #define SAA7127_REG_BURST_END 0x29 #define SAA7127_REG_COPYGEN_0 0x2a #define SAA7127_REG_COPYGEN_1 0x2b #define SAA7127_REG_COPYGEN_2 0x2c #define SAA7127_REG_OUTPUT_PORT_CONTROL 0x2d #define SAA7127_REG_GAIN_LUMINANCE_RGB 0x38 #define SAA7127_REG_GAIN_COLORDIFF_RGB 0x39 #define SAA7127_REG_INPUT_PORT_CONTROL_1 0x3a #define SAA7129_REG_FADE_KEY_COL2 0x4f #define SAA7127_REG_CHROMA_PHASE 0x5a #define SAA7127_REG_GAINU 0x5b #define SAA7127_REG_GAINV 0x5c #define SAA7127_REG_BLACK_LEVEL 0x5d #define SAA7127_REG_BLANKING_LEVEL 0x5e #define SAA7127_REG_VBI_BLANKING 0x5f #define SAA7127_REG_DAC_CONTROL 0x61 #define SAA7127_REG_BURST_AMP 0x62 #define SAA7127_REG_SUBC3 0x63 #define SAA7127_REG_SUBC2 0x64 #define SAA7127_REG_SUBC1 0x65 #define SAA7127_REG_SUBC0 0x66 #define SAA7127_REG_LINE_21_ODD_0 0x67 #define SAA7127_REG_LINE_21_ODD_1 0x68 #define SAA7127_REG_LINE_21_EVEN_0 0x69 #define SAA7127_REG_LINE_21_EVEN_1 0x6a #define SAA7127_REG_RCV_PORT_CONTROL 0x6b #define SAA7127_REG_VTRIG 0x6c #define SAA7127_REG_HTRIG_HI 0x6d #define SAA7127_REG_MULTI 0x6e #define SAA7127_REG_CLOSED_CAPTION 0x6f #define SAA7127_REG_RCV2_OUTPUT_START 0x70 #define SAA7127_REG_RCV2_OUTPUT_END 0x71 #define SAA7127_REG_RCV2_OUTPUT_MSBS 0x72 #define SAA7127_REG_TTX_REQUEST_H_START 0x73 
#define SAA7127_REG_TTX_REQUEST_H_DELAY_LENGTH 0x74 #define SAA7127_REG_CSYNC_ADVANCE_VSYNC_SHIFT 0x75 #define SAA7127_REG_TTX_ODD_REQ_VERT_START 0x76 #define SAA7127_REG_TTX_ODD_REQ_VERT_END 0x77 #define SAA7127_REG_TTX_EVEN_REQ_VERT_START 0x78 #define SAA7127_REG_TTX_EVEN_REQ_VERT_END 0x79 #define SAA7127_REG_FIRST_ACTIVE 0x7a #define SAA7127_REG_LAST_ACTIVE 0x7b #define SAA7127_REG_MSB_VERTICAL 0x7c #define SAA7127_REG_DISABLE_TTX_LINE_LO_0 0x7e #define SAA7127_REG_DISABLE_TTX_LINE_LO_1 0x7f /* ********************************************************************** * * Arrays with configuration parameters for the SAA7127 * ********************************************************************** */ struct i2c_reg_value { unsigned char reg; unsigned char value; }; static const struct i2c_reg_value saa7129_init_config_extra[] = { { SAA7127_REG_OUTPUT_PORT_CONTROL, 0x38 }, { SAA7127_REG_VTRIG, 0xfa }, { 0, 0 } }; static const struct i2c_reg_value saa7127_init_config_common[] = { { SAA7127_REG_WIDESCREEN_CONFIG, 0x0d }, { SAA7127_REG_WIDESCREEN_ENABLE, 0x00 }, { SAA7127_REG_COPYGEN_0, 0x77 }, { SAA7127_REG_COPYGEN_1, 0x41 }, { SAA7127_REG_COPYGEN_2, 0x00 }, /* Macrovision enable/disable */ { SAA7127_REG_OUTPUT_PORT_CONTROL, 0xbf }, { SAA7127_REG_GAIN_LUMINANCE_RGB, 0x00 }, { SAA7127_REG_GAIN_COLORDIFF_RGB, 0x00 }, { SAA7127_REG_INPUT_PORT_CONTROL_1, 0x80 }, /* for color bars */ { SAA7127_REG_LINE_21_ODD_0, 0x77 }, { SAA7127_REG_LINE_21_ODD_1, 0x41 }, { SAA7127_REG_LINE_21_EVEN_0, 0x88 }, { SAA7127_REG_LINE_21_EVEN_1, 0x41 }, { SAA7127_REG_RCV_PORT_CONTROL, 0x12 }, { SAA7127_REG_VTRIG, 0xf9 }, { SAA7127_REG_HTRIG_HI, 0x00 }, { SAA7127_REG_RCV2_OUTPUT_START, 0x41 }, { SAA7127_REG_RCV2_OUTPUT_END, 0xc3 }, { SAA7127_REG_RCV2_OUTPUT_MSBS, 0x00 }, { SAA7127_REG_TTX_REQUEST_H_START, 0x3e }, { SAA7127_REG_TTX_REQUEST_H_DELAY_LENGTH, 0xb8 }, { SAA7127_REG_CSYNC_ADVANCE_VSYNC_SHIFT, 0x03 }, { SAA7127_REG_TTX_ODD_REQ_VERT_START, 0x15 }, { SAA7127_REG_TTX_ODD_REQ_VERT_END, 0x16 }, 
{ SAA7127_REG_TTX_EVEN_REQ_VERT_START, 0x15 }, { SAA7127_REG_TTX_EVEN_REQ_VERT_END, 0x16 }, { SAA7127_REG_FIRST_ACTIVE, 0x1a }, { SAA7127_REG_LAST_ACTIVE, 0x01 }, { SAA7127_REG_MSB_VERTICAL, 0xc0 }, { SAA7127_REG_DISABLE_TTX_LINE_LO_0, 0x00 }, { SAA7127_REG_DISABLE_TTX_LINE_LO_1, 0x00 }, { 0, 0 } }; #define SAA7127_60HZ_DAC_CONTROL 0x15 static const struct i2c_reg_value saa7127_init_config_60hz[] = { { SAA7127_REG_BURST_START, 0x19 }, /* BURST_END is also used as a chip ID in saa7127_probe */ { SAA7127_REG_BURST_END, 0x1d }, { SAA7127_REG_CHROMA_PHASE, 0xa3 }, { SAA7127_REG_GAINU, 0x98 }, { SAA7127_REG_GAINV, 0xd3 }, { SAA7127_REG_BLACK_LEVEL, 0x39 }, { SAA7127_REG_BLANKING_LEVEL, 0x2e }, { SAA7127_REG_VBI_BLANKING, 0x2e }, { SAA7127_REG_DAC_CONTROL, 0x15 }, { SAA7127_REG_BURST_AMP, 0x4d }, { SAA7127_REG_SUBC3, 0x1f }, { SAA7127_REG_SUBC2, 0x7c }, { SAA7127_REG_SUBC1, 0xf0 }, { SAA7127_REG_SUBC0, 0x21 }, { SAA7127_REG_MULTI, 0x90 }, { SAA7127_REG_CLOSED_CAPTION, 0x11 }, { 0, 0 } }; #define SAA7127_50HZ_PAL_DAC_CONTROL 0x02 static struct i2c_reg_value saa7127_init_config_50hz_pal[] = { { SAA7127_REG_BURST_START, 0x21 }, /* BURST_END is also used as a chip ID in saa7127_probe */ { SAA7127_REG_BURST_END, 0x1d }, { SAA7127_REG_CHROMA_PHASE, 0x3f }, { SAA7127_REG_GAINU, 0x7d }, { SAA7127_REG_GAINV, 0xaf }, { SAA7127_REG_BLACK_LEVEL, 0x33 }, { SAA7127_REG_BLANKING_LEVEL, 0x35 }, { SAA7127_REG_VBI_BLANKING, 0x35 }, { SAA7127_REG_DAC_CONTROL, 0x02 }, { SAA7127_REG_BURST_AMP, 0x2f }, { SAA7127_REG_SUBC3, 0xcb }, { SAA7127_REG_SUBC2, 0x8a }, { SAA7127_REG_SUBC1, 0x09 }, { SAA7127_REG_SUBC0, 0x2a }, { SAA7127_REG_MULTI, 0xa0 }, { SAA7127_REG_CLOSED_CAPTION, 0x00 }, { 0, 0 } }; #define SAA7127_50HZ_SECAM_DAC_CONTROL 0x08 static struct i2c_reg_value saa7127_init_config_50hz_secam[] = { { SAA7127_REG_BURST_START, 0x21 }, /* BURST_END is also used as a chip ID in saa7127_probe */ { SAA7127_REG_BURST_END, 0x1d }, { SAA7127_REG_CHROMA_PHASE, 0x3f }, { SAA7127_REG_GAINU, 0x6a }, { 
SAA7127_REG_GAINV, 0x81 }, { SAA7127_REG_BLACK_LEVEL, 0x33 }, { SAA7127_REG_BLANKING_LEVEL, 0x35 }, { SAA7127_REG_VBI_BLANKING, 0x35 }, { SAA7127_REG_DAC_CONTROL, 0x08 }, { SAA7127_REG_BURST_AMP, 0x2f }, { SAA7127_REG_SUBC3, 0xb2 }, { SAA7127_REG_SUBC2, 0x3b }, { SAA7127_REG_SUBC1, 0xa3 }, { SAA7127_REG_SUBC0, 0x28 }, { SAA7127_REG_MULTI, 0x90 }, { SAA7127_REG_CLOSED_CAPTION, 0x00 }, { 0, 0 } }; /* ********************************************************************** * * Encoder Struct, holds the configuration state of the encoder * ********************************************************************** */ enum saa712x_model { SAA7127, SAA7129, }; struct saa7127_state { struct v4l2_subdev sd; v4l2_std_id std; enum saa712x_model ident; enum saa7127_input_type input_type; enum saa7127_output_type output_type; int video_enable; int wss_enable; u16 wss_mode; int cc_enable; u16 cc_data; int xds_enable; u16 xds_data; int vps_enable; u8 vps_data[5]; u8 reg_2d; u8 reg_3a; u8 reg_3a_cb; /* colorbar bit */ u8 reg_61; }; static inline struct saa7127_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct saa7127_state, sd); } static const char * const output_strs[] = { "S-Video + Composite", "Composite", "S-Video", "RGB", "YUV C", "YUV V" }; static const char * const wss_strs[] = { "invalid", "letterbox 14:9 center", "letterbox 14:9 top", "invalid", "letterbox 16:9 top", "invalid", "invalid", "16:9 full format anamorphic", "4:3 full format", "invalid", "invalid", "letterbox 16:9 center", "invalid", "letterbox >16:9 center", "14:9 full format center", "invalid", }; /* ----------------------------------------------------------------------- */ static int saa7127_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); return i2c_smbus_read_byte_data(client, reg); } /* ----------------------------------------------------------------------- */ static int saa7127_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client 
*client = v4l2_get_subdevdata(sd);
	int i;

	/* The I2C bus can be flaky; retry the SMBus write up to 3 times. */
	for (i = 0; i < 3; i++) {
		if (i2c_smbus_write_byte_data(client, reg, val) == 0)
			return 0;
	}
	v4l2_err(sd, "I2C Write Problem\n");
	return -1;
}

/* ----------------------------------------------------------------------- */

/* Write a 0-terminated table of register/value pairs to the chip. */
static int saa7127_write_inittab(struct v4l2_subdev *sd,
				 const struct i2c_reg_value *regs)
{
	while (regs->reg != 0) {
		saa7127_write(sd, regs->reg, regs->value);
		regs++;
	}
	return 0;
}

/* ----------------------------------------------------------------------- */

/*
 * Program VPS (Video Programming System) insertion.
 * data->line == 0 disables insertion; otherwise only field 0, line 16
 * is accepted as a VPS position. Returns 0 or -EINVAL.
 */
static int saa7127_set_vps(struct v4l2_subdev *sd,
			   const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	int enable = (data->line != 0);

	if (enable && (data->field != 0 || data->line != 16))
		return -EINVAL;
	if (state->vps_enable != enable) {
		v4l2_dbg(1, debug, sd, "Turn VPS Signal %s\n",
			 enable ? "on" : "off");
		/* bit 7 of reg 0x54 gates VPS insertion */
		saa7127_write(sd, 0x54, enable << 7);
		state->vps_enable = enable;
	}
	if (!enable)
		return 0;

	/* Only bytes 2 and 8-11 of the sliced VBI payload carry VPS data. */
	state->vps_data[0] = data->data[2];
	state->vps_data[1] = data->data[8];
	state->vps_data[2] = data->data[9];
	state->vps_data[3] = data->data[10];
	state->vps_data[4] = data->data[11];
	v4l2_dbg(1, debug, sd, "Set VPS data %*ph\n", 5, state->vps_data);
	saa7127_write(sd, 0x55, state->vps_data[0]);
	saa7127_write(sd, 0x56, state->vps_data[1]);
	saa7127_write(sd, 0x57, state->vps_data[2]);
	saa7127_write(sd, 0x58, state->vps_data[3]);
	saa7127_write(sd, 0x59, state->vps_data[4]);
	return 0;
}

/* ----------------------------------------------------------------------- */

/*
 * Program closed caption (CC, line 21 field 0) insertion.
 * data->line == 0 disables insertion. Returns 0 or -EINVAL.
 */
static int saa7127_set_cc(struct v4l2_subdev *sd,
			  const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	u16 cc = data->data[1] << 8 | data->data[0];
	int enable = (data->line != 0);

	if (enable && (data->field != 0 || data->line != 21))
		return -EINVAL;
	if (state->cc_enable != enable) {
		v4l2_dbg(1, debug, sd, "Turn CC %s\n", enable ?
"on" : "off");
		/* reg 0x6f: bit 7 = XDS enable, bit 6 = CC enable */
		saa7127_write(sd, SAA7127_REG_CLOSED_CAPTION,
			      (state->xds_enable << 7) | (enable << 6) | 0x11);
		state->cc_enable = enable;
	}
	if (!enable)
		return 0;

	v4l2_dbg(2, debug, sd, "CC data: %04x\n", cc);
	saa7127_write(sd, SAA7127_REG_LINE_21_ODD_0, cc & 0xff);
	saa7127_write(sd, SAA7127_REG_LINE_21_ODD_1, cc >> 8);
	state->cc_data = cc;
	return 0;
}

/* ----------------------------------------------------------------------- */

/*
 * Program XDS (extended data services, line 21 field 1) insertion.
 * data->line == 0 disables insertion. Returns 0 or -EINVAL.
 */
static int saa7127_set_xds(struct v4l2_subdev *sd,
			   const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	u16 xds = data->data[1] << 8 | data->data[0];
	int enable = (data->line != 0);

	if (enable && (data->field != 1 || data->line != 21))
		return -EINVAL;
	if (state->xds_enable != enable) {
		v4l2_dbg(1, debug, sd, "Turn XDS %s\n", enable ? "on" : "off");
		/* reg 0x6f: bit 7 = XDS enable, bit 6 = CC enable */
		saa7127_write(sd, SAA7127_REG_CLOSED_CAPTION,
			      (enable << 7) | (state->cc_enable << 6) | 0x11);
		state->xds_enable = enable;
	}
	if (!enable)
		return 0;

	v4l2_dbg(2, debug, sd, "XDS data: %04x\n", xds);
	saa7127_write(sd, SAA7127_REG_LINE_21_EVEN_0, xds & 0xff);
	saa7127_write(sd, SAA7127_REG_LINE_21_EVEN_1, xds >> 8);
	state->xds_data = xds;
	return 0;
}

/* ----------------------------------------------------------------------- */

/*
 * Program WSS (wide screen signalling, line 23 field 0) insertion.
 * data->line == 0 disables insertion. Returns 0 or -EINVAL.
 */
static int saa7127_set_wss(struct v4l2_subdev *sd,
			   const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	int enable = (data->line != 0);

	if (enable && (data->field != 0 || data->line != 23))
		return -EINVAL;
	if (state->wss_enable != enable) {
		v4l2_dbg(1, debug, sd, "Turn WSS %s\n", enable ?
"on" : "off"); saa7127_write(sd, 0x27, enable << 7); state->wss_enable = enable; } if (!enable) return 0; saa7127_write(sd, 0x26, data->data[0]); saa7127_write(sd, 0x27, 0x80 | (data->data[1] & 0x3f)); v4l2_dbg(1, debug, sd, "WSS mode: %s\n", wss_strs[data->data[0] & 0xf]); state->wss_mode = (data->data[1] & 0x3f) << 8 | data->data[0]; return 0; } /* ----------------------------------------------------------------------- */ static int saa7127_set_video_enable(struct v4l2_subdev *sd, int enable) { struct saa7127_state *state = to_state(sd); if (enable) { v4l2_dbg(1, debug, sd, "Enable Video Output\n"); saa7127_write(sd, 0x2d, state->reg_2d); saa7127_write(sd, 0x61, state->reg_61); } else { v4l2_dbg(1, debug, sd, "Disable Video Output\n"); saa7127_write(sd, 0x2d, (state->reg_2d & 0xf0)); saa7127_write(sd, 0x61, (state->reg_61 | 0xc0)); } state->video_enable = enable; return 0; } /* ----------------------------------------------------------------------- */ static int saa7127_set_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct saa7127_state *state = to_state(sd); const struct i2c_reg_value *inittab; if (std & V4L2_STD_525_60) { v4l2_dbg(1, debug, sd, "Selecting 60 Hz video Standard\n"); inittab = saa7127_init_config_60hz; state->reg_61 = SAA7127_60HZ_DAC_CONTROL; } else if (state->ident == SAA7129 && (std & V4L2_STD_SECAM) && !(std & (V4L2_STD_625_50 & ~V4L2_STD_SECAM))) { /* If and only if SECAM, with a SAA712[89] */ v4l2_dbg(1, debug, sd, "Selecting 50 Hz SECAM video Standard\n"); inittab = saa7127_init_config_50hz_secam; state->reg_61 = SAA7127_50HZ_SECAM_DAC_CONTROL; } else { v4l2_dbg(1, debug, sd, "Selecting 50 Hz PAL video Standard\n"); inittab = saa7127_init_config_50hz_pal; state->reg_61 = SAA7127_50HZ_PAL_DAC_CONTROL; } /* Write Table */ saa7127_write_inittab(sd, inittab); state->std = std; return 0; } /* ----------------------------------------------------------------------- */ static int saa7127_set_output_type(struct v4l2_subdev *sd, int output) { 
struct saa7127_state *state = to_state(sd); switch (output) { case SAA7127_OUTPUT_TYPE_RGB: state->reg_2d = 0x0f; /* RGB + CVBS (for sync) */ state->reg_3a = 0x13; /* by default switch YUV to RGB-matrix on */ break; case SAA7127_OUTPUT_TYPE_COMPOSITE: if (state->ident == SAA7129) state->reg_2d = 0x20; /* CVBS only */ else state->reg_2d = 0x08; /* 00001000 CVBS only, RGB DAC's off (high impedance mode) */ state->reg_3a = 0x13; /* by default switch YUV to RGB-matrix on */ break; case SAA7127_OUTPUT_TYPE_SVIDEO: if (state->ident == SAA7129) state->reg_2d = 0x18; /* Y + C */ else state->reg_2d = 0xff; /*11111111 croma -> R, luma -> CVBS + G + B */ state->reg_3a = 0x13; /* by default switch YUV to RGB-matrix on */ break; case SAA7127_OUTPUT_TYPE_YUV_V: state->reg_2d = 0x4f; /* reg 2D = 01001111, all DAC's on, RGB + VBS */ state->reg_3a = 0x0b; /* reg 3A = 00001011, bypass RGB-matrix */ break; case SAA7127_OUTPUT_TYPE_YUV_C: state->reg_2d = 0x0f; /* reg 2D = 00001111, all DAC's on, RGB + CVBS */ state->reg_3a = 0x0b; /* reg 3A = 00001011, bypass RGB-matrix */ break; case SAA7127_OUTPUT_TYPE_BOTH: if (state->ident == SAA7129) state->reg_2d = 0x38; else state->reg_2d = 0xbf; state->reg_3a = 0x13; /* by default switch YUV to RGB-matrix on */ break; default: return -EINVAL; } v4l2_dbg(1, debug, sd, "Selecting %s output type\n", output_strs[output]); /* Configure Encoder */ saa7127_write(sd, 0x2d, state->reg_2d); saa7127_write(sd, 0x3a, state->reg_3a | state->reg_3a_cb); state->output_type = output; return 0; } /* ----------------------------------------------------------------------- */ static int saa7127_set_input_type(struct v4l2_subdev *sd, int input) { struct saa7127_state *state = to_state(sd); switch (input) { case SAA7127_INPUT_TYPE_NORMAL: /* avia */ v4l2_dbg(1, debug, sd, "Selecting Normal Encoder Input\n"); state->reg_3a_cb = 0; break; case SAA7127_INPUT_TYPE_TEST_IMAGE: /* color bar */ v4l2_dbg(1, debug, sd, "Selecting Color Bar generator\n"); state->reg_3a_cb = 
0x80;
		break;
	default:
		return -EINVAL;
	}
	/* reg 0x3a also carries the colorbar bit (reg_3a_cb). */
	saa7127_write(sd, 0x3a, state->reg_3a | state->reg_3a_cb);
	state->input_type = input;
	return 0;
}

/* ----------------------------------------------------------------------- */

/* v4l2 s_std_output op: reprogram the chip only if the standard changed. */
static int saa7127_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct saa7127_state *state = to_state(sd);

	if (state->std == std)
		return 0;
	return saa7127_set_std(sd, std);
}

/* v4l2 s_routing op: update input and/or output type if they changed. */
static int saa7127_s_routing(struct v4l2_subdev *sd,
			     u32 input, u32 output, u32 config)
{
	struct saa7127_state *state = to_state(sd);
	int rc = 0;

	if (state->input_type != input)
		rc = saa7127_set_input_type(sd, input);
	if (rc == 0 && state->output_type != output)
		rc = saa7127_set_output_type(sd, output);
	return rc;
}

/* v4l2 s_stream op: enable/disable video output if the state changed. */
static int saa7127_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct saa7127_state *state = to_state(sd);

	if (state->video_enable == enable)
		return 0;
	return saa7127_set_video_enable(sd, enable);
}

/*
 * v4l2 g_sliced_fmt op: report which sliced VBI services are currently
 * being inserted (VPS on line 16, WSS on line 23, CC on line 21 of
 * both fields).
 */
static int saa7127_g_sliced_fmt(struct v4l2_subdev *sd,
				struct v4l2_sliced_vbi_format *fmt)
{
	struct saa7127_state *state = to_state(sd);

	memset(fmt->service_lines, 0, sizeof(fmt->service_lines));
	if (state->vps_enable)
		fmt->service_lines[0][16] = V4L2_SLICED_VPS;
	if (state->wss_enable)
		fmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
	if (state->cc_enable) {
		fmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
		fmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
	}
	fmt->service_set =
		(state->vps_enable ? V4L2_SLICED_VPS : 0) |
		(state->wss_enable ? V4L2_SLICED_WSS_625 : 0) |
		(state->cc_enable ?
V4L2_SLICED_CAPTION_525 : 0);
	return 0;
}

/*
 * v4l2 s_vbi_data op: dispatch one sliced VBI packet to the matching
 * insertion routine based on its service id (and, for CC, its field).
 */
static int saa7127_s_vbi_data(struct v4l2_subdev *sd,
			      const struct v4l2_sliced_vbi_data *data)
{
	switch (data->id) {
	case V4L2_SLICED_WSS_625:
		return saa7127_set_wss(sd, data);
	case V4L2_SLICED_VPS:
		return saa7127_set_vps(sd, data);
	case V4L2_SLICED_CAPTION_525:
		/* field 0 carries CC, field 1 carries XDS */
		if (data->field == 0)
			return saa7127_set_cc(sd, data);
		return saa7127_set_xds(sd, data);
	default:
		return -EINVAL;
	}
	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug hook: read back a single chip register. */
static int saa7127_g_register(struct v4l2_subdev *sd,
			      struct v4l2_dbg_register *reg)
{
	reg->val = saa7127_read(sd, reg->reg & 0xff);
	reg->size = 1;
	return 0;
}

/* Debug hook: write a single chip register. */
static int saa7127_s_register(struct v4l2_subdev *sd,
			      const struct v4l2_dbg_register *reg)
{
	saa7127_write(sd, reg->reg & 0xff, reg->val & 0xff);
	return 0;
}
#endif

/* Log the current driver state to the kernel log. */
static int saa7127_log_status(struct v4l2_subdev *sd)
{
	struct saa7127_state *state = to_state(sd);

	v4l2_info(sd, "Standard: %s\n",
		  (state->std & V4L2_STD_525_60) ? "60 Hz" : "50 Hz");
	v4l2_info(sd, "Input: %s\n",
		  state->input_type ? "color bars" : "normal");
	v4l2_info(sd, "Output: %s\n",
		  state->video_enable ?
		  output_strs[state->output_type] : "disabled");
	v4l2_info(sd, "WSS: %s\n",
		  state->wss_enable ? wss_strs[state->wss_mode] : "disabled");
	v4l2_info(sd, "VPS: %s\n",
		  state->vps_enable ? "enabled" : "disabled");
	v4l2_info(sd, "CC: %s\n", state->cc_enable ?
"enabled" : "disabled"); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops saa7127_core_ops = { .log_status = saa7127_log_status, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = saa7127_g_register, .s_register = saa7127_s_register, #endif }; static const struct v4l2_subdev_video_ops saa7127_video_ops = { .s_std_output = saa7127_s_std_output, .s_routing = saa7127_s_routing, .s_stream = saa7127_s_stream, }; static const struct v4l2_subdev_vbi_ops saa7127_vbi_ops = { .s_vbi_data = saa7127_s_vbi_data, .g_sliced_fmt = saa7127_g_sliced_fmt, }; static const struct v4l2_subdev_ops saa7127_ops = { .core = &saa7127_core_ops, .video = &saa7127_video_ops, .vbi = &saa7127_vbi_ops, }; /* ----------------------------------------------------------------------- */ static int saa7127_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct saa7127_state *state; struct v4l2_subdev *sd; struct v4l2_sliced_vbi_data vbi = { 0, 0, 0, 0 }; /* set to disabled */ /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_dbg(1, debug, client, "detecting saa7127 client on address 0x%x\n", client->addr << 1); state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &saa7127_ops); /* First test register 0: Bits 5-7 are a version ID (should be 0), and bit 2 should also be 0. This is rather general, so the second test is more specific and looks at the 'ending point of burst in clock cycles' which is 0x1d after a reset and not expected to ever change. 
*/ if ((saa7127_read(sd, 0) & 0xe4) != 0 || (saa7127_read(sd, 0x29) & 0x3f) != 0x1d) { v4l2_dbg(1, debug, sd, "saa7127 not found\n"); return -ENODEV; } if (id->driver_data) { /* Chip type is already known */ state->ident = id->driver_data; } else { /* Needs detection */ int read_result; /* Detect if it's an saa7129 */ read_result = saa7127_read(sd, SAA7129_REG_FADE_KEY_COL2); saa7127_write(sd, SAA7129_REG_FADE_KEY_COL2, 0xaa); if (saa7127_read(sd, SAA7129_REG_FADE_KEY_COL2) == 0xaa) { saa7127_write(sd, SAA7129_REG_FADE_KEY_COL2, read_result); state->ident = SAA7129; strlcpy(client->name, "saa7129", I2C_NAME_SIZE); } else { state->ident = SAA7127; strlcpy(client->name, "saa7127", I2C_NAME_SIZE); } } v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name, client->addr << 1, client->adapter->name); v4l2_dbg(1, debug, sd, "Configuring encoder\n"); saa7127_write_inittab(sd, saa7127_init_config_common); saa7127_set_std(sd, V4L2_STD_NTSC); saa7127_set_output_type(sd, SAA7127_OUTPUT_TYPE_BOTH); saa7127_set_vps(sd, &vbi); saa7127_set_wss(sd, &vbi); saa7127_set_cc(sd, &vbi); saa7127_set_xds(sd, &vbi); if (test_image == 1) /* The Encoder has an internal Colorbar generator */ /* This can be used for debugging */ saa7127_set_input_type(sd, SAA7127_INPUT_TYPE_TEST_IMAGE); else saa7127_set_input_type(sd, SAA7127_INPUT_TYPE_NORMAL); saa7127_set_video_enable(sd, 1); if (state->ident == SAA7129) saa7127_write_inittab(sd, saa7129_init_config_extra); return 0; } /* ----------------------------------------------------------------------- */ static int saa7127_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); /* Turn off TV output */ saa7127_set_video_enable(sd, 0); return 0; } /* ----------------------------------------------------------------------- */ static struct i2c_device_id saa7127_id[] = { { "saa7127_auto", 0 }, /* auto-detection */ { "saa7126", SAA7127 }, { "saa7127", SAA7127 }, { "saa7128", SAA7129 }, { 
"saa7129", SAA7129 }, { } }; MODULE_DEVICE_TABLE(i2c, saa7127_id); static struct i2c_driver saa7127_driver = { .driver = { .owner = THIS_MODULE, .name = "saa7127", }, .probe = saa7127_probe, .remove = saa7127_remove, .id_table = saa7127_id, }; module_i2c_driver(saa7127_driver);
gpl-2.0
TeamHackYU/SKernel_Yu
arch/powerpc/kernel/pci-common.c
1888
48525
/* * Contains common pci routines for ALL ppc platform * (based on pci_32.c and pci_64.c) * * Port for PPC64 David Engebretsen, IBM Corp. * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. * * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * Rework, based on alpha PCI code. * * Common pmac/prep/chrp pci routines. -- Cort * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/export.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/irq.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/vgaarb.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> #include <asm/eeh.h> static DEFINE_SPINLOCK(hose_spinlock); LIST_HEAD(hose_list); /* XXX kill that some day ... 
*/ static int global_phb_number; /* Global phb counter */ /* ISA Memory physical address */ resource_size_t isa_mem_base; static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; void set_pci_dma_ops(struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } EXPORT_SYMBOL(get_pci_dma_ops); struct pci_controller *pcibios_alloc_controller(struct device_node *dev) { struct pci_controller *phb; phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); if (phb == NULL) return NULL; spin_lock(&hose_spinlock); phb->global_number = global_phb_number++; list_add_tail(&phb->list_node, &hose_list); spin_unlock(&hose_spinlock); phb->dn = dev; phb->is_dynamic = mem_init_done; #ifdef CONFIG_PPC64 if (dev) { int nid = of_node_to_nid(dev); if (nid < 0 || !node_online(nid)) nid = -1; PHB_SET_NODE(phb, nid); } #endif return phb; } void pcibios_free_controller(struct pci_controller *phb) { spin_lock(&hose_spinlock); list_del(&phb->list_node); spin_unlock(&hose_spinlock); if (phb->is_dynamic) kfree(phb); } /* * The function is used to return the minimal alignment * for memory or I/O windows of the associated P2P bridge. * By default, 4KiB alignment for I/O windows and 1MiB for * memory windows. */ resource_size_t pcibios_window_alignment(struct pci_bus *bus, unsigned long type) { if (ppc_md.pcibios_window_alignment) return ppc_md.pcibios_window_alignment(bus, type); /* * PCI core will figure out the default * alignment: 4KiB for I/O and 1MiB for * memory window. 
*/
	return 1;
}

/*
 * Size of a host bridge's I/O window. On ppc64 the size is kept in the
 * controller itself; on ppc32 it is derived from the io_resource.
 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}

/*
 * Return 1 if the given virtual address lies inside the mapped I/O
 * window of any registered host bridge, 0 otherwise.
 */
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	/* hose_spinlock protects the global hose_list. */
	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

/*
 * Translate a CPU physical address into a logical port offset relative
 * to _IO_BASE. Returns ~0 if the address is not inside any host
 * bridge's I/O window.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
*/ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) { while(node) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) if (hose->dn == node) return hose; node = node->parent; } return NULL; } static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev; struct device_node *np; pdev = to_pci_dev (dev); np = pci_device_to_OF_node(pdev); if (np == NULL || np->full_name == NULL) return 0; return sprintf(buf, "%s", np->full_name); } static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); /* Add sysfs properties */ int pcibios_add_platform_entries(struct pci_dev *pdev) { return device_create_file(&pdev->dev, &dev_attr_devspec); } /* * Reads the interrupt pin to determine if interrupt is use by card. * If the interrupt is used, then gets the interrupt line from the * openfirmware and sets it in the pci_dev and pci_config line. */ static int pci_read_irq_line(struct pci_dev *pci_dev) { struct of_irq oirq; unsigned int virq; pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); #ifdef DEBUG memset(&oirq, 0xff, sizeof(oirq)); #endif /* Try to get a mapping from the device-tree */ if (of_irq_map_pci(pci_dev, &oirq)) { u8 line, pin; /* If that fails, lets fallback to what is in the config * space and map that through the default controller. We * also set the type to level low since that's what PCI * interrupts are. If your platform does differently, then * either provide a proper interrupt tree or don't use this * function. */ if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) return -1; if (pin == 0) return -1; if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || line == 0xff || line == 0) { return -1; } pr_debug(" No map ! 
Using line %d (pin %d) from PCI config\n", line, pin); virq = irq_create_mapping(NULL, line); if (virq != NO_IRQ) irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); } else { pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", oirq.size, oirq.specifier[0], oirq.specifier[1], of_node_full_name(oirq.controller)); virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size); } if(virq == NO_IRQ) { pr_debug(" Failed to map !\n"); return -1; } pr_debug(" Mapped to linux irq %d\n", virq); pci_dev->irq = virq; return 0; } /* * Platform support for /proc/bus/pci/X/Y mmap()s, * modelled on the sparc64 implementation by Dave Miller. * -- paulus. */ /* * Adjust vm_pgoff of VMA such that it is the physical page offset * corresponding to the 32-bit pci bus offset for DEV requested by the user. * * Basically, the user finds the base address for his device which he wishes * to mmap. They read the 32-bit value from the config space base register, * add whatever PAGE_SIZE multiple offset they wish, and feed this into the * offset parameter of mmap on /proc/bus/pci/XXX for that device. * * Returns negative error code on failure, zero on success. */ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, resource_size_t *offset, enum pci_mmap_state mmap_state) { struct pci_controller *hose = pci_bus_to_host(dev->bus); unsigned long io_offset = 0; int i, res_bit; if (hose == 0) return NULL; /* should never happen */ /* If memory, add on the PCI bridge address offset */ if (mmap_state == pci_mmap_mem) { #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ *offset += hose->pci_mem_offset; #endif res_bit = IORESOURCE_MEM; } else { io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; *offset += io_offset; res_bit = IORESOURCE_IO; } /* * Check that the offset requested corresponds to one of the * resources of the device. 
*/ for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &dev->resource[i]; int flags = rp->flags; /* treat ROM as memory (should be already) */ if (i == PCI_ROM_RESOURCE) flags |= IORESOURCE_MEM; /* Active and same type? */ if ((flags & res_bit) == 0) continue; /* In the range of this resource? */ if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) continue; /* found it! construct the final physical address */ if (mmap_state == pci_mmap_io) *offset += hose->io_base_phys - io_offset; return rp; } return NULL; } /* * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci * device mapping. */ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, pgprot_t protection, enum pci_mmap_state mmap_state, int write_combine) { /* Write combine is always 0 on non-memory space mappings. On * memory space, if the user didn't pass 1, we check for a * "prefetchable" resource. This is a bit hackish, but we use * this to workaround the inability of /sysfs to provide a write * combine bit */ if (mmap_state != pci_mmap_mem) write_combine = 0; else if (write_combine == 0) { if (rp->flags & IORESOURCE_PREFETCH) write_combine = 1; } /* XXX would be nice to have a way to ask for write-through */ if (write_combine) return pgprot_noncached_wc(protection); else return pgprot_noncached(protection); } /* * This one is used by /dev/mem and fbdev who have no clue about the * PCI device, it tries to find the PCI device first and calls the * above routine */ pgprot_t pci_phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t prot) { struct pci_dev *pdev = NULL; struct resource *found = NULL; resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; int i; if (page_is_ram(pfn)) return prot; prot = pgprot_noncached(prot); for_each_pci_dev(pdev) { for (i = 0; i <= PCI_ROM_RESOURCE; i++) { struct resource *rp = &pdev->resource[i]; int flags = rp->flags; /* Active and same type? 
*/ if ((flags & IORESOURCE_MEM) == 0) continue; /* In the range of this resource? */ if (offset < (rp->start & PAGE_MASK) || offset > rp->end) continue; found = rp; break; } if (found) break; } if (found) { if (found->flags & IORESOURCE_PREFETCH) prot = pgprot_noncached_wc(prot); pci_dev_put(pdev); } pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", (unsigned long long)offset, pgprot_val(prot)); return prot; } /* * Perform the actual remap of the pages for a PCI device mapping, as * appropriate for this architecture. The region in the process to map * is described by vm_start and vm_end members of VMA, the base physical * address is found in vm_pgoff. * The pci device structure is provided so that architectures may make mapping * decisions on a per-device or per-bus basis. * * Returns a negative error code on failure, zero on success. */ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { resource_size_t offset = ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; struct resource *rp; int ret; rp = __pci_mmap_make_offset(dev, &offset, mmap_state); if (rp == NULL) return -EINVAL; vma->vm_pgoff = offset >> PAGE_SHIFT; vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, vma->vm_page_prot, mmap_state, write_combine); ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); return ret; } /* This provides legacy IO read access on a bus */ int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) { unsigned long offset; struct pci_controller *hose = pci_bus_to_host(bus); struct resource *rp = &hose->io_resource; void __iomem *addr; /* Check if port can be supported by that bus. We only check * the ranges of the PHB though, not the bus itself as the rules * for forwarding legacy cycles down bridges are not our problem * here. So if the host bridge supports it, we do it. 
*/ offset = (unsigned long)hose->io_base_virt - _IO_BASE; offset += port; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (offset < rp->start || (offset + size) > rp->end) return -ENXIO; addr = hose->io_base_virt + port; switch(size) { case 1: *((u8 *)val) = in_8(addr); return 1; case 2: if (port & 1) return -EINVAL; *((u16 *)val) = in_le16(addr); return 2; case 4: if (port & 3) return -EINVAL; *((u32 *)val) = in_le32(addr); return 4; } return -EINVAL; } /* This provides legacy IO write access on a bus */ int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) { unsigned long offset; struct pci_controller *hose = pci_bus_to_host(bus); struct resource *rp = &hose->io_resource; void __iomem *addr; /* Check if port can be supported by that bus. We only check * the ranges of the PHB though, not the bus itself as the rules * for forwarding legacy cycles down bridges are not our problem * here. So if the host bridge supports it, we do it. */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; offset += port; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (offset < rp->start || (offset + size) > rp->end) return -ENXIO; addr = hose->io_base_virt + port; /* WARNING: The generic code is idiotic. 
It gets passed a pointer * to what can be a 1, 2 or 4 byte quantity and always reads that * as a u32, which means that we have to correct the location of * the data read within those 32 bits for size 1 and 2 */ switch(size) { case 1: out_8(addr, val >> 24); return 1; case 2: if (port & 1) return -EINVAL; out_le16(addr, val >> 16); return 2; case 4: if (port & 3) return -EINVAL; out_le32(addr, val); return 4; } return -EINVAL; } /* This provides legacy IO or memory mmap access on a bus */ int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { struct pci_controller *hose = pci_bus_to_host(bus); resource_size_t offset = ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; resource_size_t size = vma->vm_end - vma->vm_start; struct resource *rp; pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", pci_domain_nr(bus), bus->number, mmap_state == pci_mmap_mem ? "MEM" : "IO", (unsigned long long)offset, (unsigned long long)(offset + size - 1)); if (mmap_state == pci_mmap_mem) { /* Hack alert ! 
* * Because X is lame and can fail starting if it gets an error trying * to mmap legacy_mem (instead of just moving on without legacy memory * access) we fake it here by giving it anonymous memory, effectively * behaving just like /dev/zero */ if ((offset + size) > hose->isa_mem_size) { printk(KERN_DEBUG "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n", current->comm, current->pid, pci_domain_nr(bus), bus->number); if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); return 0; } offset += hose->isa_mem_phys; } else { unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; unsigned long roffset = offset + io_offset; rp = &hose->io_resource; if (!(rp->flags & IORESOURCE_IO)) return -ENXIO; if (roffset < rp->start || (roffset + size) > rp->end) return -ENXIO; offset += hose->io_base_phys; } pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); vma->vm_pgoff = offset >> PAGE_SHIFT; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); } void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end) { struct pci_controller *hose = pci_bus_to_host(dev->bus); resource_size_t offset = 0; if (hose == NULL) return; if (rsrc->flags & IORESOURCE_IO) offset = (unsigned long)hose->io_base_virt - _IO_BASE; /* We pass a fully fixed up address to userland for MMIO instead of * a BAR value because X is lame and expects to be able to use that * to pass to /dev/mem ! * * That means that we'll have potentially 64 bits values where some * userland apps only expect 32 (like X itself since it thinks only * Sparc has 64 bits MMIO) but if we don't do that, we break it on * 32 bits CHRPs :-( * * Hopefully, the sysfs insterface is immune to that gunk. 
Once X * has been fixed (and the fix spread enough), we can re-enable the * 2 lines below and pass down a BAR value to userland. In that case * we'll also have to re-enable the matching code in * __pci_mmap_make_offset(). * * BenH. */ #if 0 else if (rsrc->flags & IORESOURCE_MEM) offset = hose->pci_mem_offset; #endif *start = rsrc->start - offset; *end = rsrc->end - offset; } /** * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree * @hose: newly allocated pci_controller to be setup * @dev: device node of the host bridge * @primary: set if primary bus (32 bits only, soon to be deprecated) * * This function will parse the "ranges" property of a PCI host bridge device * node and setup the resource mapping of a pci controller based on its * content. * * Life would be boring if it wasn't for a few issues that we have to deal * with here: * * - We can only cope with one IO space range and up to 3 Memory space * ranges. However, some machines (thanks Apple !) tend to split their * space into lots of small contiguous ranges. So we have to coalesce. * * - Some busses have IO space not starting at 0, which causes trouble with * the way we do our IO resource renumbering. The code somewhat deals with * it for 64 bits but I would expect problems on 32 bits. * * - Some 32 bits platforms such as 4xx can have physical space larger than * 32 bits so we need to use 64 bits values for the parsing */ void pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int primary) { const u32 *ranges; int rlen; int pna = of_n_addr_cells(dev); int np = pna + 5; int memno = 0; u32 pci_space; unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; struct resource *res; printk(KERN_INFO "PCI host bridge %s %s ranges:\n", dev->full_name, primary ? 
"(primary)" : ""); /* Get ranges property */ ranges = of_get_property(dev, "ranges", &rlen); if (ranges == NULL) return; /* Parse it */ while ((rlen -= np * 4) >= 0) { /* Read next ranges element */ pci_space = ranges[0]; pci_addr = of_read_number(ranges + 1, 2); cpu_addr = of_translate_address(dev, ranges + 3); size = of_read_number(ranges + pna + 3, 2); ranges += np; /* If we failed translation or got a zero-sized region * (some FW try to feed us with non sensical zero sized regions * such as power3 which look like some kind of attempt at exposing * the VGA memory hole) */ if (cpu_addr == OF_BAD_ADDR || size == 0) continue; /* Now consume following elements while they are contiguous */ for (; rlen >= np * sizeof(u32); ranges += np, rlen -= np * 4) { if (ranges[0] != pci_space) break; pci_next = of_read_number(ranges + 1, 2); cpu_next = of_translate_address(dev, ranges + 3); if (pci_next != pci_addr + size || cpu_next != cpu_addr + size) break; size += of_read_number(ranges + pna + 3, 2); } /* Act based on address space type */ res = NULL; switch ((pci_space >> 24) & 0x3) { case 1: /* PCI IO space */ printk(KERN_INFO " IO 0x%016llx..0x%016llx -> 0x%016llx\n", cpu_addr, cpu_addr + size - 1, pci_addr); /* We support only one IO range */ if (hose->pci_io_size) { printk(KERN_INFO " \\--> Skipped (too many) !\n"); continue; } #ifdef CONFIG_PPC32 /* On 32 bits, limit I/O space to 16MB */ if (size > 0x01000000) size = 0x01000000; /* 32 bits needs to map IOs here */ hose->io_base_virt = ioremap(cpu_addr, size); /* Expect trouble if pci_addr is not 0 */ if (primary) isa_io_base = (unsigned long)hose->io_base_virt; #endif /* CONFIG_PPC32 */ /* pci_io_size and io_base_phys always represent IO * space starting at 0 so we factor in pci_addr */ hose->pci_io_size = pci_addr + size; hose->io_base_phys = cpu_addr - pci_addr; /* Build resource */ res = &hose->io_resource; res->flags = IORESOURCE_IO; res->start = pci_addr; break; case 2: /* PCI Memory space */ case 3: /* PCI 64 bits 
Memory space */ printk(KERN_INFO " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", cpu_addr, cpu_addr + size - 1, pci_addr, (pci_space & 0x40000000) ? "Prefetch" : ""); /* We support only 3 memory ranges */ if (memno >= 3) { printk(KERN_INFO " \\--> Skipped (too many) !\n"); continue; } /* Handles ISA memory hole space here */ if (pci_addr == 0) { if (primary || isa_mem_base == 0) isa_mem_base = cpu_addr; hose->isa_mem_phys = cpu_addr; hose->isa_mem_size = size; } /* Build resource */ hose->mem_offset[memno] = cpu_addr - pci_addr; res = &hose->mem_resources[memno++]; res->flags = IORESOURCE_MEM; if (pci_space & 0x40000000) res->flags |= IORESOURCE_PREFETCH; res->start = cpu_addr; break; } if (res != NULL) { res->name = dev->full_name; res->end = res->start + size - 1; res->parent = NULL; res->sibling = NULL; res->child = NULL; } } } /* Decide whether to display the domain number in /proc */ int pci_proc_domain(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS)) return 0; if (pci_has_flag(PCI_COMPAT_DOMAIN_0)) return hose->global_number != 0; return 1; } int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { if (ppc_md.pcibios_root_bridge_prepare) return ppc_md.pcibios_root_bridge_prepare(bridge); return 0; } /* This header fixup will do the resource fixup for all devices as they are * probed, but not for bridge ranges */ static void pcibios_fixup_resources(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); int i; if (!hose) { printk(KERN_ERR "No host bridge for PCI dev %s !\n", pci_name(dev)); return; } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { struct resource *res = dev->resource + i; struct pci_bus_region reg; if (!res->flags) continue; /* If we're going to re-assign everything, we mark all resources * as unset (and 0-base them). 
In addition, we mark BARs starting * at 0 as unset as well, except if PCI_PROBE_ONLY is also set * since in that case, we don't want to re-assign anything */ pcibios_resource_to_bus(dev, &reg, res); if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { /* Only print message if not re-assigning */ if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " "is unassigned\n", pci_name(dev), i, (unsigned long long)res->start, (unsigned long long)res->end, (unsigned int)res->flags); res->end -= res->start; res->start = 0; res->flags |= IORESOURCE_UNSET; continue; } pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n", pci_name(dev), i, (unsigned long long)res->start,\ (unsigned long long)res->end, (unsigned int)res->flags); } /* Call machine specific resource fixup */ if (ppc_md.pcibios_fixup_resources) ppc_md.pcibios_fixup_resources(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); /* This function tries to figure out if a bridge resource has been initialized * by the firmware or not. It doesn't have to be absolutely bullet proof, but * things go more smoothly when it gets it right. It should covers cases such * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges */ static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus, struct resource *res) { struct pci_controller *hose = pci_bus_to_host(bus); struct pci_dev *dev = bus->self; resource_size_t offset; struct pci_bus_region region; u16 command; int i; /* We don't do anything if PCI_PROBE_ONLY is set */ if (pci_has_flag(PCI_PROBE_ONLY)) return 0; /* Job is a bit different between memory and IO */ if (res->flags & IORESOURCE_MEM) { pcibios_resource_to_bus(dev, &region, res); /* If the BAR is non-0 then it's probably been initialized */ if (region.start != 0) return 0; /* The BAR is 0, let's check if memory decoding is enabled on * the bridge. 
If not, we consider it unassigned */ pci_read_config_word(dev, PCI_COMMAND, &command); if ((command & PCI_COMMAND_MEMORY) == 0) return 1; /* Memory decoding is enabled and the BAR is 0. If any of the bridge * resources covers that starting address (0 then it's good enough for * us for memory space) */ for (i = 0; i < 3; i++) { if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && hose->mem_resources[i].start == hose->mem_offset[i]) return 0; } /* Well, it starts at 0 and we know it will collide so we may as * well consider it as unassigned. That covers the Apple case. */ return 1; } else { /* If the BAR is non-0, then we consider it assigned */ offset = (unsigned long)hose->io_base_virt - _IO_BASE; if (((res->start - offset) & 0xfffffffful) != 0) return 0; /* Here, we are a bit different than memory as typically IO space * starting at low addresses -is- valid. What we do instead if that * we consider as unassigned anything that doesn't have IO enabled * in the PCI command register, and that's it. */ pci_read_config_word(dev, PCI_COMMAND, &command); if (command & PCI_COMMAND_IO) return 0; /* It's starting at 0 and IO is disabled in the bridge, consider * it unassigned */ return 1; } } /* Fixup resources of a PCI<->PCI bridge */ static void pcibios_fixup_bridge(struct pci_bus *bus) { struct resource *res; int i; struct pci_dev *dev = bus->self; pci_bus_for_each_resource(bus, res, i) { if (!res || !res->flags) continue; if (i >= 3 && bus->self->transparent) continue; /* If we're going to reassign everything, we can * shrink the P2P resource to have size as being * of 0 in order to save space. 
*/ if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { res->flags |= IORESOURCE_UNSET; res->start = 0; res->end = -1; continue; } pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n", pci_name(dev), i, (unsigned long long)res->start,\ (unsigned long long)res->end, (unsigned int)res->flags); /* Try to detect uninitialized P2P bridge resources, * and clear them out so they get re-assigned later */ if (pcibios_uninitialized_bridge_resource(bus, res)) { res->flags = 0; pr_debug("PCI:%s (unassigned)\n", pci_name(dev)); } } } void pcibios_setup_bus_self(struct pci_bus *bus) { /* Fix up the bus resources for P2P bridges */ if (bus->self != NULL) pcibios_fixup_bridge(bus); /* Platform specific bus fixups. This is currently only used * by fsl_pci and I'm hoping to get rid of it at some point */ if (ppc_md.pcibios_fixup_bus) ppc_md.pcibios_fixup_bus(bus); /* Setup bus DMA mappings */ if (ppc_md.pci_dma_bus_setup) ppc_md.pci_dma_bus_setup(bus); } static void pcibios_setup_device(struct pci_dev *dev) { /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init */ set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); /* Hook up default DMA ops */ set_dma_ops(&dev->dev, pci_dma_ops); set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); /* Additional platform DMA/iommu setup */ if (ppc_md.pci_dma_dev_setup) ppc_md.pci_dma_dev_setup(dev); /* Read default IRQs and fixup if necessary */ pci_read_irq_line(dev); if (ppc_md.pci_irq_fixup) ppc_md.pci_irq_fixup(dev); } int pcibios_add_device(struct pci_dev *dev) { /* * We can only call pcibios_setup_device() after bus setup is complete, * since some of the platform specific DMA setup code depends on it. */ if (dev->bus->is_added) pcibios_setup_device(dev); return 0; } void pcibios_setup_bus_devices(struct pci_bus *bus) { struct pci_dev *dev; pr_debug("PCI: Fixup bus devices %d (%s)\n", bus->number, bus->self ? 
pci_name(bus->self) : "PHB"); list_for_each_entry(dev, &bus->devices, bus_list) { /* Cardbus can call us to add new devices to a bus, so ignore * those who are already fully discovered */ if (dev->is_added) continue; pcibios_setup_device(dev); } } void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ } void pcibios_fixup_bus(struct pci_bus *bus) { /* When called from the generic PCI probe, read PCI<->PCI bridge * bases. This is -not- called when generating the PCI tree from * the OF device-tree. */ if (bus->self != NULL) pci_read_bridge_bases(bus); /* Now fixup the bus bus */ pcibios_setup_bus_self(bus); /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); } EXPORT_SYMBOL(pcibios_fixup_bus); void pci_fixup_cardbus(struct pci_bus *bus) { /* Now fixup devices on that bus */ pcibios_setup_bus_devices(bus); } static int skip_isa_ioresource_align(struct pci_dev *dev) { if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) && !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA)) return 1; return 0; } /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. 
 */
/*
 * Align an IO resource to dodge the ISA alias window described above:
 * keep IO allocations out of the 0x100-0x3ff range modulo 0x400.
 * Returns the (possibly bumped-up) start address to use; memory
 * resources are returned unchanged.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;	/* opaque cookie is the pci_dev */
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		/* Platform may opt out of ISA alignment entirely */
		if (skip_isa_ioresource_align(dev))
			return start;
		/* In the 0x100-0x3ff alias window: round up to next 0x400 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 *
 * Returns 0 on success, -1 if a conflicting child is not completely
 * contained in res (or if nothing conflicted at all).
 */
static int reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	/* Walk parent's child list looking for entries overlapping res */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into the tree in place of the conflicting span */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	/* Re-home each captured child under res */
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}

/*
 * Handle resources of PCI devices. If the world were perfect, we could
 * just allocate all the resource regions and do nothing more. It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals. So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
*
 * Known BIOS problems we have to work around:
 * - I/O or memory regions not configured
 * - regions configured, but not enabled in the command register
 * - bogus I/O addresses above 64K used
 * - expansion ROMs left enabled (this may sound harmless, but given
 * the fact the PCI specs explicitly allow address decoders to be
 * shared between expansion ROMs and other resource regions, it's
 * at least dangerous)
 *
 * Our solution:
 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
 * This gives us fixed barriers on where we can allocate.
 * (2) Allocate resources for all enabled devices. If there is
 * a collision, just mark the resource as unallocated. Also
 * disable expansion ROMs during this step.
 * (3) Try to allocate resources for disabled devices. If the
 * resources were assigned correctly, everything goes well,
 * if they weren't, they won't disturb allocation of other
 * resources.
 * (4) Assign new addresses to resources which were either
 * not configured at all or misconfigured. If explicitly
 * requested by the user, configure expansion ROM address
 * as well.
 */

/*
 * Recursively claim/clear the bridge windows of @bus and all child
 * buses: valid firmware-assigned windows are requested from the parent
 * resource (reparenting conflicting siblings if needed); anything
 * unset or unclaimable is zeroed so it gets reassigned later.
 */
void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset at this point, we clear it */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		/* Root buses allocate straight from the global trees */
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent -- paulus
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
			 "[0x%x], parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;
		}
		pr_warning("PCI: Cannot allocate resource region "
			   "%d of PCI bridge %d, will remap\n", i, bus->number);
	clear_resource:
		/* The resource might be figured out when doing
		 * reassignment based on the resources required
		 * by the downstream PCI devices. Here we set
		 * the size of the resource to be 0 in order to
		 * save more space.
		 */
		res->start = 0;
		res->end = -1;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

/*
 * Try to claim one device BAR from its parent resource; on failure,
 * mark it IORESOURCE_UNSET and 0-base it so it is reassigned later.
 */
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
		       " of device %s, will remap\n", idx, pci_name(dev));
		if (pr)
			pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}

/*
 * Two-pass BAR allocation: pass 0 claims BARs of devices whose decode
 * is enabled in the command register, pass 1 claims the rest. Pass 0
 * also switches off any enabled expansion ROM decoder.
 */
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* We only allocate ROMs on pass 1 just in case they
			 * have been screwed up by firmware
			 */
			if (idx == PCI_ROM_RESOURCE )
				disabled = 1;
			/* NOTE(review): the assignment above is dead -- it is
			 * unconditionally overwritten by the if/else below, so
			 * ROM BARs are actually gated on PCI_COMMAND_MEMORY
			 * like any other memory BAR, contradicting the comment
			 * above. Confirm intent before changing behavior.
			 */
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}

/*
 * Reserve the legacy IO page (ports 0x000-0xfff) and, if a host bridge
 * window covers it, the legacy VGA memory hole (0xa0000-0xbffff), so
 * later assignment does not allocate on top of them.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		offset = hose->mem_offset[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;		/* no window covers the VGA hole */
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}

/*
 * Top-level resource survey: claim firmware-assigned resources, reserve
 * legacy regions, then (unless PCI_PROBE_ONLY) assign whatever is left
 * unassigned and run the platform fixup hook.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);
	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Before we start assigning unassigned resource, we try to reserve
	 * the low IO area and the VGA memory area if they intersect the
	 * bus available resources to avoid allocating things on top of them
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		list_for_each_entry(b, &pci_root_buses, node)
			pcibios_reserve_legacy_regions(b);
	}

	/* Now, if the platform didn't decide to blindly trust the firmware,
	 * we proceed to assigning things that were left unassigned
	 */
	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pr_debug("PCI: Assigning unassigned resources...\n");
		pci_assign_unassigned_resources();
	}

	/* Call machine dependent fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();
}

/* This is used by the PCI hotplug driver to allocate resource
 * of newly plugged busses. We can try to consolidate with the
 * rest of the code later, for now, keep it as-is as our main
 * resource allocation function doesn't deal with sub-trees yet.
 */
/*
 * Claim every already-assigned resource of every device on @bus and,
 * recursively, on all child buses. Resources that are unassigned
 * (start 0 or no flags) or already claimed are skipped.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: "
				 "Resource %d: %016llx..%016llx [%x]\n",
				 pci_name(dev), i,
				 (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);

			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}

/* pcibios_finish_adding_to_bus
 *
 * This is to be called by the hotplug code after devices have been
 * added to a bus, this include calling it for a PHB that is just
 * being added
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Fixup EEH */
	eeh_add_device_tree_late(bus);

	/* Add new devices to global lists. Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* sysfs files should only be added after devices are added */
	eeh_add_sysfs_files(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);

/*
 * Enable a device's resources, giving the platform hook a chance to
 * veto first. Returns -EINVAL on veto, otherwise the result of
 * pci_enable_resources().
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	if (ppc_md.pcibios_enable_device_hook)
		if (ppc_md.pcibios_enable_device_hook(dev))
			return -EINVAL;

	return pci_enable_resources(dev, mask);
}

/* Offset between CPU-side IO addresses and PCI IO addresses for @hose */
resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
{
	return (unsigned long) hose->io_base_virt - _IO_BASE;
}

/*
 * Hook the PHB's IO window and up-to-3 memory windows onto @resources
 * (with their CPU->PCI offsets) for root-bus creation. Missing IO or
 * memory window 0 is reported but not fatal.
 */
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	struct resource *res;
	resource_size_t offset;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		printk(KERN_WARNING "PCI: I/O resource not set for host"
		       " bridge %s (domain %d)\n",
		       hose->dn->full_name, hose->global_number);
	} else {
		offset = pcibios_io_space_offset(hose);

		pr_debug("PCI: PHB IO resource = %08llx-%08llx [%lx] off 0x%08llx\n",
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags,
			 (unsigned long long)offset);
		pci_add_resource_offset(resources, res, offset);
	}

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i == 0)
				printk(KERN_ERR "PCI: Memory resource 0 not set for "
				       "host bridge %s (domain %d)\n",
				       hose->dn->full_name, hose->global_number);
			continue;
		}
		offset = hose->mem_offset[i];

		pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n",
			 i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags,
			 (unsigned long long)offset);

		pci_add_resource_offset(resources, res, offset);
	}
}

/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
/* NOTE(review): this macro appears unused in the visible portion of the
 * file -- the null_* accessors below are written out by hand. Verify
 * before removing.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

/* Stub config read: always reports "no such device" */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

/* Stub config write: always reports "no such device" */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops =
{
	.read = null_read_config,
	.write = null_write_config,
};

/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	/* Single static bus reused for all early accesses (pre-SMP boot) */
	static struct pci_bus bus;

	if (hose == 0) {
		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
	}
	bus.number = busnr;
	bus.sysdata = hose;
	/* Fall back to the null ops so accesses fail cleanly without a hose */
	bus.ops = hose? hose->ops: &null_pci_ops;
	return &bus;
}

/* Early config accessors routed through the fake bus above */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

extern int pci_bus_find_capability (struct pci_bus *bus,
				    unsigned int devfn, int cap);

/* Early capability lookup, likewise via the fake bus */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}

struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	/* Caller gets a reference; it must of_node_put() when done */
	return of_node_get(hose->dn);
}

/**
 * pcibios_scan_phb - Given a pci_controller, setup and scan the PCI bus
 * @hose: Pointer to the PCI host controller instance structure
 */
void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;
	int mode;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	/* Get some IO space for the new PHB */
	pcibios_setup_phb_io_space(hose);

	/* Wire up PHB bus resources */
	pcibios_setup_phb_resources(hose, &resources);

	hose->busn.start = hose->first_busno;
	hose->busn.end = hose->last_busno;
	hose->busn.flags = IORESOURCE_BUS;
	pci_add_resource(&resources, &hose->busn);

	/* Create an empty bus for the toplevel */
	bus = pci_create_root_bus(hose->parent, hose->first_busno,
				  hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	hose->bus = bus;

	/* Get probe mode and perform scan */
	mode = PCI_PROBE_NORMAL;
	if (node && ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	pr_debug(" probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);

	if (mode == PCI_PROBE_NORMAL) {
		/* Widen the bus range for the scan, then trim it back */
		pci_bus_update_busn_res_end(bus, 255);
		hose->last_busno = pci_scan_child_bus(bus);
		pci_bus_update_busn_res_end(bus, hose->last_busno);
	}

	/* Platform gets a chance to do some global fixups before
	 * we proceed to resource allocation
	 */
	if (ppc_md.pcibios_fixup_phb)
		ppc_md.pcibios_fixup_phb(hose);

	/* Configure PCI Express settings */
	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node) {
			struct pci_dev *self = child->self;
			if (!self)
				continue;
			pcie_bus_configure_settings(child, self->pcie_mpss);
		}
	}
}

/*
 * Hide the BARs of a root-complex-class FSL host device (configured as
 * agent) so the generic code does not try to manage them.
 */
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* When configured as agent, programing interface = 1 */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
		(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
		(prog_if == 0) &&
		(dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID,
			 fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
			 fixup_hide_host_resource_fsl);

/*
 * Make the first VGA device with IO/memory decode enabled (or, failing
 * that, the first VGA device seen) the default VGA device.
 */
static void fixup_vga(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
		vga_set_default_device(pdev);

}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
gpl-2.0
fedux/linux
drivers/gpu/host1x/drm/hdmi.c
2144
36284
/* * Copyright (C) 2012 Avionic Design GmbH * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/hdmi.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/clk/tegra.h> #include <drm/drm_edid.h> #include "hdmi.h" #include "drm.h" #include "dc.h" #include "host1x_client.h" struct tegra_hdmi { struct host1x_client client; struct tegra_output output; struct device *dev; struct regulator *vdd; struct regulator *pll; void __iomem *regs; unsigned int irq; struct clk *clk_parent; struct clk *clk; unsigned int audio_source; unsigned int audio_freq; bool stereo; bool dvi; struct drm_info_list *debugfs_files; struct drm_minor *minor; struct dentry *debugfs; }; static inline struct tegra_hdmi * host1x_client_to_hdmi(struct host1x_client *client) { return container_of(client, struct tegra_hdmi, client); } static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output) { return container_of(output, struct tegra_hdmi, output); } #define HDMI_AUDIOCLK_FREQ 216000000 #define HDMI_REKEY_DEFAULT 56 enum { AUTO = 0, SPDIF, HDA, }; static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi, unsigned long reg) { return readl(hdmi->regs + (reg << 2)); } static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val, unsigned long reg) { writel(val, hdmi->regs + (reg << 2)); } struct tegra_hdmi_audio_config { unsigned int pclk; unsigned int n; unsigned int cts; unsigned int aval; }; static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = { { 25200000, 4096, 25200, 24000 }, { 27000000, 4096, 27000, 24000 }, { 74250000, 4096, 74250, 24000 }, { 148500000, 4096, 148500, 24000 }, 
{ 0, 0, 0, 0 }, }; static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = { { 25200000, 5880, 26250, 25000 }, { 27000000, 5880, 28125, 25000 }, { 74250000, 4704, 61875, 20000 }, { 148500000, 4704, 123750, 20000 }, { 0, 0, 0, 0 }, }; static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = { { 25200000, 6144, 25200, 24000 }, { 27000000, 6144, 27000, 24000 }, { 74250000, 6144, 74250, 24000 }, { 148500000, 6144, 148500, 24000 }, { 0, 0, 0, 0 }, }; static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = { { 25200000, 11760, 26250, 25000 }, { 27000000, 11760, 28125, 25000 }, { 74250000, 9408, 61875, 20000 }, { 148500000, 9408, 123750, 20000 }, { 0, 0, 0, 0 }, }; static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = { { 25200000, 12288, 25200, 24000 }, { 27000000, 12288, 27000, 24000 }, { 74250000, 12288, 74250, 24000 }, { 148500000, 12288, 148500, 24000 }, { 0, 0, 0, 0 }, }; static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = { { 25200000, 23520, 26250, 25000 }, { 27000000, 23520, 28125, 25000 }, { 74250000, 18816, 61875, 20000 }, { 148500000, 18816, 123750, 20000 }, { 0, 0, 0, 0 }, }; static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = { { 25200000, 24576, 25200, 24000 }, { 27000000, 24576, 27000, 24000 }, { 74250000, 24576, 74250, 24000 }, { 148500000, 24576, 148500, 24000 }, { 0, 0, 0, 0 }, }; struct tmds_config { unsigned int pclk; u32 pll0; u32 pll1; u32 pe_current; u32 drive_current; }; static const struct tmds_config tegra2_tmds_config[] = { { /* slow pixel clock modes */ .pclk = 27000000, .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(3), .pll1 = SOR_PLL_TMDS_TERM_ENABLE, .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | PE_CURRENT1(PE_CURRENT_0_0_mA) | PE_CURRENT2(PE_CURRENT_0_0_mA) | PE_CURRENT3(PE_CURRENT_0_0_mA), .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | 
DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), }, { /* high pixel clock modes */ .pclk = UINT_MAX, .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(3), .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) | PE_CURRENT1(PE_CURRENT_6_0_mA) | PE_CURRENT2(PE_CURRENT_6_0_mA) | PE_CURRENT3(PE_CURRENT_6_0_mA), .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) | DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) | DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) | DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA), }, }; static const struct tmds_config tegra3_tmds_config[] = { { /* 480p modes */ .pclk = 27000000, .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) | SOR_PLL_TX_REG_LOAD(0), .pll1 = SOR_PLL_TMDS_TERM_ENABLE, .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) | PE_CURRENT1(PE_CURRENT_0_0_mA) | PE_CURRENT2(PE_CURRENT_0_0_mA) | PE_CURRENT3(PE_CURRENT_0_0_mA), .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), }, { /* 720p modes */ .pclk = 74250000, .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) | SOR_PLL_TX_REG_LOAD(0), .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | PE_CURRENT1(PE_CURRENT_5_0_mA) | PE_CURRENT2(PE_CURRENT_5_0_mA) | PE_CURRENT3(PE_CURRENT_5_0_mA), .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), }, { /* 1080p modes */ .pclk = UINT_MAX, .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) | SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) | 
SOR_PLL_TX_REG_LOAD(0), .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN, .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) | PE_CURRENT1(PE_CURRENT_5_0_mA) | PE_CURRENT2(PE_CURRENT_5_0_mA) | PE_CURRENT3(PE_CURRENT_5_0_mA), .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) | DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA), }, }; static const struct tegra_hdmi_audio_config * tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) { const struct tegra_hdmi_audio_config *table; switch (audio_freq) { case 32000: table = tegra_hdmi_audio_32k; break; case 44100: table = tegra_hdmi_audio_44_1k; break; case 48000: table = tegra_hdmi_audio_48k; break; case 88200: table = tegra_hdmi_audio_88_2k; break; case 96000: table = tegra_hdmi_audio_96k; break; case 176400: table = tegra_hdmi_audio_176_4k; break; case 192000: table = tegra_hdmi_audio_192k; break; default: return NULL; } while (table->pclk) { if (table->pclk == pclk) return table; table++; } return NULL; } static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) { const unsigned int freqs[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 }; unsigned int i; for (i = 0; i < ARRAY_SIZE(freqs); i++) { unsigned int f = freqs[i]; unsigned int eight_half; unsigned long value; unsigned int delta; if (f > 96000) delta = 2; else if (f > 480000) delta = 6; else delta = 9; eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128); value = AUDIO_FS_LOW(eight_half - delta) | AUDIO_FS_HIGH(eight_half + delta); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i)); } } static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) { struct device_node *node = hdmi->dev->of_node; const struct tegra_hdmi_audio_config *config; unsigned int offset = 0; unsigned long value; switch (hdmi->audio_source) { case HDA: value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL; break; case SPDIF: value = 
AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; break; default: value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; break; } if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) | AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); } else { value |= AUDIO_CNTRL0_INJECT_NULLSMPL; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) | AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); } config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk); if (!config) { dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n", hdmi->audio_freq, pclk); return -EINVAL; } tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL); value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE | AUDIO_N_VALUE(config->n - 1); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); value = ACR_SUBPACK_CTS(config->cts); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE); value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N); value &= ~AUDIO_N_RESETF; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { switch (hdmi->audio_freq) { case 32000: offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320; break; case 44100: offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441; break; case 48000: offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480; break; case 88200: offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882; break; case 96000: offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960; break; case 176400: offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764; break; case 192000: offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920; break; } tegra_hdmi_writel(hdmi, config->aval, offset); } 
tegra_hdmi_setup_audio_fs_tables(hdmi); return 0; } static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size) { unsigned long value = 0; size_t i; for (i = size; i > 0; i--) value = (value << 8) | ptr[i - 1]; return value; } static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data, size_t size) { const u8 *ptr = data; unsigned long offset; unsigned long value; size_t i, j; switch (ptr[0]) { case HDMI_INFOFRAME_TYPE_AVI: offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER; break; case HDMI_INFOFRAME_TYPE_AUDIO: offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER; break; case HDMI_INFOFRAME_TYPE_VENDOR: offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER; break; default: dev_err(hdmi->dev, "unsupported infoframe type: %02x\n", ptr[0]); return; } value = INFOFRAME_HEADER_TYPE(ptr[0]) | INFOFRAME_HEADER_VERSION(ptr[1]) | INFOFRAME_HEADER_LEN(ptr[2]); tegra_hdmi_writel(hdmi, value, offset); offset++; /* * Each subpack contains 7 bytes, divided into: * - subpack_low: bytes 0 - 3 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00) */ for (i = 3, j = 0; i < size; i += 7, j += 8) { size_t rem = size - i, num = min_t(size_t, rem, 4); value = tegra_hdmi_subpack(&ptr[i], num); tegra_hdmi_writel(hdmi, value, offset++); num = min_t(size_t, rem - num, 3); value = tegra_hdmi_subpack(&ptr[i + 4], num); tegra_hdmi_writel(hdmi, value, offset++); } } static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi, struct drm_display_mode *mode) { struct hdmi_avi_infoframe frame; u8 buffer[17]; ssize_t err; if (hdmi->dvi) { tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); return; } err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (err < 0) { dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err); return; } err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err); return; } tegra_hdmi_write_infopack(hdmi, buffer, err); 
tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); } static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) { struct hdmi_audio_infoframe frame; u8 buffer[14]; ssize_t err; if (hdmi->dvi) { tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); return; } err = hdmi_audio_infoframe_init(&frame); if (err < 0) { dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n", err); return; } frame.channels = 2; err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n", err); return; } /* * The audio infoframe has only one set of subpack registers, so the * infoframe needs to be truncated. One set of subpack registers can * contain 7 bytes. Including the 3 byte header only the first 10 * bytes can be programmed. */ tegra_hdmi_write_infopack(hdmi, buffer, min(10, err)); tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); } static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) { struct hdmi_vendor_infoframe frame; unsigned long value; u8 buffer[10]; ssize_t err; if (!hdmi->stereo) { value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); value &= ~GENERIC_CTRL_ENABLE; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); return; } memset(&frame, 0, sizeof(frame)); frame.type = HDMI_INFOFRAME_TYPE_VENDOR; frame.version = 0x01; frame.length = 6; frame.data[0] = 0x03; /* regid0 */ frame.data[1] = 0x0c; /* regid1 */ frame.data[2] = 0x00; /* regid2 */ frame.data[3] = 0x02 << 5; /* video format */ /* TODO: 74 MHz limit? */ if (1) { frame.data[4] = 0x00 << 4; /* 3D structure */ } else { frame.data[4] = 0x08 << 4; /* 3D structure */ frame.data[5] = 0x00 << 4; /* 3D ext. 
data */ } err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n", err); return; } tegra_hdmi_write_infopack(hdmi, buffer, err); value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); value |= GENERIC_CTRL_ENABLE; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); } static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi, const struct tmds_config *tmds) { unsigned long value; tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0); tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1); tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT); value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); } static int tegra_output_hdmi_enable(struct tegra_output *output) { unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc); struct drm_display_mode *mode = &dc->base.mode; struct tegra_hdmi *hdmi = to_hdmi(output); struct device_node *node = hdmi->dev->of_node; unsigned int pulse_start, div82, pclk; const struct tmds_config *tmds; unsigned int num_tmds; unsigned long value; int retries = 1000; int err; pclk = mode->clock * 1000; h_sync_width = mode->hsync_end - mode->hsync_start; h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; err = regulator_enable(hdmi->vdd); if (err < 0) { dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err); return err; } err = regulator_enable(hdmi->pll); if (err < 0) { dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err); return err; } /* * This assumes that the display controller will divide its parent * clock by 2 to generate the pixel clock. 
*/ err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2); if (err < 0) { dev_err(hdmi->dev, "failed to setup clock: %d\n", err); return err; } err = clk_set_rate(hdmi->clk, pclk); if (err < 0) return err; err = clk_enable(hdmi->clk); if (err < 0) { dev_err(hdmi->dev, "failed to enable clock: %d\n", err); return err; } tegra_periph_reset_assert(hdmi->clk); usleep_range(1000, 2000); tegra_periph_reset_deassert(hdmi->clk); tegra_dc_writel(dc, VSYNC_H_POSITION(1), DC_DISP_DISP_TIMING_OPTIONS); tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888, DC_DISP_DISP_COLOR_CONTROL); /* video_preamble uses h_pulse2 */ pulse_start = 1 + h_sync_width + h_back_porch - 10; tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0); value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE | PULSE_LAST_END_A; tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL); value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8); tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A); value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) | VSYNC_WINDOW_ENABLE; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); if (dc->pipe) value = HDMI_SRC_DISPLAYB; else value = HDMI_SRC_DISPLAYA; if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) || (mode->vdisplay == 576))) tegra_hdmi_writel(hdmi, value | ARM_VIDEO_RANGE_FULL, HDMI_NV_PDISP_INPUT_CONTROL); else tegra_hdmi_writel(hdmi, value | ARM_VIDEO_RANGE_LIMITED, HDMI_NV_PDISP_INPUT_CONTROL); div82 = clk_get_rate(hdmi->clk) / 1000000 * 4; value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK); if (!hdmi->dvi) { err = tegra_hdmi_setup_audio(hdmi, pclk); if (err < 0) hdmi->dvi = true; } if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) { /* * TODO: add ELD support */ } rekey = HDMI_REKEY_DEFAULT; value = HDMI_CTRL_REKEY(rekey); value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch + h_front_porch - 
rekey - 18) / 32); if (!hdmi->dvi) value |= HDMI_CTRL_ENABLE; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL); if (hdmi->dvi) tegra_hdmi_writel(hdmi, 0x0, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); else tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); tegra_hdmi_setup_avi_infoframe(hdmi, mode); tegra_hdmi_setup_audio_infoframe(hdmi); tegra_hdmi_setup_stereo_infoframe(hdmi); /* TMDS CONFIG */ if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { num_tmds = ARRAY_SIZE(tegra3_tmds_config); tmds = tegra3_tmds_config; } else { num_tmds = ARRAY_SIZE(tegra2_tmds_config); tmds = tegra2_tmds_config; } for (i = 0; i < num_tmds; i++) { if (pclk <= tmds[i].pclk) { tegra_hdmi_setup_tmds(hdmi, &tmds[i]); break; } } tegra_hdmi_writel(hdmi, SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_PU_PC_ALT(0) | SOR_SEQ_PD_PC(8) | SOR_SEQ_PD_PC_ALT(8), HDMI_NV_PDISP_SOR_SEQ_CTL); value = SOR_SEQ_INST_WAIT_TIME(1) | SOR_SEQ_INST_WAIT_UNITS_VSYNC | SOR_SEQ_INST_HALT | SOR_SEQ_INST_PIN_A_LOW | SOR_SEQ_INST_PIN_B_LOW | SOR_SEQ_INST_DRIVE_PWM_OUT_LO; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0)); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8)); value = 0x1c800; value &= ~SOR_CSTM_ROTCLK(~0); value |= SOR_CSTM_ROTCLK(2); tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM); tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND); tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); /* start SOR */ tegra_hdmi_writel(hdmi, SOR_PWR_NORMAL_STATE_PU | SOR_PWR_NORMAL_START_NORMAL | SOR_PWR_SAFE_STATE_PD | SOR_PWR_SETTING_NEW_TRIGGER, HDMI_NV_PDISP_SOR_PWR); tegra_hdmi_writel(hdmi, SOR_PWR_NORMAL_STATE_PU | SOR_PWR_NORMAL_START_NORMAL | SOR_PWR_SAFE_STATE_PD | SOR_PWR_SETTING_NEW_DONE, HDMI_NV_PDISP_SOR_PWR); do { BUG_ON(--retries < 0); value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR); } while (value & SOR_PWR_SETTING_NEW_PENDING); value = SOR_STATE_ASY_CRCMODE_COMPLETE 
| SOR_STATE_ASY_OWNER_HEAD0 | SOR_STATE_ASY_SUBOWNER_BOTH | SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A | SOR_STATE_ASY_DEPOL_POS; /* setup sync polarities */ if (mode->flags & DRM_MODE_FLAG_PHSYNC) value |= SOR_STATE_ASY_HSYNCPOL_POS; if (mode->flags & DRM_MODE_FLAG_NHSYNC) value |= SOR_STATE_ASY_HSYNCPOL_NEG; if (mode->flags & DRM_MODE_FLAG_PVSYNC) value |= SOR_STATE_ASY_VSYNCPOL_POS; if (mode->flags & DRM_MODE_FLAG_NVSYNC) value |= SOR_STATE_ASY_VSYNCPOL_NEG; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2); value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL; tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0); tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED, HDMI_NV_PDISP_SOR_STATE1); tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0); tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS); value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE; tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL); value = DISP_CTRL_MODE_C_DISPLAY; tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND); tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); /* TODO: add HDCP support */ return 0; } static int tegra_output_hdmi_disable(struct tegra_output *output) { struct tegra_hdmi *hdmi = to_hdmi(output); tegra_periph_reset_assert(hdmi->clk); clk_disable(hdmi->clk); regulator_disable(hdmi->pll); regulator_disable(hdmi->vdd); return 0; } static int tegra_output_hdmi_setup_clock(struct tegra_output *output, struct clk *clk, unsigned long pclk) { struct tegra_hdmi *hdmi = to_hdmi(output); struct clk *base; int err; err = clk_set_parent(clk, hdmi->clk_parent); if (err < 0) { dev_err(output->dev, "failed to set parent: %d\n", err); return err; } base = clk_get_parent(hdmi->clk_parent); /* * This assumes that the 
parent clock is pll_d_out0 or pll_d2_out * respectively, each of which divides the base pll_d by 2. */ err = clk_set_rate(base, pclk * 2); if (err < 0) dev_err(output->dev, "failed to set base clock rate to %lu Hz\n", pclk * 2); return 0; } static int tegra_output_hdmi_check_mode(struct tegra_output *output, struct drm_display_mode *mode, enum drm_mode_status *status) { struct tegra_hdmi *hdmi = to_hdmi(output); unsigned long pclk = mode->clock * 1000; struct clk *parent; long err; parent = clk_get_parent(hdmi->clk_parent); err = clk_round_rate(parent, pclk * 4); if (err < 0) *status = MODE_NOCLOCK; else *status = MODE_OK; return 0; } static const struct tegra_output_ops hdmi_ops = { .enable = tegra_output_hdmi_enable, .disable = tegra_output_hdmi_disable, .setup_clock = tegra_output_hdmi_setup_clock, .check_mode = tegra_output_hdmi_check_mode, }; static int tegra_hdmi_show_regs(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct tegra_hdmi *hdmi = node->info_ent->data; #define DUMP_REG(name) \ seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \ tegra_hdmi_readl(hdmi, name)) DUMP_REG(HDMI_CTXSW); DUMP_REG(HDMI_NV_PDISP_SOR_STATE0); DUMP_REG(HDMI_NV_PDISP_SOR_STATE1); DUMP_REG(HDMI_NV_PDISP_SOR_STATE2); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB); 
DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB); DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS); DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER); DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW); 
DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW); DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH); DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL); DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT); DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW); DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL); DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS); DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK); DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1); DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2); DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0); DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1); DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA); DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE); DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1); DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2); DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL); DUMP_REG(HDMI_NV_PDISP_SOR_CAP); DUMP_REG(HDMI_NV_PDISP_SOR_PWR); DUMP_REG(HDMI_NV_PDISP_SOR_TEST); DUMP_REG(HDMI_NV_PDISP_SOR_PLL0); DUMP_REG(HDMI_NV_PDISP_SOR_PLL1); DUMP_REG(HDMI_NV_PDISP_SOR_PLL2); DUMP_REG(HDMI_NV_PDISP_SOR_CSTM); DUMP_REG(HDMI_NV_PDISP_SOR_LVDS); DUMP_REG(HDMI_NV_PDISP_SOR_CRCA); DUMP_REG(HDMI_NV_PDISP_SOR_CRCB); DUMP_REG(HDMI_NV_PDISP_SOR_BLANK); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13)); 
DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14)); DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15)); DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0); DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1); DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0); DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1); DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0); DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1); DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0); DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1); DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0); DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1); DUMP_REG(HDMI_NV_PDISP_SOR_TRIG); DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK); DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT); DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0); DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1); DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2); DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0)); DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1)); DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2)); DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3)); DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4)); DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5)); DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6)); DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH); DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD); DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0); DUMP_REG(HDMI_NV_PDISP_AUDIO_N); DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING); DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK); DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL); DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL); DUMP_REG(HDMI_NV_PDISP_SCRATCH); DUMP_REG(HDMI_NV_PDISP_PE_CURRENT); DUMP_REG(HDMI_NV_PDISP_KEY_CTRL); DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0); DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1); DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2); DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0); DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1); DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2); DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3); DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG); DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX); DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); #undef DUMP_REG return 0; } static struct drm_info_list debugfs_files[] = { { "regs", tegra_hdmi_show_regs, 0, NULL }, }; static int 
tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi, struct drm_minor *minor)
{
	/*
	 * Create the per-HDMI debugfs directory and register the shared
	 * debugfs_files table (each entry pointed back at this hdmi).
	 * On any failure everything created so far is torn down again.
	 */
	unsigned int i;
	int err;

	hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
	if (!hdmi->debugfs)
		return -ENOMEM;

	/* private copy of the template so .data can differ per instance */
	hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
				      GFP_KERNEL);
	if (!hdmi->debugfs_files) {
		err = -ENOMEM;
		goto remove;
	}

	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
		hdmi->debugfs_files[i].data = hdmi;

	err = drm_debugfs_create_files(hdmi->debugfs_files,
				       ARRAY_SIZE(debugfs_files),
				       hdmi->debugfs, minor);
	if (err < 0)
		goto free;

	hdmi->minor = minor;

	return 0;

free:
	kfree(hdmi->debugfs_files);
	hdmi->debugfs_files = NULL;
remove:
	debugfs_remove(hdmi->debugfs);
	hdmi->debugfs = NULL;

	return err;
}

/* Undo tegra_hdmi_debugfs_init(): unregister files, free the copy,
 * remove the directory.  Always returns 0. */
static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
{
	drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
				 hdmi->minor);
	hdmi->minor = NULL;

	kfree(hdmi->debugfs_files);
	hdmi->debugfs_files = NULL;

	debugfs_remove(hdmi->debugfs);
	hdmi->debugfs = NULL;

	return 0;
}

/* host1x client callback: attach this HDMI output to the DRM device. */
static int tegra_hdmi_drm_init(struct host1x_client *client,
			       struct drm_device *drm)
{
	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
	int err;

	hdmi->output.type = TEGRA_OUTPUT_HDMI;
	hdmi->output.dev = client->dev;
	hdmi->output.ops = &hdmi_ops;

	err = tegra_output_init(drm, &hdmi->output);
	if (err < 0) {
		dev_err(client->dev, "output setup failed: %d\n", err);
		return err;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
		/* debugfs is best-effort: failure is logged, not fatal */
		if (err < 0)
			dev_err(client->dev, "debugfs setup failed: %d\n", err);
	}

	return 0;
}

/* host1x client callback: detach from DRM; reverse of tegra_hdmi_drm_init. */
static int tegra_hdmi_drm_exit(struct host1x_client *client)
{
	struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
	int err;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_hdmi_debugfs_exit(hdmi);
		if (err < 0)
			dev_err(client->dev, "debugfs cleanup failed: %d\n",
				err);
	}

	err = tegra_output_disable(&hdmi->output);
	if (err < 0) {
		dev_err(client->dev, "output failed to disable: %d\n", err);
		return err;
	}

	err = tegra_output_exit(&hdmi->output);
	if (err < 0) {
		dev_err(client->dev, "output cleanup failed: %d\n", err);
		return err;
	}

	return 0;
}

static const struct host1x_client_ops hdmi_client_ops = {
	.drm_init = tegra_hdmi_drm_init,
	.drm_exit = tegra_hdmi_drm_exit,
};

/*
 * Platform probe: gather clocks, regulators, registers and IRQ, then
 * register as a host1x client.  Resources use devm_* so the error paths
 * only need plain returns (the prepared clocks are dropped in remove()).
 */
static int tegra_hdmi_probe(struct platform_device *pdev)
{
	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
	struct tegra_hdmi *hdmi;
	struct resource *regs;
	int err;

	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
	if (!hdmi)
		return -ENOMEM;

	hdmi->dev = &pdev->dev;
	hdmi->audio_source = AUTO;
	hdmi->audio_freq = 44100;
	hdmi->stereo = false;
	hdmi->dvi = false;

	hdmi->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdmi->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(hdmi->clk);
	}

	err = clk_prepare(hdmi->clk);
	if (err < 0)
		return err;

	hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
	if (IS_ERR(hdmi->clk_parent))
		return PTR_ERR(hdmi->clk_parent);

	err = clk_prepare(hdmi->clk_parent);
	if (err < 0)
		return err;

	err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
		return err;
	}

	hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(hdmi->vdd)) {
		dev_err(&pdev->dev, "failed to get VDD regulator\n");
		return PTR_ERR(hdmi->vdd);
	}

	hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
	if (IS_ERR(hdmi->pll)) {
		dev_err(&pdev->dev, "failed to get PLL regulator\n");
		return PTR_ERR(hdmi->pll);
	}

	hdmi->output.dev = &pdev->dev;

	err = tegra_output_parse_dt(&hdmi->output);
	if (err < 0)
		return err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	hdmi->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(hdmi->regs))
		return PTR_ERR(hdmi->regs);

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;

	hdmi->irq = err;

	hdmi->client.ops = &hdmi_client_ops;
	INIT_LIST_HEAD(&hdmi->client.list);
	hdmi->client.dev = &pdev->dev;

	err = host1x_register_client(host1x, &hdmi->client);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
			err);
		return err;
	}

	platform_set_drvdata(pdev, hdmi);

	return 0;
}

/* Platform remove: unregister the host1x client and drop clock prepares. */
static int tegra_hdmi_remove(struct platform_device *pdev)
{
	struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
	struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
	int err;

	err = host1x_unregister_client(host1x, &hdmi->client);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	clk_unprepare(hdmi->clk_parent);
	clk_unprepare(hdmi->clk);

	return 0;
}

static struct of_device_id tegra_hdmi_of_match[] = {
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ },
};

struct platform_driver tegra_hdmi_driver = {
	.driver = {
		.name = "tegra-hdmi",
		.owner = THIS_MODULE,
		.of_match_table = tegra_hdmi_of_match,
	},
	.probe = tegra_hdmi_probe,
	.remove = tegra_hdmi_remove,
};
gpl-2.0
Bauuuuu/android_kernel_zte_nx512j
sound/soc/kirkwood/kirkwood-i2s.c
2144
14838
/*
 * kirkwood-i2s.c
 *
 * (c) 2010 Arnaud Patard <apatard@mandriva.com>
 * (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <linux/platform_data/asoc-kirkwood.h>
#include "kirkwood.h"

#define DRV_NAME	"kirkwood-i2s"

/* Rates the internal DCO can generate; anything else needs "extclk". */
#define KIRKWOOD_I2S_RATES \
	(SNDRV_PCM_RATE_44100 | \
	 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000)
#define KIRKWOOD_I2S_FORMATS \
	(SNDRV_PCM_FMTBIT_S16_LE | \
	 SNDRV_PCM_FMTBIT_S24_LE | \
	 SNDRV_PCM_FMTBIT_S32_LE)

/*
 * Program the I2S justification (I2S / left-justified / right-justified)
 * from the DAI format.  The same mode is written to both the playback
 * and the record control registers, as mismatched settings cause trouble.
 *
 * Returns 0 on success, -EINVAL for unsupported formats.
 */
static int kirkwood_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
		unsigned int fmt)
{
	struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(cpu_dai);
	unsigned long mask;
	unsigned long value;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_RIGHT_J:
		mask = KIRKWOOD_I2S_CTL_RJ;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		mask = KIRKWOOD_I2S_CTL_LJ;
		break;
	case SND_SOC_DAIFMT_I2S:
		mask = KIRKWOOD_I2S_CTL_I2S;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Set same format for playback and record
	 * This avoids some troubles.
	 */
	value = readl(priv->io + KIRKWOOD_I2S_PLAYCTL);
	value &= ~KIRKWOOD_I2S_CTL_JUST_MASK;
	value |= mask;
	writel(value, priv->io + KIRKWOOD_I2S_PLAYCTL);

	value = readl(priv->io + KIRKWOOD_I2S_RECCTL);
	value &= ~KIRKWOOD_I2S_CTL_JUST_MASK;
	value |= mask;
	writel(value, priv->io + KIRKWOOD_I2S_RECCTL);

	return 0;
}

/*
 * Program the internal DCO for one of its three supported rates and
 * busy-wait (with cpu_relax) until the hardware reports the DCO locked.
 * Unknown rates fall back to the 44100 setting (the "default" label).
 */
static inline void kirkwood_set_dco(void __iomem *io, unsigned long rate)
{
	unsigned long value;

	value = KIRKWOOD_DCO_CTL_OFFSET_0;
	switch (rate) {
	default:
	case 44100:
		value |= KIRKWOOD_DCO_CTL_FREQ_11;
		break;
	case 48000:
		value |= KIRKWOOD_DCO_CTL_FREQ_12;
		break;
	case 96000:
		value |= KIRKWOOD_DCO_CTL_FREQ_24;
		break;
	}
	writel(value, io + KIRKWOOD_DCO_CTL);

	/* wait for dco locked */
	do {
		cpu_relax();
		value = readl(io + KIRKWOOD_DCO_SPCR_STATUS);
		value &= KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK;
	} while (value == 0);
}

/*
 * Select the MCLK source for @rate: the internal DCO for its native
 * rates, otherwise the optional external clock at 256*fs.
 *
 * Fix vs. the original: if the rate is not DCO-capable and no external
 * clock is available, bail out instead of writing an *uninitialized*
 * clks_ctrl value to the clock control register (hw_params() rate
 * constraints should prevent this, but don't rely on it).
 */
static void kirkwood_set_rate(struct snd_soc_dai *dai,
	struct kirkwood_dma_data *priv, unsigned long rate)
{
	uint32_t clks_ctrl;

	if (rate == 44100 || rate == 48000 || rate == 96000) {
		/* use internal dco for supported rates */
		dev_dbg(dai->dev, "%s: dco set rate = %lu\n",
			__func__, rate);
		kirkwood_set_dco(priv->io, rate);

		clks_ctrl = KIRKWOOD_MCLK_SOURCE_DCO;
	} else if (!IS_ERR(priv->extclk)) {
		/* use optional external clk for other rates */
		dev_dbg(dai->dev, "%s: extclk set rate = %lu -> %lu\n",
			__func__, rate, 256 * rate);
		clk_set_rate(priv->extclk, 256 * rate);

		clks_ctrl = KIRKWOOD_MCLK_SOURCE_EXTCLK;
	} else {
		/* unsupported rate and no external clock: do nothing */
		return;
	}
	writel(clks_ctrl, priv->io + KIRKWOOD_CLOCKS_CTRL);
}

/* Stream open: hand the DMA data to the PCM side via the DAI. */
static int kirkwood_i2s_startup(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);

	snd_soc_dai_set_dma_data(dai, substream, priv);

	return 0;
}

/*
 * Configure sample rate and sample size for one direction.  The size
 * bits are cached in priv->ctl_play/ctl_rec so the trigger callback can
 * write them atomically together with the enable bits.
 */
static int kirkwood_i2s_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params,
				 struct snd_soc_dai *dai)
{
	struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
	uint32_t ctl_play, ctl_rec;
	unsigned int i2s_reg;
	unsigned long i2s_value;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		i2s_reg = KIRKWOOD_I2S_PLAYCTL;
	else
		i2s_reg = KIRKWOOD_I2S_RECCTL;

	kirkwood_set_rate(dai, priv, params_rate(params));

	i2s_value = readl(priv->io + i2s_reg);
	i2s_value &= ~KIRKWOOD_I2S_CTL_SIZE_MASK;

	/*
	 * Size settings in play/rec i2s control regs and play/rec control
	 * regs must be the same.
	 */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		i2s_value |= KIRKWOOD_I2S_CTL_SIZE_16;
		ctl_play = KIRKWOOD_PLAYCTL_SIZE_16_C |
			   KIRKWOOD_PLAYCTL_I2S_EN;
		ctl_rec = KIRKWOOD_RECCTL_SIZE_16_C |
			  KIRKWOOD_RECCTL_I2S_EN;
		break;
	/*
	 * doesn't work... S20_3LE != kirkwood 20bit format ?
	 *
	case SNDRV_PCM_FORMAT_S20_3LE:
		i2s_value |= KIRKWOOD_I2S_CTL_SIZE_20;
		ctl_play = KIRKWOOD_PLAYCTL_SIZE_20 |
			   KIRKWOOD_PLAYCTL_I2S_EN;
		ctl_rec = KIRKWOOD_RECCTL_SIZE_20 |
			  KIRKWOOD_RECCTL_I2S_EN;
		break;
	*/
	case SNDRV_PCM_FORMAT_S24_LE:
		i2s_value |= KIRKWOOD_I2S_CTL_SIZE_24;
		ctl_play = KIRKWOOD_PLAYCTL_SIZE_24 |
			   KIRKWOOD_PLAYCTL_I2S_EN;
		ctl_rec = KIRKWOOD_RECCTL_SIZE_24 |
			  KIRKWOOD_RECCTL_I2S_EN;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		i2s_value |= KIRKWOOD_I2S_CTL_SIZE_32;
		ctl_play = KIRKWOOD_PLAYCTL_SIZE_32 |
			   KIRKWOOD_PLAYCTL_I2S_EN;
		ctl_rec = KIRKWOOD_RECCTL_SIZE_32 |
			  KIRKWOOD_RECCTL_I2S_EN;
		break;
	default:
		return -EINVAL;
	}

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* mono playback is duplicated to both channels */
		if (params_channels(params) == 1)
			ctl_play |= KIRKWOOD_PLAYCTL_MONO_BOTH;
		else
			ctl_play |= KIRKWOOD_PLAYCTL_MONO_OFF;

		priv->ctl_play &= ~(KIRKWOOD_PLAYCTL_MONO_MASK |
				    KIRKWOOD_PLAYCTL_I2S_EN |
				    KIRKWOOD_PLAYCTL_SPDIF_EN |
				    KIRKWOOD_PLAYCTL_SIZE_MASK);
		priv->ctl_play |= ctl_play;
	} else {
		priv->ctl_rec &= ~KIRKWOOD_RECCTL_SIZE_MASK;
		priv->ctl_rec |= ctl_rec;
	}

	writel(i2s_value, priv->io + i2s_reg);

	return 0;
}

/* Playback trigger: start/stop/pause/resume the playback DMA engine. */
static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
				int cmd, struct snd_soc_dai *dai)
{
	struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
	uint32_t ctl, value;

	ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
	if (ctl & KIRKWOOD_PLAYCTL_PAUSE) {
		unsigned timeout = 5000;
		/*
		 * The Armada510 spec says that if we enter pause mode, the
		 * busy bit must be read back as clear _twice_.  Make sure
		 * we respect that otherwise we get DMA underruns.
		 */
		do {
			value = ctl;
			ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
			if (!((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY))
				break;
			udelay(1);
		} while (timeout--);

		if ((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY)
			dev_notice(dai->dev, "timed out waiting for busy to deassert: %08x\n",
				   ctl);
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* configure */
		ctl = priv->ctl_play;
		value = ctl & ~(KIRKWOOD_PLAYCTL_I2S_EN |
				KIRKWOOD_PLAYCTL_SPDIF_EN);
		writel(value, priv->io + KIRKWOOD_PLAYCTL);

		/* enable interrupts */
		value = readl(priv->io + KIRKWOOD_INT_MASK);
		value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES;
		writel(value, priv->io + KIRKWOOD_INT_MASK);

		/* enable playback */
		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
		/* stop audio, disable interrupts */
		ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);

		value = readl(priv->io + KIRKWOOD_INT_MASK);
		value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES;
		writel(value, priv->io + KIRKWOOD_INT_MASK);

		/* disable all playbacks */
		ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
		break;

	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
		break;

	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
		writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Capture trigger: start/stop/pause/resume the record DMA engine. */
static int kirkwood_i2s_rec_trigger(struct snd_pcm_substream *substream,
				int cmd, struct snd_soc_dai *dai)
{
	struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
	uint32_t ctl, value;

	value = readl(priv->io + KIRKWOOD_RECCTL);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* configure */
		ctl = priv->ctl_rec;
		value = ctl & ~KIRKWOOD_RECCTL_I2S_EN;
		writel(value, priv->io + KIRKWOOD_RECCTL);

		/* enable interrupts */
		value = readl(priv->io + KIRKWOOD_INT_MASK);
		value |= KIRKWOOD_INT_CAUSE_REC_BYTES;
		writel(value, priv->io + KIRKWOOD_INT_MASK);

		/* enable record */
		writel(ctl, priv->io + KIRKWOOD_RECCTL);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
		/* stop audio, disable interrupts */
		value = readl(priv->io + KIRKWOOD_RECCTL);
		value |= KIRKWOOD_RECCTL_PAUSE | KIRKWOOD_RECCTL_MUTE;
		writel(value, priv->io + KIRKWOOD_RECCTL);

		value = readl(priv->io + KIRKWOOD_INT_MASK);
		value &= ~KIRKWOOD_INT_CAUSE_REC_BYTES;
		writel(value, priv->io + KIRKWOOD_INT_MASK);

		/* disable all records */
		value = readl(priv->io + KIRKWOOD_RECCTL);
		value &= ~(KIRKWOOD_RECCTL_I2S_EN | KIRKWOOD_RECCTL_SPDIF_EN);
		writel(value, priv->io + KIRKWOOD_RECCTL);
		break;

	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		value = readl(priv->io + KIRKWOOD_RECCTL);
		value |= KIRKWOOD_RECCTL_PAUSE | KIRKWOOD_RECCTL_MUTE;
		writel(value, priv->io + KIRKWOOD_RECCTL);
		break;

	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		value = readl(priv->io + KIRKWOOD_RECCTL);
		value &= ~(KIRKWOOD_RECCTL_PAUSE | KIRKWOOD_RECCTL_MUTE);
		writel(value, priv->io + KIRKWOOD_RECCTL);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Dispatch the trigger to the playback or capture handler.
 * (The dead trailing "return 0;" of the original has been dropped.) */
static int kirkwood_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
			       struct snd_soc_dai *dai)
{
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		return kirkwood_i2s_play_trigger(substream, cmd, dai);
	else
		return kirkwood_i2s_rec_trigger(substream, cmd, dai);
}

/*
 * DAI probe: put the controller into a known-quiet state — clear and
 * mask all interrupts, poke the (undocumented) register at offset
 * 0x1200, and disable both playback and record paths.
 */
static int kirkwood_i2s_probe(struct snd_soc_dai *dai)
{
	struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
	unsigned long value;
	unsigned int reg_data;

	/* put system in a "safe" state : */
	/* disable audio interrupts */
	writel(0xffffffff, priv->io + KIRKWOOD_INT_CAUSE);
	writel(0, priv->io + KIRKWOOD_INT_MASK);

	/*
	 * NOTE(review): 0x1200 / 0x333FF8 / 0x111D18 are undocumented,
	 * board-specific magic values carried over as-is; the write is
	 * repeated after a 500 ms settle delay.  Meaning unverified.
	 */
	reg_data = readl(priv->io + 0x1200);
	reg_data &= (~(0x333FF8));
	reg_data |= 0x111D18;
	writel(reg_data, priv->io + 0x1200);

	msleep(500);

	reg_data = readl(priv->io + 0x1200);
	reg_data &= (~(0x333FF8));
	reg_data |= 0x111D18;
	writel(reg_data, priv->io + 0x1200);

	/* disable playback/record */
	value = readl(priv->io + KIRKWOOD_PLAYCTL);
	value &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
	writel(value, priv->io + KIRKWOOD_PLAYCTL);

	value = readl(priv->io + KIRKWOOD_RECCTL);
	value &= ~(KIRKWOOD_RECCTL_I2S_EN | KIRKWOOD_RECCTL_SPDIF_EN);
	writel(value, priv->io + KIRKWOOD_RECCTL);

	return 0;
}

/* DAI remove: nothing to undo; probe only quiesced hardware. */
static int kirkwood_i2s_remove(struct snd_soc_dai *dai)
{
	return 0;
}

static const struct snd_soc_dai_ops kirkwood_i2s_dai_ops = {
	.startup	= kirkwood_i2s_startup,
	.trigger	= kirkwood_i2s_trigger,
	.hw_params	= kirkwood_i2s_hw_params,
	.set_fmt	= kirkwood_i2s_set_fmt,
};

/* DAI restricted to the internal DCO rates (no external clock). */
static struct snd_soc_dai_driver kirkwood_i2s_dai = {
	.probe = kirkwood_i2s_probe,
	.remove = kirkwood_i2s_remove,
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = KIRKWOOD_I2S_RATES,
		.formats = KIRKWOOD_I2S_FORMATS,
	},
	.capture = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = KIRKWOOD_I2S_RATES,
		.formats = KIRKWOOD_I2S_FORMATS,
	},
	.ops = &kirkwood_i2s_dai_ops,
};

/* DAI used when an external clock is present: nearly any rate works. */
static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk = {
	.probe = kirkwood_i2s_probe,
	.remove = kirkwood_i2s_remove,
	.playback = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_192000 |
			 SNDRV_PCM_RATE_CONTINUOUS |
			 SNDRV_PCM_RATE_KNOT,
		.formats = KIRKWOOD_I2S_FORMATS,
	},
	.capture = {
		.channels_min = 1,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_192000 |
			 SNDRV_PCM_RATE_CONTINUOUS |
			 SNDRV_PCM_RATE_KNOT,
		.formats = KIRKWOOD_I2S_FORMATS,
	},
	.ops = &kirkwood_i2s_dai_ops,
};

static const struct snd_soc_component_driver kirkwood_i2s_component = {
	.name		= DRV_NAME,
};

/*
 * Platform probe: map registers, grab the IRQ and clocks, pick the
 * wide-rate DAI when a usable "extclk" exists, set powerup-matching
 * defaults and register the ASoC component.
 */
static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
{
	struct kirkwood_asoc_platform_data *data = pdev->dev.platform_data;
	struct snd_soc_dai_driver *soc_dai = &kirkwood_i2s_dai;
	struct kirkwood_dma_data *priv;
	struct resource *mem;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "allocation failed\n");
		return -ENOMEM;
	}
	dev_set_drvdata(&pdev->dev, priv);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->io = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(priv->io))
		return PTR_ERR(priv->io);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq <= 0) {
		dev_err(&pdev->dev, "platform_get_irq failed\n");
		return -ENXIO;
	}

	if (!data) {
		dev_err(&pdev->dev, "no platform data ?!\n");
		return -EINVAL;
	}

	priv->burst = data->burst;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "no clock\n");
		return PTR_ERR(priv->clk);
	}

	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	/* "extclk" is optional; reject it if it aliases the main clock */
	priv->extclk = clk_get(&pdev->dev, "extclk");
	if (!IS_ERR(priv->extclk)) {
		if (priv->extclk == priv->clk) {
			clk_put(priv->extclk);
			priv->extclk = ERR_PTR(-EINVAL);
		} else {
			dev_info(&pdev->dev, "found external clock\n");
			clk_prepare_enable(priv->extclk);
			soc_dai = &kirkwood_i2s_dai_extclk;
		}
	}

	/* Some sensible defaults - this reflects the powerup values */
	priv->ctl_play = KIRKWOOD_PLAYCTL_SIZE_24;
	priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;

	/* Select the burst size */
	if (data->burst == 32) {
		priv->ctl_play |= KIRKWOOD_PLAYCTL_BURST_32;
		priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_32;
	} else {
		priv->ctl_play |= KIRKWOOD_PLAYCTL_BURST_128;
		priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_128;
	}

	err = snd_soc_register_component(&pdev->dev, &kirkwood_i2s_component,
					 soc_dai, 1);
	if (!err)
		return 0;
	dev_err(&pdev->dev, "snd_soc_register_component failed\n");

	/* unwind the clocks we enabled above */
	if (!IS_ERR(priv->extclk)) {
		clk_disable_unprepare(priv->extclk);
		clk_put(priv->extclk);
	}
	clk_disable_unprepare(priv->clk);

	return err;
}

/* Platform remove: unregister the component and release the clocks. */
static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
{
	struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);

	snd_soc_unregister_component(&pdev->dev);

	if (!IS_ERR(priv->extclk)) {
		clk_disable_unprepare(priv->extclk);
		clk_put(priv->extclk);
	}
	clk_disable_unprepare(priv->clk);

	return 0;
}

static struct platform_driver kirkwood_i2s_driver = {
	.probe  = kirkwood_i2s_dev_probe,
	.remove = kirkwood_i2s_dev_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

module_platform_driver(kirkwood_i2s_driver);

/* Module information */
MODULE_AUTHOR("Arnaud Patard, <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("Kirkwood I2S SoC Interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:kirkwood-i2s");
gpl-2.0
livlogik/Evil_Yummy_Gumdrop--Tmo-V10-Kernel
drivers/usb/host/fhci-hcd.c
2400
19502
/*
 * Freescale QUICC Engine USB Host Controller Driver
 *
 * Copyright (c) Freescale Semicondutor, Inc. 2006.
 *               Shlomi Gridish <gridish@freescale.com>
 *               Jerry Huang <Chang-Ming.Huang@freescale.com>
 * Copyright (c) Logic Product Development, Inc. 2007
 *               Peter Barada <peterb@logicpd.com>
 * Copyright (c) MontaVista Software, Inc. 2008.
 *               Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <asm/qe.h>
#include <asm/fsl_gtm.h>
#include "fhci.h"

/* Reset the frame counter and (re)enable the start-of-frame timer. */
void fhci_start_sof_timer(struct fhci_hcd *fhci)
{
	fhci_dbg(fhci, "-> %s\n", __func__);

	/* clear frame_n */
	out_be16(&fhci->pram->frame_num, 0);

	out_be16(&fhci->regs->usb_ussft, 0);
	setbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);

	fhci_dbg(fhci, "<- %s\n", __func__);
}

/* Disable the start-of-frame timer and stop the backing GTM timer. */
void fhci_stop_sof_timer(struct fhci_hcd *fhci)
{
	fhci_dbg(fhci, "-> %s\n", __func__);

	clrbits8(&fhci->regs->usb_usmod, USB_MODE_SFTE);
	gtm_stop_timer16(fhci->timer);

	fhci_dbg(fhci, "<- %s\n", __func__);
}

/* Current SOF timer count, scaled by 12 (timer ticks per bit time). */
u16 fhci_get_sof_timer_count(struct fhci_usb *usb)
{
	return be16_to_cpu(in_be16(&usb->fhci->regs->usb_ussft) / 12);
}

/* initialize the endpoint zero */
static u32 endpoint_zero_init(struct fhci_usb *usb,
			      enum fhci_mem_alloc data_mem,
			      u32 ring_len)
{
	u32 rc;

	rc = fhci_create_ep(usb, data_mem, ring_len);
	if (rc)
		return rc;

	/* inilialize endpoint registers */
	fhci_init_ep_registers(usb, usb->ep0, data_mem);

	return 0;
}

/* enable the USB interrupts */
void fhci_usb_enable_interrupt(struct fhci_usb *usb)
{
	struct fhci_hcd *fhci = usb->fhci;

	/*
	 * Nesting scheme: disable() increments intr_nesting_cnt and
	 * enable() decrements it; interrupts are only really re-enabled
	 * when the count drops from 1 back to 0.
	 */
	if (usb->intr_nesting_cnt == 1) {
		/* initialize the USB interrupt */
		enable_irq(fhci_to_hcd(fhci)->irq);

		/* initialize the event register and mask register */
		out_be16(&usb->fhci->regs->usb_usber, 0xffff);
		out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);

		/* enable the timer interrupts */
		enable_irq(fhci->timer->irq);
	} else if (usb->intr_nesting_cnt > 1)
		fhci_info(fhci, "unbalanced USB interrupts nesting\n");
	usb->intr_nesting_cnt--;
}

/* disable the usb interrupt */
void fhci_usb_disable_interrupt(struct fhci_usb *usb)
{
	struct fhci_hcd *fhci = usb->fhci;

	/* only the outermost disable actually masks the hardware */
	if (usb->intr_nesting_cnt == 0) {
		/* disable the timer interrupt */
		disable_irq_nosync(fhci->timer->irq);

		/* disable the usb interrupt */
		disable_irq_nosync(fhci_to_hcd(fhci)->irq);
		out_be16(&usb->fhci->regs->usb_usbmr, 0);
	}
	usb->intr_nesting_cnt++;
}

/* enable the USB controller */
static u32 fhci_usb_enable(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb = fhci->usb_lld;

	out_be16(&usb->fhci->regs->usb_usber, 0xffff);
	out_be16(&usb->fhci->regs->usb_usbmr, usb->saved_msk);
	setbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);

	/* let the controller settle after enabling */
	mdelay(100);

	return 0;
}

/* disable the USB controller */
static u32 fhci_usb_disable(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb = fhci->usb_lld;

	fhci_usb_disable_interrupt(usb);
	fhci_port_disable(fhci);

	/* disable the usb controller */
	if (usb->port_status == FHCI_PORT_FULL ||
			usb->port_status == FHCI_PORT_LOW)
		fhci_device_disconnected_interrupt(fhci);

	clrbits8(&usb->fhci->regs->usb_usmod, USB_MODE_EN);

	return 0;
}

/* check the bus state by polling the QE bit on the IO ports */
int fhci_ioports_check_bus_state(struct fhci_hcd *fhci)
{
	u8 bits = 0;

	/* check USBOE,if transmitting,exit */
	if (!gpio_get_value(fhci->gpios[GPIO_USBOE]))
		return -1;

	/* check USBRP */
	if (gpio_get_value(fhci->gpios[GPIO_USBRP]))
		bits |= 0x2;

	/* check USBRN */
	if (gpio_get_value(fhci->gpios[GPIO_USBRN]))
		bits |= 0x1;

	return bits;
}

/* Free the ED/TD pools, the virtual root hub and the transfer lists. */
static void fhci_mem_free(struct fhci_hcd *fhci)
{
	struct ed *ed;
	struct ed *next_ed;
	struct td *td;
	struct td *next_td;

	list_for_each_entry_safe(ed, next_ed, &fhci->empty_eds, node) {
		list_del(&ed->node);
		kfree(ed);
	}

	list_for_each_entry_safe(td, next_td, &fhci->empty_tds, node) {
		list_del(&td->node);
		kfree(td);
	}

	kfree(fhci->vroot_hub);
	fhci->vroot_hub = NULL;

	kfree(fhci->hc_list);
	fhci->hc_list = NULL;
}

/* Allocate transfer lists, the virtual root hub and pre-allocated
 * pools of MAX_TDS TDs and MAX_EDS EDs.  On failure everything
 * allocated so far is freed and -ENOMEM is returned. */
static int fhci_mem_init(struct fhci_hcd *fhci)
{
	int i;

	fhci->hc_list = kzalloc(sizeof(*fhci->hc_list), GFP_KERNEL);
	if (!fhci->hc_list)
		goto err;

	INIT_LIST_HEAD(&fhci->hc_list->ctrl_list);
	INIT_LIST_HEAD(&fhci->hc_list->bulk_list);
	INIT_LIST_HEAD(&fhci->hc_list->iso_list);
	INIT_LIST_HEAD(&fhci->hc_list->intr_list);
	INIT_LIST_HEAD(&fhci->hc_list->done_list);

	fhci->vroot_hub = kzalloc(sizeof(*fhci->vroot_hub), GFP_KERNEL);
	if (!fhci->vroot_hub)
		goto err;

	INIT_LIST_HEAD(&fhci->empty_eds);
	INIT_LIST_HEAD(&fhci->empty_tds);

	/* initialize work queue to handle done list */
	fhci_tasklet.data = (unsigned long)fhci;
	fhci->process_done_task = &fhci_tasklet;

	for (i = 0; i < MAX_TDS; i++) {
		struct td *td;

		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td)
			goto err;
		fhci_recycle_empty_td(fhci, td);
	}
	for (i = 0; i < MAX_EDS; i++) {
		struct ed *ed;

		ed = kmalloc(sizeof(*ed), GFP_KERNEL);
		if (!ed)
			goto err;
		fhci_recycle_empty_ed(fhci, ed);
	}

	fhci->active_urbs = 0;
	return 0;
err:
	fhci_mem_free(fhci);
	return -ENOMEM;
}

/* destroy the fhci_usb structure */
static void fhci_usb_free(void *lld)
{
	struct fhci_usb *usb = lld;
	struct fhci_hcd *fhci;

	if (usb) {
		fhci = usb->fhci;
		fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);
		fhci_ep0_free(usb);
		kfree(usb->actual_frame);
		kfree(usb);
	}
}

/* initialize the USB */
static int fhci_usb_init(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb = fhci->usb_lld;

	memset_io(usb->fhci->pram, 0, FHCI_PRAM_SIZE);

	usb->port_status = FHCI_PORT_DISABLED;
	usb->max_frame_usage = FRAME_TIME_USAGE;
	usb->sw_transaction_time = SW_FIX_TIME_BETWEEN_TRANSACTION;

	usb->actual_frame = kzalloc(sizeof(*usb->actual_frame), GFP_KERNEL);
	if (!usb->actual_frame) {
		fhci_usb_free(usb);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&usb->actual_frame->tds_list);

	/* initializing registers on chip, clear frame number */
	out_be16(&fhci->pram->frame_num, 0);

	/* clear rx state */
	out_be32(&fhci->pram->rx_state, 0);

	/* set mask register */
	usb->saved_msk = (USB_E_TXB_MASK |
			  USB_E_TXE1_MASK |
			  USB_E_IDLE_MASK |
			  USB_E_RESET_MASK | USB_E_SFT_MASK | USB_E_MSF_MASK);

	/* operate in host mode; enabled here, masked until fhci_start() */
	out_8(&usb->fhci->regs->usb_usmod, USB_MODE_HOST | USB_MODE_EN);

	/* clearing the mask register */
	out_be16(&usb->fhci->regs->usb_usbmr, 0);

	/* initialing the event register */
	out_be16(&usb->fhci->regs->usb_usber, 0xffff);

	if (endpoint_zero_init(usb, DEFAULT_DATA_MEM, DEFAULT_RING_LEN) != 0) {
		fhci_usb_free(usb);
		return -EINVAL;
	}

	return 0;
}

/* initialize the fhci_usb struct and the corresponding data staruct */
static struct fhci_usb *fhci_create_lld(struct fhci_hcd *fhci)
{
	struct fhci_usb *usb;

	/* allocate memory for SCC data structure */
	usb = kzalloc(sizeof(*usb), GFP_KERNEL);
	if (!usb) {
		fhci_err(fhci, "no memory for SCC data struct\n");
		return NULL;
	}

	usb->fhci = fhci;
	usb->hc_list = fhci->hc_list;
	usb->vroot_hub = fhci->vroot_hub;

	usb->transfer_confirm = fhci_transfer_confirm_callback;

	return usb;
}

/* hc_driver .start: allocate pools, create and init the low-level
 * driver, set up the virtual root hub and enable the controller. */
static int fhci_start(struct usb_hcd *hcd)
{
	int ret;
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);

	ret = fhci_mem_init(fhci);
	if (ret) {
		fhci_err(fhci, "failed to allocate memory\n");
		goto err;
	}

	fhci->usb_lld = fhci_create_lld(fhci);
	if (!fhci->usb_lld) {
		fhci_err(fhci, "low level driver config failed\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = fhci_usb_init(fhci);
	if (ret) {
		fhci_err(fhci, "low level driver initialize failed\n");
		goto err;
	}

	spin_lock_init(&fhci->lock);

	/* connect the virtual root hub */
	fhci->vroot_hub->dev_num = 1;	/* this field may be needed to fix */
	fhci->vroot_hub->hub.wHubStatus = 0;
	fhci->vroot_hub->hub.wHubChange = 0;
	fhci->vroot_hub->port.wPortStatus = 0;
	fhci->vroot_hub->port.wPortChange = 0;

	hcd->state = HC_STATE_RUNNING;

	/*
	 * From here on, khubd concurrently accesses the root
	 * hub; drivers will be talking to enumerated devices.
	 * (On restart paths, khubd already knows about the root
	 * hub and could find work as soon as we wrote FLAG_CF.)
	 *
	 * Before this point the HC was idle/ready.  After, khubd
	 * and device drivers may start it running.
	 */
	fhci_usb_enable(fhci);
	return 0;
err:
	fhci_mem_free(fhci);
	return ret;
}

/* hc_driver .stop: reverse of fhci_start(). */
static void fhci_stop(struct usb_hcd *hcd)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);

	fhci_usb_disable_interrupt(fhci->usb_lld);
	fhci_usb_disable(fhci);

	fhci_usb_free(fhci->usb_lld);
	fhci->usb_lld = NULL;
	fhci_mem_free(fhci);
}

/* hc_driver .urb_enqueue: size the TD array for the transfer type,
 * allocate per-URB private data and queue the URB under the lock. */
static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
			    gfp_t mem_flags)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
	u32 pipe = urb->pipe;
	int ret;
	int i;
	int size = 0;
	struct urb_priv *urb_priv;
	unsigned long flags;

	switch (usb_pipetype(pipe)) {
	case PIPE_CONTROL:
		/* 1 td for setup, 1 for ack */
		size = 2;
		/* fallthrough: control transfers also need bulk-style
		 * data-stage TDs, counted by the PIPE_BULK code below */
	case PIPE_BULK:
		/* one td for every 4096 bytes(can be up to 8k) */
		size += urb->transfer_buffer_length / 4096;
		/* ...add for any remaining bytes... */
		if ((urb->transfer_buffer_length % 4096) != 0)
			size++;
		/* ..and maybe a zero length packet to wrap it up */
		if (size == 0)
			size++;
		else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
			 && (urb->transfer_buffer_length
			     % usb_maxpacket(urb->dev, pipe,
					     usb_pipeout(pipe))) != 0)
			size++;
		break;
	case PIPE_ISOCHRONOUS:
		size = urb->number_of_packets;
		if (size <= 0)
			return -EINVAL;
		for (i = 0; i < urb->number_of_packets; i++) {
			urb->iso_frame_desc[i].actual_length = 0;
			urb->iso_frame_desc[i].status = (u32) (-EXDEV);
		}
		break;
	case PIPE_INTERRUPT:
		size = 1;
	}

	/* allocate the private part of the URB */
	urb_priv = kzalloc(sizeof(*urb_priv), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	/* allocate the private part of the URB */
	urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
	if (!urb_priv->tds) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	spin_lock_irqsave(&fhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto err;

	/* fill the private part of the URB */
	urb_priv->num_of_tds = size;

	urb->status = -EINPROGRESS;
	urb->actual_length = 0;
	urb->error_count = 0;
	urb->hcpriv = urb_priv;

	fhci_queue_urb(fhci, urb);
err:
	/* on the success path ret is 0, so this frees only on failure */
	if (ret) {
		kfree(urb_priv->tds);
		kfree(urb_priv);
	}
	spin_unlock_irqrestore(&fhci->lock, flags);
	return ret;
}

/* dequeue FHCI URB */
static int fhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
	struct fhci_usb *usb = fhci->usb_lld;
	int ret = -EINVAL;
	unsigned long flags;

	if (!urb || !urb->dev || !urb->dev->bus)
		goto out;

	spin_lock_irqsave(&fhci->lock, flags);

	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto out2;

	if (usb->port_status != FHCI_PORT_DISABLED) {
		struct urb_priv *urb_priv;

		/*
		 * flag the urb's data for deletion in some upcoming
		 * SF interrupt's delete list processing
		 */
		urb_priv = urb->hcpriv;

		if (!urb_priv || (urb_priv->state == URB_DEL))
			goto out2;

		urb_priv->state = URB_DEL;

		/* already pending? */
		urb_priv->ed->state = FHCI_ED_URB_DEL;
	} else {
		fhci_urb_complete_free(fhci, urb);
	}

out2:
	spin_unlock_irqrestore(&fhci->lock, flags);
out:
	return ret;
}

/* hc_driver .endpoint_disable: complete any TDs still queued on the
 * endpoint's ED and recycle the ED itself. */
static void fhci_endpoint_disable(struct usb_hcd *hcd,
				  struct usb_host_endpoint *ep)
{
	struct fhci_hcd *fhci;
	struct ed *ed;
	unsigned long flags;

	fhci = hcd_to_fhci(hcd);
	spin_lock_irqsave(&fhci->lock, flags);
	ed = ep->hcpriv;
	if (ed) {
		while (ed->td_head != NULL) {
			struct td *td = fhci_remove_td_from_ed(ed);
			fhci_urb_complete_free(fhci, td->urb);
		}
		fhci_recycle_empty_ed(fhci, ed);
		ep->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&fhci->lock, flags);
}

/* hc_driver .get_frame_number. */
static int fhci_get_frame_number(struct usb_hcd *hcd)
{
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);

	return get_frame_num(fhci);
}

static const struct hc_driver fhci_driver = {
	.description = "fsl,usb-fhci",
	.product_desc = "FHCI HOST Controller",
	.hcd_priv_size = sizeof(struct fhci_hcd),

	/* generic hardware linkage */
	.irq = fhci_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	/* basic lifecycle operation */
	.start = fhci_start,
	.stop = fhci_stop,

	/* managing i/o requests and associated device resources */
	.urb_enqueue = fhci_urb_enqueue,
	.urb_dequeue = fhci_urb_dequeue,
	.endpoint_disable = fhci_endpoint_disable,

	/* scheduling support */
	.get_frame_number = fhci_get_frame_number,

	/* root hub support */
	.hub_status_data = fhci_hub_status_data,
	.hub_control = fhci_hub_control,
};

/* OF probe: create the HCD, map registers, allocate QE muram, claim
 * GPIOs/pins/timers/IRQs and clocks, then register the HCD.  Failures
 * unwind through the goto ladder in strict reverse order. */
static int of_fhci_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *node = dev->of_node;
	struct usb_hcd *hcd;
	struct fhci_hcd *fhci;
	struct resource usb_regs;
	unsigned long pram_addr;
	unsigned int usb_irq;
	const char *sprop;
	const u32 *iprop;
	int size;
	int ret;
	int i;
	int j;

	if (usb_disabled())
		return -ENODEV;

	sprop = of_get_property(node, "mode", NULL);
	if (sprop && strcmp(sprop, "host"))
		return -ENODEV;

	hcd = usb_create_hcd(&fhci_driver, dev, dev_name(dev));
	if (!hcd) {
		dev_err(dev, "could not create hcd\n");
		return -ENOMEM;
	}

	fhci = hcd_to_fhci(hcd);
	hcd->self.controller = dev;
	dev_set_drvdata(dev, hcd);

	iprop = of_get_property(node, "hub-power-budget", &size);
	if (iprop && size == sizeof(*iprop))
		hcd->power_budget = *iprop;

	/* FHCI registers. */
	ret = of_address_to_resource(node, 0, &usb_regs);
	if (ret) {
		dev_err(dev, "could not get regs\n");
		goto err_regs;
	}

	hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs));
	if (!hcd->regs) {
		dev_err(dev, "could not ioremap regs\n");
		ret = -ENOMEM;
		goto err_regs;
	}
	fhci->regs = hcd->regs;

	/* Parameter RAM. */
	iprop = of_get_property(node, "reg", &size);
	if (!iprop || size < sizeof(*iprop) * 4) {
		dev_err(dev, "can't get pram offset\n");
		ret = -EINVAL;
		goto err_pram;
	}

	pram_addr = cpm_muram_alloc(FHCI_PRAM_SIZE, 64);
	if (IS_ERR_VALUE(pram_addr)) {
		dev_err(dev, "failed to allocate usb pram\n");
		ret = -ENOMEM;
		goto err_pram;
	}

	qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, QE_CR_SUBBLOCK_USB,
		     QE_CR_PROTOCOL_UNSPECIFIED, pram_addr);
	fhci->pram = cpm_muram_addr(pram_addr);

	/* GPIOs and pins */
	for (i = 0; i < NUM_GPIOS; i++) {
		int gpio;
		enum of_gpio_flags flags;

		gpio = of_get_gpio_flags(node, i, &flags);
		fhci->gpios[i] = gpio;
		fhci->alow_gpios[i] = flags & OF_GPIO_ACTIVE_LOW;

		if (!gpio_is_valid(gpio)) {
			/* gpios below GPIO_SPEED are mandatory */
			if (i < GPIO_SPEED) {
				dev_err(dev, "incorrect GPIO%d: %d\n",
					i, gpio);
				goto err_gpios;
			} else {
				dev_info(dev, "assuming board doesn't have "
					"%s gpio\n", i == GPIO_SPEED ?
					"speed" : "power");
				continue;
			}
		}

		ret = gpio_request(gpio, dev_name(dev));
		if (ret) {
			dev_err(dev, "failed to request gpio %d", i);
			goto err_gpios;
		}

		if (i >= GPIO_SPEED) {
			ret = gpio_direction_output(gpio, 0);
			if (ret) {
				dev_err(dev, "failed to set gpio %d as "
					"an output\n", i);
				/* count this gpio so the unwind frees it */
				i++;
				goto err_gpios;
			}
		}
	}

	for (j = 0; j < NUM_PINS; j++) {
		fhci->pins[j] = qe_pin_request(node, j);
		if (IS_ERR(fhci->pins[j])) {
			ret = PTR_ERR(fhci->pins[j]);
			dev_err(dev, "can't get pin %d: %d\n", j, ret);
			goto err_pins;
		}
	}

	/* Frame limit timer and its interrupt. */
	fhci->timer = gtm_get_timer16();
	if (IS_ERR(fhci->timer)) {
		ret = PTR_ERR(fhci->timer);
		dev_err(dev, "failed to request qe timer: %i", ret);
		goto err_get_timer;
	}

	ret = request_irq(fhci->timer->irq, fhci_frame_limit_timer_irq,
			  0, "qe timer (usb)", hcd);
	if (ret) {
		dev_err(dev, "failed to request timer irq");
		goto err_timer_irq;
	}

	/* USB Host interrupt. */
	usb_irq = irq_of_parse_and_map(node, 0);
	if (usb_irq == NO_IRQ) {
		dev_err(dev, "could not get usb irq\n");
		ret = -EINVAL;
		goto err_usb_irq;
	}

	/* Clocks. */
	sprop = of_get_property(node, "fsl,fullspeed-clock", NULL);
	if (sprop) {
		fhci->fullspeed_clk = qe_clock_source(sprop);
		if (fhci->fullspeed_clk == QE_CLK_DUMMY) {
			dev_err(dev, "wrong fullspeed-clock\n");
			ret = -EINVAL;
			goto err_clocks;
		}
	}

	sprop = of_get_property(node, "fsl,lowspeed-clock", NULL);
	if (sprop) {
		fhci->lowspeed_clk = qe_clock_source(sprop);
		if (fhci->lowspeed_clk == QE_CLK_DUMMY) {
			dev_err(dev, "wrong lowspeed-clock\n");
			ret = -EINVAL;
			goto err_clocks;
		}
	}

	if (fhci->fullspeed_clk == QE_CLK_NONE &&
			fhci->lowspeed_clk == QE_CLK_NONE) {
		dev_err(dev, "no clocks specified\n");
		ret = -EINVAL;
		goto err_clocks;
	}

	dev_info(dev, "at 0x%p, irq %d\n", hcd->regs, usb_irq);

	fhci_config_transceiver(fhci, FHCI_PORT_POWER_OFF);

	/* Start with full-speed, if possible. */
	if (fhci->fullspeed_clk != QE_CLK_NONE) {
		fhci_config_transceiver(fhci, FHCI_PORT_FULL);
		qe_usb_clock_set(fhci->fullspeed_clk, USB_CLOCK);
	} else {
		fhci_config_transceiver(fhci, FHCI_PORT_LOW);
		/* low speed runs at 1/8 of the full-speed clock */
		qe_usb_clock_set(fhci->lowspeed_clk, USB_CLOCK >> 3);
	}

	/* Clear and disable any pending interrupts. */
	out_be16(&fhci->regs->usb_usber, 0xffff);
	out_be16(&fhci->regs->usb_usbmr, 0);

	ret = usb_add_hcd(hcd, usb_irq, 0);
	if (ret < 0)
		goto err_add_hcd;

	fhci_dfs_create(fhci);

	return 0;

err_add_hcd:
err_clocks:
	irq_dispose_mapping(usb_irq);
err_usb_irq:
	free_irq(fhci->timer->irq, hcd);
err_timer_irq:
	gtm_put_timer16(fhci->timer);
err_get_timer:
err_pins:
	while (--j >= 0)
		qe_pin_free(fhci->pins[j]);
err_gpios:
	while (--i >= 0) {
		if (gpio_is_valid(fhci->gpios[i]))
			gpio_free(fhci->gpios[i]);
	}
	cpm_muram_free(pram_addr);
err_pram:
	iounmap(hcd->regs);
err_regs:
	usb_put_hcd(hcd);
	return ret;
}

/* Release everything of_fhci_probe() acquired, in reverse order. */
static int fhci_remove(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct fhci_hcd *fhci = hcd_to_fhci(hcd);
	int i;
	int j;

	usb_remove_hcd(hcd);
	free_irq(fhci->timer->irq, hcd);
	gtm_put_timer16(fhci->timer);
	cpm_muram_free(cpm_muram_offset(fhci->pram));
	for (i = 0; i < NUM_GPIOS; i++) {
		if (!gpio_is_valid(fhci->gpios[i]))
			continue;
		gpio_free(fhci->gpios[i]);
	}
	for (j = 0; j < NUM_PINS; j++)
		qe_pin_free(fhci->pins[j]);
	fhci_dfs_destroy(fhci);
	usb_put_hcd(hcd);
	return 0;
}

static int of_fhci_remove(struct platform_device *ofdev)
{
	return fhci_remove(&ofdev->dev);
}

static const struct of_device_id of_fhci_match[] = {
	{ .compatible = "fsl,mpc8323-qe-usb", },
	{},
};
MODULE_DEVICE_TABLE(of, of_fhci_match);

static struct platform_driver of_fhci_driver = {
	.driver = {
		.name = "fsl,usb-fhci",
		.owner = THIS_MODULE,
		.of_match_table = of_fhci_match,
	},
	.probe		= of_fhci_probe,
	.remove		= of_fhci_remove,
};

module_platform_driver(of_fhci_driver);

MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
	      "Jerry Huang <Chang-Ming.Huang@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL");
gpl-2.0
HRTKernel/Hacker_Kernel_SM-G92X_MM
arch/tile/lib/strlen_64.c
4704
1038
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #include "string-endian.h" size_t strlen(const char *s) { /* Get an aligned pointer. */ const uintptr_t s_int = (uintptr_t) s; const uint64_t *p = (const uint64_t *)(s_int & -8); /* Read and MASK the first word. */ uint64_t v = *p | MASK(s_int); uint64_t bits; while ((bits = __insn_v1cmpeqi(v, 0)) == 0) v = *++p; return ((const char *)p) + (CFZ(bits) >> 3) - s; } EXPORT_SYMBOL(strlen);
gpl-2.0
sunxi/linux-3.14
arch/sparc/prom/misc_32.c
9056
2663
/* * misc.c: Miscellaneous prom functions that don't belong * anywhere else. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> extern void restore_current(void); DEFINE_SPINLOCK(prom_lock); /* Reset and reboot the machine with the command 'bcommand'. */ void prom_reboot(char *bcommand) { unsigned long flags; spin_lock_irqsave(&prom_lock, flags); (*(romvec->pv_reboot))(bcommand); /* Never get here. */ restore_current(); spin_unlock_irqrestore(&prom_lock, flags); } /* Forth evaluate the expression contained in 'fstring'. */ void prom_feval(char *fstring) { unsigned long flags; if(!fstring || fstring[0] == 0) return; spin_lock_irqsave(&prom_lock, flags); if(prom_vers == PROM_V0) (*(romvec->pv_fortheval.v0_eval))(strlen(fstring), fstring); else (*(romvec->pv_fortheval.v2_eval))(fstring); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); } EXPORT_SYMBOL(prom_feval); /* Drop into the prom, with the chance to continue with the 'go' * prom command. */ void prom_cmdline(void) { unsigned long flags; spin_lock_irqsave(&prom_lock, flags); (*(romvec->pv_abort))(); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); set_auxio(AUXIO_LED, 0); } /* Drop into the prom, but completely terminate the program. * No chance of continuing. */ void __noreturn prom_halt(void) { unsigned long flags; again: spin_lock_irqsave(&prom_lock, flags); (*(romvec->pv_halt))(); /* Never get here. */ restore_current(); spin_unlock_irqrestore(&prom_lock, flags); goto again; /* PROM is out to get me -DaveM */ } typedef void (*sfunc_t)(void); /* Set prom sync handler to call function 'funcp'. */ void prom_setsync(sfunc_t funcp) { if(!funcp) return; *romvec->pv_synchook = funcp; } /* Get the idprom and stuff it into buffer 'idbuf'. Returns the * format type. 
'num_bytes' is the number of bytes that your idbuf * has space for. Returns 0xff on error. */ unsigned char prom_get_idprom(char *idbuf, int num_bytes) { int len; len = prom_getproplen(prom_root_node, "idprom"); if((len>num_bytes) || (len==-1)) return 0xff; if(!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes)) return idbuf[0]; return 0xff; } /* Get the major prom version number. */ int prom_version(void) { return romvec->pv_romvers; } /* Get the prom plugin-revision. */ int prom_getrev(void) { return prom_rev; } /* Get the prom firmware print revision. */ int prom_getprev(void) { return prom_prev; }
gpl-2.0
playfulgod/kernel_lge_fx3
arch/alpha/boot/tools/objstrip.c
12384
6095
/* * arch/alpha/boot/tools/objstrip.c * * Strip the object file headers/trailers from an executable (ELF or ECOFF). * * Copyright (C) 1996 David Mosberger-Tang. */ /* * Converts an ECOFF or ELF object file into a bootable file. The * object file must be a OMAGIC file (i.e., data and bss follow immediately * behind the text). See DEC "Assembly Language Programmer's Guide" * documentation for details. The SRM boot process is documented in * the Alpha AXP Architecture Reference Manual, Second Edition by * Richard L. Sites and Richard T. Witek. */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <sys/fcntl.h> #include <sys/stat.h> #include <sys/types.h> #include <linux/a.out.h> #include <linux/coff.h> #include <linux/param.h> #ifdef __ELF__ # include <linux/elf.h> #endif /* bootfile size must be multiple of BLOCK_SIZE: */ #define BLOCK_SIZE 512 const char * prog_name; static void usage (void) { fprintf(stderr, "usage: %s [-v] -p file primary\n" " %s [-vb] file [secondary]\n", prog_name, prog_name); exit(1); } int main (int argc, char *argv[]) { size_t nwritten, tocopy, n, mem_size, fil_size, pad = 0; int fd, ofd, i, j, verbose = 0, primary = 0; char buf[8192], *inname; struct exec * aout; /* includes file & aout header */ long offset; #ifdef __ELF__ struct elfhdr *elf; struct elf_phdr *elf_phdr; /* program header */ unsigned long long e_entry; #endif prog_name = argv[0]; for (i = 1; i < argc && argv[i][0] == '-'; ++i) { for (j = 1; argv[i][j]; ++j) { switch (argv[i][j]) { case 'v': verbose = ~verbose; break; case 'b': pad = BLOCK_SIZE; break; case 'p': primary = 1; /* make primary bootblock */ break; } } } if (i >= argc) { usage(); } inname = argv[i++]; fd = open(inname, O_RDONLY); if (fd == -1) { perror("open"); exit(1); } ofd = 1; if (i < argc) { ofd = open(argv[i++], O_WRONLY | O_CREAT | O_TRUNC, 0666); if (ofd == -1) { perror("open"); exit(1); } } if (primary) { /* generate bootblock for primary loader */ unsigned long bb[64], 
sum = 0; struct stat st; off_t size; int i; if (ofd == 1) { usage(); } if (fstat(fd, &st) == -1) { perror("fstat"); exit(1); } size = (st.st_size + BLOCK_SIZE - 1) & ~(BLOCK_SIZE - 1); memset(bb, 0, sizeof(bb)); strcpy((char *) bb, "Linux SRM bootblock"); bb[60] = size / BLOCK_SIZE; /* count */ bb[61] = 1; /* starting sector # */ bb[62] = 0; /* flags---must be 0 */ for (i = 0; i < 63; ++i) { sum += bb[i]; } bb[63] = sum; if (write(ofd, bb, sizeof(bb)) != sizeof(bb)) { perror("boot-block write"); exit(1); } printf("%lu\n", size); return 0; } /* read and inspect exec header: */ if (read(fd, buf, sizeof(buf)) < 0) { perror("read"); exit(1); } #ifdef __ELF__ elf = (struct elfhdr *) buf; if (elf->e_ident[0] == 0x7f && strncmp((char *)elf->e_ident + 1, "ELF", 3) == 0) { if (elf->e_type != ET_EXEC) { fprintf(stderr, "%s: %s is not an ELF executable\n", prog_name, inname); exit(1); } if (!elf_check_arch(elf)) { fprintf(stderr, "%s: is not for this processor (e_machine=%d)\n", prog_name, elf->e_machine); exit(1); } if (elf->e_phnum != 1) { fprintf(stderr, "%s: %d program headers (forgot to link with -N?)\n", prog_name, elf->e_phnum); } e_entry = elf->e_entry; lseek(fd, elf->e_phoff, SEEK_SET); if (read(fd, buf, sizeof(*elf_phdr)) != sizeof(*elf_phdr)) { perror("read"); exit(1); } elf_phdr = (struct elf_phdr *) buf; offset = elf_phdr->p_offset; mem_size = elf_phdr->p_memsz; fil_size = elf_phdr->p_filesz; /* work around ELF bug: */ if (elf_phdr->p_vaddr < e_entry) { unsigned long delta = e_entry - elf_phdr->p_vaddr; offset += delta; mem_size -= delta; fil_size -= delta; elf_phdr->p_vaddr += delta; } if (verbose) { fprintf(stderr, "%s: extracting %#016lx-%#016lx (at %lx)\n", prog_name, (long) elf_phdr->p_vaddr, elf_phdr->p_vaddr + fil_size, offset); } } else #endif { aout = (struct exec *) buf; if (!(aout->fh.f_flags & COFF_F_EXEC)) { fprintf(stderr, "%s: %s is not in executable format\n", prog_name, inname); exit(1); } if (aout->fh.f_opthdr != sizeof(aout->ah)) { 
fprintf(stderr, "%s: %s has unexpected optional header size\n", prog_name, inname); exit(1); } if (N_MAGIC(*aout) != OMAGIC) { fprintf(stderr, "%s: %s is not an OMAGIC file\n", prog_name, inname); exit(1); } offset = N_TXTOFF(*aout); fil_size = aout->ah.tsize + aout->ah.dsize; mem_size = fil_size + aout->ah.bsize; if (verbose) { fprintf(stderr, "%s: extracting %#016lx-%#016lx (at %lx)\n", prog_name, aout->ah.text_start, aout->ah.text_start + fil_size, offset); } } if (lseek(fd, offset, SEEK_SET) != offset) { perror("lseek"); exit(1); } if (verbose) { fprintf(stderr, "%s: copying %lu byte from %s\n", prog_name, (unsigned long) fil_size, inname); } tocopy = fil_size; while (tocopy > 0) { n = tocopy; if (n > sizeof(buf)) { n = sizeof(buf); } tocopy -= n; if ((size_t) read(fd, buf, n) != n) { perror("read"); exit(1); } do { nwritten = write(ofd, buf, n); if ((ssize_t) nwritten == -1) { perror("write"); exit(1); } n -= nwritten; } while (n > 0); } if (pad) { mem_size = ((mem_size + pad - 1) / pad) * pad; } tocopy = mem_size - fil_size; if (tocopy > 0) { fprintf(stderr, "%s: zero-filling bss and aligning to %lu with %lu bytes\n", prog_name, pad, (unsigned long) tocopy); memset(buf, 0x00, sizeof(buf)); do { n = tocopy; if (n > sizeof(buf)) { n = sizeof(buf); } nwritten = write(ofd, buf, n); if ((ssize_t) nwritten == -1) { perror("write"); exit(1); } tocopy -= nwritten; } while (tocopy > 0); } return 0; }
gpl-2.0
kyasu/android_kernel_samsung_hltedcm
drivers/char/diag/diagfwd.c
97
78641
/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/ratelimit.h> #include <linux/workqueue.h> #include <linux/pm_runtime.h> #include <linux/diagchar.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/of.h> #include <linux/kmemleak.h> #ifdef CONFIG_DIAG_OVER_USB #include <mach/usbdiag.h> #endif #include <mach/msm_smd.h> #include <mach/socinfo.h> #include <mach/restart.h> #include "diagmem.h" #include "diagchar.h" #include "diagfwd.h" #include "diagfwd_cntl.h" #include "diagfwd_hsic.h" #include "diagchar_hdlc.h" #ifdef CONFIG_DIAG_SDIO_PIPE #include "diagfwd_sdio.h" #endif #include "diag_dci.h" #include "diag_masks.h" #include "diagfwd_bridge.h" #define STM_CMD_VERSION_OFFSET 4 #define STM_CMD_MASK_OFFSET 5 #define STM_CMD_DATA_OFFSET 6 #define STM_CMD_NUM_BYTES 7 #define STM_RSP_VALID_INDEX 7 #define STM_RSP_SUPPORTED_INDEX 8 #define STM_RSP_SMD_COMPLY_INDEX 9 #define STM_RSP_NUM_BYTES 10 #define STM_COMMAND_VALID 1 #define SMD_DRAIN_BUF_SIZE 4096 int diag_debug_buf_idx; unsigned char diag_debug_buf[1024]; /* Number of entries in table of buffers */ static unsigned int buf_tbl_size = 10; struct diag_master_table entry; int wrap_enabled; uint16_t wrap_count; void encode_rsp_and_send(int buf_length) { struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 }; struct 
diag_hdlc_dest_type enc = { NULL, NULL, 0 }; struct diag_smd_info *data = &(driver->smd_data[MODEM_DATA]); int err; unsigned long flags; if (buf_length > APPS_BUF_SIZE) { pr_err("diag: In %s, invalid len %d, permissible len %d\n", __func__, buf_length, APPS_BUF_SIZE); return; } send.state = DIAG_STATE_START; send.pkt = driver->apps_rsp_buf; send.last = (void *)(driver->apps_rsp_buf + buf_length); send.terminate = 1; if (!data->in_busy_1) { spin_lock_irqsave(&data->in_busy_lock, flags); enc.dest = data->buf_in_1; enc.dest_last = (void *)(data->buf_in_1 + APPS_BUF_SIZE - 1); diag_hdlc_encode(&send, &enc); data->write_ptr_1->buf = data->buf_in_1; data->write_ptr_1->length = (int)(enc.dest - (void *)(data->buf_in_1)); data->in_busy_1 = 1; err = diag_device_write(data->buf_in_1, data->peripheral, data->write_ptr_1); if (err) { pr_err("diag: In %s, Unable to write to device, err: %d\n", __func__, err); data->in_busy_1 = 0; } memset(driver->apps_rsp_buf, '\0', APPS_BUF_SIZE); spin_unlock_irqrestore(&data->in_busy_lock, flags); } } /* Determine if this device uses a device tree */ #ifdef CONFIG_OF static int has_device_tree(void) { struct device_node *node; node = of_find_node_by_path("/"); if (node) { of_node_put(node); return 1; } return 0; } #else static int has_device_tree(void) { return 0; } #endif int chk_config_get_id(void) { /* For all Fusion targets, Modem will always be present */ if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) return 0; if (driver->use_device_tree) { if (machine_is_msm8974()) return MSM8974_TOOLS_ID; else return 0; } else { switch (socinfo_get_msm_cpu()) { case MSM_CPU_8X60: return APQ8060_TOOLS_ID; case MSM_CPU_8960: case MSM_CPU_8960AB: return AO8960_TOOLS_ID; case MSM_CPU_8064: case MSM_CPU_8064AB: case MSM_CPU_8064AA: return APQ8064_TOOLS_ID; case MSM_CPU_8930: case MSM_CPU_8930AA: case MSM_CPU_8930AB: return MSM8930_TOOLS_ID; case MSM_CPU_8974: return MSM8974_TOOLS_ID; case MSM_CPU_8625: return MSM8625_TOOLS_ID; default: 
return 0; } } } /* * This will return TRUE for targets which support apps only mode and hence SSR. * This applies to 8960 and newer targets. */ int chk_apps_only(void) { if (driver->use_device_tree) return 1; switch (socinfo_get_msm_cpu()) { case MSM_CPU_8960: case MSM_CPU_8960AB: case MSM_CPU_8064: case MSM_CPU_8064AB: case MSM_CPU_8064AA: case MSM_CPU_8930: case MSM_CPU_8930AA: case MSM_CPU_8930AB: case MSM_CPU_8627: case MSM_CPU_9615: case MSM_CPU_8974: return 1; default: return 0; } } /* * This will return TRUE for targets which support apps as master. * Thus, SW DLOAD and Mode Reset are supported on apps processor. * This applies to 8960 and newer targets. */ int chk_apps_master(void) { if (driver->use_device_tree) return 1; else if (soc_class_is_msm8960() || soc_class_is_msm8930() || soc_class_is_apq8064() || cpu_is_msm9615()) return 1; else return 0; } int chk_polling_response(void) { if (!(driver->polling_reg_flag) && chk_apps_master()) /* * If the apps processor is master and no other processor * has registered to respond for polling */ return 1; else if (!((driver->smd_data[MODEM_DATA].ch) && (driver->rcvd_feature_mask[MODEM_DATA])) && (chk_apps_master())) /* * If the apps processor is not the master and the modem * is not up or we did not receive the feature masks from Modem */ return 1; else return 0; } /* * This function should be called if you feel that the logging process may * need to be woken up. For instance, if the logging mode is MEMORY_DEVICE MODE * and while trying to read data from a SMD data channel there are no buffers * available to read the data into, then this function should be called to * determine if the logging process needs to be woken up. 
*/ void chk_logging_wakeup(void) { int i; /* Find the index of the logging process */ for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == driver->logging_process_id) break; if (i < driver->num_clients) { /* At very high logging rates a race condition can * occur where the buffers containing the data from * an smd channel are all in use, but the data_ready * flag is cleared. In this case, the buffers never * have their data read/logged. Detect and remedy this * situation. */ if ((driver->data_ready[i] & USER_SPACE_DATA_TYPE) == 0) { driver->data_ready[i] |= USER_SPACE_DATA_TYPE; pr_debug("diag: Force wakeup of logging process\n"); wake_up_interruptible(&driver->wait_q); } } } int diag_add_hdlc_encoding(struct diag_smd_info *smd_info, void *buf, int total_recd, uint8_t *encode_buf, int *encoded_length) { struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 }; struct diag_hdlc_dest_type enc = { NULL, NULL, 0 }; struct data_header { uint8_t control_char; uint8_t version; uint16_t length; }; struct data_header *header; int header_size = sizeof(struct data_header); uint8_t *end_control_char; uint8_t *payload; uint8_t *temp_buf; uint8_t *temp_encode_buf; int src_pkt_len; int encoded_pkt_length; int max_size; int total_processed = 0; int bytes_remaining; int success = 1; temp_buf = buf; temp_encode_buf = encode_buf; bytes_remaining = *encoded_length; while (total_processed < total_recd) { header = (struct data_header *)temp_buf; /* Perform initial error checking */ if (header->control_char != CONTROL_CHAR || header->version != 1) { success = 0; break; } payload = temp_buf + header_size; end_control_char = payload + header->length; if (*end_control_char != CONTROL_CHAR) { success = 0; break; } max_size = 2 * header->length + 3; if (bytes_remaining < max_size) { pr_err("diag: In %s, Not enough room to encode remaining data for peripheral: %d, bytes available: %d, max_size: %d\n", __func__, smd_info->peripheral, bytes_remaining, 
max_size); success = 0; break; } /* Prepare for encoding the data */ send.state = DIAG_STATE_START; send.pkt = payload; send.last = (void *)(payload + header->length - 1); send.terminate = 1; enc.dest = temp_encode_buf; enc.dest_last = (void *)(temp_encode_buf + max_size); enc.crc = 0; diag_hdlc_encode(&send, &enc); /* Prepare for next packet */ src_pkt_len = (header_size + header->length + 1); total_processed += src_pkt_len; temp_buf += src_pkt_len; encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf; bytes_remaining -= encoded_pkt_length; temp_encode_buf = enc.dest; } *encoded_length = (int)(temp_encode_buf - encode_buf); return success; } static int check_bufsize_for_encoding(struct diag_smd_info *smd_info, void *buf, int total_recd) { int buf_size = IN_BUF_SIZE; int max_size = 2 * total_recd + 3; unsigned char *temp_buf; if (max_size > IN_BUF_SIZE) { if (max_size > MAX_IN_BUF_SIZE) { pr_err_ratelimited("diag: In %s, SMD sending packet of %d bytes that may expand to %d bytes, peripheral: %d\n", __func__, total_recd, max_size, smd_info->peripheral); max_size = MAX_IN_BUF_SIZE; } if (buf == smd_info->buf_in_1_raw) { /* Only realloc if we need to increase the size */ if (smd_info->buf_in_1_size < max_size) { temp_buf = krealloc(smd_info->buf_in_1, max_size, GFP_KERNEL); if (temp_buf) { smd_info->buf_in_1 = temp_buf; smd_info->buf_in_1_size = max_size; } } buf_size = smd_info->buf_in_1_size; } else { /* Only realloc if we need to increase the size */ if (smd_info->buf_in_2_size < max_size) { temp_buf = krealloc(smd_info->buf_in_2, max_size, GFP_KERNEL); if (temp_buf) { smd_info->buf_in_2 = temp_buf; smd_info->buf_in_2_size = max_size; } } buf_size = smd_info->buf_in_2_size; } } return buf_size; } void diag_ws_on_notify() { /* * Do not deal with reference count here as there can be * spurious interrupts */ pm_stay_awake(driver->diag_dev); } void diag_ws_on_read(int pkt_len) { unsigned long flags; spin_lock_irqsave(&driver->ws_lock, flags); if (pkt_len > 0) { 
driver->ws_ref_count++; } else { if (driver->ws_ref_count < 1) { pm_relax(driver->diag_dev); driver->ws_ref_count = 0; driver->copy_count = 0; } } spin_unlock_irqrestore(&driver->ws_lock, flags); } void diag_ws_on_copy() { unsigned long flags; spin_lock_irqsave(&driver->ws_lock, flags); driver->copy_count++; spin_unlock_irqrestore(&driver->ws_lock, flags); } void diag_ws_on_copy_complete() { unsigned long flags; spin_lock_irqsave(&driver->ws_lock, flags); driver->ws_ref_count -= driver->copy_count; if (driver->ws_ref_count < 1) { pm_relax(driver->diag_dev); driver->ws_ref_count = 0; } driver->copy_count = 0; spin_unlock_irqrestore(&driver->ws_lock, flags); } void diag_ws_reset() { unsigned long flags; spin_lock_irqsave(&driver->ws_lock, flags); pm_relax(driver->diag_dev); driver->ws_ref_count = 0; driver->copy_count = 0; spin_unlock_irqrestore(&driver->ws_lock, flags); } /* Process the data read from the smd data channel */ int diag_process_smd_read_data(struct diag_smd_info *smd_info, void *buf, int total_recd) { struct diag_request *write_ptr_modem = NULL; int *in_busy_ptr = 0; int err = 0; unsigned long flags; /* * Do not process data on command channel if the * channel is not designated to do so */ if ((smd_info->type == SMD_CMD_TYPE) && !driver->separate_cmdrsp[smd_info->peripheral]) { /* This print is for debugging */ pr_err("diag, In %s, received data on non-designated command channel: %d\n", __func__, smd_info->peripheral); goto err; } /* If the data is already hdlc encoded */ if (!smd_info->encode_hdlc) { if (smd_info->buf_in_1 == buf) { write_ptr_modem = smd_info->write_ptr_1; in_busy_ptr = &smd_info->in_busy_1; } else if (smd_info->buf_in_2 == buf) { write_ptr_modem = smd_info->write_ptr_2; in_busy_ptr = &smd_info->in_busy_2; } else { pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n", __func__, smd_info->peripheral); goto err; } if (write_ptr_modem) { spin_lock_irqsave(&smd_info->in_busy_lock, flags); write_ptr_modem->length = total_recd; 
*in_busy_ptr = 1; err = diag_device_write(buf, smd_info->peripheral, write_ptr_modem); spin_unlock_irqrestore(&smd_info->in_busy_lock, flags); if (err) { pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n", __func__, err); goto err; } } } else { /* The data is raw and needs to be hdlc encoded */ if (smd_info->buf_in_1_raw == buf) { write_ptr_modem = smd_info->write_ptr_1; in_busy_ptr = &smd_info->in_busy_1; } else if (smd_info->buf_in_2_raw == buf) { write_ptr_modem = smd_info->write_ptr_2; in_busy_ptr = &smd_info->in_busy_2; } else { pr_err("diag: In %s, no match for in_busy_1, peripheral: %d\n", __func__, smd_info->peripheral); goto err; } if (write_ptr_modem) { int success = 0; int write_length = 0; unsigned char *write_buf = NULL; write_length = check_bufsize_for_encoding(smd_info, buf, total_recd); if (write_length) { write_buf = (buf == smd_info->buf_in_1_raw) ? smd_info->buf_in_1 : smd_info->buf_in_2; success = diag_add_hdlc_encoding(smd_info, buf, total_recd, write_buf, &write_length); if (success) { spin_lock_irqsave( &smd_info->in_busy_lock, flags); write_ptr_modem->length = write_length; *in_busy_ptr = 1; err = diag_device_write(write_buf, smd_info->peripheral, write_ptr_modem); spin_unlock_irqrestore( &smd_info->in_busy_lock, flags); if (err) { pr_err_ratelimited("diag: In %s, diag_device_write error: %d\n", __func__, err); goto err; } } } else { goto err; } } } return 0; err: if (driver->logging_mode == MEMORY_DEVICE_MODE) diag_ws_on_read(0); return 0; } void diag_smd_queue_read(struct diag_smd_info *smd_info) { if (!smd_info || !smd_info->ch) return; switch (smd_info->type) { case SMD_DCI_TYPE: case SMD_DCI_CMD_TYPE: queue_work(driver->diag_dci_wq, &(smd_info->diag_read_smd_work)); break; case SMD_DATA_TYPE: queue_work(smd_info->wq, &(smd_info->diag_read_smd_work)); break; case SMD_CNTL_TYPE: case SMD_CMD_TYPE: queue_work(driver->diag_wq, &(smd_info->diag_read_smd_work)); break; default: pr_err("diag: In %s, invalid type: %d\n", __func__, 
smd_info->type); return; } if (driver->logging_mode == MEMORY_DEVICE_MODE && smd_info->type == SMD_DATA_TYPE) diag_ws_on_notify(); } static int diag_smd_resize_buf(struct diag_smd_info *smd_info, void **buf, unsigned int *buf_size, unsigned int requested_size) { int success = 0; void *temp_buf = NULL; unsigned int new_buf_size = requested_size; if (!smd_info) return success; if (requested_size <= MAX_IN_BUF_SIZE) { pr_debug("diag: In %s, SMD peripheral: %d sending in packets up to %d bytes\n", __func__, smd_info->peripheral, requested_size); } else { pr_err_ratelimited("diag: In %s, SMD peripheral: %d, Packet size sent: %d, Max size supported (%d) exceeded. Data beyond max size will be lost\n", __func__, smd_info->peripheral, requested_size, MAX_IN_BUF_SIZE); new_buf_size = MAX_IN_BUF_SIZE; } /* Only resize if the buffer can be increased in size */ if (new_buf_size <= *buf_size) { success = 1; return success; } temp_buf = krealloc(*buf, new_buf_size, GFP_KERNEL); if (temp_buf) { /* Match the buffer and reset the pointer and size */ if (smd_info->encode_hdlc) { /* * This smd channel is supporting HDLC encoding * on the apps */ void *temp_hdlc = NULL; if (*buf == smd_info->buf_in_1_raw) { smd_info->buf_in_1_raw = temp_buf; smd_info->buf_in_1_raw_size = new_buf_size; temp_hdlc = krealloc(smd_info->buf_in_1, MAX_IN_BUF_SIZE, GFP_KERNEL); if (temp_hdlc) { smd_info->buf_in_1 = temp_hdlc; smd_info->buf_in_1_size = MAX_IN_BUF_SIZE; } } else if (*buf == smd_info->buf_in_2_raw) { smd_info->buf_in_2_raw = temp_buf; smd_info->buf_in_2_raw_size = new_buf_size; temp_hdlc = krealloc(smd_info->buf_in_2, MAX_IN_BUF_SIZE, GFP_KERNEL); if (temp_hdlc) { smd_info->buf_in_2 = temp_hdlc; smd_info->buf_in_2_size = MAX_IN_BUF_SIZE; } } } else { if (*buf == smd_info->buf_in_1) { smd_info->buf_in_1 = temp_buf; smd_info->buf_in_1_size = new_buf_size; } else if (*buf == smd_info->buf_in_2) { smd_info->buf_in_2 = temp_buf; smd_info->buf_in_2_size = new_buf_size; } } *buf = temp_buf; *buf_size = 
new_buf_size; success = 1; } else { pr_err_ratelimited("diag: In %s, SMD peripheral: %d. packet size sent: %d, resize to support failed. Data beyond %d will be lost\n", __func__, smd_info->peripheral, requested_size, *buf_size); } return success; } void diag_smd_send_req(struct diag_smd_info *smd_info) { void *buf = NULL, *temp_buf = NULL; int total_recd = 0, r = 0, pkt_len; int loop_count = 0; int notify = 0; int buf_size = 0; int resize_success = 0; int buf_full = 0; if (!smd_info) { pr_err("diag: In %s, no smd info. Not able to read.\n", __func__); return; } /* Determine the buffer to read the data into. */ if (smd_info->type == SMD_DATA_TYPE) { /* If the data is raw and not hdlc encoded */ if (smd_info->encode_hdlc) { if (!smd_info->in_busy_1) { buf = smd_info->buf_in_1_raw; buf_size = smd_info->buf_in_1_raw_size; } else if (!smd_info->in_busy_2) { buf = smd_info->buf_in_2_raw; buf_size = smd_info->buf_in_2_raw_size; } } else { if (!smd_info->in_busy_1) { buf = smd_info->buf_in_1; buf_size = smd_info->buf_in_1_size; } else if (!smd_info->in_busy_2) { buf = smd_info->buf_in_2; buf_size = smd_info->buf_in_2_size; } } } else if (smd_info->type == SMD_CMD_TYPE) { /* If the data is raw and not hdlc encoded */ if (smd_info->encode_hdlc) { if (!smd_info->in_busy_1) { buf = smd_info->buf_in_1_raw; buf_size = smd_info->buf_in_1_raw_size; } } else { if (!smd_info->in_busy_1) { buf = smd_info->buf_in_1; buf_size = smd_info->buf_in_1_size; } } } else if (!smd_info->in_busy_1) { buf = smd_info->buf_in_1; buf_size = smd_info->buf_in_1_size; } if (!buf && (smd_info->type == SMD_DCI_TYPE || smd_info->type == SMD_DCI_CMD_TYPE)) diag_dci_try_deactivate_wakeup_source(); if (smd_info->ch && buf) { pkt_len = smd_cur_packet_size(smd_info->ch); if (pkt_len == 0 && (smd_info->type == SMD_DCI_TYPE || smd_info->type == SMD_DCI_CMD_TYPE)) diag_dci_try_deactivate_wakeup_source(); if (pkt_len > buf_size) resize_success = diag_smd_resize_buf(smd_info, &buf, &buf_size, pkt_len); temp_buf = 
buf; while (pkt_len && (pkt_len != total_recd)) { loop_count++; r = smd_read_avail(smd_info->ch); pr_debug("diag: In %s, SMD peripheral: %d, received pkt %d %d\n", __func__, smd_info->peripheral, r, total_recd); if (!r) { /* Nothing to read from SMD */ wait_event(driver->smd_wait_q, ((smd_info->ch == 0) || smd_read_avail(smd_info->ch))); /* If the smd channel is open */ if (smd_info->ch) { pr_debug("diag: In %s, SMD peripheral: %d, return from wait_event\n", __func__, smd_info->peripheral); continue; } else { pr_debug("diag: In %s, SMD peripheral: %d, return from wait_event ch closed\n", __func__, smd_info->peripheral); goto fail_return; } } if (pkt_len < r) { pr_err("diag: In %s, SMD peripheral: %d, sending incorrect pkt\n", __func__, smd_info->peripheral); goto fail_return; } if (pkt_len > r) { pr_debug("diag: In %s, SMD sending partial pkt %d %d %d %d %d %d\n", __func__, pkt_len, r, total_recd, loop_count, smd_info->peripheral, smd_info->type); } /* Protect from going beyond the end of the buffer */ if (total_recd < buf_size) { if (total_recd + r > buf_size) { r = buf_size - total_recd; buf_full = 1; } total_recd += r; /* Keep reading for complete packet */ smd_read(smd_info->ch, temp_buf, r); temp_buf += r; } else { /* * This block handles the very rare case of a * packet that is greater in length than what * we can support. In this case, we * incrementally drain the remaining portion * of the packet that will not fit in the * buffer, so that the entire packet is read * from the smd. */ int drain_bytes = (r > SMD_DRAIN_BUF_SIZE) ? 
SMD_DRAIN_BUF_SIZE : r; unsigned char *drain_buf = kzalloc(drain_bytes, GFP_KERNEL); if (drain_buf) { total_recd += drain_bytes; smd_read(smd_info->ch, drain_buf, drain_bytes); kfree(drain_buf); } else { pr_err("diag: In %s, SMD peripheral: %d, unable to allocate drain buffer\n", __func__, smd_info->peripheral); break; } } } if (smd_info->type == SMD_DATA_TYPE && driver->logging_mode == MEMORY_DEVICE_MODE) diag_ws_on_read(pkt_len); if (total_recd > 0) { if (!buf) { pr_err("diag: In %s, SMD peripheral: %d, Out of diagmem for Modem\n", __func__, smd_info->peripheral); } else if (smd_info->process_smd_read_data) { /* * If the buffer was totally filled, reset * total_recd appropriately */ if (buf_full) total_recd = buf_size; notify = smd_info->process_smd_read_data( smd_info, buf, total_recd); /* Poll SMD channels to check for data */ if (notify) diag_smd_notify(smd_info, SMD_EVENT_DATA); } } } else if (smd_info->ch && !buf && (driver->logging_mode == MEMORY_DEVICE_MODE)) { chk_logging_wakeup(); } return; fail_return: if (smd_info->type == SMD_DATA_TYPE && driver->logging_mode == MEMORY_DEVICE_MODE) diag_ws_on_read(0); if (smd_info->type == SMD_DCI_TYPE || smd_info->type == SMD_DCI_CMD_TYPE) diag_dci_try_deactivate_wakeup_source(); return; } void diag_read_smd_work_fn(struct work_struct *work) { struct diag_smd_info *smd_info = container_of(work, struct diag_smd_info, diag_read_smd_work); diag_smd_send_req(smd_info); } #ifdef CONFIG_DIAG_OVER_USB static int diag_write_to_usb(struct usb_diag_ch *ch, struct diag_request *write_ptr) { int err = 0; uint8_t retry_count, max_retries; if (!ch || !write_ptr) return -EIO; retry_count = 0; max_retries = 3; while (retry_count < max_retries) { retry_count++; /* If USB is not connected, don't try to write */ if (!driver->usb_connected) { err = -ENODEV; break; } err = usb_diag_write(ch, write_ptr); if (err == -EAGAIN) { /* * USB is not configured. Wait for sometime and * try again. 
The value 10000 was chosen empirically * as an optimum value for USB to be configured. */ usleep_range(10000, 10100); continue; } else { break; } } return err; } #endif int diag_device_write(void *buf, int data_type, struct diag_request *write_ptr) { int i, err = 0, index; index = 0; if (driver->logging_mode == MEMORY_DEVICE_MODE) { if (data_type == APPS_DATA) { for (i = 0; i < driver->buf_tbl_size; i++) if (driver->buf_tbl[i].length == 0) { driver->buf_tbl[i].buf = buf; driver->buf_tbl[i].length = driver->used; #ifdef DIAG_DEBUG pr_debug("diag: ENQUEUE buf ptr and length is %p , %d\n", driver->buf_tbl[i].buf, driver->buf_tbl[i].length); #endif break; } } #ifdef CONFIG_DIAGFWD_BRIDGE_CODE else if (data_type == HSIC_DATA || data_type == HSIC_2_DATA) { unsigned long flags; int foundIndex = -1; index = data_type - HSIC_DATA; spin_lock_irqsave(&diag_hsic[index].hsic_spinlock, flags); for (i = 0; i < diag_hsic[index].poolsize_hsic_write; i++) { if (diag_hsic[index].hsic_buf_tbl[i].length == 0) { diag_hsic[index].hsic_buf_tbl[i].buf = buf; diag_hsic[index].hsic_buf_tbl[i].length = diag_bridge[index].write_len; diag_hsic[index]. num_hsic_buf_tbl_entries++; foundIndex = i; break; } } spin_unlock_irqrestore(&diag_hsic[index].hsic_spinlock, flags); if (foundIndex == -1) err = -1; else pr_debug("diag: ENQUEUE HSIC buf ptr and length is %p , %d, ch %d\n", buf, diag_bridge[index].write_len, index); } #endif for (i = 0; i < driver->num_clients; i++) if (driver->client_map[i].pid == driver->logging_process_id) break; if (i < driver->num_clients) { pr_debug("diag: wake up logging process\n"); driver->data_ready[i] |= USER_SPACE_DATA_TYPE; wake_up_interruptible(&driver->wait_q); } else return -EINVAL; } else if (driver->logging_mode == NO_LOGGING_MODE) { if ((data_type >= MODEM_DATA) && (data_type <= WCNSS_DATA)) { driver->smd_data[data_type].in_busy_1 = 0; driver->smd_data[data_type].in_busy_2 = 0; queue_work(driver->smd_data[data_type].wq, &(driver->smd_data[data_type]. 
diag_read_smd_work)); if (data_type == MODEM_DATA && driver->separate_cmdrsp[data_type]) { driver->smd_cmd[data_type].in_busy_1 = 0; driver->smd_cmd[data_type].in_busy_2 = 0; queue_work(driver->diag_wq, &(driver->smd_cmd[data_type]. diag_read_smd_work)); } } #ifdef CONFIG_DIAG_SDIO_PIPE else if (data_type == SDIO_DATA) { driver->in_busy_sdio = 0; queue_work(driver->diag_sdio_wq, &(driver->diag_read_sdio_work)); } #endif #ifdef CONFIG_DIAGFWD_BRIDGE_CODE else if (data_type == HSIC_DATA || data_type == HSIC_2_DATA) { index = data_type - HSIC_DATA; if (diag_hsic[index].hsic_ch) queue_work(diag_bridge[index].wq, &(diag_hsic[index]. diag_read_hsic_work)); } #endif err = -1; } #ifdef CONFIG_DIAG_OVER_USB else if (driver->logging_mode == USB_MODE) { if (data_type == APPS_DATA) { driver->write_ptr_svc = (struct diag_request *) (diagmem_alloc(driver, sizeof(struct diag_request), POOL_TYPE_WRITE_STRUCT)); if (driver->write_ptr_svc) { driver->write_ptr_svc->length = driver->used; driver->write_ptr_svc->buf = buf; err = diag_write_to_usb(driver->legacy_ch, driver->write_ptr_svc); /* Free the buffer if write failed */ if (err) { diagmem_free(driver, (unsigned char *)driver-> write_ptr_svc, POOL_TYPE_WRITE_STRUCT); } } else { err = -ENOMEM; } } else if ((data_type >= MODEM_DATA) && (data_type <= WCNSS_DATA)) { write_ptr->buf = buf; #ifdef DIAG_DEBUG printk(KERN_INFO "writing data to USB," "pkt length %d\n", write_ptr->length); print_hex_dump(KERN_DEBUG, "Written Packet Data to" " USB: ", 16, 1, DUMP_PREFIX_ADDRESS, buf, write_ptr->length, 1); #endif /* DIAG DEBUG */ err = diag_write_to_usb(driver->legacy_ch, write_ptr); } #ifdef CONFIG_DIAG_SDIO_PIPE else if (data_type == SDIO_DATA) { if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { write_ptr->buf = buf; err = usb_diag_write(driver->mdm_ch, write_ptr); } else pr_err("diag: Incorrect sdio data " "while USB write\n"); } #endif #ifdef CONFIG_DIAGFWD_BRIDGE_CODE else if (data_type == HSIC_DATA || data_type == 
HSIC_2_DATA) { index = data_type - HSIC_DATA; if (diag_hsic[index].hsic_device_enabled) { struct diag_request *write_ptr_mdm; write_ptr_mdm = (struct diag_request *) diagmem_alloc(driver, sizeof(struct diag_request), index + POOL_TYPE_HSIC_WRITE); if (write_ptr_mdm) { write_ptr_mdm->buf = buf; write_ptr_mdm->length = diag_bridge[index].write_len; write_ptr_mdm->context = (void *)index; err = usb_diag_write( diag_bridge[index].ch, write_ptr_mdm); /* Return to the pool immediately */ if (err) { diagmem_free(driver, write_ptr_mdm, index + POOL_TYPE_HSIC_WRITE); pr_err_ratelimited("diag: HSIC write failure, err: %d, ch %d\n", err, index); } } else { pr_err("diag: allocate write fail\n"); err = -1; } } else { pr_err("diag: Incorrect HSIC data " "while USB write\n"); err = -1; } } else if (data_type == SMUX_DATA) { write_ptr->buf = buf; write_ptr->context = (void *)SMUX; pr_debug("diag: writing SMUX data\n"); err = usb_diag_write(diag_bridge[SMUX].ch, write_ptr); } #endif APPEND_DEBUG('d'); } #endif /* DIAG OVER USB */ return err; } void diag_update_pkt_buffer(unsigned char *buf, int type) { unsigned char *ptr = NULL; unsigned char *temp = buf; unsigned int length; int *in_busy = NULL; if (!buf) { pr_err("diag: Invalid buffer in %s\n", __func__); return; } switch (type) { case PKT_TYPE: ptr = driver->pkt_buf; length = driver->pkt_length; in_busy = &driver->in_busy_pktdata; break; case DCI_PKT_TYPE: ptr = driver->dci_pkt_buf; length = driver->dci_pkt_length; in_busy = &driver->in_busy_dcipktdata; break; default: pr_err("diag: Invalid type %d in %s\n", type, __func__); return; } if (!ptr || length == 0) { pr_err("diag: Invalid ptr %p and length %d in %s", ptr, length, __func__); return; } mutex_lock(&driver->diagchar_mutex); if (CHK_OVERFLOW(ptr, ptr, ptr + PKT_SIZE, length)) { memcpy(ptr, temp , length); *in_busy = 1; } else { printk(KERN_CRIT " Not enough buffer space for PKT_RESP\n"); } mutex_unlock(&driver->diagchar_mutex); } void diag_update_userspace_clients(unsigned 
		int type)
{
	int i;

	mutex_lock(&driver->diagchar_mutex);
	/* Flag the given data type as pending for every attached client */
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid != 0)
			driver->data_ready[i] |= type;
	wake_up_interruptible(&driver->wait_q);
	mutex_unlock(&driver->diagchar_mutex);
}

/*
 * Mark @data_type as pending for the single client whose pid matches
 * @process_id and wake any readers sleeping on the wait queue.
 */
void diag_update_sleeping_process(int process_id, int data_type)
{
	int i;

	mutex_lock(&driver->diagchar_mutex);
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid == process_id) {
			driver->data_ready[i] |= data_type;
			break;
		}
	wake_up_interruptible(&driver->wait_q);
	mutex_unlock(&driver->diagchar_mutex);
}

/*
 * Route a command packet described by the registration @entry: either
 * queue it for an apps-side client, or write it to the registered
 * peripheral's SMD channel.  Returns 1 on success, 0 on failure.
 */
int diag_send_data(struct diag_master_table entry, unsigned char *buf,
					 int len, int type)
{
	int success = 1;
	driver->pkt_length = len;

	/* If the process_id corresponds to an apps process */
	if (entry.process_id != NON_APPS_PROC) {
		/* If the message is to be sent to the apps process */
		if (type != MODEM_DATA) {
			diag_update_pkt_buffer(buf, PKT_TYPE);
			diag_update_sleeping_process(entry.process_id,
							PKT_TYPE);
		}
	} else {
		if (len > 0) {
			if (entry.client_id < NUM_SMD_DATA_CHANNELS) {
				struct diag_smd_info *smd_info;
				int index = entry.client_id;
				if (!driver->rcvd_feature_mask[
					entry.client_id]) {
					pr_debug("diag: In %s, feature mask for peripheral: %d not received yet\n",
						 __func__, entry.client_id);
					return 0;
				}
				/*
				 * Prefer the dedicated command channel when
				 * the peripheral supports separate cmd/rsp.
				 */
				smd_info = (driver->separate_cmdrsp[index] &&
						index < NUM_SMD_CMD_CHANNELS) ?
&driver->smd_cmd[index] : &driver->smd_data[index]; if (smd_info->ch) { mutex_lock(&smd_info->smd_ch_mutex); smd_write(smd_info->ch, buf, len); mutex_unlock(&smd_info->smd_ch_mutex); } else { pr_err("diag: In %s, smd channel %d not open, peripheral: %d, type: %d\n", __func__, index, smd_info->peripheral, smd_info->type); } } else { pr_alert("diag: In %s, incorrect channel: %d", __func__, entry.client_id); success = 0; } } } return success; } void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type, uint8_t *rsp_supported, uint8_t *rsp_smd_comply) { int status = 0; if (data_type >= MODEM_DATA && data_type <= WCNSS_DATA) { if (driver->peripheral_supports_stm[data_type]) { status = diag_send_stm_state( &driver->smd_cntl[data_type], cmd); if (status == 1) *rsp_smd_comply |= data_mask; *rsp_supported |= data_mask; } else if (driver->smd_cntl[data_type].ch) { *rsp_smd_comply |= data_mask; } if ((*rsp_smd_comply & data_mask) && (*rsp_supported & data_mask)) driver->stm_state[data_type] = cmd; driver->stm_state_requested[data_type] = cmd; } else if (data_type == APPS_DATA) { *rsp_supported |= data_mask; *rsp_smd_comply |= data_mask; driver->stm_state[data_type] = cmd; driver->stm_state_requested[data_type] = cmd; } } int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf) { uint8_t version, mask, cmd; uint8_t rsp_supported = 0; uint8_t rsp_smd_comply = 0; int i; if (!buf || !dest_buf) { pr_err("diag: Invalid pointers buf: %p, dest_buf %p in %s\n", buf, dest_buf, __func__); return -EIO; } version = *(buf + STM_CMD_VERSION_OFFSET); mask = *(buf + STM_CMD_MASK_OFFSET); cmd = *(buf + STM_CMD_DATA_OFFSET); /* * Check if command is valid. If the command is asking for * status, then the processor mask field is to be ignored. */ if ((version != 1) || (cmd > STATUS_STM) || ((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) { /* Command is invalid. 
Send bad param message response */ dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE; for (i = 0; i < STM_CMD_NUM_BYTES; i++) dest_buf[i+1] = *(buf + i); return STM_CMD_NUM_BYTES+1; } else if (cmd == STATUS_STM) { /* * Only the status is being queried, so fill in whether diag * over stm is supported or not */ for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) if (driver->peripheral_supports_stm[i]) rsp_supported |= 1 << i; rsp_supported |= DIAG_STM_APPS; } else { if (mask & DIAG_STM_MODEM) diag_process_stm_mask(cmd, DIAG_STM_MODEM, MODEM_DATA, &rsp_supported, &rsp_smd_comply); if (mask & DIAG_STM_LPASS) diag_process_stm_mask(cmd, DIAG_STM_LPASS, LPASS_DATA, &rsp_supported, &rsp_smd_comply); if (mask & DIAG_STM_WCNSS) diag_process_stm_mask(cmd, DIAG_STM_WCNSS, WCNSS_DATA, &rsp_supported, &rsp_smd_comply); if (mask & DIAG_STM_APPS) diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA, &rsp_supported, &rsp_smd_comply); } for (i = 0; i < STM_CMD_NUM_BYTES; i++) dest_buf[i] = *(buf + i); dest_buf[STM_RSP_VALID_INDEX] = STM_COMMAND_VALID; dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported; dest_buf[STM_RSP_SMD_COMPLY_INDEX] = rsp_smd_comply; return STM_RSP_NUM_BYTES; } int diag_apps_responds() { if (chk_apps_only()) { if (driver->smd_data[MODEM_DATA].ch && driver->rcvd_feature_mask[MODEM_DATA]) { return 0; } return 1; } return 0; } int diag_process_apps_pkt(unsigned char *buf, int len) { uint16_t subsys_cmd_code; int subsys_id, ssid_first, ssid_last, ssid_range; int packet_type = 1, i, cmd_code; unsigned char *temp = buf; int data_type; int mask_ret; int status = 0; #if defined(CONFIG_DIAG_OVER_USB) unsigned char *ptr; #endif /* Check if the command is a supported mask command */ mask_ret = diag_process_apps_masks(buf, len); if (mask_ret <= 0) return mask_ret; /* Check for registered clients and forward packet to apropriate proc */ cmd_code = (int)(*(char *)buf); temp++; subsys_id = (int)(*(char *)temp); temp++; subsys_cmd_code = *(uint16_t *)temp; temp += 2; data_type = APPS_DATA; /* 
Dont send any command other than mode reset */ if (chk_apps_master() && cmd_code == MODE_CMD) { if (subsys_id != RESET_ID) data_type = MODEM_DATA; } pr_debug("diag: %d %d %d", cmd_code, subsys_id, subsys_cmd_code); for (i = 0; i < diag_max_reg; i++) { entry = driver->table[i]; if (entry.process_id != NO_PROCESS) { if (entry.cmd_code == cmd_code && entry.subsys_id == subsys_id && entry.cmd_code_lo <= subsys_cmd_code && entry.cmd_code_hi >= subsys_cmd_code) { status = diag_send_data(entry, buf, len, data_type); if (status) packet_type = 0; } else if (entry.cmd_code == 255 && cmd_code == 75) { if (entry.subsys_id == subsys_id && entry.cmd_code_lo <= subsys_cmd_code && entry.cmd_code_hi >= subsys_cmd_code) { status = diag_send_data(entry, buf, len, data_type); if (status) packet_type = 0; } } else if (entry.cmd_code == 255 && entry.subsys_id == 255) { if (entry.cmd_code_lo <= cmd_code && entry. cmd_code_hi >= cmd_code) { if (cmd_code == MODE_CMD && subsys_id == RESET_ID && entry.process_id == NON_APPS_PROC) continue; status = diag_send_data(entry, buf, len, data_type); if (status) packet_type = 0; } } } } #if defined(CONFIG_DIAG_OVER_USB) /* Check for the command/respond msg for the maximum packet length */ if ((*buf == 0x4b) && (*(buf+1) == 0x12) && (*(uint16_t *)(buf+2) == 0x0055)) { for (i = 0; i < 4; i++) *(driver->apps_rsp_buf+i) = *(buf+i); *(uint32_t *)(driver->apps_rsp_buf+4) = PKT_SIZE; encode_rsp_and_send(7); return 0; } else if ((*buf == 0x4b) && (*(buf+1) == 0x12) && (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) { len = diag_process_stm_cmd(buf, driver->apps_rsp_buf); if (len > 0) { encode_rsp_and_send(len - 1); return 0; } return len; } /* Check for Apps Only & get event mask request */ else if (diag_apps_responds() && *buf == 0x81) { driver->apps_rsp_buf[0] = 0x81; driver->apps_rsp_buf[1] = 0x0; *(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0; *(uint16_t *)(driver->apps_rsp_buf + 4) = EVENT_LAST_ID + 1; for (i = 0; i < EVENT_LAST_ID/8 + 1; i++) *(unsigned char 
*)(driver->apps_rsp_buf + 6 + i) = 0x0; encode_rsp_and_send(6 + EVENT_LAST_ID/8); return 0; } /* Get log ID range & Check for Apps Only */ else if (diag_apps_responds() && (*buf == 0x73) && *(int *)(buf+4) == 1) { driver->apps_rsp_buf[0] = 0x73; *(int *)(driver->apps_rsp_buf + 4) = 0x1; /* operation ID */ *(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success code */ *(int *)(driver->apps_rsp_buf + 12) = LOG_GET_ITEM_NUM(log_code_last_tbl[0]); *(int *)(driver->apps_rsp_buf + 16) = LOG_GET_ITEM_NUM(log_code_last_tbl[1]); *(int *)(driver->apps_rsp_buf + 20) = LOG_GET_ITEM_NUM(log_code_last_tbl[2]); *(int *)(driver->apps_rsp_buf + 24) = LOG_GET_ITEM_NUM(log_code_last_tbl[3]); *(int *)(driver->apps_rsp_buf + 28) = LOG_GET_ITEM_NUM(log_code_last_tbl[4]); *(int *)(driver->apps_rsp_buf + 32) = LOG_GET_ITEM_NUM(log_code_last_tbl[5]); *(int *)(driver->apps_rsp_buf + 36) = LOG_GET_ITEM_NUM(log_code_last_tbl[6]); *(int *)(driver->apps_rsp_buf + 40) = LOG_GET_ITEM_NUM(log_code_last_tbl[7]); *(int *)(driver->apps_rsp_buf + 44) = LOG_GET_ITEM_NUM(log_code_last_tbl[8]); *(int *)(driver->apps_rsp_buf + 48) = LOG_GET_ITEM_NUM(log_code_last_tbl[9]); *(int *)(driver->apps_rsp_buf + 52) = LOG_GET_ITEM_NUM(log_code_last_tbl[10]); *(int *)(driver->apps_rsp_buf + 56) = LOG_GET_ITEM_NUM(log_code_last_tbl[11]); *(int *)(driver->apps_rsp_buf + 60) = LOG_GET_ITEM_NUM(log_code_last_tbl[12]); *(int *)(driver->apps_rsp_buf + 64) = LOG_GET_ITEM_NUM(log_code_last_tbl[13]); *(int *)(driver->apps_rsp_buf + 68) = LOG_GET_ITEM_NUM(log_code_last_tbl[14]); *(int *)(driver->apps_rsp_buf + 72) = LOG_GET_ITEM_NUM(log_code_last_tbl[15]); encode_rsp_and_send(75); return 0; } /* Respond to Get SSID Range request message */ else if (diag_apps_responds() && (*buf == 0x7d) && (*(buf+1) == 0x1)) { driver->apps_rsp_buf[0] = 0x7d; driver->apps_rsp_buf[1] = 0x1; driver->apps_rsp_buf[2] = 0x1; driver->apps_rsp_buf[3] = 0x0; /* -1 to un-account for OEM SSID range */ *(int *)(driver->apps_rsp_buf + 4) = MSG_MASK_TBL_CNT 
- 1; *(uint16_t *)(driver->apps_rsp_buf + 8) = MSG_SSID_0; *(uint16_t *)(driver->apps_rsp_buf + 10) = MSG_SSID_0_LAST; *(uint16_t *)(driver->apps_rsp_buf + 12) = MSG_SSID_1; *(uint16_t *)(driver->apps_rsp_buf + 14) = MSG_SSID_1_LAST; *(uint16_t *)(driver->apps_rsp_buf + 16) = MSG_SSID_2; *(uint16_t *)(driver->apps_rsp_buf + 18) = MSG_SSID_2_LAST; *(uint16_t *)(driver->apps_rsp_buf + 20) = MSG_SSID_3; *(uint16_t *)(driver->apps_rsp_buf + 22) = MSG_SSID_3_LAST; *(uint16_t *)(driver->apps_rsp_buf + 24) = MSG_SSID_4; *(uint16_t *)(driver->apps_rsp_buf + 26) = MSG_SSID_4_LAST; *(uint16_t *)(driver->apps_rsp_buf + 28) = MSG_SSID_5; *(uint16_t *)(driver->apps_rsp_buf + 30) = MSG_SSID_5_LAST; *(uint16_t *)(driver->apps_rsp_buf + 32) = MSG_SSID_6; *(uint16_t *)(driver->apps_rsp_buf + 34) = MSG_SSID_6_LAST; *(uint16_t *)(driver->apps_rsp_buf + 36) = MSG_SSID_7; *(uint16_t *)(driver->apps_rsp_buf + 38) = MSG_SSID_7_LAST; *(uint16_t *)(driver->apps_rsp_buf + 40) = MSG_SSID_8; *(uint16_t *)(driver->apps_rsp_buf + 42) = MSG_SSID_8_LAST; *(uint16_t *)(driver->apps_rsp_buf + 44) = MSG_SSID_9; *(uint16_t *)(driver->apps_rsp_buf + 46) = MSG_SSID_9_LAST; *(uint16_t *)(driver->apps_rsp_buf + 48) = MSG_SSID_10; *(uint16_t *)(driver->apps_rsp_buf + 50) = MSG_SSID_10_LAST; *(uint16_t *)(driver->apps_rsp_buf + 52) = MSG_SSID_11; *(uint16_t *)(driver->apps_rsp_buf + 54) = MSG_SSID_11_LAST; *(uint16_t *)(driver->apps_rsp_buf + 56) = MSG_SSID_12; *(uint16_t *)(driver->apps_rsp_buf + 58) = MSG_SSID_12_LAST; *(uint16_t *)(driver->apps_rsp_buf + 60) = MSG_SSID_13; *(uint16_t *)(driver->apps_rsp_buf + 62) = MSG_SSID_13_LAST; *(uint16_t *)(driver->apps_rsp_buf + 64) = MSG_SSID_14; *(uint16_t *)(driver->apps_rsp_buf + 66) = MSG_SSID_14_LAST; *(uint16_t *)(driver->apps_rsp_buf + 68) = MSG_SSID_15; *(uint16_t *)(driver->apps_rsp_buf + 70) = MSG_SSID_15_LAST; *(uint16_t *)(driver->apps_rsp_buf + 72) = MSG_SSID_16; *(uint16_t *)(driver->apps_rsp_buf + 74) = MSG_SSID_16_LAST; *(uint16_t 
*)(driver->apps_rsp_buf + 76) = MSG_SSID_17; *(uint16_t *)(driver->apps_rsp_buf + 78) = MSG_SSID_17_LAST; *(uint16_t *)(driver->apps_rsp_buf + 80) = MSG_SSID_18; *(uint16_t *)(driver->apps_rsp_buf + 82) = MSG_SSID_18_LAST; *(uint16_t *)(driver->apps_rsp_buf + 84) = MSG_SSID_19; *(uint16_t *)(driver->apps_rsp_buf + 86) = MSG_SSID_19_LAST; *(uint16_t *)(driver->apps_rsp_buf + 88) = MSG_SSID_20; *(uint16_t *)(driver->apps_rsp_buf + 90) = MSG_SSID_20_LAST; *(uint16_t *)(driver->apps_rsp_buf + 92) = MSG_SSID_21; *(uint16_t *)(driver->apps_rsp_buf + 94) = MSG_SSID_21_LAST; *(uint16_t *)(driver->apps_rsp_buf + 96) = MSG_SSID_22; *(uint16_t *)(driver->apps_rsp_buf + 98) = MSG_SSID_22_LAST; *(uint16_t *)(driver->apps_rsp_buf + 100) = MSG_SSID_23; *(uint16_t *)(driver->apps_rsp_buf + 102) = MSG_SSID_23_LAST; encode_rsp_and_send(103); return 0; } /* Check for Apps Only Respond to Get Subsys Build mask */ else if (diag_apps_responds() && (*buf == 0x7d) && (*(buf+1) == 0x2)) { ssid_first = *(uint16_t *)(buf + 2); ssid_last = *(uint16_t *)(buf + 4); ssid_range = 4 * (ssid_last - ssid_first + 1); /* frame response */ driver->apps_rsp_buf[0] = 0x7d; driver->apps_rsp_buf[1] = 0x2; *(uint16_t *)(driver->apps_rsp_buf + 2) = ssid_first; *(uint16_t *)(driver->apps_rsp_buf + 4) = ssid_last; driver->apps_rsp_buf[6] = 0x1; driver->apps_rsp_buf[7] = 0x0; ptr = driver->apps_rsp_buf + 8; /* bld time masks */ switch (ssid_first) { case MSG_SSID_0: if (ssid_range > sizeof(msg_bld_masks_0)) { pr_warning("diag: truncating ssid range for ssid 0"); ssid_range = sizeof(msg_bld_masks_0); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_0[i/4]; break; case MSG_SSID_1: if (ssid_range > sizeof(msg_bld_masks_1)) { pr_warning("diag: truncating ssid range for ssid 1"); ssid_range = sizeof(msg_bld_masks_1); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_1[i/4]; break; case MSG_SSID_2: if (ssid_range > sizeof(msg_bld_masks_2)) { pr_warning("diag: truncating 
ssid range for ssid 2"); ssid_range = sizeof(msg_bld_masks_2); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_2[i/4]; break; case MSG_SSID_3: if (ssid_range > sizeof(msg_bld_masks_3)) { pr_warning("diag: truncating ssid range for ssid 3"); ssid_range = sizeof(msg_bld_masks_3); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_3[i/4]; break; case MSG_SSID_4: if (ssid_range > sizeof(msg_bld_masks_4)) { pr_warning("diag: truncating ssid range for ssid 4"); ssid_range = sizeof(msg_bld_masks_4); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_4[i/4]; break; case MSG_SSID_5: if (ssid_range > sizeof(msg_bld_masks_5)) { pr_warning("diag: truncating ssid range for ssid 5"); ssid_range = sizeof(msg_bld_masks_5); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_5[i/4]; break; case MSG_SSID_6: if (ssid_range > sizeof(msg_bld_masks_6)) { pr_warning("diag: truncating ssid range for ssid 6"); ssid_range = sizeof(msg_bld_masks_6); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_6[i/4]; break; case MSG_SSID_7: if (ssid_range > sizeof(msg_bld_masks_7)) { pr_warning("diag: truncating ssid range for ssid 7"); ssid_range = sizeof(msg_bld_masks_7); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_7[i/4]; break; case MSG_SSID_8: if (ssid_range > sizeof(msg_bld_masks_8)) { pr_warning("diag: truncating ssid range for ssid 8"); ssid_range = sizeof(msg_bld_masks_8); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_8[i/4]; break; case MSG_SSID_9: if (ssid_range > sizeof(msg_bld_masks_9)) { pr_warning("diag: truncating ssid range for ssid 9"); ssid_range = sizeof(msg_bld_masks_9); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_9[i/4]; break; case MSG_SSID_10: if (ssid_range > sizeof(msg_bld_masks_10)) { pr_warning("diag: truncating ssid range for ssid 10"); ssid_range = sizeof(msg_bld_masks_10); } for (i = 
0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_10[i/4]; break; case MSG_SSID_11: if (ssid_range > sizeof(msg_bld_masks_11)) { pr_warning("diag: truncating ssid range for ssid 11"); ssid_range = sizeof(msg_bld_masks_11); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_11[i/4]; break; case MSG_SSID_12: if (ssid_range > sizeof(msg_bld_masks_12)) { pr_warning("diag: truncating ssid range for ssid 12"); ssid_range = sizeof(msg_bld_masks_12); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_12[i/4]; break; case MSG_SSID_13: if (ssid_range > sizeof(msg_bld_masks_13)) { pr_warning("diag: truncating ssid range for ssid 13"); ssid_range = sizeof(msg_bld_masks_13); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_13[i/4]; break; case MSG_SSID_14: if (ssid_range > sizeof(msg_bld_masks_14)) { pr_warning("diag: truncating ssid range for ssid 14"); ssid_range = sizeof(msg_bld_masks_14); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_14[i/4]; break; case MSG_SSID_15: if (ssid_range > sizeof(msg_bld_masks_15)) { pr_warning("diag: truncating ssid range for ssid 15"); ssid_range = sizeof(msg_bld_masks_15); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_15[i/4]; break; case MSG_SSID_16: if (ssid_range > sizeof(msg_bld_masks_16)) { pr_warning("diag: truncating ssid range for ssid 16"); ssid_range = sizeof(msg_bld_masks_16); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_16[i/4]; break; case MSG_SSID_17: if (ssid_range > sizeof(msg_bld_masks_17)) { pr_warning("diag: truncating ssid range for ssid 17"); ssid_range = sizeof(msg_bld_masks_17); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_17[i/4]; break; case MSG_SSID_18: if (ssid_range > sizeof(msg_bld_masks_18)) { pr_warning("diag: truncating ssid range for ssid 18"); ssid_range = sizeof(msg_bld_masks_18); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr 
+ i) = msg_bld_masks_18[i/4]; break; case MSG_SSID_19: if (ssid_range > sizeof(msg_bld_masks_19)) { pr_warning("diag: truncating ssid range for ssid 19"); ssid_range = sizeof(msg_bld_masks_19); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_19[i/4]; break; case MSG_SSID_20: if (ssid_range > sizeof(msg_bld_masks_20)) { pr_warning("diag: truncating ssid range for ssid 20"); ssid_range = sizeof(msg_bld_masks_20); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_20[i/4]; break; case MSG_SSID_21: if (ssid_range > sizeof(msg_bld_masks_21)) { pr_warning("diag: truncating ssid range for ssid 21"); ssid_range = sizeof(msg_bld_masks_21); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_21[i/4]; break; case MSG_SSID_22: if (ssid_range > sizeof(msg_bld_masks_22)) { pr_warning("diag: truncating ssid range for ssid 22"); ssid_range = sizeof(msg_bld_masks_22); } for (i = 0; i < ssid_range; i += 4) *(int *)(ptr + i) = msg_bld_masks_22[i/4]; break; } encode_rsp_and_send(8 + ssid_range - 1); return 0; } /* Check for download command */ else if ((cpu_is_msm8x60() || chk_apps_master()) && (*buf == 0x3A)) { /* send response back */ driver->apps_rsp_buf[0] = *buf; encode_rsp_and_send(0); msleep(5000); /* call download API */ msm_set_restart_mode(RESTART_DLOAD); printk(KERN_CRIT "diag: download mode set, Rebooting SoC..\n"); kernel_restart(NULL); /* Not required, represents that command isnt sent to modem */ return 0; } /* Check for polling for Apps only DIAG */ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) && (*(buf+2) == 0x03)) { /* If no one has registered for polling */ if (chk_polling_response()) { /* Respond to polling for Apps only DIAG */ for (i = 0; i < 3; i++) driver->apps_rsp_buf[i] = *(buf+i); for (i = 0; i < 13; i++) driver->apps_rsp_buf[i+3] = 0; encode_rsp_and_send(15); return 0; } } /* Return the Delayed Response Wrap Status */ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) && (*(buf+2) == 0x04) && 
		(*(buf+3) == 0x0)) {
		memcpy(driver->apps_rsp_buf, buf, 4);
		driver->apps_rsp_buf[4] = wrap_enabled;
		encode_rsp_and_send(4);
		return 0;
	}
	/* Wrap the Delayed Rsp ID */
	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
			(*(buf+2) == 0x05) && (*(buf+3) == 0x0)) {
		wrap_enabled = true;
		memcpy(driver->apps_rsp_buf, buf, 4);
		driver->apps_rsp_buf[4] = wrap_count;
		encode_rsp_and_send(5);
		return 0;
	}
	/* Check for ID for NO MODEM present */
	else if (chk_polling_response()) {
		/* respond to 0x0 command */
		if (*buf == 0x00) {
			for (i = 0; i < 55; i++)
				driver->apps_rsp_buf[i] = 0;
			encode_rsp_and_send(54);
			return 0;
		}
		/* respond to 0x7c command */
		else if (*buf == 0x7c) {
			driver->apps_rsp_buf[0] = 0x7c;
			for (i = 1; i < 8; i++)
				driver->apps_rsp_buf[i] = 0;
			/* Tools ID for APQ 8060 */
			*(int *)(driver->apps_rsp_buf + 8) =
							 chk_config_get_id();
			*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
			*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
			encode_rsp_and_send(13);
			return 0;
		}
	}
#endif
	return packet_type;
}

#ifdef CONFIG_DIAG_OVER_USB
/*
 * Send an error response (code 0x13) back to the host, built from the
 * first @index bytes of the decoded HDLC buffer.
 */
void diag_send_error_rsp(int index)
{
	int i;

	/* -1 to accommodate the first byte 0x13 */
	if (index > APPS_BUF_SIZE-1) {
		pr_err("diag: cannot send err rsp, huge length: %d\n", index);
		return;
	}
	driver->apps_rsp_buf[0] = 0x13; /* error code 13 */
	for (i = 0; i < index; i++)
		driver->apps_rsp_buf[i+1] = *(driver->hdlc_buf+i);
	encode_rsp_and_send(index - 3);
}
#else
static inline void diag_send_error_rsp(int index) {}
#endif

/*
 * HDLC-decode @len bytes at @data into driver->hdlc_buf, validate the CRC,
 * then dispatch the decoded packet to the apps packet handler and/or the
 * peripheral SMD channels.  Serialized by driver->diag_hdlc_mutex.
 */
void diag_process_hdlc(void *data, unsigned len)
{
	struct diag_hdlc_decode_type hdlc;
	int ret, type = 0, crc_chk = 0;

	mutex_lock(&driver->diag_hdlc_mutex);
	pr_debug("diag: HDLC decode fn, len of data %d\n", len);
	hdlc.dest_ptr = driver->hdlc_buf;
	hdlc.dest_size = USB_MAX_OUT_BUF;
	hdlc.src_ptr = data;
	hdlc.src_size = len;
	hdlc.src_idx = 0;
	hdlc.dest_idx = 0;
	hdlc.escaping = 0;

	ret = diag_hdlc_decode(&hdlc);

	if (ret) {
		crc_chk = crc_check(hdlc.dest_ptr, hdlc.dest_idx);
		if (crc_chk) {
			/* CRC check failed.
*/ pr_err_ratelimited("diag: In %s, bad CRC. Dropping packet\n", __func__); mutex_unlock(&driver->diag_hdlc_mutex); return; } } /* * If the message is 3 bytes or less in length then the message is * too short. A message will need 4 bytes minimum, since there are * 2 bytes for the CRC and 1 byte for the ending 0x7e for the hdlc * encoding */ if (hdlc.dest_idx < 4) { pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n", __func__, len, hdlc.dest_idx); mutex_unlock(&driver->diag_hdlc_mutex); return; } if (ret) { type = diag_process_apps_pkt(driver->hdlc_buf, hdlc.dest_idx - 3); if (type < 0) { mutex_unlock(&driver->diag_hdlc_mutex); return; } } else if (driver->debug_flag) { pr_err("diag: In %s, partial packet received, dropping packet, len: %d\n", __func__, len); print_hex_dump(KERN_DEBUG, "Dropped Packet Data: ", 16, 1, DUMP_PREFIX_ADDRESS, data, len, 1); driver->debug_flag = 0; } /* send error responses from APPS for Central Routing */ if (type == 1 && chk_apps_only()) { diag_send_error_rsp(hdlc.dest_idx); type = 0; } /* implies this packet is NOT meant for apps */ if (!(driver->smd_data[MODEM_DATA].ch) && type == 1) { if (chk_apps_only()) { diag_send_error_rsp(hdlc.dest_idx); } else { /* APQ 8060, Let Q6 respond */ if (driver->smd_data[LPASS_DATA].ch) { mutex_lock(&driver->smd_data[LPASS_DATA]. smd_ch_mutex); smd_write(driver->smd_data[LPASS_DATA].ch, driver->hdlc_buf, hdlc.dest_idx - 3); mutex_unlock(&driver->smd_data[LPASS_DATA]. 
smd_ch_mutex); } } type = 0; } #ifdef DIAG_DEBUG pr_debug("diag: hdlc.dest_idx = %d", hdlc.dest_idx); for (i = 0; i < hdlc.dest_idx; i++) printk(KERN_DEBUG "\t%x", *(((unsigned char *) driver->hdlc_buf)+i)); #endif /* DIAG DEBUG */ /* ignore 2 bytes for CRC, one for 7E and send */ if ((driver->smd_data[MODEM_DATA].ch) && (ret) && (type) && (hdlc.dest_idx > 3)) { APPEND_DEBUG('g'); mutex_lock(&driver->smd_data[MODEM_DATA].smd_ch_mutex); smd_write(driver->smd_data[MODEM_DATA].ch, driver->hdlc_buf, hdlc.dest_idx - 3); mutex_unlock(&driver->smd_data[MODEM_DATA].smd_ch_mutex); APPEND_DEBUG('h'); #ifdef DIAG_DEBUG printk(KERN_INFO "writing data to SMD, pkt length %d\n", len); print_hex_dump(KERN_DEBUG, "Written Packet Data to SMD: ", 16, 1, DUMP_PREFIX_ADDRESS, data, len, 1); #endif /* DIAG DEBUG */ } mutex_unlock(&driver->diag_hdlc_mutex); } void diag_reset_smd_data(int queue) { int i; unsigned long flags; for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) { spin_lock_irqsave(&driver->smd_data[i].in_busy_lock, flags); driver->smd_data[i].in_busy_1 = 0; driver->smd_data[i].in_busy_2 = 0; spin_unlock_irqrestore(&driver->smd_data[i].in_busy_lock, flags); if (queue) /* Poll SMD data channels to check for data */ queue_work(driver->smd_data[i].wq, &(driver->smd_data[i].diag_read_smd_work)); } if (driver->supports_separate_cmdrsp) { for (i = 0; i < NUM_SMD_CMD_CHANNELS; i++) { spin_lock_irqsave(&driver->smd_cmd[i].in_busy_lock, flags); driver->smd_cmd[i].in_busy_1 = 0; driver->smd_cmd[i].in_busy_2 = 0; spin_unlock_irqrestore(&driver->smd_cmd[i].in_busy_lock, flags); if (queue) /* Poll SMD data channels to check for data */ queue_work(driver->diag_wq, &(driver->smd_cmd[i]. 
diag_read_smd_work)); } } } #ifdef CONFIG_DIAG_OVER_USB /* 2+1 for modem ; 2 for LPASS ; 1 for WCNSS */ #define N_LEGACY_WRITE (driver->poolsize + 6) /* Additionally support number of command data and dci channels */ #define N_LEGACY_WRITE_CMD ((N_LEGACY_WRITE) + 4) #define N_LEGACY_READ 1 static void diag_usb_connect_work_fn(struct work_struct *w) { diagfwd_connect(); } static void diag_usb_disconnect_work_fn(struct work_struct *w) { diagfwd_disconnect(); } int diagfwd_connect(void) { int err; int i; printk(KERN_DEBUG "diag: USB connected\n"); err = usb_diag_alloc_req(driver->legacy_ch, (driver->supports_separate_cmdrsp ? N_LEGACY_WRITE_CMD : N_LEGACY_WRITE), N_LEGACY_READ); if (err) goto exit; driver->usb_connected = 1; diag_reset_smd_data(RESET_AND_QUEUE); for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) { /* Poll SMD CNTL channels to check for data */ diag_smd_notify(&(driver->smd_cntl[i]), SMD_EVENT_DATA); } queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work); /* Poll USB channel to check for data*/ queue_work(driver->diag_wq, &(driver->diag_read_work)); #ifdef CONFIG_DIAG_SDIO_PIPE if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { if (driver->mdm_ch && !IS_ERR(driver->mdm_ch)) diagfwd_connect_sdio(); else printk(KERN_INFO "diag: No USB MDM ch"); } #endif return 0; exit: pr_err("diag: unable to alloc USB req on legacy ch, err: %d", err); return err; } int diagfwd_disconnect(void) { int i; unsigned long flags; struct diag_smd_info *smd_info = NULL; printk(KERN_DEBUG "diag: USB disconnected\n"); driver->usb_connected = 0; driver->debug_flag = 1; if (driver->logging_mode == USB_MODE) { for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) { smd_info = &driver->smd_data[i]; spin_lock_irqsave(&smd_info->in_busy_lock, flags); smd_info->in_busy_1 = 1; smd_info->in_busy_2 = 1; spin_unlock_irqrestore(&smd_info->in_busy_lock, flags); } if (driver->supports_separate_cmdrsp) { for (i = 0; i < NUM_SMD_CMD_CHANNELS; i++) { smd_info = 
					&driver->smd_cmd[i];
				spin_lock_irqsave(&smd_info->in_busy_lock,
							flags);
				smd_info->in_busy_1 = 1;
				smd_info->in_busy_2 = 1;
				spin_unlock_irqrestore(&smd_info->in_busy_lock,
							flags);
			}
		}
	}
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
#ifdef CONFIG_DIAG_SDIO_PIPE
	if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa())
		if (driver->mdm_ch && !IS_ERR(driver->mdm_ch))
			diagfwd_disconnect_sdio();
#endif
	/* TBD - notify and flow control SMD */
	return 0;
}

/*
 * Find the channel (among the first @num_channels entries of @data) whose
 * in-flight buffer matches @buf, clear that buffer's busy flag and queue a
 * fresh SMD read for the channel.  Returns 1 if a match was found, else 0.
 *
 * NOTE(review): only data[0].in_busy_lock is taken while any entry's busy
 * flags are cleared -- confirm all entries are meant to share that lock.
 */
static int diagfwd_check_buf_match(int num_channels,
			struct diag_smd_info *data, unsigned char *buf)
{
	int i;
	int found_it = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->in_busy_lock, flags);
	for (i = 0; i < num_channels; i++) {
		if (buf == (void *)data[i].buf_in_1) {
			data[i].in_busy_1 = 0;
			found_it = 1;
			break;
		} else if (buf == (void *)data[i].buf_in_2) {
			data[i].in_busy_2 = 0;
			found_it = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&data->in_busy_lock, flags);

	if (found_it) {
		/* Data channels have their own workqueue; others share it */
		if (data[i].type == SMD_DATA_TYPE)
			queue_work(data[i].wq,
					&(data[i].diag_read_smd_work));
		else
			queue_work(driver->diag_wq,
					&(data[i].diag_read_smd_work));
	}

	return found_it;
}

/*
 * USB write-completion handler: hand the buffer that finished writing back
 * to its owner (an SMD data/cmd channel, the SDIO pipe, or the diag
 * memory pools for apps-generated data).
 */
int diagfwd_write_complete(struct diag_request *diag_write_ptr)
{
	unsigned char *buf = diag_write_ptr->buf;
	int found_it = 0;

	/* Determine if the write complete is for data from modem/apps/q6 */
	found_it = diagfwd_check_buf_match(NUM_SMD_DATA_CHANNELS,
						driver->smd_data, buf);

	if (!found_it && driver->supports_separate_cmdrsp)
		found_it = diagfwd_check_buf_match(NUM_SMD_CMD_CHANNELS,
						driver->smd_cmd, buf);

#ifdef CONFIG_DIAG_SDIO_PIPE
	if (!found_it) {
		if (buf == (void *)driver->buf_in_sdio) {
			if (machine_is_msm8x60_fusion() ||
					machine_is_msm8x60_fusn_ffa())
				diagfwd_write_complete_sdio();
			else
				pr_err("diag: Incorrect buffer pointer while WRITE");
			found_it = 1;
		}
	}
#endif
	if (!found_it) {
		/* Buffer came from the mempools: release it there */
		if (driver->logging_mode != USB_MODE)
			pr_debug("diag: freeing buffer when not in usb mode\n");
		diagmem_free(driver, (unsigned char *)buf,
						POOL_TYPE_HDLC);
diagmem_free(driver, (unsigned char *)diag_write_ptr, POOL_TYPE_WRITE_STRUCT); } return 0; } int diagfwd_read_complete(struct diag_request *diag_read_ptr) { int status = diag_read_ptr->status; unsigned char *buf = diag_read_ptr->buf; /* Determine if the read complete is for data on legacy/mdm ch */ if (buf == (void *)driver->usb_buf_out) { driver->read_len_legacy = diag_read_ptr->actual; APPEND_DEBUG('s'); #ifdef DIAG_DEBUG printk(KERN_INFO "read data from USB, pkt length %d", diag_read_ptr->actual); print_hex_dump(KERN_DEBUG, "Read Packet Data from USB: ", 16, 1, DUMP_PREFIX_ADDRESS, diag_read_ptr->buf, diag_read_ptr->actual, 1); #endif /* DIAG DEBUG */ if (driver->logging_mode == USB_MODE) { if (status != -ECONNRESET && status != -ESHUTDOWN) queue_work(driver->diag_wq, &(driver->diag_proc_hdlc_work)); else queue_work(driver->diag_wq, &(driver->diag_read_work)); } } #ifdef CONFIG_DIAG_SDIO_PIPE else if (buf == (void *)driver->usb_buf_mdm_out) { if (machine_is_msm8x60_fusion() || machine_is_msm8x60_fusn_ffa()) { driver->read_len_mdm = diag_read_ptr->actual; diagfwd_read_complete_sdio(); } else pr_err("diag: Incorrect buffer pointer while READ"); } #endif else printk(KERN_ERR "diag: Unknown buffer ptr from USB"); return 0; } void diag_read_work_fn(struct work_struct *work) { APPEND_DEBUG('d'); driver->usb_read_ptr->buf = driver->usb_buf_out; driver->usb_read_ptr->length = USB_MAX_OUT_BUF; usb_diag_read(driver->legacy_ch, driver->usb_read_ptr); APPEND_DEBUG('e'); } void diag_process_hdlc_fn(struct work_struct *work) { APPEND_DEBUG('D'); diag_process_hdlc(driver->usb_buf_out, driver->read_len_legacy); diag_read_work_fn(work); APPEND_DEBUG('E'); } void diag_usb_legacy_notifier(void *priv, unsigned event, struct diag_request *d_req) { switch (event) { case USB_DIAG_CONNECT: queue_work(driver->diag_usb_wq, &driver->diag_usb_connect_work); break; case USB_DIAG_DISCONNECT: queue_work(driver->diag_usb_wq, &driver->diag_usb_disconnect_work); break; case USB_DIAG_READ_DONE: 
diagfwd_read_complete(d_req); break; case USB_DIAG_WRITE_DONE: diagfwd_write_complete(d_req); break; default: printk(KERN_ERR "Unknown event from USB diag\n"); break; } } #endif /* DIAG OVER USB */ void diag_smd_notify(void *ctxt, unsigned event) { struct diag_smd_info *smd_info = (struct diag_smd_info *)ctxt; if (!smd_info) return; if (event == SMD_EVENT_CLOSE) { smd_info->ch = 0; wake_up(&driver->smd_wait_q); if (smd_info->type == SMD_DATA_TYPE) { smd_info->notify_context = event; queue_work(driver->diag_cntl_wq, &(smd_info->diag_notify_update_smd_work)); } else if (smd_info->type == SMD_DCI_TYPE) { /* Notify the clients of the close */ diag_dci_notify_client(smd_info->peripheral_mask, DIAG_STATUS_CLOSED); } else if (smd_info->type == SMD_CNTL_TYPE) { diag_cntl_stm_notify(smd_info, CLEAR_PERIPHERAL_STM_STATE); } return; } else if (event == SMD_EVENT_OPEN) { if (smd_info->ch_save) smd_info->ch = smd_info->ch_save; if (smd_info->type == SMD_CNTL_TYPE) { smd_info->notify_context = event; queue_work(driver->diag_cntl_wq, &(smd_info->diag_notify_update_smd_work)); } else if (smd_info->type == SMD_DCI_TYPE) { smd_info->notify_context = event; queue_work(driver->diag_dci_wq, &(smd_info->diag_notify_update_smd_work)); /* Notify the clients of the open */ diag_dci_notify_client(smd_info->peripheral_mask, DIAG_STATUS_OPEN); } wake_up(&driver->smd_wait_q); diag_smd_queue_read(smd_info); } else if (event == SMD_EVENT_DATA) { wake_up(&driver->smd_wait_q); diag_smd_queue_read(smd_info); if (smd_info->type == SMD_DCI_TYPE || smd_info->type == SMD_DCI_CMD_TYPE) { diag_dci_try_activate_wakeup_source(); } } } static int diag_smd_probe(struct platform_device *pdev) { int r = 0; int index = -1; const char *channel_name = NULL; if (pdev->id == SMD_APPS_MODEM) { index = MODEM_DATA; channel_name = "DIAG"; } #if defined(CONFIG_MSM_N_WAY_SMD) else if (pdev->id == SMD_APPS_QDSP) { index = LPASS_DATA; channel_name = "DIAG"; } #endif else if (pdev->id == SMD_APPS_WCNSS) { index = 
WCNSS_DATA; channel_name = "APPS_RIVA_DATA"; } if (index != -1) { r = smd_named_open_on_edge(channel_name, pdev->id, &driver->smd_data[index].ch, &driver->smd_data[index], diag_smd_notify); driver->smd_data[index].ch_save = driver->smd_data[index].ch; } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pr_debug("diag: In %s, open SMD port, Id = %d, r = %d\n", __func__, pdev->id, r); return 0; } static int diag_smd_cmd_probe(struct platform_device *pdev) { int r = 0; int index = -1; const char *channel_name = NULL; if (!driver->supports_separate_cmdrsp) return 0; if (pdev->id == SMD_APPS_MODEM) { index = MODEM_DATA; channel_name = "DIAG_CMD"; } if (index != -1) { r = smd_named_open_on_edge(channel_name, pdev->id, &driver->smd_cmd[index].ch, &driver->smd_cmd[index], diag_smd_notify); driver->smd_cmd[index].ch_save = driver->smd_cmd[index].ch; } pr_debug("diag: In %s, open SMD CMD port, Id = %d, r = %d\n", __func__, pdev->id, r); return 0; } static int diag_smd_runtime_suspend(struct device *dev) { dev_dbg(dev, "pm_runtime: suspending...\n"); return 0; } static int diag_smd_runtime_resume(struct device *dev) { dev_dbg(dev, "pm_runtime: resuming...\n"); return 0; } static const struct dev_pm_ops diag_smd_dev_pm_ops = { .runtime_suspend = diag_smd_runtime_suspend, .runtime_resume = diag_smd_runtime_resume, }; static struct platform_driver msm_smd_ch1_driver = { .probe = diag_smd_probe, .driver = { .name = "DIAG", .owner = THIS_MODULE, .pm = &diag_smd_dev_pm_ops, }, }; static struct platform_driver diag_smd_lite_driver = { .probe = diag_smd_probe, .driver = { .name = "APPS_RIVA_DATA", .owner = THIS_MODULE, .pm = &diag_smd_dev_pm_ops, }, }; static struct platform_driver smd_lite_data_cmd_drivers[NUM_SMD_CMD_CHANNELS] = { { /* Modem data */ .probe = diag_smd_cmd_probe, .driver = { .name = "DIAG_CMD", .owner = THIS_MODULE, .pm = &diag_smd_dev_pm_ops, }, } }; int device_supports_separate_cmdrsp(void) { return driver->use_device_tree; } void 
diag_smd_destructor(struct diag_smd_info *smd_info) { if (smd_info->type == SMD_DATA_TYPE) destroy_workqueue(smd_info->wq); if (smd_info->ch) smd_close(smd_info->ch); smd_info->ch = 0; smd_info->ch_save = 0; kfree(smd_info->buf_in_1); kfree(smd_info->buf_in_2); kfree(smd_info->write_ptr_1); kfree(smd_info->write_ptr_2); kfree(smd_info->buf_in_1_raw); kfree(smd_info->buf_in_2_raw); } int diag_smd_constructor(struct diag_smd_info *smd_info, int peripheral, int type) { smd_info->peripheral = peripheral; smd_info->type = type; smd_info->encode_hdlc = 0; mutex_init(&smd_info->smd_ch_mutex); spin_lock_init(&smd_info->in_busy_lock); switch (peripheral) { case MODEM_DATA: smd_info->peripheral_mask = DIAG_CON_MPSS; break; case LPASS_DATA: smd_info->peripheral_mask = DIAG_CON_LPASS; break; case WCNSS_DATA: smd_info->peripheral_mask = DIAG_CON_WCNSS; break; default: pr_err("diag: In %s, unknown peripheral, peripheral: %d\n", __func__, peripheral); goto err; } smd_info->ch = 0; smd_info->ch_save = 0; if (smd_info->buf_in_1 == NULL) { smd_info->buf_in_1 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (smd_info->buf_in_1 == NULL) goto err; smd_info->buf_in_1_size = IN_BUF_SIZE; kmemleak_not_leak(smd_info->buf_in_1); } if (smd_info->write_ptr_1 == NULL) { smd_info->write_ptr_1 = kzalloc(sizeof(struct diag_request), GFP_KERNEL); if (smd_info->write_ptr_1 == NULL) goto err; kmemleak_not_leak(smd_info->write_ptr_1); } /* The smd data type needs two buffers */ if (smd_info->type == SMD_DATA_TYPE) { if (smd_info->buf_in_2 == NULL) { smd_info->buf_in_2 = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (smd_info->buf_in_2 == NULL) goto err; smd_info->buf_in_2_size = IN_BUF_SIZE; kmemleak_not_leak(smd_info->buf_in_2); } if (smd_info->write_ptr_2 == NULL) { smd_info->write_ptr_2 = kzalloc(sizeof(struct diag_request), GFP_KERNEL); if (smd_info->write_ptr_2 == NULL) goto err; kmemleak_not_leak(smd_info->write_ptr_2); } if (driver->supports_apps_hdlc_encoding) { /* In support of hdlc encoding */ if 
(smd_info->buf_in_1_raw == NULL) { smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (smd_info->buf_in_1_raw == NULL) goto err; smd_info->buf_in_1_raw_size = IN_BUF_SIZE; kmemleak_not_leak(smd_info->buf_in_1_raw); } if (smd_info->buf_in_2_raw == NULL) { smd_info->buf_in_2_raw = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (smd_info->buf_in_2_raw == NULL) goto err; smd_info->buf_in_2_raw_size = IN_BUF_SIZE; kmemleak_not_leak(smd_info->buf_in_2_raw); } } } if (smd_info->type == SMD_CMD_TYPE && driver->supports_apps_hdlc_encoding) { /* In support of hdlc encoding */ if (smd_info->buf_in_1_raw == NULL) { smd_info->buf_in_1_raw = kzalloc(IN_BUF_SIZE, GFP_KERNEL); if (smd_info->buf_in_1_raw == NULL) goto err; smd_info->buf_in_1_raw_size = IN_BUF_SIZE; kmemleak_not_leak(smd_info->buf_in_1_raw); } } /* The smd data type needs separate work queues for reads */ if (type == SMD_DATA_TYPE) { switch (peripheral) { case MODEM_DATA: smd_info->wq = create_singlethread_workqueue( "diag_modem_data_read_wq"); break; case LPASS_DATA: smd_info->wq = create_singlethread_workqueue( "diag_lpass_data_read_wq"); break; case WCNSS_DATA: smd_info->wq = create_singlethread_workqueue( "diag_wcnss_data_read_wq"); break; default: smd_info->wq = NULL; break; } } else { smd_info->wq = NULL; } INIT_WORK(&(smd_info->diag_read_smd_work), diag_read_smd_work_fn); /* * The update function assigned to the diag_notify_update_smd_work * work_struct is meant to be used for updating that is not to * be done in the context of the smd notify function. The * notify_context variable can be used for passing additional * information to the update function. 
*/ smd_info->notify_context = 0; smd_info->general_context = 0; switch (type) { case SMD_DATA_TYPE: case SMD_CMD_TYPE: INIT_WORK(&(smd_info->diag_notify_update_smd_work), diag_clean_reg_fn); INIT_WORK(&(smd_info->diag_general_smd_work), diag_cntl_smd_work_fn); break; case SMD_CNTL_TYPE: INIT_WORK(&(smd_info->diag_notify_update_smd_work), diag_mask_update_fn); INIT_WORK(&(smd_info->diag_general_smd_work), diag_cntl_smd_work_fn); break; case SMD_DCI_TYPE: case SMD_DCI_CMD_TYPE: INIT_WORK(&(smd_info->diag_notify_update_smd_work), diag_update_smd_dci_work_fn); INIT_WORK(&(smd_info->diag_general_smd_work), diag_cntl_smd_work_fn); break; default: pr_err("diag: In %s, unknown type, type: %d\n", __func__, type); goto err; } /* * Set function ptr for function to call to process the data that * was just read from the smd channel */ switch (type) { case SMD_DATA_TYPE: case SMD_CMD_TYPE: smd_info->process_smd_read_data = diag_process_smd_read_data; break; case SMD_CNTL_TYPE: smd_info->process_smd_read_data = diag_process_smd_cntl_read_data; break; case SMD_DCI_TYPE: case SMD_DCI_CMD_TYPE: smd_info->process_smd_read_data = diag_process_smd_dci_read_data; break; default: pr_err("diag: In %s, unknown type, type: %d\n", __func__, type); goto err; } return 1; err: kfree(smd_info->buf_in_1); kfree(smd_info->buf_in_2); kfree(smd_info->write_ptr_1); kfree(smd_info->write_ptr_2); kfree(smd_info->buf_in_1_raw); kfree(smd_info->buf_in_2_raw); return 0; } void diagfwd_init(void) { int success; int i; wrap_enabled = 0; wrap_count = 0; diag_debug_buf_idx = 0; driver->read_len_legacy = 0; driver->use_device_tree = has_device_tree(); driver->real_time_mode = 1; /* * The number of entries in table of buffers * should not be any smaller than hdlc poolsize. */ driver->buf_tbl_size = (buf_tbl_size < driver->poolsize_hdlc) ? 
driver->poolsize_hdlc : buf_tbl_size; driver->supports_separate_cmdrsp = device_supports_separate_cmdrsp(); driver->supports_apps_hdlc_encoding = 1; mutex_init(&driver->diag_hdlc_mutex); mutex_init(&driver->diag_cntl_mutex); spin_lock_init(&driver->ws_lock); driver->ws_ref_count = 0; driver->copy_count = 0; for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) { driver->separate_cmdrsp[i] = 0; driver->peripheral_supports_stm[i] = DISABLE_STM; driver->rcvd_feature_mask[i] = 0; } for (i = 0; i < NUM_STM_PROCESSORS; i++) { driver->stm_state_requested[i] = DISABLE_STM; driver->stm_state[i] = DISABLE_STM; } for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) { success = diag_smd_constructor(&driver->smd_data[i], i, SMD_DATA_TYPE); if (!success) goto err; } if (driver->supports_separate_cmdrsp) { for (i = 0; i < NUM_SMD_CMD_CHANNELS; i++) { success = diag_smd_constructor(&driver->smd_cmd[i], i, SMD_CMD_TYPE); if (!success) goto err; } } if (driver->usb_buf_out == NULL && (driver->usb_buf_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL)) == NULL) goto err; kmemleak_not_leak(driver->usb_buf_out); if (driver->hdlc_buf == NULL && (driver->hdlc_buf = kzalloc(HDLC_MAX, GFP_KERNEL)) == NULL) goto err; kmemleak_not_leak(driver->hdlc_buf); if (driver->user_space_data_buf == NULL) driver->user_space_data_buf = kzalloc(USER_SPACE_DATA, GFP_KERNEL); if (driver->user_space_data_buf == NULL) goto err; kmemleak_not_leak(driver->user_space_data_buf); if (driver->client_map == NULL && (driver->client_map = kzalloc ((driver->num_clients) * sizeof(struct diag_client_map), GFP_KERNEL)) == NULL) goto err; kmemleak_not_leak(driver->client_map); if (driver->buf_tbl == NULL) driver->buf_tbl = kzalloc(driver->buf_tbl_size * sizeof(struct diag_write_device), GFP_KERNEL); if (driver->buf_tbl == NULL) goto err; kmemleak_not_leak(driver->buf_tbl); if (driver->data_ready == NULL && (driver->data_ready = kzalloc(driver->num_clients * sizeof(int) , GFP_KERNEL)) == NULL) goto err; kmemleak_not_leak(driver->data_ready); if 
(driver->table == NULL && (driver->table = kzalloc(diag_max_reg* sizeof(struct diag_master_table), GFP_KERNEL)) == NULL) goto err; kmemleak_not_leak(driver->table); if (driver->usb_read_ptr == NULL) { driver->usb_read_ptr = kzalloc( sizeof(struct diag_request), GFP_KERNEL); if (driver->usb_read_ptr == NULL) goto err; kmemleak_not_leak(driver->usb_read_ptr); } if (driver->pkt_buf == NULL && (driver->pkt_buf = kzalloc(PKT_SIZE, GFP_KERNEL)) == NULL) goto err; kmemleak_not_leak(driver->pkt_buf); if (driver->dci_pkt_buf == NULL) { driver->dci_pkt_buf = kzalloc(PKT_SIZE, GFP_KERNEL); if (!driver->dci_pkt_buf) goto err; } kmemleak_not_leak(driver->dci_pkt_buf); if (driver->apps_rsp_buf == NULL) { driver->apps_rsp_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL); if (driver->apps_rsp_buf == NULL) goto err; kmemleak_not_leak(driver->apps_rsp_buf); } driver->diag_wq = create_singlethread_workqueue("diag_wq"); driver->diag_usb_wq = create_singlethread_workqueue("diag_usb_wq"); #ifdef CONFIG_DIAG_OVER_USB INIT_WORK(&(driver->diag_usb_connect_work), diag_usb_connect_work_fn); INIT_WORK(&(driver->diag_usb_disconnect_work), diag_usb_disconnect_work_fn); INIT_WORK(&(driver->diag_proc_hdlc_work), diag_process_hdlc_fn); INIT_WORK(&(driver->diag_read_work), diag_read_work_fn); driver->legacy_ch = usb_diag_open(DIAG_LEGACY, driver, diag_usb_legacy_notifier); if (IS_ERR(driver->legacy_ch)) { printk(KERN_ERR "Unable to open USB diag legacy channel\n"); goto err; } #endif platform_driver_register(&msm_smd_ch1_driver); platform_driver_register(&diag_smd_lite_driver); if (driver->supports_separate_cmdrsp) { for (i = 0; i < NUM_SMD_CMD_CHANNELS; i++) platform_driver_register(&smd_lite_data_cmd_drivers[i]); } return; err: pr_err("diag: Could not initialize diag buffers"); for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) diag_smd_destructor(&driver->smd_data[i]); for (i = 0; i < NUM_SMD_CMD_CHANNELS; i++) diag_smd_destructor(&driver->smd_cmd[i]); kfree(driver->buf_msg_mask_update); 
kfree(driver->buf_log_mask_update); kfree(driver->buf_event_mask_update); kfree(driver->usb_buf_out); kfree(driver->hdlc_buf); kfree(driver->client_map); kfree(driver->buf_tbl); kfree(driver->data_ready); kfree(driver->table); kfree(driver->pkt_buf); kfree(driver->dci_pkt_buf); kfree(driver->usb_read_ptr); kfree(driver->apps_rsp_buf); kfree(driver->user_space_data_buf); if (driver->diag_wq) destroy_workqueue(driver->diag_wq); if (driver->diag_usb_wq) destroy_workqueue(driver->diag_usb_wq); } void diagfwd_exit(void) { int i; for (i = 0; i < NUM_SMD_DATA_CHANNELS; i++) diag_smd_destructor(&driver->smd_data[i]); #ifdef CONFIG_DIAG_OVER_USB usb_diag_close(driver->legacy_ch); #endif platform_driver_unregister(&msm_smd_ch1_driver); platform_driver_unregister(&diag_smd_lite_driver); if (driver->supports_separate_cmdrsp) { for (i = 0; i < NUM_SMD_CMD_CHANNELS; i++) { diag_smd_destructor(&driver->smd_cmd[i]); platform_driver_unregister( &smd_lite_data_cmd_drivers[i]); } } kfree(driver->buf_msg_mask_update); kfree(driver->buf_log_mask_update); kfree(driver->buf_event_mask_update); kfree(driver->usb_buf_out); kfree(driver->hdlc_buf); kfree(driver->client_map); kfree(driver->buf_tbl); kfree(driver->data_ready); kfree(driver->table); kfree(driver->pkt_buf); kfree(driver->dci_pkt_buf); kfree(driver->usb_read_ptr); kfree(driver->apps_rsp_buf); kfree(driver->user_space_data_buf); destroy_workqueue(driver->diag_wq); destroy_workqueue(driver->diag_usb_wq); }
gpl-2.0
akuster/linux-yocto-3.14
drivers/scsi/lpfc/lpfc_mbox.c
609
79106
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_compat.h" /** * lpfc_dump_static_vport - Dump HBA's static vport information. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @offset: offset for dumping vport info. * * The dump mailbox command provides a method for the device driver to obtain * various types of information from the HBA device. 
* * This routine prepares the mailbox command for dumping list of static * vports to be created. **/ int lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset) { MAILBOX_t *mb; struct lpfc_dmabuf *mp; mb = &pmb->u.mb; /* Setup to dump vport info region */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; mb->un.varDmp.region_id = DMP_REGION_VPORT; mb->mbxOwner = OWN_HOST; /* For SLI3 HBAs data is embedded in mailbox */ if (phba->sli_rev != LPFC_SLI_REV4) { mb->un.varDmp.cv = 1; mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); return 0; } /* For SLI4 HBAs driver need to allocate memory */ mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (mp) mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp || !mp->virt) { kfree(mp); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2605 lpfc_dump_static_vport: memory" " allocation failed\n"); return 1; } memset(mp->virt, 0, LPFC_BPL_SIZE); INIT_LIST_HEAD(&mp->list); /* save address for completion */ pmb->context1 = (uint8_t *)mp; mb->un.varWords[3] = putPaddrLow(mp->phys); mb->un.varWords[4] = putPaddrHigh(mp->phys); mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); return 0; } /** * lpfc_down_link - Bring down HBAs link. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * This routine prepares a mailbox command to bring down HBA link. **/ void lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb = &pmb->u.mb; mb->mbxCommand = MBX_DOWN_LINK; mb->mbxOwner = OWN_HOST; } /** * lpfc_dump_mem - Prepare a mailbox command for reading a region. * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @offset: offset into the region. * @region_id: config region id. 
* * The dump mailbox command provides a method for the device driver to obtain * various types of information from the HBA device. * * This routine prepares the mailbox command for dumping HBA's config region. **/ void lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset, uint16_t region_id) { MAILBOX_t *mb; void *ctx; mb = &pmb->u.mb; ctx = pmb->context2; /* Setup to dump VPD region */ memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; mb->un.varDmp.entry_index = offset; mb->un.varDmp.region_id = region_id; mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); mb->un.varDmp.co = 0; mb->un.varDmp.resp_offset = 0; pmb->context2 = ctx; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * This function create a dump memory mailbox command to dump wake up * parameters. */ void lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb; void *ctx; mb = &pmb->u.mb; /* Save context so that we can restore after memset */ ctx = pmb->context2; /* Setup to dump VPD region */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_DUMP_MEMORY; mb->mbxOwner = OWN_HOST; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; if (phba->sli_rev < LPFC_SLI_REV4) mb->un.varDmp.entry_index = 0; mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; mb->un.varDmp.co = 0; mb->un.varDmp.resp_offset = 0; pmb->context2 = ctx; return; } /** * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. 
* * The read NVRAM mailbox command returns the HBA's non-volatile parameters * that are used as defaults when the Fibre Channel link is brought on-line. * * This routine prepares the mailbox command for reading information stored * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN. **/ void lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_NV; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_async - Prepare a mailbox command for enabling HBA async event * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @ring: ring number for the asynchronous event to be configured. * * The asynchronous event enable mailbox command is used to enable the * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and * specifies the default ring to which events are posted. * * This routine prepares the mailbox command for enabling HBA asynchronous * event support on a IOCB ring. **/ void lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t ring) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_ASYNCEVT_ENABLE; mb->un.varCfgAsyncEvent.ring = ring; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_heart_beat - Prepare a mailbox command for heart beat * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The heart beat mailbox command is used to detect an unresponsive HBA, which * is defined as any device where no error attention is sent and both mailbox * and rings are not processed. * * This routine prepares the mailbox command for issuing a heart beat in the * form of mailbox command to the HBA. The timely completion of the heart * beat mailbox command indicates the health of the HBA. 
**/ void lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_HEARTBEAT; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_topology - Prepare a mailbox command for reading HBA topology * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @mp: DMA buffer memory for reading the link attention information into. * * The read topology mailbox command is issued to read the link topology * information indicated by the HBA port when the Link Event bit of the Host * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link * Attention ACQE is received from the port (For SLI-4). A Link Event * Attention occurs based on an exception detected at the Fibre Channel link * interface. * * This routine prepares the mailbox command for reading HBA link topology * information. A DMA memory has been set aside and address passed to the * HBA through @mp for the HBA to DMA link attention information into the * memory as part of the execution of the mailbox command. * * Return codes * 0 - Success (currently always return 0) **/ int lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, struct lpfc_dmabuf *mp) { MAILBOX_t *mb; struct lpfc_sli *psli; psli = &phba->sli; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); INIT_LIST_HEAD(&mp->list); mb->mbxCommand = MBX_READ_TOPOLOGY; mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE; mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys); mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys); /* Save address for later completion and set the owner to host so that * the FW knows this mailbox is available for processing. */ pmb->context1 = (uint8_t *)mp; mb->mbxOwner = OWN_HOST; return (0); } /** * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention * @phba: pointer to lpfc hba data structure. 
* @pmb: pointer to the driver internal queue element for mailbox command. * * The clear link attention mailbox command is issued to clear the link event * attention condition indicated by the Link Event bit of the Host Attention * (HSTATT) register. The link event attention condition is cleared only if * the event tag specified matches that of the current link event counter. * The current event tag is read using the read link attention event mailbox * command. * * This routine prepares the mailbox command for clearing HBA link attention * information. **/ void lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varClearLA.eventTag = phba->fc_eventTag; mb->mbxCommand = MBX_CLEAR_LA; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure link mailbox command is used before the initialize link * mailbox command to override default value and to configure link-oriented * parameters such as DID address and various timers. Typically, this * command would be used after an F_Port login to set the returned DID address * and the fabric timeout values. This command is not valid before a configure * port command has configured the HBA port. * * This routine prepares the mailbox command for configuring link on a HBA. **/ void lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { struct lpfc_vport *vport = phba->pport; MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* NEW_FEATURE * SLI-2, Coalescing Response Feature. 
*/ if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) { mb->un.varCfgLnk.cr = 1; mb->un.varCfgLnk.ci = 1; mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; } mb->un.varCfgLnk.myId = vport->fc_myDID; mb->un.varCfgLnk.edtov = phba->fc_edtov; mb->un.varCfgLnk.arbtov = phba->fc_arbtov; mb->un.varCfgLnk.ratov = phba->fc_ratov; mb->un.varCfgLnk.rttov = phba->fc_rttov; mb->un.varCfgLnk.altov = phba->fc_altov; mb->un.varCfgLnk.crtov = phba->fc_crtov; mb->un.varCfgLnk.citov = phba->fc_citov; if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4)) mb->un.varCfgLnk.ack0_enable = 1; mb->mbxCommand = MBX_CONFIG_LINK; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_config_msi - Prepare a mailbox command for configuring msi-x * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure MSI-X mailbox command is used to configure the HBA's SLI-3 * MSI-X multi-message interrupt vector association to interrupt attention * conditions. * * Return codes * 0 - Success * -EINVAL - Failure **/ int lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t *mb = &pmb->u.mb; uint32_t attentionConditions[2]; /* Sanity check */ if (phba->cfg_use_msi != 2) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0475 Not configured for supporting MSI-X " "cfg_use_msi: 0x%x\n", phba->cfg_use_msi); return -EINVAL; } if (phba->sli_rev < 3) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0476 HBA not supporting SLI-3 or later " "SLI Revision: 0x%x\n", phba->sli_rev); return -EINVAL; } /* Clear mailbox command fields */ memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); /* * SLI-3, Message Signaled Interrupt Fearure. 
*/ /* Multi-message attention configuration */ attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT | HA_LATT | HA_MBATT); attentionConditions[1] = 0; mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0]; mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1]; /* * Set up message number to HA bit association */ #ifdef __BIG_ENDIAN_BITFIELD /* RA0 (FCP Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1; /* RA1 (Other Protocol Extra Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1; #else /* __LITTLE_ENDIAN_BITFIELD */ /* RA0 (FCP Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1; /* RA1 (Other Protocol Extra Ring) */ mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1; #endif /* Multi-message interrupt autoclear configuration*/ mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0]; mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1]; /* For now, HBA autoclear does not work reliably, disable it */ mb->un.varCfgMSI.autoClearHA[0] = 0; mb->un.varCfgMSI.autoClearHA[1] = 0; /* Set command and owner bit */ mb->mbxCommand = MBX_CONFIG_MSI; mb->mbxOwner = OWN_HOST; return 0; } /** * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @topology: the link topology for the link to be initialized to. * @linkspeed: the link speed for the link to be initialized to. * * The initialize link mailbox command is used to initialize the Fibre * Channel link. This command must follow a configure port command that * establishes the mode of operation. * * This routine prepares the mailbox command for initializing link on a HBA * with the specified link topology and speed. 
**/ void lpfc_init_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) { lpfc_vpd_t *vpd; struct lpfc_sli *psli; MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); psli = &phba->sli; switch (topology) { case FLAGS_TOPOLOGY_MODE_LOOP_PT: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; break; case FLAGS_TOPOLOGY_MODE_PT_PT: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; break; case FLAGS_TOPOLOGY_MODE_LOOP: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; break; case FLAGS_TOPOLOGY_MODE_PT_LOOP: mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; break; case FLAGS_LOCAL_LB: mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB; break; } /* Enable asynchronous ABTS responses from firmware */ mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; /* NEW_FEATURE * Setting up the link speed */ vpd = &phba->vpd; if (vpd->rev.feaLevelHigh >= 0x02){ switch(linkspeed){ case LPFC_USER_LINK_SPEED_1G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_1G; break; case LPFC_USER_LINK_SPEED_2G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_2G; break; case LPFC_USER_LINK_SPEED_4G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_4G; break; case LPFC_USER_LINK_SPEED_8G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_8G; break; case LPFC_USER_LINK_SPEED_10G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_10G; break; case LPFC_USER_LINK_SPEED_16G: mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; mb->un.varInitLnk.link_speed = LINK_SPEED_16G; break; case LPFC_USER_LINK_SPEED_AUTO: default: mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; break; } } else mb->un.varInitLnk.link_speed 
= LINK_SPEED_AUTO; mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK; mb->mbxOwner = OWN_HOST; mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA; return; } /** * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * @vpi: virtual N_Port identifier. * * The read service parameter mailbox command is used to read the HBA port * service parameters. The service parameters are read into the buffer * specified directly by a BDE in the mailbox command. These service * parameters may then be used to build the payload of an N_Port/F_POrt * login request and reply (LOGI/ACC). * * This routine prepares the mailbox command for reading HBA port service * parameters. The DMA memory is allocated in this function and the addresses * are populated into the mailbox command for the HBA to DMA the service * parameters into. * * Return codes * 0 - Success * 1 - DMA memory allocation failed **/ int lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) { struct lpfc_dmabuf *mp; MAILBOX_t *mb; struct lpfc_sli *psli; psli = &phba->sli; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxOwner = OWN_HOST; /* Get a buffer to hold the HBAs Service Parameters */ mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); if (mp) mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); if (!mp || !mp->virt) { kfree(mp); mb->mbxCommand = MBX_READ_SPARM64; /* READ_SPARAM: no buffers */ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "0301 READ_SPARAM: no buffers\n"); return (1); } INIT_LIST_HEAD(&mp->list); mb->mbxCommand = MBX_READ_SPARM64; mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); if (phba->sli_rev >= LPFC_SLI_REV3) mb->un.varRdSparm.vpi = phba->vpi_ids[vpi]; /* save address for completion */ 
pmb->context1 = mp; return (0); } /** * lpfc_unreg_did - Prepare a mailbox command for unregistering DID * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @did: remote port identifier. * @pmb: pointer to the driver internal queue element for mailbox command. * * The unregister DID mailbox command is used to unregister an N_Port/F_Port * login for an unknown RPI by specifying the DID of a remote port. This * command frees an RPI context in the HBA port. This has the effect of * performing an implicit N_Port/F_Port logout. * * This routine prepares the mailbox command for unregistering a remote * N_Port/F_Port (DID) login. **/ void lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregDID.did = did; mb->un.varUnregDID.vpi = vpi; if ((vpi != 0xffff) && (phba->sli_rev == LPFC_SLI_REV4)) mb->un.varUnregDID.vpi = phba->vpi_ids[vpi]; mb->mbxCommand = MBX_UNREG_D_ID; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_config - Prepare a mailbox command for reading HBA configuration * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The read configuration mailbox command is used to read the HBA port * configuration parameters. This mailbox command provides a method for * seeing any parameters that may have changed via various configuration * mailbox commands. * * This routine prepares the mailbox command for reading out HBA configuration * parameters. **/ void lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_CONFIG; mb->mbxOwner = OWN_HOST; return; } /** * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. 
 *
 * The read link status mailbox command is used to read the link status from
 * the HBA. Link status includes all link-related error counters. These
 * counters are maintained by the HBA and originated in the link hardware
 * unit. Note that all of these counters wrap.
 *
 * This routine prepares the mailbox command for reading out HBA link status.
 **/
void
lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->mbxCommand = MBX_READ_LNK_STAT;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @did: remote port identifier.
 * @param: pointer to memory holding the server parameters.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @rpi: the rpi to use in the registration (usually only used for SLI4.
 *
 * The registration login mailbox command is used to register an N_Port or
 * F_Port login. This registration allows the HBA to cache the remote N_Port
 * service parameters internally and thereby make the appropriate FC-2
 * decisions. The remote port service parameters are handed off by the driver
 * to the HBA using a descriptor entry that directly identifies a buffer in
 * host memory. In exchange, the HBA returns an RPI identifier.
 *
 * This routine prepares the mailbox command for registering remote port login.
 * The function allocates DMA buffer for passing the service parameters to the
 * HBA with the mailbox command.
 *
 * Return codes
 *    0 - Success
 *    1 - DMA memory allocation failed
 **/
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* On SLI4 the caller-supplied rpi is a logical index into the
	 * driver's physical-rpi table; SLI3 and earlier use the firmware
	 * rpi returned in the mailbox completion instead.
	 */
	mb->un.varRegLogin.rpi = 0;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;
	/* Get a buffer to hold NPorts Service Parameters */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		/* kfree(NULL) is a no-op, so this covers both failure modes */
		kfree(mp);
		mb->mbxCommand = MBX_REG_LOGIN64;
		/* REG_LOGIN: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy param's into a new buffer */
	memcpy(sparam, param, sizeof (struct serv_parm));

	/* save address for completion */
	pmb->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return 0;
}

/**
 * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @rpi: remote port identifier
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The unregistration login mailbox command is used to unregister an N_Port
 * or F_Port login. This command frees an RPI context in the HBA. It has the
 * effect of performing an implicit N_Port/F_Port logout.
 *
 * This routine prepares the mailbox command for unregistering remote port
 * login.
 *
 * For SLI4 ports, the rpi passed to this function must be the physical
 * rpi value, not the logical index.
 **/
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
		 LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varUnregLogin.rpi = rpi;
	mb->un.varUnregLogin.rsvd1 = 0;
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];

	mb->mbxCommand = MBX_UNREG_LOGIN;
	mb->mbxOwner = OWN_HOST;

	return;
}

/**
 * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
 * @vport: pointer to a vport object.
 *
 * This routine sends mailbox command to unregister all active RPIs for
 * a vport.
 **/
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	/* Best effort: silently skipped if no mailbox element is available */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		/*
		 * For SLI4 functions, the rpi field is overloaded for
		 * the vport context unreg all.  This routine passes
		 * 0 for the rpi field in lpfc_unreg_login for compatibility
		 * with SLI3 and then overrides the rpi field with the
		 * expected value for SLI4.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}

/**
 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The registration vport identifier mailbox command is used to activate a
 * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
 * N_Port_ID against the information in the selected virtual N_Port context
 * block and marks it active to allow normal processing of IOCB commands and
 * received unsolicited exchanges.
 *
 * This routine prepares the mailbox command for registering a virtual N_Port.
 **/
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_hba *phba = vport->phba;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	/*
	 * Set the re-reg VPI bit for f/w to update the MAC address.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
		mb->un.varRegVpi.upd = 1;

	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
	mb->un.varRegVpi.sid = vport->fc_myDID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
	else
		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
	/* WWPN travels to firmware as two little-endian words */
	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
	       sizeof(struct lpfc_name));
	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);

	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The unregistration vport identifier mailbox command is used to inactivate
 * a virtual N_Port. The driver must have logged out and unregistered all
 * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
 * unregisters any default RPIs associated with the specified vpi, aborting
 * any active exchanges. The HBA will post the mailbox response after making
 * the virtual N_Port inactive.
 *
 * This routine prepares the mailbox command for unregistering a virtual
 * N_Port.
 **/
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* SLI3 and SLI4 keep the vpi in different mailbox fields */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
	else if (phba->sli_rev >= LPFC_SLI_REV4)
		mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];

	mb->mbxCommand = MBX_UNREG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up and initializes the IOCB rings in the Port Control
 * Block (PCB).
 **/
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* IOCB sizes differ between SLI2 and SLI3 mode */
		pring->sli.sli3.sizeCiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
		pring->sli.sli3.sizeRiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
		/* A ring MUST have both cmd and rsp entries defined to be
		   valid */
		if ((pring->sli.sli3.numCiocb == 0) ||
		    (pring->sli.sli3.numRiocb == 0)) {
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->sli.sli3.cmdringaddr = NULL;
			pring->sli.sli3.rspringaddr = NULL;
			continue;
		}
		/* Command ring setup for ring */
		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;

		/* DMA address is the offset of this ring's IOCBs within the
		 * slim2p region, rebased onto the region's bus address.
		 */
		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
			 (uint8_t *) phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numCiocb;

		/* Response ring setup for ring */
		pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];

		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numRiocb;
	}
}

/**
 * lpfc_read_rev - Prepare a mailbox command for reading HBA revision
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The read revision mailbox command is used to read the revision levels of
 * the HBA components. These components include hardware units, resident
 * firmware, and available firmware. HBAs that supports SLI-3 mode of
 * operation provide different response information depending on the version
 * requested by the driver.
 *
 * This routine prepares the mailbox command for reading HBA revision
 * information.
 **/
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->un.varRdRev.cv = 1;
	mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
	mb->mbxCommand = MBX_READ_REV;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_sli4_swap_str - Byte-swap the firmware name strings of a READ_REV reply
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * For an MBX_READ_REV completion, runs lpfc_sli_pcimem_bcopy over the
 * fw_name and ulp_fw_name fields in place (source == destination), which
 * converts their byte order for host consumption.  All other mailbox
 * commands are left untouched.
 **/
void
lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_mqe *mqe;

	switch (mb->mbxCommand) {
	case  MBX_READ_REV:
		 mqe = &pmb->u.mqe;
		 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
				 mqe->un.read_rev.fw_name, 16);
		 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
				 mqe->un.read_rev.ulp_fw_name, 16);
		 break;
	default:
		 break;
	}
	return;
}

/**
 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
 * @hbq_desc: pointer to the HBQ selection profile descriptor.
 *
 * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
 * tests the incoming frames' R_CTL/TYPE fields with works 10:15 and performs
 * the Sequence Length Test using the fields in the Selection Profile 2
 * extension in words 20:31.
 **/
static void
lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init  *hbq_desc)
{
	hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile2.maxlen     = hbq_desc->maxlen;
	hbqmb->profiles.profile2.seqlenoff  = hbq_desc->seqlenoff;
}

/**
 * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3
 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
 * @hbq_desc: pointer to the HBQ selection profile descriptor.
 *
 * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
 * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
 * the Sequence Length Test and Byte Field Test using the fields in the
 * Selection Profile 3 extension in words 20:31.
 **/
static void
lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init  *hbq_desc)
{
	hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile3.maxlen     = hbq_desc->maxlen;
	hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
	hbqmb->profiles.profile3.seqlenoff  = hbq_desc->seqlenoff;
	memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
	       sizeof(hbqmb->profiles.profile3.cmdmatch));
}

/**
 * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5
 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
 * @hbq_desc: pointer to the HBQ selection profile descriptor.
 *
 * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
 * HBA tests the initial frame of an incoming sequence using the frame's
 * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
 * and Byte Field Test using the fields in the Selection Profile 5 extension
 * words 20:31.
 **/
static void
lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init  *hbq_desc)
{
	hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile5.maxlen     = hbq_desc->maxlen;
	hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
	hbqmb->profiles.profile5.seqlenoff  = hbq_desc->seqlenoff;
	memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
	       sizeof(hbqmb->profiles.profile5.cmdmatch));
}

/**
 * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
 * @phba: pointer to lpfc hba data structure.
 * @id: HBQ identifier.
 * @hbq_desc: pointer to the HBA descriptor data structure.
 * @hbq_entry_index: index of the HBQ entry data structures.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
 * an HBQ. The configuration binds events that require buffers to a particular
 * ring and HBQ based on a selection profile.
 *
 * This routine prepares the mailbox command for configuring an HBQ.
 **/
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
		 struct lpfc_hbq_init *hbq_desc,
		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	hbqmb->hbqId = id;
	hbqmb->entry_count = hbq_desc->entry_count;   /* # entries in HBQ */
	hbqmb->recvNotify = hbq_desc->rn;             /* Receive
						       * Notification */
	hbqmb->numMask    = hbq_desc->mask_count;     /* # R_CTL/TYPE masks
						       * # in words 0-19 */
	hbqmb->profile    = hbq_desc->profile;	      /* Selection profile:
						       * 0 = all,
						       * 7 = logentry */
	hbqmb->ringMask   = hbq_desc->ring_mask;      /* Binds HBQ to a ring
						       * e.g. Ring0=b0001,
						       * ring2=b0100 */
	hbqmb->headerLen  = hbq_desc->headerLen;      /* 0 if not profile 4
						       * or 5 */
	hbqmb->logEntry   = hbq_desc->logEntry;       /* Set to 1 if this
						       * HBQ will be used
						       * for LogEntry
						       * buffers */
	/* HBQ entries live in the hbqslimp DMA region; rebase the index
	 * onto that region's bus address.
	 */
	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);

	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

	/* Copy info for profiles 2,3,5. Other
	 * profiles this area is reserved
	 */
	if (hbq_desc->profile == 2)
		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 3)
		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 5)
		lpfc_build_hbq_profile5(hbqmb, hbq_desc);

	/* Return if no rctl / type masks for this HBQ */
	if (!hbq_desc->mask_count)
		return;

	/* Otherwise we setup specific rctl / type masks for this HBQ */
	for (i = 0; i < hbq_desc->mask_count; i++) {
		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
	}

	return;
}

/**
 * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @ring:
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The configure ring mailbox command is used to configure an IOCB ring. This
 * configuration binds from one to six of HBA RC_CTL/TYPE mask entries to the
 * ring. This is used to map incoming sequences to a particular ring whose
 * RC_CTL/TYPE mask entry matches that of the sequence. The driver should not
 * attempt to configure a ring whose number is greater than the number
 * specified in the Port Control Block (PCB). It is an error to issue the
 * configure ring command more than once with the same ring number. The HBA
 * returns an error if the driver attempts this.
 *
 * This routine prepares the mailbox command for configuring IOCB ring.
**/ void lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) { int i; MAILBOX_t *mb = &pmb->u.mb; struct lpfc_sli *psli; struct lpfc_sli_ring *pring; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varCfgRing.ring = ring; mb->un.varCfgRing.maxOrigXchg = 0; mb->un.varCfgRing.maxRespXchg = 0; mb->un.varCfgRing.recvNotify = 1; psli = &phba->sli; pring = &psli->ring[ring]; mb->un.varCfgRing.numMask = pring->num_mask; mb->mbxCommand = MBX_CONFIG_RING; mb->mbxOwner = OWN_HOST; /* Is this ring configured for a specific profile */ if (pring->prt[0].profile) { mb->un.varCfgRing.profile = pring->prt[0].profile; return; } /* Otherwise we setup specific rctl / type masks for this ring */ for (i = 0; i < pring->num_mask; i++) { mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ) mb->un.varCfgRing.rrRegs[i].rmask = 0xff; else mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type; mb->un.varCfgRing.rrRegs[i].tmask = 0xff; } return; } /** * lpfc_config_port - Prepare a mailbox command for configuring port * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. * * The configure port mailbox command is used to identify the Port Control * Block (PCB) in the driver memory. After this command is issued, the * driver must not access the mailbox in the HBA without first resetting * the HBA. The HBA may copy the PCB information to internal storage for * subsequent use; the driver can not change the PCB information unless it * resets the HBA. * * This routine prepares the mailbox command for configuring port. 
 **/
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
	MAILBOX_t *mb = &pmb->u.mb;
	dma_addr_t pdma_addr;
	uint32_t bar_low, bar_high;
	size_t offset;
	struct lpfc_hgp hgp;
	int i;
	uint32_t pgp_offset;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);

	/* PCB bus address = slim2p bus base + PCB's offset in the region */
	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);

	/* Always Host Group Pointer is in SLIM */
	mb->un.varCfgPort.hps = 1;

	/* If HBA supports SLI=3 ask for it */

	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
		if (phba->cfg_enable_bg)
			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
		if (phba->cfg_enable_dss)
			mb->un.varCfgPort.cdss = 1; /* Configure Security */
		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
		if (phba->max_vpi && phba->cfg_enable_npiv &&
		    phba->vpd.sli3Feat.cmv) {
			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
			mb->un.varCfgPort.cmv = 1;
		} else
			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
	} else
		/* Side effect: hba falls back to SLI2 mode if SLI3 is
		 * unsupported */
		phba->sli_rev = LPFC_SLI_REV2;
	mb->un.varCfgPort.sli_mode = phba->sli_rev;

	/* If this is an SLI3 port, configure async status notification. */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varCfgPort.casabt = 1;

	/* Now setup pcb */
	phba->pcb->type = TYPE_NATIVE_SLI2;
	phba->pcb->feature = FEATURE_INITIAL_SLI2;

	/* Setup Mailbox pointers */
	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);

	/*
	 * Setup Host Group ring pointer.
	 *
	 * For efficiency reasons, the ring get/put pointers can be
	 * placed in adapter memory (SLIM) rather than in host memory.
	 * This allows firmware to avoid PCI reads/writes when updating
	 * and checking pointers.
	 *
	 * The firmware recognizes the use of SLIM memory by comparing
	 * the address of the get/put pointers structure with that of
	 * the SLIM BAR (BAR0).
	 *
	 * Caution: be sure to use the PCI config space value of BAR0/BAR1
	 * (the hardware's view of the base address), not the OS's
	 * value of pci_resource_start() as the OS value may be a cookie
	 * for ioremap/iomap.
	 */


	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);

	/*
	 * Set up HGP - Port Memory
	 *
	 * The port expects the host get/put pointers to reside in memory
	 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
	 * area of SLIM.  In SLI-2 mode, there's an additional 16 reserved
	 * words (0x40 bytes).  This area is not reserved if HBQs are
	 * configured in SLI-3.
	 *
	 * CR0Put    - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
	 * RR0Get                      0xc4              0x84
	 * CR1Put                      0xc8              0x88
	 * RR1Get                      0xcc              0x8c
	 * CR2Put                      0xd0              0x90
	 * RR2Get                      0xd4              0x94
	 * CR3Put                      0xd8              0x98
	 * RR3Get                      0xdc              0x9c
	 *
	 * Reserved                    0xa0-0xbf
	 *    If HBQs configured:
	 *                         HBQ 0 Put ptr  0xc0
	 *                         HBQ 1 Put ptr  0xc4
	 *                         HBQ 2 Put ptr  0xc8
	 *                         ......
	 *                         HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 *
	 */

	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
		phba->host_gp = &phba->mbox->us.s2.host[0];
		phba->hbq_put = NULL;
		offset = (uint8_t *)&phba->mbox->us.s2.host -
			(uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
	} else {
		/* Always Host Group Pointer is in SLIM */
		mb->un.varCfgPort.hps = 1;

		if (phba->sli_rev == 3) {
			phba->host_gp = &mb_slim->us.s3.host[0];
			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
		} else {
			phba->host_gp = &mb_slim->us.s2.host[0];
			phba->hbq_put = NULL;
		}

		/* mask off BAR0's flag bits 0 - 3 */
		/* NOTE(review): arithmetic on void __iomem * is a GCC
		 * extension (treated as byte-sized); relied on throughout
		 * the kernel.
		 */
		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
			(void __iomem *)phba->host_gp -
			(void __iomem *)phba->MBslimaddr;
		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
			phba->pcb->hgpAddrHigh = bar_high;
		else
			phba->pcb->hgpAddrHigh = 0;
		/* write HGP data to SLIM at the required longword offset */
		memset(&hgp, 0, sizeof(struct lpfc_hgp));

		for (i = 0; i < phba->sli.num_rings; i++) {
			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
				    sizeof(*phba->host_gp));
		}
	}

	/* Setup Port Group offset */
	if (phba->sli_rev == 3)
		pgp_offset = offsetof(struct lpfc_sli2_slim,
				      mbx.us.s3_pgp.port);
	else
		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
	pdma_addr = phba->slim2p.phys + pgp_offset;
	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);

	/* Use callback routine to setp rings in the pcb */
	lpfc_config_pcb_setup(phba);

	/* special handling for LC HBAs */
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t hbainit[5];

		lpfc_hba_init(phba, hbainit);

		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
	}

	/* Swap PCB if needed */
	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}

/**
 * lpfc_kill_board - Prepare a mailbox command for killing board
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The kill board mailbox command is used to tell firmware to perform a
 * graceful shutdown of a channel on a specified board to prepare for reset.
 * When the kill board mailbox command is received, the ER3 bit is set to 1
 * in the Host Status register and the ER Attention bit is set to 1 in the
 * Host Attention register of the HBA function that received the kill board
 * command.
 *
 * This routine prepares the mailbox command for killing the board in
 * preparation for a graceful shutdown.
 **/
void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_KILL_BOARD;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue
 * @phba: pointer to lpfc hba data structure.
 * @mbq: pointer to the driver internal queue element for mailbox command.
 *
 * Driver maintains a internal mailbox command queue implemented as a linked
 * list. When a mailbox command is issued, it shall be put into the mailbox
 * command queue such that they shall be processed orderly as HBA can process
 * one mailbox command at a time.
 *
 * NOTE(review): no locking here; callers presumably hold the appropriate
 * SLI lock — confirm against call sites.
 **/
void
lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
	struct lpfc_sli *psli;

	psli = &phba->sli;

	list_add_tail(&mbq->list, &psli->mboxq);

	psli->mboxq_cnt++;

	return;
}

/**
 * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue
 * @phba: pointer to lpfc hba data structure.
 *
 * Driver maintains a internal mailbox command queue implemented as a linked
 * list. When a mailbox command is issued, it shall be put into the mailbox
 * command queue such that they shall be processed orderly as HBA can process
 * one mailbox command at a time. After HBA finished processing a mailbox
 * command, the driver will remove a pending mailbox command from the head of
 * the mailbox command queue and send to the HBA for processing.
 *
 * Return codes
 *    pointer to the driver internal queue element for mailbox command.
 **/
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *mbq = NULL;
	struct lpfc_sli *psli = &phba->sli;

	/* mbq stays NULL (and the count untouched) if the queue is empty */
	list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
	if (mbq)
		psli->mboxq_cnt--;

	return mbq;
}

/**
 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
 * @phba: pointer to lpfc hba data structure.
 * @mbq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine put the completed mailbox command into the mailbox command
 * complete list. This is the unlocked version of the routine. The mailbox
 * complete list is used by the driver worker thread to process mailbox
 * complete callback functions outside the driver interrupt handler.
 **/
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}

/**
 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
 * @phba: pointer to lpfc hba data structure.
 * @mbq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine put the completed mailbox command into the mailbox command
 * complete list. This is the locked version of the routine. The mailbox
 * complete list is used by the driver worker thread to process mailbox
 * complete callback functions outside the driver interrupt handler.
 **/
void
lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	unsigned long iflag;

	/* This function expects to be called from interrupt context */
	spin_lock_irqsave(&phba->hbalock, iflag);
	__lpfc_mbox_cmpl_put(phba, mbq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

/**
 * lpfc_mbox_cmd_check - Check the validality of a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is to check whether a mailbox command is valid to be issued.
 * This check will be performed by both the mailbox issue API when a client
 * is to issue a mailbox command to the mailbox transport.
 *
 * Return 0 - pass the check, -ENODEV - fail the check
 **/
int
lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	/* Mailbox command that have a completion handler must also have a
	 * vport specified.  The two default completion handlers are exempt
	 * since they never dereference a vport.
	 */
	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!mboxq->vport) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
					"1814 Mbox x%x failed, no vport\n",
					mboxq->u.mb.mbxCommand);
			dump_stack();
			return -ENODEV;
		}
	}
	return 0;
}

/**
 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to check whether the HBA device is ready for posting a
 * mailbox command. It is used by the mailbox transport API at the time the
 * to post a mailbox command to the device.
 *
 * Return 0 - pass the check, -ENODEV - fail the check
 **/
int
lpfc_mbox_dev_check(struct lpfc_hba *phba)
{
	/* If the PCI channel is in offline state, do not issue mbox */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -ENODEV;

	/* If the HBA is in error state, do not issue mbox */
	if (phba->link_state == LPFC_HBA_ERROR)
		return -ENODEV;

	return 0;
}

/**
 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine retrieves the proper timeout value according to the mailbox
 * command code.
 *
 * Return codes
 *	Timeout value to be used for the given mailbox command
 **/
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	MAILBOX_t *mbox = &mboxq->u.mb;
	uint8_t subsys, opcode;

	switch (mbox->mbxCommand) {
	/* Flash/NVRAM access commands get the longer flash timeout. */
	case MBX_WRITE_NV:	/* 0x03 */
	case MBX_DUMP_MEMORY:	/* 0x17 */
	case MBX_UPDATE_CFG:	/* 0x1B */
	case MBX_DOWN_LOAD:	/* 0x1C */
	case MBX_DEL_LD_ENTRY:	/* 0x1D */
	case MBX_WRITE_VPARMS:	/* 0x32 */
	case MBX_LOAD_AREA:	/* 0x81 */
	case MBX_WRITE_WWN:	/* 0x98 */
	case MBX_LOAD_EXP_ROM:	/* 0x9C */
	case MBX_ACCESS_VDATA:	/* 0xA5 */
		return LPFC_MBOX_TMO_FLASH_CMD;
	case MBX_SLI4_CONFIG:	/* 0x9b */
		/* SLI_CONFIG timeout depends on the embedded sub-command. */
		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
			switch (opcode) {
			/* Long-running object/profile operations. */
			case LPFC_MBOX_OPCODE_READ_OBJECT:
			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
			case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
			case LPFC_MBOX_OPCODE_RESET_LICENSES:
			case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
			case LPFC_MBOX_OPCODE_GET_VPD_DATA:
			case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		return LPFC_MBOX_SLI4_CONFIG_TMO;
	}
	/* Everything else uses the default mailbox timeout. */
	return LPFC_MBOX_TMO;
}

/**
 * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
 * @mbox: pointer to lpfc mbox command.
 * @sgentry: sge entry index.
 * @phyaddr: physical address for the sge
 * @length: Length of the sge.
 *
 * This routine sets up an entry in the non-embedded mailbox command at the sge
 * index location.
 **/
void
lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
		      dma_addr_t phyaddr, uint32_t length)
{
	struct lpfc_mbx_nembed_cmd *nembed_sge;

	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
		&mbox->u.mqe.un.nembed_cmd;
	/* Split the DMA address into the low/high sge address words. */
	nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
	nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
	nembed_sge->sge[sgentry].length = length;
}

/**
 * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
 * @mbox: pointer to lpfc mbox command.
 * @sgentry: sge entry index.
 * @sge: destination the sge entry is copied into.
 *
 * This routine gets an entry from the non-embedded mailbox command at the sge
 * index location.
 **/
void
lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
		      struct lpfc_mbx_sge *sge)
{
	struct lpfc_mbx_nembed_cmd *nembed_sge;

	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
		&mbox->u.mqe.un.nembed_cmd;
	sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
	sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
	sge->length = nembed_sge->sge[sgentry].length;
}

/**
 * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command.
 *
 * This routine frees SLI4 specific mailbox command for sending IOCTL command.
 **/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	struct lpfc_mbx_sge sge;
	dma_addr_t phyaddr;
	uint32_t sgecount, sgentry;

	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	/* For embedded mbox command, just free the mbox command */
	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* For non-embedded mbox command, we need to free the pages first */
	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
	/* There is nothing we can do if there is no sge address array */
	if (unlikely(!mbox->sge_array)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}
	/* Each non-embedded DMA memory was allocated in the length of a page,
	 * so SLI4_PAGE_SIZE is the a-priori size passed back to the DMA free.
	 */
	for (sgentry = 0; sgentry < sgecount; sgentry++) {
		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  mbox->sge_array->addr[sgentry], phyaddr);
	}
	/* Free the sge address array memory */
	kfree(mbox->sge_array);
	/* Finally, free the mailbox command itself */
	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command.
 * @subsystem: The sli4 config sub mailbox subsystem.
 * @opcode: The sli4 config sub mailbox command opcode.
 * @length: Length of the sli4 config mailbox command (including sub-header).
 * @emb: LPFC_SLI4_MBX_EMBED for an embedded command, LPFC_SLI4_MBX_NEMBED
 *       for a non-embedded (paged DMA) command.
 *
 * This routine sets up the header fields of SLI4 specific mailbox command
 * for sending IOCTL command.
 *
 * Return: the actual length of the mbox command allocated (mostly useful
 * for none embedded mailbox command).
 **/
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
	struct lpfc_mbx_sli4_config *sli4_config;
	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
	uint32_t alloc_len;
	uint32_t resid_len;
	uint32_t pagen, pcount;
	void *viraddr;
	dma_addr_t phyaddr;

	/* Set up SLI4 mailbox command header fields */
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

	/* Set up SLI4 ioctl command header fields */
	sli4_config = &mbox->u.mqe.un.sli4_config;

	/* Setup for the embedded mbox command */
	if (emb) {
		/* Set up main header fields */
		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
		sli4_config->header.cfg_mhdr.payload_length = length;
		/* Set up sub-header fields following main header */
		bf_set(lpfc_mbox_hdr_opcode,
			&sli4_config->header.cfg_shdr.request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem,
			&sli4_config->header.cfg_shdr.request, subsystem);
		sli4_config->header.cfg_shdr.request.request_length =
			length - LPFC_MBX_CMD_HDR_LENGTH;
		return length;
	}

	/* Setup for the non-embedded mbox command: the payload is carried in
	 * up to LPFC_SLI4_MBX_SGE_MAX_PAGES page-sized DMA buffers.
	 */
	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
	/* Allocate record for keeping SGE virtual addresses */
	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
				  GFP_KERNEL);
	if (!mbox->sge_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2527 Failed to allocate non-embedded SGE "
				"array.\n");
		return 0;
	}
	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
		/* The DMA memory is always allocated in the length of a
		 * page even though the last SGE might not fill up to a
		 * page, this is used as a priori size of SLI4_PAGE_SIZE for
		 * the later DMA memory free.
		 */
		viraddr = dma_alloc_coherent(&phba->pcidev->dev,
					     SLI4_PAGE_SIZE, &phyaddr,
					     GFP_KERNEL);
		/* In case of malloc fails, proceed with whatever we have */
		if (!viraddr)
			break;
		memset(viraddr, 0, SLI4_PAGE_SIZE);
		mbox->sge_array->addr[pagen] = viraddr;
		/* Keep the first page for later sub-header construction */
		if (pagen == 0)
			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      resid_len);
			alloc_len = length;
		}
	}

	/* Set up main header fields in mailbox command */
	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

	/* Set up sub-header fields into the first page */
	if (pagen > 0) {
		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
		cfg_shdr->request.request_length =
				alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
	}
	/* The sub-header is in DMA memory, which needs endian conversion */
	if (cfg_shdr)
		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
				      sizeof(union lpfc_sli4_cfg_shdr));
	return alloc_len;
}

/**
 * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to an allocated lpfc mbox resource.
 * @exts_count: the number of extents, if required, to allocate.
 * @rsrc_type: the resource extent type.
 * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
 *
 * This routine completes the subcommand header for SLI4 resource extent
 * mailbox commands. It is called after lpfc_sli4_config. The caller must
 * pass an allocated mailbox and the attributes required to initialize the
 * mailbox correctly.
 *
 * Return: the actual length of the mbox command allocated.
 **/
int
lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
			   uint16_t exts_count, uint16_t rsrc_type, bool emb)
{
	uint8_t opcode = 0;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
	void *virtaddr = NULL;

	/* Set up SLI4 ioctl command header fields */
	if (emb == LPFC_SLI4_MBX_NEMBED) {
		/* Get the first SGE entry from the non-embedded DMA memory */
		virtaddr = mbox->sge_array->addr[0];
		if (virtaddr == NULL)
			return 1;
		n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
	}

	/*
	 * The resource type is common to all extent Opcodes and resides in the
	 * same position.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED)
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
		       rsrc_type);
	else {
		/* This is DMA data.  Byteswap is required. */
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       n_rsrc_extnt, rsrc_type);
		lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
				      &n_rsrc_extnt->word4,
				      sizeof(uint32_t));
	}

	/* Complete the initialization for the particular Opcode. */
	opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
	switch (opcode) {
	case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
		/* Only the allocate opcode also carries an extent count. */
		if (emb == LPFC_SLI4_MBX_EMBED)
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
			       exts_count);
		else
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       n_rsrc_extnt, exts_count);
		break;
	case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
	case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
	case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
		/* Initialization is complete.*/
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2929 Resource Extent Opcode x%x is "
				"unsupported\n", opcode);
		return 1;
	}

	return 0;
}

/**
 * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command queue entry.
 *
 * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox
 * command.
If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall * be returned. **/ uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_mbx_sli4_config *sli4_cfg; union lpfc_sli4_cfg_shdr *cfg_shdr; if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) return LPFC_MBOX_SUBSYSTEM_NA; sli4_cfg = &mbox->u.mqe.un.sli4_config; /* For embedded mbox command, get opcode from embedded sub-header*/ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); } /* For non-embedded mbox command, get opcode from first dma page */ if (unlikely(!mbox->sge_array)) return LPFC_MBOX_SUBSYSTEM_NA; cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); } /** * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd * @phba: pointer to lpfc hba data structure. * @mbox: pointer to lpfc mbox command queue entry. * * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) be * returned. 
**/ uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_mbx_sli4_config *sli4_cfg; union lpfc_sli4_cfg_shdr *cfg_shdr; if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) return LPFC_MBOX_OPCODE_NA; sli4_cfg = &mbox->u.mqe.un.sli4_config; /* For embedded mbox command, get opcode from embedded sub-header*/ if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); } /* For non-embedded mbox command, get opcode from first dma page */ if (unlikely(!mbox->sge_array)) return LPFC_MBOX_OPCODE_NA; cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); } /** * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd * @phba: pointer to lpfc hba data structure. * @fcf_index: index to fcf table. * * This routine routine allocates and constructs non-embedded mailbox command * for reading a FCF table entry referred by @fcf_index. * * Return: pointer to the mailbox command constructed if successful, otherwise * NULL. **/ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba, struct lpfcMboxq *mboxq, uint16_t fcf_index) { void *virt_addr; dma_addr_t phys_addr; uint8_t *bytep; struct lpfc_mbx_sge sge; uint32_t alloc_len, req_len; struct lpfc_mbx_read_fcf_tbl *read_fcf; if (!mboxq) return -ENOMEM; req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, LPFC_SLI4_MBX_NEMBED); if (alloc_len < req_len) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0291 Allocated DMA memory size (x%x) is " "less than the requested DMA memory " "size (x%x)\n", alloc_len, req_len); return -ENOMEM; } /* Get the first SGE entry from the non-embedded DMA memory. 
This * routine only uses a single SGE. */ lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); virt_addr = mboxq->sge_array->addr[0]; read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; /* Set up command fields */ bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); /* Perform necessary endian conversion */ bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); return 0; } /** * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox * @mboxq: pointer to lpfc mbox command. * * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES * mailbox command. **/ void lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) { /* Set up SLI4 mailbox command header fields */ memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS); /* Set up host requested features. */ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable DIF (block guard) only if configured to do so. */ if (phba->cfg_enable_bg) bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable NPIV only if configured to do so. */ if (phba->max_vpi && phba->cfg_enable_npiv) bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); return; } /** * lpfc_init_vfi - Initialize the INIT_VFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @vport: Vport associated with the VF. * * This routine initializes @mbox to all zeros and then fills in the mailbox * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI * in the context of an FCF. The driver issues this command to setup a VFI * before issuing a FLOGI to login to the VSAN. The driver should also issue a * REG_VFI after a successful VSAN login. 
**/ void lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) { struct lpfc_mbx_init_vfi *init_vfi; memset(mbox, 0, sizeof(*mbox)); mbox->vport = vport; init_vfi = &mbox->u.mqe.un.init_vfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); bf_set(lpfc_init_vfi_vr, init_vfi, 1); bf_set(lpfc_init_vfi_vt, init_vfi, 1); bf_set(lpfc_init_vfi_vp, init_vfi, 1); bf_set(lpfc_init_vfi_vfi, init_vfi, vport->phba->sli4_hba.vfi_ids[vport->vfi]); bf_set(lpfc_init_vfi_vpi, init_vfi, vport->phba->vpi_ids[vport->vpi]); bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); } /** * lpfc_reg_vfi - Initialize the REG_VFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @vport: vport associated with the VF. * @phys: BDE DMA bus address used to send the service parameters to the HBA. * * This routine initializes @mbox to all zeros and then fills in the mailbox * fields from @vport, and uses @buf as a DMAable buffer to send the vport's * fc service parameters to the HBA for this VFI. REG_VFI configures virtual * fabrics identified by VFI in the context of an FCF. 
 **/
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
	struct lpfc_mbx_reg_vfi *reg_vfi;
	struct lpfc_hba *phba = vport->phba;

	memset(mbox, 0, sizeof(*mbox));
	reg_vfi = &mbox->u.mqe.un.reg_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
	       phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
	bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
	/* Copy the port name and convert its two words to little endian. */
	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
	reg_vfi->e_d_tov = phba->fc_edtov;
	reg_vfi->r_a_tov = phba->fc_ratov;
	/* BDE describing the DMA buffer holding the service parameters. */
	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
	reg_vfi->bde.addrLow = putPaddrLow(phys);
	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);

	/* Only FC supports upd bit */
	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
	    (vport->fc_flag & FC_VFI_REGISTERED) &&
	    (!phba->fc_topology_changed)) {
		/* Re-register: update the existing VFI instead of validating
		 * a new VP context.
		 */
		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
			"3134 Register VFI, mydid:x%x, fcfi:%d, "
			" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
			" port_state:x%x topology chg:%d\n",
			vport->fc_myDID,
			phba->fcf.fcfi,
			phba->sli4_hba.vfi_ids[vport->vfi],
			phba->vpi_ids[vport->vpi],
			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
			vport->port_state, phba->fc_topology_changed);
}

/**
 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
 * @phba: pointer to the hba structure to init the VPI for.
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vpi: VPI to be initialized.
 *
 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
 * with the virtual N Port.
 * The SLI Host issues this command before issuing a
 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
 * successful virtual NPort login.
 **/
void
lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
{
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
	/* Translate the host VPI/VFI indexes into the adapter's id spaces. */
	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
	       phba->vpi_ids[vpi]);
	bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
	       phba->sli4_hba.vfi_ids[phba->pport->vfi]);
}

/**
 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vport: vport associated with the VF.
 *
 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
 * (logical NPort) into the inactive state. The SLI Host must have logged out
 * and unregistered all remote N_Ports to abort any activity on the virtual
 * fabric. The SLI Port posts the mailbox response after marking the virtual
 * fabric inactive.
 **/
void
lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
}

/**
 * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
 * @phba: pointer to the hba structure containing.
 * @mbox: pointer to lpfc mbox command to initialize.
 *
 * This function create a SLI4 dump mailbox command to dump configure
 * region 23.
 **/
int
lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_dmabuf *mp = NULL;
	MAILBOX_t *mb;

	memset(mbox, 0, sizeof(*mbox));
	mb = &mbox->u.mb;

	/* Allocate the dmabuf descriptor plus a mailbox buffer to receive
	 * the dump data; both must succeed.
	 */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		/* mp->virt is NULL here, so only the descriptor is freed */
		kfree(mp);
		/* dump config region 23 failed to allocate memory */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
			"2569 lpfc dump config region 23: memory"
			" allocation failed\n");
		return 1;
	}

	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* save address for completion */
	mbox->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.region_id = DMP_REGION_23;
	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	return 0;
}

/**
 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
 * @mbox: pointer to lpfc mbox command to initialize.
 *
 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
 * SLI Host uses the command to activate an FCF after it has acquired FCF
 * information via a READ_FCF mailbox command. This mailbox command also is
 * used to indicate where received unsolicited frames from this FCF will be
 * sent. By default this routine will set up the FCF to forward all
 * unsolicited frames to the RQ ID passed in the @phba. This can be
 * overridden by the caller for more complicated setups.
**/ void lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) { struct lpfc_mbx_reg_fcfi *reg_fcfi; memset(mbox, 0, sizeof(*mbox)); reg_fcfi = &mbox->u.mqe.un.reg_fcfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.current_rec.fcf_indx); /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.current_rec.vlan_id); } } /** * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @fcfi: FCFI to be unregistered. * * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). * The SLI Host uses the command to inactivate an FCFI. **/ void lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi) { memset(mbox, 0, sizeof(*mbox)); bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI); bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi); } /** * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command * @mbox: pointer to lpfc mbox command to initialize. * @ndlp: The nodelist structure that describes the RPI to resume. * * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a * link event. 
**/ void lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = ndlp->phba; struct lpfc_mbx_resume_rpi *resume_rpi; memset(mbox, 0, sizeof(*mbox)); resume_rpi = &mbox->u.mqe.un.resume_rpi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); bf_set(lpfc_resume_rpi_index, resume_rpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); resume_rpi->event_tag = ndlp->phba->fc_eventTag; } /** * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages * mailbox command. * @mbox: pointer to lpfc mbox command to initialize. * * The PORT_CAPABILITIES supported pages mailbox command is issued to * retrieve the particular feature pages supported by the port. **/ void lpfc_supported_pages(struct lpfcMboxq *mbox) { struct lpfc_mbx_supp_pages *supp_pages; memset(mbox, 0, sizeof(*mbox)); supp_pages = &mbox->u.mqe.un.supp_pages; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); bf_set(cpn, supp_pages, LPFC_SUPP_PAGES); } /** * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd. * @mbox: pointer to lpfc mbox command to initialize. * * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to * retrieve the particular SLI4 features supported by the port. **/ void lpfc_pc_sli4_params(struct lpfcMboxq *mbox) { struct lpfc_mbx_pc_sli4_params *sli4_params; memset(mbox, 0, sizeof(*mbox)); sli4_params = &mbox->u.mqe.un.sli4_params; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS); }
gpl-2.0
calonso-conabio/linux
drivers/mtd/maps/sa1100-flash.c
609
6449
/* * Flash memory access on SA11x0 based devices * * (C) 2000 Nicolas Pitre <nico@fluxnic.net> */ #include <linux/module.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/concat.h> #include <mach/hardware.h> #include <asm/sizes.h> #include <asm/mach/flash.h> struct sa_subdev_info { char name[16]; struct map_info map; struct mtd_info *mtd; struct flash_platform_data *plat; }; struct sa_info { struct mtd_info *mtd; int num_subdev; struct sa_subdev_info subdev[0]; }; static DEFINE_SPINLOCK(sa1100_vpp_lock); static int sa1100_vpp_refcnt; static void sa1100_set_vpp(struct map_info *map, int on) { struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); unsigned long flags; spin_lock_irqsave(&sa1100_vpp_lock, flags); if (on) { if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */ subdev->plat->set_vpp(1); } else { if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */ subdev->plat->set_vpp(0); } spin_unlock_irqrestore(&sa1100_vpp_lock, flags); } static void sa1100_destroy_subdev(struct sa_subdev_info *subdev) { if (subdev->mtd) map_destroy(subdev->mtd); if (subdev->map.virt) iounmap(subdev->map.virt); release_mem_region(subdev->map.phys, subdev->map.size); } static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *res) { unsigned long phys; unsigned int size; int ret; phys = res->start; size = res->end - phys + 1; /* * Retrieve the bankwidth from the MSC registers. * We currently only implement CS0 and CS1 here. */ switch (phys) { default: printk(KERN_WARNING "SA1100 flash: unknown base address " "0x%08lx, assuming CS0\n", phys); case SA1100_CS0_PHYS: subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 
2 : 4; break; case SA1100_CS1_PHYS: subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4; break; } if (!request_mem_region(phys, size, subdev->name)) { ret = -EBUSY; goto out; } if (subdev->plat->set_vpp) subdev->map.set_vpp = sa1100_set_vpp; subdev->map.phys = phys; subdev->map.size = size; subdev->map.virt = ioremap(phys, size); if (!subdev->map.virt) { ret = -ENOMEM; goto err; } simple_map_init(&subdev->map); /* * Now let's probe for the actual flash. Do it here since * specific machine settings might have been set above. */ subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map); if (subdev->mtd == NULL) { ret = -ENXIO; goto err; } subdev->mtd->owner = THIS_MODULE; printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n", phys, (unsigned)(subdev->mtd->size >> 20), subdev->map.bankwidth * 8); return 0; err: sa1100_destroy_subdev(subdev); out: return ret; } static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *plat) { int i; if (info->mtd) { mtd_device_unregister(info->mtd); if (info->mtd != info->subdev[0].mtd) mtd_concat_destroy(info->mtd); } for (i = info->num_subdev - 1; i >= 0; i--) sa1100_destroy_subdev(&info->subdev[i]); kfree(info); if (plat->exit) plat->exit(); } static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) { struct sa_info *info; int nr, size, i, ret = 0; /* * Count number of devices. */ for (nr = 0; ; nr++) if (!platform_get_resource(pdev, IORESOURCE_MEM, nr)) break; if (nr == 0) { ret = -ENODEV; goto out; } size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr; /* * Allocate the map_info structs in one go. */ info = kzalloc(size, GFP_KERNEL); if (!info) { ret = -ENOMEM; goto out; } if (plat->init) { ret = plat->init(); if (ret) goto err; } /* * Claim and then map the memory regions. 
*/ for (i = 0; i < nr; i++) { struct sa_subdev_info *subdev = &info->subdev[i]; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) break; subdev->map.name = subdev->name; sprintf(subdev->name, "%s-%d", plat->name, i); subdev->plat = plat; ret = sa1100_probe_subdev(subdev, res); if (ret) break; } info->num_subdev = i; /* * ENXIO is special. It means we didn't find a chip when we probed. */ if (ret != 0 && !(ret == -ENXIO && info->num_subdev > 0)) goto err; /* * If we found one device, don't bother with concat support. If * we found multiple devices, use concat if we have it available, * otherwise fail. Either way, it'll be called "sa1100". */ if (info->num_subdev == 1) { strcpy(info->subdev[0].name, plat->name); info->mtd = info->subdev[0].mtd; ret = 0; } else if (info->num_subdev > 1) { struct mtd_info *cdev[nr]; /* * We detected multiple devices. Concatenate them together. */ for (i = 0; i < info->num_subdev; i++) cdev[i] = info->subdev[i].mtd; info->mtd = mtd_concat_create(cdev, info->num_subdev, plat->name); if (info->mtd == NULL) ret = -ENXIO; } if (ret == 0) return info; err: sa1100_destroy(info, plat); out: return ERR_PTR(ret); } static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL }; static int sa1100_mtd_probe(struct platform_device *pdev) { struct flash_platform_data *plat = dev_get_platdata(&pdev->dev); struct sa_info *info; int err; if (!plat) return -ENODEV; info = sa1100_setup_mtd(pdev, plat); if (IS_ERR(info)) { err = PTR_ERR(info); goto out; } /* * Partition selection stuff. 
*/ mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts, plat->nr_parts); platform_set_drvdata(pdev, info); err = 0; out: return err; } static int sa1100_mtd_remove(struct platform_device *pdev) { struct sa_info *info = platform_get_drvdata(pdev); struct flash_platform_data *plat = dev_get_platdata(&pdev->dev); sa1100_destroy(info, plat); return 0; } static struct platform_driver sa1100_mtd_driver = { .probe = sa1100_mtd_probe, .remove = sa1100_mtd_remove, .driver = { .name = "sa1100-mtd", }, }; module_platform_driver(sa1100_mtd_driver); MODULE_AUTHOR("Nicolas Pitre"); MODULE_DESCRIPTION("SA1100 CFI map driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sa1100-mtd");
gpl-2.0
npf-ati/linux-2.6-imx
drivers/net/wireless/ath/ath6kl/core.c
865
9201
/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/vmalloc.h>

#include "debug.h"
#include "hif-ops.h"
#include "htc-ops.h"
#include "cfg80211.h"

/* Module parameters; all writable at runtime through sysfs (mode 0644). */
unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int wow_mode;
static unsigned int uart_debug;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
static unsigned int recovery_enable;
static unsigned int heart_beat_poll;

module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
module_param(heart_beat_poll, uint, 0644);
MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
MODULE_PARM_DESC(heart_beat_poll,
		 "Enable fw error detection periodic polling in msecs - Also set recovery_enable for this to be effective");

/* Thin export: forward a transmit completion to the HTC layer. */
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
	ath6kl_htc_tx_complete(ar, skb);
}
EXPORT_SYMBOL(ath6kl_core_tx_complete);

void
ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe) { ath6kl_htc_rx_complete(ar, skb, pipe); } EXPORT_SYMBOL(ath6kl_core_rx_complete); int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type) { struct ath6kl_bmi_target_info targ_info; struct wireless_dev *wdev; int ret = 0, i; switch (htc_type) { case ATH6KL_HTC_TYPE_MBOX: ath6kl_htc_mbox_attach(ar); break; case ATH6KL_HTC_TYPE_PIPE: ath6kl_htc_pipe_attach(ar); break; default: WARN_ON(1); return -ENOMEM; } ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); if (!ar->ath6kl_wq) return -ENOMEM; ret = ath6kl_bmi_init(ar); if (ret) goto err_wq; /* * Turn on power to get hardware (target) version and leave power * on delibrately as we will boot the hardware anyway within few * seconds. */ ret = ath6kl_hif_power_on(ar); if (ret) goto err_bmi_cleanup; ret = ath6kl_bmi_get_target_info(ar, &targ_info); if (ret) goto err_power_off; ar->version.target_ver = le32_to_cpu(targ_info.version); ar->target_type = le32_to_cpu(targ_info.type); ar->wiphy->hw_version = le32_to_cpu(targ_info.version); ret = ath6kl_init_hw_params(ar); if (ret) goto err_power_off; ar->htc_target = ath6kl_htc_create(ar); if (!ar->htc_target) { ret = -ENOMEM; goto err_power_off; } ar->testmode = testmode; ret = ath6kl_init_fetch_firmwares(ar); if (ret) goto err_htc_cleanup; /* FIXME: we should free all firmwares in the error cases below */ /* * Backwards compatibility support for older ar6004 firmware images * which do not set these feature flags. 
*/ if (ar->target_type == TARGET_TYPE_AR6004 && ar->fw_api <= 4) { __set_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES, ar->fw_capabilities); __set_bit(ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, ar->fw_capabilities); if (ar->hw.id == AR6004_HW_1_3_VERSION) __set_bit(ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, ar->fw_capabilities); } /* Indicate that WMI is enabled (although not ready yet) */ set_bit(WMI_ENABLED, &ar->flag); ar->wmi = ath6kl_wmi_init(ar); if (!ar->wmi) { ath6kl_err("failed to initialize wmi\n"); ret = -EIO; goto err_htc_cleanup; } ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi); /* setup access class priority mappings */ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */ ar->ac_stream_pri_map[WMM_AC_BE] = 1; ar->ac_stream_pri_map[WMM_AC_VI] = 2; ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */ /* allocate some buffers that handle larger AMSDU frames */ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS); ath6kl_cookie_init(ar); ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER | ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST; if (suspend_mode && suspend_mode >= WLAN_POWER_STATE_CUT_PWR && suspend_mode <= WLAN_POWER_STATE_WOW) ar->suspend_mode = suspend_mode; else ar->suspend_mode = 0; if (suspend_mode == WLAN_POWER_STATE_WOW && (wow_mode == WLAN_POWER_STATE_CUT_PWR || wow_mode == WLAN_POWER_STATE_DEEP_SLEEP)) ar->wow_suspend_mode = wow_mode; else ar->wow_suspend_mode = 0; if (uart_debug) ar->conf_flags |= ATH6KL_CONF_UART_DEBUG; set_bit(FIRST_BOOT, &ar->flag); ath6kl_debug_init(ar); ret = ath6kl_init_hw_start(ar); if (ret) { ath6kl_err("Failed to start hardware: %d\n", ret); goto err_rxbuf_cleanup; } /* give our connected endpoints some buffers */ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep); ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]); ret = ath6kl_cfg80211_init(ar); if (ret) goto err_rxbuf_cleanup; ret = ath6kl_debug_init_fs(ar); if (ret) { wiphy_unregister(ar->wiphy); goto err_rxbuf_cleanup; } for (i = 0; i < 
ar->vif_max; i++) ar->avail_idx_map |= BIT(i); rtnl_lock(); /* Add an initial station interface */ wdev = ath6kl_interface_add(ar, "wlan%d", NET_NAME_ENUM, NL80211_IFTYPE_STATION, 0, INFRA_NETWORK); rtnl_unlock(); if (!wdev) { ath6kl_err("Failed to instantiate a network device\n"); ret = -ENOMEM; wiphy_unregister(ar->wiphy); goto err_rxbuf_cleanup; } ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n", __func__, wdev->netdev->name, wdev->netdev, ar); ar->fw_recovery.enable = !!recovery_enable; if (!ar->fw_recovery.enable) return ret; if (heart_beat_poll && test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, ar->fw_capabilities)) ar->fw_recovery.hb_poll = heart_beat_poll; ath6kl_recovery_init(ar); return ret; err_rxbuf_cleanup: ath6kl_debug_cleanup(ar); ath6kl_htc_flush_rx_buf(ar->htc_target); ath6kl_cleanup_amsdu_rxbufs(ar); ath6kl_wmi_shutdown(ar->wmi); clear_bit(WMI_ENABLED, &ar->flag); ar->wmi = NULL; err_htc_cleanup: ath6kl_htc_cleanup(ar->htc_target); err_power_off: ath6kl_hif_power_off(ar); err_bmi_cleanup: ath6kl_bmi_cleanup(ar); err_wq: destroy_workqueue(ar->ath6kl_wq); return ret; } EXPORT_SYMBOL(ath6kl_core_init); struct ath6kl *ath6kl_core_create(struct device *dev) { struct ath6kl *ar; u8 ctr; ar = ath6kl_cfg80211_create(); if (!ar) return NULL; ar->p2p = !!ath6kl_p2p; ar->dev = dev; ar->vif_max = 1; ar->max_norm_iface = 1; spin_lock_init(&ar->lock); spin_lock_init(&ar->mcastpsq_lock); spin_lock_init(&ar->list_lock); init_waitqueue_head(&ar->event_wq); sema_init(&ar->sem, 1); INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue); INIT_LIST_HEAD(&ar->vif_list); clear_bit(WMI_ENABLED, &ar->flag); clear_bit(SKIP_SCAN, &ar->flag); clear_bit(DESTROY_IN_PROGRESS, &ar->flag); ar->tx_pwr = 0; ar->intra_bss = 1; ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD; ar->state = ATH6KL_STATE_OFF; memset((u8 *)ar->sta_list, 0, AP_MAX_NUM_STA * sizeof(struct ath6kl_sta)); /* Init the PS queues */ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) { 
spin_lock_init(&ar->sta_list[ctr].psq_lock); skb_queue_head_init(&ar->sta_list[ctr].psq); skb_queue_head_init(&ar->sta_list[ctr].apsdq); ar->sta_list[ctr].mgmt_psq_len = 0; INIT_LIST_HEAD(&ar->sta_list[ctr].mgmt_psq); ar->sta_list[ctr].aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL); if (!ar->sta_list[ctr].aggr_conn) { ath6kl_err("Failed to allocate memory for sta aggregation information\n"); ath6kl_core_destroy(ar); return NULL; } } skb_queue_head_init(&ar->mcastpsq); memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3); return ar; } EXPORT_SYMBOL(ath6kl_core_create); void ath6kl_core_cleanup(struct ath6kl *ar) { ath6kl_hif_power_off(ar); ath6kl_recovery_cleanup(ar); destroy_workqueue(ar->ath6kl_wq); if (ar->htc_target) ath6kl_htc_cleanup(ar->htc_target); ath6kl_cookie_cleanup(ar); ath6kl_cleanup_amsdu_rxbufs(ar); ath6kl_bmi_cleanup(ar); ath6kl_debug_cleanup(ar); kfree(ar->fw_board); kfree(ar->fw_otp); vfree(ar->fw); kfree(ar->fw_patch); kfree(ar->fw_testscript); ath6kl_cfg80211_cleanup(ar); } EXPORT_SYMBOL(ath6kl_core_cleanup); void ath6kl_core_destroy(struct ath6kl *ar) { ath6kl_cfg80211_destroy(ar); } EXPORT_SYMBOL(ath6kl_core_destroy); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Core module for AR600x SDIO and USB devices."); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
vickylinuxer/at91sam9263-kernel
drivers/staging/rtl8192su/r8192U_wx.c
865
32433
/*
 * This file contains wireless extension handlers.
 *
 * This is part of rtl8180 OpenSource driver.
 * Copyright (C) Andrea Merello 2004-2005  <andreamrl@tiscali.it>
 * Released under the terms of GPL (General Public Licence)
 *
 * Parts of this driver are based on the GPL part
 * of the official realtek driver.
 *
 * Parts of this driver are based on the rtl8180 driver skeleton
 * from Patric Schenke & Andres Salomon.
 *
 * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
 *
 * We want to tanks the Authors of those projects and the Ndiswrapper
 * project Authors.
 */

#include <linux/string.h>
#include "r8192U.h"
#include "r8192S_hw.h"

#include "ieee80211/dot11d.h"

/* Number of entries in rtl8180_rates[] below. */
#define RATE_COUNT 12

/* Supported bit rates in bit/s, reported through SIOCGIWRANGE. */
u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
	6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};

#ifndef ENETDOWN
#define ENETDOWN 1
#endif

/* SIOCGIWFREQ: delegate to the ieee80211 layer. */
static int r8192_wx_get_freq(struct net_device *dev,
			     struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
}

/* SIOCGIWMODE: delegate to the ieee80211 layer. */
static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
}

/* SIOCGIWRATE: delegate to the ieee80211 layer. */
static int r8192_wx_get_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
}

/* SIOCSIWRATE: delegate under wx_sem (serializes wx handlers). */
static int r8192_wx_set_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);

	up(&priv->wx_sem);

	return ret;
}

/* SIOCSIWRTS: delegate under wx_sem. */
static int r8192_wx_set_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_rts(priv->ieee80211, info, wrqu, extra);

	up(&priv->wx_sem);

	return ret;
}

/* SIOCGIWRTS: delegate to the ieee80211 layer. */
static int r8192_wx_get_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_rts(priv->ieee80211, info, wrqu, extra);
}

/* SIOCSIWPOWER: delegate under wx_sem. */
static int r8192_wx_set_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);

	up(&priv->wx_sem);

	return ret;
}

/* SIOCGIWPOWER: delegate to the ieee80211 layer. */
static int r8192_wx_get_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_power(priv->ieee80211, info, wrqu, extra);
}

#ifdef JOHN_IOCTL
/* Debug-only ioctls (compiled in only with JOHN_IOCTL): raw RF/BB/NIC
 * register peek-poke and AP security probing.  Note these read the user
 * pointer with get_user() without checking its return value. */
u16 read_rtl8225(struct net_device *dev, u8 addr);
void write_rtl8225(struct net_device *dev, u8 adr, u16 data);
u32 john_read_rtl8225(struct net_device *dev, u8 adr);
void _write_rtl8225(struct net_device *dev, u8 adr, u16 data);

/* Read an RF (rtl8225) register; the address comes from userspace and the
 * value is smuggled back through wrqu->data.length. */
static int r8192_wx_read_regs(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 addr;
	u16 data1;

	down(&priv->wx_sem);

	get_user(addr, (u8 *)wrqu->data.pointer);
	data1 = read_rtl8225(dev, addr);
	wrqu->data.length = data1;

	up(&priv->wx_sem);
	return 0;
}

/* Write an RF (rtl8225) register: address from userspace, value passed in
 * wrqu->data.length. */
static int r8192_wx_write_regs(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 addr;

	down(&priv->wx_sem);

	get_user(addr, (u8 *)wrqu->data.pointer);
	write_rtl8225(dev, addr, wrqu->data.length);

	up(&priv->wx_sem);
	return 0;
}

void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);
u8 rtl8187_read_phy(struct net_device *dev, u8 adr, u32 data);

/* Read a baseband (PHY) register addressed by wrqu->data.length. */
static int r8192_wx_read_bb(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 databb;

	down(&priv->wx_sem);

	databb = rtl8187_read_phy(dev, (u8)wrqu->data.length, 0x00000000);
	wrqu->data.length = databb;

	up(&priv->wx_sem);
	return 0;
}

void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);

/* Write a baseband (PHY) register: value from userspace, address in
 * wrqu->data.length. */
static int r8192_wx_write_bb(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 databb;

	down(&priv->wx_sem);

	get_user(databb, (u8 *)wrqu->data.pointer);
	rtl8187_write_phy(dev, wrqu->data.length, databb);

	up(&priv->wx_sem);
	return 0;
}

/* Write one NIC byte register: address from userspace, value in
 * wrqu->data.length. */
static int r8192_wx_write_nicb(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 addr;

	down(&priv->wx_sem);

	get_user(addr, (u32 *)wrqu->data.pointer);
	write_nic_byte(dev, addr, wrqu->data.length);

	up(&priv->wx_sem);
	return 0;
}

/* Read one NIC byte register; result returned in wrqu->data.length. */
static int r8192_wx_read_nicb(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 addr;
	u16 data1;

	down(&priv->wx_sem);

	get_user(addr, (u32 *)wrqu->data.pointer);
	data1 = read_nic_byte(dev, addr);
	wrqu->data.length = data1;

	up(&priv->wx_sem);
	return 0;
}

/* Look up the scanned network whose SSID userspace passed in and report
 * whether it advertises WPA/RSN (wrqu->data.flags = 1) or not (= 0). */
static int r8192_wx_get_ap_status(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee80211;
	struct ieee80211_network *target;
	int name_len;

	down(&priv->wx_sem);

	/* count the length of input ssid */
	for (name_len = 0; ((char *)wrqu->data.pointer)[name_len] != '\0'; name_len++)
		;

	/* search for the corresponding info which is received */
	list_for_each_entry(target, &ieee->network_list, list) {
		if ((target->ssid_len == name_len) &&
		    (strncmp(target->ssid, (char *)wrqu->data.pointer, name_len) == 0)) {
			if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
				/* set flags=1 to indicate this ap is WPA */
				wrqu->data.flags = 1;
			else
				wrqu->data.flags = 0;

			break;
		}
	}

	up(&priv->wx_sem);
	return 0;
}

#endif

/* Private ioctl: request a forced reset; value stored for later handling. */
static int r8192_wx_force_reset(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	down(&priv->wx_sem);

	printk("%s(): force reset ! extra is %d\n", __FUNCTION__, *extra);
	priv->force_reset = *extra;
	up(&priv->wx_sem);
	return 0;
}

/* Private ioctl: report the loaded firmware version as a fixed iw_param. */
static int r8191su_wx_get_firm_version(struct net_device *dev,
				       struct iw_request_info *info,
				       struct iw_param *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u16 firmware_version;

	down(&priv->wx_sem);
	firmware_version = priv->pFirmware->FirmwareVersion;
	wrqu->value = firmware_version;
	wrqu->fixed = 1;
	up(&priv->wx_sem);

	return 0;
}

/* Private ioctl: toggle raw-TX mode via the ieee80211 layer. */
static int r8192_wx_set_rawtx(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int ret;

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);

	up(&priv->wx_sem);

	return ret;
}

/* Private ioctl: accept (1) or reject (0) frames with bad CRC while in
 * monitor mode.  The restart on change is currently commented out. */
static int r8192_wx_set_crcmon(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);
	short prev = priv->crcmon;

	down(&priv->wx_sem);

	if (enable)
		priv->crcmon = 1;
	else
		priv->crcmon = 0;

	DMESG("bad CRC in monitor mode are %s",
	      priv->crcmon ? "accepted" : "rejected");

	if (prev != priv->crcmon && priv->up) {
		/* rtl8180_down(dev); */
		/* rtl8180_up(dev); */
	}

	up(&priv->wx_sem);

	return 0;
}

/* SIOCSIWMODE: set operating mode, then reprogram the RX filter. */
static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int ret;

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);

	rtl8192_set_rxconf(dev);

	up(&priv->wx_sem);
	return ret;
}

/* Layout-compatible prefix of struct iw_range, used to reach the
 * scan_capa field without depending on its offset in iw_range. */
struct iw_range_with_scan_capa {
	/* Informative stuff (to choose between different interface) */
	__u32 throughput;	/* To give an idea... */
	/* In theory this value should be the maximum benchmarked
	 * TCP/IP throughput, because with most of these devices the
	 * bit rate is meaningless (overhead an co) to estimate how
	 * fast the connection will go and pick the fastest one.
	 * I suggest people to play with Netperf or any benchmark...
	 */

	/* NWID (or domain id) */
	__u32 min_nwid;		/* Minimal NWID we are able to set */
	__u32 max_nwid;		/* Maximal NWID we are able to set */

	/* Old Frequency (backward compat - moved lower ) */
	__u16 old_num_channels;
	__u8 old_num_frequency;

	/* Scan capabilities */
	__u8 scan_capa;
};

/* SIOCGIWRANGE: fill in capabilities, rates, power-management limits and
 * the legal channel list (filtered through the 802.11d channel map). */
static int rtl8180_wx_get_range(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct iw_range *range = (struct iw_range *)extra;
	struct iw_range_with_scan_capa *tmp = (struct iw_range_with_scan_capa *)range;
	struct r8192_priv *priv = ieee80211_priv(dev);
	u16 val;
	int i;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* Let's try to keep this struct in the same order as in
	 * linux/include/wireless.h
	 */

	/* TODO: See what values we can set, and remove the ones we can't
	 * set, or fill them with some default data.
	 */

	/* ~5 Mb/s real (802.11b) */
	range->throughput = 5 * 1000 * 1000;

	/* TODO: Not used in 802.11b? */
	/* range->min_nwid; */	/* Minimal NWID we are able to set */
	/* TODO: Not used in 802.11b? */
	/* range->max_nwid; */	/* Maximal NWID we are able to set */

	/* Old Frequency (backward compat - moved lower ) */
	/* range->old_num_channels; */
	/* range->old_num_frequency; */
	/* range->old_freq[6]; */ /* Filler to keep "version" at the same offset */

	if (priv->rf_set_sens != NULL)
		range->sensitivity = priv->max_sens;	/* signal level threshold range */

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = -98;
	range->max_qual.updated = 7; /* Updated all three */

	range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
	range->avg_qual.level = 20 + -98;
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7; /* Updated all three */

	range->num_bitrates = RATE_COUNT;

	for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
		range->bitrate[i] = rtl8180_rates[i];
	}

	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	range->min_pmp = 0;
	range->max_pmp = 5000000;
	range->min_pmt = 0;
	range->max_pmt = 65535 * 1000;
	range->pmp_flags = IW_POWER_PERIOD;
	range->pmt_flags = IW_POWER_TIMEOUT;
	range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;

	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 16;

	/* range->retry_capa; */	/* What retry options are supported */
	/* range->retry_flags; */	/* How to decode max/min retry limit */
	/* range->r_time_flags; */	/* How to decode max/min retry life */
	/* range->min_retry; */		/* Minimal number of retries */
	/* range->max_retry; */		/* Maximal number of retries */
	/* range->min_r_time; */	/* Minimal retry lifetime */
	/* range->max_r_time; */	/* Maximal retry lifetime */

	for (i = 0, val = 0; i < 14; i++) {
		/* Include only legal frequencies for some countries */
		if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i + 1]) {
			range->freq[val].i = i + 1;
			range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
			range->freq[val].e = 1;
			val++;
		} else {
			/* FIXME: do we need to set anything for channels */
			/* we don't use ? */
		}

		if (val == IW_MAX_FREQUENCIES)
			break;
	}
	range->num_frequency = val;
	range->num_channels = val;
	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
			  IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
	tmp->scan_capa = 0x01;
	return 0;
}

/* SIOCSIWSCAN: refuse while down or busy; when not associated, run a
 * synchronous softmac scan, otherwise delegate to the ieee80211 layer.
 * An IW_SCAN_THIS_ESSID request primes current_network.ssid first. */
static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee80211;
	int ret = 0;

	if (!priv->up)
		return -ENETDOWN;

	if (priv->ieee80211->LinkDetectInfo.bBusyTraffic == true)
		return -EAGAIN;

	if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
		struct iw_scan_req *req = (struct iw_scan_req *)b;

		if (req->essid_len) {
			ieee->current_network.ssid_len = req->essid_len;
			memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
		}
	}

	down(&priv->wx_sem);
	if (priv->ieee80211->state != IEEE80211_LINKED) {
		priv->ieee80211->scanning = 0;
		ieee80211_softmac_scan_syncro(priv->ieee80211);
		ret = 0;
	} else
		ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
	up(&priv->wx_sem);
	return ret;
}

/* SIOCGIWSCAN: return scan results; fails with -ENETDOWN if not up. */
static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (!priv->up)
		return -ENETDOWN;

	down(&priv->wx_sem);

	ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);

	up(&priv->wx_sem);

	return ret;
}

/* SIOCSIWESSID: delegate under wx_sem. */
static int r8192_wx_set_essid(struct net_device *dev,
			      struct iw_request_info *a,
			      union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int ret;

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);

	up(&priv->wx_sem);
	return ret;
}

/* SIOCGIWESSID: delegate under wx_sem. */
static int r8192_wx_get_essid(struct net_device *dev,
			      struct iw_request_info *a,
			      union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	down(&priv->wx_sem);

	ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);

	up(&priv->wx_sem);

	return ret;
}

/* SIOCSIWFREQ: delegate under wx_sem. */
static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);

	up(&priv->wx_sem);
	return ret;
}

/* SIOCGIWNAME: delegate to the ieee80211 layer. */
static int r8192_wx_get_name(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
}

/* SIOCSIWFRAG: validate and store the fragmentation threshold (forced
 * even via "& ~0x1"); disabled means the default threshold. */
static int r8192_wx_set_frag(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (wrqu->frag.disabled)
		priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
	else {
		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
		    wrqu->frag.value > MAX_FRAG_THRESHOLD)
			return -EINVAL;

		priv->ieee80211->fts = wrqu->frag.value & ~0x1;
	}

	return 0;
}

/* SIOCGIWFRAG: report the current fragmentation threshold. */
static int r8192_wx_get_frag(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	wrqu->frag.value = priv->ieee80211->fts;
	wrqu->frag.fixed = 0;	/* no auto select */
	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);

	return 0;
}

/* SIOCSIWAP: delegate under wx_sem. */
static int r8192_wx_set_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *awrq,
			    char *extra)
{
	int ret;
	struct r8192_priv *priv = ieee80211_priv(dev);
	/* struct sockaddr *temp = (struct sockaddr *)awrq; */

	down(&priv->wx_sem);

	ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra);

	up(&priv->wx_sem);

	return ret;
}

/* SIOCGIWAP: delegate to the ieee80211 layer. */
static int r8192_wx_get_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
}

/* SIOCGIWENCODE: delegate to the ieee80211 layer. */
static int r8192_wx_get_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key);
}

/* SIOCSIWENCODE: set a software WEP key via the ieee80211 layer, then
 * mirror WEP40/WEP104 keys into the hardware CAM with setKey().  The
 * pack loop folds the key bytes into four little-endian u32 words,
 * masking out bytes past the key length. */
static int r8192_wx_set_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	struct ieee80211_device *ieee = priv->ieee80211;
	int ret;

	/* u32 TargetContent; */
	u32 hwkey[4] = {0, 0, 0, 0};
	u8 mask = 0xff;
	u32 key_idx = 0;
	/* u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff}; */
	u8 zero_addr[4][6] = {	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
				{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
				{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
				{0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
	int i;

	if (!priv->up)
		return -ENETDOWN;

	down(&priv->wx_sem);

	RT_TRACE(COMP_SEC, "Setting SW wep key");
	ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);

	up(&priv->wx_sem);

	/* sometimes, the length is zero while we do not type key value */
	if (wrqu->encoding.length != 0) {

		for (i = 0; i < 4; i++) {
			hwkey[i] |= key[4 * i + 0] & mask;
			/* If key length is 5 or 13 bytes, zero the mask once
			 * the remaining bytes fall past the end of the key. */
			if (i == 1 && (4 * i + 1) == wrqu->encoding.length)
				mask = 0x00;
			if (i == 3 && (4 * i + 1) == wrqu->encoding.length)
				mask = 0x00;
			hwkey[i] |= (key[4 * i + 1] & mask) << 8;
			hwkey[i] |= (key[4 * i + 2] & mask) << 16;
			hwkey[i] |= (key[4 * i + 3] & mask) << 24;
		}

#define CONF_WEP40  0x4
#define CONF_WEP104 0x14

		/* Map the IW_ENCODE_INDEX (1-based; 0 = current) to a CAM index. */
		switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
		case 0:
			key_idx = ieee->tx_keyidx;
			break;
		case 1:
			key_idx = 0;
			break;
		case 2:
			key_idx = 1;
			break;
		case 3:
			key_idx = 2;
			break;
		case 4:
			key_idx = 3;
			break;
		default:
			break;
		}

		if (wrqu->encoding.length == 0x5) {		/* 40-bit WEP */
			ieee->pairwise_key_type = KEY_TYPE_WEP40;
			EnableHWSecurityConfig8192(dev);

			setKey(dev,
			       key_idx,			/* EntryNo */
			       key_idx,			/* KeyIndex */
			       KEY_TYPE_WEP40,		/* KeyType */
			       zero_addr[key_idx],
			       0,			/* DefaultKey */
			       hwkey);			/* KeyContent */
		} else if (wrqu->encoding.length == 0xd) {	/* 104-bit WEP */
			ieee->pairwise_key_type = KEY_TYPE_WEP104;
			EnableHWSecurityConfig8192(dev);

			setKey(dev,
			       key_idx,			/* EntryNo */
			       key_idx,			/* KeyIndex */
			       KEY_TYPE_WEP104,		/* KeyType */
			       zero_addr[key_idx],
			       0,			/* DefaultKey */
			       hwkey);			/* KeyContent */
		} else
			printk("wrong type in WEP, not WEP40 and WEP104\n");
	}

	return ret;
}

/* Private ioctl: select active (non-zero) vs passive scanning.  Note the
 * unusual return value of 1 (kept for compatibility). */
static int r8192_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa,
				  union iwreq_data *wrqu, char *p)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)p;
	int mode = parms[0];

	priv->ieee80211->active_scan = mode;

	return 1;
}

/* SIOCSIWRETRY: only IW_RETRY_LIMIT is supported; IW_RETRY_MAX selects the
 * RTS/CTS retry count, otherwise the plain data retry count is set.  The
 * device is recommitted afterwards to apply the change. */
static int r8192_wx_set_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int err = 0;

	down(&priv->wx_sem);

	if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
	    wrqu->retry.disabled) {
		err = -EINVAL;
		goto exit;
	}
	if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
		err = -EINVAL;
		goto exit;
	}

	if (wrqu->retry.value > R8180_MAX_RETRY) {
		err = -EINVAL;
		goto exit;
	}
	if (wrqu->retry.flags & IW_RETRY_MAX) {
		priv->retry_rts = wrqu->retry.value;
		DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value);

	} else {
		priv->retry_data = wrqu->retry.value;
		DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value);
	}

	/* FIXME !
	 * We might try to write directly the TX config register
	 * or to restart just the (R)TX process.
	 * I'm unsure if whole reset is really needed
	 */

	rtl8192_commit(dev);
	/*
	if(priv->up){
		rtl8180_rtx_disable(dev);
		rtl8180_rx_enable(dev);
		rtl8180_tx_enable(dev);
	}
	*/
exit:
	up(&priv->wx_sem);

	return err;
}

/* SIOCGIWRETRY: report the RTS/CTS or plain-data retry count. */
static int r8192_wx_get_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	wrqu->retry.disabled = 0; /* can't be disabled */

	if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
	    IW_RETRY_LIFETIME)
		return -EINVAL;

	if (wrqu->retry.flags & IW_RETRY_MAX) {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
		wrqu->retry.value = priv->retry_rts;
	} else {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
		wrqu->retry.value = priv->retry_data;
	}

	return 0;
}

/* SIOCGIWSENS: report RX sensitivity; -1 if the radio has no support. */
static int r8192_wx_get_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (priv->rf_set_sens == NULL)
		return -1; /* we have not this support for this radio */
	wrqu->sens.value = priv->sens;
	return 0;
}

/* SIOCSIWSENS: program RX sensitivity through the radio's rf_set_sens
 * hook; cache the value only if the hardware accepted it. */
static int r8192_wx_set_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	short err = 0;

	down(&priv->wx_sem);
	if (priv->rf_set_sens == NULL) {
		err = -1; /* we have not this support for this radio */
		goto exit;
	}
	if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
		priv->sens = wrqu->sens.value;
	else
		err = -EINVAL;

exit:
	up(&priv->wx_sem);

	return err;
}

/* hw security need to be reorganized. */
static int r8192_wx_set_enc_ext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret=0; struct r8192_priv *priv = ieee80211_priv(dev); struct ieee80211_device* ieee = priv->ieee80211; //printk("===>%s()\n", __FUNCTION__); down(&priv->wx_sem); ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra); { u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff}; u8 zero[6] = {0}; u32 key[4] = {0}; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; struct iw_point *encoding = &wrqu->encoding; u8 idx = 0, alg = 0, group = 0; if ((encoding->flags & IW_ENCODE_DISABLED) || ext->alg == IW_ENCODE_ALG_NONE) //none is not allowed to use hwsec WB 2008.07.01 { ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA; CamResetAllEntry(dev); goto end_hw_sec; } alg = (ext->alg == IW_ENCODE_ALG_CCMP)?KEY_TYPE_CCMP:ext->alg; // as IW_ENCODE_ALG_CCMP is defined to be 3 and KEY_TYPE_CCMP is defined to 4; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) idx --; group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY; if ((!group) || (IW_MODE_ADHOC == ieee->iw_mode) || (alg == KEY_TYPE_WEP40)) { if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40) ) alg = KEY_TYPE_WEP104; ieee->pairwise_key_type = alg; EnableHWSecurityConfig8192(dev); } memcpy((u8*)key, ext->key, 16); //we only get 16 bytes key.why? 
WB 2008.7.1 if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode !=2) ) { setKey( dev, idx,//EntryNo idx, //KeyIndex alg, //KeyType zero, //MacAddr 0, //DefaultKey key); //KeyContent } else if (group) { ieee->group_key_type = alg; setKey( dev, idx,//EntryNo idx, //KeyIndex alg, //KeyType broadcast_addr, //MacAddr 0, //DefaultKey key); //KeyContent } else //pairwise key { setKey( dev, 4,//EntryNo idx, //KeyIndex alg, //KeyType (u8*)ieee->ap_mac_addr, //MacAddr 0, //DefaultKey key); //KeyContent } } end_hw_sec: up(&priv->wx_sem); return ret; } static int r8192_wx_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { int ret=0; //printk("====>%s()\n", __FUNCTION__); struct r8192_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); ret = ieee80211_wx_set_auth(priv->ieee80211, info, &(data->param), extra); up(&priv->wx_sem); return ret; } static int r8192_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { //printk("====>%s()\n", __FUNCTION__); int ret=0; struct r8192_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8192_wx_set_gen_ie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *data, char *extra) { //printk("====>%s(), len:%d\n", __FUNCTION__, data->length); int ret=0; struct r8192_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); #if 1 ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length); #endif up(&priv->wx_sem); //printk("<======%s(), ret:%d\n", __FUNCTION__, ret); return ret; } static int dummy(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu,char *b) { return -1; } static iw_handler r8192_wx_handlers[] = { NULL, /* SIOCSIWCOMMIT */ r8192_wx_get_name, /* SIOCGIWNAME */ dummy, /* SIOCSIWNWID */ dummy, /* SIOCGIWNWID */ r8192_wx_set_freq, /* SIOCSIWFREQ */ r8192_wx_get_freq, /* 
SIOCGIWFREQ */ r8192_wx_set_mode, /* SIOCSIWMODE */ r8192_wx_get_mode, /* SIOCGIWMODE */ r8192_wx_set_sens, /* SIOCSIWSENS */ r8192_wx_get_sens, /* SIOCGIWSENS */ NULL, /* SIOCSIWRANGE */ rtl8180_wx_get_range, /* SIOCGIWRANGE */ NULL, /* SIOCSIWPRIV */ NULL, /* SIOCGIWPRIV */ NULL, /* SIOCSIWSTATS */ NULL, /* SIOCGIWSTATS */ dummy, /* SIOCSIWSPY */ dummy, /* SIOCGIWSPY */ NULL, /* SIOCGIWTHRSPY */ NULL, /* SIOCWIWTHRSPY */ r8192_wx_set_wap, /* SIOCSIWAP */ r8192_wx_get_wap, /* SIOCGIWAP */ r8192_wx_set_mlme, /* MLME-- */ dummy, /* SIOCGIWAPLIST -- depricated */ r8192_wx_set_scan, /* SIOCSIWSCAN */ r8192_wx_get_scan, /* SIOCGIWSCAN */ r8192_wx_set_essid, /* SIOCSIWESSID */ r8192_wx_get_essid, /* SIOCGIWESSID */ dummy, /* SIOCSIWNICKN */ dummy, /* SIOCGIWNICKN */ NULL, /* -- hole -- */ NULL, /* -- hole -- */ r8192_wx_set_rate, /* SIOCSIWRATE */ r8192_wx_get_rate, /* SIOCGIWRATE */ r8192_wx_set_rts, /* SIOCSIWRTS */ r8192_wx_get_rts, /* SIOCGIWRTS */ r8192_wx_set_frag, /* SIOCSIWFRAG */ r8192_wx_get_frag, /* SIOCGIWFRAG */ dummy, /* SIOCSIWTXPOW */ dummy, /* SIOCGIWTXPOW */ r8192_wx_set_retry, /* SIOCSIWRETRY */ r8192_wx_get_retry, /* SIOCGIWRETRY */ r8192_wx_set_enc, /* SIOCSIWENCODE */ r8192_wx_get_enc, /* SIOCGIWENCODE */ r8192_wx_set_power, /* SIOCSIWPOWER */ r8192_wx_get_power, /* SIOCGIWPOWER */ NULL, /*---hole---*/ NULL, /*---hole---*/ r8192_wx_set_gen_ie,//NULL, /* SIOCSIWGENIE */ NULL, /* SIOCSIWGENIE */ r8192_wx_set_auth,//NULL, /* SIOCSIWAUTH */ NULL,//r8192_wx_get_auth,//NULL, /* SIOCSIWAUTH */ r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */ NULL,//r8192_wx_get_enc_ext,//NULL, /* SIOCSIWENCODEEXT */ NULL, /* SIOCSIWPMKSA */ NULL, /*---hole---*/ }; static const struct iw_priv_args r8192_private_args[] = { { SIOCIWFIRSTPRIV + 0x0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc" }, { SIOCIWFIRSTPRIV + 0x1, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan" }, { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx" } #ifdef 
JOHN_IOCTL , { SIOCIWFIRSTPRIV + 0x3, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "readRF" } , { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "writeRF" } , { SIOCIWFIRSTPRIV + 0x5, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "readBB" } , { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "writeBB" } , { SIOCIWFIRSTPRIV + 0x7, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "readnicb" } , { SIOCIWFIRSTPRIV + 0x8, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "writenicb" } , { SIOCIWFIRSTPRIV + 0x9, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo" } #endif , { SIOCIWFIRSTPRIV + 0x3, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset" } , { SIOCIWFIRSTPRIV + 0x5, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT|IW_PRIV_SIZE_FIXED|1, "firm_ver" } }; static iw_handler r8192_private_handler[] = { // r8192_wx_set_monitor, /* SIOCIWFIRSTPRIV */ r8192_wx_set_crcmon, /*SIOCIWSECONDPRIV*/ // r8192_wx_set_forceassociate, // r8192_wx_set_beaconinterval, // r8192_wx_set_monitor_type, r8192_wx_set_scan_type, r8192_wx_set_rawtx, #ifdef JOHN_IOCTL r8192_wx_read_regs, r8192_wx_write_regs, r8192_wx_read_bb, r8192_wx_write_bb, r8192_wx_read_nicb, r8192_wx_write_nicb, r8192_wx_get_ap_status, #endif r8192_wx_force_reset, (iw_handler)NULL, (iw_handler)r8191su_wx_get_firm_version, }; struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); struct ieee80211_device* ieee = priv->ieee80211; struct iw_statistics* wstats = &priv->wstats; int tmp_level = 0; int tmp_qual = 0; int tmp_noise = 0; if(ieee->state < IEEE80211_LINKED) { wstats->qual.qual = 0; wstats->qual.level = 0; wstats->qual.noise = 0; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } tmp_level = (&ieee->current_network)->stats.rssi; tmp_qual = (&ieee->current_network)->stats.signal; tmp_noise = (&ieee->current_network)->stats.noise; //printk("level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, 
tmp_noise); wstats->qual.level = tmp_level; wstats->qual.qual = tmp_qual; wstats->qual.noise = tmp_noise; wstats->qual.updated = IW_QUAL_ALL_UPDATED| IW_QUAL_DBM; return wstats; } struct iw_handler_def r8192_wx_handlers_def={ .standard = r8192_wx_handlers, .num_standard = ARRAY_SIZE(r8192_wx_handlers), .private = r8192_private_handler, .num_private = ARRAY_SIZE(r8192_private_handler), .num_private_args = sizeof(r8192_private_args) / sizeof(struct iw_priv_args), .get_wireless_stats = r8192_get_wireless_stats, .private_args = (struct iw_priv_args *)r8192_private_args, };
gpl-2.0
junkyde/vikinger-stock-kk
drivers/char/hw_random/msm_rng.c
1121
6607
/* * Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/err.h> #include <linux/types.h> #include <mach/msm_iomap.h> #include <mach/socinfo.h> #define DRIVER_NAME "msm_rng" /* Device specific register offsets */ #define PRNG_DATA_OUT_OFFSET 0x0000 #define PRNG_STATUS_OFFSET 0x0004 #define PRNG_LFSR_CFG_OFFSET 0x0100 #define PRNG_CONFIG_OFFSET 0x0104 /* Device specific register masks and config values */ #define PRNG_LFSR_CFG_MASK 0xFFFF0000 #define PRNG_LFSR_CFG_CLOCKS 0x0000DDDD #define PRNG_CONFIG_MASK 0xFFFFFFFD #define PRNG_HW_ENABLE 0x00000002 #define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */ #define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */ struct msm_rng_device { struct platform_device *pdev; void __iomem *base; struct clk *prng_clk; }; static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct msm_rng_device *msm_rng_dev; struct platform_device *pdev; void __iomem *base; size_t maxsize; size_t currsize = 0; unsigned long val; unsigned long *retdata = data; int ret; msm_rng_dev = (struct msm_rng_device *)rng->priv; pdev = msm_rng_dev->pdev; base = msm_rng_dev->base; /* calculate max size bytes to transfer back to caller */ maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max); /* no room for word data */ 
if (maxsize < 4) return 0; /* enable PRNG clock */ ret = clk_prepare_enable(msm_rng_dev->prng_clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock in callback\n"); return 0; } /* read random data from h/w */ do { /* check status bit if data is available */ if (!(readl_relaxed(base + PRNG_STATUS_OFFSET) & 0x00000001)) break; /* no data to read so just bail */ /* read FIFO */ val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET); if (!val) break; /* no data to read so just bail */ /* write data back to callers pointer */ *(retdata++) = val; currsize += 4; /* make sure we stay on 32bit boundary */ if ((maxsize - currsize) < 4) break; } while (currsize < maxsize); /* vote to turn off clock */ clk_disable_unprepare(msm_rng_dev->prng_clk); return currsize; } static struct hwrng msm_rng = { .name = DRIVER_NAME, .read = msm_rng_read, }; static int __devinit msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev) { unsigned long val = 0; unsigned long reg_val = 0; int ret = 0; /* Enable the PRNG CLK */ ret = clk_prepare_enable(msm_rng_dev->prng_clk); if (ret) { dev_err(&(msm_rng_dev->pdev)->dev, "failed to enable clock in probe\n"); return -EPERM; } /* Enable PRNG h/w only if it is NOT ON */ val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) & PRNG_HW_ENABLE; /* PRNG H/W is not ON */ if (val != PRNG_HW_ENABLE) { val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET); val &= PRNG_LFSR_CFG_MASK; val |= PRNG_LFSR_CFG_CLOCKS; writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET); /* The PRNG CONFIG register should be first written */ mb(); reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) & PRNG_CONFIG_MASK; reg_val |= PRNG_HW_ENABLE; writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET); /* The PRNG clk should be disabled only after we enable the * PRNG h/w by writing to the PRNG CONFIG register. 
*/ mb(); } clk_disable_unprepare(msm_rng_dev->prng_clk); return 0; } static int __devinit msm_rng_probe(struct platform_device *pdev) { struct resource *res; struct msm_rng_device *msm_rng_dev = NULL; void __iomem *base = NULL; int error = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "invalid address\n"); error = -EFAULT; goto err_exit; } msm_rng_dev = kzalloc(sizeof(msm_rng_dev), GFP_KERNEL); if (!msm_rng_dev) { dev_err(&pdev->dev, "cannot allocate memory\n"); error = -ENOMEM; goto err_exit; } base = ioremap(res->start, resource_size(res)); if (!base) { dev_err(&pdev->dev, "ioremap failed\n"); error = -ENOMEM; goto err_iomap; } msm_rng_dev->base = base; /* create a handle for clock control */ msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk"); if (IS_ERR(msm_rng_dev->prng_clk)) { dev_err(&pdev->dev, "failed to register clock source\n"); error = -EPERM; goto err_clk_get; } /* save away pdev and register driver data */ msm_rng_dev->pdev = pdev; platform_set_drvdata(pdev, msm_rng_dev); /* Enable rng h/w */ error = msm_rng_enable_hw(msm_rng_dev); if (error) goto rollback_clk; /* register with hwrng framework */ msm_rng.priv = (unsigned long) msm_rng_dev; error = hwrng_register(&msm_rng); if (error) { dev_err(&pdev->dev, "failed to register hwrng\n"); error = -EPERM; goto rollback_clk; } return 0; rollback_clk: clk_put(msm_rng_dev->prng_clk); err_clk_get: iounmap(msm_rng_dev->base); err_iomap: kfree(msm_rng_dev); err_exit: return error; } static int __devexit msm_rng_remove(struct platform_device *pdev) { struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev); hwrng_unregister(&msm_rng); clk_put(msm_rng_dev->prng_clk); iounmap(msm_rng_dev->base); platform_set_drvdata(pdev, NULL); kfree(msm_rng_dev); return 0; } static struct of_device_id qrng_match[] = { { .compatible = "qcom,msm-rng", }, {} }; static struct platform_driver rng_driver = { .probe = msm_rng_probe, .remove = __devexit_p(msm_rng_remove), 
.driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = qrng_match, } }; static int __init msm_rng_init(void) { return platform_driver_register(&rng_driver); } module_init(msm_rng_init); static void __exit msm_rng_exit(void) { platform_driver_unregister(&rng_driver); } module_exit(msm_rng_exit); MODULE_AUTHOR("The Linux Foundation"); MODULE_DESCRIPTION("Qualcomm MSM Random Number Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
javelinanddart/android_kernel_3.10_ville
sound/drivers/vx/vx_core.c
2145
20684
/* * Driver for Digigram VX soundcards * * Hardware core part * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/asoundef.h> #include <sound/info.h> #include <asm/io.h> #include <sound/vx_core.h> #include "vx_cmd.h" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Common routines for Digigram VX drivers"); MODULE_LICENSE("GPL"); /* * vx_check_reg_bit - wait for the specified bit is set/reset on a register * @reg: register to check * @mask: bit mask * @bit: resultant bit to be checked * @time: time-out of loop in msec * * returns zero if a bit matches, or a negative error code. 
*/ int snd_vx_check_reg_bit(struct vx_core *chip, int reg, int mask, int bit, int time) { unsigned long end_time = jiffies + (time * HZ + 999) / 1000; static char *reg_names[VX_REG_MAX] = { "ICR", "CVR", "ISR", "IVR", "RXH", "RXM", "RXL", "DMA", "CDSP", "RFREQ", "RUER/V2", "DATA", "MEMIRQ", "ACQ", "BIT0", "BIT1", "MIC0", "MIC1", "MIC2", "MIC3", "INTCSR", "CNTRL", "GPIOC", "LOFREQ", "HIFREQ", "CSUER", "RUER" }; do { if ((snd_vx_inb(chip, reg) & mask) == bit) return 0; //msleep(10); } while (time_after_eq(end_time, jiffies)); snd_printd(KERN_DEBUG "vx_check_reg_bit: timeout, reg=%s, mask=0x%x, val=0x%x\n", reg_names[reg], mask, snd_vx_inb(chip, reg)); return -EIO; } EXPORT_SYMBOL(snd_vx_check_reg_bit); /* * vx_send_irq_dsp - set command irq bit * @num: the requested IRQ type, IRQ_XXX * * this triggers the specified IRQ request * returns 0 if successful, or a negative error code. * */ static int vx_send_irq_dsp(struct vx_core *chip, int num) { int nirq; /* wait for Hc = 0 */ if (snd_vx_check_reg_bit(chip, VX_CVR, CVR_HC, 0, 200) < 0) return -EIO; nirq = num; if (vx_has_new_dsp(chip)) nirq += VXP_IRQ_OFFSET; vx_outb(chip, CVR, (nirq >> 1) | CVR_HC); return 0; } /* * vx_reset_chk - reset CHK bit on ISR * * returns 0 if successful, or a negative error code. */ static int vx_reset_chk(struct vx_core *chip) { /* Reset irq CHK */ if (vx_send_irq_dsp(chip, IRQ_RESET_CHK) < 0) return -EIO; /* Wait until CHK = 0 */ if (vx_check_isr(chip, ISR_CHK, 0, 200) < 0) return -EIO; return 0; } /* * vx_transfer_end - terminate message transfer * @cmd: IRQ message to send (IRQ_MESS_XXX_END) * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * NB: call with spinlock held! 
*/ static int vx_transfer_end(struct vx_core *chip, int cmd) { int err; if ((err = vx_reset_chk(chip)) < 0) return err; /* irq MESS_READ/WRITE_END */ if ((err = vx_send_irq_dsp(chip, cmd)) < 0) return err; /* Wait CHK = 1 */ if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; /* If error, Read RX */ if ((err = vx_inb(chip, ISR)) & ISR_ERR) { if ((err = vx_wait_for_rx_full(chip)) < 0) { snd_printd(KERN_DEBUG "transfer_end: error in rx_full\n"); return err; } err = vx_inb(chip, RXH) << 16; err |= vx_inb(chip, RXM) << 8; err |= vx_inb(chip, RXL); snd_printd(KERN_DEBUG "transfer_end: error = 0x%x\n", err); return -(VX_ERR_MASK | err); } return 0; } /* * vx_read_status - return the status rmh * @rmh: rmh record to store the status * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * NB: call with spinlock held! */ static int vx_read_status(struct vx_core *chip, struct vx_rmh *rmh) { int i, err, val, size; /* no read necessary? 
*/ if (rmh->DspStat == RMH_SSIZE_FIXED && rmh->LgStat == 0) return 0; /* Wait for RX full (with timeout protection) * The first word of status is in RX */ err = vx_wait_for_rx_full(chip); if (err < 0) return err; /* Read RX */ val = vx_inb(chip, RXH) << 16; val |= vx_inb(chip, RXM) << 8; val |= vx_inb(chip, RXL); /* If status given by DSP, let's decode its size */ switch (rmh->DspStat) { case RMH_SSIZE_ARG: size = val & 0xff; rmh->Stat[0] = val & 0xffff00; rmh->LgStat = size + 1; break; case RMH_SSIZE_MASK: /* Let's count the arg numbers from a mask */ rmh->Stat[0] = val; size = 0; while (val) { if (val & 0x01) size++; val >>= 1; } rmh->LgStat = size + 1; break; default: /* else retrieve the status length given by the driver */ size = rmh->LgStat; rmh->Stat[0] = val; /* Val is the status 1st word */ size--; /* hence adjust remaining length */ break; } if (size < 1) return 0; if (snd_BUG_ON(size > SIZE_MAX_STATUS)) return -EINVAL; for (i = 1; i <= size; i++) { /* trigger an irq MESS_WRITE_NEXT */ err = vx_send_irq_dsp(chip, IRQ_MESS_WRITE_NEXT); if (err < 0) return err; /* Wait for RX full (with timeout protection) */ err = vx_wait_for_rx_full(chip); if (err < 0) return err; rmh->Stat[i] = vx_inb(chip, RXH) << 16; rmh->Stat[i] |= vx_inb(chip, RXM) << 8; rmh->Stat[i] |= vx_inb(chip, RXL); } return vx_transfer_end(chip, IRQ_MESS_WRITE_END); } #define MASK_MORE_THAN_1_WORD_COMMAND 0x00008000 #define MASK_1_WORD_COMMAND 0x00ff7fff /* * vx_send_msg_nolock - send a DSP message and read back the status * @rmh: the rmh record to send and receive * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * * this function doesn't call spinlock at all. 
*/ int vx_send_msg_nolock(struct vx_core *chip, struct vx_rmh *rmh) { int i, err; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; if ((err = vx_reset_chk(chip)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: vx_reset_chk error\n"); return err; } #if 0 printk(KERN_DEBUG "rmh: cmd = 0x%06x, length = %d, stype = %d\n", rmh->Cmd[0], rmh->LgCmd, rmh->DspStat); if (rmh->LgCmd > 1) { printk(KERN_DEBUG " "); for (i = 1; i < rmh->LgCmd; i++) printk("0x%06x ", rmh->Cmd[i]); printk("\n"); } #endif /* Check bit M is set according to length of the command */ if (rmh->LgCmd > 1) rmh->Cmd[0] |= MASK_MORE_THAN_1_WORD_COMMAND; else rmh->Cmd[0] &= MASK_1_WORD_COMMAND; /* Wait for TX empty */ if ((err = vx_wait_isr_bit(chip, ISR_TX_EMPTY)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: wait tx empty error\n"); return err; } /* Write Cmd[0] */ vx_outb(chip, TXH, (rmh->Cmd[0] >> 16) & 0xff); vx_outb(chip, TXM, (rmh->Cmd[0] >> 8) & 0xff); vx_outb(chip, TXL, rmh->Cmd[0] & 0xff); /* Trigger irq MESSAGE */ if ((err = vx_send_irq_dsp(chip, IRQ_MESSAGE)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: send IRQ_MESSAGE error\n"); return err; } /* Wait for CHK = 1 */ if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; /* If error, get error value from RX */ if (vx_inb(chip, ISR) & ISR_ERR) { if ((err = vx_wait_for_rx_full(chip)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: rx_full read error\n"); return err; } err = vx_inb(chip, RXH) << 16; err |= vx_inb(chip, RXM) << 8; err |= vx_inb(chip, RXL); snd_printd(KERN_DEBUG "msg got error = 0x%x at cmd[0]\n", err); err = -(VX_ERR_MASK | err); return err; } /* Send the other words */ if (rmh->LgCmd > 1) { for (i = 1; i < rmh->LgCmd; i++) { /* Wait for TX ready */ if ((err = vx_wait_isr_bit(chip, ISR_TX_READY)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: tx_ready error\n"); return err; } /* Write Cmd[i] */ vx_outb(chip, TXH, (rmh->Cmd[i] >> 16) & 0xff); vx_outb(chip, TXM, (rmh->Cmd[i] >> 8) & 0xff); vx_outb(chip, TXL, rmh->Cmd[i] & 0xff); /* Trigger 
irq MESS_READ_NEXT */ if ((err = vx_send_irq_dsp(chip, IRQ_MESS_READ_NEXT)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: IRQ_READ_NEXT error\n"); return err; } } /* Wait for TX empty */ if ((err = vx_wait_isr_bit(chip, ISR_TX_READY)) < 0) { snd_printd(KERN_DEBUG "vx_send_msg: TX_READY error\n"); return err; } /* End of transfer */ err = vx_transfer_end(chip, IRQ_MESS_READ_END); if (err < 0) return err; } return vx_read_status(chip, rmh); } /* * vx_send_msg - send a DSP message with spinlock * @rmh: the rmh record to send and receive * * returns 0 if successful, or a negative error code. * see vx_send_msg_nolock(). */ int vx_send_msg(struct vx_core *chip, struct vx_rmh *rmh) { unsigned long flags; int err; spin_lock_irqsave(&chip->lock, flags); err = vx_send_msg_nolock(chip, rmh); spin_unlock_irqrestore(&chip->lock, flags); return err; } /* * vx_send_rih_nolock - send an RIH to xilinx * @cmd: the command to send * * returns 0 if successful, or a negative error code. * the error code can be VX-specific, retrieved via vx_get_error(). * * this function doesn't call spinlock at all. * * unlike RMH, no command is sent to DSP. */ int vx_send_rih_nolock(struct vx_core *chip, int cmd) { int err; if (chip->chip_status & VX_STAT_IS_STALE) return -EBUSY; #if 0 printk(KERN_DEBUG "send_rih: cmd = 0x%x\n", cmd); #endif if ((err = vx_reset_chk(chip)) < 0) return err; /* send the IRQ */ if ((err = vx_send_irq_dsp(chip, cmd)) < 0) return err; /* Wait CHK = 1 */ if ((err = vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; /* If error, read RX */ if (vx_inb(chip, ISR) & ISR_ERR) { if ((err = vx_wait_for_rx_full(chip)) < 0) return err; err = vx_inb(chip, RXH) << 16; err |= vx_inb(chip, RXM) << 8; err |= vx_inb(chip, RXL); return -(VX_ERR_MASK | err); } return 0; } /* * vx_send_rih - send an RIH with spinlock * @cmd: the command to send * * see vx_send_rih_nolock(). 
*/ int vx_send_rih(struct vx_core *chip, int cmd) { unsigned long flags; int err; spin_lock_irqsave(&chip->lock, flags); err = vx_send_rih_nolock(chip, cmd); spin_unlock_irqrestore(&chip->lock, flags); return err; } #define END_OF_RESET_WAIT_TIME 500 /* us */ /** * snd_vx_boot_xilinx - boot up the xilinx interface * @boot: the boot record to load */ int snd_vx_load_boot_image(struct vx_core *chip, const struct firmware *boot) { unsigned int i; int no_fillup = vx_has_new_dsp(chip); /* check the length of boot image */ if (boot->size <= 0) return -EINVAL; if (boot->size % 3) return -EINVAL; #if 0 { /* more strict check */ unsigned int c = ((u32)boot->data[0] << 16) | ((u32)boot->data[1] << 8) | boot->data[2]; if (boot->size != (c + 2) * 3) return -EINVAL; } #endif /* reset dsp */ vx_reset_dsp(chip); udelay(END_OF_RESET_WAIT_TIME); /* another wait? */ /* download boot strap */ for (i = 0; i < 0x600; i += 3) { if (i >= boot->size) { if (no_fillup) break; if (vx_wait_isr_bit(chip, ISR_TX_EMPTY) < 0) { snd_printk(KERN_ERR "dsp boot failed at %d\n", i); return -EIO; } vx_outb(chip, TXH, 0); vx_outb(chip, TXM, 0); vx_outb(chip, TXL, 0); } else { const unsigned char *image = boot->data + i; if (vx_wait_isr_bit(chip, ISR_TX_EMPTY) < 0) { snd_printk(KERN_ERR "dsp boot failed at %d\n", i); return -EIO; } vx_outb(chip, TXH, image[0]); vx_outb(chip, TXM, image[1]); vx_outb(chip, TXL, image[2]); } } return 0; } EXPORT_SYMBOL(snd_vx_load_boot_image); /* * vx_test_irq_src - query the source of interrupts * * called from irq handler only */ static int vx_test_irq_src(struct vx_core *chip, unsigned int *ret) { int err; vx_init_rmh(&chip->irq_rmh, CMD_TEST_IT); spin_lock(&chip->lock); err = vx_send_msg_nolock(chip, &chip->irq_rmh); if (err < 0) *ret = 0; else *ret = chip->irq_rmh.Stat[0]; spin_unlock(&chip->lock); return err; } /* * vx_interrupt - soft irq handler */ static void vx_interrupt(unsigned long private_data) { struct vx_core *chip = (struct vx_core *) private_data; unsigned 
int events; if (chip->chip_status & VX_STAT_IS_STALE) return; if (vx_test_irq_src(chip, &events) < 0) return; #if 0 if (events & 0x000800) printk(KERN_ERR "DSP Stream underrun ! IRQ events = 0x%x\n", events); #endif // printk(KERN_DEBUG "IRQ events = 0x%x\n", events); /* We must prevent any application using this DSP * and block any further request until the application * either unregisters or reloads the DSP */ if (events & FATAL_DSP_ERROR) { snd_printk(KERN_ERR "vx_core: fatal DSP error!!\n"); return; } /* The start on time code conditions are filled (ie the time code * received by the board is equal to one of those given to it). */ if (events & TIME_CODE_EVENT_PENDING) ; /* so far, nothing to do yet */ /* The frequency has changed on the board (UER mode). */ if (events & FREQUENCY_CHANGE_EVENT_PENDING) vx_change_frequency(chip); /* update the pcm streams */ vx_pcm_update_intr(chip, events); } /** * snd_vx_irq_handler - interrupt handler */ irqreturn_t snd_vx_irq_handler(int irq, void *dev) { struct vx_core *chip = dev; if (! (chip->chip_status & VX_STAT_CHIP_INIT) || (chip->chip_status & VX_STAT_IS_STALE)) return IRQ_NONE; if (! vx_test_and_ack(chip)) tasklet_schedule(&chip->tq); return IRQ_HANDLED; } EXPORT_SYMBOL(snd_vx_irq_handler); /* */ static void vx_reset_board(struct vx_core *chip, int cold_reset) { if (snd_BUG_ON(!chip->ops->reset_board)) return; /* current source, later sync'ed with target */ chip->audio_source = VX_AUDIO_SRC_LINE; if (cold_reset) { chip->audio_source_target = chip->audio_source; chip->clock_source = INTERNAL_QUARTZ; chip->clock_mode = VX_CLOCK_MODE_AUTO; chip->freq = 48000; chip->uer_detected = VX_UER_MODE_NOT_PRESENT; chip->uer_bits = SNDRV_PCM_DEFAULT_CON_SPDIF; } chip->ops->reset_board(chip, cold_reset); vx_reset_codec(chip, cold_reset); vx_set_internal_clock(chip, chip->freq); /* Reset the DSP */ vx_reset_dsp(chip); if (vx_is_pcmcia(chip)) { /* Acknowledge any pending IRQ and reset the MEMIRQ flag. 
*/ vx_test_and_ack(chip); vx_validate_irq(chip, 1); } /* init CBits */ vx_set_iec958_status(chip, chip->uer_bits); } /* * proc interface */ static void vx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct vx_core *chip = entry->private_data; static char *audio_src_vxp[] = { "Line", "Mic", "Digital" }; static char *audio_src_vx2[] = { "Analog", "Analog", "Digital" }; static char *clock_mode[] = { "Auto", "Internal", "External" }; static char *clock_src[] = { "Internal", "External" }; static char *uer_type[] = { "Consumer", "Professional", "Not Present" }; snd_iprintf(buffer, "%s\n", chip->card->longname); snd_iprintf(buffer, "Xilinx Firmware: %s\n", chip->chip_status & VX_STAT_XILINX_LOADED ? "Loaded" : "No"); snd_iprintf(buffer, "Device Initialized: %s\n", chip->chip_status & VX_STAT_DEVICE_INIT ? "Yes" : "No"); snd_iprintf(buffer, "DSP audio info:"); if (chip->audio_info & VX_AUDIO_INFO_REAL_TIME) snd_iprintf(buffer, " realtime"); if (chip->audio_info & VX_AUDIO_INFO_OFFLINE) snd_iprintf(buffer, " offline"); if (chip->audio_info & VX_AUDIO_INFO_MPEG1) snd_iprintf(buffer, " mpeg1"); if (chip->audio_info & VX_AUDIO_INFO_MPEG2) snd_iprintf(buffer, " mpeg2"); if (chip->audio_info & VX_AUDIO_INFO_LINEAR_8) snd_iprintf(buffer, " linear8"); if (chip->audio_info & VX_AUDIO_INFO_LINEAR_16) snd_iprintf(buffer, " linear16"); if (chip->audio_info & VX_AUDIO_INFO_LINEAR_24) snd_iprintf(buffer, " linear24"); snd_iprintf(buffer, "\n"); snd_iprintf(buffer, "Input Source: %s\n", vx_is_pcmcia(chip) ? 
audio_src_vxp[chip->audio_source] : audio_src_vx2[chip->audio_source]); snd_iprintf(buffer, "Clock Mode: %s\n", clock_mode[chip->clock_mode]); snd_iprintf(buffer, "Clock Source: %s\n", clock_src[chip->clock_source]); snd_iprintf(buffer, "Frequency: %d\n", chip->freq); snd_iprintf(buffer, "Detected Frequency: %d\n", chip->freq_detected); snd_iprintf(buffer, "Detected UER type: %s\n", uer_type[chip->uer_detected]); snd_iprintf(buffer, "Min/Max/Cur IBL: %d/%d/%d (granularity=%d)\n", chip->ibl.min_size, chip->ibl.max_size, chip->ibl.size, chip->ibl.granularity); } static void vx_proc_init(struct vx_core *chip) { struct snd_info_entry *entry; if (! snd_card_proc_new(chip->card, "vx-status", &entry)) snd_info_set_text_ops(entry, chip, vx_proc_read); } /** * snd_vx_dsp_boot - load the DSP boot */ int snd_vx_dsp_boot(struct vx_core *chip, const struct firmware *boot) { int err; int cold_reset = !(chip->chip_status & VX_STAT_DEVICE_INIT); vx_reset_board(chip, cold_reset); vx_validate_irq(chip, 0); if ((err = snd_vx_load_boot_image(chip, boot)) < 0) return err; msleep(10); return 0; } EXPORT_SYMBOL(snd_vx_dsp_boot); /** * snd_vx_dsp_load - load the DSP image */ int snd_vx_dsp_load(struct vx_core *chip, const struct firmware *dsp) { unsigned int i; int err; unsigned int csum = 0; const unsigned char *image, *cptr; if (dsp->size % 3) return -EINVAL; vx_toggle_dac_mute(chip, 1); /* Transfert data buffer from PC to DSP */ for (i = 0; i < dsp->size; i += 3) { image = dsp->data + i; /* Wait DSP ready for a new read */ if ((err = vx_wait_isr_bit(chip, ISR_TX_EMPTY)) < 0) { printk(KERN_ERR "dsp loading error at position %d\n", i); return err; } cptr = image; csum ^= *cptr; csum = (csum >> 24) | (csum << 8); vx_outb(chip, TXH, *cptr++); csum ^= *cptr; csum = (csum >> 24) | (csum << 8); vx_outb(chip, TXM, *cptr++); csum ^= *cptr; csum = (csum >> 24) | (csum << 8); vx_outb(chip, TXL, *cptr++); } snd_printdd(KERN_DEBUG "checksum = 0x%08x\n", csum); msleep(200); if ((err = 
vx_wait_isr_bit(chip, ISR_CHK)) < 0) return err; vx_toggle_dac_mute(chip, 0); vx_test_and_ack(chip); vx_validate_irq(chip, 1); return 0; } EXPORT_SYMBOL(snd_vx_dsp_load); #ifdef CONFIG_PM /* * suspend */ int snd_vx_suspend(struct vx_core *chip) { unsigned int i; snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); chip->chip_status |= VX_STAT_IN_SUSPEND; for (i = 0; i < chip->hw->num_codecs; i++) snd_pcm_suspend_all(chip->pcm[i]); return 0; } EXPORT_SYMBOL(snd_vx_suspend); /* * resume */ int snd_vx_resume(struct vx_core *chip) { int i, err; chip->chip_status &= ~VX_STAT_CHIP_INIT; for (i = 0; i < 4; i++) { if (! chip->firmware[i]) continue; err = chip->ops->load_dsp(chip, i, chip->firmware[i]); if (err < 0) { snd_printk(KERN_ERR "vx: firmware resume error at DSP %d\n", i); return -EIO; } } chip->chip_status |= VX_STAT_CHIP_INIT; chip->chip_status &= ~VX_STAT_IN_SUSPEND; snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); return 0; } EXPORT_SYMBOL(snd_vx_resume); #endif /** * snd_vx_create - constructor for struct vx_core * @hw: hardware specific record * * this function allocates the instance and prepare for the hardware * initialization. * * return the instance pointer if successful, NULL in error. */ struct vx_core *snd_vx_create(struct snd_card *card, struct snd_vx_hardware *hw, struct snd_vx_ops *ops, int extra_size) { struct vx_core *chip; if (snd_BUG_ON(!card || !hw || !ops)) return NULL; chip = kzalloc(sizeof(*chip) + extra_size, GFP_KERNEL); if (! 
chip) { snd_printk(KERN_ERR "vx_core: no memory\n"); return NULL; } spin_lock_init(&chip->lock); spin_lock_init(&chip->irq_lock); chip->irq = -1; chip->hw = hw; chip->type = hw->type; chip->ops = ops; tasklet_init(&chip->tq, vx_interrupt, (unsigned long)chip); mutex_init(&chip->mixer_mutex); chip->card = card; card->private_data = chip; strcpy(card->driver, hw->name); sprintf(card->shortname, "Digigram %s", hw->name); vx_proc_init(chip); return chip; } EXPORT_SYMBOL(snd_vx_create); /* * module entries */ static int __init alsa_vx_core_init(void) { return 0; } static void __exit alsa_vx_core_exit(void) { } module_init(alsa_vx_core_init) module_exit(alsa_vx_core_exit)
gpl-2.0
pranav01/Xeon_sprout
drivers/net/wireless/iwlegacy/3945-rs.c
2145
25552
/****************************************************************************** * * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/mac80211.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/workqueue.h> #include "commands.h" #include "3945.h" #define RS_NAME "iwl-3945-rs" static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = { 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 }; static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = { 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125 }; static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = { 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186 }; static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = { 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0 }; struct il3945_tpt_entry { s8 min_rssi; u8 idx; }; static struct il3945_tpt_entry il3945_tpt_table_a[] = { {-60, RATE_54M_IDX}, {-64, RATE_48M_IDX}, {-72, 
RATE_36M_IDX}, {-80, RATE_24M_IDX}, {-84, RATE_18M_IDX}, {-85, RATE_12M_IDX}, {-87, RATE_9M_IDX}, {-89, RATE_6M_IDX} }; static struct il3945_tpt_entry il3945_tpt_table_g[] = { {-60, RATE_54M_IDX}, {-64, RATE_48M_IDX}, {-68, RATE_36M_IDX}, {-80, RATE_24M_IDX}, {-84, RATE_18M_IDX}, {-85, RATE_12M_IDX}, {-86, RATE_11M_IDX}, {-88, RATE_5M_IDX}, {-90, RATE_2M_IDX}, {-92, RATE_1M_IDX} }; #define RATE_MAX_WINDOW 62 #define RATE_FLUSH (3*HZ) #define RATE_WIN_FLUSH (HZ/2) #define IL39_RATE_HIGH_TH 11520 #define IL_SUCCESS_UP_TH 8960 #define IL_SUCCESS_DOWN_TH 10880 #define RATE_MIN_FAILURE_TH 6 #define RATE_MIN_SUCCESS_TH 8 #define RATE_DECREASE_TH 1920 #define RATE_RETRY_TH 15 static u8 il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band) { u32 idx = 0; u32 table_size = 0; struct il3945_tpt_entry *tpt_table = NULL; if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL) rssi = IL_MIN_RSSI_VAL; switch (band) { case IEEE80211_BAND_2GHZ: tpt_table = il3945_tpt_table_g; table_size = ARRAY_SIZE(il3945_tpt_table_g); break; case IEEE80211_BAND_5GHZ: tpt_table = il3945_tpt_table_a; table_size = ARRAY_SIZE(il3945_tpt_table_a); break; default: BUG(); break; } while (idx < table_size && rssi < tpt_table[idx].min_rssi) idx++; idx = min(idx, table_size - 1); return tpt_table[idx].idx; } static void il3945_clear_win(struct il3945_rate_scale_data *win) { win->data = 0; win->success_counter = 0; win->success_ratio = -1; win->counter = 0; win->average_tpt = IL_INVALID_VALUE; win->stamp = 0; } /** * il3945_rate_scale_flush_wins - flush out the rate scale wins * * Returns the number of wins that have gathered data but were * not flushed. If there were any that were not flushed, then * reschedule the rate flushing routine. 
*/ static int il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta) { int unflushed = 0; int i; unsigned long flags; struct il_priv *il __maybe_unused = rs_sta->il; /* * For each rate, if we have collected data on that rate * and it has been more than RATE_WIN_FLUSH * since we flushed, clear out the gathered stats */ for (i = 0; i < RATE_COUNT_3945; i++) { if (!rs_sta->win[i].counter) continue; spin_lock_irqsave(&rs_sta->lock, flags); if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) { D_RATE("flushing %d samples of rate " "idx %d\n", rs_sta->win[i].counter, i); il3945_clear_win(&rs_sta->win[i]); } else unflushed++; spin_unlock_irqrestore(&rs_sta->lock, flags); } return unflushed; } #define RATE_FLUSH_MAX 5000 /* msec */ #define RATE_FLUSH_MIN 50 /* msec */ #define IL_AVERAGE_PACKETS 1500 static void il3945_bg_rate_scale_flush(unsigned long data) { struct il3945_rs_sta *rs_sta = (void *)data; struct il_priv *il __maybe_unused = rs_sta->il; int unflushed = 0; unsigned long flags; u32 packet_count, duration, pps; D_RATE("enter\n"); unflushed = il3945_rate_scale_flush_wins(rs_sta); spin_lock_irqsave(&rs_sta->lock, flags); /* Number of packets Rx'd since last time this timer ran */ packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; rs_sta->last_tx_packets = rs_sta->tx_packets + 1; if (unflushed) { duration = jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); D_RATE("Tx'd %d packets in %dms\n", packet_count, duration); /* Determine packets per second */ if (duration) pps = (packet_count * 1000) / duration; else pps = 0; if (pps) { duration = (IL_AVERAGE_PACKETS * 1000) / pps; if (duration < RATE_FLUSH_MIN) duration = RATE_FLUSH_MIN; else if (duration > RATE_FLUSH_MAX) duration = RATE_FLUSH_MAX; } else duration = RATE_FLUSH_MAX; rs_sta->flush_time = msecs_to_jiffies(duration); D_RATE("new flush period: %d msec ave %d\n", duration, packet_count); mod_timer(&rs_sta->rate_scale_flush, jiffies + rs_sta->flush_time); 
rs_sta->last_partial_flush = jiffies; } else { rs_sta->flush_time = RATE_FLUSH; rs_sta->flush_pending = 0; } /* If there weren't any unflushed entries, we don't schedule the timer * to run again */ rs_sta->last_flush = jiffies; spin_unlock_irqrestore(&rs_sta->lock, flags); D_RATE("leave\n"); } /** * il3945_collect_tx_data - Update the success/failure sliding win * * We keep a sliding win of the last 64 packets transmitted * at this rate. win->data contains the bitmask of successful * packets. */ static void il3945_collect_tx_data(struct il3945_rs_sta *rs_sta, struct il3945_rate_scale_data *win, int success, int retries, int idx) { unsigned long flags; s32 fail_count; struct il_priv *il __maybe_unused = rs_sta->il; if (!retries) { D_RATE("leave: retries == 0 -- should be at least 1\n"); return; } spin_lock_irqsave(&rs_sta->lock, flags); /* * Keep track of only the latest 62 tx frame attempts in this rate's * history win; anything older isn't really relevant any more. * If we have filled up the sliding win, drop the oldest attempt; * if the oldest attempt (highest bit in bitmap) shows "success", * subtract "1" from the success counter (this is the main reason * we keep these bitmaps!). * */ while (retries > 0) { if (win->counter >= RATE_MAX_WINDOW) { /* remove earliest */ win->counter = RATE_MAX_WINDOW - 1; if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) { win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1)); win->success_counter--; } } /* Increment frames-attempted counter */ win->counter++; /* Shift bitmap by one frame (throw away oldest history), * OR in "1", and increment "success" if this * frame was successful. */ win->data <<= 1; if (success > 0) { win->success_counter++; win->data |= 0x1; success--; } retries--; } /* Calculate current success ratio, avoid divide-by-0! 
*/ if (win->counter > 0) win->success_ratio = 128 * (100 * win->success_counter) / win->counter; else win->success_ratio = IL_INVALID_VALUE; fail_count = win->counter - win->success_counter; /* Calculate average throughput, if we have enough history. */ if (fail_count >= RATE_MIN_FAILURE_TH || win->success_counter >= RATE_MIN_SUCCESS_TH) win->average_tpt = ((win->success_ratio * rs_sta->expected_tpt[idx] + 64) / 128); else win->average_tpt = IL_INVALID_VALUE; /* Tag this win as having been updated */ win->stamp = jiffies; spin_unlock_irqrestore(&rs_sta->lock, flags); } /* * Called after adding a new station to initialize rate scaling */ void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hw *hw = il->hw; struct ieee80211_conf *conf = &il->hw->conf; struct il3945_sta_priv *psta; struct il3945_rs_sta *rs_sta; struct ieee80211_supported_band *sband; int i; D_INFO("enter\n"); if (sta_id == il->hw_params.bcast_id) goto out; psta = (struct il3945_sta_priv *)sta->drv_priv; rs_sta = &psta->rs_sta; sband = hw->wiphy->bands[conf->chandef.chan->band]; rs_sta->il = il; rs_sta->start_rate = RATE_INVALID; /* default to just 802.11b */ rs_sta->expected_tpt = il3945_expected_tpt_b; rs_sta->last_partial_flush = jiffies; rs_sta->last_flush = jiffies; rs_sta->flush_time = RATE_FLUSH; rs_sta->last_tx_packets = 0; rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush; for (i = 0; i < RATE_COUNT_3945; i++) il3945_clear_win(&rs_sta->win[i]); /* TODO: what is a good starting rate for STA? About middle? Maybe not * the lowest or the highest rate.. Could consider using RSSI from * previous packets? Need to have IEEE 802.1X auth succeed immediately * after assoc.. 
*/ for (i = sband->n_bitrates - 1; i >= 0; i--) { if (sta->supp_rates[sband->band] & (1 << i)) { rs_sta->last_txrate_idx = i; break; } } il->_3945.sta_supp_rates = sta->supp_rates[sband->band]; /* For 5 GHz band it start at IL_FIRST_OFDM_RATE */ if (sband->band == IEEE80211_BAND_5GHZ) { rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE; il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE; } out: il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; D_INFO("leave\n"); } static void * il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { return hw->priv; } /* rate scale requires free function to be implemented */ static void il3945_rs_free(void *il) { } static void * il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp) { struct il3945_rs_sta *rs_sta; struct il3945_sta_priv *psta = (void *)sta->drv_priv; struct il_priv *il __maybe_unused = il_priv; D_RATE("enter\n"); rs_sta = &psta->rs_sta; spin_lock_init(&rs_sta->lock); init_timer(&rs_sta->rate_scale_flush); D_RATE("leave\n"); return rs_sta; } static void il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta) { struct il3945_rs_sta *rs_sta = il_sta; /* * Be careful not to use any members of il3945_rs_sta (like trying * to use il_priv to print out debugging) since it may not be fully * initialized at this point. */ del_timer_sync(&rs_sta->rate_scale_flush); } /** * il3945_rs_tx_status - Update rate control values based on Tx results * * NOTE: Uses il_priv->retry_rate for the # of retries attempted by * the hardware for each rate. 
*/ static void il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *il_sta, struct sk_buff *skb) { s8 retries = 0, current_count; int scale_rate_idx, first_idx, last_idx; unsigned long flags; struct il_priv *il = (struct il_priv *)il_rate; struct il3945_rs_sta *rs_sta = il_sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); D_RATE("enter\n"); retries = info->status.rates[0].count; /* Sanity Check for retries */ if (retries > RATE_RETRY_TH) retries = RATE_RETRY_TH; first_idx = sband->bitrates[info->status.rates[0].idx].hw_value; if (first_idx < 0 || first_idx >= RATE_COUNT_3945) { D_RATE("leave: Rate out of bounds: %d\n", first_idx); return; } if (!il_sta) { D_RATE("leave: No STA il data to update!\n"); return; } /* Treat uninitialized rate scaling data same as non-existing. */ if (!rs_sta->il) { D_RATE("leave: STA il data uninitialized!\n"); return; } rs_sta->tx_packets++; scale_rate_idx = first_idx; last_idx = first_idx; /* * Update the win for each rate. We determine which rates * were Tx'd based on the total number of retries vs. the number * of retries configured for each rate -- currently set to the * il value 'retry_rate' vs. 
rate specific * * On exit from this while loop last_idx indicates the rate * at which the frame was finally transmitted (or failed if no * ACK) */ while (retries > 1) { if ((retries - 1) < il->retry_rate) { current_count = (retries - 1); last_idx = scale_rate_idx; } else { current_count = il->retry_rate; last_idx = il3945_rs_next_rate(il, scale_rate_idx); } /* Update this rate accounting for as many retries * as was used for it (per current_count) */ il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0, current_count, scale_rate_idx); D_RATE("Update rate %d for %d retries.\n", scale_rate_idx, current_count); retries -= current_count; scale_rate_idx = last_idx; } /* Update the last idx win with success/failure based on ACK */ D_RATE("Update rate %d with %s.\n", last_idx, (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure"); il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx], info->flags & IEEE80211_TX_STAT_ACK, 1, last_idx); /* We updated the rate scale win -- if its been more than * flush_time since the last run, schedule the flush * again */ spin_lock_irqsave(&rs_sta->lock, flags); if (!rs_sta->flush_pending && time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) { rs_sta->last_partial_flush = jiffies; rs_sta->flush_pending = 1; mod_timer(&rs_sta->rate_scale_flush, jiffies + rs_sta->flush_time); } spin_unlock_irqrestore(&rs_sta->lock, flags); D_RATE("leave\n"); } static u16 il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask, enum ieee80211_band band) { u8 high = RATE_INVALID; u8 low = RATE_INVALID; struct il_priv *il __maybe_unused = rs_sta->il; /* 802.11A walks to the next literal adjacent rate in * the rate table */ if (unlikely(band == IEEE80211_BAND_5GHZ)) { int i; u32 mask; /* Find the previous rate that is in the rate mask */ i = idx - 1; for (mask = (1 << i); i >= 0; i--, mask >>= 1) { if (rate_mask & mask) { low = i; break; } } /* Find the next rate that is in the rate mask */ i = idx + 1; for (mask 
= (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) { if (rate_mask & mask) { high = i; break; } } return (high << 8) | low; } low = idx; while (low != RATE_INVALID) { if (rs_sta->tgg) low = il3945_rates[low].prev_rs_tgg; else low = il3945_rates[low].prev_rs; if (low == RATE_INVALID) break; if (rate_mask & (1 << low)) break; D_RATE("Skipping masked lower rate: %d\n", low); } high = idx; while (high != RATE_INVALID) { if (rs_sta->tgg) high = il3945_rates[high].next_rs_tgg; else high = il3945_rates[high].next_rs; if (high == RATE_INVALID) break; if (rate_mask & (1 << high)) break; D_RATE("Skipping masked higher rate: %d\n", high); } return (high << 8) | low; } /** * il3945_rs_get_rate - find the rate for the requested packet * * Returns the ieee80211_rate structure allocated by the driver. * * The rate control algorithm has no internal mapping between hw_mode's * rate ordering and the rate ordering used by the rate control algorithm. * * The rate control algorithm uses a single table of rates that goes across * the entire A/B/G spectrum vs. being limited to just one particular * hw_mode. 
* * As such, we can't convert the idx obtained below into the hw_mode's * rate table and must reference the driver allocated rate table * */ static void il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, struct ieee80211_tx_rate_control *txrc) { struct ieee80211_supported_band *sband = txrc->sband; struct sk_buff *skb = txrc->skb; u8 low = RATE_INVALID; u8 high = RATE_INVALID; u16 high_low; int idx; struct il3945_rs_sta *rs_sta = il_sta; struct il3945_rate_scale_data *win = NULL; int current_tpt = IL_INVALID_VALUE; int low_tpt = IL_INVALID_VALUE; int high_tpt = IL_INVALID_VALUE; u32 fail_count; s8 scale_action = 0; unsigned long flags; u16 rate_mask; s8 max_rate_idx = -1; struct il_priv *il __maybe_unused = (struct il_priv *)il_r; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); D_RATE("enter\n"); /* Treat uninitialized rate scaling data same as non-existing. */ if (rs_sta && !rs_sta->il) { D_RATE("Rate scaling information not initialized yet.\n"); il_sta = NULL; } if (rate_control_send_low(sta, il_sta, txrc)) return; rate_mask = sta->supp_rates[sband->band]; /* get user max rate if set */ max_rate_idx = txrc->max_rate_idx; if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1) max_rate_idx += IL_FIRST_OFDM_RATE; if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT) max_rate_idx = -1; idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1); if (sband->band == IEEE80211_BAND_5GHZ) rate_mask = rate_mask << IL_FIRST_OFDM_RATE; spin_lock_irqsave(&rs_sta->lock, flags); /* for recent assoc, choose best rate regarding * to rssi value */ if (rs_sta->start_rate != RATE_INVALID) { if (rs_sta->start_rate < idx && (rate_mask & (1 << rs_sta->start_rate))) idx = rs_sta->start_rate; rs_sta->start_rate = RATE_INVALID; } /* force user max rate if set by user */ if (max_rate_idx != -1 && max_rate_idx < idx) { if (rate_mask & (1 << max_rate_idx)) idx = max_rate_idx; } win = &(rs_sta->win[idx]); fail_count = win->counter - 
win->success_counter; if (fail_count < RATE_MIN_FAILURE_TH && win->success_counter < RATE_MIN_SUCCESS_TH) { spin_unlock_irqrestore(&rs_sta->lock, flags); D_RATE("Invalid average_tpt on rate %d: " "counter: %d, success_counter: %d, " "expected_tpt is %sNULL\n", idx, win->counter, win->success_counter, rs_sta->expected_tpt ? "not " : ""); /* Can't calculate this yet; not enough history */ win->average_tpt = IL_INVALID_VALUE; goto out; } current_tpt = win->average_tpt; high_low = il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band); low = high_low & 0xff; high = (high_low >> 8) & 0xff; /* If user set max rate, dont allow higher than user constrain */ if (max_rate_idx != -1 && max_rate_idx < high) high = RATE_INVALID; /* Collect Measured throughputs of adjacent rates */ if (low != RATE_INVALID) low_tpt = rs_sta->win[low].average_tpt; if (high != RATE_INVALID) high_tpt = rs_sta->win[high].average_tpt; spin_unlock_irqrestore(&rs_sta->lock, flags); scale_action = 0; /* Low success ratio , need to drop the rate */ if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) { D_RATE("decrease rate because of low success_ratio\n"); scale_action = -1; /* No throughput measured yet for adjacent rates, * try increase */ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) { if (high != RATE_INVALID && win->success_ratio >= RATE_INCREASE_TH) scale_action = 1; else if (low != RATE_INVALID) scale_action = 0; /* Both adjacent throughputs are measured, but neither one has * better throughput; we're using the best rate, don't change * it! 
*/ } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE && low_tpt < current_tpt && high_tpt < current_tpt) { D_RATE("No action -- low [%d] & high [%d] < " "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt); scale_action = 0; /* At least one of the rates has better throughput */ } else { if (high_tpt != IL_INVALID_VALUE) { /* High rate has better throughput, Increase * rate */ if (high_tpt > current_tpt && win->success_ratio >= RATE_INCREASE_TH) scale_action = 1; else { D_RATE("decrease rate because of high tpt\n"); scale_action = 0; } } else if (low_tpt != IL_INVALID_VALUE) { if (low_tpt > current_tpt) { D_RATE("decrease rate because of low tpt\n"); scale_action = -1; } else if (win->success_ratio >= RATE_INCREASE_TH) { /* Lower rate has better * throughput,decrease rate */ scale_action = 1; } } } /* Sanity check; asked for decrease, but success rate or throughput * has been good at old rate. Don't change it. */ if (scale_action == -1 && low != RATE_INVALID && (win->success_ratio > RATE_HIGH_TH || current_tpt > 100 * rs_sta->expected_tpt[low])) scale_action = 0; switch (scale_action) { case -1: /* Decrese rate */ if (low != RATE_INVALID) idx = low; break; case 1: /* Increase rate */ if (high != RATE_INVALID) idx = high; break; case 0: default: /* No change */ break; } D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action, low, high); out: if (sband->band == IEEE80211_BAND_5GHZ) { if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE)) idx = IL_FIRST_OFDM_RATE; rs_sta->last_txrate_idx = idx; info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE; } else { rs_sta->last_txrate_idx = idx; info->control.rates[0].idx = rs_sta->last_txrate_idx; } info->control.rates[0].count = 1; D_RATE("leave: %d\n", idx); } #ifdef CONFIG_MAC80211_DEBUGFS static ssize_t il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char *buff; int desc = 0; int j; ssize_t ret; struct il3945_rs_sta *lq_sta = 
file->private_data; buff = kmalloc(1024, GFP_KERNEL); if (!buff) return -ENOMEM; desc += sprintf(buff + desc, "tx packets=%d last rate idx=%d\n" "rate=0x%X flush time %d\n", lq_sta->tx_packets, lq_sta->last_txrate_idx, lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time)); for (j = 0; j < RATE_COUNT_3945; j++) { desc += sprintf(buff + desc, "counter=%d success=%d %%=%d\n", lq_sta->win[j].counter, lq_sta->win[j].success_counter, lq_sta->win[j].success_ratio); } ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } static const struct file_operations rs_sta_dbgfs_stats_table_ops = { .read = il3945_sta_dbgfs_stats_table_read, .open = simple_open, .llseek = default_llseek, }; static void il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir) { struct il3945_rs_sta *lq_sta = il_sta; lq_sta->rs_sta_dbgfs_stats_table_file = debugfs_create_file("rate_stats_table", 0600, dir, lq_sta, &rs_sta_dbgfs_stats_table_ops); } static void il3945_remove_debugfs(void *il, void *il_sta) { struct il3945_rs_sta *lq_sta = il_sta; debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); } #endif /* * Initialization of rate scaling information is done by driver after * the station is added. Since mac80211 calls this function before a * station is added we ignore it. 
*/ static void il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *il_sta) { } static struct rate_control_ops rs_ops = { .module = NULL, .name = RS_NAME, .tx_status = il3945_rs_tx_status, .get_rate = il3945_rs_get_rate, .rate_init = il3945_rs_rate_init_stub, .alloc = il3945_rs_alloc, .free = il3945_rs_free, .alloc_sta = il3945_rs_alloc_sta, .free_sta = il3945_rs_free_sta, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = il3945_add_debugfs, .remove_sta_debugfs = il3945_remove_debugfs, #endif }; void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) { struct il_priv *il = hw->priv; s32 rssi = 0; unsigned long flags; struct il3945_rs_sta *rs_sta; struct ieee80211_sta *sta; struct il3945_sta_priv *psta; D_RATE("enter\n"); rcu_read_lock(); sta = ieee80211_find_sta(il->vif, il->stations[sta_id].sta.sta.addr); if (!sta) { D_RATE("Unable to find station to initialize rate scaling.\n"); rcu_read_unlock(); return; } psta = (void *)sta->drv_priv; rs_sta = &psta->rs_sta; spin_lock_irqsave(&rs_sta->lock, flags); rs_sta->tgg = 0; switch (il->band) { case IEEE80211_BAND_2GHZ: /* TODO: this always does G, not a regression */ if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) { rs_sta->tgg = 1; rs_sta->expected_tpt = il3945_expected_tpt_g_prot; } else rs_sta->expected_tpt = il3945_expected_tpt_g; break; case IEEE80211_BAND_5GHZ: rs_sta->expected_tpt = il3945_expected_tpt_a; break; default: BUG(); break; } spin_unlock_irqrestore(&rs_sta->lock, flags); rssi = il->_3945.last_rx_rssi; if (rssi == 0) rssi = IL_MIN_RSSI_VAL; D_RATE("Network RSSI: %d\n", rssi); rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band); D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp); rcu_read_unlock(); } int il3945_rate_control_register(void) { return ieee80211_rate_control_register(&rs_ops); } void il3945_rate_control_unregister(void) { 
ieee80211_rate_control_unregister(&rs_ops); }
gpl-2.0